2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 /* ---- L2CAP channels ---- */
/* NOTE(review): truncated listing fragment — interior source lines are
 * missing between the numbered lines; comments describe visible code only. */

/* Look up a channel on @conn by destination CID (RCU list walk). */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
80 struct l2cap_chan *c, *r = NULL;
84 list_for_each_entry_rcu(c, &conn->chan_l, list) {

/* Look up a channel on @conn by source CID. */
95 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
97 struct l2cap_chan *c, *r = NULL;
101 list_for_each_entry_rcu(c, &conn->chan_l, list) {
102 if (c->scid == cid) {

112 /* Find channel with given SCID.
113 * Returns locked socket */
114 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
116 struct l2cap_chan *c;
118 c = __l2cap_get_chan_by_scid(conn, cid);

/* Look up a channel by the ident of its outstanding signalling command. */
124 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
126 struct l2cap_chan *c, *r = NULL;
130 list_for_each_entry_rcu(c, &conn->chan_l, list) {
131 if (c->ident == ident) {

/* Wrapper around __l2cap_get_chan_by_ident() — presumably adds locking;
 * the locking lines are not visible in this fragment. */
141 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
143 struct l2cap_chan *c;
145 c = __l2cap_get_chan_by_ident(conn, ident);

/* Global (cross-connection) lookup by registered PSM and source bdaddr.
 * Walks chan_list without taking chan_list_lock itself, so the caller
 * must hold it (visible callers below do). */
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
153 struct l2cap_chan *c;
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* NOTE(review): truncated listing fragment — interior lines missing. */

/* Register a PSM for @chan under chan_list_lock.  A non-zero @psm is
 * rejected if already bound to @src; psm == 0 requests dynamic
 * allocation from the odd values 0x1001..0x10ff (loop below). */
162 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
166 write_lock(&chan_list_lock);
168 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
181 for (p = 0x1001; p < 0x1100; p += 2)
182 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
183 chan->psm = cpu_to_le16(p);
184 chan->sport = cpu_to_le16(p);
191 write_unlock(&chan_list_lock);

/* Assign a fixed source CID to @chan; serialised by chan_list_lock. */
195 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
197 write_lock(&chan_list_lock);
201 write_unlock(&chan_list_lock);

/* Return the first dynamic CID on @conn not already in use as an SCID. */
206 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
208 u16 cid = L2CAP_CID_DYN_START;
210 for (; cid < L2CAP_CID_DYN_END; cid++) {
211 if (!__l2cap_get_chan_by_scid(conn, cid))
/* NOTE(review): truncated listing fragment — most switch cases of
 * state_to_string() are not visible here. */

/* Map a BT_* channel state to a printable name for debug output. */
218 static char *state_to_string(int state)
222 return "BT_CONNECTED";
232 return "BT_CONNECT2";
241 return "invalid state";

/* Transition @chan to @state and notify the owner via ops->state_change. */
244 static void l2cap_state_change(struct l2cap_chan *chan, int state)
246 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
247 state_to_string(state));
250 chan->ops->state_change(chan->data, state);

/* Delayed-work handler for the per-channel timer: derive an errno from
 * the current state (ECONNREFUSED for the visible cases), close the
 * channel, invoke ops->close and drop the timer's channel reference. */
253 static void l2cap_chan_timeout(struct work_struct *work)
255 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
257 struct sock *sk = chan->sk;
260 BT_DBG("chan %p state %d", chan, chan->state);
264 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
265 reason = ECONNREFUSED;
266 else if (chan->state == BT_CONNECT &&
267 chan->sec_level != BT_SECURITY_SDP)
268 reason = ECONNREFUSED;
272 l2cap_chan_close(chan, reason);
276 chan->ops->close(chan->data);
277 l2cap_chan_put(chan);
/* NOTE(review): truncated listing fragment — interior lines missing. */

/* Allocate a channel bound to @sk: zero-initialised, linked onto the
 * global chan_list, timer work initialised, state BT_OPEN, refcount 1.
 * (The NULL check after kzalloc is not visible in this fragment.) */
280 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
282 struct l2cap_chan *chan;
284 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
290 write_lock(&chan_list_lock);
291 list_add(&chan->global_l, &chan_list);
292 write_unlock(&chan_list_lock);
294 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
296 chan->state = BT_OPEN;
298 atomic_set(&chan->refcnt, 1);
300 BT_DBG("sk %p chan %p", sk, chan);

/* Unlink @chan from the global list and drop the creation reference. */
305 void l2cap_chan_destroy(struct l2cap_chan *chan)
307 write_lock(&chan_list_lock);
308 list_del(&chan->global_l);
309 write_unlock(&chan_list_lock);
311 l2cap_chan_put(chan);
/* NOTE(review): truncated listing fragment — interior lines missing. */

/* Attach @chan to @conn: pick SCID/DCID/MTU by channel type
 * (LE fixed CID, dynamic CID for ACL connection-oriented,
 * fixed CIDs for connectionless and raw), set best-effort QoS
 * defaults, take a reference and link onto the RCU channel list. */
314 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
316 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
317 chan->psm, chan->dcid);
319 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
323 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
324 if (conn->hcon->type == LE_LINK) {
326 chan->omtu = L2CAP_LE_DEFAULT_MTU;
327 chan->scid = L2CAP_CID_LE_DATA;
328 chan->dcid = L2CAP_CID_LE_DATA;
330 /* Alloc CID for connection-oriented socket */
331 chan->scid = l2cap_alloc_cid(conn);
332 chan->omtu = L2CAP_DEFAULT_MTU;
334 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
335 /* Connectionless socket */
336 chan->scid = L2CAP_CID_CONN_LESS;
337 chan->dcid = L2CAP_CID_CONN_LESS;
338 chan->omtu = L2CAP_DEFAULT_MTU;
340 /* Raw socket can send/recv signalling messages only */
341 chan->scid = L2CAP_CID_SIGNALING;
342 chan->dcid = L2CAP_CID_SIGNALING;
343 chan->omtu = L2CAP_DEFAULT_MTU;
346 chan->local_id = L2CAP_BESTEFFORT_ID;
347 chan->local_stype = L2CAP_SERV_BESTEFFORT;
348 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
349 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
350 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
351 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
353 l2cap_chan_hold(chan);
355 list_add_rcu(&chan->list, &conn->chan_l);
/* NOTE(review): truncated listing fragment — interior lines missing. */

359 * Must be called on the locked socket. */
/* Detach @chan from its connection: stop the channel timer, unlink from
 * the RCU list, drop references, mark the socket closed/zapped, wake
 * listeners, and for ERTM tear down timers and pending SREJ state. */
360 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
362 struct sock *sk = chan->sk;
363 struct l2cap_conn *conn = chan->conn;
364 struct sock *parent = bt_sk(sk)->parent;
366 __clear_chan_timer(chan);
368 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
371 /* Delete from channel list */
372 list_del_rcu(&chan->list);
375 l2cap_chan_put(chan);
378 hci_conn_put(conn->hcon);
381 l2cap_state_change(chan, BT_CLOSED);
382 sock_set_flag(sk, SOCK_ZAPPED);
388 bt_accept_unlink(sk);
389 parent->sk_data_ready(parent, 0);
391 sk->sk_state_change(sk);
393 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
394 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
397 skb_queue_purge(&chan->tx_q);
399 if (chan->mode == L2CAP_MODE_ERTM) {
400 struct srej_list *l, *tmp;
402 __clear_retrans_timer(chan);
403 __clear_monitor_timer(chan);
404 __clear_ack_timer(chan);
406 skb_queue_purge(&chan->srej_q);
408 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {

/* Close every not-yet-accepted child channel of a listening socket. */
415 static void l2cap_chan_cleanup_listen(struct sock *parent)
419 BT_DBG("parent %p", parent);
421 /* Close not yet accepted channels */
422 while ((sk = bt_accept_dequeue(parent, NULL))) {
423 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
424 __clear_chan_timer(chan);
426 l2cap_chan_close(chan, ECONNRESET);
428 chan->ops->close(chan->data);
/* NOTE(review): truncated listing fragment — the switch case labels
 * (BT_LISTEN, BT_CONNECTED, BT_CONNECT2, ...) are among the lines
 * missing here; the bodies below imply them but confirm against the
 * full source. */

/* Close @chan according to its current state: cleanup listeners, send
 * a Disconnect Request for established ACL channels, send a negative
 * Connect Response for half-open (CONNECT2) ACL channels, or simply
 * delete the channel / zap the socket otherwise. */
432 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
434 struct l2cap_conn *conn = chan->conn;
435 struct sock *sk = chan->sk;
437 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
439 switch (chan->state) {
441 l2cap_chan_cleanup_listen(sk);
443 l2cap_state_change(chan, BT_CLOSED);
444 sock_set_flag(sk, SOCK_ZAPPED);
449 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
450 conn->hcon->type == ACL_LINK) {
451 __clear_chan_timer(chan);
452 __set_chan_timer(chan, sk->sk_sndtimeo);
453 l2cap_send_disconn_req(conn, chan, reason);
455 l2cap_chan_del(chan, reason);
459 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
460 conn->hcon->type == ACL_LINK) {
461 struct l2cap_conn_rsp rsp;
464 if (bt_sk(sk)->defer_setup)
465 result = L2CAP_CR_SEC_BLOCK;
467 result = L2CAP_CR_BAD_PSM;
468 l2cap_state_change(chan, BT_DISCONN);
470 rsp.scid = cpu_to_le16(chan->dcid);
471 rsp.dcid = cpu_to_le16(chan->scid);
472 rsp.result = cpu_to_le16(result);
473 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
474 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
478 l2cap_chan_del(chan, reason);
483 l2cap_chan_del(chan, reason);
487 sock_set_flag(sk, SOCK_ZAPPED);
/* NOTE(review): truncated listing fragment — interior lines missing. */

/* Map the channel's type / security level to an HCI authentication
 * requirement.  Raw channels request dedicated bonding, PSM 0x0001
 * (SDP) never bonds (and is demoted from LOW to SDP security), and
 * everything else requests general bonding. */
492 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
494 if (chan->chan_type == L2CAP_CHAN_RAW) {
495 switch (chan->sec_level) {
496 case BT_SECURITY_HIGH:
497 return HCI_AT_DEDICATED_BONDING_MITM;
498 case BT_SECURITY_MEDIUM:
499 return HCI_AT_DEDICATED_BONDING;
501 return HCI_AT_NO_BONDING;
503 } else if (chan->psm == cpu_to_le16(0x0001)) {
504 if (chan->sec_level == BT_SECURITY_LOW)
505 chan->sec_level = BT_SECURITY_SDP;
507 if (chan->sec_level == BT_SECURITY_HIGH)
508 return HCI_AT_NO_BONDING_MITM;
510 return HCI_AT_NO_BONDING;
512 switch (chan->sec_level) {
513 case BT_SECURITY_HIGH:
514 return HCI_AT_GENERAL_BONDING_MITM;
515 case BT_SECURITY_MEDIUM:
516 return HCI_AT_GENERAL_BONDING;
518 return HCI_AT_NO_BONDING;

523 /* Service level security */
/* Request link-level security matching the channel's requirements. */
524 int l2cap_chan_check_security(struct l2cap_chan *chan)
526 struct l2cap_conn *conn = chan->conn;
529 auth_type = l2cap_get_auth_type(chan);
531 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);

/* Allocate the next signalling command identifier, wrapping within the
 * kernel's 1..128 range, under conn->lock. */
534 static u8 l2cap_get_ident(struct l2cap_conn *conn)
538 /* Get next available identificator.
539 * 1 - 128 are used by kernel.
540 * 129 - 199 are reserved.
541 * 200 - 254 are used by utilities like l2ping, etc.
544 spin_lock(&conn->lock);
546 if (++conn->tx_ident > 128)
551 spin_unlock(&conn->lock);
/* NOTE(review): truncated listing fragment — interior lines missing. */

/* Build a signalling command PDU and push it out on the HCI channel,
 * marking it non-flushable when the controller supports that and
 * forcing active mode at maximum priority. */
556 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
558 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
561 BT_DBG("code 0x%2.2x", code);
566 if (lmp_no_flush_capable(conn->hcon->hdev))
567 flags = ACL_START_NO_FLUSH;
571 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
572 skb->priority = HCI_PRIO_MAX;
574 hci_send_acl(conn->hchan, skb, flags);

/* Transmit a data skb for @chan, choosing the flush flag from the
 * channel's FLUSHABLE flag and the controller capability. */
577 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
579 struct hci_conn *hcon = chan->conn->hcon;
582 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
585 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
586 lmp_no_flush_capable(hcon->hdev))
587 flags = ACL_START_NO_FLUSH;
591 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
592 hci_send_acl(chan->conn->hchan, skb, flags);

/* Build and send an ERTM supervisory frame carrying @control.  Header
 * size depends on extended vs. enhanced control fields; an FCS is
 * appended when CRC16 checking is configured.  Pending F/P bits are
 * consumed (test_and_clear) and folded into the control field. */
595 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
598 struct l2cap_hdr *lh;
599 struct l2cap_conn *conn = chan->conn;
602 if (chan->state != BT_CONNECTED)
605 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
606 hlen = L2CAP_EXT_HDR_SIZE;
608 hlen = L2CAP_ENH_HDR_SIZE;
610 if (chan->fcs == L2CAP_FCS_CRC16)
611 hlen += L2CAP_FCS_SIZE;
613 BT_DBG("chan %p, control 0x%8.8x", chan, control);
615 count = min_t(unsigned int, conn->mtu, hlen);
617 control |= __set_sframe(chan);
619 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
620 control |= __set_ctrl_final(chan);
622 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
623 control |= __set_ctrl_poll(chan);
625 skb = bt_skb_alloc(count, GFP_ATOMIC);
629 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
630 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
631 lh->cid = cpu_to_le16(chan->dcid);
633 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
635 if (chan->fcs == L2CAP_FCS_CRC16) {
636 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
637 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
640 skb->priority = HCI_PRIO_MAX;
641 l2cap_do_send(chan, skb);

/* Send RNR while locally busy (recording that RNR went out),
 * otherwise RR; always acknowledges up to buffer_seq. */
644 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
646 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
647 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
648 set_bit(CONN_RNR_SENT, &chan->conn_state);
650 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
652 control |= __set_reqseq(chan, chan->buffer_seq);
654 l2cap_send_sframe(chan, control);
/* NOTE(review): truncated listing fragment — interior lines missing. */

/* True when no Connect Request is outstanding for this channel. */
657 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
659 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);

/* Kick off connection establishment for @chan.  If the feature-mask
 * exchange already completed, send a Connect Request (once security
 * clears and none is pending); otherwise first send an Information
 * Request for the feature mask and arm the info timer. */
662 static void l2cap_do_start(struct l2cap_chan *chan)
664 struct l2cap_conn *conn = chan->conn;
666 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
667 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
670 if (l2cap_chan_check_security(chan) &&
671 __l2cap_no_conn_pending(chan)) {
672 struct l2cap_conn_req req;
673 req.scid = cpu_to_le16(chan->scid);
676 chan->ident = l2cap_get_ident(conn);
677 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
679 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
683 struct l2cap_info_req req;
684 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
686 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
687 conn->info_ident = l2cap_get_ident(conn);
689 schedule_delayed_work(&conn->info_timer,
690 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
692 l2cap_send_cmd(conn, conn->info_ident,
693 L2CAP_INFO_REQ, sizeof(req), &req);

/* Check @mode against both the remote feature mask and our own. */
697 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
699 u32 local_feat_mask = l2cap_feat_mask;
701 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
704 case L2CAP_MODE_ERTM:
705 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
706 case L2CAP_MODE_STREAMING:
707 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;

/* Send a Disconnect Request for @chan (stopping ERTM timers first)
 * and move the channel to BT_DISCONN. */
713 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
716 struct l2cap_disconn_req req;
723 if (chan->mode == L2CAP_MODE_ERTM) {
724 __clear_retrans_timer(chan);
725 __clear_monitor_timer(chan);
726 __clear_ack_timer(chan);
729 req.dcid = cpu_to_le16(chan->dcid);
730 req.scid = cpu_to_le16(chan->scid);
731 l2cap_send_cmd(conn, l2cap_get_ident(conn),
732 L2CAP_DISCONN_REQ, sizeof(req), &req);
734 l2cap_state_change(chan, BT_DISCONN);
/* NOTE(review): truncated listing fragment — interior lines missing. */

738 /* ---- L2CAP connections ---- */
/* Walk all channels on @conn and advance their setup state machines:
 * BT_CONNECT channels get a Connect Request sent (or are closed if
 * the configured mode is unsupported on a state-2 device); BT_CONNECT2
 * channels get a Connect Response (pending for deferred setup or
 * unfinished authentication, success otherwise) and, on success, the
 * initial Configure Request. */
739 static void l2cap_conn_start(struct l2cap_conn *conn)
741 struct l2cap_chan *chan;
743 BT_DBG("conn %p", conn);
747 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
748 struct sock *sk = chan->sk;
752 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
757 if (chan->state == BT_CONNECT) {
758 struct l2cap_conn_req req;
760 if (!l2cap_chan_check_security(chan) ||
761 !__l2cap_no_conn_pending(chan)) {
766 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
767 && test_bit(CONF_STATE2_DEVICE,
768 &chan->conf_state)) {
769 /* l2cap_chan_close() calls list_del(chan)
770 * so release the lock */
771 l2cap_chan_close(chan, ECONNRESET);
776 req.scid = cpu_to_le16(chan->scid);
779 chan->ident = l2cap_get_ident(conn);
780 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
782 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
785 } else if (chan->state == BT_CONNECT2) {
786 struct l2cap_conn_rsp rsp;
788 rsp.scid = cpu_to_le16(chan->dcid);
789 rsp.dcid = cpu_to_le16(chan->scid);
791 if (l2cap_chan_check_security(chan)) {
792 if (bt_sk(sk)->defer_setup) {
793 struct sock *parent = bt_sk(sk)->parent;
794 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
795 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
797 parent->sk_data_ready(parent, 0);
800 l2cap_state_change(chan, BT_CONFIG);
801 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
802 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
805 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
806 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
809 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
812 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
813 rsp.result != L2CAP_CR_SUCCESS) {
818 set_bit(CONF_REQ_SENT, &chan->conf_state);
819 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
820 l2cap_build_conf_req(chan, buf), buf);
821 chan->num_conf_req++;
/* NOTE(review): truncated listing fragment — interior lines missing. */

830 /* Find socket with cid and source bdaddr.
831 * Returns closest match, locked.
/* Global search by SCID: an exact source-address match returns
 * immediately; a BDADDR_ANY listener is remembered as the fallback. */
833 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
835 struct l2cap_chan *c, *c1 = NULL;
837 read_lock(&chan_list_lock);
839 list_for_each_entry(c, &chan_list, global_l) {
840 struct sock *sk = c->sk;
842 if (state && c->state != state)
845 if (c->scid == cid) {
847 if (!bacmp(&bt_sk(sk)->src, src)) {
848 read_unlock(&chan_list_lock);
853 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
858 read_unlock(&chan_list_lock);

/* Incoming LE connection: find the listener on the LE data CID,
 * create a child channel (respecting the accept backlog), copy the
 * addresses, enqueue it for accept() and mark it connected. */
863 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
865 struct sock *parent, *sk;
866 struct l2cap_chan *chan, *pchan;
870 /* Check if we have socket listening on cid */
871 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
880 /* Check for backlog size */
881 if (sk_acceptq_is_full(parent)) {
882 BT_DBG("backlog full %d", parent->sk_ack_backlog);
886 chan = pchan->ops->new_connection(pchan->data);
892 hci_conn_hold(conn->hcon);
894 bacpy(&bt_sk(sk)->src, conn->src);
895 bacpy(&bt_sk(sk)->dst, conn->dst);
897 bt_accept_enqueue(parent, sk);
899 l2cap_chan_add(conn, chan);
901 __set_chan_timer(chan, sk->sk_sndtimeo);
903 l2cap_state_change(chan, BT_CONNECTED);
904 parent->sk_data_ready(parent, 0);
907 release_sock(parent);

/* Mark a channel fully connected: reset config state, stop the timer
 * and wake the socket (and the accepting parent, when present). */
910 static void l2cap_chan_ready(struct sock *sk)
912 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
913 struct sock *parent = bt_sk(sk)->parent;
915 BT_DBG("sk %p, parent %p", sk, parent);
917 chan->conf_state = 0;
918 __clear_chan_timer(chan);
920 l2cap_state_change(chan, BT_CONNECTED);
921 sk->sk_state_change(sk);
924 parent->sk_data_ready(parent, 0);
/* NOTE(review): truncated listing fragment — interior lines missing. */

/* Link is up: handle LE incoming/outgoing security, then walk every
 * channel — LE channels become ready once SMP security is satisfied,
 * non-connection-oriented channels go straight to BT_CONNECTED, and
 * connection-oriented channels in BT_CONNECT start signalling. */
927 static void l2cap_conn_ready(struct l2cap_conn *conn)
929 struct l2cap_chan *chan;
931 BT_DBG("conn %p", conn);
933 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
934 l2cap_le_conn_ready(conn);
936 if (conn->hcon->out && conn->hcon->type == LE_LINK)
937 smp_conn_security(conn, conn->hcon->pending_sec_level);
941 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
942 struct sock *sk = chan->sk;
946 if (conn->hcon->type == LE_LINK) {
947 if (smp_conn_security(conn, chan->sec_level))
948 l2cap_chan_ready(sk);
950 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
951 __clear_chan_timer(chan);
952 l2cap_state_change(chan, BT_CONNECTED);
953 sk->sk_state_change(sk);
955 } else if (chan->state == BT_CONNECT)
956 l2cap_do_start(chan);

964 /* Notify sockets that we cannot guaranty reliability anymore */
/* Report @err on every channel flagged FORCE_RELIABLE. */
965 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
967 struct l2cap_chan *chan;
969 BT_DBG("conn %p", conn);
973 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
974 struct sock *sk = chan->sk;
976 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))

/* Info-request timer expired: treat the feature exchange as done and
 * resume connection setup for waiting channels. */
983 static void l2cap_info_timeout(struct work_struct *work)
985 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
988 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
989 conn->info_ident = 0;
991 l2cap_conn_start(conn);
/* NOTE(review): truncated listing fragment — interior lines missing. */

/* Tear down the L2CAP state attached to @hcon: free the partially
 * reassembled skb, delete and close every channel, drop the HCI
 * channel, cancel the info/security timers, destroy pending SMP
 * state, and detach from the hci_conn. */
994 static void l2cap_conn_del(struct hci_conn *hcon, int err)
996 struct l2cap_conn *conn = hcon->l2cap_data;
997 struct l2cap_chan *chan, *l;
1003 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1005 kfree_skb(conn->rx_skb);
1008 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1011 l2cap_chan_del(chan, err);
1013 chan->ops->close(chan->data);
1016 hci_chan_del(conn->hchan);
1018 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1019 __cancel_delayed_work(&conn->info_timer);
1021 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1022 __cancel_delayed_work(&conn->security_timer);
1023 smp_chan_destroy(conn);
1026 hcon->l2cap_data = NULL;

/* SMP security timer expired: drop the whole connection. */
1030 static void security_timeout(struct work_struct *work)
1032 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1033 security_timer.work);
1035 l2cap_conn_del(conn->hcon, ETIMEDOUT);

/* Create (or return the existing) l2cap_conn for @hcon: allocate an
 * HCI channel, pick the MTU by link type, wire up addresses, lists,
 * and the per-link-type delayed work (SMP security timer for LE,
 * info timer otherwise). */
1038 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1040 struct l2cap_conn *conn = hcon->l2cap_data;
1041 struct hci_chan *hchan;
1046 hchan = hci_chan_create(hcon);
1050 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1052 hci_chan_del(hchan);
1056 hcon->l2cap_data = conn;
1058 conn->hchan = hchan;
1060 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1062 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1063 conn->mtu = hcon->hdev->le_mtu;
1065 conn->mtu = hcon->hdev->acl_mtu;
1067 conn->src = &hcon->hdev->bdaddr;
1068 conn->dst = &hcon->dst;
1070 conn->feat_mask = 0;
1072 spin_lock_init(&conn->lock);
1074 INIT_LIST_HEAD(&conn->chan_l);
1076 if (hcon->type == LE_LINK)
1077 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1079 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1081 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* NOTE(review): truncated listing fragment — interior lines missing. */

1086 /* ---- Socket interface ---- */
1088 /* Find socket with psm and source bdaddr.
1089 * Returns closest match.
/* Global search by PSM: exact source match wins, a BDADDR_ANY
 * listener is kept as fallback. */
1091 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1093 struct l2cap_chan *c, *c1 = NULL;
1095 read_lock(&chan_list_lock);
1097 list_for_each_entry(c, &chan_list, global_l) {
1098 struct sock *sk = c->sk;
1100 if (state && c->state != state)
1103 if (c->psm == psm) {
1105 if (!bacmp(&bt_sk(sk)->src, src)) {
1106 read_unlock(&chan_list_lock);
1111 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1116 read_unlock(&chan_list_lock);

/* Outgoing connect: validate PSM/CID/mode, resolve the route, create
 * the ACL or LE baseband link with the needed security, attach the
 * channel to the resulting l2cap_conn, and either start signalling
 * immediately (link already up) or wait for conn-ready. */
1121 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1123 struct sock *sk = chan->sk;
1124 bdaddr_t *src = &bt_sk(sk)->src;
1125 struct l2cap_conn *conn;
1126 struct hci_conn *hcon;
1127 struct hci_dev *hdev;
1131 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1134 hdev = hci_get_route(dst, src);
1136 return -EHOSTUNREACH;
1142 /* PSM must be odd and lsb of upper byte must be 0 */
1143 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1144 chan->chan_type != L2CAP_CHAN_RAW) {
1149 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1154 switch (chan->mode) {
1155 case L2CAP_MODE_BASIC:
1157 case L2CAP_MODE_ERTM:
1158 case L2CAP_MODE_STREAMING:
1167 switch (sk->sk_state) {
1171 /* Already connecting */
1176 /* Already connected */
1190 /* Set destination address and psm */
1191 bacpy(&bt_sk(sk)->dst, dst);
1195 auth_type = l2cap_get_auth_type(chan);
1197 if (chan->dcid == L2CAP_CID_LE_DATA)
1198 hcon = hci_connect(hdev, LE_LINK, dst,
1199 chan->sec_level, auth_type);
1201 hcon = hci_connect(hdev, ACL_LINK, dst,
1202 chan->sec_level, auth_type);
1205 err = PTR_ERR(hcon);
1209 conn = l2cap_conn_add(hcon, 0);
1216 /* Update source addr of the socket */
1217 bacpy(src, conn->src);
1219 l2cap_chan_add(conn, chan);
1221 l2cap_state_change(chan, BT_CONNECT);
1222 __set_chan_timer(chan, sk->sk_sndtimeo);
1224 if (hcon->state == BT_CONNECTED) {
1225 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1226 __clear_chan_timer(chan);
1227 if (l2cap_chan_check_security(chan))
1228 l2cap_state_change(chan, BT_CONNECTED);
1230 l2cap_do_start(chan);
1236 hci_dev_unlock(hdev);
/* NOTE(review): truncated listing fragment — interior lines missing. */

/* Sleep (interruptibly) until every ERTM frame has been acked or the
 * connection goes away; returns a socket error / signal errno. */
1241 int __l2cap_wait_ack(struct sock *sk)
1243 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1244 DECLARE_WAITQUEUE(wait, current);
1248 add_wait_queue(sk_sleep(sk), &wait);
1249 set_current_state(TASK_INTERRUPTIBLE);
1250 while (chan->unacked_frames > 0 && chan->conn) {
1254 if (signal_pending(current)) {
1255 err = sock_intr_errno(timeo);
1260 timeo = schedule_timeout(timeo);
1262 set_current_state(TASK_INTERRUPTIBLE);
1264 err = sock_error(sk);
1268 set_current_state(TASK_RUNNING);
1269 remove_wait_queue(sk_sleep(sk), &wait);

/* ERTM monitor timer: give up (disconnect) after remote_max_tx
 * retries, otherwise re-arm and poll the peer with RR/RNR + P-bit. */
1273 static void l2cap_monitor_timeout(struct work_struct *work)
1275 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1276 monitor_timer.work);
1277 struct sock *sk = chan->sk;
1279 BT_DBG("chan %p", chan);
1282 if (chan->retry_count >= chan->remote_max_tx) {
1283 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1288 chan->retry_count++;
1289 __set_monitor_timer(chan);
1291 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);

/* ERTM retransmission timer: start the monitor sequence (retry 1),
 * enter the WAIT_F state and poll the peer. */
1295 static void l2cap_retrans_timeout(struct work_struct *work)
1297 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1298 retrans_timer.work);
1299 struct sock *sk = chan->sk;
1301 BT_DBG("chan %p", chan);
1304 chan->retry_count = 1;
1305 __set_monitor_timer(chan);
1307 set_bit(CONN_WAIT_F, &chan->conn_state);
1309 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);

/* Drop acknowledged frames from the head of tx_q up to
 * expected_ack_seq; stop the retrans timer once nothing is unacked. */
1313 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1315 struct sk_buff *skb;
1317 while ((skb = skb_peek(&chan->tx_q)) &&
1318 chan->unacked_frames) {
1319 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1322 skb = skb_dequeue(&chan->tx_q);
1325 chan->unacked_frames--;
1328 if (!chan->unacked_frames)
1329 __clear_retrans_timer(chan);

/* Streaming mode TX: stamp each queued frame with the next tx_seq,
 * (re)compute the CRC16 FCS in place when enabled, and send. */
1332 static void l2cap_streaming_send(struct l2cap_chan *chan)
1334 struct sk_buff *skb;
1338 while ((skb = skb_dequeue(&chan->tx_q))) {
1339 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1340 control |= __set_txseq(chan, chan->next_tx_seq);
1341 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1343 if (chan->fcs == L2CAP_FCS_CRC16) {
1344 fcs = crc16(0, (u8 *)skb->data,
1345 skb->len - L2CAP_FCS_SIZE);
1346 put_unaligned_le16(fcs,
1347 skb->data + skb->len - L2CAP_FCS_SIZE);
1350 l2cap_do_send(chan, skb);
1352 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* NOTE(review): truncated listing fragment — interior lines missing. */

/* Retransmit the single queued I-frame with sequence @tx_seq: locate
 * it in tx_q, abort the link if it already hit remote_max_tx retries,
 * otherwise clone it, rebuild its control field (F-bit if pending,
 * current reqseq, the requested tx_seq), refresh the FCS and send. */
1356 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1358 struct sk_buff *skb, *tx_skb;
1362 skb = skb_peek(&chan->tx_q);
1366 while (bt_cb(skb)->tx_seq != tx_seq) {
1367 if (skb_queue_is_last(&chan->tx_q, skb))
1370 skb = skb_queue_next(&chan->tx_q, skb);
1373 if (chan->remote_max_tx &&
1374 bt_cb(skb)->retries == chan->remote_max_tx) {
1375 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1379 tx_skb = skb_clone(skb, GFP_ATOMIC);
1380 bt_cb(skb)->retries++;
1382 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1383 control &= __get_sar_mask(chan);
1385 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1386 control |= __set_ctrl_final(chan);
1388 control |= __set_reqseq(chan, chan->buffer_seq);
1389 control |= __set_txseq(chan, tx_seq);
1391 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1393 if (chan->fcs == L2CAP_FCS_CRC16) {
1394 fcs = crc16(0, (u8 *)tx_skb->data,
1395 tx_skb->len - L2CAP_FCS_SIZE);
1396 put_unaligned_le16(fcs,
1397 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1400 l2cap_do_send(chan, tx_skb);

/* ERTM transmit loop: send frames from tx_send_head while the TX
 * window has room, cloning each skb, stamping control + FCS, arming
 * the retransmission timer and bumping the unacked/sent counters. */
1403 static int l2cap_ertm_send(struct l2cap_chan *chan)
1405 struct sk_buff *skb, *tx_skb;
1410 if (chan->state != BT_CONNECTED)
1413 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1415 if (chan->remote_max_tx &&
1416 bt_cb(skb)->retries == chan->remote_max_tx) {
1417 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1421 tx_skb = skb_clone(skb, GFP_ATOMIC);
1423 bt_cb(skb)->retries++;
1425 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1426 control &= __get_sar_mask(chan);
1428 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1429 control |= __set_ctrl_final(chan);
1431 control |= __set_reqseq(chan, chan->buffer_seq);
1432 control |= __set_txseq(chan, chan->next_tx_seq);
1434 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): below the CRC is computed from skb->data but written
 * via skb->data while the length comes from tx_skb — a clone shares
 * its data with the original, so this works, but it reads
 * inconsistently; confirm against the full source. */
1436 if (chan->fcs == L2CAP_FCS_CRC16) {
1437 fcs = crc16(0, (u8 *)skb->data,
1438 tx_skb->len - L2CAP_FCS_SIZE);
1439 put_unaligned_le16(fcs, skb->data +
1440 tx_skb->len - L2CAP_FCS_SIZE);
1443 l2cap_do_send(chan, tx_skb);
1445 __set_retrans_timer(chan);
1447 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1449 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1451 if (bt_cb(skb)->retries == 1)
1452 chan->unacked_frames++;
1454 chan->frames_sent++;
1456 if (skb_queue_is_last(&chan->tx_q, skb))
1457 chan->tx_send_head = NULL;
1459 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);

/* Rewind the send pointer to the start of tx_q and resend everything
 * from the last acknowledged sequence number. */
1467 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1471 if (!skb_queue_empty(&chan->tx_q))
1472 chan->tx_send_head = chan->tx_q.next;
1474 chan->next_tx_seq = chan->expected_ack_seq;
1475 ret = l2cap_ertm_send(chan);
/* NOTE(review): truncated listing fragment — interior lines missing. */

/* Acknowledge received frames: send RNR while locally busy; otherwise
 * try to piggy-back the ack on pending I-frames via l2cap_ertm_send(),
 * falling back to an explicit RR S-frame. */
1479 static void __l2cap_send_ack(struct l2cap_chan *chan)
1483 control |= __set_reqseq(chan, chan->buffer_seq);
1485 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1486 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1487 set_bit(CONN_RNR_SENT, &chan->conn_state);
1488 l2cap_send_sframe(chan, control);
1492 if (l2cap_ertm_send(chan) > 0)
1495 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1496 l2cap_send_sframe(chan, control);

/* Cancel the delayed-ack timer and ack immediately. */
1499 static void l2cap_send_ack(struct l2cap_chan *chan)
1501 __clear_ack_timer(chan);
1502 __l2cap_send_ack(chan);

/* Send an SREJ S-frame (with F-bit) for the last entry on srej_l. */
1505 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1507 struct srej_list *tail;
1510 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1511 control |= __set_ctrl_final(chan);
1513 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1514 control |= __set_reqseq(chan, tail->tx_seq);
1516 l2cap_send_sframe(chan, control);

/* Copy @len bytes of user iovec into @skb, spilling anything beyond
 * the first @count bytes into MTU-sized continuation fragments chained
 * on the skb's frag_list. */
1519 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1521 struct l2cap_conn *conn = chan->conn;
1522 struct sk_buff **frag;
1525 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1531 /* Continuation fragments (no L2CAP header) */
1532 frag = &skb_shinfo(skb)->frag_list;
1534 count = min_t(unsigned int, conn->mtu, len);
1536 *frag = chan->ops->alloc_skb(chan, count,
1537 msg->msg_flags & MSG_DONTWAIT, &err);
1541 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1544 (*frag)->priority = skb->priority;
1549 frag = &(*frag)->next;
1555 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1556 struct msghdr *msg, size_t len,
1559 struct sock *sk = chan->sk;
1560 struct l2cap_conn *conn = chan->conn;
1561 struct sk_buff *skb;
1562 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1563 struct l2cap_hdr *lh;
1565 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1567 count = min_t(unsigned int, (conn->mtu - hlen), len);
1569 skb = chan->ops->alloc_skb(chan, count + hlen,
1570 msg->msg_flags & MSG_DONTWAIT, &err);
1573 return ERR_PTR(err);
1575 skb->priority = priority;
1577 /* Create L2CAP header */
1578 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1579 lh->cid = cpu_to_le16(chan->dcid);
1580 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1581 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1583 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1584 if (unlikely(err < 0)) {
1586 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload.  Same shape as the connectionless variant but without
 * the PSM prefix.  Returns the skb or an ERR_PTR on failure.
 */
1591 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1592 struct msghdr *msg, size_t len,
1595 struct sock *sk = chan->sk;
1596 struct l2cap_conn *conn = chan->conn;
1597 struct sk_buff *skb;
1598 int err, count, hlen = L2CAP_HDR_SIZE;
1599 struct l2cap_hdr *lh;
1601 BT_DBG("sk %p len %d", sk, (int)len);
1603 count = min_t(unsigned int, (conn->mtu - hlen), len);
1605 skb = chan->ops->alloc_skb(chan, count + hlen,
1606 msg->msg_flags & MSG_DONTWAIT, &err);
1609 return ERR_PTR(err);
1611 skb->priority = priority;
1613 /* Create L2CAP header */
1614 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1615 lh->cid = cpu_to_le16(chan->dcid);
1616 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1618 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1619 if (unlikely(err < 0)) {
1621 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, control field
 * (enhanced or extended depending on FLAG_EXT_CTRL), optional SDU-length
 * field for the first segment of a segmented SDU, payload, and an FCS
 * placeholder when CRC16 is in use.  Returns skb or ERR_PTR on failure.
 */
1626 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1627 struct msghdr *msg, size_t len,
1628 u32 control, u16 sdulen)
1630 struct sock *sk = chan->sk;
1631 struct l2cap_conn *conn = chan->conn;
1632 struct sk_buff *skb;
1633 int err, count, hlen;
1634 struct l2cap_hdr *lh;
1636 BT_DBG("sk %p len %d", sk, (int)len);
1639 return ERR_PTR(-ENOTCONN);
/* Extended control field costs a larger header than the enhanced one. */
1641 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1642 hlen = L2CAP_EXT_HDR_SIZE;
1644 hlen = L2CAP_ENH_HDR_SIZE;
/* Room for the 2-byte SDU length (first segment only, per caller). */
1647 hlen += L2CAP_SDULEN_SIZE;
1649 if (chan->fcs == L2CAP_FCS_CRC16)
1650 hlen += L2CAP_FCS_SIZE;
1652 count = min_t(unsigned int, (conn->mtu - hlen), len);
1654 skb = chan->ops->alloc_skb(chan, count + hlen,
1655 msg->msg_flags & MSG_DONTWAIT, &err);
1658 return ERR_PTR(err);
1660 /* Create L2CAP header */
1661 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1662 lh->cid = cpu_to_le16(chan->dcid);
1663 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1665 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1668 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1670 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1671 if (unlikely(err < 0)) {
1673 return ERR_PTR(err);
/* FCS slot is zero-filled here; presumably computed later on send —
 * TODO confirm against the transmit path. */
1676 if (chan->fcs == L2CAP_FCS_CRC16)
1677 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1679 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START I-frame
 * (carrying the total SDU length) followed by CONTINUE frames and a
 * final END frame.  Segments are staged on a local queue and spliced
 * onto chan->tx_q only once all of them were built, so a mid-way
 * allocation failure leaves tx_q untouched.
 */
1683 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1685 struct sk_buff *skb;
1686 struct sk_buff_head sar_queue;
1690 skb_queue_head_init(&sar_queue);
/* First segment: SAR=START, sdulen = total SDU length. */
1691 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1692 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1694 return PTR_ERR(skb);
1696 __skb_queue_tail(&sar_queue, skb);
1697 len -= chan->remote_mps;
1698 size += chan->remote_mps;
/* Middle segments are CONTINUE; the last (<= remote_mps) is END. */
1703 if (len > chan->remote_mps) {
1704 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1705 buflen = chan->remote_mps;
1707 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1711 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure drop everything built so far. */
1713 skb_queue_purge(&sar_queue);
1714 return PTR_ERR(skb);
1717 __skb_queue_tail(&sar_queue, skb);
1721 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1722 if (chan->tx_send_head == NULL)
1723 chan->tx_send_head = sar_queue.next;
/* Transmit entry point for a channel: dispatch on channel type and mode.
 * Connectionless and basic-mode data is sent immediately; ERTM/streaming
 * SDUs are (segmented and) queued on tx_q and then pushed by the
 * mode-specific sender.  Returns bytes sent or a negative errno.
 */
1728 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1731 struct sk_buff *skb;
1735 /* Connectionless channel */
1736 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1737 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1739 return PTR_ERR(skb);
1741 l2cap_do_send(chan, skb);
1745 switch (chan->mode) {
1746 case L2CAP_MODE_BASIC:
1747 /* Check outgoing MTU */
1748 if (len > chan->omtu)
1751 /* Create a basic PDU */
1752 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1754 return PTR_ERR(skb);
1756 l2cap_do_send(chan, skb);
1760 case L2CAP_MODE_ERTM:
1761 case L2CAP_MODE_STREAMING:
1762 /* Entire SDU fits into one PDU */
1763 if (len <= chan->remote_mps) {
1764 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1765 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1768 return PTR_ERR(skb);
1770 __skb_queue_tail(&chan->tx_q, skb);
1772 if (chan->tx_send_head == NULL)
1773 chan->tx_send_head = skb;
1776 /* Segment SDU into multiples PDUs */
1777 err = l2cap_sar_segment_sdu(chan, msg, len);
1782 if (chan->mode == L2CAP_MODE_STREAMING) {
1783 l2cap_streaming_send(chan);
/* Defer ERTM sends while the peer is busy and we await an F-bit. */
1788 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1789 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1794 err = l2cap_ertm_send(chan);
1801 BT_DBG("bad state %1.1x", chan->mode);
1808 /* Copy frame to all raw sockets on that connection */
1809 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1811 struct sk_buff *nskb;
1812 struct l2cap_chan *chan;
1814 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under RCU and hand a clone of the
 * frame to every RAW-type channel. */
1818 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1819 struct sock *sk = chan->sk;
1820 if (chan->chan_type != L2CAP_CHAN_RAW)
1823 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: we are in RX context and may not sleep. */
1826 nskb = skb_clone(skb, GFP_ATOMIC);
1830 if (chan->ops->recv(chan->data, nskb))
1837 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb for a signalling command: L2CAP header on the
 * signalling CID (LE or BR/EDR), command header (code/ident/len), then
 * 'dlen' bytes of payload, fragmented across frag_list skbs when the
 * total exceeds the connection MTU.
 */
1838 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1839 u8 code, u8 ident, u16 dlen, void *data)
1841 struct sk_buff *skb, **frag;
1842 struct l2cap_cmd_hdr *cmd;
1843 struct l2cap_hdr *lh;
1846 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1847 conn, code, ident, dlen);
1849 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1850 count = min_t(unsigned int, conn->mtu, len);
1852 skb = bt_skb_alloc(count, GFP_ATOMIC);
1856 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1857 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling goes to a fixed CID that differs between LE and BR/EDR. */
1859 if (conn->hcon->type == LE_LINK)
1860 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1862 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1864 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1867 cmd->len = cpu_to_le16(dlen);
/* Remaining space in the head skb is filled with payload. */
1870 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1871 memcpy(skb_put(skb, count), data, count);
1877 /* Continuation fragments (no L2CAP header) */
1878 frag = &skb_shinfo(skb)->frag_list;
1880 count = min_t(unsigned int, conn->mtu, len);
1882 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1886 memcpy(skb_put(*frag, count), data, count);
1891 frag = &(*frag)->next;
/* Decode one TLV configuration option at *ptr: store its type and
 * payload length, fetch 1/2/4-byte values directly and return larger
 * payloads by pointer in *val.  Returns the total bytes consumed
 * (header + payload) so the caller can advance through the option list.
 */
1901 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1903 struct l2cap_conf_opt *opt = *ptr;
1906 len = L2CAP_CONF_OPT_SIZE + opt->len;
1914 *val = *((u8 *) opt->val);
1918 *val = get_unaligned_le16(opt->val);
1922 *val = get_unaligned_le32(opt->val);
/* Larger options are not copied; caller gets a pointer into the buffer. */
1926 *val = (unsigned long) opt->val;
1930 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one TLV configuration option at *ptr (1/2/4-byte values are
 * stored inline, larger ones memcpy'd from the pointer passed as 'val')
 * and advance *ptr past it.  Mirror of l2cap_get_conf_opt().
 */
1934 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1936 struct l2cap_conf_opt *opt = *ptr;
1938 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1945 *((u8 *) opt->val) = val;
1949 put_unaligned_le16(val, opt->val);
1953 put_unaligned_le32(val, opt->val);
1957 memcpy(opt->val, (void *) val, len);
1961 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Fill an Extended Flow Specification from the channel's local QoS
 * parameters — full set for ERTM, best-effort defaults for streaming —
 * and append it as an EFS configuration option at *ptr.
 */
1964 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1966 struct l2cap_conf_efs efs;
1968 switch (chan->mode) {
1969 case L2CAP_MODE_ERTM:
1970 efs.id = chan->local_id;
1971 efs.stype = chan->local_stype;
1972 efs.msdu = cpu_to_le16(chan->local_msdu);
1973 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1974 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1975 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1978 case L2CAP_MODE_STREAMING:
1980 efs.stype = L2CAP_SERV_BESTEFFORT;
1981 efs.msdu = cpu_to_le16(chan->local_msdu);
1982 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1991 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1992 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: send a pending
 * acknowledgement under the socket lock, then drop the channel
 * reference taken when the timer was armed.
 */
1995 static void l2cap_ack_timeout(struct work_struct *work)
1997 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2000 BT_DBG("chan %p", chan);
2002 lock_sock(chan->sk);
2003 __l2cap_send_ack(chan);
2004 release_sock(chan->sk);
2006 l2cap_chan_put(chan);
/* Reset per-channel ERTM transmit state and initialize the
 * retransmission/monitor/ack timers and the SREJ queue/list used by
 * selective-reject recovery.  Called when a channel enters ERTM mode.
 */
2009 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2011 chan->expected_ack_seq = 0;
2012 chan->unacked_frames = 0;
2013 chan->buffer_seq = 0;
2014 chan->num_acked = 0;
2015 chan->frames_sent = 0;
2017 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2018 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2019 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2021 skb_queue_head_init(&chan->srej_q);
2023 INIT_LIST_HEAD(&chan->srej_l);
/* Pick the channel mode to request: keep ERTM/streaming only when the
 * remote's feature mask supports it, otherwise fall back to basic mode.
 */
2026 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2029 case L2CAP_MODE_STREAMING:
2030 case L2CAP_MODE_ERTM:
2031 if (l2cap_mode_supported(mode, remote_feat_mask))
2035 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled
 * and the remote advertising the feature. */
2039 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2041 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the remote advertising the feature. */
2044 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2046 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Decide the TX window: if the requested window exceeds the default and
 * extended window size is supported, switch to the extended control
 * field; otherwise clamp tx_win to the standard default window.
 */
2049 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2051 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2052 __l2cap_ews_supported(chan)) {
2053 /* use extended control field */
2054 set_bit(FLAG_EXT_CTRL, &chan->flags);
2055 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2057 chan->tx_win = min_t(u16, chan->tx_win,
2058 L2CAP_DEFAULT_TX_WINDOW);
2059 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configuration Request for 'chan' into 'data':
 * non-default MTU, an RFC option describing the selected mode, and —
 * for ERTM/streaming — optional EFS, FCS and extended-window options.
 * Returns the total request length (header + options).
 */
2063 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2065 struct l2cap_conf_req *req = data;
2066 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2067 void *ptr = req->data;
2070 BT_DBG("chan %p", chan);
/* Only (re)negotiate the mode on the first request/response round. */
2072 if (chan->num_conf_req || chan->num_conf_rsp)
2075 switch (chan->mode) {
2076 case L2CAP_MODE_STREAMING:
2077 case L2CAP_MODE_ERTM:
2078 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2081 if (__l2cap_efs_supported(chan))
2082 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2086 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2091 if (chan->imtu != L2CAP_DEFAULT_MTU)
2092 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2094 switch (chan->mode) {
2095 case L2CAP_MODE_BASIC:
/* Skip the RFC option entirely if the remote knows neither ERTM nor
 * streaming mode. */
2096 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2097 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2100 rfc.mode = L2CAP_MODE_BASIC;
2102 rfc.max_transmit = 0;
2103 rfc.retrans_timeout = 0;
2104 rfc.monitor_timeout = 0;
2105 rfc.max_pdu_size = 0;
2107 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2108 (unsigned long) &rfc);
2111 case L2CAP_MODE_ERTM:
2112 rfc.mode = L2CAP_MODE_ERTM;
2113 rfc.max_transmit = chan->max_tx;
2114 rfc.retrans_timeout = 0;
2115 rfc.monitor_timeout = 0;
/* PDU size is limited by what fits in one ACL MTU with headers. */
2117 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2118 L2CAP_EXT_HDR_SIZE -
2121 rfc.max_pdu_size = cpu_to_le16(size);
2123 l2cap_txwin_setup(chan);
2125 rfc.txwin_size = min_t(u16, chan->tx_win,
2126 L2CAP_DEFAULT_TX_WINDOW);
2128 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2129 (unsigned long) &rfc);
2131 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2132 l2cap_add_opt_efs(&ptr, chan);
2134 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2137 if (chan->fcs == L2CAP_FCS_NONE ||
2138 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2139 chan->fcs = L2CAP_FCS_NONE;
2140 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2143 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2144 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2148 case L2CAP_MODE_STREAMING:
2149 rfc.mode = L2CAP_MODE_STREAMING;
2151 rfc.max_transmit = 0;
2152 rfc.retrans_timeout = 0;
2153 rfc.monitor_timeout = 0;
2155 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2156 L2CAP_EXT_HDR_SIZE -
2159 rfc.max_pdu_size = cpu_to_le16(size);
2161 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2162 (unsigned long) &rfc);
2164 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2165 l2cap_add_opt_efs(&ptr, chan);
2167 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2170 if (chan->fcs == L2CAP_FCS_NONE ||
2171 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2172 chan->fcs = L2CAP_FCS_NONE;
2173 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2178 req->dcid = cpu_to_le16(chan->dcid);
2179 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configuration Request (chan->conf_req)
 * and build our Configuration Response into 'data': walk the TLV
 * options, record MTU/flush-to/RFC/FCS/EFS/EWS values, negotiate the
 * mode, and emit accept/unaccept options plus the response header.
 * Returns the response length.
 */
2184 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2186 struct l2cap_conf_rsp *rsp = data;
2187 void *ptr = rsp->data;
2188 void *req = chan->conf_req;
2189 int len = chan->conf_len;
2190 int type, hint, olen;
2192 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2193 struct l2cap_conf_efs efs;
2195 u16 mtu = L2CAP_DEFAULT_MTU;
2196 u16 result = L2CAP_CONF_SUCCESS;
2199 BT_DBG("chan %p", chan);
/* First pass: decode every option the peer sent. */
2201 while (len >= L2CAP_CONF_OPT_SIZE) {
2202 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; others must be understood. */
2204 hint = type & L2CAP_CONF_HINT;
2205 type &= L2CAP_CONF_MASK;
2208 case L2CAP_CONF_MTU:
2212 case L2CAP_CONF_FLUSH_TO:
2213 chan->flush_to = val;
2216 case L2CAP_CONF_QOS:
2219 case L2CAP_CONF_RFC:
2220 if (olen == sizeof(rfc))
2221 memcpy(&rfc, (void *) val, olen);
2224 case L2CAP_CONF_FCS:
2225 if (val == L2CAP_FCS_NONE)
2226 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2229 case L2CAP_CONF_EFS:
2231 if (olen == sizeof(efs))
2232 memcpy(&efs, (void *) val, olen);
2235 case L2CAP_CONF_EWS:
2237 return -ECONNREFUSED;
2239 set_bit(FLAG_EXT_CTRL, &chan->flags);
2240 set_bit(CONF_EWS_RECV, &chan->conf_state);
2241 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2242 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as UNKNOWN. */
2249 result = L2CAP_CONF_UNKNOWN;
2250 *((u8 *) ptr++) = type;
2255 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2258 switch (chan->mode) {
2259 case L2CAP_MODE_STREAMING:
2260 case L2CAP_MODE_ERTM:
2261 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2262 chan->mode = l2cap_select_mode(rfc.mode,
2263 chan->conn->feat_mask);
2268 if (__l2cap_efs_supported(chan))
2269 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2271 return -ECONNREFUSED;
2274 if (chan->mode != rfc.mode)
2275 return -ECONNREFUSED;
/* Second mode mismatch in a row: give up and refuse the connection. */
2281 if (chan->mode != rfc.mode) {
2282 result = L2CAP_CONF_UNACCEPT;
2283 rfc.mode = chan->mode;
2285 if (chan->num_conf_rsp == 1)
2286 return -ECONNREFUSED;
2288 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2289 sizeof(rfc), (unsigned long) &rfc);
2292 if (result == L2CAP_CONF_SUCCESS) {
2293 /* Configure output options and let the other side know
2294 * which ones we don't like. */
2296 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2297 result = L2CAP_CONF_UNACCEPT;
2300 set_bit(CONF_MTU_DONE, &chan->conf_state);
2302 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service types must match unless one side is NO TRAFFIC. */
2305 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2306 efs.stype != L2CAP_SERV_NOTRAFIC &&
2307 efs.stype != chan->local_stype) {
2309 result = L2CAP_CONF_UNACCEPT;
2311 if (chan->num_conf_req >= 1)
2312 return -ECONNREFUSED;
2314 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2316 (unsigned long) &efs);
2318 /* Send PENDING Conf Rsp */
2319 result = L2CAP_CONF_PENDING;
2320 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2325 case L2CAP_MODE_BASIC:
2326 chan->fcs = L2CAP_FCS_NONE;
2327 set_bit(CONF_MODE_DONE, &chan->conf_state);
2330 case L2CAP_MODE_ERTM:
2331 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2332 chan->remote_tx_win = rfc.txwin_size;
2334 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2336 chan->remote_max_tx = rfc.max_transmit;
2338 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2340 L2CAP_EXT_HDR_SIZE -
2343 rfc.max_pdu_size = cpu_to_le16(size);
2344 chan->remote_mps = size;
2346 rfc.retrans_timeout =
2347 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2348 rfc.monitor_timeout =
2349 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2351 set_bit(CONF_MODE_DONE, &chan->conf_state);
2353 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2354 sizeof(rfc), (unsigned long) &rfc);
/* Accept the peer's EFS parameters as our view of the remote. */
2356 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2357 chan->remote_id = efs.id;
2358 chan->remote_stype = efs.stype;
2359 chan->remote_msdu = le16_to_cpu(efs.msdu);
2360 chan->remote_flush_to =
2361 le32_to_cpu(efs.flush_to);
2362 chan->remote_acc_lat =
2363 le32_to_cpu(efs.acc_lat);
2364 chan->remote_sdu_itime =
2365 le32_to_cpu(efs.sdu_itime);
2366 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2367 sizeof(efs), (unsigned long) &efs);
2371 case L2CAP_MODE_STREAMING:
2372 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2374 L2CAP_EXT_HDR_SIZE -
2377 rfc.max_pdu_size = cpu_to_le16(size);
2378 chan->remote_mps = size;
2380 set_bit(CONF_MODE_DONE, &chan->conf_state);
2382 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2383 sizeof(rfc), (unsigned long) &rfc);
2388 result = L2CAP_CONF_UNACCEPT;
2390 memset(&rfc, 0, sizeof(rfc));
2391 rfc.mode = chan->mode;
2394 if (result == L2CAP_CONF_SUCCESS)
2395 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2397 rsp->scid = cpu_to_le16(chan->dcid);
2398 rsp->result = cpu_to_le16(result);
2399 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configuration Response and build a new Configuration
 * Request into 'data', adjusting our parameters (MTU, flush timeout,
 * RFC, EWS, EFS) to what the peer accepted or countered.  May update
 * *result (e.g. to UNACCEPT on a too-small MTU).  Returns the length of
 * the new request, or -ECONNREFUSED when no agreement is possible.
 */
2404 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2406 struct l2cap_conf_req *req = data;
2407 void *ptr = req->data;
2410 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2411 struct l2cap_conf_efs efs;
2413 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2415 while (len >= L2CAP_CONF_OPT_SIZE) {
2416 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2419 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: counter with the minimum. */
2420 if (val < L2CAP_DEFAULT_MIN_MTU) {
2421 *result = L2CAP_CONF_UNACCEPT;
2422 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2425 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2428 case L2CAP_CONF_FLUSH_TO:
2429 chan->flush_to = val;
2430 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2434 case L2CAP_CONF_RFC:
2435 if (olen == sizeof(rfc))
2436 memcpy(&rfc, (void *)val, olen);
/* State-2 devices may not change mode after the fact. */
2438 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2439 rfc.mode != chan->mode)
2440 return -ECONNREFUSED;
2444 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2445 sizeof(rfc), (unsigned long) &rfc);
2448 case L2CAP_CONF_EWS:
2449 chan->tx_win = min_t(u16, val,
2450 L2CAP_DEFAULT_EXT_WINDOW);
2451 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2455 case L2CAP_CONF_EFS:
2456 if (olen == sizeof(efs))
2457 memcpy(&efs, (void *)val, olen);
2459 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2460 efs.stype != L2CAP_SERV_NOTRAFIC &&
2461 efs.stype != chan->local_stype)
2462 return -ECONNREFUSED;
2464 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2465 sizeof(efs), (unsigned long) &efs);
2470 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2471 return -ECONNREFUSED;
2473 chan->mode = rfc.mode;
/* On success/pending, latch the negotiated timers and PDU sizes. */
2475 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2477 case L2CAP_MODE_ERTM:
2478 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2479 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2480 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2482 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2483 chan->local_msdu = le16_to_cpu(efs.msdu);
2484 chan->local_sdu_itime =
2485 le32_to_cpu(efs.sdu_itime);
2486 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2487 chan->local_flush_to =
2488 le32_to_cpu(efs.flush_to);
2492 case L2CAP_MODE_STREAMING:
2493 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2497 req->dcid = cpu_to_le16(chan->dcid);
2498 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal Configuration Response header (scid/result/flags) into
 * 'data' and return the response length. */
2503 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2505 struct l2cap_conf_rsp *rsp = data;
2506 void *ptr = rsp->data;
2508 BT_DBG("chan %p", chan);
2510 rsp->scid = cpu_to_le16(chan->dcid);
2511 rsp->result = cpu_to_le16(result);
2512 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect Response (SUCCESS) for a channel whose
 * acceptance was postponed, then kick off configuration by sending our
 * Configuration Request (once — guarded by CONF_REQ_SENT).
 */
2517 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2519 struct l2cap_conn_rsp rsp;
2520 struct l2cap_conn *conn = chan->conn;
2523 rsp.scid = cpu_to_le16(chan->dcid);
2524 rsp.dcid = cpu_to_le16(chan->scid);
2525 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2526 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2527 l2cap_send_cmd(conn, chan->ident,
2528 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2530 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2533 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2534 l2cap_build_conf_req(chan, buf), buf);
2535 chan->num_conf_req++;
/* Extract the RFC option from a Configuration Response and apply its
 * timers/PDU size to the channel (ERTM/streaming only).  If the peer
 * omitted the RFC option, fall back to sane defaults and log an error.
 */
2538 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2542 struct l2cap_conf_rfc rfc;
2544 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2546 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2549 while (len >= L2CAP_CONF_OPT_SIZE) {
2550 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2553 case L2CAP_CONF_RFC:
2554 if (olen == sizeof(rfc))
2555 memcpy(&rfc, (void *)val, olen);
2560 /* Use sane default values in case a misbehaving remote device
2561 * did not send an RFC option.
2563 rfc.mode = chan->mode;
2564 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2565 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2566 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2568 BT_ERR("Expected RFC option was not found, using defaults");
2572 case L2CAP_MODE_ERTM:
2573 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2574 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2575 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2577 case L2CAP_MODE_STREAMING:
2578 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject: if it rejects our outstanding Information
 * Request ("command not understood"), cancel the info timer, mark the
 * feature-mask exchange done and resume pending channel setups.
 */
2582 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2584 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2586 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2589 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2590 cmd->ident == conn->info_ident) {
2591 cancel_delayed_work(&conn->info_timer);
2593 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2594 conn->info_ident = 0;
2596 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: locate a listening channel for
 * the PSM, check link security and backlog, create the child channel,
 * decide result/status (success, pending on security/defer, or an
 * error), send the Connect Response, and — when features are already
 * known and the connection succeeded — start configuration.
 */
2602 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2604 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2605 struct l2cap_conn_rsp rsp;
2606 struct l2cap_chan *chan = NULL, *pchan;
2607 struct sock *parent, *sk = NULL;
2608 int result, status = L2CAP_CS_NO_INFO;
2610 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2611 __le16 psm = req->psm;
2613 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2615 /* Check if we have socket listening on psm */
2616 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2618 result = L2CAP_CR_BAD_PSM;
2626 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2627 if (psm != cpu_to_le16(0x0001) &&
2628 !hci_conn_check_link_mode(conn->hcon)) {
2629 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2630 result = L2CAP_CR_SEC_BLOCK;
2634 result = L2CAP_CR_NO_MEM;
2636 /* Check for backlog size */
2637 if (sk_acceptq_is_full(parent)) {
2638 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2642 chan = pchan->ops->new_connection(pchan->data);
2648 /* Check if we already have channel with that dcid */
2649 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2650 sock_set_flag(sk, SOCK_ZAPPED);
2651 chan->ops->close(chan->data);
2655 hci_conn_hold(conn->hcon);
2657 bacpy(&bt_sk(sk)->src, conn->src);
2658 bacpy(&bt_sk(sk)->dst, conn->dst);
2662 bt_accept_enqueue(parent, sk);
2664 l2cap_chan_add(conn, chan);
2668 __set_chan_timer(chan, sk->sk_sndtimeo);
2670 chan->ident = cmd->ident;
/* Only proceed once the feature-mask exchange is complete; otherwise
 * answer PENDING and trigger an Information Request below. */
2672 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2673 if (l2cap_chan_check_security(chan)) {
2674 if (bt_sk(sk)->defer_setup) {
2675 l2cap_state_change(chan, BT_CONNECT2);
2676 result = L2CAP_CR_PEND;
2677 status = L2CAP_CS_AUTHOR_PEND;
2678 parent->sk_data_ready(parent, 0);
2680 l2cap_state_change(chan, BT_CONFIG);
2681 result = L2CAP_CR_SUCCESS;
2682 status = L2CAP_CS_NO_INFO;
2685 l2cap_state_change(chan, BT_CONNECT2);
2686 result = L2CAP_CR_PEND;
2687 status = L2CAP_CS_AUTHEN_PEND;
2690 l2cap_state_change(chan, BT_CONNECT2);
2691 result = L2CAP_CR_PEND;
2692 status = L2CAP_CS_NO_INFO;
2696 release_sock(parent);
2699 rsp.scid = cpu_to_le16(scid);
2700 rsp.dcid = cpu_to_le16(dcid);
2701 rsp.result = cpu_to_le16(result);
2702 rsp.status = cpu_to_le16(status);
2703 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2705 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2706 struct l2cap_info_req info;
2707 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2709 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2710 conn->info_ident = l2cap_get_ident(conn);
2712 schedule_delayed_work(&conn->info_timer,
2713 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2715 l2cap_send_cmd(conn, conn->info_ident,
2716 L2CAP_INFO_REQ, sizeof(info), &info);
2719 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2720 result == L2CAP_CR_SUCCESS) {
2722 set_bit(CONF_REQ_SENT, &chan->conf_state);
2723 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2724 l2cap_build_conf_req(chan, buf), buf);
2725 chan->num_conf_req++;
/* Handle a Connect Response: look up the channel by scid (or by ident
 * while still pending), then on SUCCESS move to BT_CONFIG and send our
 * Configuration Request; on PEND just mark the connect pending; other
 * results tear the channel down with ECONNREFUSED.
 */
2733 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2734 u16 scid, dcid, result, status;
2735 struct l2cap_chan *chan;
2739 scid = __le16_to_cpu(rsp->scid);
2740 dcid = __le16_to_cpu(rsp->dcid);
2741 result = __le16_to_cpu(rsp->result);
2742 status = __le16_to_cpu(rsp->status);
2744 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2747 chan = l2cap_get_chan_by_scid(conn, scid);
/* scid of 0 means the peer rejected before assigning one; fall back to
 * matching on the command ident. */
2751 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2759 case L2CAP_CR_SUCCESS:
2760 l2cap_state_change(chan, BT_CONFIG);
2763 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2765 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2768 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2769 l2cap_build_conf_req(chan, req), req);
2770 chan->num_conf_req++;
2774 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2778 l2cap_chan_del(chan, ECONNREFUSED);
2786 static inline void set_default_fcs(struct l2cap_chan *chan)
2788 /* FCS is enabled only in ERTM or streaming mode, if one or both
2791 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2792 chan->fcs = L2CAP_FCS_NONE;
/* Neither side asked to disable FCS, so default to CRC16. */
2793 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2794 chan->fcs = L2CAP_FCS_CRC16;
/* Handle a Configuration Request: validate channel state, accumulate
 * (possibly fragmented) option data in chan->conf_req, and on the final
 * fragment parse it, send the response, and — once both directions are
 * configured — mark the channel connected (initializing ERTM state when
 * needed) or send our own Configuration Request if not yet sent.
 */
2797 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2799 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2802 struct l2cap_chan *chan;
2806 dcid = __le16_to_cpu(req->dcid);
2807 flags = __le16_to_cpu(req->flags);
2809 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2811 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject with
 * "invalid CID". */
2817 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2818 struct l2cap_cmd_rej_cid rej;
2820 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2821 rej.scid = cpu_to_le16(chan->scid);
2822 rej.dcid = cpu_to_le16(chan->dcid);
2824 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2829 /* Reject if config buffer is too small. */
2830 len = cmd_len - sizeof(*req);
2831 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2832 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2833 l2cap_build_conf_rsp(chan, rsp,
2834 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request. */
2839 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2840 chan->conf_len += len;
2842 if (flags & 0x0001) {
2843 /* Incomplete config. Send empty response. */
2844 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2845 l2cap_build_conf_rsp(chan, rsp,
2846 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2850 /* Complete config. */
2851 len = l2cap_parse_conf_req(chan, rsp);
2853 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2857 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2858 chan->num_conf_rsp++;
2860 /* Reset config buffer. */
2863 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2866 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2867 set_default_fcs(chan);
2869 l2cap_state_change(chan, BT_CONNECTED);
2871 chan->next_tx_seq = 0;
2872 chan->expected_tx_seq = 0;
2873 skb_queue_head_init(&chan->tx_q);
2874 if (chan->mode == L2CAP_MODE_ERTM)
2875 l2cap_ertm_init(chan);
2877 l2cap_chan_ready(sk);
2881 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2883 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2884 l2cap_build_conf_req(chan, buf), buf);
2885 chan->num_conf_req++;
2888 /* Got Conf Rsp PENDING from remote side and asume we sent
2889 Conf Rsp PENDING in the code above */
2890 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2891 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2893 /* check compatibility */
2895 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2896 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2898 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2899 l2cap_build_conf_rsp(chan, rsp,
2900 L2CAP_CONF_SUCCESS, 0x0000), rsp);
/* Handle a Configuration Response: on SUCCESS apply the RFC settings;
 * on PENDING resolve a mutual pending exchange; on UNACCEPT re-parse
 * and resend a corrected request (bounded by the max response count);
 * anything else disconnects.  When both directions are done, bring the
 * channel up (initializing ERTM state when needed).
 */
2908 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2910 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2911 u16 scid, flags, result;
2912 struct l2cap_chan *chan;
2914 int len = cmd->len - sizeof(*rsp);
2916 scid = __le16_to_cpu(rsp->scid);
2917 flags = __le16_to_cpu(rsp->flags);
2918 result = __le16_to_cpu(rsp->result);
2920 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2921 scid, flags, result);
2923 chan = l2cap_get_chan_by_scid(conn, scid);
2930 case L2CAP_CONF_SUCCESS:
2931 l2cap_conf_rfc_get(chan, rsp->data, len);
2932 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2935 case L2CAP_CONF_PENDING:
2936 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2938 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2941 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2944 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2948 /* check compatibility */
2950 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2951 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2953 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2954 l2cap_build_conf_rsp(chan, buf,
2955 L2CAP_CONF_SUCCESS, 0x0000), buf);
2959 case L2CAP_CONF_UNACCEPT:
2960 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard against the peer's countered options overflowing our
 * request buffer. */
2963 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2964 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2968 /* throw out any old stored conf requests */
2969 result = L2CAP_CONF_SUCCESS;
2970 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2973 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2977 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2978 L2CAP_CONF_REQ, len, req);
2979 chan->num_conf_req++;
2980 if (result != L2CAP_CONF_SUCCESS)
2986 sk->sk_err = ECONNRESET;
2987 __set_chan_timer(chan,
2988 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
2989 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2996 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2998 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2999 set_default_fcs(chan);
3001 l2cap_state_change(chan, BT_CONNECTED);
3002 chan->next_tx_seq = 0;
3003 chan->expected_tx_seq = 0;
3004 skb_queue_head_init(&chan->tx_q);
3005 if (chan->mode == L2CAP_MODE_ERTM)
3006 l2cap_ertm_init(chan);
3008 l2cap_chan_ready(sk);
/* Handle a Disconnection Request: look up the channel by our scid
 * (their dcid), acknowledge with a Disconnection Response, shut the
 * socket down and delete/close the channel.
 */
3016 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3018 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3019 struct l2cap_disconn_rsp rsp;
3021 struct l2cap_chan *chan;
3024 scid = __le16_to_cpu(req->scid);
3025 dcid = __le16_to_cpu(req->dcid);
3027 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3029 chan = l2cap_get_chan_by_scid(conn, dcid);
3035 rsp.dcid = cpu_to_le16(chan->scid);
3036 rsp.scid = cpu_to_le16(chan->dcid);
3037 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3039 sk->sk_shutdown = SHUTDOWN_MASK;
3041 l2cap_chan_del(chan, ECONNRESET);
3044 chan->ops->close(chan->data);
/* Handle a Disconnection Response to our request: look up the channel
 * by scid, then delete and close it (error 0 — clean teardown).
 */
3048 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3050 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3052 struct l2cap_chan *chan;
3055 scid = __le16_to_cpu(rsp->scid);
3056 dcid = __le16_to_cpu(rsp->dcid);
3058 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3060 chan = l2cap_get_chan_by_scid(conn, scid);
3066 l2cap_chan_del(chan, 0);
3069 chan->ops->close(chan->data);
/* Handle an Information Request: answer FEAT_MASK with our feature mask
 * (extended with ERTM/streaming and, when applicable, the high-speed
 * flow/window bits), FIXED_CHAN with the fixed-channel bitmap, and
 * anything else with NOTSUPP.
 */
3073 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3075 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3078 type = __le16_to_cpu(req->type);
3080 BT_DBG("type 0x%4.4x", type);
3082 if (type == L2CAP_IT_FEAT_MASK) {
3084 u32 feat_mask = l2cap_feat_mask;
3085 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3086 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3087 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3089 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3092 feat_mask |= L2CAP_FEAT_EXT_FLOW
3093 | L2CAP_FEAT_EXT_WINDOW;
3095 put_unaligned_le32(feat_mask, rsp->data);
3096 l2cap_send_cmd(conn, cmd->ident,
3097 L2CAP_INFO_RSP, sizeof(buf), buf);
3098 } else if (type == L2CAP_IT_FIXED_CHAN) {
3100 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when available. */
3103 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3105 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3107 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3108 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3109 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3110 l2cap_send_cmd(conn, cmd->ident,
3111 L2CAP_INFO_RSP, sizeof(buf), buf);
3113 struct l2cap_info_rsp rsp;
3114 rsp.type = cpu_to_le16(type);
3115 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3116 l2cap_send_cmd(conn, cmd->ident,
3117 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3123 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3125 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3128 type = __le16_to_cpu(rsp->type);
3129 result = __le16_to_cpu(rsp->result);
3131 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3133 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3134 if (cmd->ident != conn->info_ident ||
3135 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3138 cancel_delayed_work(&conn->info_timer);
3140 if (result != L2CAP_IR_SUCCESS) {
3141 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3142 conn->info_ident = 0;
3144 l2cap_conn_start(conn);
3149 if (type == L2CAP_IT_FEAT_MASK) {
3150 conn->feat_mask = get_unaligned_le32(rsp->data);
3152 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3153 struct l2cap_info_req req;
3154 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3156 conn->info_ident = l2cap_get_ident(conn);
3158 l2cap_send_cmd(conn, conn->info_ident,
3159 L2CAP_INFO_REQ, sizeof(req), &req);
3161 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3162 conn->info_ident = 0;
3164 l2cap_conn_start(conn);
3166 } else if (type == L2CAP_IT_FIXED_CHAN) {
3167 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3168 conn->info_ident = 0;
3170 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request (0x0c).  AMP channel moves are not
 * implemented yet, so every request is rejected with "no resources".
 */
3176 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3177 					struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3180 	struct l2cap_create_chan_req *req = data;
3181 	struct l2cap_create_chan_rsp rsp;
/* Reject malformed requests up front (error path elided in this extract) */
3184 	if (cmd_len != sizeof(*req))
3190 	psm = le16_to_cpu(req->psm);
3191 	scid = le16_to_cpu(req->scid);
3193 	BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3195 	/* Placeholder: Always reject */
3197 	rsp.scid = cpu_to_le16(scid);
3198 	rsp.result = L2CAP_CR_NO_MEM;
3199 	rsp.status = L2CAP_CS_NO_INFO;
3201 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Handle an AMP Create Channel Response (0x0d).  Payload layout matches a
 * plain Connect Response, so reuse that handler.
 */
3207 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3208 					struct l2cap_cmd_hdr *cmd, void *data)
3210 	BT_DBG("conn %p", conn);
3212 	return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for initiator CID @icid with @result,
 * reusing the request's @ident so the peer can correlate it.
 */
3215 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3216 					u16 icid, u16 result)
3218 	struct l2cap_move_chan_rsp rsp;
3220 	BT_DBG("icid %d, result %d", icid, result);
3222 	rsp.icid = cpu_to_le16(icid);
3223 	rsp.result = cpu_to_le16(result);
3225 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3228 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3229 struct l2cap_chan *chan, u16 icid, u16 result)
3231 struct l2cap_move_chan_cfm cfm;
3234 BT_DBG("icid %d, result %d", icid, result);
3236 ident = l2cap_get_ident(conn);
3238 chan->ident = ident;
3240 cfm.icid = cpu_to_le16(icid);
3241 cfm.result = cpu_to_le16(result);
3243 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Send a Move Channel Confirmation Response for @icid, echoing @ident. */
3246 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3249 	struct l2cap_move_chan_cfm_rsp rsp;
3251 	BT_DBG("icid %d", icid);
3253 	rsp.icid = cpu_to_le16(icid);
3254 	l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request (0x0e).  AMP moves are not implemented;
 * every request is refused with NOT_ALLOWED.
 */
3257 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3258 			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3260 	struct l2cap_move_chan_req *req = data;
3262 	u16 result = L2CAP_MR_NOT_ALLOWED;
/* Length check; the reject path for malformed input is elided here */
3264 	if (cmd_len != sizeof(*req))
3267 	icid = le16_to_cpu(req->icid);
3269 	BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3274 	/* Placeholder: Always refuse */
3275 	l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response (0x0f).  Placeholder: always confirm as
 * unconfirmed.
 * NOTE(review): passes a NULL channel — verify that
 * l2cap_send_move_chan_cfm() tolerates chan == NULL, otherwise this path
 * dereferences NULL when writing chan->ident.
 */
3280 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3281 			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3283 	struct l2cap_move_chan_rsp *rsp = data;
3286 	if (cmd_len != sizeof(*rsp))
3289 	icid = le16_to_cpu(rsp->icid);
3290 	result = le16_to_cpu(rsp->result);
3292 	BT_DBG("icid %d, result %d", icid, result);
3294 	/* Placeholder: Always unconfirmed */
3295 	l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirmation (0x10): just acknowledge it. */
3300 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3301 			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3303 	struct l2cap_move_chan_cfm *cfm = data;
3306 	if (cmd_len != sizeof(*cfm))
3309 	icid = le16_to_cpu(cfm->icid);
3310 	result = le16_to_cpu(cfm->result);
3312 	BT_DBG("icid %d, result %d", icid, result);
3314 	l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirmation Response (0x11): log only. */
3319 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3320 			struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3322 	struct l2cap_move_chan_cfm_rsp *rsp = data;
3325 	if (cmd_len != sizeof(*rsp))
3328 	icid = le16_to_cpu(rsp->icid);
3330 	BT_DBG("icid %d", icid);
3335 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3340 if (min > max || min < 6 || max > 3200)
3343 if (to_multiplier < 10 || to_multiplier > 3200)
3346 if (max >= to_multiplier * 8)
3349 max_latency = (to_multiplier * 8 / max) - 1;
3350 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (0x12).  Only valid
 * when we are master; validates the proposed parameters, replies with
 * accepted/rejected, and applies accepted values via HCI.
 */
3356 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3357 					struct l2cap_cmd_hdr *cmd, u8 *data)
3359 	struct hci_conn *hcon = conn->hcon;
3360 	struct l2cap_conn_param_update_req *req;
3361 	struct l2cap_conn_param_update_rsp rsp;
3362 	u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may act on this request (reject path elided here) */
3365 	if (!(hcon->link_mode & HCI_LM_MASTER))
3368 	cmd_len = __le16_to_cpu(cmd->len);
3369 	if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3372 	req = (struct l2cap_conn_param_update_req *) data;
3373 	min = __le16_to_cpu(req->min);
3374 	max = __le16_to_cpu(req->max);
3375 	latency = __le16_to_cpu(req->latency);
3376 	to_multiplier = __le16_to_cpu(req->to_multiplier);
3378 	BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3379 						min, max, latency, to_multiplier);
3381 	memset(&rsp, 0, sizeof(rsp));
3383 	err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3385 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3387 		rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3389 	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* On success, push the new parameters down to the controller */
3393 		hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signalling command to its handler by opcode.
 * Unknown opcodes are logged (and, per the elided tail, rejected).
 */
3398 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3399 			struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3403 	switch (cmd->code) {
3404 	case L2CAP_COMMAND_REJ:
3405 		l2cap_command_rej(conn, cmd, data);
3408 	case L2CAP_CONN_REQ:
3409 		err = l2cap_connect_req(conn, cmd, data);
3412 	case L2CAP_CONN_RSP:
3413 		err = l2cap_connect_rsp(conn, cmd, data);
3416 	case L2CAP_CONF_REQ:
3417 		err = l2cap_config_req(conn, cmd, cmd_len, data);
3420 	case L2CAP_CONF_RSP:
3421 		err = l2cap_config_rsp(conn, cmd, data);
3424 	case L2CAP_DISCONN_REQ:
3425 		err = l2cap_disconnect_req(conn, cmd, data);
3428 	case L2CAP_DISCONN_RSP:
3429 		err = l2cap_disconnect_rsp(conn, cmd, data);
/* Echo request: simply reflect the payload back to the sender */
3432 	case L2CAP_ECHO_REQ:
3433 		l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3436 	case L2CAP_ECHO_RSP:
3439 	case L2CAP_INFO_REQ:
3440 		err = l2cap_information_req(conn, cmd, data);
3443 	case L2CAP_INFO_RSP:
3444 		err = l2cap_information_rsp(conn, cmd, data);
3447 	case L2CAP_CREATE_CHAN_REQ:
3448 		err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3451 	case L2CAP_CREATE_CHAN_RSP:
3452 		err = l2cap_create_channel_rsp(conn, cmd, data);
3455 	case L2CAP_MOVE_CHAN_REQ:
3456 		err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3459 	case L2CAP_MOVE_CHAN_RSP:
3460 		err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3463 	case L2CAP_MOVE_CHAN_CFM:
3464 		err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3467 	case L2CAP_MOVE_CHAN_CFM_RSP:
3468 		err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3472 		BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signalling command.  Only the connection parameter
 * update exchange is handled; the rest are logged as unknown.
 */
3480 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3481 					struct l2cap_cmd_hdr *cmd, u8 *data)
3483 	switch (cmd->code) {
3484 	case L2CAP_COMMAND_REJ:
3487 	case L2CAP_CONN_PARAM_UPDATE_REQ:
3488 		return l2cap_conn_param_update_req(conn, cmd, data);
3490 	case L2CAP_CONN_PARAM_UPDATE_RSP:
3494 		BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signalling channel payload: iterate over the packed command
 * headers in @skb, validate each, and dispatch to the LE or BR/EDR
 * command handler.  Failures are answered with a Command Reject.
 */
3499 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3500 						struct sk_buff *skb)
3502 	u8 *data = skb->data;
3504 	struct l2cap_cmd_hdr cmd;
/* Give raw (sniffer) sockets a copy of the signalling traffic first */
3507 	l2cap_raw_recv(conn, skb);
3509 	while (len >= L2CAP_CMD_HDR_SIZE) {
3511 		memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3512 		data += L2CAP_CMD_HDR_SIZE;
3513 		len -= L2CAP_CMD_HDR_SIZE;
3515 		cmd_len = le16_to_cpu(cmd.len);
3517 		BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command must fit in the remaining payload and carry a non-zero ident */
3519 		if (cmd_len > len || !cmd.ident) {
3520 			BT_DBG("corrupted command");
3524 		if (conn->hcon->type == LE_LINK)
3525 			err = l2cap_le_sig_cmd(conn, &cmd, data);
3527 			err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3530 			struct l2cap_cmd_rej_unk rej;
3532 			BT_ERR("Wrong link type (%d)", err);
3534 			/* FIXME: Map err to a valid reason */
3535 			rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3536 			l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the CRC16 FCS trailer on an incoming ERTM/streaming
 * frame.  The CRC covers the L2CAP header (which has already been pulled,
 * hence the skb->data - hdr_size backtrack) plus the payload.
 * Returns 0 on match; the mismatch return value is elided in this extract.
 */
3546 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3548 	u16 our_fcs, rcv_fcs;
/* Header size depends on whether extended control fields are in use */
3551 	if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3552 		hdr_size = L2CAP_EXT_HDR_SIZE;
3554 		hdr_size = L2CAP_ENH_HDR_SIZE;
3556 	if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the 2-byte FCS, then read it from just past the new tail */
3557 		skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3558 		rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3559 		our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3561 		if (our_fcs != rcv_fcs)
/* ERTM poll response helper: answer with RNR when locally busy, otherwise
 * (re)send pending I-frames, and fall back to an RR when nothing at all
 * was transmitted so the peer still gets an acknowledgement.
 */
3567 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3571 	chan->frames_sent = 0;
3573 	control |= __set_reqseq(chan, chan->buffer_seq);
3575 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3576 		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3577 		l2cap_send_sframe(chan, control);
3578 		set_bit(CONN_RNR_SENT, &chan->conn_state);
/* Remote was busy: retransmit what it missed before sending new data */
3581 	if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3582 		l2cap_retransmit_frames(chan);
3584 	l2cap_ertm_send(chan);
/* Nothing went out and we are not busy: ack explicitly with an RR */
3586 	if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3587 			chan->frames_sent == 0) {
3588 		control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3589 		l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq offset from buffer_seq and dropping duplicates
 * (the duplicate return value is elided in this extract).
 */
3593 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3595 	struct sk_buff *next_skb;
3596 	int tx_seq_offset, next_tx_seq_offset;
/* Stash sequencing metadata in the skb control block for reassembly */
3598 	bt_cb(skb)->tx_seq = tx_seq;
3599 	bt_cb(skb)->sar = sar;
3601 	next_skb = skb_peek(&chan->srej_q);
3603 	tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
/* Duplicate tx_seq already queued: do not insert twice */
3606 		if (bt_cb(next_skb)->tx_seq == tx_seq)
3609 		next_tx_seq_offset = __seq_offset(chan,
3610 				bt_cb(next_skb)->tx_seq, chan->buffer_seq);
/* Found the first queued frame that sorts after us: insert before it */
3612 		if (next_tx_seq_offset > tx_seq_offset) {
3613 			__skb_queue_before(&chan->srej_q, next_skb, skb);
3617 		if (skb_queue_is_last(&chan->srej_q, next_skb))
3620 		next_skb = skb_queue_next(&chan->srej_q, next_skb);
/* Largest offset so far: append at the tail */
3623 	__skb_queue_tail(&chan->srej_q, skb);
/* Append @new_frag to @skb's frag_list, tracking the tail via @last_frag
 * so repeated appends stay O(1), and update the aggregate length fields.
 */
3628 static void append_skb_frag(struct sk_buff *skb,
3629 			struct sk_buff *new_frag, struct sk_buff **last_frag)
3631 	/* skb->len reflects data in skb as well as all fragments
3632 	 * skb->data_len reflects only data in fragments
3634 	if (!skb_has_frag_list(skb))
3635 		skb_shinfo(skb)->frag_list = new_frag;
3637 	new_frag->next = NULL;
3639 	(*last_frag)->next = new_frag;
3640 	*last_frag = new_frag;
3642 	skb->len += new_frag->len;
3643 	skb->data_len += new_frag->len;
3644 	skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-tagged fragments.  Unsegmented frames are
 * delivered directly; START/CONTINUE/END fragments are accumulated on
 * chan->sdu via frag-list appends until the announced sdu_len is reached.
 * NOTE(review): many branches (state checks, error unwinding) are elided
 * in this extract — comments cover only the visible lines.
 */
3647 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3651 	switch (__get_ctrl_sar(chan, control)) {
3652 	case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame: hand it straight to the owner */
3656 		err = chan->ops->recv(chan->data, skb);
3659 	case L2CAP_SAR_START:
/* First fragment carries the total SDU length prefix */
3663 		chan->sdu_len = get_unaligned_le16(skb->data);
3664 		skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Announced SDU larger than our MTU: bail (error path elided) */
3666 		if (chan->sdu_len > chan->imtu) {
3671 		if (skb->len >= chan->sdu_len)
3675 		chan->sdu_last_frag = skb;
3681 	case L2CAP_SAR_CONTINUE:
3685 		append_skb_frag(chan->sdu, skb,
3686 				&chan->sdu_last_frag);
3689 		if (chan->sdu->len >= chan->sdu_len)
/* END fragment: append and check the SDU totals exactly match */
3699 		append_skb_frag(chan->sdu, skb,
3700 				&chan->sdu_last_frag);
3703 		if (chan->sdu->len != chan->sdu_len)
3706 		err = chan->ops->recv(chan->data, chan->sdu);
3709 			/* Reassembly complete */
3711 			chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU and reset reassembly state */
3719 	kfree_skb(chan->sdu);
3721 	chan->sdu_last_frag = NULL;
/* Mark the channel locally busy (receiver cannot accept more I-frames)
 * and arm the ack timer so an RNR eventually goes out.
 */
3728 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3730 	BT_DBG("chan %p, Enter local busy", chan);
3732 	set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3734 	__set_ack_timer(chan);
/* Leave local-busy.  If an RNR was sent, poll the peer with an RR P=1 and
 * wait for the F-bit response; otherwise just clear the busy flags.
 */
3737 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3741 	if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3744 	control = __set_reqseq(chan, chan->buffer_seq);
3745 	control |= __set_ctrl_poll(chan);
3746 	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3747 	l2cap_send_sframe(chan, control);
3748 	chan->retry_count = 1;
/* Switch from retransmission to monitor timer while the poll is pending */
3750 	__clear_retrans_timer(chan);
3751 	__set_monitor_timer(chan);
3753 	set_bit(CONN_WAIT_F, &chan->conn_state);
3756 	clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3757 	clear_bit(CONN_RNR_SENT, &chan->conn_state);
3759 	BT_DBG("chan %p, Exit local busy", chan);
/* Public hook: toggle ERTM local-busy state from the channel owner. */
3762 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3764 	if (chan->mode == L2CAP_MODE_ERTM) {
3766 			l2cap_ertm_enter_local_busy(chan);
3768 			l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ queue: deliver queued I-frames in sequence starting at
 * @tx_seq until a gap is hit or we go locally busy, advancing
 * buffer_seq_srej for each frame handed up.
 */
3772 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3774 	struct sk_buff *skb;
3777 	while ((skb = skb_peek(&chan->srej_q)) &&
3778 			!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Stop at the first hole in the sequence */
3781 		if (bt_cb(skb)->tx_seq != tx_seq)
3784 		skb = skb_dequeue(&chan->srej_q);
3785 		control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3786 		err = l2cap_reassemble_sdu(chan, skb, control);
/* Reassembly failure is fatal for the channel */
3789 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3793 		chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3794 		tx_seq = __next_seq(chan, tx_seq);
/* Re-send the SREJ for @tx_seq and rotate that entry to the tail of the
 * pending-SREJ list (entries before it are dropped — elided here).
 */
3798 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3800 	struct srej_list *l, *tmp;
3803 	list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3804 		if (l->tx_seq == tx_seq) {
3809 		control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3810 		control |= __set_reqseq(chan, l->tx_seq);
3811 		l2cap_send_sframe(chan, control);
3813 		list_add_tail(&l->list, &chan->srej_l);
/* Send SREJ frames for every sequence number missing up to @tx_seq,
 * recording each in chan->srej_l.  Returns 0, or a negative errno when
 * list-entry allocation fails (failure branch elided in this extract).
 */
3817 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3819 	struct srej_list *new;
3822 	while (tx_seq != chan->expected_tx_seq) {
3823 		control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3824 		control |= __set_reqseq(chan, chan->expected_tx_seq);
3825 		l2cap_send_sframe(chan, control);
/* GFP_ATOMIC: we may be in softirq context here */
3827 		new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3831 		new->tx_seq = chan->expected_tx_seq;
3833 		chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3835 		list_add_tail(&new->list, &chan->srej_l);
/* Skip past the frame that triggered the SREJ run */
3838 	chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive path: validate tx_seq against the receive
 * window, manage the SREJ recovery state machine for out-of-sequence
 * frames, reassemble in-sequence data, and schedule acknowledgements.
 * NOTE(review): this extract elides several lines (labels, drops,
 * returns); comments describe only the visible flow.
 */
3843 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3845 	u16 tx_seq = __get_txseq(chan, rx_control);
3846 	u16 req_seq = __get_reqseq(chan, rx_control);
3847 	u8 sar = __get_ctrl_sar(chan, rx_control);
3848 	int tx_seq_offset, expected_tx_seq_offset;
/* Ack threshold: heuristic fraction of the transmit window */
3849 	int num_to_ack = (chan->tx_win/6) + 1;
3852 	BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3853 							tx_seq, rx_control);
/* F-bit answers our earlier poll: stop the monitor timer */
3855 	if (__is_ctrl_final(chan, rx_control) &&
3856 			test_bit(CONN_WAIT_F, &chan->conn_state)) {
3857 		__clear_monitor_timer(chan);
3858 		if (chan->unacked_frames > 0)
3859 			__set_retrans_timer(chan);
3860 		clear_bit(CONN_WAIT_F, &chan->conn_state);
/* Piggybacked req_seq acknowledges our transmitted frames */
3863 	chan->expected_ack_seq = req_seq;
3864 	l2cap_drop_acked_frames(chan);
3866 	tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3868 	/* invalid tx_seq */
3869 	if (tx_seq_offset >= chan->tx_win) {
3870 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Locally busy: only ack, don't consume data */
3874 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3875 		if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3876 			l2cap_send_ack(chan);
3880 	if (tx_seq == chan->expected_tx_seq)
/* Out of sequence while SREJ recovery is already in progress */
3883 	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3884 		struct srej_list *first;
3886 		first = list_first_entry(&chan->srej_l,
3887 				struct srej_list, list);
3888 		if (tx_seq == first->tx_seq) {
/* This is the oldest missing frame: queue it and drain the gap */
3889 			l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3890 			l2cap_check_srej_gap(chan, tx_seq);
3892 			list_del(&first->list);
/* All requested frames recovered: leave SREJ_SENT state */
3895 			if (list_empty(&chan->srej_l)) {
3896 				chan->buffer_seq = chan->buffer_seq_srej;
3897 				clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3898 				l2cap_send_ack(chan);
3899 				BT_DBG("chan %p, Exit SREJ_SENT", chan);
3902 			struct srej_list *l;
3904 			/* duplicated tx_seq */
3905 			if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
/* Frame we already SREJ'd arrived again: resend outstanding SREJs */
3908 			list_for_each_entry(l, &chan->srej_l, list) {
3909 				if (l->tx_seq == tx_seq) {
3910 					l2cap_resend_srejframe(chan, tx_seq);
/* New hole past the known ones: extend the SREJ list */
3915 			err = l2cap_send_srejframe(chan, tx_seq);
3917 				l2cap_send_disconn_req(chan->conn, chan, -err);
/* Not yet in SREJ recovery: decide between duplicate and new gap */
3922 		expected_tx_seq_offset = __seq_offset(chan,
3923 				chan->expected_tx_seq, chan->buffer_seq);
3925 		/* duplicated tx_seq */
3926 		if (tx_seq_offset < expected_tx_seq_offset)
3929 		set_bit(CONN_SREJ_SENT, &chan->conn_state);
3931 		BT_DBG("chan %p, Enter SREJ", chan);
3933 		INIT_LIST_HEAD(&chan->srej_l);
3934 		chan->buffer_seq_srej = chan->buffer_seq;
3936 		__skb_queue_head_init(&chan->srej_q);
3937 		l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3939 		/* Set P-bit only if there are some I-frames to ack. */
3940 		if (__clear_ack_timer(chan))
3941 			set_bit(CONN_SEND_PBIT, &chan->conn_state);
3943 		err = l2cap_send_srejframe(chan, tx_seq);
3945 			l2cap_send_disconn_req(chan->conn, chan, -err);
/* In-sequence frame: advance the window and deliver (or queue if the
 * SREJ queue is still draining). */
3952 	chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3954 	if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3955 		bt_cb(skb)->tx_seq = tx_seq;
3956 		bt_cb(skb)->sar = sar;
3957 		__skb_queue_tail(&chan->srej_q, skb);
3961 	err = l2cap_reassemble_sdu(chan, skb, rx_control);
3962 	chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3965 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3969 	if (__is_ctrl_final(chan, rx_control)) {
3970 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3971 			l2cap_retransmit_frames(chan);
/* Ack every num_to_ack frames; otherwise defer via the ack timer */
3975 	chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3976 	if (chan->num_acked == num_to_ack - 1)
3977 		l2cap_send_ack(chan);
3979 		__set_ack_timer(chan);
/* Handle a Receiver Ready (RR) supervisory frame: acknowledge frames up
 * to req_seq, answer polls, and resume transmission as appropriate.
 */
3988 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3990 	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3991 			__get_reqseq(chan, rx_control), rx_control);
3993 	chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3994 	l2cap_drop_acked_frames(chan);
/* Peer polled us (P=1): we must answer with F=1 */
3996 	if (__is_ctrl_poll(chan, rx_control)) {
3997 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
3998 		if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3999 			if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4000 					(chan->unacked_frames > 0))
4001 				__set_retrans_timer(chan);
4003 			clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4004 			l2cap_send_srejtail(chan);
4006 			l2cap_send_i_or_rr_or_rnr(chan);
/* F=1 answers our poll: retransmit unless a REJ already did */
4009 	} else if (__is_ctrl_final(chan, rx_control)) {
4010 		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4012 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4013 			l2cap_retransmit_frames(chan);
/* Plain RR: restart retransmission timer if frames are still unacked */
4016 		if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4017 				(chan->unacked_frames > 0))
4018 			__set_retrans_timer(chan);
4020 		clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4021 		if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4022 			l2cap_send_ack(chan);
4024 			l2cap_ertm_send(chan);
/* Handle a Reject (REJ) frame: ack up to req_seq then go back and
 * retransmit from there (go-back-N recovery).
 */
4028 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4030 	u16 tx_seq = __get_reqseq(chan, rx_control);
4032 	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4034 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4036 	chan->expected_ack_seq = tx_seq;
4037 	l2cap_drop_acked_frames(chan);
4039 	if (__is_ctrl_final(chan, rx_control)) {
/* Avoid double retransmission when REJ_ACT was already handled */
4040 		if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4041 			l2cap_retransmit_frames(chan);
4043 		l2cap_retransmit_frames(chan);
4045 		if (test_bit(CONN_WAIT_F, &chan->conn_state))
4046 			set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a Selective Reject (SREJ): retransmit only the single frame the
 * peer asked for, handling P/F-bit bookkeeping around poll exchanges.
 */
4049 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4051 	u16 tx_seq = __get_reqseq(chan, rx_control);
4053 	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4055 	clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4057 	if (__is_ctrl_poll(chan, rx_control)) {
4058 		chan->expected_ack_seq = tx_seq;
4059 		l2cap_drop_acked_frames(chan);
4061 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
4062 		l2cap_retransmit_one_frame(chan, tx_seq);
4064 		l2cap_ertm_send(chan);
4066 		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4067 			chan->srej_save_reqseq = tx_seq;
4068 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
4070 	} else if (__is_ctrl_final(chan, rx_control)) {
/* F=1: only skip retransmit if this SREJ was already acted upon */
4071 		if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4072 				chan->srej_save_reqseq == tx_seq)
4073 			clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4075 			l2cap_retransmit_one_frame(chan, tx_seq);
4077 		l2cap_retransmit_one_frame(chan, tx_seq);
4078 		if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4079 			chan->srej_save_reqseq = tx_seq;
4080 			set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a Receiver Not Ready (RNR): peer is busy — ack what it got,
 * stop retransmitting, and answer any poll.
 */
4085 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4087 	u16 tx_seq = __get_reqseq(chan, rx_control);
4089 	BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4091 	set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4092 	chan->expected_ack_seq = tx_seq;
4093 	l2cap_drop_acked_frames(chan);
4095 	if (__is_ctrl_poll(chan, rx_control))
4096 		set_bit(CONN_SEND_FBIT, &chan->conn_state);
4098 	if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4099 		__clear_retrans_timer(chan);
4100 		if (__is_ctrl_poll(chan, rx_control))
4101 			l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
/* In SREJ recovery: answer the poll with the outstanding SREJ tail */
4105 	if (__is_ctrl_poll(chan, rx_control)) {
4106 		l2cap_send_srejtail(chan);
4108 		rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4109 		l2cap_send_sframe(chan, rx_control);
/* Dispatch a supervisory frame (RR/REJ/SREJ/RNR) to its handler after
 * common F-bit poll-response bookkeeping.
 */
4113 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4115 	BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4117 	if (__is_ctrl_final(chan, rx_control) &&
4118 			test_bit(CONN_WAIT_F, &chan->conn_state)) {
4119 		__clear_monitor_timer(chan);
4120 		if (chan->unacked_frames > 0)
4121 			__set_retrans_timer(chan);
4122 		clear_bit(CONN_WAIT_F, &chan->conn_state);
4125 	switch (__get_ctrl_super(chan, rx_control)) {
4126 	case L2CAP_SUPER_RR:
4127 		l2cap_data_channel_rrframe(chan, rx_control);
4130 	case L2CAP_SUPER_REJ:
4131 		l2cap_data_channel_rejframe(chan, rx_control);
4134 	case L2CAP_SUPER_SREJ:
4135 		l2cap_data_channel_srejframe(chan, rx_control);
4138 	case L2CAP_SUPER_RNR:
4139 		l2cap_data_channel_rnrframe(chan, rx_control);
/* ERTM receive entry point: strip the control field, verify FCS and
 * length limits, validate req_seq, then route to the I-frame or S-frame
 * path.  Invalid frames tear the channel down with ECONNRESET.
 */
4147 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4151 	int len, next_tx_seq_offset, req_seq_offset;
4153 	control = __get_control(chan, skb->data);
4154 	skb_pull(skb, __ctrl_size(chan));
4158 	 * We can just drop the corrupted I-frame here.
4159 	 * Receiver will miss it and start proper recovery
4160 	 * procedures and ask retransmission.
4162 	if (l2cap_check_fcs(chan, skb))
/* SDU-length prefix and FCS trailer are not payload */
4165 	if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4166 		len -= L2CAP_SDULEN_SIZE;
4168 	if (chan->fcs == L2CAP_FCS_CRC16)
4169 		len -= L2CAP_FCS_SIZE;
4171 	if (len > chan->mps) {
4172 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4176 	req_seq = __get_reqseq(chan, control);
4178 	req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4180 	next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4181 						chan->expected_ack_seq);
4183 	/* check for invalid req-seq */
4184 	if (req_seq_offset > next_tx_seq_offset) {
4185 		l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4189 	if (!__is_sframe(chan, control)) {
/* I-frame with no payload is a protocol violation (check elided) */
4191 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4195 		l2cap_data_channel_iframe(chan, control, skb);
/* S-frame carrying payload is likewise invalid */
4199 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4203 		l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data packet to the connection-oriented channel identified by
 * @cid, handling each channel mode: basic (direct delivery), ERTM
 * (full state machine) and streaming (sequence-checked, lossy).
 */
4213 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4215 	struct l2cap_chan *chan;
4216 	struct sock *sk = NULL;
4221 	chan = l2cap_get_chan_by_scid(conn, cid);
4223 		BT_DBG("unknown cid 0x%4.4x", cid);
4229 	BT_DBG("chan %p, len %d", chan, skb->len);
4231 	if (chan->state != BT_CONNECTED)
4234 	switch (chan->mode) {
4235 	case L2CAP_MODE_BASIC:
4236 		/* If socket recv buffers overflows we drop data here
4237 		 * which is *bad* because L2CAP has to be reliable.
4238 		 * But we don't have any other choice. L2CAP doesn't
4239 		 * provide flow control mechanism. */
4241 		if (chan->imtu < skb->len)
4244 		if (!chan->ops->recv(chan->data, skb))
4248 	case L2CAP_MODE_ERTM:
4249 		l2cap_ertm_data_rcv(chan, skb);
4253 	case L2CAP_MODE_STREAMING:
4254 		control = __get_control(chan, skb->data);
4255 		skb_pull(skb, __ctrl_size(chan));
4258 		if (l2cap_check_fcs(chan, skb))
4261 		if (__is_sar_start(chan, control))
4262 			len -= L2CAP_SDULEN_SIZE;
4264 		if (chan->fcs == L2CAP_FCS_CRC16)
4265 			len -= L2CAP_FCS_SIZE;
/* Streaming mode forbids S-frames and oversized payloads */
4267 		if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4270 		tx_seq = __get_txseq(chan, control);
4272 		if (chan->expected_tx_seq != tx_seq) {
4273 			/* Frame(s) missing - must discard partial SDU */
4274 			kfree_skb(chan->sdu);
4276 			chan->sdu_last_frag = NULL;
4279 			/* TODO: Notify userland of missing data */
4282 		chan->expected_tx_seq = __next_seq(chan, tx_seq);
4284 		if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4285 			l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4290 		BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (G-frame) packet to a socket bound to @psm,
 * subject to state and MTU checks.
 */
4304 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4306 	struct sock *sk = NULL;
4307 	struct l2cap_chan *chan;
4309 	chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4317 	BT_DBG("sk %p, len %d", sk, skb->len);
4319 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4322 	if (chan->imtu < skb->len)
4325 	if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE fixed-channel) packet to the socket bound to @cid,
 * with the same state and MTU checks as the connectionless path.
 */
4337 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4339 	struct sock *sk = NULL;
4340 	struct l2cap_chan *chan;
4342 	chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4350 	BT_DBG("sk %p, len %d", sk, skb->len);
4352 	if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4355 	if (chan->imtu < skb->len)
4358 	if (!chan->ops->recv(chan->data, skb))
/* Top-level L2CAP frame demux: strip the basic header, check the length
 * field, and route by CID (signalling, connectionless, ATT, SMP, or a
 * connection-oriented data channel).
 */
4370 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4372 	struct l2cap_hdr *lh = (void *) skb->data;
4376 	skb_pull(skb, L2CAP_HDR_SIZE);
4377 	cid = __le16_to_cpu(lh->cid);
4378 	len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload (drop path elided) */
4380 	if (len != skb->len) {
4385 	BT_DBG("len %d, cid 0x%4.4x", len, cid);
4388 	case L2CAP_CID_LE_SIGNALING:
4389 	case L2CAP_CID_SIGNALING:
4390 		l2cap_sig_channel(conn, skb);
4393 	case L2CAP_CID_CONN_LESS:
4394 		psm = get_unaligned_le16(skb->data);
4396 		l2cap_conless_channel(conn, psm, skb);
4399 	case L2CAP_CID_LE_DATA:
4400 		l2cap_att_channel(conn, cid, skb);
/* SMP security-manager channel; failure kills the whole connection */
4404 		if (smp_sig_channel(conn, skb))
4405 			l2cap_conn_del(conn->hcon, EACCES);
4409 		l2cap_data_channel(conn, cid, skb);
4414 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * @bdaddr?  Scan listening channels; an exact local-address match wins
 * over wildcard (BDADDR_ANY) listeners.  Returns HCI link-mode flags.
 */
4416 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4418 	int exact = 0, lm1 = 0, lm2 = 0;
4419 	struct l2cap_chan *c;
4421 	BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4423 	/* Find listening sockets and check their link_mode */
4424 	read_lock(&chan_list_lock);
4425 	list_for_each_entry(c, &chan_list, global_l) {
4426 		struct sock *sk = c->sk;
4428 		if (c->state != BT_LISTEN)
/* Listener bound to this adapter's own address: exact match */
4431 		if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4432 			lm1 |= HCI_LM_ACCEPT;
4433 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4434 				lm1 |= HCI_LM_MASTER;
/* Wildcard listener: fallback when no exact match exists */
4436 		} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4437 			lm2 |= HCI_LM_ACCEPT;
4438 			if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4439 				lm2 |= HCI_LM_MASTER;
4442 	read_unlock(&chan_list_lock);
4444 	return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed.  On success set up the
 * L2CAP connection object; on failure tear everything down.
 */
4447 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4449 	struct l2cap_conn *conn;
4451 	BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4454 		conn = l2cap_conn_add(hcon, status);
4456 			l2cap_conn_ready(conn);
4458 		l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: supply the disconnect reason recorded for this link. */
4463 int l2cap_disconn_ind(struct hci_conn *hcon)
4465 	struct l2cap_conn *conn = hcon->l2cap_data;
4467 	BT_DBG("hcon %p", hcon);
/* No L2CAP state: report a generic remote-termination reason */
4470 		return HCI_ERROR_REMOTE_USER_TERM;
4471 	return conn->disc_reason;
/* HCI callback: the link went down — delete the L2CAP connection. */
4474 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4476 	BT_DBG("hcon %p reason %d", hcon, reason);
4478 	l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel: on
 * loss of encryption, give MEDIUM-security channels a grace timer and
 * close HIGH-security channels outright.
 */
4482 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4484 	if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4487 	if (encrypt == 0x00) {
4488 		if (chan->sec_level == BT_SECURITY_MEDIUM) {
4489 			__clear_chan_timer(chan);
4490 			__set_chan_timer(chan,
4491 					msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4492 		} else if (chan->sec_level == BT_SECURITY_HIGH)
4493 			l2cap_chan_close(chan, ECONNREFUSED);
4495 		if (chan->sec_level == BT_SECURITY_MEDIUM)
4496 			__clear_chan_timer(chan);
/* HCI callback: an authentication/encryption procedure finished with
 * @status and resulting @encrypt state.  Walk every channel on the
 * connection and advance those that were waiting on security: resume
 * pending outgoing connects, answer deferred incoming connects, and
 * apply encryption-loss policy to established channels.
 * NOTE(review): several lines (locking, loop continues) are elided in
 * this extract; comments cover only the visible flow.
 */
4500 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4502 	struct l2cap_conn *conn = hcon->l2cap_data;
4503 	struct l2cap_chan *chan;
4508 	BT_DBG("conn %p", conn);
/* LE links complete SMP pairing here instead of the BR/EDR flow */
4510 	if (hcon->type == LE_LINK) {
4511 		smp_distribute_keys(conn, 0);
4512 		cancel_delayed_work(&conn->security_timer);
4517 	list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4518 		struct sock *sk = chan->sk;
4522 		BT_DBG("chan->scid %d", chan->scid);
4524 		if (chan->scid == L2CAP_CID_LE_DATA) {
4525 			if (!status && encrypt) {
4526 				chan->sec_level = hcon->sec_level;
4527 				l2cap_chan_ready(sk);
/* Channel not waiting on a security procedure: skip it */
4534 		if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4539 		if (!status && (chan->state == BT_CONNECTED ||
4540 						chan->state == BT_CONFIG)) {
4541 			l2cap_check_encryption(chan, encrypt);
/* Security done before our Connect Request went out: send it now */
4546 		if (chan->state == BT_CONNECT) {
4548 				struct l2cap_conn_req req;
4549 				req.scid = cpu_to_le16(chan->scid);
4550 				req.psm  = chan->psm;
4552 				chan->ident = l2cap_get_ident(conn);
4553 				set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4555 				l2cap_send_cmd(conn, chan->ident,
4556 					L2CAP_CONN_REQ, sizeof(req), &req);
/* Security failed: schedule disconnect via the channel timer */
4558 				__clear_chan_timer(chan);
4559 				__set_chan_timer(chan,
4560 					msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4562 		} else if (chan->state == BT_CONNECT2) {
4563 			struct l2cap_conn_rsp rsp;
/* Deferred accept: wake the listening parent and report PENDING */
4567 				if (bt_sk(sk)->defer_setup) {
4568 					struct sock *parent = bt_sk(sk)->parent;
4569 					res = L2CAP_CR_PEND;
4570 					stat = L2CAP_CS_AUTHOR_PEND;
4572 						parent->sk_data_ready(parent, 0);
4574 					l2cap_state_change(chan, BT_CONFIG);
4575 					res = L2CAP_CR_SUCCESS;
4576 					stat = L2CAP_CS_NO_INFO;
/* Security failure on an incoming connect: refuse it */
4579 				l2cap_state_change(chan, BT_DISCONN);
4580 				__set_chan_timer(chan,
4581 					msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4582 				res = L2CAP_CR_SEC_BLOCK;
4583 				stat = L2CAP_CS_NO_INFO;
4586 			rsp.scid = cpu_to_le16(chan->dcid);
4587 			rsp.dcid = cpu_to_le16(chan->scid);
4588 			rsp.result = cpu_to_le16(res);
4589 			rsp.status = cpu_to_le16(stat);
4590 			l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/*
 * l2cap_recv_acldata - entry point for inbound ACL data from the HCI core.
 * @skb:   the received fragment (or complete frame)
 * @flags: ACL packet-boundary flags; ACL_CONT marks a continuation fragment
 *
 * Reassembles fragmented L2CAP frames into conn->rx_skb and passes complete
 * frames to l2cap_recv_frame().  Malformed sequences mark the connection
 * unreliable via l2cap_conn_unreliable().
 *
 * NOTE(review): error-path returns, unlocks and closing braces are elided
 * from this view; comments describe only the visible lines.
 */
4602 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4604 struct l2cap_conn *conn = hcon->l2cap_data;
/* No L2CAP state yet for this ACL link — create it on demand. */
4607 conn = l2cap_conn_add(hcon, 0);
4612 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
/* Start fragment (not a continuation). */
4614 if (!(flags & ACL_CONT)) {
4615 struct l2cap_hdr *hdr;
4616 struct l2cap_chan *chan;
/* A start frame while reassembly is in progress means the previous
 * frame was truncated — drop the partial buffer. */
4621 BT_ERR("Unexpected start frame (len %d)", skb->len);
4622 kfree_skb(conn->rx_skb);
4623 conn->rx_skb = NULL;
4625 l2cap_conn_unreliable(conn, ECOMM);
4628 /* Start fragment always begin with Basic L2CAP header */
4629 if (skb->len < L2CAP_HDR_SIZE) {
4630 BT_ERR("Frame is too short (len %d)", skb->len);
4631 l2cap_conn_unreliable(conn, ECOMM);
/* Parse the Basic L2CAP header: total frame length and channel id. */
4635 hdr = (struct l2cap_hdr *) skb->data;
4636 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4637 cid = __le16_to_cpu(hdr->cid);
4639 if (len == skb->len) {
4640 /* Complete frame received */
4641 l2cap_recv_frame(conn, skb);
4645 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
/* First fragment already larger than the advertised frame length. */
4647 if (skb->len > len) {
4648 BT_ERR("Frame is too long (len %d, expected len %d)",
4650 l2cap_conn_unreliable(conn, ECOMM);
/* Sanity-check the payload against the destination channel's MTU. */
4654 chan = l2cap_get_chan_by_scid(conn, cid);
4656 if (chan && chan->sk) {
4657 struct sock *sk = chan->sk;
4659 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4660 BT_ERR("Frame exceeding recv MTU (len %d, "
4664 l2cap_conn_unreliable(conn, ECOMM);
4670 /* Allocate skb for the complete frame (with header) */
4671 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
/* Copy this first fragment in and record how many bytes remain. */
4675 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4677 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4679 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress is a protocol error. */
4681 if (!conn->rx_len) {
4682 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4683 l2cap_conn_unreliable(conn, ECOMM);
/* Fragment would overflow the remaining expected bytes — abort
 * reassembly and drop the partial buffer. */
4687 if (skb->len > conn->rx_len) {
4688 BT_ERR("Fragment is too long (len %d, expected %d)",
4689 skb->len, conn->rx_len);
4690 kfree_skb(conn->rx_skb);
4691 conn->rx_skb = NULL;
4693 l2cap_conn_unreliable(conn, ECOMM);
/* Append the fragment and decrement the outstanding byte count. */
4697 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4699 conn->rx_len -= skb->len;
4701 if (!conn->rx_len) {
4702 /* Complete frame received */
4703 l2cap_recv_frame(conn, conn->rx_skb);
4704 conn->rx_skb = NULL;
/*
 * l2cap_debugfs_show - seq_file show callback for the "l2cap" debugfs file.
 *
 * Dumps one line per L2CAP channel on the global chan_list: source/dest
 * bdaddrs, state, PSM, scid/dcid, MTUs, security level and mode.  The list
 * is traversed under chan_list_lock (read side).
 */
4713 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4715 struct l2cap_chan *c;
4717 read_lock(&chan_list_lock);
4719 list_for_each_entry(c, &chan_list, global_l) {
4720 struct sock *sk = c->sk;
/* psm is stored little-endian on the channel, hence the __le16_to_cpu. */
4722 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4723 batostr(&bt_sk(sk)->src),
4724 batostr(&bt_sk(sk)->dst),
4725 c->state, __le16_to_cpu(c->psm),
4726 c->scid, c->dcid, c->imtu, c->omtu,
4727 c->sec_level, c->mode);
4730 read_unlock(&chan_list_lock);
/* Open callback: bind the single-record seq_file to l2cap_debugfs_show. */
4735 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4737 return single_open(file, l2cap_debugfs_show, inode->i_private);
/*
 * file_operations for the debugfs entry; standard single_open/seq_file
 * plumbing.  NOTE(review): the .read = seq_read line appears to be elided
 * from this view — confirm against the full source.
 */
4740 static const struct file_operations l2cap_debugfs_fops = {
4741 .open = l2cap_debugfs_open,
4743 .llseek = seq_lseek,
4744 .release = single_release,
/* Handle for the "l2cap" debugfs file; created in l2cap_init(),
 * removed in l2cap_exit(). */
4747 static struct dentry *l2cap_debugfs;
/*
 * l2cap_init - subsystem initialization: register the L2CAP socket layer
 * and create the debugfs entry.  A debugfs failure is only logged, not
 * fatal.  NOTE(review): the error-return path after l2cap_init_sockets()
 * is elided from this view.
 */
4749 int __init l2cap_init(void)
4753 err = l2cap_init_sockets();
/* 0444: world-readable, read-only debug view under bt_debugfs. */
4758 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4759 bt_debugfs, NULL, &l2cap_debugfs_fops);
4761 BT_ERR("Failed to create L2CAP debug file");
/* l2cap_exit - teardown: remove the debugfs entry, then unregister the
 * socket layer (reverse order of l2cap_init). */
4767 void l2cap_exit(void)
4769 debugfs_remove(l2cap_debugfs);
4770 l2cap_cleanup_sockets();
/* Module parameter: allow disabling ERTM (enhanced retransmission mode)
 * at load time or via sysfs (0644: root-writable). */
4773 module_param(disable_ertm, bool, 0644);
4774 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");