2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
/* Feature mask and fixed-channel map advertised in L2CAP information
 * responses; the signalling fixed channel is always supported. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of every L2CAP channel in the system, protected by
 * chan_list_lock (bh-safe rwlock). */
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers defined later in the file. */
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
78 /* ---- L2CAP channels ---- */
/* Take a reference on the channel; paired with chan_put(). */
80 static inline void chan_hold(struct l2cap_chan *c)
82 atomic_inc(&c->refcnt);
/* Drop a channel reference; the body executed on the final put (the
 * free of the channel) is not visible in this extract. */
85 static inline void chan_put(struct l2cap_chan *c)
87 if (atomic_dec_and_test(&c->refcnt))
/* Look up a channel on @conn by destination CID via an RCU list walk;
 * caller must be in an RCU read-side critical section.
 * NOTE(review): the dcid comparison and return are in truncated lines. */
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
93 struct l2cap_chan *c, *r = NULL;
97 list_for_each_entry_rcu(c, &conn->chan_l, list) {
/* Look up a channel on @conn by source CID via an RCU list walk;
 * caller must be in an RCU read-side critical section. */
108 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
110 struct l2cap_chan *c, *r = NULL;
114 list_for_each_entry_rcu(c, &conn->chan_l, list) {
115 if (c->scid == cid) {
125 /* Find channel with given SCID.
126 * Returns locked socket */
/* Wrapper around __l2cap_get_chan_by_scid(); the locking statements
 * surrounding the lookup fall outside this extract. */
127 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
129 struct l2cap_chan *c;
131 c = __l2cap_get_chan_by_scid(conn, cid);
/* Look up a channel on @conn by the signalling command identifier it
 * last used; RCU list walk, caller must hold the RCU read lock. */
137 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
139 struct l2cap_chan *c, *r = NULL;
143 list_for_each_entry_rcu(c, &conn->chan_l, list) {
144 if (c->ident == ident) {
/* Wrapper around __l2cap_get_chan_by_ident(); locking statements are
 * in lines not visible in this extract. */
154 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
156 struct l2cap_chan *c;
158 c = __l2cap_get_chan_by_ident(conn, ident);
/* Find a channel in the global list bound to source PSM @psm on local
 * address @src. Caller must hold chan_list_lock. */
164 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
166 struct l2cap_chan *c;
168 list_for_each_entry(c, &chan_list, global_l) {
169 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on source address @src. A zero PSM requests
 * auto-allocation: the first free odd PSM in 0x1001..0x10ff is taken.
 * The in-use check and error returns are in truncated lines. */
175 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
179 write_lock_bh(&chan_list_lock);
181 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Valid dynamic PSMs are odd with bit 8 clear, hence the +2 stride. */
194 for (p = 0x1001; p < 0x1100; p += 2)
195 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
196 chan->psm = cpu_to_le16(p);
197 chan->sport = cpu_to_le16(p);
204 write_unlock_bh(&chan_list_lock);
/* Assign a fixed source CID to @chan under the global channel lock;
 * the assignment itself is in a truncated line. */
208 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
210 write_lock_bh(&chan_list_lock);
214 write_unlock_bh(&chan_list_lock);
/* Return the first dynamic-range CID not yet used as a source CID on
 * @conn; the return statements are in truncated lines. */
219 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
221 u16 cid = L2CAP_CID_DYN_START;
223 for (; cid < L2CAP_CID_DYN_END; cid++) {
224 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm a channel delayed work item: cancel any pending instance
 * synchronously, then schedule it @timeout jiffies from now. */
231 static void l2cap_set_timer(struct l2cap_chan *chan, struct delayed_work *work, long timeout)
233 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
235 cancel_delayed_work_sync(work);
237 schedule_delayed_work(work, timeout);
/* Cancel a channel delayed work item, waiting for a running instance. */
240 static void l2cap_clear_timer(struct delayed_work *work)
242 cancel_delayed_work_sync(work);
/* Map a BT_* channel state to its name for debug output; most switch
 * cases are in truncated lines, unknown states yield "invalid state". */
245 static char *state_to_string(int state)
249 return "BT_CONNECTED";
259 return "BT_CONNECT2";
268 return "invalid state";
/* Move @chan to @state and notify the owner through the state_change
 * callback; the assignment of chan->state is in a truncated line. */
271 static void l2cap_state_change(struct l2cap_chan *chan, int state)
273 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
274 state_to_string(state));
277 chan->ops->state_change(chan->data, state);
/* chan_timer expiry handler: derive a close reason from the current
 * channel state, then close the channel and notify the owner.
 * NOTE(review): the default reason branch (presumably ETIMEDOUT) and
 * the locking/refcount handling are in truncated lines. */
280 static void l2cap_chan_timeout(struct work_struct *work)
282 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
284 struct sock *sk = chan->sk;
287 BT_DBG("chan %p state %d", chan, chan->state);
291 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
292 reason = ECONNREFUSED;
293 else if (chan->state == BT_CONNECT &&
294 chan->sec_level != BT_SECURITY_SDP)
295 reason = ECONNREFUSED;
299 l2cap_chan_close(chan, reason);
303 chan->ops->close(chan->data);
/* Allocate a new channel for socket @sk, link it onto the global
 * channel list, initialise its timeout work, and start it in BT_OPEN
 * with one reference held. The NULL-check after kzalloc and the
 * chan->sk assignment are in truncated lines. */
307 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
309 struct l2cap_chan *chan;
311 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
317 write_lock_bh(&chan_list_lock);
318 list_add(&chan->global_l, &chan_list);
319 write_unlock_bh(&chan_list_lock);
321 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
323 chan->state = BT_OPEN;
325 atomic_set(&chan->refcnt, 1);
327 BT_DBG("sk %p chan %p", sk, chan);
/* Unlink @chan from the global channel list; the final reference drop
 * is in a truncated line. */
332 void l2cap_chan_destroy(struct l2cap_chan *chan)
334 write_lock_bh(&chan_list_lock);
335 list_del(&chan->global_l);
336 write_unlock_bh(&chan_list_lock);
/* Attach @chan to @conn: pick CIDs and the outgoing MTU according to
 * the channel type (connection-oriented over LE or ACL, connectionless,
 * or raw/signalling-only), set default QoS parameters, and add the
 * channel to the connection's RCU-protected list. */
341 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
343 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
344 chan->psm, chan->dcid);
/* Until told otherwise, a later disconnect is attributed to the peer. */
346 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
350 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
351 if (conn->hcon->type == LE_LINK) {
/* LE data channel: fixed CID, LE default MTU. */
353 chan->omtu = L2CAP_LE_DEFAULT_MTU;
354 chan->scid = L2CAP_CID_LE_DATA;
355 chan->dcid = L2CAP_CID_LE_DATA;
357 /* Alloc CID for connection-oriented socket */
358 chan->scid = l2cap_alloc_cid(conn);
359 chan->omtu = L2CAP_DEFAULT_MTU;
361 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
362 /* Connectionless socket */
363 chan->scid = L2CAP_CID_CONN_LESS;
364 chan->dcid = L2CAP_CID_CONN_LESS;
365 chan->omtu = L2CAP_DEFAULT_MTU;
367 /* Raw socket can send/recv signalling messages only */
368 chan->scid = L2CAP_CID_SIGNALING;
369 chan->dcid = L2CAP_CID_SIGNALING;
370 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort QoS parameters for extended flow control. */
373 chan->local_id = L2CAP_BESTEFFORT_ID;
374 chan->local_stype = L2CAP_SERV_BESTEFFORT;
375 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
376 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
377 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
378 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
382 list_add_rcu(&chan->list, &conn->chan_l);
386 * Must be called on the locked socket. */
/* Detach @chan from its connection and tear down its state: stop the
 * channel timer, unlink from the connection list, drop the hci_conn
 * reference, mark the socket zapped, wake any accepting parent, and
 * purge ERTM queues/timers when applicable. */
387 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
389 struct sock *sk = chan->sk;
390 struct l2cap_conn *conn = chan->conn;
391 struct sock *parent = bt_sk(sk)->parent;
393 __clear_chan_timer(chan);
395 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
398 /* Delete from channel list */
399 list_del_rcu(&chan->list);
405 hci_conn_put(conn->hcon);
408 l2cap_state_change(chan, BT_CLOSED);
409 sock_set_flag(sk, SOCK_ZAPPED);
/* If this was a not-yet-accepted child, detach it and wake the
 * listening parent; otherwise just signal the socket itself. */
415 bt_accept_unlink(sk);
416 parent->sk_data_ready(parent, 0);
418 sk->sk_state_change(sk);
/* Skip queue teardown when configuration never completed both ways. */
420 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
421 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
424 skb_queue_purge(&chan->tx_q);
426 if (chan->mode == L2CAP_MODE_ERTM) {
427 struct srej_list *l, *tmp;
429 __clear_retrans_timer(chan);
430 __clear_monitor_timer(chan);
431 __clear_ack_timer(chan);
433 skb_queue_purge(&chan->srej_q);
/* Free every outstanding SREJ bookkeeping entry. */
435 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every child channel still sitting on @parent's accept queue
 * (connections accepted by the stack but not yet by userspace). */
442 static void l2cap_chan_cleanup_listen(struct sock *parent)
446 BT_DBG("parent %p", parent);
448 /* Close not yet accepted channels */
449 while ((sk = bt_accept_dequeue(parent, NULL))) {
450 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
451 __clear_chan_timer(chan);
453 l2cap_chan_close(chan, ECONNRESET);
455 chan->ops->close(chan->data);
/* Close @chan according to its current state: listening sockets get
 * their accept queue flushed; connected/configuring ACL channels send a
 * Disconnect request; a half-open incoming channel (BT_CONNECT2) is
 * refused with a Connect response; otherwise the channel is deleted
 * outright. The switch case labels themselves are in truncated lines. */
459 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
461 struct l2cap_conn *conn = chan->conn;
462 struct sock *sk = chan->sk;
464 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
466 switch (chan->state) {
468 l2cap_chan_cleanup_listen(sk);
470 l2cap_state_change(chan, BT_CLOSED);
471 sock_set_flag(sk, SOCK_ZAPPED);
476 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
477 conn->hcon->type == ACL_LINK) {
478 __clear_chan_timer(chan);
479 __set_chan_timer(chan, sk->sk_sndtimeo);
480 l2cap_send_disconn_req(conn, chan, reason);
482 l2cap_chan_del(chan, reason);
486 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
487 conn->hcon->type == ACL_LINK) {
488 struct l2cap_conn_rsp rsp;
/* Refuse the pending incoming connection: SEC_BLOCK when the
 * app deferred setup (security not yet approved), BAD_PSM
 * otherwise. */
491 if (bt_sk(sk)->defer_setup)
492 result = L2CAP_CR_SEC_BLOCK;
494 result = L2CAP_CR_BAD_PSM;
495 l2cap_state_change(chan, BT_DISCONN);
/* Note dcid/scid are swapped: the response carries our view
 * of the peer's identifiers. */
497 rsp.scid = cpu_to_le16(chan->dcid);
498 rsp.dcid = cpu_to_le16(chan->scid);
499 rsp.result = cpu_to_le16(result);
500 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
501 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
505 l2cap_chan_del(chan, reason);
510 l2cap_chan_del(chan, reason);
514 sock_set_flag(sk, SOCK_ZAPPED);
/* Translate the channel's security level into an HCI authentication
 * requirement. Raw channels use dedicated bonding, PSM 0x0001 (SDP)
 * never requires bonding, everything else uses general bonding. */
519 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
521 if (chan->chan_type == L2CAP_CHAN_RAW) {
522 switch (chan->sec_level) {
523 case BT_SECURITY_HIGH:
524 return HCI_AT_DEDICATED_BONDING_MITM;
525 case BT_SECURITY_MEDIUM:
526 return HCI_AT_DEDICATED_BONDING;
528 return HCI_AT_NO_BONDING;
530 } else if (chan->psm == cpu_to_le16(0x0001)) {
/* SDP traffic is downgraded from LOW to the dedicated SDP level. */
531 if (chan->sec_level == BT_SECURITY_LOW)
532 chan->sec_level = BT_SECURITY_SDP;
534 if (chan->sec_level == BT_SECURITY_HIGH)
535 return HCI_AT_NO_BONDING_MITM;
537 return HCI_AT_NO_BONDING;
539 switch (chan->sec_level) {
540 case BT_SECURITY_HIGH:
541 return HCI_AT_GENERAL_BONDING_MITM;
542 case BT_SECURITY_MEDIUM:
543 return HCI_AT_GENERAL_BONDING;
545 return HCI_AT_NO_BONDING;
550 /* Service level security */
/* Ask HCI to enforce the channel's security level on the underlying
 * link; returns nonzero when the link already satisfies it. */
551 int l2cap_chan_check_security(struct l2cap_chan *chan)
553 struct l2cap_conn *conn = chan->conn;
556 auth_type = l2cap_get_auth_type(chan);
558 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for @conn, wrapping
 * within the kernel range 1-128 (the wrap assignment is in a truncated
 * line); serialised by conn->lock. */
561 static u8 l2cap_get_ident(struct l2cap_conn *conn)
565 /* Get next available identificator.
566 * 1 - 128 are used by kernel.
567 * 129 - 199 are reserved.
568 * 200 - 254 are used by utilities like l2ping, etc.
571 spin_lock_bh(&conn->lock);
573 if (++conn->tx_ident > 128)
578 spin_unlock_bh(&conn->lock);
/* Build a signalling command PDU and push it to the HCI layer at
 * maximum priority; flushable unless the controller supports
 * non-flushable packets. The NULL-check on the built skb is in a
 * truncated line. */
583 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
585 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
588 BT_DBG("code 0x%2.2x", code);
593 if (lmp_no_flush_capable(conn->hcon->hdev))
594 flags = ACL_START_NO_FLUSH;
598 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
599 skb->priority = HCI_PRIO_MAX;
601 hci_send_acl(conn->hchan, skb, flags);
/* Hand a data skb for @chan to the HCI layer, choosing the no-flush
 * start flag when the channel is non-flushable and the controller
 * supports it. */
604 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
606 struct hci_conn *hcon = chan->conn->hcon;
609 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
612 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
613 lmp_no_flush_capable(hcon->hdev))
614 flags = ACL_START_NO_FLUSH;
618 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
619 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and transmit an ERTM supervisory frame carrying @control.
 * Header size depends on extended vs enhanced control fields; an FCS
 * trailer is appended when CRC16 is negotiated. Pending final/poll
 * bits are folded into the control field before sending. */
622 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
625 struct l2cap_hdr *lh;
626 struct l2cap_conn *conn = chan->conn;
629 if (chan->state != BT_CONNECTED)
632 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
633 hlen = L2CAP_EXT_HDR_SIZE;
635 hlen = L2CAP_ENH_HDR_SIZE;
637 if (chan->fcs == L2CAP_FCS_CRC16)
638 hlen += L2CAP_FCS_SIZE;
640 BT_DBG("chan %p, control 0x%8.8x", chan, control);
642 count = min_t(unsigned int, conn->mtu, hlen);
644 control |= __set_sframe(chan);
646 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
647 control |= __set_ctrl_final(chan);
649 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
650 control |= __set_ctrl_poll(chan);
652 skb = bt_skb_alloc(count, GFP_ATOMIC);
656 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
657 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
658 lh->cid = cpu_to_le16(chan->dcid);
660 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
/* FCS covers everything from the L2CAP header up to (not including)
 * the FCS field itself. */
662 if (chan->fcs == L2CAP_FCS_CRC16) {
663 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
664 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
667 skb->priority = HCI_PRIO_MAX;
668 l2cap_do_send(chan, skb);
/* Send an RNR s-frame when we are locally busy (remembering that we
 * told the peer so), otherwise an RR, acknowledging buffer_seq. */
671 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
673 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
674 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
675 set_bit(CONN_RNR_SENT, &chan->conn_state);
677 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
679 control |= __set_reqseq(chan, chan->buffer_seq);
681 l2cap_send_sframe(chan, control);
/* True when no Connect request is outstanding for this channel. */
684 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
686 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment. If the peer's feature mask is known
 * (or being fetched), send a Connect request once security allows;
 * otherwise first issue an Information request for the feature mask
 * and arm the info timeout. */
689 static void l2cap_do_start(struct l2cap_chan *chan)
691 struct l2cap_conn *conn = chan->conn;
693 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature mask request still in flight: wait for it. */
694 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
697 if (l2cap_chan_check_security(chan) &&
698 __l2cap_no_conn_pending(chan)) {
699 struct l2cap_conn_req req;
700 req.scid = cpu_to_le16(chan->scid);
703 chan->ident = l2cap_get_ident(conn);
704 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
706 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
710 struct l2cap_info_req req;
711 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
713 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
714 conn->info_ident = l2cap_get_ident(conn);
716 schedule_delayed_work(&conn->info_work,
717 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
719 l2cap_send_cmd(conn, conn->info_ident,
720 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether @mode (ERTM/streaming) is supported by both the local
 * feature mask and the peer's @feat_mask. The condition guarding the
 * enable-test hack on the local mask is in a truncated line. */
724 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
726 u32 local_feat_mask = l2cap_feat_mask;
728 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
731 case L2CAP_MODE_ERTM:
732 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
733 case L2CAP_MODE_STREAMING:
734 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect request for @chan, first stopping all ERTM timers,
 * then move the channel to BT_DISCONN. The error propagation to the
 * socket is in truncated lines. */
740 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
743 struct l2cap_disconn_req req;
750 if (chan->mode == L2CAP_MODE_ERTM) {
751 __clear_retrans_timer(chan);
752 __clear_monitor_timer(chan);
753 __clear_ack_timer(chan);
756 req.dcid = cpu_to_le16(chan->dcid);
757 req.scid = cpu_to_le16(chan->scid);
758 l2cap_send_cmd(conn, l2cap_get_ident(conn),
759 L2CAP_DISCONN_REQ, sizeof(req), &req);
761 l2cap_state_change(chan, BT_DISCONN);
765 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its state machine: channels
 * in BT_CONNECT get a Connect request sent (or are closed when their
 * mode is unsupported by the peer), channels in BT_CONNECT2 (incoming,
 * awaiting our answer) get a Connect response and, on success, the
 * first Configure request. */
766 static void l2cap_conn_start(struct l2cap_conn *conn)
768 struct l2cap_chan *chan;
770 BT_DBG("conn %p", conn);
774 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
775 struct sock *sk = chan->sk;
/* Only connection-oriented channels take part in signalling. */
779 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
784 if (chan->state == BT_CONNECT) {
785 struct l2cap_conn_req req;
787 if (!l2cap_chan_check_security(chan) ||
788 !__l2cap_no_conn_pending(chan)) {
793 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
794 && test_bit(CONF_STATE2_DEVICE,
795 &chan->conf_state)) {
796 /* l2cap_chan_close() calls list_del(chan)
797 * so release the lock */
798 l2cap_chan_close(chan, ECONNRESET);
803 req.scid = cpu_to_le16(chan->scid);
806 chan->ident = l2cap_get_ident(conn);
807 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
809 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
812 } else if (chan->state == BT_CONNECT2) {
813 struct l2cap_conn_rsp rsp;
815 rsp.scid = cpu_to_le16(chan->dcid);
816 rsp.dcid = cpu_to_le16(chan->scid);
818 if (l2cap_chan_check_security(chan)) {
/* Security OK: either defer to userspace (PEND) or
 * accept outright and move to BT_CONFIG. */
819 if (bt_sk(sk)->defer_setup) {
820 struct sock *parent = bt_sk(sk)->parent;
821 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
822 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
824 parent->sk_data_ready(parent, 0);
827 l2cap_state_change(chan, BT_CONFIG);
828 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
829 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
832 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
833 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
836 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Don't resend a Configure request if one was already sent
 * or if the connect was not successful. */
839 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
840 rsp.result != L2CAP_CR_SUCCESS) {
845 set_bit(CONF_REQ_SENT, &chan->conf_state);
846 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
847 l2cap_build_conf_req(chan, buf), buf);
848 chan->num_conf_req++;
857 /* Find socket with cid and source bdaddr.
858 * Returns closest match, locked.
/* Global lookup by fixed CID: prefer an exact source-address match,
 * remember a BDADDR_ANY-bound channel as fallback (returned via lines
 * not visible here). */
860 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
862 struct l2cap_chan *c, *c1 = NULL;
864 read_lock(&chan_list_lock);
866 list_for_each_entry(c, &chan_list, global_l) {
867 struct sock *sk = c->sk;
869 if (state && c->state != state)
872 if (c->scid == cid) {
874 if (!bacmp(&bt_sk(sk)->src, src)) {
875 read_unlock(&chan_list_lock);
880 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
885 read_unlock(&chan_list_lock);
/* Handle a freshly established incoming LE link: find a listener on
 * the LE data fixed CID, spawn a child channel/socket, enqueue it on
 * the listener's accept queue and mark it connected. Error paths
 * (no listener, backlog full, new_connection failure) are in
 * truncated lines. */
890 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
892 struct sock *parent, *sk;
893 struct l2cap_chan *chan, *pchan;
897 /* Check if we have socket listening on cid */
898 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
907 /* Check for backlog size */
908 if (sk_acceptq_is_full(parent)) {
909 BT_DBG("backlog full %d", parent->sk_ack_backlog);
913 chan = pchan->ops->new_connection(pchan->data);
919 hci_conn_hold(conn->hcon);
921 bacpy(&bt_sk(sk)->src, conn->src);
922 bacpy(&bt_sk(sk)->dst, conn->dst);
924 bt_accept_enqueue(parent, sk);
926 l2cap_chan_add(conn, chan);
928 __set_chan_timer(chan, sk->sk_sndtimeo);
930 l2cap_state_change(chan, BT_CONNECTED);
931 parent->sk_data_ready(parent, 0);
934 release_sock(parent);
/* Mark the channel connected: reset config state, stop the channel
 * timer, and wake the socket (and its listening parent, if any). */
937 static void l2cap_chan_ready(struct sock *sk)
939 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
940 struct sock *parent = bt_sk(sk)->parent;
942 BT_DBG("sk %p, parent %p", sk, parent);
944 chan->conf_state = 0;
945 __clear_chan_timer(chan);
947 l2cap_state_change(chan, BT_CONNECTED);
948 sk->sk_state_change(sk);
951 parent->sk_data_ready(parent, 0);
/* Link-up handler: for LE, accept an incoming connection or elevate
 * security on an outgoing one; then for every channel either finish
 * LE setup, mark non-connection-oriented channels connected, or start
 * the connect state machine. */
954 static void l2cap_conn_ready(struct l2cap_conn *conn)
956 struct l2cap_chan *chan;
958 BT_DBG("conn %p", conn);
960 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
961 l2cap_le_conn_ready(conn);
963 if (conn->hcon->out && conn->hcon->type == LE_LINK)
964 smp_conn_security(conn, conn->hcon->pending_sec_level);
968 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
969 struct sock *sk = chan->sk;
973 if (conn->hcon->type == LE_LINK) {
/* LE channels become ready only once SMP security holds. */
974 if (smp_conn_security(conn, chan->sec_level))
975 l2cap_chan_ready(sk);
977 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
978 __clear_chan_timer(chan);
979 l2cap_state_change(chan, BT_CONNECTED);
980 sk->sk_state_change(sk);
982 } else if (chan->state == BT_CONNECT)
983 l2cap_do_start(chan);
991 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate @err to every channel flagged FORCE_RELIABLE on @conn;
 * the error assignment to the socket is in a truncated line. */
992 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
994 struct l2cap_chan *chan;
996 BT_DBG("conn %p", conn);
1000 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1001 struct sock *sk = chan->sk;
1003 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
/* Information-request timeout: give up waiting for the peer's feature
 * mask, mark the exchange done, and proceed with pending channels. */
1010 static void l2cap_info_timeout(struct work_struct *work)
1012 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1015 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1016 conn->info_ident = 0;
1018 l2cap_conn_start(conn);
/* Tear down the whole L2CAP connection on HCI link loss: close every
 * channel with @err, free the HCI channel, cancel the info work, clean
 * up any pending SMP state, and detach from the hci_conn. */
1021 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1023 struct l2cap_conn *conn = hcon->l2cap_data;
1024 struct l2cap_chan *chan, *l;
1030 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled inbound frame. */
1032 kfree_skb(conn->rx_skb);
1035 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1038 l2cap_chan_del(chan, err)
1040 chan->ops->close(chan->data);
1043 hci_chan_del(conn->hchan);
1045 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1046 cancel_delayed_work_sync(&conn->info_work);
1048 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1049 del_timer(&conn->security_timer);
1050 smp_chan_destroy(conn);
1053 hcon->l2cap_data = NULL;
/* SMP security-procedure timeout: drop the whole connection. */
1057 static void security_timeout(unsigned long arg)
1059 struct l2cap_conn *conn = (void *) arg;
1061 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) L2CAP connection object for @hcon:
 * allocate an HCI channel, pick the MTU from the link type, initialise
 * the channel list and timers. Early-return when hcon->l2cap_data is
 * already set, and allocation-failure returns, are in truncated lines. */
1064 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1066 struct l2cap_conn *conn = hcon->l2cap_data;
1067 struct hci_chan *hchan;
1072 hchan = hci_chan_create(hcon);
1076 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1078 hci_chan_del(hchan);
1082 hcon->l2cap_data = conn;
1084 conn->hchan = hchan;
1086 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* LE links may advertise their own MTU; fall back to the ACL MTU. */
1088 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1089 conn->mtu = hcon->hdev->le_mtu;
1091 conn->mtu = hcon->hdev->acl_mtu;
1093 conn->src = &hcon->hdev->bdaddr;
1094 conn->dst = &hcon->dst;
1096 conn->feat_mask = 0;
1098 spin_lock_init(&conn->lock);
1100 INIT_LIST_HEAD(&conn->chan_l);
/* Only LE links need the SMP security timer. */
1102 if (hcon->type == LE_LINK)
1103 setup_timer(&conn->security_timer, security_timeout,
1104 (unsigned long) conn);
1106 INIT_DELAYED_WORK(&conn->info_work, l2cap_info_timeout);
1108 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1113 /* ---- Socket interface ---- */
1115 /* Find socket with psm and source bdaddr.
1116 * Returns closest match.
/* Global lookup by PSM: prefer an exact source-address match, remember
 * a BDADDR_ANY-bound channel as fallback (returned via lines not
 * visible here). */
1118 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1120 struct l2cap_chan *c, *c1 = NULL;
1122 read_lock(&chan_list_lock);
1124 list_for_each_entry(c, &chan_list, global_l) {
1125 struct sock *sk = c->sk;
1127 if (state && c->state != state)
1130 if (c->psm == psm) {
1132 if (!bacmp(&bt_sk(sk)->src, src)) {
1133 read_unlock(&chan_list_lock);
1138 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1143 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan to @dst on PSM @psm or
 * fixed CID @cid: validate the PSM/mode, create the ACL or LE link,
 * attach the channel to the resulting connection, and either finish
 * immediately (link already up) or wait in BT_CONNECT.
 * NOTE(review): bacpy at original line 1218 copies @src into dst --
 * presumably @dst in the full source; truncation makes this line
 * suspect, verify against the complete file. */
1148 inline int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1150 struct sock *sk = chan->sk;
1151 bdaddr_t *src = &bt_sk(sk)->src;
1152 struct l2cap_conn *conn;
1153 struct hci_conn *hcon;
1154 struct hci_dev *hdev;
1158 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1161 hdev = hci_get_route(dst, src);
1163 return -EHOSTUNREACH;
1169 /* PSM must be odd and lsb of upper byte must be 0 */
1170 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1171 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID. */
1176 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1181 switch (chan->mode) {
1182 case L2CAP_MODE_BASIC:
1184 case L2CAP_MODE_ERTM:
1185 case L2CAP_MODE_STREAMING:
1194 switch (sk->sk_state) {
1198 /* Already connecting */
1203 /* Already connected */
1217 /* Set destination address and psm */
1218 bacpy(&bt_sk(sk)->dst, src);
1222 auth_type = l2cap_get_auth_type(chan);
1224 if (chan->dcid == L2CAP_CID_LE_DATA)
1225 hcon = hci_connect(hdev, LE_LINK, dst,
1226 chan->sec_level, auth_type);
1228 hcon = hci_connect(hdev, ACL_LINK, dst,
1229 chan->sec_level, auth_type);
1232 err = PTR_ERR(hcon);
1236 conn = l2cap_conn_add(hcon, 0);
1243 /* Update source addr of the socket */
1244 bacpy(src, conn->src);
1246 l2cap_chan_add(conn, chan);
1248 l2cap_state_change(chan, BT_CONNECT);
1249 __set_chan_timer(chan, sk->sk_sndtimeo);
1251 if (hcon->state == BT_CONNECTED) {
1252 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1253 __clear_chan_timer(chan);
1254 if (l2cap_chan_check_security(chan))
1255 l2cap_state_change(chan, BT_CONNECTED);
1257 l2cap_do_start(chan);
1263 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until every outstanding ERTM i-frame has been
 * acknowledged, the connection drops, a signal arrives, or a socket
 * error is reported. Timeout initialisation and the break statements
 * are in truncated lines. */
1268 int __l2cap_wait_ack(struct sock *sk)
1270 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1271 DECLARE_WAITQUEUE(wait, current);
1275 add_wait_queue(sk_sleep(sk), &wait);
1276 set_current_state(TASK_INTERRUPTIBLE);
1277 while (chan->unacked_frames > 0 && chan->conn) {
1281 if (signal_pending(current)) {
1282 err = sock_intr_errno(timeo);
1287 timeo = schedule_timeout(timeo);
1289 set_current_state(TASK_INTERRUPTIBLE);
1291 err = sock_error(sk);
1295 set_current_state(TASK_RUNNING);
1296 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: give up and disconnect once retry_count reaches
 * the remote's max transmit, otherwise poll the peer again (RR/RNR
 * with P=1) and rearm the monitor timer. */
1300 static void l2cap_monitor_timeout(struct work_struct *work)
1302 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1303 monitor_timer.work);
1304 struct sock *sk = chan->sk;
1306 BT_DBG("chan %p", chan);
1309 if (chan->retry_count >= chan->remote_max_tx) {
1310 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1315 chan->retry_count++;
1316 __set_monitor_timer(chan);
1318 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: start the monitor phase -- poll the peer
 * with P=1, remember we are waiting for the F-bit reply. */
1322 static void l2cap_retrans_timeout(struct work_struct *work)
1324 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1325 retrans_timer.work);
1326 struct sock *sk = chan->sk;
1328 BT_DBG("chan %p", chan);
1331 chan->retry_count = 1;
1332 __set_monitor_timer(chan);
1334 set_bit(CONN_WAIT_F, &chan->conn_state);
1336 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Release transmitted i-frames from the head of tx_q up to (not
 * including) expected_ack_seq, and stop the retransmission timer once
 * nothing is left unacknowledged. The kfree_skb of each dequeued
 * buffer is in a truncated line. */
1340 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1342 struct sk_buff *skb;
1344 while ((skb = skb_peek(&chan->tx_q)) &&
1345 chan->unacked_frames) {
1346 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1349 skb = skb_dequeue(&chan->tx_q);
1352 chan->unacked_frames--;
1355 if (!chan->unacked_frames)
1356 __clear_retrans_timer(chan);
/* Streaming mode transmit: drain tx_q, stamping each frame with the
 * next TX sequence number and recomputing the FCS in place (the frame
 * was built with a placeholder control field). */
1359 static void l2cap_streaming_send(struct l2cap_chan *chan)
1361 struct sk_buff *skb;
1365 while ((skb = skb_dequeue(&chan->tx_q))) {
1366 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1367 control |= __set_txseq(chan, chan->next_tx_seq);
1368 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
/* FCS trails the payload; recompute it after patching control. */
1370 if (chan->fcs == L2CAP_FCS_CRC16) {
1371 fcs = crc16(0, (u8 *)skb->data,
1372 skb->len - L2CAP_FCS_SIZE);
1373 put_unaligned_le16(fcs,
1374 skb->data + skb->len - L2CAP_FCS_SIZE);
1377 l2cap_do_send(chan, skb);
1379 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single i-frame with sequence number @tx_seq (SREJ
 * recovery): locate it in tx_q, clone it, refresh the control field
 * (F-bit, reqseq, txseq) and FCS, and send the clone. Disconnects if
 * the frame already hit the remote's max-transmit limit. */
1383 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1385 struct sk_buff *skb, *tx_skb;
1389 skb = skb_peek(&chan->tx_q);
1393 while (bt_cb(skb)->tx_seq != tx_seq) {
1394 if (skb_queue_is_last(&chan->tx_q, skb))
1397 skb = skb_queue_next(&chan->tx_q, skb);
1400 if (chan->remote_max_tx &&
1401 bt_cb(skb)->retries == chan->remote_max_tx) {
1402 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1406 tx_skb = skb_clone(skb, GFP_ATOMIC);
1407 bt_cb(skb)->retries++;
/* Keep only the SAR bits of the original control field. */
1409 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1410 control &= __get_sar_mask(chan);
1412 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1413 control |= __set_ctrl_final(chan);
1415 control |= __set_reqseq(chan, chan->buffer_seq);
1416 control |= __set_txseq(chan, tx_seq);
1418 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1420 if (chan->fcs == L2CAP_FCS_CRC16) {
1421 fcs = crc16(0, (u8 *)tx_skb->data,
1422 tx_skb->len - L2CAP_FCS_SIZE);
1423 put_unaligned_le16(fcs,
1424 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1427 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send queued i-frames from tx_send_head while the
 * transmit window is open, cloning each frame, stamping control/FCS,
 * arming the retransmission timer, and tracking unacked/sent counts.
 * Returns the number of frames sent (return is in a truncated line). */
1430 static int l2cap_ertm_send(struct l2cap_chan *chan)
1432 struct sk_buff *skb, *tx_skb;
1437 if (chan->state != BT_CONNECTED)
1440 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1442 if (chan->remote_max_tx &&
1443 bt_cb(skb)->retries == chan->remote_max_tx) {
1444 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1448 tx_skb = skb_clone(skb, GFP_ATOMIC);
1450 bt_cb(skb)->retries++;
1452 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1453 control &= __get_sar_mask(chan);
1455 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1456 control |= __set_ctrl_final(chan);
1458 control |= __set_reqseq(chan, chan->buffer_seq);
1459 control |= __set_txseq(chan, chan->next_tx_seq);
1461 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS is computed/written through skb->data; tx_skb is a clone,
 * so both share the same data buffer -- the write is visible in
 * the transmitted frame. */
1463 if (chan->fcs == L2CAP_FCS_CRC16) {
1464 fcs = crc16(0, (u8 *)skb->data,
1465 tx_skb->len - L2CAP_FCS_SIZE);
1466 put_unaligned_le16(fcs, skb->data +
1467 tx_skb->len - L2CAP_FCS_SIZE);
1470 l2cap_do_send(chan, tx_skb);
1472 __set_retrans_timer(chan);
1474 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1476 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Count a frame as unacked only on its first transmission. */
1478 if (bt_cb(skb)->retries == 1)
1479 chan->unacked_frames++;
1481 chan->frames_sent++;
1483 if (skb_queue_is_last(&chan->tx_q, skb))
1484 chan->tx_send_head = NULL;
1486 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* REJ recovery: rewind the transmit pointer to the start of tx_q and
 * the sequence counter to the last acknowledged frame, then resend. */
1494 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1498 if (!skb_queue_empty(&chan->tx_q))
1499 chan->tx_send_head = chan->tx_q.next;
1501 chan->next_tx_seq = chan->expected_ack_seq;
1502 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy; otherwise
 * try to piggyback the ack on pending i-frames, falling back to an
 * explicit RR s-frame when nothing was sent. */
1506 static void l2cap_send_ack(struct l2cap_chan *chan)
1510 control |= __set_reqseq(chan, chan->buffer_seq);
1512 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1513 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1514 set_bit(CONN_RNR_SENT, &chan->conn_state);
1515 l2cap_send_sframe(chan, control);
/* i-frames carry reqseq themselves, so sending any suffices. */
1519 if (l2cap_ertm_send(chan) > 0)
1522 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1523 l2cap_send_sframe(chan, control);
/* Send an SREJ s-frame with the F-bit set, requesting the sequence
 * number recorded in the last (tail) entry of the SREJ list. */
1526 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1528 struct srej_list *tail;
1531 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1532 control |= __set_ctrl_final(chan);
1534 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1535 control |= __set_reqseq(chan, tail->tx_seq);
1537 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb: the first @count bytes
 * go into the head, the rest into MTU-sized continuation fragments
 * chained on frag_list. Error returns (-EFAULT on copy failure, alloc
 * failure) and the len/count bookkeeping are in truncated lines. */
1540 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1542 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1543 struct sk_buff **frag;
1546 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1552 /* Continuation fragments (no L2CAP header) */
1553 frag = &skb_shinfo(skb)->frag_list;
1555 count = min_t(unsigned int, conn->mtu, len);
1557 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1560 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1563 (*frag)->priority = skb->priority;
1568 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header plus 2-byte PSM,
 * then the user payload copied from @msg. Returns the skb or an
 * ERR_PTR on allocation/copy failure. */
1574 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1575 struct msghdr *msg, size_t len,
1578 struct sock *sk = chan->sk;
1579 struct l2cap_conn *conn = chan->conn;
1580 struct sk_buff *skb;
1581 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1582 struct l2cap_hdr *lh;
1584 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1586 count = min_t(unsigned int, (conn->mtu - hlen), len);
1587 skb = bt_skb_send_alloc(sk, count + hlen,
1588 msg->msg_flags & MSG_DONTWAIT, &err);
1590 return ERR_PTR(err);
1592 skb->priority = priority;
1594 /* Create L2CAP header */
1595 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1596 lh->cid = cpu_to_le16(chan->dcid);
1597 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1598 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1600 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1601 if (unlikely(err < 0)) {
/* kfree_skb on the partial buffer is in a truncated line. */
1603 return ERR_PTR(err);
1608 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1609 struct msghdr *msg, size_t len,
1612 struct sock *sk = chan->sk;
1613 struct l2cap_conn *conn = chan->conn;
1614 struct sk_buff *skb;
1615 int err, count, hlen = L2CAP_HDR_SIZE;
1616 struct l2cap_hdr *lh;
1618 BT_DBG("sk %p len %d", sk, (int)len);
1620 count = min_t(unsigned int, (conn->mtu - hlen), len);
1621 skb = bt_skb_send_alloc(sk, count + hlen,
1622 msg->msg_flags & MSG_DONTWAIT, &err);
1624 return ERR_PTR(err);
1626 skb->priority = priority;
1628 /* Create L2CAP header */
1629 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1630 lh->cid = cpu_to_le16(chan->dcid);
1631 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1633 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1634 if (unlikely(err < 0)) {
1636 return ERR_PTR(err);
1641 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1642 struct msghdr *msg, size_t len,
1643 u32 control, u16 sdulen)
1645 struct sock *sk = chan->sk;
1646 struct l2cap_conn *conn = chan->conn;
1647 struct sk_buff *skb;
1648 int err, count, hlen;
1649 struct l2cap_hdr *lh;
1651 BT_DBG("sk %p len %d", sk, (int)len);
1654 return ERR_PTR(-ENOTCONN);
1656 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1657 hlen = L2CAP_EXT_HDR_SIZE;
1659 hlen = L2CAP_ENH_HDR_SIZE;
1662 hlen += L2CAP_SDULEN_SIZE;
1664 if (chan->fcs == L2CAP_FCS_CRC16)
1665 hlen += L2CAP_FCS_SIZE;
1667 count = min_t(unsigned int, (conn->mtu - hlen), len);
1668 skb = bt_skb_send_alloc(sk, count + hlen,
1669 msg->msg_flags & MSG_DONTWAIT, &err);
1671 return ERR_PTR(err);
1673 /* Create L2CAP header */
1674 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1675 lh->cid = cpu_to_le16(chan->dcid);
1676 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1678 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1681 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1683 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1684 if (unlikely(err < 0)) {
1686 return ERR_PTR(err);
1689 if (chan->fcs == L2CAP_FCS_CRC16)
1690 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1692 bt_cb(skb)->retries = 0;
1696 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1698 struct sk_buff *skb;
1699 struct sk_buff_head sar_queue;
1703 skb_queue_head_init(&sar_queue);
1704 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1705 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1707 return PTR_ERR(skb);
1709 __skb_queue_tail(&sar_queue, skb);
1710 len -= chan->remote_mps;
1711 size += chan->remote_mps;
1716 if (len > chan->remote_mps) {
1717 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1718 buflen = chan->remote_mps;
1720 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1724 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1726 skb_queue_purge(&sar_queue);
1727 return PTR_ERR(skb);
1730 __skb_queue_tail(&sar_queue, skb);
1734 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1735 if (chan->tx_send_head == NULL)
1736 chan->tx_send_head = sar_queue.next;
1741 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1744 struct sk_buff *skb;
1748 /* Connectionless channel */
1749 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1750 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1752 return PTR_ERR(skb);
1754 l2cap_do_send(chan, skb);
1758 switch (chan->mode) {
1759 case L2CAP_MODE_BASIC:
1760 /* Check outgoing MTU */
1761 if (len > chan->omtu)
1764 /* Create a basic PDU */
1765 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1767 return PTR_ERR(skb);
1769 l2cap_do_send(chan, skb);
1773 case L2CAP_MODE_ERTM:
1774 case L2CAP_MODE_STREAMING:
1775 /* Entire SDU fits into one PDU */
1776 if (len <= chan->remote_mps) {
1777 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1778 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1781 return PTR_ERR(skb);
1783 __skb_queue_tail(&chan->tx_q, skb);
1785 if (chan->tx_send_head == NULL)
1786 chan->tx_send_head = skb;
1789 /* Segment SDU into multiples PDUs */
1790 err = l2cap_sar_segment_sdu(chan, msg, len);
1795 if (chan->mode == L2CAP_MODE_STREAMING) {
1796 l2cap_streaming_send(chan);
1801 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1802 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1807 err = l2cap_ertm_send(chan);
1814 BT_DBG("bad state %1.1x", chan->mode);
1821 /* Copy frame to all raw sockets on that connection */
1822 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1824 struct sk_buff *nskb;
1825 struct l2cap_chan *chan;
1827 BT_DBG("conn %p", conn);
1831 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1832 struct sock *sk = chan->sk;
1833 if (chan->chan_type != L2CAP_CHAN_RAW)
1836 /* Don't send frame to the socket it came from */
1839 nskb = skb_clone(skb, GFP_ATOMIC);
1843 if (chan->ops->recv(chan->data, nskb))
1850 /* ---- L2CAP signalling commands ---- */
1851 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1852 u8 code, u8 ident, u16 dlen, void *data)
1854 struct sk_buff *skb, **frag;
1855 struct l2cap_cmd_hdr *cmd;
1856 struct l2cap_hdr *lh;
1859 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1860 conn, code, ident, dlen);
1862 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1863 count = min_t(unsigned int, conn->mtu, len);
1865 skb = bt_skb_alloc(count, GFP_ATOMIC);
1869 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1870 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1872 if (conn->hcon->type == LE_LINK)
1873 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1875 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1877 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1880 cmd->len = cpu_to_le16(dlen);
1883 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1884 memcpy(skb_put(skb, count), data, count);
1890 /* Continuation fragments (no L2CAP header) */
1891 frag = &skb_shinfo(skb)->frag_list;
1893 count = min_t(unsigned int, conn->mtu, len);
1895 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1899 memcpy(skb_put(*frag, count), data, count);
1904 frag = &(*frag)->next;
1914 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1916 struct l2cap_conf_opt *opt = *ptr;
1919 len = L2CAP_CONF_OPT_SIZE + opt->len;
1927 *val = *((u8 *) opt->val);
1931 *val = get_unaligned_le16(opt->val);
1935 *val = get_unaligned_le32(opt->val);
1939 *val = (unsigned long) opt->val;
1943 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1947 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1949 struct l2cap_conf_opt *opt = *ptr;
1951 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1958 *((u8 *) opt->val) = val;
1962 put_unaligned_le16(val, opt->val);
1966 put_unaligned_le32(val, opt->val);
1970 memcpy(opt->val, (void *) val, len);
1974 *ptr += L2CAP_CONF_OPT_SIZE + len;
1977 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1979 struct l2cap_conf_efs efs;
1981 switch (chan->mode) {
1982 case L2CAP_MODE_ERTM:
1983 efs.id = chan->local_id;
1984 efs.stype = chan->local_stype;
1985 efs.msdu = cpu_to_le16(chan->local_msdu);
1986 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1987 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1988 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1991 case L2CAP_MODE_STREAMING:
1993 efs.stype = L2CAP_SERV_BESTEFFORT;
1994 efs.msdu = cpu_to_le16(chan->local_msdu);
1995 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2004 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2005 (unsigned long) &efs);
2008 static void l2cap_ack_timeout(struct work_struct *work)
2010 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2013 lock_sock(chan->sk);
2014 l2cap_send_ack(chan);
2015 release_sock(chan->sk);
2018 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2020 chan->expected_ack_seq = 0;
2021 chan->unacked_frames = 0;
2022 chan->buffer_seq = 0;
2023 chan->num_acked = 0;
2024 chan->frames_sent = 0;
2026 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2027 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2028 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2030 skb_queue_head_init(&chan->srej_q);
2032 INIT_LIST_HEAD(&chan->srej_l);
2035 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2038 case L2CAP_MODE_STREAMING:
2039 case L2CAP_MODE_ERTM:
2040 if (l2cap_mode_supported(mode, remote_feat_mask))
2044 return L2CAP_MODE_BASIC;
2048 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2050 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2053 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2055 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2058 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2060 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2061 __l2cap_ews_supported(chan)) {
2062 /* use extended control field */
2063 set_bit(FLAG_EXT_CTRL, &chan->flags);
2064 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2066 chan->tx_win = min_t(u16, chan->tx_win,
2067 L2CAP_DEFAULT_TX_WINDOW);
2068 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2072 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2074 struct l2cap_conf_req *req = data;
2075 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2076 void *ptr = req->data;
2079 BT_DBG("chan %p", chan);
2081 if (chan->num_conf_req || chan->num_conf_rsp)
2084 switch (chan->mode) {
2085 case L2CAP_MODE_STREAMING:
2086 case L2CAP_MODE_ERTM:
2087 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2090 if (__l2cap_efs_supported(chan))
2091 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2095 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2100 if (chan->imtu != L2CAP_DEFAULT_MTU)
2101 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2103 switch (chan->mode) {
2104 case L2CAP_MODE_BASIC:
2105 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2106 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2109 rfc.mode = L2CAP_MODE_BASIC;
2111 rfc.max_transmit = 0;
2112 rfc.retrans_timeout = 0;
2113 rfc.monitor_timeout = 0;
2114 rfc.max_pdu_size = 0;
2116 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2117 (unsigned long) &rfc);
2120 case L2CAP_MODE_ERTM:
2121 rfc.mode = L2CAP_MODE_ERTM;
2122 rfc.max_transmit = chan->max_tx;
2123 rfc.retrans_timeout = 0;
2124 rfc.monitor_timeout = 0;
2126 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2127 L2CAP_EXT_HDR_SIZE -
2130 rfc.max_pdu_size = cpu_to_le16(size);
2132 l2cap_txwin_setup(chan);
2134 rfc.txwin_size = min_t(u16, chan->tx_win,
2135 L2CAP_DEFAULT_TX_WINDOW);
2137 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2138 (unsigned long) &rfc);
2140 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2141 l2cap_add_opt_efs(&ptr, chan);
2143 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2146 if (chan->fcs == L2CAP_FCS_NONE ||
2147 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2148 chan->fcs = L2CAP_FCS_NONE;
2149 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2152 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2153 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2157 case L2CAP_MODE_STREAMING:
2158 rfc.mode = L2CAP_MODE_STREAMING;
2160 rfc.max_transmit = 0;
2161 rfc.retrans_timeout = 0;
2162 rfc.monitor_timeout = 0;
2164 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2165 L2CAP_EXT_HDR_SIZE -
2168 rfc.max_pdu_size = cpu_to_le16(size);
2170 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2171 (unsigned long) &rfc);
2173 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2174 l2cap_add_opt_efs(&ptr, chan);
2176 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2179 if (chan->fcs == L2CAP_FCS_NONE ||
2180 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2181 chan->fcs = L2CAP_FCS_NONE;
2182 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2187 req->dcid = cpu_to_le16(chan->dcid);
2188 req->flags = cpu_to_le16(0);
2193 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2195 struct l2cap_conf_rsp *rsp = data;
2196 void *ptr = rsp->data;
2197 void *req = chan->conf_req;
2198 int len = chan->conf_len;
2199 int type, hint, olen;
2201 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2202 struct l2cap_conf_efs efs;
2204 u16 mtu = L2CAP_DEFAULT_MTU;
2205 u16 result = L2CAP_CONF_SUCCESS;
2208 BT_DBG("chan %p", chan);
2210 while (len >= L2CAP_CONF_OPT_SIZE) {
2211 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2213 hint = type & L2CAP_CONF_HINT;
2214 type &= L2CAP_CONF_MASK;
2217 case L2CAP_CONF_MTU:
2221 case L2CAP_CONF_FLUSH_TO:
2222 chan->flush_to = val;
2225 case L2CAP_CONF_QOS:
2228 case L2CAP_CONF_RFC:
2229 if (olen == sizeof(rfc))
2230 memcpy(&rfc, (void *) val, olen);
2233 case L2CAP_CONF_FCS:
2234 if (val == L2CAP_FCS_NONE)
2235 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2238 case L2CAP_CONF_EFS:
2240 if (olen == sizeof(efs))
2241 memcpy(&efs, (void *) val, olen);
2244 case L2CAP_CONF_EWS:
2246 return -ECONNREFUSED;
2248 set_bit(FLAG_EXT_CTRL, &chan->flags);
2249 set_bit(CONF_EWS_RECV, &chan->conf_state);
2250 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2251 chan->remote_tx_win = val;
2258 result = L2CAP_CONF_UNKNOWN;
2259 *((u8 *) ptr++) = type;
2264 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2267 switch (chan->mode) {
2268 case L2CAP_MODE_STREAMING:
2269 case L2CAP_MODE_ERTM:
2270 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2271 chan->mode = l2cap_select_mode(rfc.mode,
2272 chan->conn->feat_mask);
2277 if (__l2cap_efs_supported(chan))
2278 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2280 return -ECONNREFUSED;
2283 if (chan->mode != rfc.mode)
2284 return -ECONNREFUSED;
2290 if (chan->mode != rfc.mode) {
2291 result = L2CAP_CONF_UNACCEPT;
2292 rfc.mode = chan->mode;
2294 if (chan->num_conf_rsp == 1)
2295 return -ECONNREFUSED;
2297 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2298 sizeof(rfc), (unsigned long) &rfc);
2301 if (result == L2CAP_CONF_SUCCESS) {
2302 /* Configure output options and let the other side know
2303 * which ones we don't like. */
2305 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2306 result = L2CAP_CONF_UNACCEPT;
2309 set_bit(CONF_MTU_DONE, &chan->conf_state);
2311 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2314 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2315 efs.stype != L2CAP_SERV_NOTRAFIC &&
2316 efs.stype != chan->local_stype) {
2318 result = L2CAP_CONF_UNACCEPT;
2320 if (chan->num_conf_req >= 1)
2321 return -ECONNREFUSED;
2323 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2325 (unsigned long) &efs);
2327 /* Send PENDING Conf Rsp */
2328 result = L2CAP_CONF_PENDING;
2329 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2334 case L2CAP_MODE_BASIC:
2335 chan->fcs = L2CAP_FCS_NONE;
2336 set_bit(CONF_MODE_DONE, &chan->conf_state);
2339 case L2CAP_MODE_ERTM:
2340 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2341 chan->remote_tx_win = rfc.txwin_size;
2343 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2345 chan->remote_max_tx = rfc.max_transmit;
2347 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2349 L2CAP_EXT_HDR_SIZE -
2352 rfc.max_pdu_size = cpu_to_le16(size);
2353 chan->remote_mps = size;
2355 rfc.retrans_timeout =
2356 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2357 rfc.monitor_timeout =
2358 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2360 set_bit(CONF_MODE_DONE, &chan->conf_state);
2362 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2363 sizeof(rfc), (unsigned long) &rfc);
2365 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2366 chan->remote_id = efs.id;
2367 chan->remote_stype = efs.stype;
2368 chan->remote_msdu = le16_to_cpu(efs.msdu);
2369 chan->remote_flush_to =
2370 le32_to_cpu(efs.flush_to);
2371 chan->remote_acc_lat =
2372 le32_to_cpu(efs.acc_lat);
2373 chan->remote_sdu_itime =
2374 le32_to_cpu(efs.sdu_itime);
2375 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2376 sizeof(efs), (unsigned long) &efs);
2380 case L2CAP_MODE_STREAMING:
2381 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2383 L2CAP_EXT_HDR_SIZE -
2386 rfc.max_pdu_size = cpu_to_le16(size);
2387 chan->remote_mps = size;
2389 set_bit(CONF_MODE_DONE, &chan->conf_state);
2391 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2392 sizeof(rfc), (unsigned long) &rfc);
2397 result = L2CAP_CONF_UNACCEPT;
2399 memset(&rfc, 0, sizeof(rfc));
2400 rfc.mode = chan->mode;
2403 if (result == L2CAP_CONF_SUCCESS)
2404 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2406 rsp->scid = cpu_to_le16(chan->dcid);
2407 rsp->result = cpu_to_le16(result);
2408 rsp->flags = cpu_to_le16(0x0000);
2413 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2415 struct l2cap_conf_req *req = data;
2416 void *ptr = req->data;
2419 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2420 struct l2cap_conf_efs efs;
2422 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2424 while (len >= L2CAP_CONF_OPT_SIZE) {
2425 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2428 case L2CAP_CONF_MTU:
2429 if (val < L2CAP_DEFAULT_MIN_MTU) {
2430 *result = L2CAP_CONF_UNACCEPT;
2431 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2434 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2437 case L2CAP_CONF_FLUSH_TO:
2438 chan->flush_to = val;
2439 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2443 case L2CAP_CONF_RFC:
2444 if (olen == sizeof(rfc))
2445 memcpy(&rfc, (void *)val, olen);
2447 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2448 rfc.mode != chan->mode)
2449 return -ECONNREFUSED;
2453 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2454 sizeof(rfc), (unsigned long) &rfc);
2457 case L2CAP_CONF_EWS:
2458 chan->tx_win = min_t(u16, val,
2459 L2CAP_DEFAULT_EXT_WINDOW);
2460 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2464 case L2CAP_CONF_EFS:
2465 if (olen == sizeof(efs))
2466 memcpy(&efs, (void *)val, olen);
2468 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2469 efs.stype != L2CAP_SERV_NOTRAFIC &&
2470 efs.stype != chan->local_stype)
2471 return -ECONNREFUSED;
2473 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2474 sizeof(efs), (unsigned long) &efs);
2479 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2480 return -ECONNREFUSED;
2482 chan->mode = rfc.mode;
2484 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2486 case L2CAP_MODE_ERTM:
2487 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2488 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2489 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2491 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2492 chan->local_msdu = le16_to_cpu(efs.msdu);
2493 chan->local_sdu_itime =
2494 le32_to_cpu(efs.sdu_itime);
2495 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2496 chan->local_flush_to =
2497 le32_to_cpu(efs.flush_to);
2501 case L2CAP_MODE_STREAMING:
2502 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2506 req->dcid = cpu_to_le16(chan->dcid);
2507 req->flags = cpu_to_le16(0x0000);
2512 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2514 struct l2cap_conf_rsp *rsp = data;
2515 void *ptr = rsp->data;
2517 BT_DBG("chan %p", chan);
2519 rsp->scid = cpu_to_le16(chan->dcid);
2520 rsp->result = cpu_to_le16(result);
2521 rsp->flags = cpu_to_le16(flags);
2526 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2528 struct l2cap_conn_rsp rsp;
2529 struct l2cap_conn *conn = chan->conn;
2532 rsp.scid = cpu_to_le16(chan->dcid);
2533 rsp.dcid = cpu_to_le16(chan->scid);
2534 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2535 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2536 l2cap_send_cmd(conn, chan->ident,
2537 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2539 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2542 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2543 l2cap_build_conf_req(chan, buf), buf);
2544 chan->num_conf_req++;
2547 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2551 struct l2cap_conf_rfc rfc;
2553 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2555 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2558 while (len >= L2CAP_CONF_OPT_SIZE) {
2559 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2562 case L2CAP_CONF_RFC:
2563 if (olen == sizeof(rfc))
2564 memcpy(&rfc, (void *)val, olen);
2569 /* Use sane default values in case a misbehaving remote device
2570 * did not send an RFC option.
2572 rfc.mode = chan->mode;
2573 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2574 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2575 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2577 BT_ERR("Expected RFC option was not found, using defaults");
2581 case L2CAP_MODE_ERTM:
2582 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2583 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2584 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2586 case L2CAP_MODE_STREAMING:
2587 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2591 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2593 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2595 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2598 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2599 cmd->ident == conn->info_ident) {
2600 cancel_delayed_work_sync(&conn->info_work);
2602 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2603 conn->info_ident = 0;
2605 l2cap_conn_start(conn);
2611 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2613 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2614 struct l2cap_conn_rsp rsp;
2615 struct l2cap_chan *chan = NULL, *pchan;
2616 struct sock *parent, *sk = NULL;
2617 int result, status = L2CAP_CS_NO_INFO;
2619 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2620 __le16 psm = req->psm;
2622 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2624 /* Check if we have socket listening on psm */
2625 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2627 result = L2CAP_CR_BAD_PSM;
2635 /* Check if the ACL is secure enough (if not SDP) */
2636 if (psm != cpu_to_le16(0x0001) &&
2637 !hci_conn_check_link_mode(conn->hcon)) {
2638 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2639 result = L2CAP_CR_SEC_BLOCK;
2643 result = L2CAP_CR_NO_MEM;
2645 /* Check for backlog size */
2646 if (sk_acceptq_is_full(parent)) {
2647 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2651 chan = pchan->ops->new_connection(pchan->data);
2657 /* Check if we already have channel with that dcid */
2658 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2659 sock_set_flag(sk, SOCK_ZAPPED);
2660 chan->ops->close(chan->data);
2664 hci_conn_hold(conn->hcon);
2666 bacpy(&bt_sk(sk)->src, conn->src);
2667 bacpy(&bt_sk(sk)->dst, conn->dst);
2671 bt_accept_enqueue(parent, sk);
2673 l2cap_chan_add(conn, chan);
2677 __set_chan_timer(chan, sk->sk_sndtimeo);
2679 chan->ident = cmd->ident;
2681 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2682 if (l2cap_chan_check_security(chan)) {
2683 if (bt_sk(sk)->defer_setup) {
2684 l2cap_state_change(chan, BT_CONNECT2);
2685 result = L2CAP_CR_PEND;
2686 status = L2CAP_CS_AUTHOR_PEND;
2687 parent->sk_data_ready(parent, 0);
2689 l2cap_state_change(chan, BT_CONFIG);
2690 result = L2CAP_CR_SUCCESS;
2691 status = L2CAP_CS_NO_INFO;
2694 l2cap_state_change(chan, BT_CONNECT2);
2695 result = L2CAP_CR_PEND;
2696 status = L2CAP_CS_AUTHEN_PEND;
2699 l2cap_state_change(chan, BT_CONNECT2);
2700 result = L2CAP_CR_PEND;
2701 status = L2CAP_CS_NO_INFO;
2705 release_sock(parent);
2708 rsp.scid = cpu_to_le16(scid);
2709 rsp.dcid = cpu_to_le16(dcid);
2710 rsp.result = cpu_to_le16(result);
2711 rsp.status = cpu_to_le16(status);
2712 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2714 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2715 struct l2cap_info_req info;
2716 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2718 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2719 conn->info_ident = l2cap_get_ident(conn);
2721 schedule_delayed_work(&conn->info_work,
2722 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2724 l2cap_send_cmd(conn, conn->info_ident,
2725 L2CAP_INFO_REQ, sizeof(info), &info);
2728 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2729 result == L2CAP_CR_SUCCESS) {
2731 set_bit(CONF_REQ_SENT, &chan->conf_state);
2732 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2733 l2cap_build_conf_req(chan, buf), buf);
2734 chan->num_conf_req++;
2740 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2742 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2743 u16 scid, dcid, result, status;
2744 struct l2cap_chan *chan;
2748 scid = __le16_to_cpu(rsp->scid);
2749 dcid = __le16_to_cpu(rsp->dcid);
2750 result = __le16_to_cpu(rsp->result);
2751 status = __le16_to_cpu(rsp->status);
2753 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2756 chan = l2cap_get_chan_by_scid(conn, scid);
2760 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2768 case L2CAP_CR_SUCCESS:
2769 l2cap_state_change(chan, BT_CONFIG);
2772 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2774 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2777 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2778 l2cap_build_conf_req(chan, req), req);
2779 chan->num_conf_req++;
2783 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2787 l2cap_chan_del(chan, ECONNREFUSED);
2795 static inline void set_default_fcs(struct l2cap_chan *chan)
2797 /* FCS is enabled only in ERTM or streaming mode, if one or both
2800 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2801 chan->fcs = L2CAP_FCS_NONE;
2802 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2803 chan->fcs = L2CAP_FCS_CRC16;
2806 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2808 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2811 struct l2cap_chan *chan;
2815 dcid = __le16_to_cpu(req->dcid);
2816 flags = __le16_to_cpu(req->flags);
2818 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2820 chan = l2cap_get_chan_by_scid(conn, dcid);
2826 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2827 struct l2cap_cmd_rej_cid rej;
2829 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2830 rej.scid = cpu_to_le16(chan->scid);
2831 rej.dcid = cpu_to_le16(chan->dcid);
2833 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2838 /* Reject if config buffer is too small. */
2839 len = cmd_len - sizeof(*req);
2840 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2841 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2842 l2cap_build_conf_rsp(chan, rsp,
2843 L2CAP_CONF_REJECT, flags), rsp);
2848 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2849 chan->conf_len += len;
2851 if (flags & 0x0001) {
2852 /* Incomplete config. Send empty response. */
2853 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2854 l2cap_build_conf_rsp(chan, rsp,
2855 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2859 /* Complete config. */
2860 len = l2cap_parse_conf_req(chan, rsp);
2862 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2866 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2867 chan->num_conf_rsp++;
2869 /* Reset config buffer. */
2872 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2875 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2876 set_default_fcs(chan);
2878 l2cap_state_change(chan, BT_CONNECTED);
2880 chan->next_tx_seq = 0;
2881 chan->expected_tx_seq = 0;
2882 skb_queue_head_init(&chan->tx_q);
2883 if (chan->mode == L2CAP_MODE_ERTM)
2884 l2cap_ertm_init(chan);
2886 l2cap_chan_ready(sk);
2890 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2892 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2893 l2cap_build_conf_req(chan, buf), buf);
2894 chan->num_conf_req++;
2897 /* Got Conf Rsp PENDING from remote side and asume we sent
2898 Conf Rsp PENDING in the code above */
2899 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2900 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2902 /* check compatibility */
2904 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2905 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2907 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2908 l2cap_build_conf_rsp(chan, rsp,
2909 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2917 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2919 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2920 u16 scid, flags, result;
2921 struct l2cap_chan *chan;
2923 int len = cmd->len - sizeof(*rsp);
2925 scid = __le16_to_cpu(rsp->scid);
2926 flags = __le16_to_cpu(rsp->flags);
2927 result = __le16_to_cpu(rsp->result);
2929 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2930 scid, flags, result);
2932 chan = l2cap_get_chan_by_scid(conn, scid);
2939 case L2CAP_CONF_SUCCESS:
2940 l2cap_conf_rfc_get(chan, rsp->data, len);
2941 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2944 case L2CAP_CONF_PENDING:
2945 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2947 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2950 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2953 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2957 /* check compatibility */
2959 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2960 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2962 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2963 l2cap_build_conf_rsp(chan, buf,
2964 L2CAP_CONF_SUCCESS, 0x0000), buf);
2968 case L2CAP_CONF_UNACCEPT:
2969 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2972 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2973 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2977 /* throw out any old stored conf requests */
2978 result = L2CAP_CONF_SUCCESS;
2979 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2982 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2986 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2987 L2CAP_CONF_REQ, len, req);
2988 chan->num_conf_req++;
2989 if (result != L2CAP_CONF_SUCCESS)
2995 sk->sk_err = ECONNRESET;
2996 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2997 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3004 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3006 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3007 set_default_fcs(chan);
3009 l2cap_state_change(chan, BT_CONNECTED);
3010 chan->next_tx_seq = 0;
3011 chan->expected_tx_seq = 0;
3012 skb_queue_head_init(&chan->tx_q);
3013 if (chan->mode == L2CAP_MODE_ERTM)
3014 l2cap_ertm_init(chan);
3016 l2cap_chan_ready(sk);
3024 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3026 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3027 struct l2cap_disconn_rsp rsp;
3029 struct l2cap_chan *chan;
3032 scid = __le16_to_cpu(req->scid);
3033 dcid = __le16_to_cpu(req->dcid);
3035 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3037 chan = l2cap_get_chan_by_scid(conn, dcid);
3043 rsp.dcid = cpu_to_le16(chan->scid);
3044 rsp.scid = cpu_to_le16(chan->dcid);
3045 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3047 sk->sk_shutdown = SHUTDOWN_MASK;
3049 l2cap_chan_del(chan, ECONNRESET);
3052 chan->ops->close(chan->data);
3056 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3058 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3060 struct l2cap_chan *chan;
3063 scid = __le16_to_cpu(rsp->scid);
3064 dcid = __le16_to_cpu(rsp->dcid);
3066 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3068 chan = l2cap_get_chan_by_scid(conn, scid);
3074 l2cap_chan_del(chan, 0);
3077 chan->ops->close(chan->data);
/* Answer an L2CAP Information Request.  Supported queries are the
 * extended feature mask and the fixed-channel map; any other type is
 * rejected with L2CAP_IR_NOTSUPP.
 */
3081 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3083 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3086 type = __le16_to_cpu(req->type);
3088 BT_DBG("type 0x%4.4x", type);
3090 if (type == L2CAP_IT_FEAT_MASK) {
3092 u32 feat_mask = l2cap_feat_mask;
3093 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3094 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3095 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming (and, on a path not fully visible here,
 * extended flow spec and extended window) atop the base mask. */
3097 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3100 feat_mask |= L2CAP_FEAT_EXT_FLOW
3101 | L2CAP_FEAT_EXT_WINDOW;
3103 put_unaligned_le32(feat_mask, rsp->data);
3104 l2cap_send_cmd(conn, cmd->ident,
3105 L2CAP_INFO_RSP, sizeof(buf), buf);
3106 } else if (type == L2CAP_IT_FIXED_CHAN) {
3108 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* The A2MP bit of the fixed-channel map is toggled at runtime;
 * the condition guarding these two lines is elided in this view. */
3111 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3113 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3115 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3116 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3117 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3118 l2cap_send_cmd(conn, cmd->ident,
3119 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown type: reply "not supported". */
3121 struct l2cap_info_rsp rsp;
3122 rsp.type = cpu_to_le16(type);
3123 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3124 l2cap_send_cmd(conn, cmd->ident,
3125 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Process an L2CAP Information Response.  Validates it against our
 * outstanding request, cancels the info timeout, and either chains a
 * fixed-channel query (when the peer advertises fixed channels) or
 * marks the info exchange done and kicks pending connections.
 */
3131 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3133 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3136 type = __le16_to_cpu(rsp->type);
3137 result = __le16_to_cpu(rsp->result);
3139 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3141 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
/* Drop responses that don't match our outstanding ident, or arrive
 * after the feature-mask exchange already completed. */
3142 if (cmd->ident != conn->info_ident ||
3143 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3146 cancel_delayed_work_sync(&conn->info_work);
/* Peer refused the query: give up on info and start channels anyway. */
3148 if (result != L2CAP_IR_SUCCESS) {
3149 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3150 conn->info_ident = 0;
3152 l2cap_conn_start(conn);
3157 if (type == L2CAP_IT_FEAT_MASK) {
3158 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: issue the follow-up query. */
3160 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3161 struct l2cap_info_req req;
3162 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3164 conn->info_ident = l2cap_get_ident(conn);
3166 l2cap_send_cmd(conn, conn->info_ident,
3167 L2CAP_INFO_REQ, sizeof(req), &req);
3169 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3170 conn->info_ident = 0;
3172 l2cap_conn_start(conn);
3174 } else if (type == L2CAP_IT_FIXED_CHAN) {
3175 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3176 conn->info_ident = 0;
3178 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  Currently a placeholder:
 * validates the command length, then always rejects with
 * L2CAP_CR_NO_MEM / L2CAP_CS_NO_INFO.
 */
3184 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3185 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3188 struct l2cap_create_chan_req *req = data;
3189 struct l2cap_create_chan_rsp rsp;
/* Malformed command: body must be exactly the request struct. */
3192 if (cmd_len != sizeof(*req))
3198 psm = le16_to_cpu(req->psm);
3199 scid = le16_to_cpu(req->scid);
3201 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3203 /* Placeholder: Always reject */
3205 rsp.scid = cpu_to_le16(scid);
3206 rsp.result = L2CAP_CR_NO_MEM;
3207 rsp.status = L2CAP_CS_NO_INFO;
3209 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response shares the Connection Response layout,
 * so delegate straight to the connect-response handler.
 */
3215 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3216 struct l2cap_cmd_hdr *cmd, void *data)
3218 BT_DBG("conn %p", conn);
3220 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for @icid with the given @result,
 * reusing the @ident of the request being answered.
 */
3223 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3224 u16 icid, u16 result)
3226 struct l2cap_move_chan_rsp rsp;
3228 BT_DBG("icid %d, result %d", icid, result);
3230 rsp.icid = cpu_to_le16(icid);
3231 rsp.result = cpu_to_le16(result);
3233 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm with a freshly allocated ident, which is
 * also remembered on @chan so the matching confirm-response can be
 * correlated later.
 */
3236 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3237 struct l2cap_chan *chan, u16 icid, u16 result)
3239 struct l2cap_move_chan_cfm cfm;
3242 BT_DBG("icid %d, result %d", icid, result);
3244 ident = l2cap_get_ident(conn);
/* NOTE(review): chan may be NULL at one call site (move_channel_rsp
 * placeholder); a NULL check presumably exists in the elided lines —
 * confirm against the full file. */
3246 chan->ident = ident;
3248 cfm.icid = cpu_to_le16(icid);
3249 cfm.result = cpu_to_le16(result);
3251 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirm by sending the confirm-response
 * for @icid under the requester's @ident.
 */
3254 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3257 struct l2cap_move_chan_cfm_rsp rsp;
3259 BT_DBG("icid %d", icid);
3261 rsp.icid = cpu_to_le16(icid);
3262 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  Placeholder implementation: after a
 * length check, always refuse the move with L2CAP_MR_NOT_ALLOWED.
 */
3265 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3266 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3268 struct l2cap_move_chan_req *req = data;
3270 u16 result = L2CAP_MR_NOT_ALLOWED;
3272 if (cmd_len != sizeof(*req))
3275 icid = le16_to_cpu(req->icid);
3277 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3282 /* Placeholder: Always refuse */
3283 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  Placeholder: always replies with a
 * Move Channel Confirm carrying L2CAP_MC_UNCONFIRMED.
 */
3288 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3289 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3291 struct l2cap_move_chan_rsp *rsp = data;
3294 if (cmd_len != sizeof(*rsp))
3297 icid = le16_to_cpu(rsp->icid);
3298 result = le16_to_cpu(rsp->result);
3300 BT_DBG("icid %d, result %d", icid, result);
3302 /* Placeholder: Always unconfirmed */
3303 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirm: validate the length and acknowledge
 * with a confirm-response.  The result is only logged here.
 */
3308 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3309 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3311 struct l2cap_move_chan_cfm *cfm = data;
3314 if (cmd_len != sizeof(*cfm))
3317 icid = le16_to_cpu(cfm->icid);
3318 result = le16_to_cpu(cfm->result);
3320 BT_DBG("icid %d, result %d", icid, result);
3322 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirm Response.  Only parses and logs the
 * icid; no state change is visible here.
 */
3327 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3328 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3330 struct l2cap_move_chan_cfm_rsp *rsp = data;
3333 if (cmd_len != sizeof(*rsp))
3336 icid = le16_to_cpu(rsp->icid);
3338 BT_DBG("icid %d", icid);
/* Validate LE connection parameters against the allowed ranges:
 * interval min/max ordering and bounds, supervision-timeout bounds,
 * and the requirement that the timeout exceed the maximum interval.
 * Return value on failure is in the elided lines (non-zero expected).
 */
3343 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3348 if (min > max || min < 6 || max > 3200)
3351 if (to_multiplier < 10 || to_multiplier > 3200)
3354 if (max >= to_multiplier * 8)
/* Largest slave latency that still fits inside the supervision
 * timeout given the max interval. */
3357 max_latency = (to_multiplier * 8 / max) - 1;
3358 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request: only meaningful
 * when we are master; validates the length and the parameter ranges,
 * answers accepted/rejected, and on acceptance pushes the new
 * parameters down to the controller.
 */
3364 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3365 struct l2cap_cmd_hdr *cmd, u8 *data)
3367 struct hci_conn *hcon = conn->hcon;
3368 struct l2cap_conn_param_update_req *req;
3369 struct l2cap_conn_param_update_rsp rsp;
3370 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may apply a parameter update. */
3373 if (!(hcon->link_mode & HCI_LM_MASTER))
3376 cmd_len = __le16_to_cpu(cmd->len);
3377 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3380 req = (struct l2cap_conn_param_update_req *) data;
3381 min = __le16_to_cpu(req->min);
3382 max = __le16_to_cpu(req->max);
3383 latency = __le16_to_cpu(req->latency);
3384 to_multiplier = __le16_to_cpu(req->to_multiplier);
3386 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3387 min, max, latency, to_multiplier);
3389 memset(&rsp, 0, sizeof(rsp));
3391 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3393 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3395 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3397 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted (guard elided in this view): apply at the HCI layer. */
3401 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signaling command to its handler based on
 * the command code.  Echo requests are answered inline by reflecting
 * the payload; unknown codes are logged as errors.
 */
3406 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3407 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3411 switch (cmd->code) {
3412 case L2CAP_COMMAND_REJ:
3413 l2cap_command_rej(conn, cmd, data);
3416 case L2CAP_CONN_REQ:
3417 err = l2cap_connect_req(conn, cmd, data);
3420 case L2CAP_CONN_RSP:
3421 err = l2cap_connect_rsp(conn, cmd, data);
3424 case L2CAP_CONF_REQ:
3425 err = l2cap_config_req(conn, cmd, cmd_len, data);
3428 case L2CAP_CONF_RSP:
3429 err = l2cap_config_rsp(conn, cmd, data);
3432 case L2CAP_DISCONN_REQ:
3433 err = l2cap_disconnect_req(conn, cmd, data);
3436 case L2CAP_DISCONN_RSP:
3437 err = l2cap_disconnect_rsp(conn, cmd, data);
3440 case L2CAP_ECHO_REQ:
/* Echo: bounce the request payload straight back. */
3441 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3444 case L2CAP_ECHO_RSP:
3447 case L2CAP_INFO_REQ:
3448 err = l2cap_information_req(conn, cmd, data);
3451 case L2CAP_INFO_RSP:
3452 err = l2cap_information_rsp(conn, cmd, data);
3455 case L2CAP_CREATE_CHAN_REQ:
3456 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3459 case L2CAP_CREATE_CHAN_RSP:
3460 err = l2cap_create_channel_rsp(conn, cmd, data);
3463 case L2CAP_MOVE_CHAN_REQ:
3464 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3467 case L2CAP_MOVE_CHAN_RSP:
3468 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3471 case L2CAP_MOVE_CHAN_CFM:
3472 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3475 case L2CAP_MOVE_CHAN_CFM_RSP:
3476 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3480 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signaling command.  Only the connection
 * parameter update request is handled; rejects and update responses
 * are ignored, everything else is logged as unknown.
 */
3488 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3489 struct l2cap_cmd_hdr *cmd, u8 *data)
3491 switch (cmd->code) {
3492 case L2CAP_COMMAND_REJ:
3495 case L2CAP_CONN_PARAM_UPDATE_REQ:
3496 return l2cap_conn_param_update_req(conn, cmd, data);
3498 case L2CAP_CONN_PARAM_UPDATE_RSP:
3502 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signaling channel payload of @skb: iterate over the
 * packed command headers, dispatch each to the LE or BR/EDR handler
 * depending on the link type, and send a Command Reject when a
 * handler fails.  Raw sockets get a copy of the frame first.
 */
3507 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3508 struct sk_buff *skb)
3510 u8 *data = skb->data;
3512 struct l2cap_cmd_hdr cmd;
3515 l2cap_raw_recv(conn, skb);
3517 while (len >= L2CAP_CMD_HDR_SIZE) {
3519 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3520 data += L2CAP_CMD_HDR_SIZE;
3521 len -= L2CAP_CMD_HDR_SIZE;
3523 cmd_len = le16_to_cpu(cmd.len);
3525 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A claimed length past the end of the buffer, or a zero ident,
 * means the stream is corrupt: stop parsing. */
3527 if (cmd_len > len || !cmd.ident) {
3528 BT_DBG("corrupted command");
3532 if (conn->hcon->type == LE_LINK)
3533 err = l2cap_le_sig_cmd(conn, &cmd, data);
3535 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3538 struct l2cap_cmd_rej_unk rej;
3540 BT_ERR("Wrong link type (%d)", err);
3542 /* FIXME: Map err to a valid reason */
3543 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3544 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify and strip the per-frame FCS when the channel uses CRC16.
 * The CRC covers the L2CAP header (enhanced or extended, depending on
 * the EXT_CTRL flag) plus the payload.  Return value on mismatch is
 * in the elided lines (non-zero expected).
 */
3554 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3556 u16 our_fcs, rcv_fcs;
3559 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3560 hdr_size = L2CAP_EXT_HDR_SIZE;
3562 hdr_size = L2CAP_ENH_HDR_SIZE;
3564 if (chan->fcs == L2CAP_FCS_CRC16) {
/* After the trim, skb->len excludes the FCS, so data + len points
 * exactly at the received FCS bytes. */
3565 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3566 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3567 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3569 if (our_fcs != rcv_fcs)
/* Answer a poll by sending whatever is appropriate: an RNR when we
 * are locally busy, otherwise retransmit/send pending I-frames, and
 * fall back to an RR if nothing at all was transmitted.
 */
3575 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3579 chan->frames_sent = 0;
3581 control |= __set_reqseq(chan, chan->buffer_seq);
3583 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3584 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3585 l2cap_send_sframe(chan, control);
3586 set_bit(CONN_RNR_SENT, &chan->conn_state);
3589 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3590 l2cap_retransmit_frames(chan);
3592 l2cap_ertm_send(chan);
/* Nothing went out and we are not busy: an RR still has to carry
 * the acknowledgement. */
3594 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3595 chan->frames_sent == 0) {
3596 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3597 l2cap_send_sframe(chan, control);
/* Insert @skb into the SREJ queue ordered by its tx_seq offset from
 * buffer_seq, walking the existing entries to find the slot.  A frame
 * whose tx_seq is already queued is a duplicate (the early-exit for
 * that case is elided in this view).
 */
3601 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3603 struct sk_buff *next_skb;
3604 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequencing metadata in the skb control block for reassembly. */
3606 bt_cb(skb)->tx_seq = tx_seq;
3607 bt_cb(skb)->sar = sar;
3609 next_skb = skb_peek(&chan->srej_q);
3611 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3614 if (bt_cb(next_skb)->tx_seq == tx_seq)
3617 next_tx_seq_offset = __seq_offset(chan,
3618 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3620 if (next_tx_seq_offset > tx_seq_offset) {
3621 __skb_queue_before(&chan->srej_q, next_skb, skb);
3625 if (skb_queue_is_last(&chan->srej_q, next_skb))
3628 next_skb = skb_queue_next(&chan->srej_q, next_skb);
/* No later entry found: new frame goes at the tail. */
3631 __skb_queue_tail(&chan->srej_q, skb);
/* Append @new_frag to @skb's fragment list, tracking the list tail in
 * *@last_frag so appends stay O(1), and account the fragment's length
 * and truesize on the head skb.
 */
3636 static void append_skb_frag(struct sk_buff *skb,
3637 struct sk_buff *new_frag, struct sk_buff **last_frag)
3639 /* skb->len reflects data in skb as well as all fragments
3640 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones chain off the tail. */
3642 if (!skb_has_frag_list(skb))
3643 skb_shinfo(skb)->frag_list = new_frag;
3645 new_frag->next = NULL;
3647 (*last_frag)->next = new_frag;
3648 *last_frag = new_frag;
3650 skb->len += new_frag->len;
3651 skb->data_len += new_frag->len;
3652 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-tagged frames.  Unsegmented frames are
 * delivered immediately; START frames pull off the SDU length and
 * begin accumulation; CONTINUE/END frames (END case partially elided)
 * append to chan->sdu and deliver once the full length has arrived.
 * Oversized or inconsistent SDUs are discarded (tail cleanup path).
 */
3655 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3659 switch (__get_ctrl_sar(chan, control)) {
3660 case L2CAP_SAR_UNSEGMENTED:
3664 err = chan->ops->recv(chan->data, skb);
3667 case L2CAP_SAR_START:
/* First fragment carries the total SDU length up front. */
3671 chan->sdu_len = get_unaligned_le16(skb->data);
3672 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* SDU larger than our MTU: reject (error path elided here). */
3674 if (chan->sdu_len > chan->imtu) {
3679 if (skb->len >= chan->sdu_len)
3683 chan->sdu_last_frag = skb;
3689 case L2CAP_SAR_CONTINUE:
3693 append_skb_frag(chan->sdu, skb,
3694 &chan->sdu_last_frag);
3697 if (chan->sdu->len >= chan->sdu_len)
3707 append_skb_frag(chan->sdu, skb,
3708 &chan->sdu_last_frag);
/* Final fragment must complete the SDU to exactly sdu_len. */
3711 if (chan->sdu->len != chan->sdu_len)
3714 err = chan->ops->recv(chan->data, chan->sdu);
3717 /* Reassembly complete */
3719 chan->sdu_last_frag = NULL;
/* Failure path: drop the partial SDU and reset reassembly state. */
3727 kfree_skb(chan->sdu);
3729 chan->sdu_last_frag = NULL;
/* Enter local-busy: flag the state, tell the peer with an RNR S-frame
 * carrying our current buffer_seq, and stop the ack timer since we
 * will not be acknowledging new I-frames while busy.
 */
3736 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3740 BT_DBG("chan %p, Enter local busy", chan);
3742 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3744 control = __set_reqseq(chan, chan->buffer_seq);
3745 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3746 l2cap_send_sframe(chan, control);
3748 set_bit(CONN_RNR_SENT, &chan->conn_state);
3750 __clear_ack_timer(chan);
/* Leave local-busy.  If we had sent an RNR, poll the peer with an
 * RR(P=1), arm the monitor timer and wait for the F-bit; then clear
 * the busy/RNR flags.
 */
3753 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3757 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
/* Poll the peer so it resumes transmission. */
3760 control = __set_reqseq(chan, chan->buffer_seq);
3761 control |= __set_ctrl_poll(chan);
3762 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3763 l2cap_send_sframe(chan, control);
3764 chan->retry_count = 1;
3766 __clear_retrans_timer(chan);
3767 __set_monitor_timer(chan);
3769 set_bit(CONN_WAIT_F, &chan->conn_state);
3772 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3773 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3775 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer to toggle receive-side busy state;
 * only meaningful in ERTM mode.
 */
3778 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3780 if (chan->mode == L2CAP_MODE_ERTM) {
3782 l2cap_ertm_enter_local_busy(chan);
3784 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ queue starting at @tx_seq: deliver each in-order
 * buffered frame to reassembly while not locally busy, advancing
 * buffer_seq_srej and the expected tx_seq as frames are consumed.
 * A reassembly error disconnects the channel.
 */
3788 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3790 struct sk_buff *skb;
3793 while ((skb = skb_peek(&chan->srej_q)) &&
3794 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Queue is ordered; a gap means the next missing frame hasn't
 * arrived yet, so stop draining. */
3797 if (bt_cb(skb)->tx_seq != tx_seq)
3800 skb = skb_dequeue(&chan->srej_q);
3801 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3802 err = l2cap_reassemble_sdu(chan, skb, control);
3805 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3809 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3810 tx_seq = __next_seq(chan, tx_seq);
/* Re-send SREJ S-frames for every sequence number still outstanding
 * in the srej list, stopping once @tx_seq is reached; each resent
 * entry is moved to the tail to keep the list in request order.
 */
3814 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3816 struct srej_list *l, *tmp;
3819 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Reached the frame that just arrived: done (removal of this
 * entry happens in lines elided from this view). */
3820 if (l->tx_seq == tx_seq) {
3825 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3826 control |= __set_reqseq(chan, l->tx_seq);
3827 l2cap_send_sframe(chan, control);
3829 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing sequence number between expected_tx_seq
 * and @tx_seq, recording each in the srej list (GFP_ATOMIC; the
 * allocation-failure return is elided here), then step expected_tx_seq
 * past the frame that just arrived.
 */
3833 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3835 struct srej_list *new;
3838 while (tx_seq != chan->expected_tx_seq) {
3839 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3840 control |= __set_reqseq(chan, chan->expected_tx_seq);
3841 l2cap_send_sframe(chan, control);
3843 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3847 new->tx_seq = chan->expected_tx_seq;
3849 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3851 list_add_tail(&new->list, &chan->srej_l);
/* Skip over the out-of-order frame we just received. */
3854 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive path.  Handles the F-bit while waiting
 * for a poll response, acks outstanding frames via req_seq, validates
 * tx_seq against the receive window, and then either: delivers an
 * in-sequence frame to reassembly, buffers an out-of-sequence frame
 * and drives the SREJ recovery machinery, or drops duplicates.
 * Many branch boundaries are elided in this view.
 */
3859 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3861 u16 tx_seq = __get_txseq(chan, rx_control);
3862 u16 req_seq = __get_reqseq(chan, rx_control);
3863 u8 sar = __get_ctrl_sar(chan, rx_control);
3864 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every tx_win/6 frames. */
3865 int num_to_ack = (chan->tx_win/6) + 1;
3868 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3869 tx_seq, rx_control);
/* F-bit answers our poll: stop the monitor timer and, if frames are
 * still unacked, fall back to the retransmission timer. */
3871 if (__is_ctrl_final(chan, rx_control) &&
3872 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3873 __clear_monitor_timer(chan);
3874 if (chan->unacked_frames > 0)
3875 __set_retrans_timer(chan);
3876 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* Piggybacked acknowledgement. */
3879 chan->expected_ack_seq = req_seq;
3880 l2cap_drop_acked_frames(chan);
3882 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3884 /* invalid tx_seq */
3885 if (tx_seq_offset >= chan->tx_win) {
3886 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3890 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3893 if (tx_seq == chan->expected_tx_seq)
/* Out-of-sequence frame while SREJ recovery is already running. */
3896 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3897 struct srej_list *first;
3899 first = list_first_entry(&chan->srej_l,
3900 struct srej_list, list);
3901 if (tx_seq == first->tx_seq) {
3902 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3903 l2cap_check_srej_gap(chan, tx_seq);
3905 list_del(&first->list);
/* All requested retransmissions arrived: leave SREJ_SENT. */
3908 if (list_empty(&chan->srej_l)) {
3909 chan->buffer_seq = chan->buffer_seq_srej;
3910 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3911 l2cap_send_ack(chan);
3912 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3915 struct srej_list *l;
3917 /* duplicated tx_seq */
3918 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3921 list_for_each_entry(l, &chan->srej_l, list) {
3922 if (l->tx_seq == tx_seq) {
3923 l2cap_resend_srejframe(chan, tx_seq);
3928 err = l2cap_send_srejframe(chan, tx_seq);
3930 l2cap_send_disconn_req(chan->conn, chan, -err);
/* First out-of-sequence frame: enter SREJ recovery. */
3935 expected_tx_seq_offset = __seq_offset(chan,
3936 chan->expected_tx_seq, chan->buffer_seq);
3938 /* duplicated tx_seq */
3939 if (tx_seq_offset < expected_tx_seq_offset)
3942 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3944 BT_DBG("chan %p, Enter SREJ", chan);
3946 INIT_LIST_HEAD(&chan->srej_l);
3947 chan->buffer_seq_srej = chan->buffer_seq;
3949 __skb_queue_head_init(&chan->srej_q);
3950 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3952 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3954 err = l2cap_send_srejframe(chan, tx_seq);
3956 l2cap_send_disconn_req(chan->conn, chan, -err);
3960 __clear_ack_timer(chan);
/* In-sequence frame (expected path). */
3965 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3967 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3968 bt_cb(skb)->tx_seq = tx_seq;
3969 bt_cb(skb)->sar = sar;
3970 __skb_queue_tail(&chan->srej_q, skb);
3974 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3975 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3978 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3982 if (__is_ctrl_final(chan, rx_control)) {
3983 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3984 l2cap_retransmit_frames(chan);
/* Batch acknowledgements; otherwise let the ack timer fire. */
3988 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3989 if (chan->num_acked == num_to_ack - 1)
3990 l2cap_send_ack(chan);
3992 __set_ack_timer(chan);
/* Handle a Receiver Ready S-frame: ack acknowledged frames, then
 * respond according to the P/F bits — a poll gets an F-bit reply (RR,
 * RNR or SREJ tail depending on state), a final clears remote-busy
 * and may trigger retransmission, and a plain RR resumes sending.
 */
4001 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4003 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4004 __get_reqseq(chan, rx_control), rx_control);
4006 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4007 l2cap_drop_acked_frames(chan);
4009 if (__is_ctrl_poll(chan, rx_control)) {
4010 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4011 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4012 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4013 (chan->unacked_frames > 0))
4014 __set_retrans_timer(chan);
4016 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4017 l2cap_send_srejtail(chan);
4019 l2cap_send_i_or_rr_or_rnr(chan);
4022 } else if (__is_ctrl_final(chan, rx_control)) {
4023 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F-bit without a pending REJ means this answers our poll:
 * retransmit what's still unacknowledged. */
4025 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4026 l2cap_retransmit_frames(chan);
4029 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4030 (chan->unacked_frames > 0))
4031 __set_retrans_timer(chan);
4033 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4034 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4035 l2cap_send_ack(chan);
4037 l2cap_ertm_send(chan);
/* Handle a Reject S-frame: the peer asks us to retransmit from
 * req_seq.  Drop newly acknowledged frames, then retransmit — on an
 * F-bit reject only when no REJ action is already pending; otherwise
 * always, remembering the pending action if we are mid-poll.
 */
4041 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4043 u16 tx_seq = __get_reqseq(chan, rx_control);
4045 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4047 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4049 chan->expected_ack_seq = tx_seq;
4050 l2cap_drop_acked_frames(chan);
4052 if (__is_ctrl_final(chan, rx_control)) {
4053 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4054 l2cap_retransmit_frames(chan);
4056 l2cap_retransmit_frames(chan);
4058 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4059 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a Selective Reject S-frame: retransmit exactly the requested
 * frame.  A poll additionally acks up to req_seq and resumes sending
 * with the F-bit set; a final only retransmits if this SREJ wasn't
 * already acted on; the plain case records pending SREJ state when
 * we are waiting on a poll response.
 */
4062 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4064 u16 tx_seq = __get_reqseq(chan, rx_control);
4066 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4068 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4070 if (__is_ctrl_poll(chan, rx_control)) {
4071 chan->expected_ack_seq = tx_seq;
4072 l2cap_drop_acked_frames(chan);
4074 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4075 l2cap_retransmit_one_frame(chan, tx_seq);
4077 l2cap_ertm_send(chan);
4079 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4080 chan->srej_save_reqseq = tx_seq;
4081 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4083 } else if (__is_ctrl_final(chan, rx_control)) {
/* Skip the retransmit if this same SREJ was already handled
 * while polling (saved reqseq matches). */
4084 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4085 chan->srej_save_reqseq == tx_seq)
4086 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4088 l2cap_retransmit_one_frame(chan, tx_seq);
4090 l2cap_retransmit_one_frame(chan, tx_seq);
4091 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4092 chan->srej_save_reqseq = tx_seq;
4093 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a Receiver Not Ready S-frame: mark the peer busy, ack up to
 * req_seq, and answer a poll with either an RR(F=1) (normal case) or
 * the SREJ tail (when SREJ recovery is in progress).
 */
4098 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4100 u16 tx_seq = __get_reqseq(chan, rx_control);
4102 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4104 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4105 chan->expected_ack_seq = tx_seq;
4106 l2cap_drop_acked_frames(chan);
4108 if (__is_ctrl_poll(chan, rx_control))
4109 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Peer is busy: no point running the retransmission timer. */
4111 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4112 __clear_retrans_timer(chan);
4113 if (__is_ctrl_poll(chan, rx_control))
4114 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4118 if (__is_ctrl_poll(chan, rx_control)) {
4119 l2cap_send_srejtail(chan);
4121 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4122 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received S-frame (RR/REJ/SREJ/RNR) to its handler.
 * Before dispatching, an F-bit that answers our poll stops the
 * monitor timer and re-arms retransmission if frames are unacked.
 */
4126 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4128 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4130 if (__is_ctrl_final(chan, rx_control) &&
4131 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4132 __clear_monitor_timer(chan);
4133 if (chan->unacked_frames > 0)
4134 __set_retrans_timer(chan);
4135 clear_bit(CONN_WAIT_F, &chan->conn_state);
4138 switch (__get_ctrl_super(chan, rx_control)) {
4139 case L2CAP_SUPER_RR:
4140 l2cap_data_channel_rrframe(chan, rx_control);
4143 case L2CAP_SUPER_REJ:
4144 l2cap_data_channel_rejframe(chan, rx_control);
4147 case L2CAP_SUPER_SREJ:
4148 l2cap_data_channel_srejframe(chan, rx_control);
4151 case L2CAP_SUPER_RNR:
4152 l2cap_data_channel_rnrframe(chan, rx_control);
/* ERTM receive entry point: strip the control field, verify the FCS,
 * sanity-check the payload length against MPS and the req-seq against
 * the send window, then route the frame to the I-frame or S-frame
 * handler.  Protocol violations tear the channel down.
 */
4160 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4162 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4165 int len, next_tx_seq_offset, req_seq_offset;
4167 control = __get_control(chan, skb->data);
4168 skb_pull(skb, __ctrl_size(chan));
4172 * We can just drop the corrupted I-frame here.
4173 * Receiver will miss it and start proper recovery
4174 * procedures and ask retransmission.
4176 if (l2cap_check_fcs(chan, skb))
/* A START I-frame carries the SDU length before the payload. */
4179 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4180 len -= L2CAP_SDULEN_SIZE;
4182 if (chan->fcs == L2CAP_FCS_CRC16)
4183 len -= L2CAP_FCS_SIZE;
4185 if (len > chan->mps) {
4186 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4190 req_seq = __get_reqseq(chan, control);
4192 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4194 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4195 chan->expected_ack_seq);
4197 /* check for invalid req-seq */
/* A req-seq past our next unsent frame acknowledges frames that
 * were never transmitted — a peer protocol violation. */
4198 if (req_seq_offset > next_tx_seq_offset) {
4199 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4203 if (!__is_sframe(chan, control)) {
4205 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4209 l2cap_data_channel_iframe(chan, control, skb);
4213 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4217 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data frame to the channel identified by @cid, branching
 * on the channel mode: basic mode delivers directly (dropping frames
 * over the MTU), ERTM goes through the full state machine, and
 * streaming mode verifies FCS/length, discards partial SDUs on any
 * sequence gap, and reassembles without retransmission.
 */
4227 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4229 struct l2cap_chan *chan;
4230 struct sock *sk = NULL;
4235 chan = l2cap_get_chan_by_scid(conn, cid);
4237 BT_DBG("unknown cid 0x%4.4x", cid);
4243 BT_DBG("chan %p, len %d", chan, skb->len);
4245 if (chan->state != BT_CONNECTED)
4248 switch (chan->mode) {
4249 case L2CAP_MODE_BASIC:
4250 /* If socket recv buffers overflows we drop data here
4251 * which is *bad* because L2CAP has to be reliable.
4252 * But we don't have any other choice. L2CAP doesn't
4253 * provide flow control mechanism. */
4255 if (chan->imtu < skb->len)
4258 if (!chan->ops->recv(chan->data, skb))
4262 case L2CAP_MODE_ERTM:
4263 l2cap_ertm_data_rcv(sk, skb);
4267 case L2CAP_MODE_STREAMING:
4268 control = __get_control(chan, skb->data);
4269 skb_pull(skb, __ctrl_size(chan));
4272 if (l2cap_check_fcs(chan, skb))
4275 if (__is_sar_start(chan, control))
4276 len -= L2CAP_SDULEN_SIZE;
4278 if (chan->fcs == L2CAP_FCS_CRC16)
4279 len -= L2CAP_FCS_SIZE;
/* Streaming mode never carries S-frames; oversize or S-frame
 * means the frame is bogus. */
4281 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4284 tx_seq = __get_txseq(chan, control);
4286 if (chan->expected_tx_seq != tx_seq) {
4287 /* Frame(s) missing - must discard partial SDU */
4288 kfree_skb(chan->sdu);
4290 chan->sdu_last_frag = NULL;
4293 /* TODO: Notify userland of missing data */
4296 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4298 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4299 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4304 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (CID 0x0002) frame to the global channel
 * listening on @psm for our local address, subject to state and MTU
 * checks.
 */
4318 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4320 struct sock *sk = NULL;
4321 struct l2cap_chan *chan;
4323 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4331 BT_DBG("sk %p, len %d", sk, skb->len);
4333 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4336 if (chan->imtu < skb->len)
4339 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE data) frame to the global channel registered on
 * @cid for our local address; mirrors the connectionless path.
 */
4351 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4353 struct sock *sk = NULL;
4354 struct l2cap_chan *chan;
4356 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4364 BT_DBG("sk %p, len %d", sk, skb->len);
4366 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4369 if (chan->imtu < skb->len)
4372 if (!chan->ops->recv(chan->data, skb))
/* Top-level demultiplexer for a complete L2CAP frame: strip the basic
 * header, verify the advertised length, and route by CID — signaling,
 * connectionless, ATT/SMP fixed channels, or a dynamic data channel.
 */
4384 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4386 struct l2cap_hdr *lh = (void *) skb->data;
4390 skb_pull(skb, L2CAP_HDR_SIZE);
4391 cid = __le16_to_cpu(lh->cid);
4392 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload exactly. */
4394 if (len != skb->len) {
4399 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4402 case L2CAP_CID_LE_SIGNALING:
4403 case L2CAP_CID_SIGNALING:
4404 l2cap_sig_channel(conn, skb);
4407 case L2CAP_CID_CONN_LESS:
4408 psm = get_unaligned_le16(skb->data);
4410 l2cap_conless_channel(conn, psm, skb);
4413 case L2CAP_CID_LE_DATA:
4414 l2cap_att_channel(conn, cid, skb);
/* SMP fixed channel: a failure there kills the whole connection. */
4418 if (smp_sig_channel(conn, skb))
4419 l2cap_conn_del(conn->hcon, EACCES);
4423 l2cap_data_channel(conn, cid, skb);
4428 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * @bdaddr?  Scan listening channels; an exact local-address match
 * (lm1) takes priority over BDADDR_ANY wildcard listeners (lm2).
 * The returned link-mode bits include MASTER when any matching
 * listener requests role switch.
 */
4430 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4432 int exact = 0, lm1 = 0, lm2 = 0;
4433 struct l2cap_chan *c;
4435 if (type != ACL_LINK)
4438 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4440 /* Find listening sockets and check their link_mode */
4441 read_lock(&chan_list_lock);
4442 list_for_each_entry(c, &chan_list, global_l) {
4443 struct sock *sk = c->sk;
4445 if (c->state != BT_LISTEN)
4448 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4449 lm1 |= HCI_LM_ACCEPT;
4450 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4451 lm1 |= HCI_LM_MASTER;
/* NOTE(review): setting of 'exact' is in lines elided here. */
4453 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4454 lm2 |= HCI_LM_ACCEPT;
4455 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4456 lm2 |= HCI_LM_MASTER;
4459 read_unlock(&chan_list_lock);
4461 return exact ? lm1 : lm2;
/* HCI callback for ACL/LE connection completion: on success attach an
 * L2CAP connection object and mark it ready; on failure tear down any
 * existing L2CAP state with the mapped errno.
 */
4464 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4466 struct l2cap_conn *conn;
4468 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4470 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4474 conn = l2cap_conn_add(hcon, status);
4476 l2cap_conn_ready(conn);
4478 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback asking which HCI reason to use when disconnecting;
 * report the reason L2CAP recorded, defaulting to remote-user-
 * terminated for unknown links or missing L2CAP state.
 */
4483 static int l2cap_disconn_ind(struct hci_conn *hcon)
4485 struct l2cap_conn *conn = hcon->l2cap_data;
4487 BT_DBG("hcon %p", hcon);
4489 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4490 return HCI_ERROR_REMOTE_USER_TERM;
4492 return conn->disc_reason;
/* HCI callback for link disconnection: tear down the L2CAP connection
 * with the HCI reason mapped to an errno.
 */
4495 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4497 BT_DBG("hcon %p reason %d", hcon, reason);
4499 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4502 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a disconnect timer (medium security) or
 * closes outright (high security); regaining it cancels the timer
 * for medium-security channels.
 */
4507 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4509 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4512 if (encrypt == 0x00) {
4513 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4514 __clear_chan_timer(chan);
4515 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4516 } else if (chan->sec_level == BT_SECURITY_HIGH)
4517 l2cap_chan_close(chan, ECONNREFUSED);
4519 if (chan->sec_level == BT_SECURITY_MEDIUM)
4520 __clear_chan_timer(chan);
/* HCI security (auth/encrypt) completion callback.  LE links finish
 * SMP key distribution; for every channel on a BR/EDR link, either
 * re-check encryption on established channels, push a pending
 * outgoing connect request (BT_CONNECT), or answer a deferred
 * incoming one (BT_CONNECT2) with success or a security block.
 * Several branch boundaries are elided in this view.
 */
4524 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4526 struct l2cap_conn *conn = hcon->l2cap_data;
4527 struct l2cap_chan *chan;
4532 BT_DBG("conn %p", conn);
4534 if (hcon->type == LE_LINK) {
4535 smp_distribute_keys(conn, 0);
4536 del_timer(&conn->security_timer);
4541 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4542 struct sock *sk = chan->sk;
4546 BT_DBG("chan->scid %d", chan->scid);
/* LE ATT channel: adopt the link's security level once encrypted. */
4548 if (chan->scid == L2CAP_CID_LE_DATA) {
4549 if (!status && encrypt) {
4550 chan->sec_level = hcon->sec_level;
4551 l2cap_chan_ready(sk);
4558 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4563 if (!status && (chan->state == BT_CONNECTED ||
4564 chan->state == BT_CONFIG)) {
4565 l2cap_check_encryption(chan, encrypt);
/* Security just completed for a pending outgoing connect:
 * send the Connection Request now. */
4570 if (chan->state == BT_CONNECT) {
4572 struct l2cap_conn_req req;
4573 req.scid = cpu_to_le16(chan->scid);
4574 req.psm = chan->psm;
4576 chan->ident = l2cap_get_ident(conn);
4577 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4579 l2cap_send_cmd(conn, chan->ident,
4580 L2CAP_CONN_REQ, sizeof(req), &req);
4582 __clear_chan_timer(chan);
4583 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4585 } else if (chan->state == BT_CONNECT2) {
4586 struct l2cap_conn_rsp rsp;
/* Deferred accept: keep the peer pending and wake the
 * listening parent; otherwise proceed to config. */
4590 if (bt_sk(sk)->defer_setup) {
4591 struct sock *parent = bt_sk(sk)->parent;
4592 res = L2CAP_CR_PEND;
4593 stat = L2CAP_CS_AUTHOR_PEND;
4595 parent->sk_data_ready(parent, 0);
4597 l2cap_state_change(chan, BT_CONFIG);
4598 res = L2CAP_CR_SUCCESS;
4599 stat = L2CAP_CS_NO_INFO;
/* Security failed: block the connection and schedule teardown. */
4602 l2cap_state_change(chan, BT_DISCONN);
4603 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4604 res = L2CAP_CR_SEC_BLOCK;
4605 stat = L2CAP_CS_NO_INFO;
4608 rsp.scid = cpu_to_le16(chan->dcid);
4609 rsp.dcid = cpu_to_le16(chan->scid);
4610 rsp.result = cpu_to_le16(res);
4611 rsp.status = cpu_to_le16(stat);
4612 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* HCI ACL data callback: reassemble L2CAP frames delivered in HCI
 * fragments. ACL_CONT clear = start fragment (carries the basic L2CAP
 * header); ACL_CONT set = continuation of a pending frame. Complete
 * frames are handed to l2cap_recv_frame(). */
static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
	struct l2cap_conn *conn = hcon->l2cap_data;

	/* No connection object yet: create one for this link */
	conn = l2cap_conn_add(hcon, 0);

	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);

	if (!(flags & ACL_CONT)) {
		/* --- Start fragment --- */
		struct l2cap_hdr *hdr;
		struct l2cap_chan *chan;

		/* A reassembly was already in progress: drop it and
		 * flag the connection unreliable */
		BT_ERR("Unexpected start frame (len %d)", skb->len);
		kfree_skb(conn->rx_skb);
		conn->rx_skb = NULL;
		l2cap_conn_unreliable(conn, ECOMM);

		/* Start fragment always begin with Basic L2CAP header */
		if (skb->len < L2CAP_HDR_SIZE) {
			BT_ERR("Frame is too short (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);

		hdr = (struct l2cap_hdr *) skb->data;
		/* Total frame length, header included */
		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
		cid = __le16_to_cpu(hdr->cid);

		if (len == skb->len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, skb);

		BT_DBG("Start: total len %d, frag len %d", len, skb->len);

		if (skb->len > len) {
			/* First fragment exceeds the advertised length */
			BT_ERR("Frame is too long (len %d, expected len %d)",
			l2cap_conn_unreliable(conn, ECOMM);

		chan = l2cap_get_chan_by_scid(conn, cid);

		if (chan && chan->sk) {
			struct sock *sk = chan->sk;

			/* Enforce the channel's receive MTU up front */
			if (chan->imtu < len - L2CAP_HDR_SIZE) {
				BT_ERR("Frame exceeding recv MTU (len %d, "
				l2cap_conn_unreliable(conn, ECOMM);

		/* Allocate skb for the complete frame (with header) */
		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);

		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		/* Bytes still expected from continuation fragments */
		conn->rx_len = len - skb->len;
	/* --- Continuation fragment --- */
		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);

		if (!conn->rx_len) {
			/* No reassembly pending: protocol violation */
			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
			l2cap_conn_unreliable(conn, ECOMM);

		if (skb->len > conn->rx_len) {
			/* Fragment overflows the pending frame: abort */
			BT_ERR("Fragment is too long (len %d, expected %d)",
					skb->len, conn->rx_len);
			kfree_skb(conn->rx_skb);
			conn->rx_skb = NULL;
			l2cap_conn_unreliable(conn, ECOMM);

		/* Append this fragment to the reassembly buffer */
		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
		conn->rx_len -= skb->len;

		if (!conn->rx_len) {
			/* Complete frame received */
			l2cap_recv_frame(conn, conn->rx_skb);
			conn->rx_skb = NULL;
/* seq_file show routine for the "l2cap" debugfs entry: one line per
 * channel on the global chan_list, under chan_list_lock (read, BH). */
static int l2cap_debugfs_show(struct seq_file *f, void *p)
	struct l2cap_chan *c;

	read_lock_bh(&chan_list_lock);

	list_for_each_entry(c, &chan_list, global_l) {
		struct sock *sk = c->sk;

		/* src dst state psm scid dcid imtu omtu sec_level mode */
		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
					batostr(&bt_sk(sk)->src),
					batostr(&bt_sk(sk)->dst),
					c->state, __le16_to_cpu(c->psm),
					c->scid, c->dcid, c->imtu, c->omtu,
					c->sec_level, c->mode);

	read_unlock_bh(&chan_list_lock);
/* debugfs open: wire the seq_file single_open() helper to our show
 * routine; i_private is the data pointer from debugfs_create_file(). */
static int l2cap_debugfs_open(struct inode *inode, struct file *file)
	return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based) */
static const struct file_operations l2cap_debugfs_fops = {
	.open		= l2cap_debugfs_open,
	.llseek		= seq_lseek,
	.release	= single_release,
/* dentry of the "l2cap" debugfs file; created in l2cap_init(),
 * removed in l2cap_exit() */
static struct dentry *l2cap_debugfs;
/* Callback table registering L2CAP as an HCI protocol: the HCI core
 * routes connect/disconnect/security events and ACL data through here */
static struct hci_proto l2cap_hci_proto = {
	.id		= HCI_PROTO_L2CAP,
	.connect_ind	= l2cap_connect_ind,
	.connect_cfm	= l2cap_connect_cfm,
	.disconn_ind	= l2cap_disconn_ind,
	.disconn_cfm	= l2cap_disconn_cfm,
	.security_cfm	= l2cap_security_cfm,
	.recv_acldata	= l2cap_recv_acldata
/* Module init: register the L2CAP socket layer, hook into the HCI
 * core, and (best-effort) create the debugfs file.
 * Returns 0 on success or a negative errno. */
int __init l2cap_init(void)
	err = l2cap_init_sockets();

	err = hci_register_proto(&l2cap_hci_proto);
	/* Unwind socket registration if protocol registration fails */
	BT_ERR("L2CAP protocol registration failed");
	bt_sock_unregister(BTPROTO_L2CAP);

	/* debugfs is optional: a failure here is only logged */
	l2cap_debugfs = debugfs_create_file("l2cap", 0444,
				bt_debugfs, NULL, &l2cap_debugfs_fops);
	BT_ERR("Failed to create L2CAP debug file");

	/* error path: undo socket registration */
	l2cap_cleanup_sockets();
/* Module teardown: undo l2cap_init() in reverse order */
void l2cap_exit(void)
	debugfs_remove(l2cap_debugfs);

	if (hci_unregister_proto(&l2cap_hci_proto) < 0)
		BT_ERR("L2CAP protocol unregistration failed");

	l2cap_cleanup_sockets();
/* Module parameter: lets ERTM (enhanced retransmission mode) be
 * disabled at load time or via sysfs (mode 0644) */
module_param(disable_ertm, bool, 0644);
MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");