2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
/* Feature mask advertised in L2CAP Information Responses; only the
 * fixed-channel feature is set by default.  l2cap_fixed_chan flags the
 * L2CAP signalling channel as a supported fixed channel. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of all registered channels, guarded by chan_list_lock. */
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for helpers defined later in the file.
 * NOTE(review): excerpt is fragmentary — the l2cap_send_cmd prototype is
 * missing its trailing "void *data);" line here; confirm against the
 * full file. */
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 /* ---- L2CAP channels ---- */
/* Find a channel on @conn by destination CID, scanning conn->chan_l
 * under RCU.  NOTE(review): fragment — match test, return and closing
 * brace are not visible in this excerpt. */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
80 struct l2cap_chan *c, *r = NULL;
84 list_for_each_entry_rcu(c, &conn->chan_l, list) {
/* Find a channel on @conn by source CID (our locally allocated CID).
 * NOTE(review): fragment — return path not visible here. */
95 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
97 struct l2cap_chan *c, *r = NULL;
101 list_for_each_entry_rcu(c, &conn->chan_l, list) {
102 if (c->scid == cid) {
112 /* Find channel with given SCID.
113 * Returns locked socket */
114 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
116 struct l2cap_chan *c;
/* Delegates the lookup; locking of the result happens in the lines
 * elided from this excerpt. */
118 c = __l2cap_get_chan_by_scid(conn, cid);
/* Find a channel on @conn by the signalling-command identifier it last
 * used.  NOTE(review): fragment — return path not visible. */
124 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
126 struct l2cap_chan *c, *r = NULL;
130 list_for_each_entry_rcu(c, &conn->chan_l, list) {
131 if (c->ident == ident) {
/* Locking wrapper around __l2cap_get_chan_by_ident() (fragment). */
141 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
143 struct l2cap_chan *c;
145 c = __l2cap_get_chan_by_ident(conn, ident);
/* Find a channel in the global chan_list bound to @psm (compared against
 * the channel's source port) and source address @src.  Caller must hold
 * chan_list_lock. */
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
153 struct l2cap_chan *c;
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on source address @src.  If @psm is non-zero it
 * must not already be bound; otherwise (lines elided) an odd dynamic
 * PSM in 0x1001..0x10ff is auto-allocated. */
162 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
166 write_lock(&chan_list_lock);
168 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Dynamic PSMs are odd values; step by 2 to stay odd. */
181 for (p = 0x1001; p < 0x1100; p += 2)
182 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
183 chan->psm = cpu_to_le16(p);
184 chan->sport = cpu_to_le16(p);
191 write_unlock(&chan_list_lock);
/* Assign a fixed source CID to @chan under chan_list_lock (assignment
 * itself is in the elided lines). */
195 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
197 write_lock(&chan_list_lock);
201 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic source CID on @conn by linear scan of
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
206 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
208 u16 cid = L2CAP_CID_DYN_START;
210 for (; cid < L2CAP_CID_DYN_END; cid++) {
211 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move @chan to @state and notify the owner via its state_change op. */
218 static void l2cap_state_change(struct l2cap_chan *chan, int state)
220 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
221 state_to_string(state));
224 chan->ops->state_change(chan->data, state);
/* Delayed-work handler for the channel timer: pick an errno based on
 * the channel state, close the channel and drop the timer's reference. */
227 static void l2cap_chan_timeout(struct work_struct *work)
229 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
231 struct sock *sk = chan->sk;
234 BT_DBG("chan %p state %d", chan, chan->state);
/* NOTE(review): default reason (for other states) is elided from this
 * excerpt. */
238 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
239 reason = ECONNREFUSED;
240 else if (chan->state == BT_CONNECT &&
241 chan->sec_level != BT_SECURITY_SDP)
242 reason = ECONNREFUSED;
246 l2cap_chan_close(chan, reason);
250 chan->ops->close(chan->data);
251 l2cap_chan_put(chan);
/* Allocate and initialise a channel for @sk: link it into the global
 * list, arm the timeout work item, start in BT_OPEN with one reference. */
254 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
256 struct l2cap_chan *chan;
258 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
264 write_lock(&chan_list_lock);
265 list_add(&chan->global_l, &chan_list);
266 write_unlock(&chan_list_lock);
268 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
270 chan->state = BT_OPEN;
272 atomic_set(&chan->refcnt, 1);
274 BT_DBG("sk %p chan %p", sk, chan);
/* Unlink @chan from the global list and drop the creation reference. */
279 void l2cap_chan_destroy(struct l2cap_chan *chan)
281 write_lock(&chan_list_lock);
282 list_del(&chan->global_l);
283 write_unlock(&chan_list_lock);
285 l2cap_chan_put(chan);
/* Attach @chan to @conn: assign CIDs/MTU according to channel type,
 * initialise default flow-spec parameters, take a reference and link
 * the channel into conn->chan_l. */
288 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
290 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
291 chan->psm, chan->dcid);
293 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
297 switch (chan->chan_type) {
298 case L2CAP_CHAN_CONN_ORIENTED:
/* LE links use the fixed LE data channel; ACL links get a dynamic CID. */
299 if (conn->hcon->type == LE_LINK) {
301 chan->omtu = L2CAP_LE_DEFAULT_MTU;
302 chan->scid = L2CAP_CID_LE_DATA;
303 chan->dcid = L2CAP_CID_LE_DATA;
305 /* Alloc CID for connection-oriented socket */
306 chan->scid = l2cap_alloc_cid(conn);
307 chan->omtu = L2CAP_DEFAULT_MTU;
311 case L2CAP_CHAN_CONN_LESS:
312 /* Connectionless socket */
313 chan->scid = L2CAP_CID_CONN_LESS;
314 chan->dcid = L2CAP_CID_CONN_LESS;
315 chan->omtu = L2CAP_DEFAULT_MTU;
319 /* Raw socket can send/recv signalling messages only */
320 chan->scid = L2CAP_CID_SIGNALING;
321 chan->dcid = L2CAP_CID_SIGNALING;
322 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort service defaults for the extended flow specification. */
325 chan->local_id = L2CAP_BESTEFFORT_ID;
326 chan->local_stype = L2CAP_SERV_BESTEFFORT;
327 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
328 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
329 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
330 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
332 l2cap_chan_hold(chan);
334 list_add_rcu(&chan->list, &conn->chan_l);
/* Detach @chan from its connection and tear down per-mode state.
338 * Must be called on the locked socket. */
339 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
341 struct sock *sk = chan->sk;
342 struct l2cap_conn *conn = chan->conn;
343 struct sock *parent = bt_sk(sk)->parent;
345 __clear_chan_timer(chan);
347 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
350 /* Delete from channel list */
351 list_del_rcu(&chan->list);
354 l2cap_chan_put(chan);
357 hci_conn_put(conn->hcon);
360 l2cap_state_change(chan, BT_CLOSED);
361 sock_set_flag(sk, SOCK_ZAPPED);
/* Wake a listening parent (pending accept) or the socket itself. */
367 bt_accept_unlink(sk);
368 parent->sk_data_ready(parent, 0);
370 sk->sk_state_change(sk);
372 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
373 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
376 skb_queue_purge(&chan->tx_q);
/* ERTM mode keeps extra timers and SREJ state that must be freed. */
378 if (chan->mode == L2CAP_MODE_ERTM) {
379 struct srej_list *l, *tmp;
381 __clear_retrans_timer(chan);
382 __clear_monitor_timer(chan);
383 __clear_ack_timer(chan);
385 skb_queue_purge(&chan->srej_q);
387 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every channel still sitting on @parent's accept queue. */
394 static void l2cap_chan_cleanup_listen(struct sock *parent)
398 BT_DBG("parent %p", parent);
400 /* Close not yet accepted channels */
401 while ((sk = bt_accept_dequeue(parent, NULL))) {
402 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
403 __clear_chan_timer(chan);
405 l2cap_chan_close(chan, ECONNRESET);
407 chan->ops->close(chan->data);
/* Close @chan according to its current state: listening sockets flush
 * their accept queue; connected ACL channels send a Disconnect Request;
 * half-open incoming channels answer the pending Connect Request with a
 * rejection; otherwise the channel is simply deleted. */
411 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
413 struct l2cap_conn *conn = chan->conn;
414 struct sock *sk = chan->sk;
416 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
418 switch (chan->state) {
420 l2cap_chan_cleanup_listen(sk);
422 l2cap_state_change(chan, BT_CLOSED);
423 sock_set_flag(sk, SOCK_ZAPPED);
/* Connected/config state on ACL: disconnect gracefully with a timer. */
428 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
429 conn->hcon->type == ACL_LINK) {
430 __clear_chan_timer(chan);
431 __set_chan_timer(chan, sk->sk_sndtimeo);
432 l2cap_send_disconn_req(conn, chan, reason);
434 l2cap_chan_del(chan, reason);
/* BT_CONNECT2: a remote Connect Request is pending — reject it. */
438 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
439 conn->hcon->type == ACL_LINK) {
440 struct l2cap_conn_rsp rsp;
443 if (bt_sk(sk)->defer_setup)
444 result = L2CAP_CR_SEC_BLOCK;
446 result = L2CAP_CR_BAD_PSM;
447 l2cap_state_change(chan, BT_DISCONN);
449 rsp.scid = cpu_to_le16(chan->dcid);
450 rsp.dcid = cpu_to_le16(chan->scid);
451 rsp.result = cpu_to_le16(result);
452 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
453 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
457 l2cap_chan_del(chan, reason);
462 l2cap_chan_del(chan, reason);
466 sock_set_flag(sk, SOCK_ZAPPED);
/* Map channel type/security level to an HCI authentication requirement:
 * raw channels use dedicated bonding, the SDP PSM (0x0001) never bonds,
 * everything else uses general bonding. */
471 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
473 if (chan->chan_type == L2CAP_CHAN_RAW) {
474 switch (chan->sec_level) {
475 case BT_SECURITY_HIGH:
476 return HCI_AT_DEDICATED_BONDING_MITM;
477 case BT_SECURITY_MEDIUM:
478 return HCI_AT_DEDICATED_BONDING;
480 return HCI_AT_NO_BONDING;
482 } else if (chan->psm == cpu_to_le16(0x0001)) {
/* SDP connections are downgraded from LOW to the dedicated SDP level. */
483 if (chan->sec_level == BT_SECURITY_LOW)
484 chan->sec_level = BT_SECURITY_SDP;
486 if (chan->sec_level == BT_SECURITY_HIGH)
487 return HCI_AT_NO_BONDING_MITM;
489 return HCI_AT_NO_BONDING;
491 switch (chan->sec_level) {
492 case BT_SECURITY_HIGH:
493 return HCI_AT_GENERAL_BONDING_MITM;
494 case BT_SECURITY_MEDIUM:
495 return HCI_AT_GENERAL_BONDING;
497 return HCI_AT_NO_BONDING;
502 /* Service level security */
/* Returns the result of enforcing @chan's security level on the
 * underlying HCI connection. */
503 int l2cap_chan_check_security(struct l2cap_chan *chan)
505 struct l2cap_conn *conn = chan->conn;
508 auth_type = l2cap_get_auth_type(chan);
510 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Hand out the next signalling-command identifier, wrapping within the
 * kernel's 1..128 range under conn->lock. */
513 static u8 l2cap_get_ident(struct l2cap_conn *conn)
517 /* Get next available identificator.
518 * 1 - 128 are used by kernel.
519 * 129 - 199 are reserved.
520 * 200 - 254 are used by utilities like l2ping, etc.
523 spin_lock(&conn->lock);
525 if (++conn->tx_ident > 128)
530 spin_unlock(&conn->lock);
/* Build a signalling command skb and transmit it on the connection's
 * HCI channel at maximum priority, forcing the link active. */
535 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
537 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
540 BT_DBG("code 0x%2.2x", code);
/* Use a non-flushable ACL start packet when the controller supports it. */
545 if (lmp_no_flush_capable(conn->hcon->hdev))
546 flags = ACL_START_NO_FLUSH;
550 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
551 skb->priority = HCI_PRIO_MAX;
553 hci_send_acl(conn->hchan, skb, flags);
/* Transmit a data skb for @chan over ACL, choosing flush semantics from
 * the channel's FLAG_FLUSHABLE setting and controller capability. */
556 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
558 struct hci_conn *hcon = chan->conn->hcon;
561 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
564 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
565 lmp_no_flush_capable(hcon->hdev))
566 flags = ACL_START_NO_FLUSH;
570 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
571 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM supervisory frame carrying @control, sized for
 * enhanced or extended control fields, with optional CRC16 FCS. */
574 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
577 struct l2cap_hdr *lh;
578 struct l2cap_conn *conn = chan->conn;
581 if (chan->state != BT_CONNECTED)
584 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
585 hlen = L2CAP_EXT_HDR_SIZE;
587 hlen = L2CAP_ENH_HDR_SIZE;
589 if (chan->fcs == L2CAP_FCS_CRC16)
590 hlen += L2CAP_FCS_SIZE;
592 BT_DBG("chan %p, control 0x%8.8x", chan, control);
594 count = min_t(unsigned int, conn->mtu, hlen);
596 control |= __set_sframe(chan);
/* Pending F-bit / P-bit requests are consumed into this frame. */
598 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
599 control |= __set_ctrl_final(chan);
601 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
602 control |= __set_ctrl_poll(chan);
604 skb = bt_skb_alloc(count, GFP_ATOMIC);
608 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
609 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
610 lh->cid = cpu_to_le16(chan->dcid);
612 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
/* FCS covers the whole frame up to (but excluding) the FCS field. */
614 if (chan->fcs == L2CAP_FCS_CRC16) {
615 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
616 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
619 skb->priority = HCI_PRIO_MAX;
620 l2cap_do_send(chan, skb);
/* Send RNR while the local side is busy (recording that RNR went out),
 * otherwise RR, always acknowledging up to buffer_seq. */
623 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
625 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
626 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
627 set_bit(CONN_RNR_SENT, &chan->conn_state);
629 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
631 control |= __set_reqseq(chan, chan->buffer_seq);
633 l2cap_send_sframe(chan, control);
/* True when no Connect Request is outstanding for @chan. */
636 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
638 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off connection establishment for @chan: if the remote feature
 * mask is already known, send a Connect Request (once security passes);
 * otherwise first issue an Information Request with a timeout. */
641 static void l2cap_do_start(struct l2cap_chan *chan)
643 struct l2cap_conn *conn = chan->conn;
645 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
646 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
649 if (l2cap_chan_check_security(chan) &&
650 __l2cap_no_conn_pending(chan)) {
651 struct l2cap_conn_req req;
652 req.scid = cpu_to_le16(chan->scid);
655 chan->ident = l2cap_get_ident(conn);
656 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
658 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
/* Feature mask not yet requested: ask for it before connecting. */
662 struct l2cap_info_req req;
663 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
665 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
666 conn->info_ident = l2cap_get_ident(conn);
668 schedule_delayed_work(&conn->info_timer,
669 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
671 l2cap_send_cmd(conn, conn->info_ident,
672 L2CAP_INFO_REQ, sizeof(req), &req);
/* Non-zero when @mode (ERTM or streaming) is supported by both the
 * local feature mask and the remote @feat_mask. */
676 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
678 u32 local_feat_mask = l2cap_feat_mask;
680 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
683 case L2CAP_MODE_ERTM:
684 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
685 case L2CAP_MODE_STREAMING:
686 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP Disconnect Request for @chan, stopping ERTM timers
 * first, then move the channel to BT_DISCONN. */
692 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
695 struct l2cap_disconn_req req;
702 if (chan->mode == L2CAP_MODE_ERTM) {
703 __clear_retrans_timer(chan);
704 __clear_monitor_timer(chan);
705 __clear_ack_timer(chan);
708 req.dcid = cpu_to_le16(chan->dcid);
709 req.scid = cpu_to_le16(chan->scid);
710 l2cap_send_cmd(conn, l2cap_get_ident(conn),
711 L2CAP_DISCONN_REQ, sizeof(req), &req);
713 l2cap_state_change(chan, BT_DISCONN);
717 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its state machine: channels
 * in BT_CONNECT get a Connect Request (or are closed if their mode is
 * unsupported); channels in BT_CONNECT2 get a Connect Response whose
 * result depends on security and deferred setup. */
718 static void l2cap_conn_start(struct l2cap_conn *conn)
720 struct l2cap_chan *chan;
722 BT_DBG("conn %p", conn);
726 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
727 struct sock *sk = chan->sk;
731 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
736 if (chan->state == BT_CONNECT) {
737 struct l2cap_conn_req req;
739 if (!l2cap_chan_check_security(chan) ||
740 !__l2cap_no_conn_pending(chan)) {
745 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
746 && test_bit(CONF_STATE2_DEVICE,
747 &chan->conf_state)) {
748 /* l2cap_chan_close() calls list_del(chan)
749 * so release the lock */
750 l2cap_chan_close(chan, ECONNRESET);
755 req.scid = cpu_to_le16(chan->scid);
758 chan->ident = l2cap_get_ident(conn);
759 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
761 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
764 } else if (chan->state == BT_CONNECT2) {
765 struct l2cap_conn_rsp rsp;
767 rsp.scid = cpu_to_le16(chan->dcid);
768 rsp.dcid = cpu_to_le16(chan->scid);
770 if (l2cap_chan_check_security(chan)) {
/* Deferred setup: report "pending / authorization pending" and
 * wake the listening parent so userspace can accept. */
771 if (bt_sk(sk)->defer_setup) {
772 struct sock *parent = bt_sk(sk)->parent;
773 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
774 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
776 parent->sk_data_ready(parent, 0);
779 l2cap_state_change(chan, BT_CONFIG);
780 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
781 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
784 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
785 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
788 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
791 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
792 rsp.result != L2CAP_CR_SUCCESS) {
797 set_bit(CONF_REQ_SENT, &chan->conf_state);
798 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
799 l2cap_build_conf_req(chan, buf), buf);
800 chan->num_conf_req++;
809 /* Find socket with cid and source bdaddr.
810 * Returns closest match, locked.
/* Exact source-address matches return immediately; BDADDR_ANY listeners
 * are remembered as a fallback (returned via the elided tail). */
812 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
814 struct l2cap_chan *c, *c1 = NULL;
816 read_lock(&chan_list_lock);
818 list_for_each_entry(c, &chan_list, global_l) {
819 struct sock *sk = c->sk;
821 if (state && c->state != state)
824 if (c->scid == cid) {
826 if (!bacmp(&bt_sk(sk)->src, src)) {
827 read_unlock(&chan_list_lock);
832 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
837 read_unlock(&chan_list_lock);
/* Incoming LE link is up: if a socket is listening on the LE data CID,
 * spawn a child channel, enqueue it on the parent's accept queue and
 * mark it connected. */
842 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
844 struct sock *parent, *sk;
845 struct l2cap_chan *chan, *pchan;
849 /* Check if we have socket listening on cid */
850 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
859 /* Check for backlog size */
860 if (sk_acceptq_is_full(parent)) {
861 BT_DBG("backlog full %d", parent->sk_ack_backlog);
865 chan = pchan->ops->new_connection(pchan->data);
871 hci_conn_hold(conn->hcon);
873 bacpy(&bt_sk(sk)->src, conn->src);
874 bacpy(&bt_sk(sk)->dst, conn->dst);
876 bt_accept_enqueue(parent, sk);
878 l2cap_chan_add(conn, chan);
880 __set_chan_timer(chan, sk->sk_sndtimeo);
882 l2cap_state_change(chan, BT_CONNECTED);
883 parent->sk_data_ready(parent, 0);
886 release_sock(parent);
/* Mark @chan fully connected: reset config state, stop the channel
 * timer and notify the socket (and listening parent, if any). */
889 static void l2cap_chan_ready(struct l2cap_chan *chan)
891 struct sock *sk = chan->sk;
892 struct sock *parent = bt_sk(sk)->parent;
894 BT_DBG("sk %p, parent %p", sk, parent);
896 chan->conf_state = 0;
897 __clear_chan_timer(chan);
899 l2cap_state_change(chan, BT_CONNECTED);
900 sk->sk_state_change(sk);
903 parent->sk_data_ready(parent, 0);
/* HCI link established: handle LE accept/security, then walk all
 * channels — LE channels become ready once SMP security passes,
 * non-connection-oriented channels connect immediately, and channels in
 * BT_CONNECT start the L2CAP connect sequence. */
906 static void l2cap_conn_ready(struct l2cap_conn *conn)
908 struct l2cap_chan *chan;
910 BT_DBG("conn %p", conn);
912 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
913 l2cap_le_conn_ready(conn);
915 if (conn->hcon->out && conn->hcon->type == LE_LINK)
916 smp_conn_security(conn, conn->hcon->pending_sec_level);
920 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
921 struct sock *sk = chan->sk;
925 if (conn->hcon->type == LE_LINK) {
926 if (smp_conn_security(conn, chan->sec_level))
927 l2cap_chan_ready(chan);
929 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
930 __clear_chan_timer(chan);
931 l2cap_state_change(chan, BT_CONNECTED);
932 sk->sk_state_change(sk);
934 } else if (chan->state == BT_CONNECT)
935 l2cap_do_start(chan);
943 /* Notify sockets that we cannot guaranty reliability anymore */
/* Channels flagged FORCE_RELIABLE are notified (error delivery is in
 * the lines elided from this excerpt). */
944 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
946 struct l2cap_chan *chan;
948 BT_DBG("conn %p", conn);
952 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
953 struct sock *sk = chan->sk;
955 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
/* Information Request timed out: give up on the feature mask and
 * proceed with connection setup anyway. */
962 static void l2cap_info_timeout(struct work_struct *work)
964 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
967 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
968 conn->info_ident = 0;
970 l2cap_conn_start(conn);
/* Tear down the L2CAP connection on @hcon: delete every channel with
 * error @err, free the HCI channel, cancel pending timers/SMP state and
 * detach from the hci_conn. */
973 static void l2cap_conn_del(struct hci_conn *hcon, int err)
975 struct l2cap_conn *conn = hcon->l2cap_data;
976 struct l2cap_chan *chan, *l;
982 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
984 kfree_skb(conn->rx_skb);
987 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
990 l2cap_chan_del(chan, err);
992 chan->ops->close(chan->data);
995 hci_chan_del(conn->hchan);
997 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
998 cancel_delayed_work_sync(&conn->info_timer);
1000 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1001 cancel_delayed_work_sync(&conn->security_timer);
1002 smp_chan_destroy(conn);
1005 hcon->l2cap_data = NULL;
/* SMP security procedure timed out — drop the whole connection. */
1009 static void security_timeout(struct work_struct *work)
1011 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1012 security_timer.work);
1014 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocate an HCI
 * channel, pick the MTU from the link type, and arm either the SMP
 * security timer (LE) or the info-request timer (ACL). */
1017 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1019 struct l2cap_conn *conn = hcon->l2cap_data;
1020 struct hci_chan *hchan;
1025 hchan = hci_chan_create(hcon);
1029 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
/* Allocation failed: release the just-created HCI channel. */
1031 hci_chan_del(hchan);
1035 hcon->l2cap_data = conn;
1037 conn->hchan = hchan;
1039 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1041 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1042 conn->mtu = hcon->hdev->le_mtu;
1044 conn->mtu = hcon->hdev->acl_mtu;
1046 conn->src = &hcon->hdev->bdaddr;
1047 conn->dst = &hcon->dst;
1049 conn->feat_mask = 0;
1051 spin_lock_init(&conn->lock);
1053 INIT_LIST_HEAD(&conn->chan_l);
1055 if (hcon->type == LE_LINK)
1056 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1058 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1060 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1065 /* ---- Socket interface ---- */
1067 /* Find socket with psm and source bdaddr.
1068 * Returns closest match.
/* Exact source-address matches win; BDADDR_ANY listeners are kept as a
 * fallback (returned via the elided tail). */
1070 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1072 struct l2cap_chan *c, *c1 = NULL;
1074 read_lock(&chan_list_lock);
1076 list_for_each_entry(c, &chan_list, global_l) {
1077 struct sock *sk = c->sk;
1079 if (state && c->state != state)
1082 if (c->psm == psm) {
1084 if (!bacmp(&bt_sk(sk)->src, src)) {
1085 read_unlock(&chan_list_lock);
1090 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1095 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection on @chan to @dst with PSM @psm
 * or fixed CID @cid: validate the parameters and channel mode, create
 * the HCI link (LE or ACL), attach the channel to the connection and
 * start the connect sequence if the link is already up. */
1100 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1102 struct sock *sk = chan->sk;
1103 bdaddr_t *src = &bt_sk(sk)->src;
1104 struct l2cap_conn *conn;
1105 struct hci_conn *hcon;
1106 struct hci_dev *hdev;
1110 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1113 hdev = hci_get_route(dst, src);
1115 return -EHOSTUNREACH;
1121 /* PSM must be odd and lsb of upper byte must be 0 */
1122 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1123 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID. */
1128 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1133 switch (chan->mode) {
1134 case L2CAP_MODE_BASIC:
1136 case L2CAP_MODE_ERTM:
1137 case L2CAP_MODE_STREAMING:
1146 switch (sk->sk_state) {
1150 /* Already connecting */
1155 /* Already connected */
1169 /* Set destination address and psm */
1170 bacpy(&bt_sk(sk)->dst, dst);
1174 auth_type = l2cap_get_auth_type(chan);
1176 if (chan->dcid == L2CAP_CID_LE_DATA)
1177 hcon = hci_connect(hdev, LE_LINK, dst,
1178 chan->sec_level, auth_type);
1180 hcon = hci_connect(hdev, ACL_LINK, dst,
1181 chan->sec_level, auth_type);
1184 err = PTR_ERR(hcon);
1188 conn = l2cap_conn_add(hcon, 0);
1195 /* Update source addr of the socket */
1196 bacpy(src, conn->src);
1198 l2cap_chan_add(conn, chan);
1200 l2cap_state_change(chan, BT_CONNECT);
1201 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Link already up: raw channels can connect immediately (once security
 * passes); otherwise run the normal L2CAP start sequence. */
1203 if (hcon->state == BT_CONNECTED) {
1204 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1205 __clear_chan_timer(chan);
1206 if (l2cap_chan_check_security(chan))
1207 l2cap_state_change(chan, BT_CONNECTED);
1209 l2cap_do_start(chan);
1215 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged, the connection drops, a signal arrives or the socket
 * reports an error. */
1220 int __l2cap_wait_ack(struct sock *sk)
1222 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1223 DECLARE_WAITQUEUE(wait, current);
1227 add_wait_queue(sk_sleep(sk), &wait);
1228 set_current_state(TASK_INTERRUPTIBLE);
1229 while (chan->unacked_frames > 0 && chan->conn) {
1233 if (signal_pending(current)) {
1234 err = sock_intr_errno(timeo);
1239 timeo = schedule_timeout(timeo);
1241 set_current_state(TASK_INTERRUPTIBLE);
1243 err = sock_error(sk);
1247 set_current_state(TASK_RUNNING);
1248 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer fired: disconnect after remote_max_tx retries,
 * otherwise re-arm and poll the peer with RR/RNR (P-bit). */
1252 static void l2cap_monitor_timeout(struct work_struct *work)
1254 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1255 monitor_timer.work);
1256 struct sock *sk = chan->sk;
1258 BT_DBG("chan %p", chan);
1261 if (chan->retry_count >= chan->remote_max_tx) {
1262 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1267 chan->retry_count++;
1268 __set_monitor_timer(chan);
1270 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer fired: switch to the monitor timer, enter
 * the WAIT_F state and poll the peer. */
1274 static void l2cap_retrans_timeout(struct work_struct *work)
1276 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1277 retrans_timer.work);
1278 struct sock *sk = chan->sk;
1280 BT_DBG("chan %p", chan);
1283 chan->retry_count = 1;
1284 __set_monitor_timer(chan);
1286 set_bit(CONN_WAIT_F, &chan->conn_state);
1288 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Free queued I-frames from tx_q that the peer has acknowledged
 * (tx_seq up to, but not including, expected_ack_seq); stop the
 * retransmission timer once nothing remains unacked. */
1292 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1294 struct sk_buff *skb;
1296 while ((skb = skb_peek(&chan->tx_q)) &&
1297 chan->unacked_frames) {
1298 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1301 skb = skb_dequeue(&chan->tx_q);
1304 chan->unacked_frames--;
1307 if (!chan->unacked_frames)
1308 __clear_retrans_timer(chan);
/* Streaming mode: drain tx_q, stamping each frame with the next TX
 * sequence number (and FCS when enabled) before sending — no
 * retransmission bookkeeping. */
1311 static void l2cap_streaming_send(struct l2cap_chan *chan)
1313 struct sk_buff *skb;
1317 while ((skb = skb_dequeue(&chan->tx_q))) {
1318 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1319 control |= __set_txseq(chan, chan->next_tx_seq);
1320 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1322 if (chan->fcs == L2CAP_FCS_CRC16) {
1323 fcs = crc16(0, (u8 *)skb->data,
1324 skb->len - L2CAP_FCS_SIZE);
1325 put_unaligned_le16(fcs,
1326 skb->data + skb->len - L2CAP_FCS_SIZE);
1329 l2cap_do_send(chan, skb);
1331 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the queued I-frame with sequence number @tx_seq: locate it
 * in tx_q, clone it, rewrite the control field (current reqseq, F-bit
 * if pending) and FCS, and send the clone.  Disconnects if the frame
 * already reached remote_max_tx retries. */
1335 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1337 struct sk_buff *skb, *tx_skb;
1341 skb = skb_peek(&chan->tx_q);
1345 while (bt_cb(skb)->tx_seq != tx_seq) {
1346 if (skb_queue_is_last(&chan->tx_q, skb))
1349 skb = skb_queue_next(&chan->tx_q, skb);
1352 if (chan->remote_max_tx &&
1353 bt_cb(skb)->retries == chan->remote_max_tx) {
1354 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Clone so the original stays queued for further retransmissions. */
1358 tx_skb = skb_clone(skb, GFP_ATOMIC);
1359 bt_cb(skb)->retries++;
1361 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1362 control &= __get_sar_mask(chan);
1364 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1365 control |= __set_ctrl_final(chan);
1367 control |= __set_reqseq(chan, chan->buffer_seq);
1368 control |= __set_txseq(chan, tx_seq);
1370 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1372 if (chan->fcs == L2CAP_FCS_CRC16) {
1373 fcs = crc16(0, (u8 *)tx_skb->data,
1374 tx_skb->len - L2CAP_FCS_SIZE);
1375 put_unaligned_le16(fcs,
1376 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1379 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send clones of frames from tx_send_head while the
 * TX window has room, stamping control fields and FCS, arming the
 * retransmission timer and tracking unacked/sent counters. */
1382 static int l2cap_ertm_send(struct l2cap_chan *chan)
1384 struct sk_buff *skb, *tx_skb;
1389 if (chan->state != BT_CONNECTED)
1392 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1394 if (chan->remote_max_tx &&
1395 bt_cb(skb)->retries == chan->remote_max_tx) {
1396 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1400 tx_skb = skb_clone(skb, GFP_ATOMIC);
1402 bt_cb(skb)->retries++;
1404 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1405 control &= __get_sar_mask(chan);
1407 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1408 control |= __set_ctrl_final(chan);
1410 control |= __set_reqseq(chan, chan->buffer_seq);
1411 control |= __set_txseq(chan, chan->next_tx_seq);
1413 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed over and written to skb->data (the
 * original) rather than tx_skb->data — equivalent here only because
 * skb_clone() shares the data buffer; confirm against the full file. */
1415 if (chan->fcs == L2CAP_FCS_CRC16) {
1416 fcs = crc16(0, (u8 *)skb->data,
1417 tx_skb->len - L2CAP_FCS_SIZE);
1418 put_unaligned_le16(fcs, skb->data +
1419 tx_skb->len - L2CAP_FCS_SIZE);
1422 l2cap_do_send(chan, tx_skb);
1424 __set_retrans_timer(chan);
1426 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1428 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1430 if (bt_cb(skb)->retries == 1) {
1431 chan->unacked_frames++;
1434 __clear_ack_timer(chan);
1437 chan->frames_sent++;
1439 if (skb_queue_is_last(&chan->tx_q, skb))
1440 chan->tx_send_head = NULL;
1442 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the head of tx_q and the TX sequence to
 * the last acknowledged value, then resend via l2cap_ertm_send(). */
1448 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1452 if (!skb_queue_empty(&chan->tx_q))
1453 chan->tx_send_head = chan->tx_q.next;
1455 chan->next_tx_seq = chan->expected_ack_seq;
1456 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy; otherwise
 * try to piggy-back the ack on pending I-frames, falling back to an
 * explicit RR supervisory frame. */
1460 static void __l2cap_send_ack(struct l2cap_chan *chan)
1464 control |= __set_reqseq(chan, chan->buffer_seq);
1466 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1467 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1468 set_bit(CONN_RNR_SENT, &chan->conn_state);
1469 l2cap_send_sframe(chan, control);
1473 if (l2cap_ertm_send(chan) > 0)
1476 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1477 l2cap_send_sframe(chan, control);
/* Cancel the pending ack timer and acknowledge immediately. */
1480 static void l2cap_send_ack(struct l2cap_chan *chan)
1482 __clear_ack_timer(chan);
1483 __l2cap_send_ack(chan);
/* Send a final (F-bit set) SREJ for the last entry on the SREJ list. */
1486 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1488 struct srej_list *tail;
1491 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1492 control |= __set_ctrl_final(chan);
1494 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1495 control |= __set_reqseq(chan, tail->tx_seq);
1497 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb: the first @count bytes
 * go into the head, the remainder into a chain of MTU-sized fragment
 * skbs allocated via the channel's alloc_skb op. */
1500 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1502 struct l2cap_conn *conn = chan->conn;
1503 struct sk_buff **frag;
1506 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1512 /* Continuation fragments (no L2CAP header) */
1513 frag = &skb_shinfo(skb)->frag_list;
1515 count = min_t(unsigned int, conn->mtu, len);
1517 *frag = chan->ops->alloc_skb(chan, count,
1518 msg->msg_flags & MSG_DONTWAIT, &err);
1522 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1525 (*frag)->priority = skb->priority;
1530 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header plus the
 * 2-byte PSM, then the user payload from @msg. */
1536 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1537 struct msghdr *msg, size_t len,
1540 struct l2cap_conn *conn = chan->conn;
1541 struct sk_buff *skb;
1542 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1543 struct l2cap_hdr *lh;
1545 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1547 count = min_t(unsigned int, (conn->mtu - hlen), len);
1549 skb = chan->ops->alloc_skb(chan, count + hlen,
1550 msg->msg_flags & MSG_DONTWAIT, &err);
1553 return ERR_PTR(err);
1555 skb->priority = priority;
1557 /* Create L2CAP header */
1558 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1559 lh->cid = cpu_to_le16(chan->dcid);
1560 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1561 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1563 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1564 if (unlikely(err < 0)) {
1566 return ERR_PTR(err);
/* Build a basic-mode B-frame PDU: plain L2CAP header followed by the
 * user payload from @msg. */
1571 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1572 struct msghdr *msg, size_t len,
1575 struct l2cap_conn *conn = chan->conn;
1576 struct sk_buff *skb;
1577 int err, count, hlen = L2CAP_HDR_SIZE;
1578 struct l2cap_hdr *lh;
1580 BT_DBG("chan %p len %d", chan, (int)len);
1582 count = min_t(unsigned int, (conn->mtu - hlen), len);
1584 skb = chan->ops->alloc_skb(chan, count + hlen,
1585 msg->msg_flags & MSG_DONTWAIT, &err);
1588 return ERR_PTR(err);
1590 skb->priority = priority;
1592 /* Create L2CAP header */
1593 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1594 lh->cid = cpu_to_le16(chan->dcid);
1595 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1597 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1598 if (unlikely(err < 0)) {
1600 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: basic header, enhanced or extended
 * control field, optional SDU-length field (for SAR start frames), the
 * payload, and an FCS placeholder when CRC16 is enabled.
 * @control: pre-built control field; @sdulen: non-zero for SAR START.
 * NOTE(review): SOURCE is truncated (connection check, braces, frees and
 * the final return are not visible).
 */
1605 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1606 struct msghdr *msg, size_t len,
1607 u32 control, u16 sdulen)
1609 struct l2cap_conn *conn = chan->conn;
1610 struct sk_buff *skb;
1611 int err, count, hlen;
1612 struct l2cap_hdr *lh;
1614 BT_DBG("chan %p len %d", chan, (int)len);
1617 return ERR_PTR(-ENOTCONN);
/* Extended control field (4 bytes) vs enhanced (2 bytes) header size. */
1619 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1620 hlen = L2CAP_EXT_HDR_SIZE;
1622 hlen = L2CAP_ENH_HDR_SIZE;
/* SAR START frames carry a 2-byte SDU length field. */
1625 hlen += L2CAP_SDULEN_SIZE;
1627 if (chan->fcs == L2CAP_FCS_CRC16)
1628 hlen += L2CAP_FCS_SIZE;
1630 count = min_t(unsigned int, (conn->mtu - hlen), len);
1632 skb = chan->ops->alloc_skb(chan, count + hlen,
1633 msg->msg_flags & MSG_DONTWAIT, &err);
1636 return ERR_PTR(err);
1638 /* Create L2CAP header */
1639 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1640 lh->cid = cpu_to_le16(chan->dcid);
1641 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Control field width depends on FLAG_EXT_CTRL via __ctrl_size(). */
1643 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1646 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1648 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1649 if (unlikely(err < 0)) {
1651 return ERR_PTR(err);
/* FCS placeholder: real CRC is filled in at transmit time. */
1654 if (chan->fcs == L2CAP_FCS_CRC16)
1655 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1657 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than remote_mps into a START / CONTINUE* / END
 * sequence of I-frames, building them on a local queue and splicing the
 * whole sequence onto chan->tx_q only once all segments are built (so a
 * mid-sequence allocation failure leaves tx_q untouched).
 * NOTE(review): SOURCE is truncated (loop header and some error paths
 * are not visible).
 */
1661 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1663 struct sk_buff *skb;
1664 struct sk_buff_head sar_queue;
1668 skb_queue_head_init(&sar_queue);
/* First segment: SAR=START, carries the total SDU length. */
1669 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1670 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1672 return PTR_ERR(skb);
1674 __skb_queue_tail(&sar_queue, skb);
1675 len -= chan->remote_mps;
1676 size += chan->remote_mps;
/* Middle segments use CONTINUE; the final one uses END. */
1681 if (len > chan->remote_mps) {
1682 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1683 buflen = chan->remote_mps;
1685 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1689 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure drop every already-built segment. */
1691 skb_queue_purge(&sar_queue);
1692 return PTR_ERR(skb);
1695 __skb_queue_tail(&sar_queue, skb);
/* Commit the complete sequence to the channel transmit queue. */
1699 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1700 if (chan->tx_send_head == NULL)
1701 chan->tx_send_head = sar_queue.next;
/* Entry point for sending user data on a channel. Dispatches on channel
 * type/mode: connectionless PDU, basic-mode B-frame, or ERTM/streaming
 * I-frames (segmented when the SDU exceeds remote_mps).
 * NOTE(review): SOURCE is truncated (returns, braces and the ERTM busy
 * handling are partially missing).
 */
1706 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1709 struct sk_buff *skb;
1713 /* Connectionless channel */
1714 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1715 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1717 return PTR_ERR(skb);
1719 l2cap_do_send(chan, skb);
1723 switch (chan->mode) {
1724 case L2CAP_MODE_BASIC:
1725 /* Check outgoing MTU */
1726 if (len > chan->omtu)
1729 /* Create a basic PDU */
1730 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1732 return PTR_ERR(skb);
1734 l2cap_do_send(chan, skb);
1738 case L2CAP_MODE_ERTM:
1739 case L2CAP_MODE_STREAMING:
1740 /* Entire SDU fits into one PDU */
1741 if (len <= chan->remote_mps) {
1742 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1743 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1746 return PTR_ERR(skb);
1748 __skb_queue_tail(&chan->tx_q, skb);
1750 if (chan->tx_send_head == NULL)
1751 chan->tx_send_head = skb;
1754 /* Segment SDU into multiples PDUs */
1755 err = l2cap_sar_segment_sdu(chan, msg, len);
/* Streaming mode transmits immediately; ERTM defers while the
 * remote is busy or we are waiting for an F-bit. */
1760 if (chan->mode == L2CAP_MODE_STREAMING) {
1761 l2cap_streaming_send(chan);
1766 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1767 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1772 err = l2cap_ertm_send(chan);
1779 BT_DBG("bad state %1.1x", chan->mode);
1786 /* Copy frame to all raw sockets on that connection */
/* Clones the skb once per raw channel (RCU list walk) and hands the
 * clone to the channel's recv op.
 * NOTE(review): SOURCE is truncated (rcu_read_lock/unlock, the
 * "skip originating socket" test and clone-failure handling are not
 * visible).
 */
1787 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1789 struct sk_buff *nskb;
1790 struct l2cap_chan *chan;
1792 BT_DBG("conn %p", conn);
1796 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1797 struct sock *sk = chan->sk;
/* Only raw channels receive copies. */
1798 if (chan->chan_type != L2CAP_CHAN_RAW)
1801 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: may run in softirq context. */
1804 nskb = skb_clone(skb, GFP_ATOMIC);
1808 if (chan->ops->recv(chan->data, nskb))
1815 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill a signalling-command skb: L2CAP header (signalling
 * CID, LE or BR/EDR), command header, then @dlen bytes of @data, with
 * overflow beyond the connection MTU placed in frag_list fragments.
 * NOTE(review): SOURCE is truncated (loop header, failure cleanup and
 * the return are not visible).
 */
1816 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1817 u8 code, u8 ident, u16 dlen, void *data)
1819 struct sk_buff *skb, **frag;
1820 struct l2cap_cmd_hdr *cmd;
1821 struct l2cap_hdr *lh;
1824 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1825 conn, code, ident, dlen);
1827 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1828 count = min_t(unsigned int, conn->mtu, len);
1830 skb = bt_skb_alloc(count, GFP_ATOMIC);
1834 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1835 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the LE signalling CID, BR/EDR the classic one. */
1837 if (conn->hcon->type == LE_LINK)
1838 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1840 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1842 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1845 cmd->len = cpu_to_le16(dlen);
/* Copy as much command data as fits in the head skb. */
1848 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1849 memcpy(skb_put(skb, count), data, count);
1855 /* Continuation fragments (no L2CAP header) */
1856 frag = &skb_shinfo(skb)->frag_list;
1858 count = min_t(unsigned int, conn->mtu, len);
1860 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1864 memcpy(skb_put(*frag, count), data, count);
1869 frag = &(*frag)->next;
/* Decode one TLV configuration option at *ptr, returning its type,
 * length and value; advances *ptr past the option (advance not visible
 * in this truncated SOURCE). 1/2/4-byte values are read inline; larger
 * options are returned as a pointer cast into *val.
 */
1879 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1881 struct l2cap_conf_opt *opt = *ptr;
1884 len = L2CAP_CONF_OPT_SIZE + opt->len;
1892 *val = *((u8 *) opt->val);
1896 *val = get_unaligned_le16(opt->val);
1900 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer, not a copy. */
1904 *val = (unsigned long) opt->val;
1908 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one TLV configuration option at *ptr and advance *ptr past it.
 * For len 1/2/4 @val is the value itself; otherwise @val is treated as
 * a pointer to @len bytes to copy.
 */
1912 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1914 struct l2cap_conf_opt *opt = *ptr;
1916 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1923 *((u8 *) opt->val) = val;
1927 put_unaligned_le16(val, opt->val);
1931 put_unaligned_le32(val, opt->val);
/* Variable-length value: val is a pointer here. */
1935 memcpy(opt->val, (void *) val, len);
1939 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification from the channel's local QoS
 * parameters (mode-dependent) and append it as an EFS config option.
 * NOTE(review): SOURCE is truncated — the default case and some
 * streaming-mode field assignments are not visible.
 */
1942 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1944 struct l2cap_conf_efs efs;
1946 switch (chan->mode) {
1947 case L2CAP_MODE_ERTM:
1948 efs.id = chan->local_id;
1949 efs.stype = chan->local_stype;
1950 efs.msdu = cpu_to_le16(chan->local_msdu);
1951 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
/* ERTM uses spec default access latency / flush timeout. */
1952 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1953 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1956 case L2CAP_MODE_STREAMING:
/* Streaming always advertises best-effort service. */
1958 efs.stype = L2CAP_SERV_BESTEFFORT;
1959 efs.msdu = cpu_to_le16(chan->local_msdu);
1960 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1969 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1970 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: sends a pending ack
 * under the socket lock, then drops the timer's channel reference.
 */
1973 static void l2cap_ack_timeout(struct work_struct *work)
1975 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1978 BT_DBG("chan %p", chan);
1980 lock_sock(chan->sk);
1981 __l2cap_send_ack(chan);
1982 release_sock(chan->sk);
/* Balances the reference taken when the ack timer was armed. */
1984 l2cap_chan_put(chan);
/* Reset ERTM sequence state and initialise the retransmission, monitor
 * and ack timers plus the SREJ queue/list for a channel entering
 * ERTM operation.
 */
1987 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1989 chan->expected_ack_seq = 0;
1990 chan->unacked_frames = 0;
1991 chan->buffer_seq = 0;
1992 chan->num_acked = 0;
1993 chan->frames_sent = 0;
1995 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
1996 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
1997 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
1999 skb_queue_head_init(&chan->srej_q);
2001 INIT_LIST_HEAD(&chan->srej_l);
/* Pick the channel mode to request: keep ERTM/streaming only if the
 * remote's feature mask supports it, otherwise fall back to basic mode.
 * NOTE(review): the "supported" return path is not visible in this
 * truncated SOURCE.
 */
2004 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2007 case L2CAP_MODE_STREAMING:
2008 case L2CAP_MODE_ERTM:
2009 if (l2cap_mode_supported(mode, remote_feat_mask))
2013 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled
 * and the remote advertising the feature.
 */
2017 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2019 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the remote advertising the feature.
 */
2022 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2024 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Choose the TX window configuration: switch to the extended control
 * field (larger window ceiling) when the requested window exceeds the
 * default and EWS is supported; otherwise clamp to the default window.
 */
2027 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2029 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2030 __l2cap_ews_supported(chan)) {
2031 /* use extended control field */
2032 set_bit(FLAG_EXT_CTRL, &chan->flags);
2033 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2035 chan->tx_win = min_t(u16, chan->tx_win,
2036 L2CAP_DEFAULT_TX_WINDOW);
2037 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configuration Request into @data: MTU option (if
 * non-default), then mode-specific RFC / EFS / FCS / EWS options.
 * Returns the request length (return not visible in this truncated
 * SOURCE).
 */
2041 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2043 struct l2cap_conf_req *req = data;
2044 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2045 void *ptr = req->data;
2048 BT_DBG("chan %p", chan);
/* Mode selection happens only on the first config exchange. */
2050 if (chan->num_conf_req || chan->num_conf_rsp)
2053 switch (chan->mode) {
2054 case L2CAP_MODE_STREAMING:
2055 case L2CAP_MODE_ERTM:
2056 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2059 if (__l2cap_efs_supported(chan))
2060 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Fall back to a mode the remote supports. */
2064 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2069 if (chan->imtu != L2CAP_DEFAULT_MTU)
2070 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2072 switch (chan->mode) {
2073 case L2CAP_MODE_BASIC:
/* If the remote supports neither ERTM nor streaming there is no
 * point sending an RFC option at all. */
2074 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2075 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2078 rfc.mode = L2CAP_MODE_BASIC;
2080 rfc.max_transmit = 0;
2081 rfc.retrans_timeout = 0;
2082 rfc.monitor_timeout = 0;
2083 rfc.max_pdu_size = 0;
2085 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2086 (unsigned long) &rfc);
2089 case L2CAP_MODE_ERTM:
2090 rfc.mode = L2CAP_MODE_ERTM;
2091 rfc.max_transmit = chan->max_tx;
/* Timeouts are set by the acceptor, so request zero. */
2092 rfc.retrans_timeout = 0;
2093 rfc.monitor_timeout = 0;
/* Cap PDU size so a worst-case frame still fits in the ACL MTU. */
2095 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2096 L2CAP_EXT_HDR_SIZE -
2099 rfc.max_pdu_size = cpu_to_le16(size);
2101 l2cap_txwin_setup(chan);
2103 rfc.txwin_size = min_t(u16, chan->tx_win,
2104 L2CAP_DEFAULT_TX_WINDOW);
2106 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2107 (unsigned long) &rfc);
2109 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2110 l2cap_add_opt_efs(&ptr, chan);
2112 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2115 if (chan->fcs == L2CAP_FCS_NONE ||
2116 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2117 chan->fcs = L2CAP_FCS_NONE;
2118 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
/* Extended window option is sent only with extended control. */
2121 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2122 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2126 case L2CAP_MODE_STREAMING:
2127 rfc.mode = L2CAP_MODE_STREAMING;
2129 rfc.max_transmit = 0;
2130 rfc.retrans_timeout = 0;
2131 rfc.monitor_timeout = 0;
2133 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2134 L2CAP_EXT_HDR_SIZE -
2137 rfc.max_pdu_size = cpu_to_le16(size);
2139 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2140 (unsigned long) &rfc);
2142 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2143 l2cap_add_opt_efs(&ptr, chan);
2145 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2148 if (chan->fcs == L2CAP_FCS_NONE ||
2149 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2150 chan->fcs = L2CAP_FCS_NONE;
2151 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2156 req->dcid = cpu_to_le16(chan->dcid);
2157 req->flags = cpu_to_le16(0);
/* Parse the buffered incoming Configuration Request (chan->conf_req)
 * and build the response into @data: walk the TLV options, negotiate
 * mode/MTU/RFC/EFS/EWS, then fill the mode-specific accepted values.
 * Returns the response length (return not visible in this truncated
 * SOURCE).
 */
2162 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2164 struct l2cap_conf_rsp *rsp = data;
2165 void *ptr = rsp->data;
2166 void *req = chan->conf_req;
2167 int len = chan->conf_len;
2168 int type, hint, olen;
2170 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2171 struct l2cap_conf_efs efs;
2173 u16 mtu = L2CAP_DEFAULT_MTU;
2174 u16 result = L2CAP_CONF_SUCCESS;
2177 BT_DBG("chan %p", chan);
/* --- Pass 1: decode every option the peer sent --- */
2179 while (len >= L2CAP_CONF_OPT_SIZE) {
2180 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; mandatory ones must be understood. */
2182 hint = type & L2CAP_CONF_HINT;
2183 type &= L2CAP_CONF_MASK;
2186 case L2CAP_CONF_MTU:
2190 case L2CAP_CONF_FLUSH_TO:
2191 chan->flush_to = val;
2194 case L2CAP_CONF_QOS:
2197 case L2CAP_CONF_RFC:
2198 if (olen == sizeof(rfc))
2199 memcpy(&rfc, (void *) val, olen);
2202 case L2CAP_CONF_FCS:
2203 if (val == L2CAP_FCS_NONE)
2204 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2207 case L2CAP_CONF_EFS:
2209 if (olen == sizeof(efs))
2210 memcpy(&efs, (void *) val, olen);
2213 case L2CAP_CONF_EWS:
2215 return -ECONNREFUSED;
2217 set_bit(FLAG_EXT_CTRL, &chan->flags);
2218 set_bit(CONF_EWS_RECV, &chan->conf_state);
2219 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2220 chan->remote_tx_win = val;
/* Unknown mandatory option: echo its type back as UNKNOWN. */
2227 result = L2CAP_CONF_UNKNOWN;
2228 *((u8 *) ptr++) = type;
/* --- Mode negotiation (first exchange only) --- */
2233 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2236 switch (chan->mode) {
2237 case L2CAP_MODE_STREAMING:
2238 case L2CAP_MODE_ERTM:
2239 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2240 chan->mode = l2cap_select_mode(rfc.mode,
2241 chan->conn->feat_mask);
2246 if (__l2cap_efs_supported(chan))
2247 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2249 return -ECONNREFUSED;
2252 if (chan->mode != rfc.mode)
2253 return -ECONNREFUSED;
/* Peer proposed a different mode: reject with our mode in RFC. */
2259 if (chan->mode != rfc.mode) {
2260 result = L2CAP_CONF_UNACCEPT;
2261 rfc.mode = chan->mode;
2263 if (chan->num_conf_rsp == 1)
2264 return -ECONNREFUSED;
2266 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2267 sizeof(rfc), (unsigned long) &rfc);
2270 if (result == L2CAP_CONF_SUCCESS) {
2271 /* Configure output options and let the other side know
2272 * which ones we don't like. */
2274 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2275 result = L2CAP_CONF_UNACCEPT;
2278 set_bit(CONF_MTU_DONE, &chan->conf_state);
2280 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must match ours unless one side is NO TRAFFIC. */
2283 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2284 efs.stype != L2CAP_SERV_NOTRAFIC &&
2285 efs.stype != chan->local_stype) {
2287 result = L2CAP_CONF_UNACCEPT;
2289 if (chan->num_conf_req >= 1)
2290 return -ECONNREFUSED;
2292 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2294 (unsigned long) &efs);
2296 /* Send PENDING Conf Rsp */
2297 result = L2CAP_CONF_PENDING;
2298 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2303 case L2CAP_MODE_BASIC:
2304 chan->fcs = L2CAP_FCS_NONE;
2305 set_bit(CONF_MODE_DONE, &chan->conf_state);
2308 case L2CAP_MODE_ERTM:
/* Remote TX window from RFC unless an EWS option already set it. */
2309 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2310 chan->remote_tx_win = rfc.txwin_size;
2312 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2314 chan->remote_max_tx = rfc.max_transmit;
2316 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2318 L2CAP_EXT_HDR_SIZE -
2321 rfc.max_pdu_size = cpu_to_le16(size);
2322 chan->remote_mps = size;
/* NOTE(review): le16_to_cpu() on host-order constants looks like it
 * should be cpu_to_le16() — the RFC option fields are little-endian
 * on the wire. Verify against the upstream fix before changing. */
2324 rfc.retrans_timeout =
2325 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2326 rfc.monitor_timeout =
2327 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2329 set_bit(CONF_MODE_DONE, &chan->conf_state);
2331 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2332 sizeof(rfc), (unsigned long) &rfc);
2334 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2335 chan->remote_id = efs.id;
2336 chan->remote_stype = efs.stype;
2337 chan->remote_msdu = le16_to_cpu(efs.msdu);
2338 chan->remote_flush_to =
2339 le32_to_cpu(efs.flush_to);
2340 chan->remote_acc_lat =
2341 le32_to_cpu(efs.acc_lat);
2342 chan->remote_sdu_itime =
2343 le32_to_cpu(efs.sdu_itime);
2344 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2345 sizeof(efs), (unsigned long) &efs);
2349 case L2CAP_MODE_STREAMING:
2350 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2352 L2CAP_EXT_HDR_SIZE -
2355 rfc.max_pdu_size = cpu_to_le16(size);
2356 chan->remote_mps = size;
2358 set_bit(CONF_MODE_DONE, &chan->conf_state);
2360 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2361 sizeof(rfc), (unsigned long) &rfc);
/* Unsupported mode: reject with our mode echoed in the RFC. */
2366 result = L2CAP_CONF_UNACCEPT;
2368 memset(&rfc, 0, sizeof(rfc));
2369 rfc.mode = chan->mode;
2372 if (result == L2CAP_CONF_SUCCESS)
2373 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2375 rsp->scid = cpu_to_le16(chan->dcid);
2376 rsp->result = cpu_to_le16(result);
2377 rsp->flags = cpu_to_le16(0x0000);
/* Parse a Configuration Response from the peer and build a follow-up
 * Configuration Request into @data, adopting the values the peer
 * counter-proposed. *result may be downgraded (e.g. UNACCEPT for a
 * too-small MTU). Returns the new request length (return not visible
 * in this truncated SOURCE).
 */
2382 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2384 struct l2cap_conf_req *req = data;
2385 void *ptr = req->data;
2388 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2389 struct l2cap_conf_efs efs;
2391 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2393 while (len >= L2CAP_CONF_OPT_SIZE) {
2394 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2397 case L2CAP_CONF_MTU:
/* Refuse MTUs below the spec minimum; propose the minimum. */
2398 if (val < L2CAP_DEFAULT_MIN_MTU) {
2399 *result = L2CAP_CONF_UNACCEPT;
2400 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2403 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2406 case L2CAP_CONF_FLUSH_TO:
2407 chan->flush_to = val;
2408 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2412 case L2CAP_CONF_RFC:
2413 if (olen == sizeof(rfc))
2414 memcpy(&rfc, (void *)val, olen);
/* A state-2 device cannot accept a mode change. */
2416 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2417 rfc.mode != chan->mode)
2418 return -ECONNREFUSED;
2422 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2423 sizeof(rfc), (unsigned long) &rfc);
2426 case L2CAP_CONF_EWS:
2427 chan->tx_win = min_t(u16, val,
2428 L2CAP_DEFAULT_EXT_WINDOW);
2429 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2433 case L2CAP_CONF_EFS:
2434 if (olen == sizeof(efs))
2435 memcpy(&efs, (void *)val, olen);
2437 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2438 efs.stype != L2CAP_SERV_NOTRAFIC &&
2439 efs.stype != chan->local_stype)
2440 return -ECONNREFUSED;
2442 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2443 sizeof(efs), (unsigned long) &efs);
/* Basic mode cannot be renegotiated away by the peer. */
2448 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2449 return -ECONNREFUSED;
2451 chan->mode = rfc.mode;
/* Adopt the negotiated timers/MPS (and EFS values) on success. */
2453 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2455 case L2CAP_MODE_ERTM:
2456 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2457 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2458 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2460 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2461 chan->local_msdu = le16_to_cpu(efs.msdu);
2462 chan->local_sdu_itime =
2463 le32_to_cpu(efs.sdu_itime);
2464 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2465 chan->local_flush_to =
2466 le32_to_cpu(efs.flush_to);
2470 case L2CAP_MODE_STREAMING:
2471 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2475 req->dcid = cpu_to_le16(chan->dcid);
2476 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal Configuration Response header (scid/result/flags) into
 * @data. Returns the response length (return not visible in this
 * truncated SOURCE).
 */
2481 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2483 struct l2cap_conf_rsp *rsp = data;
2484 void *ptr = rsp->data;
2486 BT_DBG("chan %p", chan);
2488 rsp->scid = cpu_to_le16(chan->dcid);
2489 rsp->result = cpu_to_le16(result);
2490 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connection Response (success) for a channel whose
 * acceptance was postponed, then kick off configuration by sending our
 * Configuration Request unless one was already sent.
 */
2495 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2497 struct l2cap_conn_rsp rsp;
2498 struct l2cap_conn *conn = chan->conn;
2501 rsp.scid = cpu_to_le16(chan->dcid);
2502 rsp.dcid = cpu_to_le16(chan->scid);
2503 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2504 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2505 l2cap_send_cmd(conn, chan->ident,
2506 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only the first caller proceeds to send the config request. */
2508 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2511 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2512 l2cap_build_conf_req(chan, buf), buf);
2513 chan->num_conf_req++;
/* Extract the RFC option from a successful Configuration Response and
 * apply its timers/MPS to the channel; if the peer sent no RFC option,
 * fall back to sane spec defaults. No-op for basic mode.
 * NOTE(review): SOURCE is truncated — the "option found" early-exit
 * path is not visible.
 */
2516 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2520 struct l2cap_conf_rfc rfc;
2522 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2524 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2527 while (len >= L2CAP_CONF_OPT_SIZE) {
2528 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2531 case L2CAP_CONF_RFC:
2532 if (olen == sizeof(rfc))
2533 memcpy(&rfc, (void *)val, olen);
2538 /* Use sane default values in case a misbehaving remote device
2539 * did not send an RFC option.
2541 rfc.mode = chan->mode;
2542 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2543 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2544 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2546 BT_ERR("Expected RFC option was not found, using defaults");
2550 case L2CAP_MODE_ERTM:
2551 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2552 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2553 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2555 case L2CAP_MODE_STREAMING:
2556 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject: if it rejects our outstanding Information
 * Request, abort feature discovery (mark it done) and proceed with
 * connection setup anyway.
 */
2560 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2562 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2564 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2567 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2568 cmd->ident == conn->info_ident) {
2569 cancel_delayed_work(&conn->info_timer);
2571 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2572 conn->info_ident = 0;
2574 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for
 * the PSM, run security/backlog/duplicate-CID checks, create the child
 * channel, then answer with success / pending / an error result and —
 * when appropriate — start feature discovery or configuration.
 * NOTE(review): SOURCE is truncated (locking, error labels and several
 * assignments are not visible).
 */
2580 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2582 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2583 struct l2cap_conn_rsp rsp;
2584 struct l2cap_chan *chan = NULL, *pchan;
2585 struct sock *parent, *sk = NULL;
2586 int result, status = L2CAP_CS_NO_INFO;
2588 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2589 __le16 psm = req->psm;
/* NOTE(review): psm is __le16 but printed with a plain %x format —
 * byte order of the debug output may be wrong on big-endian hosts. */
2591 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2593 /* Check if we have socket listening on psm */
2594 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2596 result = L2CAP_CR_BAD_PSM;
2604 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2605 if (psm != cpu_to_le16(0x0001) &&
2606 !hci_conn_check_link_mode(conn->hcon)) {
2607 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2608 result = L2CAP_CR_SEC_BLOCK;
2612 result = L2CAP_CR_NO_MEM;
2614 /* Check for backlog size */
2615 if (sk_acceptq_is_full(parent)) {
2616 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2620 chan = pchan->ops->new_connection(pchan->data);
2626 /* Check if we already have channel with that dcid */
2627 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2628 sock_set_flag(sk, SOCK_ZAPPED);
2629 chan->ops->close(chan->data);
2633 hci_conn_hold(conn->hcon);
2635 bacpy(&bt_sk(sk)->src, conn->src);
2636 bacpy(&bt_sk(sk)->dst, conn->dst);
2640 bt_accept_enqueue(parent, sk);
2642 l2cap_chan_add(conn, chan);
2646 __set_chan_timer(chan, sk->sk_sndtimeo);
2648 chan->ident = cmd->ident;
/* Respond only after feature discovery is complete; otherwise PEND. */
2650 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2651 if (l2cap_chan_check_security(chan)) {
2652 if (bt_sk(sk)->defer_setup) {
2653 l2cap_state_change(chan, BT_CONNECT2);
2654 result = L2CAP_CR_PEND;
2655 status = L2CAP_CS_AUTHOR_PEND;
2656 parent->sk_data_ready(parent, 0);
2658 l2cap_state_change(chan, BT_CONFIG);
2659 result = L2CAP_CR_SUCCESS;
2660 status = L2CAP_CS_NO_INFO;
2663 l2cap_state_change(chan, BT_CONNECT2);
2664 result = L2CAP_CR_PEND;
2665 status = L2CAP_CS_AUTHEN_PEND;
2668 l2cap_state_change(chan, BT_CONNECT2);
2669 result = L2CAP_CR_PEND;
2670 status = L2CAP_CS_NO_INFO;
2674 release_sock(parent);
2677 rsp.scid = cpu_to_le16(scid);
2678 rsp.dcid = cpu_to_le16(dcid);
2679 rsp.result = cpu_to_le16(result);
2680 rsp.status = cpu_to_le16(status);
2681 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending without info: kick off feature-mask discovery now. */
2683 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2684 struct l2cap_info_req info;
2685 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2687 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2688 conn->info_ident = l2cap_get_ident(conn);
2690 schedule_delayed_work(&conn->info_timer,
2691 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2693 l2cap_send_cmd(conn, conn->info_ident,
2694 L2CAP_INFO_REQ, sizeof(info), &info);
/* Accepted immediately: start configuration. */
2697 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2698 result == L2CAP_CR_SUCCESS) {
2700 set_bit(CONF_REQ_SENT, &chan->conf_state);
2701 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2702 l2cap_build_conf_req(chan, buf), buf);
2703 chan->num_conf_req++;
/* Handle a Connection Response: look up the channel by scid (or by the
 * command ident while still pending), then either move to BT_CONFIG and
 * send our Configuration Request, stay pending, or tear the channel
 * down on refusal.
 * NOTE(review): SOURCE is truncated (lock handling and case labels for
 * the pending/default paths are not visible).
 */
2709 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2711 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2712 u16 scid, dcid, result, status;
2713 struct l2cap_chan *chan;
2717 scid = __le16_to_cpu(rsp->scid);
2718 dcid = __le16_to_cpu(rsp->dcid);
2719 result = __le16_to_cpu(rsp->result);
2720 status = __le16_to_cpu(rsp->status);
2722 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2725 chan = l2cap_get_chan_by_scid(conn, scid);
/* scid 0 (pending) responses are matched by command ident instead. */
2729 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2737 case L2CAP_CR_SUCCESS:
2738 l2cap_state_change(chan, BT_CONFIG);
2741 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2743 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2746 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2747 l2cap_build_conf_req(chan, req), req);
2748 chan->num_conf_req++;
2752 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Refused: delete the channel with ECONNREFUSED. */
2756 l2cap_chan_del(chan, ECONNREFUSED);
2764 static inline void set_default_fcs(struct l2cap_chan *chan)
2766 /* FCS is enabled only in ERTM or streaming mode, if one or both
/* Basic mode never uses an FCS; otherwise default to CRC16 unless the
 * peer explicitly negotiated FCS off. */
2769 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2770 chan->fcs = L2CAP_FCS_NONE;
2771 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2772 chan->fcs = L2CAP_FCS_CRC16;
/* Handle a Configuration Request: validate channel state, accumulate
 * (possibly multi-fragment) option data into chan->conf_req, then on the
 * final fragment parse it, send our response, and if both directions are
 * configured bring the channel up (ERTM init + ready).
 * NOTE(review): SOURCE is truncated (locking, unlock labels and the
 * conf-buffer reset are not visible).
 */
2775 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2777 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2780 struct l2cap_chan *chan;
2784 dcid = __le16_to_cpu(req->dcid);
2785 flags = __le16_to_cpu(req->flags);
2787 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2789 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal while connecting/configuring. */
2795 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2796 struct l2cap_cmd_rej_cid rej;
2798 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2799 rej.scid = cpu_to_le16(chan->scid);
2800 rej.dcid = cpu_to_le16(chan->dcid);
2802 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2807 /* Reject if config buffer is too small. */
2808 len = cmd_len - sizeof(*req);
2809 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2810 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2811 l2cap_build_conf_rsp(chan, rsp,
2812 L2CAP_CONF_REJECT, flags), rsp);
/* Buffer this fragment's options. */
2817 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2818 chan->conf_len += len;
/* Continuation flag set: more fragments follow. */
2820 if (flags & 0x0001) {
2821 /* Incomplete config. Send empty response. */
2822 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2823 l2cap_build_conf_rsp(chan, rsp,
2824 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2828 /* Complete config. */
2829 len = l2cap_parse_conf_req(chan, rsp);
2831 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2835 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2836 chan->num_conf_rsp++;
2838 /* Reset config buffer. */
2841 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: transition to connected. */
2844 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2845 set_default_fcs(chan);
2847 l2cap_state_change(chan, BT_CONNECTED);
2849 chan->next_tx_seq = 0;
2850 chan->expected_tx_seq = 0;
2851 skb_queue_head_init(&chan->tx_q);
2852 if (chan->mode == L2CAP_MODE_ERTM)
2853 l2cap_ertm_init(chan);
2855 l2cap_chan_ready(chan);
/* We haven't sent our own config request yet: do it now. */
2859 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2861 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2862 l2cap_build_conf_req(chan, buf), buf);
2863 chan->num_conf_req++;
2866 /* Got Conf Rsp PENDING from remote side and asume we sent
2867 Conf Rsp PENDING in the code above */
2868 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2869 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2871 /* check compatibility */
2873 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2874 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2876 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2877 l2cap_build_conf_rsp(chan, rsp,
2878 L2CAP_CONF_SUCCESS, 0x0000), rsp);
/* Handle a Configuration Response: on SUCCESS apply RFC values; on
 * PENDING possibly finish a pending local response; on UNACCEPT retry
 * with a new request (bounded by L2CAP_CONF_MAX_CONF_RSP); otherwise
 * disconnect. When both directions are done, bring the channel up.
 * NOTE(review): SOURCE is truncated (locking, goto labels, the default
 * case and the final unlock are not visible).
 */
2886 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2888 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2889 u16 scid, flags, result;
2890 struct l2cap_chan *chan;
2892 int len = cmd->len - sizeof(*rsp);
2894 scid = __le16_to_cpu(rsp->scid);
2895 flags = __le16_to_cpu(rsp->flags);
2896 result = __le16_to_cpu(rsp->result);
2898 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2899 scid, flags, result);
2901 chan = l2cap_get_chan_by_scid(conn, scid);
2908 case L2CAP_CONF_SUCCESS:
2909 l2cap_conf_rfc_get(chan, rsp->data, len);
2910 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2913 case L2CAP_CONF_PENDING:
2914 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
/* If we also answered PENDING earlier, resolve it now. */
2916 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2919 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2922 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2926 /* check compatibility */
2928 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2929 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2931 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2932 l2cap_build_conf_rsp(chan, buf,
2933 L2CAP_CONF_SUCCESS, 0x0000), buf);
2937 case L2CAP_CONF_UNACCEPT:
/* Renegotiate until the retry limit is reached. */
2938 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2941 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2942 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2946 /* throw out any old stored conf requests */
2947 result = L2CAP_CONF_SUCCESS;
2948 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2951 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2955 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2956 L2CAP_CONF_REQ, len, req);
2957 chan->num_conf_req++;
2958 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable: error the socket and disconnect. */
2964 sk->sk_err = ECONNRESET;
2965 __set_chan_timer(chan,
2966 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
2967 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2974 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2976 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2977 set_default_fcs(chan);
2979 l2cap_state_change(chan, BT_CONNECTED);
2980 chan->next_tx_seq = 0;
2981 chan->expected_tx_seq = 0;
2982 skb_queue_head_init(&chan->tx_q);
2983 if (chan->mode == L2CAP_MODE_ERTM)
2984 l2cap_ertm_init(chan);
2986 l2cap_chan_ready(chan);
/* Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, shut the socket down, remove the channel and invoke its
 * close op.
 * NOTE(review): SOURCE is truncated (lock/unlock around the channel
 * teardown is not visible).
 */
2994 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2996 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2997 struct l2cap_disconn_rsp rsp;
2999 struct l2cap_chan *chan;
3002 scid = __le16_to_cpu(req->scid);
3003 dcid = __le16_to_cpu(req->dcid);
3005 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The request's dcid is our local scid. */
3007 chan = l2cap_get_chan_by_scid(conn, dcid);
3013 rsp.dcid = cpu_to_le16(chan->scid);
3014 rsp.scid = cpu_to_le16(chan->dcid);
3015 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3017 sk->sk_shutdown = SHUTDOWN_MASK;
3019 l2cap_chan_del(chan, ECONNRESET);
3022 chan->ops->close(chan->data);
/* Handle a Disconnection Response to a disconnect we initiated: remove
 * the channel (no error) and invoke its close op.
 */
3026 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3028 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3030 struct l2cap_chan *chan;
3033 scid = __le16_to_cpu(rsp->scid);
3034 dcid = __le16_to_cpu(rsp->dcid);
3036 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3038 chan = l2cap_get_chan_by_scid(conn, scid);
3044 l2cap_chan_del(chan, 0);
3047 chan->ops->close(chan->data);
/* Handle an Information Request: answer FEAT_MASK with our feature mask
 * (ERTM/streaming and, with high speed, extended flow/window),
 * FIXED_CHAN with the fixed-channel bitmap (A2MP bit depends on
 * high-speed support), and anything else with NOTSUPP.
 */
3051 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3053 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3056 type = __le16_to_cpu(req->type);
3058 BT_DBG("type 0x%4.4x", type);
3060 if (type == L2CAP_IT_FEAT_MASK) {
3062 u32 feat_mask = l2cap_feat_mask;
3063 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3064 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3065 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3067 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* High-speed builds additionally advertise EFS and EWS. */
3070 feat_mask |= L2CAP_FEAT_EXT_FLOW
3071 | L2CAP_FEAT_EXT_WINDOW;
3073 put_unaligned_le32(feat_mask, rsp->data);
3074 l2cap_send_cmd(conn, cmd->ident,
3075 L2CAP_INFO_RSP, sizeof(buf), buf);
3076 } else if (type == L2CAP_IT_FIXED_CHAN) {
3078 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3081 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3083 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3085 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3086 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3087 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3088 l2cap_send_cmd(conn, cmd->ident,
3089 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: reply NOTSUPP. */
3091 struct l2cap_info_rsp rsp;
3092 rsp.type = cpu_to_le16(type);
3093 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3094 l2cap_send_cmd(conn, cmd->ident,
3095 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response during connection setup.  On a feature
 * mask reply, chain a fixed-channels query if the peer supports it;
 * otherwise mark info exchange done and kick pending channel connects.
 */
3101 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3103 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3106 type = __le16_to_cpu(rsp->type);
3107 result = __le16_to_cpu(rsp->result);
3109 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3111 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3112 if (cmd->ident != conn->info_ident ||
3113 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
/* Stop the info-timeout watchdog now that a valid reply arrived */
3116 cancel_delayed_work(&conn->info_timer);
3118 if (result != L2CAP_IR_SUCCESS) {
/* Peer refused: treat the exchange as finished and proceed anyway */
3119 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3120 conn->info_ident = 0;
3122 l2cap_conn_start(conn);
3127 if (type == L2CAP_IT_FEAT_MASK) {
3128 conn->feat_mask = get_unaligned_le32(rsp->data);
3130 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Peer supports fixed channels -- issue the follow-up query */
3131 struct l2cap_info_req req;
3132 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3134 conn->info_ident = l2cap_get_ident(conn);
3136 l2cap_send_cmd(conn, conn->info_ident,
3137 L2CAP_INFO_REQ, sizeof(req), &req);
3139 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3140 conn->info_ident = 0;
3142 l2cap_conn_start(conn);
3144 } else if (type == L2CAP_IT_FIXED_CHAN) {
3145 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3146 conn->info_ident = 0;
3148 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  AMP channel creation is not yet
 * implemented, so every request is rejected with "no resources".
 */
3154 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3155 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3158 struct l2cap_create_chan_req *req = data;
3159 struct l2cap_create_chan_rsp rsp;
/* Malformed length -> reject (error path elided in this excerpt) */
3162 if (cmd_len != sizeof(*req))
3168 psm = le16_to_cpu(req->psm);
3169 scid = le16_to_cpu(req->scid);
3171 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3173 /* Placeholder: Always reject */
3175 rsp.scid = cpu_to_le16(scid);
3176 rsp.result = L2CAP_CR_NO_MEM;
3177 rsp.status = L2CAP_CS_NO_INFO;
3179 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response shares the wire layout of a Connect
 * Response, so delegate straight to the regular connect-rsp handler.
 */
3185 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3186 struct l2cap_cmd_hdr *cmd, void *data)
3188 BT_DBG("conn %p", conn);
3190 return l2cap_connect_rsp(conn, cmd, data);
/* Emit a Move Channel Response for @icid with the given @result,
 * reusing the @ident of the request being answered.
 */
3193 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3194 u16 icid, u16 result)
3196 struct l2cap_move_chan_rsp rsp;
3198 BT_DBG("icid %d, result %d", icid, result);
3200 rsp.icid = cpu_to_le16(icid);
3201 rsp.result = cpu_to_le16(result);
3203 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Emit a Move Channel Confirm with a freshly allocated command ident,
 * remembering that ident on the channel so the matching confirm-rsp can
 * be correlated later.
 * NOTE(review): caller may pass chan == NULL (see l2cap_move_channel_rsp
 * below), yet chan->ident is written unconditionally -- confirm whether
 * a NULL guard exists in the elided lines.
 */
3206 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3207 struct l2cap_chan *chan, u16 icid, u16 result)
3209 struct l2cap_move_chan_cfm cfm;
3212 BT_DBG("icid %d, result %d", icid, result);
3214 ident = l2cap_get_ident(conn);
3216 chan->ident = ident;
3218 cfm.icid = cpu_to_le16(icid);
3219 cfm.result = cpu_to_le16(result);
3221 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Emit a Move Channel Confirm Response for @icid, echoing the @ident of
 * the confirm it acknowledges.
 */
3224 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3227 struct l2cap_move_chan_cfm_rsp rsp;
3229 BT_DBG("icid %d", icid);
3231 rsp.icid = cpu_to_le16(icid);
3232 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle an AMP Move Channel Request.  Channel moves are not yet
 * supported, so every request is refused with NOT_ALLOWED.
 */
3235 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3236 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3238 struct l2cap_move_chan_req *req = data;
3240 u16 result = L2CAP_MR_NOT_ALLOWED;
/* Malformed length -> bail (error path elided in this excerpt) */
3242 if (cmd_len != sizeof(*req))
3245 icid = le16_to_cpu(req->icid);
3247 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3252 /* Placeholder: Always refuse */
3253 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle an AMP Move Channel Response.  Since moves are unimplemented,
 * always answer with an UNCONFIRMED Move Channel Confirm.
 */
3258 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3259 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3261 struct l2cap_move_chan_rsp *rsp = data;
3264 if (cmd_len != sizeof(*rsp))
3267 icid = le16_to_cpu(rsp->icid);
3268 result = le16_to_cpu(rsp->result);
3270 BT_DBG("icid %d, result %d", icid, result);
3272 /* Placeholder: Always unconfirmed */
/* No channel is associated with the placeholder flow, hence NULL */
3273 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle an AMP Move Channel Confirm: validate the length and simply
 * acknowledge it with a Confirm Response (no state change yet).
 */
3278 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3279 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3281 struct l2cap_move_chan_cfm *cfm = data;
3284 if (cmd_len != sizeof(*cfm))
3287 icid = le16_to_cpu(cfm->icid);
3288 result = le16_to_cpu(cfm->result);
3290 BT_DBG("icid %d, result %d", icid, result);
3292 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle an AMP Move Channel Confirm Response.  Nothing to do beyond
 * length validation and logging while moves remain unimplemented.
 */
3297 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3298 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3300 struct l2cap_move_chan_cfm_rsp *rsp = data;
3303 if (cmd_len != sizeof(*rsp))
3306 icid = le16_to_cpu(rsp->icid);
3308 BT_DBG("icid %d", icid);
/* Validate LE connection-update parameters against the ranges mandated
 * by the Bluetooth Core spec (interval 6..3200, supervision timeout
 * 10..3200, latency <= 499 and small enough that the link cannot time
 * out between events).  Returns nonzero on bad parameters -- TODO
 * confirm exact return values; the return statements are elided here.
 */
3313 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3318 if (min > max || min < 6 || max > 3200)
3321 if (to_multiplier < 10 || to_multiplier > 3200)
/* Timeout (10ms units) must exceed the max interval (1.25ms units):
 * to_multiplier * 8 converts the timeout into interval units. */
3324 if (max >= to_multiplier * 8)
3327 max_latency = (to_multiplier * 8 / max) - 1;
3328 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request from a slave.  Only a
 * master may act on it; the parameters are range-checked, a response is
 * sent, and on acceptance the controller is asked to update the link.
 */
3334 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3335 struct l2cap_cmd_hdr *cmd, u8 *data)
3337 struct hci_conn *hcon = conn->hcon;
3338 struct l2cap_conn_param_update_req *req;
3339 struct l2cap_conn_param_update_rsp rsp;
3340 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master role may apply connection parameter updates */
3343 if (!(hcon->link_mode & HCI_LM_MASTER))
3346 cmd_len = __le16_to_cpu(cmd->len);
3347 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3350 req = (struct l2cap_conn_param_update_req *) data;
3351 min = __le16_to_cpu(req->min);
3352 max = __le16_to_cpu(req->max);
3353 latency = __le16_to_cpu(req->latency);
3354 to_multiplier = __le16_to_cpu(req->to_multiplier);
3356 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3357 min, max, latency, to_multiplier);
3359 memset(&rsp, 0, sizeof(rsp));
3361 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3363 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3365 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3367 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters were acceptable: push the update down to the controller
 * (the guarding `if (!err)` appears elided from this excerpt) */
3371 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signaling command to its handler.  Echo
 * requests are answered inline; unknown opcodes are logged as errors.
 */
3376 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3377 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3381 switch (cmd->code) {
3382 case L2CAP_COMMAND_REJ:
3383 l2cap_command_rej(conn, cmd, data);
3386 case L2CAP_CONN_REQ:
3387 err = l2cap_connect_req(conn, cmd, data);
3390 case L2CAP_CONN_RSP:
3391 err = l2cap_connect_rsp(conn, cmd, data);
3394 case L2CAP_CONF_REQ:
3395 err = l2cap_config_req(conn, cmd, cmd_len, data);
3398 case L2CAP_CONF_RSP:
3399 err = l2cap_config_rsp(conn, cmd, data);
3402 case L2CAP_DISCONN_REQ:
3403 err = l2cap_disconnect_req(conn, cmd, data);
3406 case L2CAP_DISCONN_RSP:
3407 err = l2cap_disconnect_rsp(conn, cmd, data);
3410 case L2CAP_ECHO_REQ:
/* Echo: reflect the payload straight back with the same ident */
3411 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3414 case L2CAP_ECHO_RSP:
3417 case L2CAP_INFO_REQ:
3418 err = l2cap_information_req(conn, cmd, data);
3421 case L2CAP_INFO_RSP:
3422 err = l2cap_information_rsp(conn, cmd, data);
3425 case L2CAP_CREATE_CHAN_REQ:
3426 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3429 case L2CAP_CREATE_CHAN_RSP:
3430 err = l2cap_create_channel_rsp(conn, cmd, data);
3433 case L2CAP_MOVE_CHAN_REQ:
3434 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3437 case L2CAP_MOVE_CHAN_RSP:
3438 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3441 case L2CAP_MOVE_CHAN_CFM:
3442 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3445 case L2CAP_MOVE_CHAN_CFM_RSP:
3446 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3450 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signaling command.  Only the connection
 * parameter update request is acted on; rejects and update responses
 * are silently accepted, anything else is logged as unknown.
 */
3458 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3459 struct l2cap_cmd_hdr *cmd, u8 *data)
3461 switch (cmd->code) {
3462 case L2CAP_COMMAND_REJ:
3465 case L2CAP_CONN_PARAM_UPDATE_REQ:
3466 return l2cap_conn_param_update_req(conn, cmd, data);
3468 case L2CAP_CONN_PARAM_UPDATE_RSP:
3472 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signaling channel payload: iterate over the packed command
 * PDUs in @skb, dispatch each to the LE or BR/EDR handler, and send a
 * Command Reject for any command whose handler failed.
 */
3477 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3478 struct sk_buff *skb)
3480 u8 *data = skb->data;
3482 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic first */
3485 l2cap_raw_recv(conn, skb);
3487 while (len >= L2CAP_CMD_HDR_SIZE) {
3489 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3490 data += L2CAP_CMD_HDR_SIZE;
3491 len -= L2CAP_CMD_HDR_SIZE;
3493 cmd_len = le16_to_cpu(cmd.len);
3495 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Claimed length must fit in what remains, and ident 0 is illegal */
3497 if (cmd_len > len || !cmd.ident) {
3498 BT_DBG("corrupted command");
3502 if (conn->hcon->type == LE_LINK)
3503 err = l2cap_le_sig_cmd(conn, &cmd, data);
3505 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3508 struct l2cap_cmd_rej_unk rej;
3510 BT_ERR("Wrong link type (%d)", err);
3512 /* FIXME: Map err to a valid reason */
3513 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3514 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify and strip the trailing CRC16 FCS of an ERTM/streaming frame.
 * The CRC covers the L2CAP header (located just before skb->data after
 * the earlier pulls) plus the remaining payload.
 */
3524 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3526 u16 our_fcs, rcv_fcs;
3529 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3530 hdr_size = L2CAP_EXT_HDR_SIZE;
3532 hdr_size = L2CAP_ENH_HDR_SIZE;
3534 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off, then read it from just past the new tail --
 * skb_trim only moves the tail pointer, the bytes are still there */
3535 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3536 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3537 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3539 if (our_fcs != rcv_fcs)
/* After a poll, answer with whatever is appropriate: RNR when locally
 * busy, pending I-frames (with retransmissions first if the remote was
 * busy), or a plain RR if nothing at all was sent.
 */
3545 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3549 chan->frames_sent = 0;
3551 control |= __set_reqseq(chan, chan->buffer_seq);
3553 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3554 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3555 l2cap_send_sframe(chan, control);
3556 set_bit(CONN_RNR_SENT, &chan->conn_state);
3559 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3560 l2cap_retransmit_frames(chan);
3562 l2cap_ertm_send(chan);
/* Nothing went out above and we are not busy: an explicit RR is
 * needed so the poll still gets its final response */
3564 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3565 chan->frames_sent == 0) {
3566 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3567 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq offset from buffer_seq.  Duplicate sequence
 * numbers are detected (failure path elided in this excerpt).
 */
3571 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3573 struct sk_buff *next_skb;
3574 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequence/SAR metadata in the skb control block for later
 * reassembly by l2cap_check_srej_gap() */
3576 bt_cb(skb)->tx_seq = tx_seq;
3577 bt_cb(skb)->sar = sar;
3579 next_skb = skb_peek(&chan->srej_q);
3581 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3584 if (bt_cb(next_skb)->tx_seq == tx_seq)
3587 next_tx_seq_offset = __seq_offset(chan,
3588 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
/* Found the first queued frame that is later than ours: slot in */
3590 if (next_tx_seq_offset > tx_seq_offset) {
3591 __skb_queue_before(&chan->srej_q, next_skb, skb);
3595 if (skb_queue_is_last(&chan->srej_q, next_skb))
3598 next_skb = skb_queue_next(&chan->srej_q, next_skb);
/* Later than everything queued so far: append at the tail */
3601 __skb_queue_tail(&chan->srej_q, skb);
/* Chain @new_frag onto @skb's frag_list, tracking the tail fragment in
 * *@last_frag for O(1) appends, and keep skb's accounting consistent.
 */
3606 static void append_skb_frag(struct sk_buff *skb,
3607 struct sk_buff *new_frag, struct sk_buff **last_frag)
3609 /* skb->len reflects data in skb as well as all fragments
3610 * skb->data_len reflects only data in fragments
3612 if (!skb_has_frag_list(skb))
3613 skb_shinfo(skb)->frag_list = new_frag;
3615 new_frag->next = NULL;
3617 (*last_frag)->next = new_frag;
3618 *last_frag = new_frag;
/* Keep len/data_len/truesize coherent with the appended fragment */
3620 skb->len += new_frag->len;
3621 skb->data_len += new_frag->len;
3622 skb->truesize += new_frag->truesize;
/* Reassemble a (possibly segmented) SDU from one I-frame according to
 * its SAR bits, delivering the complete SDU to the channel's recv op.
 * Frames that would overflow imtu or arrive with mismatched lengths are
 * discarded (cleanup/return paths partially elided in this excerpt).
 */
3625 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3629 switch (__get_ctrl_sar(chan, control)) {
3630 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in a single frame: hand it straight up */
3634 err = chan->ops->recv(chan->data, skb);
3637 case L2CAP_SAR_START:
/* First segment carries the total SDU length up front */
3641 chan->sdu_len = get_unaligned_le16(skb->data);
3642 skb_pull(skb, L2CAP_SDULEN_SIZE);
3644 if (chan->sdu_len > chan->imtu) {
3649 if (skb->len >= chan->sdu_len)
3653 chan->sdu_last_frag = skb;
3659 case L2CAP_SAR_CONTINUE:
3663 append_skb_frag(chan->sdu, skb,
3664 &chan->sdu_last_frag);
/* Middle segment must not already complete/exceed the SDU */
3667 if (chan->sdu->len >= chan->sdu_len)
3677 append_skb_frag(chan->sdu, skb,
3678 &chan->sdu_last_frag);
/* END segment: total reassembled length must match exactly */
3681 if (chan->sdu->len != chan->sdu_len)
3684 err = chan->ops->recv(chan->data, chan->sdu)
3687 /* Reassembly complete */
3689 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset reassembly state */
3697 kfree_skb(chan->sdu);
3699 chan->sdu_last_frag = NULL;
/* Flag the channel locally busy and arm the ack timer so an RNR will
 * eventually be sent to throttle the peer.
 */
3706 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3708 BT_DBG("chan %p, Enter local busy", chan);
3710 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3712 __set_ack_timer(chan);
/* Leave local-busy state.  If an RNR was sent earlier, poll the peer
 * with an RR(P=1) and wait for the final response (CONN_WAIT_F) before
 * clearing the busy flags.
 */
3715 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3719 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
/* RNR went out before: actively poll to resynchronize */
3722 control = __set_reqseq(chan, chan->buffer_seq);
3723 control |= __set_ctrl_poll(chan);
3724 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3725 l2cap_send_sframe(chan, control);
3726 chan->retry_count = 1;
3728 __clear_retrans_timer(chan);
3729 __set_monitor_timer(chan);
3731 set_bit(CONN_WAIT_F, &chan->conn_state);
3734 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3735 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3737 BT_DBG("chan %p, Exit local busy", chan);
/* Public entry point for the socket layer to toggle local-busy flow
 * control; only meaningful in ERTM mode.
 */
3740 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3742 if (chan->mode == L2CAP_MODE_ERTM) {
3744 l2cap_ertm_enter_local_busy(chan);
3746 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ queue: deliver consecutively sequenced buffered
 * I-frames starting at @tx_seq until a gap is hit or we go locally
 * busy.  A reassembly failure tears the link down.
 */
3750 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3752 struct sk_buff *skb;
3755 while ((skb = skb_peek(&chan->srej_q)) &&
3756 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Stop at the first missing sequence number */
3759 if (bt_cb(skb)->tx_seq != tx_seq)
3762 skb = skb_dequeue(&chan->srej_q);
3763 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3764 err = l2cap_reassemble_sdu(chan, skb, control);
3767 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3771 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3772 tx_seq = __next_seq(chan, tx_seq);
/* Re-send SREJ S-frames for every still-outstanding sequence number in
 * the srej list; the matching entry for @tx_seq itself is removed
 * (free/removal lines appear elided), the rest are re-queued.
 */
3776 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3778 struct srej_list *l, *tmp;
3781 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3782 if (l->tx_seq == tx_seq) {
3787 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3788 control |= __set_reqseq(chan, l->tx_seq);
3789 l2cap_send_sframe(chan, control);
/* Rotate the entry to the tail to preserve request order */
3791 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing frame between expected_tx_seq and @tx_seq,
 * recording each requested sequence number in the srej list.  Returns
 * an error on allocation failure (return statements elided here).
 */
3795 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3797 struct srej_list *new;
3800 while (tx_seq != chan->expected_tx_seq) {
3801 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3802 control |= __set_reqseq(chan, chan->expected_tx_seq);
3803 l2cap_send_sframe(chan, control);
/* GFP_ATOMIC: we are in the receive path, cannot sleep */
3805 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3809 new->tx_seq = chan->expected_tx_seq;
3811 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3813 list_add_tail(&new->list, &chan->srej_l);
/* Skip past @tx_seq itself -- it just arrived, no SREJ needed */
3816 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive state machine.  Validates tx_seq against
 * the receive window, handles in-order delivery, SREJ-based recovery of
 * out-of-order frames, duplicate detection, and acknowledgment pacing.
 * Several goto labels / drop paths are elided from this excerpt.
 */
3821 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3823 u16 tx_seq = __get_txseq(chan, rx_control);
3824 u16 req_seq = __get_reqseq(chan, rx_control);
3825 u8 sar = __get_ctrl_sar(chan, rx_control);
3826 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every tx_win/6 frames to amortize S-frame overhead */
3827 int num_to_ack = (chan->tx_win/6) + 1;
3830 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3831 tx_seq, rx_control);
/* F-bit answers our earlier poll: stop the monitor timer */
3833 if (__is_ctrl_final(chan, rx_control) &&
3834 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3835 __clear_monitor_timer(chan);
3836 if (chan->unacked_frames > 0)
3837 __set_retrans_timer(chan);
3838 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* Piggybacked ReqSeq acknowledges our outbound frames */
3841 chan->expected_ack_seq = req_seq;
3842 l2cap_drop_acked_frames(chan);
3844 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3846 /* invalid tx_seq */
3847 if (tx_seq_offset >= chan->tx_win) {
3848 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3852 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3853 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3854 l2cap_send_ack(chan);
3858 if (tx_seq == chan->expected_tx_seq)
3861 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3862 struct srej_list *first;
3864 first = list_first_entry(&chan->srej_l,
3865 struct srej_list, list);
/* The frame we SREJed first has arrived: buffer it and try to
 * drain the gap */
3866 if (tx_seq == first->tx_seq) {
3867 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3868 l2cap_check_srej_gap(chan, tx_seq);
3870 list_del(&first->list);
/* All outstanding SREJs satisfied: leave recovery mode */
3873 if (list_empty(&chan->srej_l)) {
3874 chan->buffer_seq = chan->buffer_seq_srej;
3875 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3876 l2cap_send_ack(chan);
3877 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3880 struct srej_list *l;
3882 /* duplicated tx_seq */
3883 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3886 list_for_each_entry(l, &chan->srej_l, list) {
3887 if (l->tx_seq == tx_seq) {
3888 l2cap_resend_srejframe(chan, tx_seq);
3893 err = l2cap_send_srejframe(chan, tx_seq);
3895 l2cap_send_disconn_req(chan->conn, chan, -err);
3900 expected_tx_seq_offset = __seq_offset(chan,
3901 chan->expected_tx_seq, chan->buffer_seq);
3903 /* duplicated tx_seq */
3904 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-order frame: enter SREJ recovery */
3907 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3909 BT_DBG("chan %p, Enter SREJ", chan);
3911 INIT_LIST_HEAD(&chan->srej_l);
3912 chan->buffer_seq_srej = chan->buffer_seq;
3914 __skb_queue_head_init(&chan->srej_q);
3915 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3917 /* Set P-bit only if there are some I-frames to ack. */
3918 if (__clear_ack_timer(chan))
3919 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3921 err = l2cap_send_srejframe(chan, tx_seq);
3923 l2cap_send_disconn_req(chan->conn, chan, -err);
/* Expected/in-order path: advance the window and deliver */
3930 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3932 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3933 bt_cb(skb)->tx_seq = tx_seq;
3934 bt_cb(skb)->sar = sar;
3935 __skb_queue_tail(&chan->srej_q, skb);
3939 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3940 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3943 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3947 if (__is_ctrl_final(chan, rx_control)) {
3948 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3949 l2cap_retransmit_frames(chan);
/* Batch acknowledgments: ack every num_to_ack frames, otherwise
 * defer via the ack timer */
3953 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3954 if (chan->num_acked == num_to_ack - 1)
3955 l2cap_send_ack(chan);
3957 __set_ack_timer(chan);
/* Handle a Receiver Ready S-frame: record the ack, then react to the
 * P/F bits -- answer polls, resume after REJ recovery on final, or
 * simply clear remote-busy and keep transmitting.
 */
3966 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3968 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3969 __get_reqseq(chan, rx_control), rx_control);
3971 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3972 l2cap_drop_acked_frames(chan);
3974 if (__is_ctrl_poll(chan, rx_control)) {
/* Poll: our next frame must carry the F-bit */
3975 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3976 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3977 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3978 (chan->unacked_frames > 0))
3979 __set_retrans_timer(chan);
3981 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3982 l2cap_send_srejtail(chan);
3984 l2cap_send_i_or_rr_or_rnr(chan);
3987 } else if (__is_ctrl_final(chan, rx_control)) {
3988 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Final without a pending REJ ack: retransmit from req_seq */
3990 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3991 l2cap_retransmit_frames(chan);
3994 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3995 (chan->unacked_frames > 0))
3996 __set_retrans_timer(chan);
3998 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3999 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4000 l2cap_send_ack(chan);
4002 l2cap_ertm_send(chan);
/* Handle a Reject S-frame: the peer asks for retransmission starting at
 * req_seq.  Ack everything before it, then retransmit; with F=0 while
 * we are polling, mark REJ as acted on so the final response does not
 * trigger a second retransmission burst.
 */
4006 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4008 u16 tx_seq = __get_reqseq(chan, rx_control);
4010 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4012 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4014 chan->expected_ack_seq = tx_seq;
4015 l2cap_drop_acked_frames(chan);
4017 if (__is_ctrl_final(chan, rx_control)) {
4018 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4019 l2cap_retransmit_frames(chan);
4021 l2cap_retransmit_frames(chan);
4023 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4024 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a Selective Reject S-frame: retransmit exactly the requested
 * frame.  The P/F handling mirrors the REJ case -- remember the req_seq
 * while a poll is outstanding so the final response isn't double-served.
 */
4027 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4029 u16 tx_seq = __get_reqseq(chan, rx_control);
4031 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4033 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4035 if (__is_ctrl_poll(chan, rx_control)) {
/* SREJ with P=1 also acknowledges everything before tx_seq */
4036 chan->expected_ack_seq = tx_seq;
4037 l2cap_drop_acked_frames(chan);
4039 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4040 l2cap_retransmit_one_frame(chan, tx_seq);
4042 l2cap_ertm_send(chan);
4044 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4045 chan->srej_save_reqseq = tx_seq;
4046 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4048 } else if (__is_ctrl_final(chan, rx_control)) {
/* Skip the retransmit if this SREJ was already served while we
 * were waiting for the final bit */
4049 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4050 chan->srej_save_reqseq == tx_seq)
4051 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4053 l2cap_retransmit_one_frame(chan, tx_seq);
4055 l2cap_retransmit_one_frame(chan, tx_seq);
4056 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4057 chan->srej_save_reqseq = tx_seq;
4058 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a Receiver Not Ready S-frame: the peer is busy.  Record the
 * ack, stop retransmitting, and answer any poll appropriately.
 */
4063 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4065 u16 tx_seq = __get_reqseq(chan, rx_control);
4067 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4069 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4070 chan->expected_ack_seq = tx_seq;
4071 l2cap_drop_acked_frames(chan);
4073 if (__is_ctrl_poll(chan, rx_control))
4074 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4076 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* No SREJ recovery in progress: stop retransmissions and answer
 * the poll with RR/RNR carrying the F-bit */
4077 __clear_retrans_timer(chan);
4078 if (__is_ctrl_poll(chan, rx_control))
4079 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4083 if (__is_ctrl_poll(chan, rx_control)) {
4084 l2cap_send_srejtail(chan);
4086 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4087 l2cap_send_sframe(chan, rx_control);
/* Dispatch a supervisory frame (RR/REJ/SREJ/RNR) to its handler, first
 * resolving any outstanding poll if the F-bit is set.
 */
4091 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4093 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4095 if (__is_ctrl_final(chan, rx_control) &&
4096 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4097 __clear_monitor_timer(chan);
4098 if (chan->unacked_frames > 0)
4099 __set_retrans_timer(chan);
4100 clear_bit(CONN_WAIT_F, &chan->conn_state);
4103 switch (__get_ctrl_super(chan, rx_control)) {
4104 case L2CAP_SUPER_RR:
4105 l2cap_data_channel_rrframe(chan, rx_control);
4108 case L2CAP_SUPER_REJ:
4109 l2cap_data_channel_rejframe(chan, rx_control);
4112 case L2CAP_SUPER_SREJ:
4113 l2cap_data_channel_srejframe(chan, rx_control);
4116 case L2CAP_SUPER_RNR:
4117 l2cap_data_channel_rnrframe(chan, rx_control);
/* Validate and route one incoming ERTM frame: strip control field,
 * verify FCS, sanity-check payload length against MPS and the ReqSeq
 * against our send window, then route to the I-frame or S-frame path.
 * Drop/goto labels are partially elided from this excerpt.
 */
4125 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4129 int len, next_tx_seq_offset, req_seq_offset;
4131 control = __get_control(chan, skb->data);
4132 skb_pull(skb, __ctrl_size(chan));
4136 * We can just drop the corrupted I-frame here.
4137 * Receiver will miss it and start proper recovery
4138 * procedures and ask retransmission.
4140 if (l2cap_check_fcs(chan, skb))
/* SAR START I-frames carry an extra SDU-length field */
4143 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4144 len -= L2CAP_SDULEN_SIZE;
4146 if (chan->fcs == L2CAP_FCS_CRC16)
4147 len -= L2CAP_FCS_SIZE;
4149 if (len > chan->mps) {
4150 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4154 req_seq = __get_reqseq(chan, control);
4156 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4158 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4159 chan->expected_ack_seq);
4161 /* check for invalid req-seq */
4162 if (req_seq_offset > next_tx_seq_offset) {
4163 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4167 if (!__is_sframe(chan, control)) {
/* I-frame: a zero-length payload here is a protocol violation */
4169 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4173 l2cap_data_channel_iframe(chan, control, skb);
/* S-frame: must have no payload at all */
4177 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4181 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data frame to the channel identified by @cid, applying the
 * per-mode receive path: plain delivery for basic mode, the full ERTM
 * state machine, or the lossy streaming-mode resync logic.
 * Drop/unlock paths are partially elided from this excerpt.
 */
4191 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4193 struct l2cap_chan *chan;
4194 struct sock *sk = NULL;
4199 chan = l2cap_get_chan_by_scid(conn, cid);
4201 BT_DBG("unknown cid 0x%4.4x", cid);
4207 BT_DBG("chan %p, len %d", chan, skb->len);
4209 if (chan->state != BT_CONNECTED)
4212 switch (chan->mode) {
4213 case L2CAP_MODE_BASIC:
4214 /* If socket recv buffers overflows we drop data here
4215 * which is *bad* because L2CAP has to be reliable.
4216 * But we don't have any other choice. L2CAP doesn't
4217 * provide flow control mechanism. */
4219 if (chan->imtu < skb->len)
4222 if (!chan->ops->recv(chan->data, skb))
4226 case L2CAP_MODE_ERTM:
4227 l2cap_ertm_data_rcv(chan, skb);
4231 case L2CAP_MODE_STREAMING:
4232 control = __get_control(chan, skb->data);
4233 skb_pull(skb, __ctrl_size(chan));
4236 if (l2cap_check_fcs(chan, skb))
4239 if (__is_sar_start(chan, control))
4240 len -= L2CAP_SDULEN_SIZE;
4242 if (chan->fcs == L2CAP_FCS_CRC16)
4243 len -= L2CAP_FCS_SIZE;
/* Streaming mode never carries S-frames; oversized or negative
 * payloads are silently dropped */
4245 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4248 tx_seq = __get_txseq(chan, control);
4250 if (chan->expected_tx_seq != tx_seq) {
4251 /* Frame(s) missing - must discard partial SDU */
4252 kfree_skb(chan->sdu);
4254 chan->sdu_last_frag = NULL;
4257 /* TODO: Notify userland of missing data */
/* Resynchronize on whatever arrived, no retransmission in
 * streaming mode */
4260 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4262 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4263 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4268 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (PSM-addressed) frame to a matching bound or
 * connected channel, subject to the channel's incoming MTU.
 * NOTE(review): sk assignment and drop/done paths appear elided here.
 */
4282 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4284 struct sock *sk = NULL;
4285 struct l2cap_chan *chan;
4287 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4295 BT_DBG("sk %p, len %d", sk, skb->len);
4297 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4300 if (chan->imtu < skb->len)
4303 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT fixed-channel frame (LE data CID) to a matching bound
 * or connected channel, subject to the channel's incoming MTU.
 * NOTE(review): sk assignment and drop/done paths appear elided here.
 */
4315 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4317 struct sock *sk = NULL;
4318 struct l2cap_chan *chan;
4320 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4328 BT_DBG("sk %p, len %d", sk, skb->len);
4330 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4333 if (chan->imtu < skb->len)
4336 if (!chan->ops->recv(chan->data, skb))
/* Top-level demultiplexer for one complete L2CAP frame: strip the basic
 * header and route by CID to signaling, connectionless, ATT, SMP, or a
 * regular data channel.
 */
4348 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4350 struct l2cap_hdr *lh = (void *) skb->data;
4354 skb_pull(skb, L2CAP_HDR_SIZE);
4355 cid = __le16_to_cpu(lh->cid);
4356 len = __le16_to_cpu(lh->len);
/* Header length must match the reassembled payload exactly */
4358 if (len != skb->len) {
4363 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4366 case L2CAP_CID_LE_SIGNALING:
4367 case L2CAP_CID_SIGNALING:
4368 l2cap_sig_channel(conn, skb);
4371 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM at the payload start */
4372 psm = get_unaligned_le16(skb->data);
4374 l2cap_conless_channel(conn, psm, skb);
4377 case L2CAP_CID_LE_DATA:
4378 l2cap_att_channel(conn, cid, skb);
/* SMP security-manager channel: failure kills the connection */
4382 if (smp_sig_channel(conn, skb))
4383 l2cap_conn_del(conn->hcon, EACCES);
4387 l2cap_data_channel(conn, cid, skb);
4392 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * @bdaddr?  Scan listening channels; an exact local-address match wins
 * over BDADDR_ANY wildcards, and role-switch channels request mastery.
 */
4394 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4396 int exact = 0, lm1 = 0, lm2 = 0;
4397 struct l2cap_chan *c;
4399 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4401 /* Find listening sockets and check their link_mode */
4402 read_lock(&chan_list_lock);
4403 list_for_each_entry(c, &chan_list, global_l) {
4404 struct sock *sk = c->sk;
4406 if (c->state != BT_LISTEN)
/* lm1 accumulates exact-address matches, lm2 wildcard matches */
4409 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4410 lm1 |= HCI_LM_ACCEPT;
4411 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4412 lm1 |= HCI_LM_MASTER;
4414 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4415 lm2 |= HCI_LM_ACCEPT;
4416 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4417 lm2 |= HCI_LM_MASTER;
4420 read_unlock(&chan_list_lock);
4422 return exact ? lm1 : lm2;
/* HCI callback: an outgoing ACL connection completed.  On success set
 * up the L2CAP connection object; otherwise tear everything down with
 * the HCI status mapped to an errno.
 */
4425 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4427 struct l2cap_conn *conn;
4429 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4432 conn = l2cap_conn_add(hcon, status);
4434 l2cap_conn_ready(conn);
4436 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the reason L2CAP wants to give for a pending
 * disconnect; default to remote-user-termination if no conn exists.
 */
4441 int l2cap_disconn_ind(struct hci_conn *hcon)
4443 struct l2cap_conn *conn = hcon->l2cap_data;
4445 BT_DBG("hcon %p", hcon);
4448 return HCI_ERROR_REMOTE_USER_TERM;
4449 return conn->disc_reason;
/* HCI callback: the ACL link is gone -- tear down the whole L2CAP
 * connection, translating the HCI reason to an errno.
 */
4452 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4454 BT_DBG("hcon %p reason %d", hcon, reason);
4456 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption starts a teardown timer (MEDIUM) or closes outright
 * (HIGH); regaining it cancels the pending timer.
 */
4460 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4462 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4465 if (encrypt == 0x00) {
4466 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4467 __clear_chan_timer(chan);
4468 __set_chan_timer(chan,
4469 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4470 } else if (chan->sec_level == BT_SECURITY_HIGH)
4471 l2cap_chan_close(chan, ECONNREFUSED);
4473 if (chan->sec_level == BT_SECURITY_MEDIUM)
4474 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption completed with @status.  For
 * LE, distribute SMP keys; for BR/EDR, walk every channel and advance
 * its state machine -- resume pending connects, answer deferred accepts
 * or tear down channels whose security requirement failed.
 * Several branch/guard lines are elided from this excerpt.
 */
4478 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4480 struct l2cap_conn *conn = hcon->l2cap_data;
4481 struct l2cap_chan *chan;
4486 BT_DBG("conn %p", conn);
4488 if (hcon->type == LE_LINK) {
4489 smp_distribute_keys(conn, 0);
4490 cancel_delayed_work(&conn->security_timer);
4495 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4496 struct sock *sk = chan->sk;
4500 BT_DBG("chan->scid %d", chan->scid);
4502 if (chan->scid == L2CAP_CID_LE_DATA) {
4503 if (!status && encrypt) {
/* ATT channel becomes usable once the link is encrypted */
4504 chan->sec_level = hcon->sec_level;
4505 l2cap_chan_ready(chan);
4512 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4517 if (!status && (chan->state == BT_CONNECTED ||
4518 chan->state == BT_CONFIG)) {
4519 l2cap_check_encryption(chan, encrypt);
4524 if (chan->state == BT_CONNECT) {
/* Security is now in place: (re)issue the deferred
 * Connect Request */
4526 struct l2cap_conn_req req;
4527 req.scid = cpu_to_le16(chan->scid);
4528 req.psm = chan->psm;
4530 chan->ident = l2cap_get_ident(conn);
4531 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4533 l2cap_send_cmd(conn, chan->ident,
4534 L2CAP_CONN_REQ, sizeof(req), &req);
4536 __clear_chan_timer(chan);
4537 __set_chan_timer(chan,
4538 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4540 } else if (chan->state == BT_CONNECT2) {
4541 struct l2cap_conn_rsp rsp;
4545 if (bt_sk(sk)->defer_setup) {
/* Userspace wants to authorize: report PENDING and
 * wake the listening parent */
4546 struct sock *parent = bt_sk(sk)->parent;
4547 res = L2CAP_CR_PEND;
4548 stat = L2CAP_CS_AUTHOR_PEND;
4550 parent->sk_data_ready(parent, 0);
4552 l2cap_state_change(chan, BT_CONFIG);
4553 res = L2CAP_CR_SUCCESS;
4554 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the connection and start the
 * disconnect timer */
4557 l2cap_state_change(chan, BT_DISCONN);
4558 __set_chan_timer(chan,
4559 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4560 res = L2CAP_CR_SEC_BLOCK;
4561 stat = L2CAP_CS_NO_INFO;
4564 rsp.scid = cpu_to_le16(chan->dcid);
4565 rsp.dcid = cpu_to_le16(chan->scid);
4566 rsp.result = cpu_to_le16(res);
4567 rsp.status = cpu_to_le16(stat);
4568 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* HCI callback: one ACL data packet arrived.  Reassemble fragmented
 * L2CAP frames across ACL start/continuation packets in conn->rx_skb,
 * validating lengths and the receiving channel's MTU along the way, and
 * hand each complete frame to l2cap_recv_frame().
 * Some guard conditions and drop labels are elided from this excerpt.
 */
4580 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4582 struct l2cap_conn *conn = hcon->l2cap_data;
4585 conn = l2cap_conn_add(hcon, 0);
4590 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4592 if (!(flags & ACL_CONT)) {
4593 struct l2cap_hdr *hdr;
4594 struct l2cap_chan *chan;
/* A new start packet while reassembly is pending means the
 * previous frame was truncated: drop it */
4599 BT_ERR("Unexpected start frame (len %d)", skb->len);
4600 kfree_skb(conn->rx_skb);
4601 conn->rx_skb = NULL;
4603 l2cap_conn_unreliable(conn, ECOMM);
4606 /* Start fragment always begin with Basic L2CAP header */
4607 if (skb->len < L2CAP_HDR_SIZE) {
4608 BT_ERR("Frame is too short (len %d)", skb->len);
4609 l2cap_conn_unreliable(conn, ECOMM);
4613 hdr = (struct l2cap_hdr *) skb->data;
4614 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4615 cid = __le16_to_cpu(hdr->cid);
4617 if (len == skb->len) {
4618 /* Complete frame received */
4619 l2cap_recv_frame(conn, skb);
4623 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4625 if (skb->len > len) {
4626 BT_ERR("Frame is too long (len %d, expected len %d)",
4628 l2cap_conn_unreliable(conn, ECOMM);
/* Reject early if the target channel's MTU cannot hold the
 * announced frame, instead of buffering doomed data */
4632 chan = l2cap_get_chan_by_scid(conn, cid);
4634 if (chan && chan->sk) {
4635 struct sock *sk = chan->sk;
4637 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4638 BT_ERR("Frame exceeding recv MTU (len %d, "
4642 l2cap_conn_unreliable(conn, ECOMM);
4648 /* Allocate skb for the complete frame (with header) */
4649 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4653 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4655 conn->rx_len = len - skb->len;
/* Continuation fragment path */
4657 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4659 if (!conn->rx_len) {
4660 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4661 l2cap_conn_unreliable(conn, ECOMM);
4665 if (skb->len > conn->rx_len) {
4666 BT_ERR("Fragment is too long (len %d, expected %d)",
4667 skb->len, conn->rx_len);
4668 kfree_skb(conn->rx_skb);
4669 conn->rx_skb = NULL;
4671 l2cap_conn_unreliable(conn, ECOMM);
4675 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4677 conn->rx_len -= skb->len;
4679 if (!conn->rx_len) {
4680 /* Complete frame received */
4681 l2cap_recv_frame(conn, conn->rx_skb);
4682 conn->rx_skb = NULL;
/*
 * l2cap_debugfs_show - seq_file "show" callback for /sys/kernel/debug/.../l2cap.
 *
 * Walks the global chan_list under chan_list_lock (read side) and emits
 * one line per L2CAP channel: source/destination bdaddr, state, PSM,
 * SCID/DCID, incoming/outgoing MTU, security level and channel mode.
 */
4691 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4693 struct l2cap_chan *c;
4695 read_lock(&chan_list_lock);
4697 list_for_each_entry(c, &chan_list, global_l) {
4698 struct sock *sk = c->sk;
4700 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4701 batostr(&bt_sk(sk)->src),
4702 batostr(&bt_sk(sk)->dst),
/* psm is stored little-endian (wire format), hence the conversion. */
4703 c->state, __le16_to_cpu(c->psm),
4704 c->scid, c->dcid, c->imtu, c->omtu,
4705 c->sec_level, c->mode);
4708 read_unlock(&chan_list_lock);
/* Open handler: bind the single-shot seq_file show routine to the file. */
4713 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4715 return single_open(file, l2cap_debugfs_show, inode->i_private);
/*
 * File operations for the read-only "l2cap" debugfs channel dump.
 * NOTE(review): upstream also sets ".read = seq_read" here; that line is
 * not visible in this excerpt -- confirm against the full source.
 */
4718 static const struct file_operations l2cap_debugfs_fops = {
4719 .open = l2cap_debugfs_open,
4721 .llseek = seq_lseek,
4722 .release = single_release,
/* Dentry of the "l2cap" debugfs file; created in l2cap_init(),
 * removed in l2cap_exit(). */
4725 static struct dentry *l2cap_debugfs;
/*
 * l2cap_init - module initialization.
 *
 * Registers the L2CAP socket layer, then creates the "l2cap" debugfs
 * file under bt_debugfs.  A debugfs failure is only logged (BT_ERR),
 * not treated as fatal; the guarding check on l2cap_debugfs is among
 * the lines missing from this excerpt.
 */
4727 int __init l2cap_init(void)
4731 err = l2cap_init_sockets();
4736 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4737 bt_debugfs, NULL, &l2cap_debugfs_fops);
4739 BT_ERR("Failed to create L2CAP debug file");
/* Module teardown: undo l2cap_init() in reverse order --
 * remove the debugfs entry, then unregister the socket layer. */
4745 void l2cap_exit(void)
4747 debugfs_remove(l2cap_debugfs);
4748 l2cap_cleanup_sockets();
/* Boolean module parameter (mode 0644: root-writable via sysfs) that
 * disables L2CAP Enhanced Retransmission Mode. */
4751 module_param(disable_ertm, bool, 0644);
4752 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");