2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
/* Module-wide state: advertised feature mask, fixed-channel map, and the
 * global channel list with its rwlock.  Forward declarations follow for
 * helpers used before their definitions.  (Excerpt: some lines elided.) */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 /* ---- L2CAP channels ---- */
/* Walk conn->chan_l for a channel matching the destination CID.
 * NOTE(review): interior lines elided in this excerpt; presumably the
 * caller holds conn->chan_lock — confirm against full source. */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 list_for_each_entry(c, &conn->chan_l, list) {
/* As above, but match on the source CID (SCID).  (Fragment.) */
89 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked socket */
/* Locked wrapper: serializes the SCID lookup under conn->chan_lock. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 struct l2cap_chan *c;
106 mutex_lock(&conn->chan_lock);
107 c = __l2cap_get_chan_by_scid(conn, cid);
108 mutex_unlock(&conn->chan_lock);
/* Find the channel whose outstanding signalling request used 'ident'.
 * (Fragment: return path elided.) */
113 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
115 struct l2cap_chan *c;
117 list_for_each_entry(c, &conn->chan_l, list) {
118 if (c->ident == ident)
/* Locked wrapper around the ident lookup, mirroring l2cap_get_chan_by_scid. */
124 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
126 struct l2cap_chan *c;
128 mutex_lock(&conn->chan_lock);
129 c = __l2cap_get_chan_by_ident(conn, ident);
130 mutex_unlock(&conn->chan_lock);
/* Scan the global channel list for a channel bound to (psm, src addr).
 * NOTE(review): presumably called with chan_list_lock held — the callers
 * visible below take write_lock(&chan_list_lock) first. */
135 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
137 struct l2cap_chan *c;
139 list_for_each_entry(c, &chan_list, global_l) {
140 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM.  If a PSM is given, fail when (psm, src) is
 * already taken; otherwise auto-allocate an odd dynamic PSM in
 * 0x1001..0x10ff (step 2 keeps the low byte odd per L2CAP rules).
 * (Fragment: error paths and return elided.) */
146 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
150 write_lock(&chan_list_lock);
152 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
165 for (p = 0x1001; p < 0x1100; p += 2)
166 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
167 chan->psm = cpu_to_le16(p);
168 chan->sport = cpu_to_le16(p);
175 write_unlock(&chan_list_lock);
/* Assign a fixed SCID to the channel under the global list lock.
 * (Fragment: the assignment line itself is elided in this excerpt.) */
179 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
181 write_lock(&chan_list_lock);
185 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic CID on this connection by linear scan
 * of [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
190 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
192 u16 cid = L2CAP_CID_DYN_START;
194 for (; cid < L2CAP_CID_DYN_END; cid++) {
195 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Unlocked state transition: log old -> new and notify the channel's
 * owner via the state_change callback. */
202 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
204 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
205 state_to_string(state));
208 chan->ops->state_change(chan->data, state);
/* Socket-locking wrapper around __l2cap_state_change.
 * NOTE(review): lock/unlock lines elided in this excerpt. */
211 static void l2cap_state_change(struct l2cap_chan *chan, int state)
213 struct sock *sk = chan->sk;
216 __l2cap_state_change(chan, state);
/* Record an error on the channel's socket (fragment: sk_err assignment elided). */
220 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
222 struct sock *sk = chan->sk;
/* Locking wrapper for __l2cap_chan_set_err (lock lines elided here). */
227 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
229 struct sock *sk = chan->sk;
232 __l2cap_chan_set_err(chan, err);
/* Delayed-work handler for the channel timer: pick an error reason from
 * the channel state (ECONNREFUSED for connect/config timeouts), close the
 * channel under conn->chan_lock, notify the owner, then drop the timer's
 * channel reference.  (Fragment: default reason path elided.) */
236 static void l2cap_chan_timeout(struct work_struct *work)
238 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
240 struct l2cap_conn *conn = chan->conn;
241 struct sock *sk = chan->sk;
244 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
246 mutex_lock(&conn->chan_lock);
249 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
250 reason = ECONNREFUSED;
251 else if (chan->state == BT_CONNECT &&
252 chan->sec_level != BT_SECURITY_SDP)
253 reason = ECONNREFUSED;
257 l2cap_chan_close(chan, reason);
261 chan->ops->close(chan->data);
262 mutex_unlock(&conn->chan_lock);
264 l2cap_chan_put(chan);
/* Allocate and initialize a new channel bound to socket sk: zeroed
 * (kzalloc), added to the global list, timer work initialized, state
 * BT_OPEN, refcount 1.  Returns the channel (NULL check elided in this
 * excerpt). */
267 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
269 struct l2cap_chan *chan;
271 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
275 mutex_init(&chan->lock);
279 write_lock(&chan_list_lock);
280 list_add(&chan->global_l, &chan_list);
281 write_unlock(&chan_list_lock);
283 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
285 chan->state = BT_OPEN;
287 atomic_set(&chan->refcnt, 1);
289 BT_DBG("sk %p chan %p", sk, chan);
/* Unlink the channel from the global list and drop the creation reference. */
294 void l2cap_chan_destroy(struct l2cap_chan *chan)
296 write_lock(&chan_list_lock);
297 list_del(&chan->global_l);
298 write_unlock(&chan_list_lock);
300 l2cap_chan_put(chan);
/* Attach a channel to a connection (caller presumably holds
 * conn->chan_lock — see l2cap_chan_add below).  Picks CIDs and MTU by
 * channel type: LE data CID on LE links, a dynamic CID otherwise;
 * connectionless and raw channels use their fixed CIDs.  Also seeds the
 * default best-effort QoS parameters, takes a channel reference, and
 * links it into conn->chan_l.  (Fragment: break statements elided.) */
303 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
305 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
306 chan->psm, chan->dcid);
308 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
312 switch (chan->chan_type) {
313 case L2CAP_CHAN_CONN_ORIENTED:
314 if (conn->hcon->type == LE_LINK) {
316 chan->omtu = L2CAP_LE_DEFAULT_MTU;
317 chan->scid = L2CAP_CID_LE_DATA;
318 chan->dcid = L2CAP_CID_LE_DATA;
320 /* Alloc CID for connection-oriented socket */
321 chan->scid = l2cap_alloc_cid(conn);
322 chan->omtu = L2CAP_DEFAULT_MTU;
326 case L2CAP_CHAN_CONN_LESS:
327 /* Connectionless socket */
328 chan->scid = L2CAP_CID_CONN_LESS;
329 chan->dcid = L2CAP_CID_CONN_LESS;
330 chan->omtu = L2CAP_DEFAULT_MTU;
334 /* Raw socket can send/recv signalling messages only */
335 chan->scid = L2CAP_CID_SIGNALING;
336 chan->dcid = L2CAP_CID_SIGNALING;
337 chan->omtu = L2CAP_DEFAULT_MTU;
340 chan->local_id = L2CAP_BESTEFFORT_ID;
341 chan->local_stype = L2CAP_SERV_BESTEFFORT;
342 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
343 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
344 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
345 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
347 l2cap_chan_hold(chan);
349 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper: add the channel under conn->chan_lock. */
352 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
354 mutex_lock(&conn->chan_lock);
355 __l2cap_chan_add(conn, chan);
356 mutex_unlock(&conn->chan_lock);
/* Detach a channel from its connection and tear down its state:
 * stop the channel timer, unlink from conn->chan_l, drop the conn's
 * reference on the hcon, move to BT_CLOSED (zapping the socket and
 * recording err), unlink from an accept queue and wake the parent,
 * then purge queued frames.  For ERTM also stop the retrans/monitor/ack
 * timers and free the SREJ bookkeeping.
 * (Fragment: several lock/cleanup lines elided.) */
360 * Must be called on the locked socket. */
361 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
363 struct sock *sk = chan->sk;
364 struct l2cap_conn *conn = chan->conn;
365 struct sock *parent = bt_sk(sk)->parent;
367 __clear_chan_timer(chan);
369 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
372 /* Delete from channel list */
373 list_del(&chan->list);
375 l2cap_chan_put(chan);
378 hci_conn_put(conn->hcon);
381 __l2cap_state_change(chan, BT_CLOSED);
382 sock_set_flag(sk, SOCK_ZAPPED);
385 __l2cap_chan_set_err(chan, err);
388 bt_accept_unlink(sk);
389 parent->sk_data_ready(parent, 0);
391 sk->sk_state_change(sk);
393 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
394 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
397 skb_queue_purge(&chan->tx_q);
399 if (chan->mode == L2CAP_MODE_ERTM) {
400 struct srej_list *l, *tmp;
402 __clear_retrans_timer(chan);
403 __clear_monitor_timer(chan);
404 __clear_ack_timer(chan);
406 skb_queue_purge(&chan->srej_q);
408 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child of a listening socket: dequeue each
 * pending sk, stop its timer, close the channel with ECONNRESET and
 * notify its owner. */
415 static void l2cap_chan_cleanup_listen(struct sock *parent)
419 BT_DBG("parent %p", parent);
421 /* Close not yet accepted channels */
422 while ((sk = bt_accept_dequeue(parent, NULL))) {
423 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
425 __clear_chan_timer(chan);
427 l2cap_chan_close(chan, ECONNRESET);
430 chan->ops->close(chan->data);
/* State-dependent channel shutdown:
 *  - listening: clean up pending children, mark closed/zapped;
 *  - connected/config on ACL: arm the timer and send a Disconnect Request;
 *  - connect2 on ACL: answer the pending Connect Request with
 *    SEC_BLOCK (defer_setup) or BAD_PSM, then delete;
 *  - otherwise: delete immediately, or just zap the socket.
 * (Fragment: case labels and some braces elided in this excerpt.) */
434 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
436 struct l2cap_conn *conn = chan->conn;
437 struct sock *sk = chan->sk;
439 BT_DBG("chan %p state %s sk %p", chan,
440 state_to_string(chan->state), sk);
442 switch (chan->state) {
444 l2cap_chan_cleanup_listen(sk);
446 __l2cap_state_change(chan, BT_CLOSED);
447 sock_set_flag(sk, SOCK_ZAPPED);
452 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
453 conn->hcon->type == ACL_LINK) {
454 __clear_chan_timer(chan);
455 __set_chan_timer(chan, sk->sk_sndtimeo);
456 l2cap_send_disconn_req(conn, chan, reason);
458 l2cap_chan_del(chan, reason);
462 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
463 conn->hcon->type == ACL_LINK) {
464 struct l2cap_conn_rsp rsp;
467 if (bt_sk(sk)->defer_setup)
468 result = L2CAP_CR_SEC_BLOCK;
470 result = L2CAP_CR_BAD_PSM;
471 l2cap_state_change(chan, BT_DISCONN);
473 rsp.scid = cpu_to_le16(chan->dcid);
474 rsp.dcid = cpu_to_le16(chan->scid);
475 rsp.result = cpu_to_le16(result);
476 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
477 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
481 l2cap_chan_del(chan, reason);
486 l2cap_chan_del(chan, reason);
490 sock_set_flag(sk, SOCK_ZAPPED);
/* Map (channel type, security level, PSM) to an HCI authentication
 * requirement: raw channels use dedicated bonding, PSM 0x0001 (SDP) is
 * downgraded to no-bonding, everything else uses general bonding.
 * (Fragment: some case labels elided.) */
495 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
497 if (chan->chan_type == L2CAP_CHAN_RAW) {
498 switch (chan->sec_level) {
499 case BT_SECURITY_HIGH:
500 return HCI_AT_DEDICATED_BONDING_MITM;
501 case BT_SECURITY_MEDIUM:
502 return HCI_AT_DEDICATED_BONDING;
504 return HCI_AT_NO_BONDING;
506 } else if (chan->psm == cpu_to_le16(0x0001)) {
507 if (chan->sec_level == BT_SECURITY_LOW)
508 chan->sec_level = BT_SECURITY_SDP;
510 if (chan->sec_level == BT_SECURITY_HIGH)
511 return HCI_AT_NO_BONDING_MITM;
513 return HCI_AT_NO_BONDING;
515 switch (chan->sec_level) {
516 case BT_SECURITY_HIGH:
517 return HCI_AT_GENERAL_BONDING_MITM;
518 case BT_SECURITY_MEDIUM:
519 return HCI_AT_GENERAL_BONDING;
521 return HCI_AT_NO_BONDING;
526 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level with the
 * auth type derived above; returns hci_conn_security()'s verdict. */
527 int l2cap_chan_check_security(struct l2cap_chan *chan)
529 struct l2cap_conn *conn = chan->conn;
532 auth_type = l2cap_get_auth_type(chan);
534 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling identifier under conn->lock; kernel uses
 * 1-128 and wraps when the counter exceeds 128.  (Fragment: wrap reset
 * and return elided.) */
537 static u8 l2cap_get_ident(struct l2cap_conn *conn)
541 /* Get next available identificator.
542 * 1 - 128 are used by kernel.
543 * 129 - 199 are reserved.
544 * 200 - 254 are used by utilities like l2ping, etc.
547 spin_lock(&conn->lock);
549 if (++conn->tx_ident > 128)
554 spin_unlock(&conn->lock);
/* Build a signalling command skb and push it on the HCI channel, using
 * ACL_START_NO_FLUSH when the controller supports non-flushable packets,
 * at maximum priority.  (Fragment: NULL-skb bail-out elided.) */
559 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
561 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
564 BT_DBG("code 0x%2.2x", code);
569 if (lmp_no_flush_capable(conn->hcon->hdev))
570 flags = ACL_START_NO_FLUSH;
574 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
575 skb->priority = HCI_PRIO_MAX;
577 hci_send_acl(conn->hchan, skb, flags);
/* Transmit one data skb on the channel's HCI channel, choosing
 * non-flushable ACL start flags when the channel isn't flagged flushable
 * and the controller allows it; force_active mirrors FLAG_FORCE_ACTIVE. */
580 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
582 struct hci_conn *hcon = chan->conn->hcon;
585 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
588 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
589 lmp_no_flush_capable(hcon->hdev))
590 flags = ACL_START_NO_FLUSH;
594 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
595 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM S-frame carrying 'control': size the header for
 * enhanced vs extended control fields (plus FCS when CRC16 is on), fold
 * in pending F-bit/P-bit requests, append the CRC16 over the frame, and
 * send at max priority.  Only valid while BT_CONNECTED.
 * (Fragment: allocation-failure path elided.) */
598 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
601 struct l2cap_hdr *lh;
602 struct l2cap_conn *conn = chan->conn;
605 if (chan->state != BT_CONNECTED)
608 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
609 hlen = L2CAP_EXT_HDR_SIZE;
611 hlen = L2CAP_ENH_HDR_SIZE;
613 if (chan->fcs == L2CAP_FCS_CRC16)
614 hlen += L2CAP_FCS_SIZE;
616 BT_DBG("chan %p, control 0x%8.8x", chan, control);
618 count = min_t(unsigned int, conn->mtu, hlen);
620 control |= __set_sframe(chan);
622 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
623 control |= __set_ctrl_final(chan);
625 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
626 control |= __set_ctrl_poll(chan);
628 skb = bt_skb_alloc(count, GFP_ATOMIC);
632 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
633 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
634 lh->cid = cpu_to_le16(chan->dcid);
636 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
638 if (chan->fcs == L2CAP_FCS_CRC16) {
639 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
640 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
643 skb->priority = HCI_PRIO_MAX;
644 l2cap_do_send(chan, skb);
/* Send RNR if we're locally busy (remembering RNR_SENT), else RR, with
 * the current buffer_seq as ReqSeq. */
647 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
649 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
650 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
651 set_bit(CONN_RNR_SENT, &chan->conn_state);
653 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
655 control |= __set_reqseq(chan, chan->buffer_seq);
657 l2cap_send_sframe(chan, control);
/* True when no Connect Request is outstanding for this channel. */
660 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
662 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment.  If the peer's feature mask is already
 * known (or being fetched), send a Connect Request once security passes
 * and none is pending; otherwise first issue an Information Request for
 * the feature mask and arm the info timer. */
665 static void l2cap_do_start(struct l2cap_chan *chan)
667 struct l2cap_conn *conn = chan->conn;
669 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
670 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
673 if (l2cap_chan_check_security(chan) &&
674 __l2cap_no_conn_pending(chan)) {
675 struct l2cap_conn_req req;
676 req.scid = cpu_to_le16(chan->scid);
679 chan->ident = l2cap_get_ident(conn);
680 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
682 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
686 struct l2cap_info_req req;
687 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
689 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
690 conn->info_ident = l2cap_get_ident(conn);
692 schedule_delayed_work(&conn->info_timer,
693 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
695 l2cap_send_cmd(conn, conn->info_ident,
696 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether ERTM/streaming mode is supported by both the local
 * feature mask and the peer's (feat_mask).  (Fragment: default case
 * elided.) */
700 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
702 u32 local_feat_mask = l2cap_feat_mask;
704 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
707 case L2CAP_MODE_ERTM:
708 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
709 case L2CAP_MODE_STREAMING:
710 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request for the channel: stop ERTM timers first,
 * then transition to BT_DISCONN and record err on the socket. */
716 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
718 struct l2cap_disconn_req req;
723 if (chan->mode == L2CAP_MODE_ERTM) {
724 __clear_retrans_timer(chan);
725 __clear_monitor_timer(chan);
726 __clear_ack_timer(chan);
729 req.dcid = cpu_to_le16(chan->dcid);
730 req.scid = cpu_to_le16(chan->scid);
731 l2cap_send_cmd(conn, l2cap_get_ident(conn),
732 L2CAP_DISCONN_REQ, sizeof(req), &req);
734 __l2cap_state_change(chan, BT_DISCONN);
735 __l2cap_chan_set_err(chan, err);
738 /* ---- L2CAP connections ---- */
/* Drive all connection-oriented channels on 'conn' forward once
 * security/feature information becomes available:
 *  - BT_CONNECT channels get a Connect Request (or are closed when the
 *    requested mode is unsupported in state-2 device config);
 *  - BT_CONNECT2 channels get a Connect Response — pending/authorization
 *    status when deferred or security is still in progress, success
 *    otherwise — followed by the first Configure Request.
 * Runs under conn->chan_lock.  (Fragment: per-channel locking elided.) */
739 static void l2cap_conn_start(struct l2cap_conn *conn)
741 struct l2cap_chan *chan, *tmp;
743 BT_DBG("conn %p", conn);
745 mutex_lock(&conn->chan_lock);
747 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
748 struct sock *sk = chan->sk;
752 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
757 if (chan->state == BT_CONNECT) {
758 struct l2cap_conn_req req;
760 if (!l2cap_chan_check_security(chan) ||
761 !__l2cap_no_conn_pending(chan)) {
766 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
767 && test_bit(CONF_STATE2_DEVICE,
768 &chan->conf_state)) {
769 /* l2cap_chan_close() calls list_del(chan)
770 * so release the lock */
771 l2cap_chan_close(chan, ECONNRESET);
776 req.scid = cpu_to_le16(chan->scid);
779 chan->ident = l2cap_get_ident(conn);
780 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
782 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
785 } else if (chan->state == BT_CONNECT2) {
786 struct l2cap_conn_rsp rsp;
788 rsp.scid = cpu_to_le16(chan->dcid);
789 rsp.dcid = cpu_to_le16(chan->scid);
791 if (l2cap_chan_check_security(chan)) {
792 if (bt_sk(sk)->defer_setup) {
793 struct sock *parent = bt_sk(sk)->parent;
794 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
795 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
797 parent->sk_data_ready(parent, 0);
800 __l2cap_state_change(chan, BT_CONFIG);
801 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
802 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
805 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
806 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
809 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
812 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
813 rsp.result != L2CAP_CR_SUCCESS) {
818 set_bit(CONF_REQ_SENT, &chan->conf_state);
819 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
820 l2cap_build_conf_req(chan, buf), buf);
821 chan->num_conf_req++;
827 mutex_unlock(&conn->chan_lock);
830 /* Find socket with cid and source bdaddr.
831 * Returns closest match, locked.
/* Global-list lookup by (state, SCID): an exact source-address match
 * returns immediately; a BDADDR_ANY binding is remembered as the
 * closest match.  (Fragment: fallback return elided.) */
833 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
835 struct l2cap_chan *c, *c1 = NULL;
837 read_lock(&chan_list_lock);
839 list_for_each_entry(c, &chan_list, global_l) {
840 struct sock *sk = c->sk;
842 if (state && c->state != state)
845 if (c->scid == cid) {
847 if (!bacmp(&bt_sk(sk)->src, src)) {
848 read_unlock(&chan_list_lock);
853 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
858 read_unlock(&chan_list_lock);
/* Incoming LE connection: find a listener on the LE data CID, spawn a
 * child channel unless the backlog is full, copy addresses, enqueue on
 * the accept queue, add to the connection, and mark it connected while
 * waking the listener.  (Fragment: early-return and lock lines elided.) */
863 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
865 struct sock *parent, *sk;
866 struct l2cap_chan *chan, *pchan;
870 /* Check if we have socket listening on cid */
871 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
880 /* Check for backlog size */
881 if (sk_acceptq_is_full(parent)) {
882 BT_DBG("backlog full %d", parent->sk_ack_backlog);
886 chan = pchan->ops->new_connection(pchan->data);
892 hci_conn_hold(conn->hcon);
894 bacpy(&bt_sk(sk)->src, conn->src);
895 bacpy(&bt_sk(sk)->dst, conn->dst);
897 bt_accept_enqueue(parent, sk);
899 l2cap_chan_add(conn, chan);
901 __set_chan_timer(chan, sk->sk_sndtimeo);
903 __l2cap_state_change(chan, BT_CONNECTED);
904 parent->sk_data_ready(parent, 0);
907 release_sock(parent);
/* Finalize channel setup: clear config state and timer, go to
 * BT_CONNECTED, and wake the socket (and its accepting parent, if any). */
910 static void l2cap_chan_ready(struct l2cap_chan *chan)
912 struct sock *sk = chan->sk;
913 struct sock *parent = bt_sk(sk)->parent;
915 BT_DBG("sk %p, parent %p", sk, parent);
917 chan->conf_state = 0;
918 __clear_chan_timer(chan);
920 __l2cap_state_change(chan, BT_CONNECTED);
921 sk->sk_state_change(sk);
924 parent->sk_data_ready(parent, 0);
/* Link-level connection is up.  For LE: handle incoming listeners and
 * start SMP security on outgoing links.  Then, per channel: LE channels
 * become ready once SMP security passes; non-connection-oriented
 * channels go straight to BT_CONNECTED; BT_CONNECT channels get
 * l2cap_do_start().  (Fragment: per-channel locking elided.) */
927 static void l2cap_conn_ready(struct l2cap_conn *conn)
929 struct l2cap_chan *chan;
931 BT_DBG("conn %p", conn);
933 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
934 l2cap_le_conn_ready(conn);
936 if (conn->hcon->out && conn->hcon->type == LE_LINK)
937 smp_conn_security(conn, conn->hcon->pending_sec_level);
939 mutex_lock(&conn->chan_lock);
941 list_for_each_entry(chan, &conn->chan_l, list) {
942 struct sock *sk = chan->sk;
946 if (conn->hcon->type == LE_LINK) {
947 if (smp_conn_security(conn, chan->sec_level))
948 l2cap_chan_ready(chan);
950 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
951 __clear_chan_timer(chan);
952 __l2cap_state_change(chan, BT_CONNECTED);
953 sk->sk_state_change(sk);
955 } else if (chan->state == BT_CONNECT)
956 l2cap_do_start(chan);
961 mutex_unlock(&conn->chan_lock);
964 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate 'err' to every channel that asked for forced reliability. */
965 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
967 struct l2cap_chan *chan;
969 BT_DBG("conn %p", conn);
971 mutex_lock(&conn->chan_lock);
973 list_for_each_entry(chan, &conn->chan_l, list) {
974 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
975 __l2cap_chan_set_err(chan, err);
978 mutex_unlock(&conn->chan_lock);
/* Info-request timer expired: mark the feature-mask exchange done anyway
 * and let pending channels proceed. */
981 static void l2cap_info_timeout(struct work_struct *work)
983 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
986 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
987 conn->info_ident = 0;
989 l2cap_conn_start(conn);
/* Tear down the L2CAP connection attached to 'hcon': free any partial
 * rx skb, delete every channel with 'err', drop the HCI channel, cancel
 * the info timer if armed, destroy SMP state if pending, and detach
 * conn from the hcon.  (Fragment: kfree(conn) and locks elided.) */
992 static void l2cap_conn_del(struct hci_conn *hcon, int err)
994 struct l2cap_conn *conn = hcon->l2cap_data;
995 struct l2cap_chan *chan, *l;
1001 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1003 kfree_skb(conn->rx_skb);
1005 mutex_lock(&conn->chan_lock);
1008 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1011 l2cap_chan_del(chan, err);
1013 chan->ops->close(chan->data);
1016 mutex_unlock(&conn->chan_lock);
1018 hci_chan_del(conn->hchan);
1020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1021 cancel_delayed_work_sync(&conn->info_timer);
1023 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1024 cancel_delayed_work_sync(&conn->security_timer);
1025 smp_chan_destroy(conn);
1028 hcon->l2cap_data = NULL;
/* SMP security timer expired: drop the whole connection with ETIMEDOUT. */
1032 static void security_timeout(struct work_struct *work)
1034 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1035 security_timer.work);
1037 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return existing — early-return elided) the l2cap_conn for
 * an hcon: allocate an HCI channel and the conn, pick the MTU from the
 * LE or ACL controller limits, record addresses, init locks/lists, and
 * arm the security timer (LE) or info timer (BR/EDR). */
1040 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1042 struct l2cap_conn *conn = hcon->l2cap_data;
1043 struct hci_chan *hchan;
1048 hchan = hci_chan_create(hcon);
1052 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1054 hci_chan_del(hchan);
1058 hcon->l2cap_data = conn;
1060 conn->hchan = hchan;
1062 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1064 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1065 conn->mtu = hcon->hdev->le_mtu;
1067 conn->mtu = hcon->hdev->acl_mtu;
1069 conn->src = &hcon->hdev->bdaddr;
1070 conn->dst = &hcon->dst;
1072 conn->feat_mask = 0;
1074 spin_lock_init(&conn->lock);
1075 mutex_init(&conn->chan_lock);
1077 INIT_LIST_HEAD(&conn->chan_l);
1079 if (hcon->type == LE_LINK)
1080 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1082 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1084 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1089 /* ---- Socket interface ---- */
1091 /* Find socket with psm and source bdaddr.
1092 * Returns closest match.
/* Same closest-match scheme as the SCID lookup, keyed on PSM instead. */
1094 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1096 struct l2cap_chan *c, *c1 = NULL;
1098 read_lock(&chan_list_lock);
1100 list_for_each_entry(c, &chan_list, global_l) {
1101 struct sock *sk = c->sk;
1103 if (state && c->state != state)
1106 if (c->psm == psm) {
1108 if (!bacmp(&bt_sk(sk)->src, src)) {
1109 read_unlock(&chan_list_lock);
1114 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1119 read_unlock(&chan_list_lock);
/* Outgoing connect: validate the PSM (must be odd with clear bit 8
 * unless a fixed CID or raw channel), require PSM or CID for
 * connection-oriented channels, check the mode, then create an LE or
 * ACL link via hci_connect with the derived auth type, attach the
 * channel to the resulting conn, move to BT_CONNECT with the timer
 * armed, and — if the link is already up — either complete
 * non-connection-oriented channels or start the L2CAP handshake.
 * (Fragment: error labels, state cases and unlock paths elided.) */
1124 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1126 struct sock *sk = chan->sk;
1127 bdaddr_t *src = &bt_sk(sk)->src;
1128 struct l2cap_conn *conn;
1129 struct hci_conn *hcon;
1130 struct hci_dev *hdev;
1134 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1137 hdev = hci_get_route(dst, src);
1139 return -EHOSTUNREACH;
1145 /* PSM must be odd and lsb of upper byte must be 0 */
1146 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1147 chan->chan_type != L2CAP_CHAN_RAW) {
1152 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1157 switch (chan->mode) {
1158 case L2CAP_MODE_BASIC:
1160 case L2CAP_MODE_ERTM:
1161 case L2CAP_MODE_STREAMING:
1170 switch (sk->sk_state) {
1174 /* Already connecting */
1179 /* Already connected */
1193 /* Set destination address and psm */
1194 bacpy(&bt_sk(sk)->dst, dst);
1198 auth_type = l2cap_get_auth_type(chan);
1200 if (chan->dcid == L2CAP_CID_LE_DATA)
1201 hcon = hci_connect(hdev, LE_LINK, dst,
1202 chan->sec_level, auth_type);
1204 hcon = hci_connect(hdev, ACL_LINK, dst,
1205 chan->sec_level, auth_type);
1208 err = PTR_ERR(hcon);
1212 conn = l2cap_conn_add(hcon, 0);
1219 /* Update source addr of the socket */
1220 bacpy(src, conn->src);
1222 l2cap_chan_add(conn, chan);
1224 __l2cap_state_change(chan, BT_CONNECT);
1225 __set_chan_timer(chan, sk->sk_sndtimeo);
1227 if (hcon->state == BT_CONNECTED) {
1228 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1229 __clear_chan_timer(chan);
1230 if (l2cap_chan_check_security(chan))
1231 __l2cap_state_change(chan, BT_CONNECTED);
1233 l2cap_do_start(chan);
1239 hci_dev_unlock(hdev);
/* Block (interruptibly) until all ERTM frames are acked or the
 * connection goes away; returns a socket error, a signal errno, or 0.
 * (Fragment: timeout initialization and release/lock dance elided.) */
1244 int __l2cap_wait_ack(struct sock *sk)
1246 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1247 DECLARE_WAITQUEUE(wait, current);
1251 add_wait_queue(sk_sleep(sk), &wait);
1252 set_current_state(TASK_INTERRUPTIBLE);
1253 while (chan->unacked_frames > 0 && chan->conn) {
1257 if (signal_pending(current)) {
1258 err = sock_intr_errno(timeo);
1263 timeo = schedule_timeout(timeo);
1265 set_current_state(TASK_INTERRUPTIBLE);
1267 err = sock_error(sk);
1271 set_current_state(TASK_RUNNING);
1272 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: if the retry budget (remote_max_tx) is exhausted,
 * disconnect with ECONNABORTED; otherwise bump the retry count, rearm,
 * and poll the peer with RR/RNR + P-bit. */
1276 static void l2cap_monitor_timeout(struct work_struct *work)
1278 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1279 monitor_timer.work);
1280 struct sock *sk = chan->sk;
1282 BT_DBG("chan %p", chan);
1285 if (chan->retry_count >= chan->remote_max_tx) {
1286 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1291 chan->retry_count++;
1292 __set_monitor_timer(chan);
1294 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: start the monitor cycle (retry 1), flag
 * that we await an F-bit, and poll the peer. */
1298 static void l2cap_retrans_timeout(struct work_struct *work)
1300 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1301 retrans_timer.work);
1302 struct sock *sk = chan->sk;
1304 BT_DBG("chan %p", chan);
1307 chan->retry_count = 1;
1308 __set_monitor_timer(chan);
1310 set_bit(CONN_WAIT_F, &chan->conn_state);
1312 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Drop frames from the head of tx_q that the peer has acknowledged,
 * stopping at expected_ack_seq; clear the retrans timer once nothing is
 * outstanding.  (Fragment: kfree_skb of dequeued frames elided.) */
1316 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1318 struct sk_buff *skb;
1320 while ((skb = skb_peek(&chan->tx_q)) &&
1321 chan->unacked_frames) {
1322 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1325 skb = skb_dequeue(&chan->tx_q);
1328 chan->unacked_frames--;
1331 if (!chan->unacked_frames)
1332 __clear_retrans_timer(chan);
/* Streaming mode transmit: drain tx_q, stamping each frame with the next
 * TxSeq in its control field and recomputing the trailing CRC16 when FCS
 * is enabled, then send and advance next_tx_seq. */
1335 static void l2cap_streaming_send(struct l2cap_chan *chan)
1337 struct sk_buff *skb;
1341 while ((skb = skb_dequeue(&chan->tx_q))) {
1342 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1343 control |= __set_txseq(chan, chan->next_tx_seq);
1344 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1346 if (chan->fcs == L2CAP_FCS_CRC16) {
1347 fcs = crc16(0, (u8 *)skb->data,
1348 skb->len - L2CAP_FCS_SIZE);
1349 put_unaligned_le16(fcs,
1350 skb->data + skb->len - L2CAP_FCS_SIZE);
1353 l2cap_do_send(chan, skb);
1355 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the queued I-frame with TxSeq == tx_seq: locate it in
 * tx_q, abort the link if its retry count hit remote_max_tx, otherwise
 * clone it, rebuild the control field (SAR bits kept, F-bit folded in,
 * fresh ReqSeq/TxSeq), refresh the CRC16 and send the clone. */
1359 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1361 struct sk_buff *skb, *tx_skb;
1365 skb = skb_peek(&chan->tx_q);
1369 while (bt_cb(skb)->tx_seq != tx_seq) {
1370 if (skb_queue_is_last(&chan->tx_q, skb))
1373 skb = skb_queue_next(&chan->tx_q, skb);
1376 if (chan->remote_max_tx &&
1377 bt_cb(skb)->retries == chan->remote_max_tx) {
1378 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1382 tx_skb = skb_clone(skb, GFP_ATOMIC);
1383 bt_cb(skb)->retries++;
1385 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1386 control &= __get_sar_mask(chan);
1388 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1389 control |= __set_ctrl_final(chan);
1391 control |= __set_reqseq(chan, chan->buffer_seq);
1392 control |= __set_txseq(chan, tx_seq);
1394 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1396 if (chan->fcs == L2CAP_FCS_CRC16) {
1397 fcs = crc16(0, (u8 *)tx_skb->data,
1398 tx_skb->len - L2CAP_FCS_SIZE);
1399 put_unaligned_le16(fcs,
1400 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1403 l2cap_do_send(chan, tx_skb);
/* ERTM transmit loop: while frames remain at tx_send_head and the tx
 * window has room, clone each frame, rebuild its control field and FCS,
 * send it, arm the retrans timer, stamp TxSeq, count first-time sends as
 * unacked (cancelling the ack timer), and advance tx_send_head.
 * Disconnects if a frame already hit remote_max_tx retries.
 * NOTE(review): the visible FCS lines (1440-1443) compute and store the
 * CRC into skb->data while transmitting tx_skb — in the upstream kernel
 * these operate on tx_skb->data; possible transcription artifact of this
 * excerpt — verify against full source. */
1406 static int l2cap_ertm_send(struct l2cap_chan *chan)
1408 struct sk_buff *skb, *tx_skb;
1413 if (chan->state != BT_CONNECTED)
1416 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1418 if (chan->remote_max_tx &&
1419 bt_cb(skb)->retries == chan->remote_max_tx) {
1420 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1424 tx_skb = skb_clone(skb, GFP_ATOMIC);
1426 bt_cb(skb)->retries++;
1428 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1429 control &= __get_sar_mask(chan);
1431 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1432 control |= __set_ctrl_final(chan);
1434 control |= __set_reqseq(chan, chan->buffer_seq);
1435 control |= __set_txseq(chan, chan->next_tx_seq);
1437 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1439 if (chan->fcs == L2CAP_FCS_CRC16) {
1440 fcs = crc16(0, (u8 *)skb->data,
1441 tx_skb->len - L2CAP_FCS_SIZE);
1442 put_unaligned_le16(fcs, skb->data +
1443 tx_skb->len - L2CAP_FCS_SIZE);
1446 l2cap_do_send(chan, tx_skb);
1448 __set_retrans_timer(chan);
1450 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1452 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1454 if (bt_cb(skb)->retries == 1) {
1455 chan->unacked_frames++;
1458 __clear_ack_timer(chan);
1461 chan->frames_sent++;
1463 if (skb_queue_is_last(&chan->tx_q, skb))
1464 chan->tx_send_head = NULL;
1466 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the oldest unacked frame and resend from
 * expected_ack_seq via the normal ERTM send path. */
1472 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1476 if (!skb_queue_empty(&chan->tx_q))
1477 chan->tx_send_head = chan->tx_q.next;
1479 chan->next_tx_seq = chan->expected_ack_seq;
1480 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy; otherwise try
 * to piggyback the ack on pending I-frames, falling back to an explicit
 * RR S-frame.  (Fragment: early returns elided.) */
1484 static void __l2cap_send_ack(struct l2cap_chan *chan)
1488 control |= __set_reqseq(chan, chan->buffer_seq);
1490 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1491 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1492 set_bit(CONN_RNR_SENT, &chan->conn_state);
1493 l2cap_send_sframe(chan, control);
1497 if (l2cap_ertm_send(chan) > 0)
1500 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1501 l2cap_send_sframe(chan, control);
/* Cancel the delayed-ack timer and ack immediately. */
1504 static void l2cap_send_ack(struct l2cap_chan *chan)
1506 __clear_ack_timer(chan);
1507 __l2cap_send_ack(chan);
/* Send an SREJ S-frame with the F-bit set, requesting the sequence
 * number recorded at the tail of the SREJ list. */
1510 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1512 struct srej_list *tail;
1515 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1516 control |= __set_ctrl_final(chan);
1518 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1519 control |= __set_reqseq(chan, tail->tx_seq);
1521 l2cap_send_sframe(chan, control);
/* Copy a user iovec into skb: first 'count' bytes into the head, the
 * remainder into MTU-sized continuation fragments chained on frag_list,
 * each allocated via the owner's alloc_skb callback and inheriting the
 * head skb's priority.  (Fragment: error returns and loop bounds elided.) */
1524 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1526 struct l2cap_conn *conn = chan->conn;
1527 struct sk_buff **frag;
1530 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1536 /* Continuation fragments (no L2CAP header) */
1537 frag = &skb_shinfo(skb)->frag_list;
1539 count = min_t(unsigned int, conn->mtu, len);
1541 *frag = chan->ops->alloc_skb(chan, count,
1542 msg->msg_flags & MSG_DONTWAIT, &err);
1546 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1549 (*frag)->priority = skb->priority;
1554 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * payload copied from the user iovec (fragmenting at conn->mtu), with
 * the given skb priority.  Returns the skb or ERR_PTR. */
1560 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1561 struct msghdr *msg, size_t len,
1564 struct l2cap_conn *conn = chan->conn;
1565 struct sk_buff *skb;
1566 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1567 struct l2cap_hdr *lh;
1569 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1571 count = min_t(unsigned int, (conn->mtu - hlen), len);
1573 skb = chan->ops->alloc_skb(chan, count + hlen,
1574 msg->msg_flags & MSG_DONTWAIT, &err);
1577 return ERR_PTR(err);
1579 skb->priority = priority;
1581 /* Create L2CAP header */
1582 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1583 lh->cid = cpu_to_le16(chan->dcid);
1584 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1585 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1587 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1588 if (unlikely(err < 0)) {
1590 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus the user
 * payload copied from @msg. Returns the skb or an ERR_PTR on failure.
 */
1595 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1596 struct msghdr *msg, size_t len,
1599 struct l2cap_conn *conn = chan->conn;
1600 struct sk_buff *skb;
1601 int err, count, hlen = L2CAP_HDR_SIZE;
1602 struct l2cap_hdr *lh;
1604 BT_DBG("chan %p len %d", chan, (int)len);
/* Linear part limited by the connection MTU; rest goes in fragments */
1606 count = min_t(unsigned int, (conn->mtu - hlen), len);
1608 skb = chan->ops->alloc_skb(chan, count + hlen,
1609 msg->msg_flags & MSG_DONTWAIT, &err);
1612 return ERR_PTR(err);
1614 skb->priority = priority;
1616 /* Create L2CAP header */
1617 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1618 lh->cid = cpu_to_le16(chan->dcid);
1619 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1621 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1622 if (unlikely(err < 0)) {
1624 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, enhanced or extended
 * control field, optional SDU-length field (for SAR start frames, when
 * @sdulen is used), payload, and an FCS placeholder when CRC16 is enabled.
 * Returns the skb or an ERR_PTR on failure.
 */
1629 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1630 struct msghdr *msg, size_t len,
1631 u32 control, u16 sdulen)
1633 struct l2cap_conn *conn = chan->conn;
1634 struct sk_buff *skb;
1635 int err, count, hlen;
1636 struct l2cap_hdr *lh;
1638 BT_DBG("chan %p len %d", chan, (int)len);
/* Connection check (condition elided in this excerpt) */
1641 return ERR_PTR(-ENOTCONN);
/* Extended control field (4 bytes) vs enhanced (2 bytes) */
1643 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1644 hlen = L2CAP_EXT_HDR_SIZE;
1646 hlen = L2CAP_ENH_HDR_SIZE;
/* SDU length field present on SAR start frames (condition elided) */
1649 hlen += L2CAP_SDULEN_SIZE;
1651 if (chan->fcs == L2CAP_FCS_CRC16)
1652 hlen += L2CAP_FCS_SIZE;
1654 count = min_t(unsigned int, (conn->mtu - hlen), len);
1656 skb = chan->ops->alloc_skb(chan, count + hlen,
1657 msg->msg_flags & MSG_DONTWAIT, &err);
1660 return ERR_PTR(err);
1662 /* Create L2CAP header */
1663 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1664 lh->cid = cpu_to_le16(chan->dcid);
1665 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Control field size depends on FLAG_EXT_CTRL */
1667 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1670 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1672 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1673 if (unlikely(err < 0)) {
1675 return ERR_PTR(err);
/* Reserve space for the FCS; actual CRC is filled in at send time */
1678 if (chan->fcs == L2CAP_FCS_CRC16)
1679 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1681 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a chain of I-frames:
 * one SAR-START frame (carrying the total SDU length), zero or more
 * SAR-CONTINUE frames, and a final SAR-END frame. Frames are built on a
 * local queue and only spliced onto chan->tx_q once all succeed, so a
 * mid-stream allocation failure leaves the tx queue untouched.
 */
1685 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1687 struct sk_buff *skb;
1688 struct sk_buff_head sar_queue;
1692 skb_queue_head_init(&sar_queue);
/* First segment: SAR start, sdulen field carries the full SDU length */
1693 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1694 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1696 return PTR_ERR(skb);
1698 __skb_queue_tail(&sar_queue, skb);
1699 len -= chan->remote_mps;
1700 size += chan->remote_mps;
/* Middle segments use CONTINUE; the final (possibly short) one uses END */
1705 if (len > chan->remote_mps) {
1706 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1707 buflen = chan->remote_mps;
1709 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1713 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure drop everything built so far */
1715 skb_queue_purge(&sar_queue);
1716 return PTR_ERR(skb);
1719 __skb_queue_tail(&sar_queue, skb);
/* All segments built: commit them to the channel tx queue atomically */
1723 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1724 if (chan->tx_send_head == NULL)
1725 chan->tx_send_head = sar_queue.next;
/* Top-level channel send entry point. Dispatches on channel type/mode:
 * connectionless channels send directly; basic mode checks the outgoing
 * MTU and sends one PDU; ERTM/streaming queue I-frames (segmenting via SAR
 * when the SDU exceeds the remote MPS) and then kick the transmit engine.
 * Returns bytes sent or a negative error (several returns elided here).
 */
1730 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1733 struct sk_buff *skb;
1737 /* Connectionless channel */
1738 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1739 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1741 return PTR_ERR(skb);
1743 l2cap_do_send(chan, skb);
1747 switch (chan->mode) {
1748 case L2CAP_MODE_BASIC:
1749 /* Check outgoing MTU */
1750 if (len > chan->omtu)
1753 /* Create a basic PDU */
1754 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1756 return PTR_ERR(skb);
1758 l2cap_do_send(chan, skb);
1762 case L2CAP_MODE_ERTM:
1763 case L2CAP_MODE_STREAMING:
1764 /* Entire SDU fits into one PDU */
1765 if (len <= chan->remote_mps) {
1766 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1767 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1770 return PTR_ERR(skb);
1772 __skb_queue_tail(&chan->tx_q, skb);
1774 if (chan->tx_send_head == NULL)
1775 chan->tx_send_head = skb;
1778 /* Segment SDU into multiples PDUs */
1779 err = l2cap_sar_segment_sdu(chan, msg, len);
/* Streaming mode: fire and forget, no retransmission engine */
1784 if (chan->mode == L2CAP_MODE_STREAMING) {
1785 l2cap_streaming_send(chan);
/* ERTM: don't transmit while the remote is busy or we await an F-bit */
1790 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1791 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1796 err = l2cap_ertm_send(chan);
1803 BT_DBG("bad state %1.1x", chan->mode);
1810 /* Copy frame to all raw sockets on that connection */
/* Clones @skb once per RAW channel on @conn and hands each clone to the
 * channel's recv op. Holds conn->chan_lock while walking the channel list.
 */
1811 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1813 struct sk_buff *nskb;
1814 struct l2cap_chan *chan;
1816 BT_DBG("conn %p", conn);
1818 mutex_lock(&conn->chan_lock);
1820 list_for_each_entry(chan, &conn->chan_l, list) {
1821 struct sock *sk = chan->sk;
/* Only RAW-type channels receive copies */
1822 if (chan->chan_type != L2CAP_CHAN_RAW)
1825 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: may run in non-sleeping context */
1828 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv consumes nskb on success; on failure (handling elided) */
1832 if (chan->ops->recv(chan->data, nskb))
1836 mutex_unlock(&conn->chan_lock);
1839 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb carrying one signalling command: L2CAP header
 * (CID = signalling channel, LE or BR/EDR), command header, then @dlen
 * bytes of @data, fragmented at conn->mtu boundaries if necessary.
 */
1840 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1841 u8 code, u8 ident, u16 dlen, void *data)
1843 struct sk_buff *skb, **frag;
1844 struct l2cap_cmd_hdr *cmd;
1845 struct l2cap_hdr *lh;
1848 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1849 conn, code, ident, dlen);
1851 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1852 count = min_t(unsigned int, conn->mtu, len);
1854 skb = bt_skb_alloc(count, GFP_ATOMIC);
1858 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1859 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the LE signalling CID, BR/EDR the classic one */
1861 if (conn->hcon->type == LE_LINK)
1862 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1864 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1866 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1869 cmd->len = cpu_to_le16(dlen);
/* Remaining room in the first skb after the two headers */
1872 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1873 memcpy(skb_put(skb, count), data, count);
1879 /* Continuation fragments (no L2CAP header) */
1880 frag = &skb_shinfo(skb)->frag_list;
1882 count = min_t(unsigned int, conn->mtu, len);
1884 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1888 memcpy(skb_put(*frag, count), data, count);
1893 frag = &(*frag)->next;
/* Decode one configuration option at *@ptr into @type/@olen/@val and
 * return the total length consumed (header + option payload). 1/2/4 byte
 * options are read by value; larger options return a pointer in *val.
 */
1903 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1905 struct l2cap_conf_opt *opt = *ptr;
1908 len = L2CAP_CONF_OPT_SIZE + opt->len;
1916 *val = *((u8 *) opt->val);
/* Multi-byte option values are little-endian on the wire */
1920 *val = get_unaligned_le16(opt->val);
1924 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer, not a value */
1928 *val = (unsigned long) opt->val;
1932 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Serialize one configuration option (type/len/value) at *@ptr and advance
 * the pointer past it. For len > 4, @val is treated as a pointer to the
 * option payload and copied verbatim.
 */
1936 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1938 struct l2cap_conf_opt *opt = *ptr;
1940 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1947 *((u8 *) opt->val) = val;
1951 put_unaligned_le16(val, opt->val);
1955 put_unaligned_le32(val, opt->val);
/* Larger payloads: val is a pointer to the data */
1959 memcpy(opt->val, (void *) val, len);
1963 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters. ERTM uses the channel's service type
 * and default latency/flush values; streaming uses best-effort.
 */
1966 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1968 struct l2cap_conf_efs efs;
1970 switch (chan->mode) {
1971 case L2CAP_MODE_ERTM:
1972 efs.id = chan->local_id;
1973 efs.stype = chan->local_stype;
1974 efs.msdu = cpu_to_le16(chan->local_msdu);
1975 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1976 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1977 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1980 case L2CAP_MODE_STREAMING:
/* Streaming mode always advertises best-effort service */
1982 efs.stype = L2CAP_SERV_BESTEFFORT;
1983 efs.msdu = cpu_to_le16(chan->local_msdu);
1984 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1993 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1994 (unsigned long) &efs);
/* Delayed-work handler for the ack timer: send the pending acknowledgement
 * under the socket lock, then drop the reference taken when the timer was
 * armed.
 */
1997 static void l2cap_ack_timeout(struct work_struct *work)
1999 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2002 BT_DBG("chan %p", chan);
2004 lock_sock(chan->sk);
2005 __l2cap_send_ack(chan);
2006 release_sock(chan->sk);
/* Balances the chan ref held while the work was queued */
2008 l2cap_chan_put(chan);
/* Reset all ERTM per-channel state: sequence counters, the three delayed
 * works (retransmission, monitor, ack) and the SREJ queue/list.
 */
2011 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2013 chan->expected_ack_seq = 0;
2014 chan->unacked_frames = 0;
2015 chan->buffer_seq = 0;
2016 chan->num_acked = 0;
2017 chan->frames_sent = 0;
2019 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2020 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2021 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
/* Out-of-order frames parked here until the gap is filled */
2023 skb_queue_head_init(&chan->srej_q);
2025 INIT_LIST_HEAD(&chan->srej_l);
/* Keep the requested ERTM/streaming mode only if the remote's feature mask
 * supports it; otherwise fall back to basic mode.
 */
2028 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2031 case L2CAP_MODE_STREAMING:
2032 case L2CAP_MODE_ERTM:
2033 if (l2cap_mode_supported(mode, remote_feat_mask))
2037 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled and
 * the remote advertising the EXT_WINDOW feature bit.
 */
2041 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2043 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Spec is usable only with high-speed support enabled and
 * the remote advertising the EXT_FLOW feature bit.
 */
2046 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2048 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Pick the tx window limits: if the requested window exceeds the default
 * and extended windows are supported, switch to the extended control field;
 * otherwise clamp the window to the classic default.
 */
2051 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2053 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2054 __l2cap_ews_supported(chan)) {
2055 /* use extended control field */
2056 set_bit(FLAG_EXT_CTRL, &chan->flags);
2057 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2059 chan->tx_win = min_t(u16, chan->tx_win,
2060 L2CAP_DEFAULT_TX_WINDOW);
2061 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build our outgoing Configuration Request into @data: MTU option if
 * non-default, then a mode-specific RFC option plus optional EFS, FCS and
 * EWS options. On the first request, downgrade the mode if the remote
 * doesn't support ERTM/streaming. Returns the request length (tail elided).
 */
2065 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2067 struct l2cap_conf_req *req = data;
2068 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2069 void *ptr = req->data;
2072 BT_DBG("chan %p", chan);
/* Mode (re)negotiation only happens on the very first request/response */
2074 if (chan->num_conf_req || chan->num_conf_rsp)
2077 switch (chan->mode) {
2078 case L2CAP_MODE_STREAMING:
2079 case L2CAP_MODE_ERTM:
/* CONF_STATE2_DEVICE: the mode is mandated by the device, don't adapt */
2080 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2083 if (__l2cap_efs_supported(chan))
2084 set_bit(FLAG_EFS_ENABLE, &chan->flags);
/* Fall back to a mode the remote supports */
2088 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2093 if (chan->imtu != L2CAP_DEFAULT_MTU)
2094 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2096 switch (chan->mode) {
2097 case L2CAP_MODE_BASIC:
/* If the remote supports neither enhanced mode, basic is implicit:
 * no RFC option needed at all */
2098 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2099 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2102 rfc.mode = L2CAP_MODE_BASIC;
2104 rfc.max_transmit = 0;
2105 rfc.retrans_timeout = 0;
2106 rfc.monitor_timeout = 0;
2107 rfc.max_pdu_size = 0;
2109 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2110 (unsigned long) &rfc);
2113 case L2CAP_MODE_ERTM:
2114 rfc.mode = L2CAP_MODE_ERTM;
2115 rfc.max_transmit = chan->max_tx;
/* Timeouts are set by the acceptor side; 0 here means "use yours" */
2116 rfc.retrans_timeout = 0;
2117 rfc.monitor_timeout = 0;
/* PDU size bounded by what fits in one ACL frame with worst-case hdrs */
2119 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2120 L2CAP_EXT_HDR_SIZE -
2123 rfc.max_pdu_size = cpu_to_le16(size);
2125 l2cap_txwin_setup(chan);
/* The RFC txwin field is 8-bit; large windows go in the EWS option */
2127 rfc.txwin_size = min_t(u16, chan->tx_win,
2128 L2CAP_DEFAULT_TX_WINDOW);
2130 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2131 (unsigned long) &rfc);
2133 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2134 l2cap_add_opt_efs(&ptr, chan);
2136 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to drop the FCS when allowed */
2139 if (chan->fcs == L2CAP_FCS_NONE ||
2140 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2141 chan->fcs = L2CAP_FCS_NONE;
2142 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2145 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2146 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2150 case L2CAP_MODE_STREAMING:
2151 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming has no retransmission: window/timeout fields unused */
2153 rfc.max_transmit = 0;
2154 rfc.retrans_timeout = 0;
2155 rfc.monitor_timeout = 0;
2157 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2158 L2CAP_EXT_HDR_SIZE -
2161 rfc.max_pdu_size = cpu_to_le16(size);
2163 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2164 (unsigned long) &rfc);
2166 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2167 l2cap_add_opt_efs(&ptr, chan);
2169 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2172 if (chan->fcs == L2CAP_FCS_NONE ||
2173 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2174 chan->fcs = L2CAP_FCS_NONE;
2175 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2180 req->dcid = cpu_to_le16(chan->dcid);
2181 req->flags = cpu_to_le16(0);
/* Parse the remote's accumulated Configuration Request (chan->conf_req /
 * conf_len) and build our Configuration Response into @data. Walks the
 * option list, negotiates mode/MTU/FCS/EWS/EFS, then fills in output
 * options and the result code. Returns the response length or a negative
 * error when the request must be refused (tail elided in this excerpt).
 */
2186 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2188 struct l2cap_conf_rsp *rsp = data;
2189 void *ptr = rsp->data;
2190 void *req = chan->conf_req;
2191 int len = chan->conf_len;
2192 int type, hint, olen;
2194 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2195 struct l2cap_conf_efs efs;
2197 u16 mtu = L2CAP_DEFAULT_MTU;
2198 u16 result = L2CAP_CONF_SUCCESS;
2201 BT_DBG("chan %p", chan);
/* --- pass 1: decode every option the remote sent --- */
2203 while (len >= L2CAP_CONF_OPT_SIZE) {
2204 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit set => unknown options may be silently ignored */
2206 hint = type & L2CAP_CONF_HINT;
2207 type &= L2CAP_CONF_MASK;
2210 case L2CAP_CONF_MTU:
2214 case L2CAP_CONF_FLUSH_TO:
2215 chan->flush_to = val;
2218 case L2CAP_CONF_QOS:
2221 case L2CAP_CONF_RFC:
2222 if (olen == sizeof(rfc))
2223 memcpy(&rfc, (void *) val, olen);
2226 case L2CAP_CONF_FCS:
/* Remote is fine with no checksum */
2227 if (val == L2CAP_FCS_NONE)
2228 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2231 case L2CAP_CONF_EFS:
2233 if (olen == sizeof(efs))
2234 memcpy(&efs, (void *) val, olen);
2237 case L2CAP_CONF_EWS:
/* EWS without high-speed support is a protocol violation */
2239 return -ECONNREFUSED;
2241 set_bit(FLAG_EXT_CTRL, &chan->flags);
2242 set_bit(CONF_EWS_RECV, &chan->conf_state);
2243 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2244 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN */
2251 result = L2CAP_CONF_UNKNOWN;
2252 *((u8 *) ptr++) = type;
/* --- mode negotiation (only before any response was exchanged) --- */
2257 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2260 switch (chan->mode) {
2261 case L2CAP_MODE_STREAMING:
2262 case L2CAP_MODE_ERTM:
2263 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2264 chan->mode = l2cap_select_mode(rfc.mode,
2265 chan->conn->feat_mask);
2270 if (__l2cap_efs_supported(chan))
2271 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2273 return -ECONNREFUSED;
2276 if (chan->mode != rfc.mode)
2277 return -ECONNREFUSED;
/* Mode mismatch: reject once with our mode, refuse on a repeat */
2283 if (chan->mode != rfc.mode) {
2284 result = L2CAP_CONF_UNACCEPT;
2285 rfc.mode = chan->mode;
2287 if (chan->num_conf_rsp == 1)
2288 return -ECONNREFUSED;
2290 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2291 sizeof(rfc), (unsigned long) &rfc);
2294 if (result == L2CAP_CONF_SUCCESS) {
2295 /* Configure output options and let the other side know
2296 * which ones we don't like. */
2298 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2299 result = L2CAP_CONF_UNACCEPT;
2302 set_bit(CONF_MTU_DONE, &chan->conf_state);
2304 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must match ours unless one side is no-traffic */
2307 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2308 efs.stype != L2CAP_SERV_NOTRAFIC &&
2309 efs.stype != chan->local_stype) {
2311 result = L2CAP_CONF_UNACCEPT;
2313 if (chan->num_conf_req >= 1)
2314 return -ECONNREFUSED;
2316 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2318 (unsigned long) &efs);
2320 /* Send PENDING Conf Rsp */
2321 result = L2CAP_CONF_PENDING;
2322 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2327 case L2CAP_MODE_BASIC:
2328 chan->fcs = L2CAP_FCS_NONE;
2329 set_bit(CONF_MODE_DONE, &chan->conf_state);
2332 case L2CAP_MODE_ERTM:
/* Remote tx window comes from EWS when present, else from the RFC */
2333 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2334 chan->remote_tx_win = rfc.txwin_size;
2336 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2338 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the remote's PDU size to what fits our ACL MTU */
2340 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2342 L2CAP_EXT_HDR_SIZE -
2345 rfc.max_pdu_size = cpu_to_le16(size);
2346 chan->remote_mps = size;
/* NOTE(review): le16_to_cpu() on host-order default constants looks
 * suspicious (cpu_to_le16 would be expected for wire fields) — the
 * elided lines may compensate; verify against the full source. */
2348 rfc.retrans_timeout =
2349 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2350 rfc.monitor_timeout =
2351 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2353 set_bit(CONF_MODE_DONE, &chan->conf_state);
2355 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2356 sizeof(rfc), (unsigned long) &rfc);
2358 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
/* Record the remote's flow spec and echo the EFS back */
2359 chan->remote_id = efs.id;
2360 chan->remote_stype = efs.stype;
2361 chan->remote_msdu = le16_to_cpu(efs.msdu);
2362 chan->remote_flush_to =
2363 le32_to_cpu(efs.flush_to);
2364 chan->remote_acc_lat =
2365 le32_to_cpu(efs.acc_lat);
2366 chan->remote_sdu_itime =
2367 le32_to_cpu(efs.sdu_itime);
2368 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2369 sizeof(efs), (unsigned long) &efs);
2373 case L2CAP_MODE_STREAMING:
2374 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2376 L2CAP_EXT_HDR_SIZE -
2379 rfc.max_pdu_size = cpu_to_le16(size);
2380 chan->remote_mps = size;
2382 set_bit(CONF_MODE_DONE, &chan->conf_state);
2384 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2385 sizeof(rfc), (unsigned long) &rfc);
/* Any other mode: reject with our actual mode */
2390 result = L2CAP_CONF_UNACCEPT;
2392 memset(&rfc, 0, sizeof(rfc));
2393 rfc.mode = chan->mode;
2396 if (result == L2CAP_CONF_SUCCESS)
2397 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2399 rsp->scid = cpu_to_le16(chan->dcid);
2400 rsp->result = cpu_to_le16(result);
2401 rsp->flags = cpu_to_le16(0x0000);
/* Parse the remote's Configuration Response (@rsp/@len) and build a new
 * Configuration Request into @data reflecting the negotiated values.
 * Updates *@result and the channel's ERTM/streaming parameters. Returns
 * the new request length or -ECONNREFUSED (tail elided in this excerpt).
 */
2406 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2408 struct l2cap_conf_req *req = data;
2409 void *ptr = req->data;
2412 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2413 struct l2cap_conf_efs efs;
2415 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2417 while (len >= L2CAP_CONF_OPT_SIZE) {
2418 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2421 case L2CAP_CONF_MTU:
/* Remote proposed an MTU below the spec minimum: refuse it but
 * keep negotiating with the minimum */
2422 if (val < L2CAP_DEFAULT_MIN_MTU) {
2423 *result = L2CAP_CONF_UNACCEPT;
2424 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2427 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2430 case L2CAP_CONF_FLUSH_TO:
2431 chan->flush_to = val;
2432 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2436 case L2CAP_CONF_RFC:
2437 if (olen == sizeof(rfc))
2438 memcpy(&rfc, (void *)val, olen);
/* A device-mandated mode cannot be renegotiated away */
2440 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2441 rfc.mode != chan->mode)
2442 return -ECONNREFUSED;
2446 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2447 sizeof(rfc), (unsigned long) &rfc);
2450 case L2CAP_CONF_EWS:
2451 chan->tx_win = min_t(u16, val,
2452 L2CAP_DEFAULT_EXT_WINDOW);
2453 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2457 case L2CAP_CONF_EFS:
2458 if (olen == sizeof(efs))
2459 memcpy(&efs, (void *)val, olen);
/* Service type mismatch (and neither side no-traffic) is fatal */
2461 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2462 efs.stype != L2CAP_SERV_NOTRAFIC &&
2463 efs.stype != chan->local_stype)
2464 return -ECONNREFUSED;
2466 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2467 sizeof(efs), (unsigned long) &efs);
/* Basic mode can't be switched by the response */
2472 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2473 return -ECONNREFUSED;
2475 chan->mode = rfc.mode;
2477 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2479 case L2CAP_MODE_ERTM:
/* Adopt the acceptor's timeouts and PDU size */
2480 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2481 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2482 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2484 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2485 chan->local_msdu = le16_to_cpu(efs.msdu);
2486 chan->local_sdu_itime =
2487 le32_to_cpu(efs.sdu_itime);
2488 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2489 chan->local_flush_to =
2490 le32_to_cpu(efs.flush_to);
2494 case L2CAP_MODE_STREAMING:
2495 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2499 req->dcid = cpu_to_le16(chan->dcid);
2500 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal Configuration Response header (scid/result/flags) into
 * @data. Returns the response length (return elided in this excerpt).
 */
2505 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2507 struct l2cap_conf_rsp *rsp = data;
2508 void *ptr = rsp->data;
2510 BT_DBG("chan %p", chan);
2512 rsp->scid = cpu_to_le16(chan->dcid);
2513 rsp->result = cpu_to_le16(result);
2514 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connection Response (success) for a channel whose
 * acceptance was postponed, then kick off configuration with our first
 * Configuration Request if one hasn't been sent yet.
 */
2519 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2521 struct l2cap_conn_rsp rsp;
2522 struct l2cap_conn *conn = chan->conn;
2525 rsp.scid = cpu_to_le16(chan->dcid);
2526 rsp.dcid = cpu_to_le16(chan->scid);
2527 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2528 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* chan->ident matches the original Connection Request */
2529 l2cap_send_cmd(conn, chan->ident,
2530 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only send the first config request once */
2532 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2535 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2536 l2cap_build_conf_req(chan, buf), buf);
2537 chan->num_conf_req++;
/* Extract the RFC option from a successful Configuration Response and
 * apply its timeouts/PDU size to the channel. Only relevant for ERTM and
 * streaming modes; substitutes sane defaults if the remote omitted RFC.
 */
2540 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2544 struct l2cap_conf_rfc rfc;
2546 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2548 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2551 while (len >= L2CAP_CONF_OPT_SIZE) {
2552 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2555 case L2CAP_CONF_RFC:
2556 if (olen == sizeof(rfc))
2557 memcpy(&rfc, (void *)val, olen);
2562 /* Use sane default values in case a misbehaving remote device
2563 * did not send an RFC option.
2565 rfc.mode = chan->mode;
2566 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2567 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2568 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2570 BT_ERR("Expected RFC option was not found, using defaults");
2574 case L2CAP_MODE_ERTM:
2575 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2576 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2577 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2579 case L2CAP_MODE_STREAMING:
2580 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject. If it rejects our outstanding Information
 * Request (matched by ident), treat feature discovery as done and proceed
 * with pending connections anyway.
 */
2584 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2586 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2588 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2591 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2592 cmd->ident == conn->info_ident) {
2593 cancel_delayed_work(&conn->info_timer);
2595 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2596 conn->info_ident = 0;
/* Start any channels that were waiting on feature discovery */
2598 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for the
 * PSM, enforce link security (except SDP), create the new child channel,
 * reply with success/pending/refusal, and trigger feature discovery and/or
 * the first Configuration Request as appropriate.
 */
2604 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2606 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2607 struct l2cap_conn_rsp rsp;
2608 struct l2cap_chan *chan = NULL, *pchan;
2609 struct sock *parent, *sk = NULL;
2610 int result, status = L2CAP_CS_NO_INFO;
2612 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2613 __le16 psm = req->psm;
2615 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2617 /* Check if we have socket listening on psm */
2618 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2620 result = L2CAP_CR_BAD_PSM;
2626 mutex_lock(&conn->chan_lock);
2629 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from link-mode checks */
2630 if (psm != cpu_to_le16(0x0001) &&
2631 !hci_conn_check_link_mode(conn->hcon)) {
2632 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2633 result = L2CAP_CR_SEC_BLOCK;
2637 result = L2CAP_CR_NO_MEM;
2639 /* Check for backlog size */
2640 if (sk_acceptq_is_full(parent)) {
2641 BT_DBG("backlog full %d", parent->sk_ack_backlog);
/* Create the child channel from the listening one */
2645 chan = pchan->ops->new_connection(pchan->data);
2651 /* Check if we already have channel with that dcid */
2652 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2653 sock_set_flag(sk, SOCK_ZAPPED);
2654 chan->ops->close(chan->data);
2658 hci_conn_hold(conn->hcon);
2660 bacpy(&bt_sk(sk)->src, conn->src);
2661 bacpy(&bt_sk(sk)->dst, conn->dst);
2665 bt_accept_enqueue(parent, sk);
2667 l2cap_chan_add(conn, chan);
2671 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident for the (possibly deferred) response */
2673 chan->ident = cmd->ident;
2675 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2676 if (l2cap_chan_check_security(chan)) {
/* Userspace asked to defer accept: report pending/authorization */
2677 if (bt_sk(sk)->defer_setup) {
2678 __l2cap_state_change(chan, BT_CONNECT2);
2679 result = L2CAP_CR_PEND;
2680 status = L2CAP_CS_AUTHOR_PEND;
2681 parent->sk_data_ready(parent, 0);
2683 __l2cap_state_change(chan, BT_CONFIG);
2684 result = L2CAP_CR_SUCCESS;
2685 status = L2CAP_CS_NO_INFO;
/* Security procedure still in progress */
2688 __l2cap_state_change(chan, BT_CONNECT2);
2689 result = L2CAP_CR_PEND;
2690 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask unknown yet: answer pending, info req goes out below */
2693 __l2cap_state_change(chan, BT_CONNECT2);
2694 result = L2CAP_CR_PEND;
2695 status = L2CAP_CS_NO_INFO;
2699 release_sock(parent);
2700 mutex_unlock(&conn->chan_lock);
2703 rsp.scid = cpu_to_le16(scid);
2704 rsp.dcid = cpu_to_le16(dcid);
2705 rsp.result = cpu_to_le16(result);
2706 rsp.status = cpu_to_le16(status);
2707 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connection while features unknown: ask for the feature mask */
2709 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2710 struct l2cap_info_req info;
2711 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2713 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2714 conn->info_ident = l2cap_get_ident(conn);
2716 schedule_delayed_work(&conn->info_timer,
2717 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2719 l2cap_send_cmd(conn, conn->info_ident,
2720 L2CAP_INFO_REQ, sizeof(info), &info);
/* Immediate success: start configuration right away */
2723 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2724 result == L2CAP_CR_SUCCESS) {
2726 set_bit(CONF_REQ_SENT, &chan->conf_state);
2727 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2728 l2cap_build_conf_req(chan, buf), buf);
2729 chan->num_conf_req++;
/* Handle a Connection Response: locate the channel by scid (or by command
 * ident when no scid was assigned yet) and either move to BT_CONFIG and
 * start configuration (success), keep waiting (pending), or tear the
 * channel down (failure).
 */
2735 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2737 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2738 u16 scid, dcid, result, status;
2739 struct l2cap_chan *chan;
2744 scid = __le16_to_cpu(rsp->scid);
2745 dcid = __le16_to_cpu(rsp->dcid);
2746 result = __le16_to_cpu(rsp->result);
2747 status = __le16_to_cpu(rsp->status);
2749 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
2750 dcid, scid, result, status);
2752 mutex_lock(&conn->chan_lock);
2755 chan = __l2cap_get_chan_by_scid(conn, scid);
/* Fallback lookup: match the original request by its ident */
2761 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
2774 case L2CAP_CR_SUCCESS:
2775 l2cap_state_change(chan, BT_CONFIG);
2778 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send our first Configuration Request exactly once */
2780 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2783 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2784 l2cap_build_conf_req(chan, req), req);
2785 chan->num_conf_req++;
/* Pending: remember we're still waiting for the final response */
2789 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any failure result: drop the channel */
2793 l2cap_chan_del(chan, ECONNREFUSED);
2800 mutex_unlock(&conn->chan_lock);
2805 static inline void set_default_fcs(struct l2cap_chan *chan)
2807 /* FCS is enabled only in ERTM or streaming mode, if one or both
2810 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2811 chan->fcs = L2CAP_FCS_NONE;
/* Keep CRC16 unless the remote explicitly negotiated FCS off */
2812 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2813 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configuration Request: accumulate (possibly
 * multi-fragment) option data in chan->conf_req, and once complete, parse
 * it, answer with a Configuration Response, and finish channel setup when
 * both directions are configured.
 */
2816 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2818 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2821 struct l2cap_chan *chan;
2825 dcid = __le16_to_cpu(req->dcid);
2826 flags = __le16_to_cpu(req->flags);
2828 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2830 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject CID */
2837 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2838 struct l2cap_cmd_rej_cid rej;
2840 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2841 rej.scid = cpu_to_le16(chan->scid);
2842 rej.dcid = cpu_to_le16(chan->dcid);
2844 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2849 /* Reject if config buffer is too small. */
2850 len = cmd_len - sizeof(*req);
2851 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2852 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2853 l2cap_build_conf_rsp(chan, rsp,
2854 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment's options */
2859 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2860 chan->conf_len += len;
/* Continuation flag (C-bit) set: more fragments follow */
2862 if (flags & 0x0001) {
2863 /* Incomplete config. Send empty response. */
2864 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2865 l2cap_build_conf_rsp(chan, rsp,
2866 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2870 /* Complete config. */
2871 len = l2cap_parse_conf_req(chan, rsp);
2873 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2877 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2878 chan->num_conf_rsp++;
2880 /* Reset config buffer. */
2883 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: the channel is fully up */
2886 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2887 set_default_fcs(chan);
2889 l2cap_state_change(chan, BT_CONNECTED);
2891 chan->next_tx_seq = 0;
2892 chan->expected_tx_seq = 0;
2893 skb_queue_head_init(&chan->tx_q);
2894 if (chan->mode == L2CAP_MODE_ERTM)
2895 l2cap_ertm_init(chan);
2897 l2cap_chan_ready(chan);
/* We answered but haven't asked yet: send our own config request */
2901 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2903 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2904 l2cap_build_conf_req(chan, buf), buf);
2905 chan->num_conf_req++;
2908 /* Got Conf Rsp PENDING from remote side and asume we sent
2909 Conf Rsp PENDING in the code above */
2910 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2911 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2913 /* check compatibility */
2915 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2916 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2918 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2919 l2cap_build_conf_rsp(chan, rsp,
2920 L2CAP_CONF_SUCCESS, 0x0000), rsp);
/* Handle a Configuration Response from the remote. SUCCESS stores the
 * negotiated RFC values; PENDING may answer an earlier local PENDING;
 * UNACCEPT triggers a bounded renegotiation; anything else disconnects.
 * Marks input-side config done and completes setup when output is too.
 */
2928 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2930 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2931 u16 scid, flags, result;
2932 struct l2cap_chan *chan;
2934 int len = cmd->len - sizeof(*rsp);
2936 scid = __le16_to_cpu(rsp->scid);
2937 flags = __le16_to_cpu(rsp->flags);
2938 result = __le16_to_cpu(rsp->result);
2940 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2941 scid, flags, result);
2943 chan = l2cap_get_chan_by_scid(conn, scid);
2951 case L2CAP_CONF_SUCCESS:
/* Adopt the remote's accepted RFC parameters */
2952 l2cap_conf_rfc_get(chan, rsp->data, len);
2953 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2956 case L2CAP_CONF_PENDING:
2957 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
/* Both sides PENDING: resolve now and send our final response */
2959 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2962 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2965 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2969 /* check compatibility */
2971 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2972 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2974 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2975 l2cap_build_conf_rsp(chan, buf,
2976 L2CAP_CONF_SUCCESS, 0x0000), buf);
2980 case L2CAP_CONF_UNACCEPT:
/* Bounded retry: renegotiate only up to MAX_CONF_RSP times */
2981 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2984 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2985 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2989 /* throw out any old stored conf requests */
2990 result = L2CAP_CONF_SUCCESS;
2991 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2994 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2998 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2999 L2CAP_CONF_REQ, len, req);
3000 chan->num_conf_req++;
3001 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: flag error and disconnect after a delay */
3007 __l2cap_chan_set_err(chan, ECONNRESET);
3009 __set_chan_timer(chan,
3010 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
3011 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3018 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Both directions configured: bring the channel up */
3020 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3021 set_default_fcs(chan);
3023 l2cap_state_change(chan, BT_CONNECTED);
3024 chan->next_tx_seq = 0;
3025 chan->expected_tx_seq = 0;
3026 skb_queue_head_init(&chan->tx_q);
3027 if (chan->mode == L2CAP_MODE_ERTM)
3028 l2cap_ertm_init(chan);
3030 l2cap_chan_ready(chan);
/* Handle a Disconnection Request: find the channel by our scid (the
 * peer's dcid), acknowledge with a Disconnection Response, shut the socket
 * down and delete the channel.
 */
3038 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3040 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3041 struct l2cap_disconn_rsp rsp;
3043 struct l2cap_chan *chan;
3046 scid = __le16_to_cpu(req->scid);
3047 dcid = __le16_to_cpu(req->dcid);
3049 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3051 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid */
3053 chan = __l2cap_get_chan_by_scid(conn, dcid);
3055 mutex_unlock(&conn->chan_lock);
/* Echo the CID pair back in the response */
3062 rsp.dcid = cpu_to_le16(chan->scid);
3063 rsp.scid = cpu_to_le16(chan->dcid);
3064 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3066 sk->sk_shutdown = SHUTDOWN_MASK;
3068 l2cap_chan_del(chan, ECONNRESET);
3071 chan->ops->close(chan->data);
3073 mutex_unlock(&conn->chan_lock);
/* Handle an incoming L2CAP Disconnection Response to a request we
 * previously sent: find our channel by SCID and delete it with no
 * error (err == 0, a clean, locally initiated disconnect).
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
struct l2cap_chan *chan;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
mutex_lock(&conn->chan_lock);
chan = __l2cap_get_chan_by_scid(conn, scid);
mutex_unlock(&conn->chan_lock);
/* err 0: the disconnect completed normally. */
l2cap_chan_del(chan, 0);
chan->ops->close(chan->data);
mutex_unlock(&conn->chan_lock);
/* Handle an L2CAP Information Request. Supports two query types:
 *  - L2CAP_IT_FEAT_MASK: report our extended feature mask,
 *  - L2CAP_IT_FIXED_CHAN: report the supported fixed channels;
 * any other type is answered with L2CAP_IR_NOTSUPP.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_info_req *req = (struct l2cap_info_req *) data;
type = __le16_to_cpu(req->type);
BT_DBG("type 0x%4.4x", type);
if (type == L2CAP_IT_FEAT_MASK) {
u32 feat_mask = l2cap_feat_mask;
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming (and, conditionally, extended flow
 * spec / extended window) on top of the base feature mask.
 * NOTE(review): the conditions guarding these |= lines are not
 * visible in this chunk.
 */
feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
feat_mask |= L2CAP_FEAT_EXT_FLOW
| L2CAP_FEAT_EXT_WINDOW;
put_unaligned_le32(feat_mask, rsp->data);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
} else if (type == L2CAP_IT_FIXED_CHAN) {
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Toggle the A2MP bit in the fixed-channel map before copying. */
l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown information type: reject with NOTSUPP. */
struct l2cap_info_rsp rsp;
rsp.type = cpu_to_le16(type);
rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an L2CAP Information Response to our own Information Request.
 * Validates the command ident against conn->info_ident (info exchange
 * is connection-global, not per-channel), records the peer's feature
 * mask, optionally chains a fixed-channel query, and finally kicks
 * l2cap_conn_start() once the info exchange is complete.
 */
static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
type = __le16_to_cpu(rsp->type);
result = __le16_to_cpu(rsp->result);
BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* L2CAP Info req/rsp are unbound to channels, add extra checks */
if (cmd->ident != conn->info_ident ||
conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
cancel_delayed_work(&conn->info_timer);
if (result != L2CAP_IR_SUCCESS) {
/* Peer refused: mark the exchange done and proceed anyway. */
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
l2cap_conn_start(conn);
if (type == L2CAP_IT_FEAT_MASK) {
conn->feat_mask = get_unaligned_le32(rsp->data);
if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Peer supports fixed channels: query which ones. */
struct l2cap_info_req req;
req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
conn->info_ident = l2cap_get_ident(conn);
l2cap_send_cmd(conn, conn->info_ident,
L2CAP_INFO_REQ, sizeof(req), &req);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
l2cap_conn_start(conn);
} else if (type == L2CAP_IT_FIXED_CHAN) {
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request. Currently a placeholder:
 * validates the command length, then unconditionally rejects with
 * L2CAP_CR_NO_MEM (no AMP channel creation support yet).
 */
static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, u16 cmd_len,
struct l2cap_create_chan_req *req = data;
struct l2cap_create_chan_rsp rsp;
/* Malformed request: length must match the fixed request struct. */
if (cmd_len != sizeof(*req))
psm = le16_to_cpu(req->psm);
scid = le16_to_cpu(req->scid);
BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
/* Placeholder: Always reject */
rsp.scid = cpu_to_le16(scid);
rsp.result = L2CAP_CR_NO_MEM;
rsp.status = L2CAP_CS_NO_INFO;
l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Handle an AMP Create Channel Response. The response layout matches
 * a Connect Response, so delegate to l2cap_connect_rsp().
 */
static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, void *data)
BT_DBG("conn %p", conn);
return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response with the given ICID and result code,
 * reusing the requester's command ident.
 */
static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
u16 icid, u16 result)
struct l2cap_move_chan_rsp rsp;
BT_DBG("icid %d, result %d", icid, result);
rsp.icid = cpu_to_le16(icid);
rsp.result = cpu_to_le16(result);
l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm for the given ICID/result. Allocates a
 * fresh command ident and stashes it on the channel so the matching
 * confirm-response can be correlated later.
 * NOTE(review): chan may be NULL at one call site (see
 * l2cap_move_channel_rsp); the deref of chan->ident here is suspect —
 * verify against the elided lines.
 */
static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
struct l2cap_chan *chan, u16 icid, u16 result)
struct l2cap_move_chan_cfm cfm;
BT_DBG("icid %d, result %d", icid, result);
ident = l2cap_get_ident(conn);
chan->ident = ident;
cfm.icid = cpu_to_le16(icid);
cfm.result = cpu_to_le16(result);
l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Send a Move Channel Confirm Response (ack of a received confirm),
 * echoing the sender's ident and the ICID.
 */
static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
struct l2cap_move_chan_cfm_rsp rsp;
BT_DBG("icid %d", icid);
rsp.icid = cpu_to_le16(icid);
l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request (AMP channel move). Placeholder:
 * validates length, then always refuses with L2CAP_MR_NOT_ALLOWED.
 */
static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
struct l2cap_move_chan_req *req = data;
u16 result = L2CAP_MR_NOT_ALLOWED;
/* Malformed request: length must match the fixed request struct. */
if (cmd_len != sizeof(*req))
icid = le16_to_cpu(req->icid);
BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
/* Placeholder: Always refuse */
l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response. Placeholder: always answers with a
 * Move Channel Confirm carrying L2CAP_MC_UNCONFIRMED.
 * NOTE(review): passes chan == NULL to l2cap_send_move_chan_cfm(),
 * which dereferences chan — confirm against the elided lines.
 */
static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
struct l2cap_move_chan_rsp *rsp = data;
if (cmd_len != sizeof(*rsp))
icid = le16_to_cpu(rsp->icid);
result = le16_to_cpu(rsp->result);
BT_DBG("icid %d, result %d", icid, result);
/* Placeholder: Always unconfirmed */
l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirm: validate length and acknowledge with
 * a Confirm Response. The result code is parsed but (in this
 * placeholder) only logged.
 */
static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
struct l2cap_move_chan_cfm *cfm = data;
if (cmd_len != sizeof(*cfm))
icid = le16_to_cpu(cfm->icid);
result = le16_to_cpu(cfm->result);
BT_DBG("icid %d, result %d", icid, result);
l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirm Response: validate length and log the
 * ICID. No further action in this placeholder implementation.
 */
static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
struct l2cap_move_chan_cfm_rsp *rsp = data;
if (cmd_len != sizeof(*rsp))
icid = le16_to_cpu(rsp->icid);
BT_DBG("icid %d", icid);
/* Validate LE connection parameters (units per the LE Connection
 * Parameter Update Request): interval min/max in [6, 3200] with
 * min <= max, supervision timeout multiplier in [10, 3200], timeout
 * strictly greater than max interval * 8, and slave latency at most
 * 499 and below the derived maximum. Rejecting paths are elided here.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
if (min > max || min < 6 || max > 3200)
if (to_multiplier < 10 || to_multiplier > 3200)
if (max >= to_multiplier * 8)
/* Effective latency bound implied by timeout and max interval. */
max_latency = (to_multiplier * 8 / max) - 1;
if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request: only the master
 * may be asked (reject otherwise), validate the command length and
 * the proposed parameters, send an accept/reject response, and on
 * accept apply the new parameters to the HCI connection.
 */
static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, u8 *data)
struct hci_conn *hcon = conn->hcon;
struct l2cap_conn_param_update_req *req;
struct l2cap_conn_param_update_rsp rsp;
u16 min, max, latency, to_multiplier, cmd_len;
/* Only the link master can update connection parameters. */
if (!(hcon->link_mode & HCI_LM_MASTER))
cmd_len = __le16_to_cpu(cmd->len);
if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
req = (struct l2cap_conn_param_update_req *) data;
min = __le16_to_cpu(req->min);
max = __le16_to_cpu(req->max);
latency = __le16_to_cpu(req->latency);
to_multiplier = __le16_to_cpu(req->to_multiplier);
BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
min, max, latency, to_multiplier);
memset(&rsp, 0, sizeof(rsp));
err = l2cap_check_conn_param(min, max, latency, to_multiplier);
rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters accepted: push them down to the HCI layer. */
hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signaling command to its handler based on
 * the command code. Echo requests are answered inline by mirroring
 * the payload; unknown codes log an error (and, per the elided tail,
 * presumably yield a command-reject via the caller).
 */
static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
switch (cmd->code) {
case L2CAP_COMMAND_REJ:
l2cap_command_rej(conn, cmd, data);
case L2CAP_CONN_REQ:
err = l2cap_connect_req(conn, cmd, data);
case L2CAP_CONN_RSP:
err = l2cap_connect_rsp(conn, cmd, data);
case L2CAP_CONF_REQ:
err = l2cap_config_req(conn, cmd, cmd_len, data);
case L2CAP_CONF_RSP:
err = l2cap_config_rsp(conn, cmd, data);
case L2CAP_DISCONN_REQ:
err = l2cap_disconnect_req(conn, cmd, data);
case L2CAP_DISCONN_RSP:
err = l2cap_disconnect_rsp(conn, cmd, data);
case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back with the same ident. */
l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
case L2CAP_ECHO_RSP:
case L2CAP_INFO_REQ:
err = l2cap_information_req(conn, cmd, data);
case L2CAP_INFO_RSP:
err = l2cap_information_rsp(conn, cmd, data);
case L2CAP_CREATE_CHAN_REQ:
err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
case L2CAP_CREATE_CHAN_RSP:
err = l2cap_create_channel_rsp(conn, cmd, data);
case L2CAP_MOVE_CHAN_REQ:
err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
case L2CAP_MOVE_CHAN_RSP:
err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
case L2CAP_MOVE_CHAN_CFM:
err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
case L2CAP_MOVE_CHAN_CFM_RSP:
err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signaling command. Only the connection
 * parameter update request is actively handled; command-reject and
 * update-response are accepted silently, anything else logs an error.
 */
static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
struct l2cap_cmd_hdr *cmd, u8 *data)
switch (cmd->code) {
case L2CAP_COMMAND_REJ:
case L2CAP_CONN_PARAM_UPDATE_REQ:
return l2cap_conn_param_update_req(conn, cmd, data);
case L2CAP_CONN_PARAM_UPDATE_RSP:
BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process the L2CAP signaling channel payload of one skb: iterate
 * over the packed command headers, sanity-check each (declared length
 * must fit the remaining data and ident must be non-zero), dispatch
 * per link type, and on handler error send a generic command reject.
 */
static inline void l2cap_sig_channel(struct l2cap_conn *conn,
struct sk_buff *skb)
u8 *data = skb->data;
struct l2cap_cmd_hdr cmd;
/* Give raw (sniffer) sockets a copy of the signaling traffic. */
l2cap_raw_recv(conn, skb);
while (len >= L2CAP_CMD_HDR_SIZE) {
memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
data += L2CAP_CMD_HDR_SIZE;
len -= L2CAP_CMD_HDR_SIZE;
cmd_len = le16_to_cpu(cmd.len);
BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Truncated command or reserved ident 0: stop parsing. */
if (cmd_len > len || !cmd.ident) {
BT_DBG("corrupted command");
if (conn->hcon->type == LE_LINK)
err = l2cap_le_sig_cmd(conn, &cmd, data);
err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
struct l2cap_cmd_rej_unk rej;
BT_ERR("Wrong link type (%d)", err);
/* FIXME: Map err to a valid reason */
rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the trailing CRC16 FCS of an ERTM/streaming
 * frame when the channel uses L2CAP_FCS_CRC16. The CRC covers the
 * L2CAP header (already pulled off, hence skb->data - hdr_size) plus
 * the remaining payload. Returns nonzero on mismatch (elided path).
 */
static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
u16 our_fcs, rcv_fcs;
/* Extended control field implies the larger enhanced header. */
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
hdr_size = L2CAP_EXT_HDR_SIZE;
hdr_size = L2CAP_ENH_HDR_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16) {
/* Drop the FCS from the payload, then read it from the tail. */
skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
rcv_fcs = get_unaligned_le16(skb->data + skb->len);
our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
if (our_fcs != rcv_fcs)
/* After a poll (P-bit) from the peer: respond with RNR if we are
 * locally busy, otherwise try to (re)send pending I-frames, and if
 * nothing was sent fall back to a plain RR acknowledgement.
 */
static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
chan->frames_sent = 0;
control |= __set_reqseq(chan, chan->buffer_seq);
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Locally busy: tell the peer to stop sending (RNR). */
control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
l2cap_send_sframe(chan, control);
set_bit(CONN_RNR_SENT, &chan->conn_state);
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
l2cap_retransmit_frames(chan);
l2cap_ertm_send(chan);
/* No I-frames went out and we are not busy: ack with RR. */
if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
chan->frames_sent == 0) {
control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ hold queue, keeping it
 * sorted by tx_seq offset relative to buffer_seq. Duplicate tx_seq is
 * detected against queued frames (elided return path). Appends at the
 * tail when the frame sorts after everything queued.
 */
static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
struct sk_buff *next_skb;
int tx_seq_offset, next_tx_seq_offset;
/* Record sequencing metadata in the skb control block. */
bt_cb(skb)->tx_seq = tx_seq;
bt_cb(skb)->sar = sar;
next_skb = skb_peek(&chan->srej_q);
tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
/* Duplicate frame already queued. */
if (bt_cb(next_skb)->tx_seq == tx_seq)
next_tx_seq_offset = __seq_offset(chan,
bt_cb(next_skb)->tx_seq, chan->buffer_seq);
if (next_tx_seq_offset > tx_seq_offset) {
/* Found the first later frame: insert just before it. */
__skb_queue_before(&chan->srej_q, next_skb, skb);
if (skb_queue_is_last(&chan->srej_q, next_skb))
next_skb = skb_queue_next(&chan->srej_q, next_skb);
__skb_queue_tail(&chan->srej_q, skb);
/* Append new_frag to skb's fragment list (or start the list), keeping
 * *last_frag pointing at the tail for O(1) subsequent appends, and
 * update skb's aggregate length accounting.
 */
static void append_skb_frag(struct sk_buff *skb,
struct sk_buff *new_frag, struct sk_buff **last_frag)
/* skb->len reflects data in skb as well as all fragments
 * skb->data_len reflects only data in fragments
 */
if (!skb_has_frag_list(skb))
skb_shinfo(skb)->frag_list = new_frag;
new_frag->next = NULL;
(*last_frag)->next = new_frag;
*last_frag = new_frag;
/* Keep the head skb's totals in sync with the added fragment. */
skb->len += new_frag->len;
skb->data_len += new_frag->len;
skb->truesize += new_frag->truesize;
/* Reassemble an SDU from segmented I-frames based on the SAR bits:
 * unsegmented frames go straight to the owner via chan->ops->recv;
 * START records the SDU length and seeds chan->sdu; CONTINUE/END
 * append fragments, and END delivers the completed SDU (checking the
 * accumulated length against the announced sdu_len). Error paths
 * (oversize SDU, length mismatch) free the partial SDU — several of
 * those branches are elided from this view.
 */
static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
switch (__get_ctrl_sar(chan, control)) {
case L2CAP_SAR_UNSEGMENTED:
err = chan->ops->recv(chan->data, skb);
case L2CAP_SAR_START:
/* First segment carries the total SDU length up front. */
chan->sdu_len = get_unaligned_le16(skb->data);
skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Announced SDU larger than our MTU: reject (elided path). */
if (chan->sdu_len > chan->imtu) {
if (skb->len >= chan->sdu_len)
chan->sdu_last_frag = skb;
case L2CAP_SAR_CONTINUE:
append_skb_frag(chan->sdu, skb,
&chan->sdu_last_frag);
if (chan->sdu->len >= chan->sdu_len)
append_skb_frag(chan->sdu, skb,
&chan->sdu_last_frag);
/* END: total must match the length announced in START. */
if (chan->sdu->len != chan->sdu_len)
err = chan->ops->recv(chan->data, chan->sdu);
/* Reassembly complete */
chan->sdu_last_frag = NULL;
/* Failure path: drop the partially assembled SDU. */
kfree_skb(chan->sdu);
chan->sdu_last_frag = NULL;
/* Enter ERTM local-busy state: flag the condition and arm the ack
 * timer so an RNR gets sent to the peer.
 */
static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
BT_DBG("chan %p, Enter local busy", chan);
set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
__set_ack_timer(chan);
/* Leave ERTM local-busy state. If an RNR was previously sent, poll
 * the peer with an RR+P frame and wait for the F-bit (CONN_WAIT_F),
 * switching from the retransmission timer to the monitor timer.
 * Finally clear the busy-state flags.
 */
static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
control = __set_reqseq(chan, chan->buffer_seq);
/* RR with the poll bit set asks the peer for a final response. */
control |= __set_ctrl_poll(chan);
control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
l2cap_send_sframe(chan, control);
chan->retry_count = 1;
__clear_retrans_timer(chan);
__set_monitor_timer(chan);
set_bit(CONN_WAIT_F, &chan->conn_state);
clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
clear_bit(CONN_RNR_SENT, &chan->conn_state);
BT_DBG("chan %p, Exit local busy", chan);
/* Public entry point for the channel owner to toggle local-busy flow
 * control. Only meaningful in ERTM mode; other modes are ignored.
 */
void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
if (chan->mode == L2CAP_MODE_ERTM) {
l2cap_ertm_enter_local_busy(chan);
l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ hold queue: deliver consecutively-sequenced frames
 * starting at tx_seq (stopping at the first gap or if we go locally
 * busy), advancing buffer_seq_srej per delivered frame. A reassembly
 * error triggers a disconnect with ECONNRESET.
 */
static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
struct sk_buff *skb;
while ((skb = skb_peek(&chan->srej_q)) &&
!test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Stop at the first hole in the sequence. */
if (bt_cb(skb)->tx_seq != tx_seq)
skb = skb_dequeue(&chan->srej_q);
control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
err = l2cap_reassemble_sdu(chan, skb, control);
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
tx_seq = __next_seq(chan, tx_seq);
/* Re-send SREJ S-frames for the entries in the pending srej_l list,
 * stopping once tx_seq is reached; matched entries are dropped (elided
 * free path), the rest are re-queued at the tail after resending.
 */
static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
struct srej_list *l, *tmp;
list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Reached the requested sequence number: done. */
if (l->tx_seq == tx_seq) {
control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
control |= __set_reqseq(chan, l->tx_seq);
l2cap_send_sframe(chan, control);
/* Keep the entry pending by moving it to the list tail. */
list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing sequence number between expected_tx_seq
 * and the received tx_seq, recording each in chan->srej_l so the
 * retransmissions can be tracked. Allocation failure (kzalloc under
 * GFP_ATOMIC) aborts via the elided error return.
 */
static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
struct srej_list *new;
while (tx_seq != chan->expected_tx_seq) {
control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
control |= __set_reqseq(chan, chan->expected_tx_seq);
l2cap_send_sframe(chan, control);
new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
new->tx_seq = chan->expected_tx_seq;
chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
list_add_tail(&new->list, &chan->srej_l);
/* Step past the frame that triggered the SREJ run. */
chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive path. Handles: F-bit resolution of a
 * pending poll (CONN_WAIT_F), acking via req_seq, tx_seq window
 * validation, the SREJ_SENT recovery state (expected vs. duplicated
 * vs. newly missing frames), entering SREJ on a fresh gap, in-order
 * delivery through l2cap_reassemble_sdu(), and periodic acking every
 * tx_win/6 + 1 frames. Several branch/return lines are elided.
 */
static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
u16 tx_seq = __get_txseq(chan, rx_control);
u16 req_seq = __get_reqseq(chan, rx_control);
u8 sar = __get_ctrl_sar(chan, rx_control);
int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly six times per window. */
int num_to_ack = (chan->tx_win/6) + 1;
BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
tx_seq, rx_control);
/* F-bit answers our earlier poll: leave the WAIT_F state. */
if (__is_ctrl_final(chan, rx_control) &&
test_bit(CONN_WAIT_F, &chan->conn_state)) {
__clear_monitor_timer(chan);
if (chan->unacked_frames > 0)
__set_retrans_timer(chan);
clear_bit(CONN_WAIT_F, &chan->conn_state);
/* req_seq acknowledges our outbound frames. */
chan->expected_ack_seq = req_seq;
l2cap_drop_acked_frames(chan);
tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
/* invalid tx_seq */
if (tx_seq_offset >= chan->tx_win) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
l2cap_send_ack(chan);
if (tx_seq == chan->expected_tx_seq)
if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
struct srej_list *first;
first = list_first_entry(&chan->srej_l,
struct srej_list, list);
if (tx_seq == first->tx_seq) {
/* The oldest SREJ'd frame arrived: queue it and try to
 * deliver any now-contiguous run from the hold queue. */
l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
l2cap_check_srej_gap(chan, tx_seq);
list_del(&first->list);
if (list_empty(&chan->srej_l)) {
/* All gaps filled: leave SREJ recovery. */
chan->buffer_seq = chan->buffer_seq_srej;
clear_bit(CONN_SREJ_SENT, &chan->conn_state);
l2cap_send_ack(chan);
BT_DBG("chan %p, Exit SREJ_SENT", chan);
struct srej_list *l;
/* duplicated tx_seq */
if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
list_for_each_entry(l, &chan->srej_l, list) {
if (l->tx_seq == tx_seq) {
l2cap_resend_srejframe(chan, tx_seq);
err = l2cap_send_srejframe(chan, tx_seq);
l2cap_send_disconn_req(chan->conn, chan, -err);
expected_tx_seq_offset = __seq_offset(chan,
chan->expected_tx_seq, chan->buffer_seq);
/* duplicated tx_seq */
if (tx_seq_offset < expected_tx_seq_offset)
/* Fresh gap detected: enter SREJ recovery. */
set_bit(CONN_SREJ_SENT, &chan->conn_state);
BT_DBG("chan %p, Enter SREJ", chan);
INIT_LIST_HEAD(&chan->srej_l);
chan->buffer_seq_srej = chan->buffer_seq;
__skb_queue_head_init(&chan->srej_q);
l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
/* Set P-bit only if there are some I-frames to ack. */
if (__clear_ack_timer(chan))
set_bit(CONN_SEND_PBIT, &chan->conn_state);
err = l2cap_send_srejframe(chan, tx_seq);
l2cap_send_disconn_req(chan->conn, chan, -err);
/* In-order frame: advance the expected sequence. */
chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
bt_cb(skb)->tx_seq = tx_seq;
bt_cb(skb)->sar = sar;
__skb_queue_tail(&chan->srej_q, skb);
err = l2cap_reassemble_sdu(chan, skb, rx_control);
chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
if (__is_ctrl_final(chan, rx_control)) {
if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
l2cap_retransmit_frames(chan);
/* Ack periodically rather than per frame. */
chan->num_acked = (chan->num_acked + 1) % num_to_ack;
if (chan->num_acked == num_to_ack - 1)
l2cap_send_ack(chan);
__set_ack_timer(chan);
/* Handle a received RR (Receiver Ready) S-frame: ack our outbound
 * frames per req_seq, then respond according to P/F bits — a poll
 * demands an F-bit reply (SREJ tail or I/RR/RNR), a final bit clears
 * remote-busy and may trigger retransmission, and a plain RR resumes
 * normal sending.
 */
static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
__get_reqseq(chan, rx_control), rx_control);
chan->expected_ack_seq = __get_reqseq(chan, rx_control);
l2cap_drop_acked_frames(chan);
if (__is_ctrl_poll(chan, rx_control)) {
/* Peer polled us: our next frame must carry the F-bit. */
set_bit(CONN_SEND_FBIT, &chan->conn_state);
if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
(chan->unacked_frames > 0))
__set_retrans_timer(chan);
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
l2cap_send_srejtail(chan);
l2cap_send_i_or_rr_or_rnr(chan);
} else if (__is_ctrl_final(chan, rx_control)) {
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F-bit without a pending REJ: retransmit unacked frames. */
if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
l2cap_retransmit_frames(chan);
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
(chan->unacked_frames > 0))
__set_retrans_timer(chan);
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
l2cap_send_ack(chan);
l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: the peer requests retransmission of
 * everything from req_seq onward. Drop frames acked below req_seq,
 * then retransmit — with F-bit handling so a REJ answered while we
 * wait for a poll response is not retransmitted twice (CONN_REJ_ACT).
 */
static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
u16 tx_seq = __get_reqseq(chan, rx_control);
BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
chan->expected_ack_seq = tx_seq;
l2cap_drop_acked_frames(chan);
if (__is_ctrl_final(chan, rx_control)) {
if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
l2cap_retransmit_frames(chan);
l2cap_retransmit_frames(chan);
/* Remember we already served this REJ while awaiting the F-bit. */
if (test_bit(CONN_WAIT_F, &chan->conn_state))
set_bit(CONN_REJ_ACT, &chan->conn_state);
4087 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4089 u16 tx_seq = __get_reqseq(chan, rx_control);
4091 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4093 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4095 if (__is_ctrl_poll(chan, rx_control)) {
4096 chan->expected_ack_seq = tx_seq;
4097 l2cap_drop_acked_frames(chan);
4099 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4100 l2cap_retransmit_one_frame(chan, tx_seq);
4102 l2cap_ertm_send(chan);
4104 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4105 chan->srej_save_reqseq = tx_seq;
4106 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4108 } else if (__is_ctrl_final(chan, rx_control)) {
4109 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4110 chan->srej_save_reqseq == tx_seq)
4111 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4113 l2cap_retransmit_one_frame(chan, tx_seq);
4115 l2cap_retransmit_one_frame(chan, tx_seq);
4116 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4117 chan->srej_save_reqseq = tx_seq;
4118 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a received RNR (Receiver Not Ready) S-frame: mark the peer
 * busy, ack per req_seq, and stop retransmitting. A poll bit demands
 * a response — RR+F when not in SREJ recovery, otherwise the SREJ
 * tail or a plain RR.
 */
static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
u16 tx_seq = __get_reqseq(chan, rx_control);
BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
chan->expected_ack_seq = tx_seq;
l2cap_drop_acked_frames(chan);
if (__is_ctrl_poll(chan, rx_control))
set_bit(CONN_SEND_FBIT, &chan->conn_state);
if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* Peer can't receive: retransmission timer is pointless. */
__clear_retrans_timer(chan);
if (__is_ctrl_poll(chan, rx_control))
l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
if (__is_ctrl_poll(chan, rx_control)) {
l2cap_send_srejtail(chan);
rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
l2cap_send_sframe(chan, rx_control);
/* Dispatch a received S-frame (supervisory frame) by its super-type
 * to the RR/REJ/SREJ/RNR handlers. First resolves a pending poll:
 * an F-bit while in CONN_WAIT_F stops the monitor timer and re-arms
 * retransmission if frames are still unacked.
 */
static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
if (__is_ctrl_final(chan, rx_control) &&
test_bit(CONN_WAIT_F, &chan->conn_state)) {
__clear_monitor_timer(chan);
if (chan->unacked_frames > 0)
__set_retrans_timer(chan);
clear_bit(CONN_WAIT_F, &chan->conn_state);
switch (__get_ctrl_super(chan, rx_control)) {
case L2CAP_SUPER_RR:
l2cap_data_channel_rrframe(chan, rx_control);
case L2CAP_SUPER_REJ:
l2cap_data_channel_rejframe(chan, rx_control);
case L2CAP_SUPER_SREJ:
l2cap_data_channel_srejframe(chan, rx_control);
case L2CAP_SUPER_RNR:
l2cap_data_channel_rnrframe(chan, rx_control);
/* Validate and dispatch one received ERTM frame: strip the control
 * field, verify the FCS (corrupted I-frames are silently dropped and
 * recovered via retransmission), bound-check the payload length
 * against MPS and the req_seq against our send window, then route to
 * the I-frame or S-frame handler. Protocol violations disconnect the
 * channel with ECONNRESET.
 */
static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
int len, next_tx_seq_offset, req_seq_offset;
control = __get_control(chan, skb->data);
skb_pull(skb, __ctrl_size(chan));
/*
 * We can just drop the corrupted I-frame here.
 * Receiver will miss it and start proper recovery
 * procedures and ask retransmission.
 */
if (l2cap_check_fcs(chan, skb))
/* A SAR-start I-frame carries a 2-byte SDU length prefix. */
if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
len -= L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
len -= L2CAP_FCS_SIZE;
if (len > chan->mps) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
req_seq = __get_reqseq(chan, control);
req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
chan->expected_ack_seq);
/* check for invalid req-seq */
if (req_seq_offset > next_tx_seq_offset) {
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
if (!__is_sframe(chan, control)) {
/* I-frame (elided: zero-length I-frames are rejected). */
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
l2cap_data_channel_iframe(chan, control, skb);
/* S-frame (elided: nonzero payload is a violation). */
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data frame to the channel identified by cid, per the
 * channel's mode: BASIC hands the skb straight to the owner (dropping
 * oversize frames — no flow control exists); ERTM goes through the
 * full l2cap_ertm_data_rcv() state machine; STREAMING checks FCS and
 * length, discards any partial SDU on a sequence gap, and reassembles.
 */
static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
struct l2cap_chan *chan;
struct sock *sk = NULL;
chan = l2cap_get_chan_by_scid(conn, cid);
BT_DBG("unknown cid 0x%4.4x", cid);
BT_DBG("chan %p, len %d", chan, skb->len);
/* Data on a non-connected channel is dropped (elided path). */
if (chan->state != BT_CONNECTED)
switch (chan->mode) {
case L2CAP_MODE_BASIC:
/* If socket recv buffers overflows we drop data here
 * which is *bad* because L2CAP has to be reliable.
 * But we don't have any other choice. L2CAP doesn't
 * provide flow control mechanism. */
if (chan->imtu < skb->len)
if (!chan->ops->recv(chan->data, skb))
case L2CAP_MODE_ERTM:
l2cap_ertm_data_rcv(chan, skb);
case L2CAP_MODE_STREAMING:
control = __get_control(chan, skb->data);
skb_pull(skb, __ctrl_size(chan));
if (l2cap_check_fcs(chan, skb))
if (__is_sar_start(chan, control))
len -= L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
len -= L2CAP_FCS_SIZE;
/* Streaming mode carries no S-frames. */
if (len > chan->mps || len < 0 || __is_sframe(chan, control))
tx_seq = __get_txseq(chan, control);
if (chan->expected_tx_seq != tx_seq) {
/* Frame(s) missing - must discard partial SDU */
kfree_skb(chan->sdu);
chan->sdu_last_frag = NULL;
/* TODO: Notify userland of missing data */
chan->expected_tx_seq = __next_seq(chan, tx_seq);
if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (G-frame) payload: find a global channel
 * bound to the given PSM on our source address, check state and MTU,
 * and hand the skb to the channel owner.
 */
static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
struct sock *sk = NULL;
struct l2cap_chan *chan;
chan = l2cap_global_chan_by_psm(0, psm, conn->src);
BT_DBG("sk %p, len %d", sk, skb->len);
if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
if (chan->imtu < skb->len)
if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT fixed-channel (LE data CID) payload: find a global
 * channel bound to that CID on our source address, check state and
 * MTU, and hand the skb to the channel owner.
 */
static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
struct sock *sk = NULL;
struct l2cap_chan *chan;
chan = l2cap_global_chan_by_scid(0, cid, conn->src);
BT_DBG("sk %p, len %d", sk, skb->len);
if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
if (chan->imtu < skb->len)
if (!chan->ops->recv(chan->data, skb))
/* Top-level demultiplexer for a complete L2CAP frame: parse the basic
 * header, verify the declared length matches the skb, then route by
 * CID — signaling, connectionless (by PSM), LE/ATT data, SMP, or a
 * regular connection-oriented data channel.
 */
static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
struct l2cap_hdr *lh = (void *) skb->data;
skb_pull(skb, L2CAP_HDR_SIZE);
cid = __le16_to_cpu(lh->cid);
len = __le16_to_cpu(lh->len);
/* Header length disagrees with the actual payload: drop (elided). */
if (len != skb->len) {
BT_DBG("len %d, cid 0x%4.4x", len, cid);
case L2CAP_CID_LE_SIGNALING:
case L2CAP_CID_SIGNALING:
l2cap_sig_channel(conn, skb);
case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in the first 2 bytes. */
psm = get_unaligned_le16(skb->data);
l2cap_conless_channel(conn, psm, skb);
case L2CAP_CID_LE_DATA:
l2cap_att_channel(conn, cid, skb);
/* SMP channel: a failure here kills the whole connection. */
if (smp_sig_channel(conn, skb))
l2cap_conn_del(conn->hcon, EACCES);
l2cap_data_channel(conn, cid, skb);
4453 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being indicated. Scan
 * the global channel list for listening channels bound either to this
 * adapter's own address (exact match, lm1) or to BDADDR_ANY (wildcard,
 * lm2), and return the accept/role-switch link-mode flags — exact
 * matches take precedence over wildcards.
 */
int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
int exact = 0, lm1 = 0, lm2 = 0;
struct l2cap_chan *c;
BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
/* Find listening sockets and check their link_mode */
read_lock(&chan_list_lock);
list_for_each_entry(c, &chan_list, global_l) {
struct sock *sk = c->sk;
if (c->state != BT_LISTEN)
if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
lm1 |= HCI_LM_ACCEPT;
if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
lm1 |= HCI_LM_MASTER;
} else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
lm2 |= HCI_LM_ACCEPT;
if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
lm2 |= HCI_LM_MASTER;
read_unlock(&chan_list_lock);
return exact ? lm1 : lm2;
/* HCI callback: connection establishment completed. On success create
 * the L2CAP connection object and mark it ready; on failure tear down
 * any partial state with the mapped errno.
 */
int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
struct l2cap_conn *conn;
BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
conn = l2cap_conn_add(hcon, status);
l2cap_conn_ready(conn);
l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the reason to use for a pending disconnect.
 * Falls back to "remote user terminated" when no L2CAP connection
 * state exists (elided check).
 */
int l2cap_disconn_ind(struct hci_conn *hcon)
struct l2cap_conn *conn = hcon->l2cap_data;
BT_DBG("hcon %p", hcon);
return HCI_ERROR_REMOTE_USER_TERM;
return conn->disc_reason;
/* HCI callback: link disconnected — tear down the L2CAP connection,
 * translating the HCI reason code to an errno.
 */
int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
BT_DBG("hcon %p reason %d", hcon, reason);
l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 * Encryption dropped: MEDIUM security gets a grace timer before
 * teardown, HIGH security closes immediately with ECONNREFUSED.
 * Encryption (re)enabled: cancel any pending MEDIUM-security timer.
 */
static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
if (encrypt == 0x00) {
if (chan->sec_level == BT_SECURITY_MEDIUM) {
__clear_chan_timer(chan);
__set_chan_timer(chan,
msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
} else if (chan->sec_level == BT_SECURITY_HIGH)
l2cap_chan_close(chan, ECONNREFUSED);
if (chan->sec_level == BT_SECURITY_MEDIUM)
__clear_chan_timer(chan);
/* HCI callback: security (authentication/encryption) procedure
 * finished with the given status. For LE links, distribute SMP keys
 * and stop the security timer. For every channel on the connection:
 * LE data channels become ready on success; connected/configuring
 * channels get their encryption state rechecked; BT_CONNECT channels
 * proceed to send the deferred Connect Request; BT_CONNECT2 channels
 * answer the pending Connect Response (pending if the socket defers
 * setup, success otherwise, security-block on failure).
 */
int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
struct l2cap_conn *conn = hcon->l2cap_data;
struct l2cap_chan *chan;
BT_DBG("conn %p", conn);
if (hcon->type == LE_LINK) {
smp_distribute_keys(conn, 0);
cancel_delayed_work(&conn->security_timer);
mutex_lock(&conn->chan_lock);
list_for_each_entry(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;
BT_DBG("chan->scid %d", chan->scid);
if (chan->scid == L2CAP_CID_LE_DATA) {
if (!status && encrypt) {
/* LE data channel secured: promote and mark ready. */
chan->sec_level = hcon->sec_level;
l2cap_chan_ready(chan);
/* Skip channels with no security request outstanding
 * (elided continue path). */
if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
if (!status && (chan->state == BT_CONNECTED ||
chan->state == BT_CONFIG)) {
l2cap_check_encryption(chan, encrypt);
if (chan->state == BT_CONNECT) {
/* Security done for an outbound connect: send the
 * Connect Request now (elided: failure arms a timer). */
struct l2cap_conn_req req;
req.scid = cpu_to_le16(chan->scid);
req.psm = chan->psm;
chan->ident = l2cap_get_ident(conn);
set_bit(CONF_CONNECT_PEND, &chan->conf_state);
l2cap_send_cmd(conn, chan->ident,
L2CAP_CONN_REQ, sizeof(req), &req);
__clear_chan_timer(chan);
__set_chan_timer(chan,
msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
} else if (chan->state == BT_CONNECT2) {
struct l2cap_conn_rsp rsp;
if (bt_sk(sk)->defer_setup) {
/* Userspace decides: answer "pending, authorization
 * pending" and wake the listening parent. */
struct sock *parent = bt_sk(sk)->parent;
res = L2CAP_CR_PEND;
stat = L2CAP_CS_AUTHOR_PEND;
parent->sk_data_ready(parent, 0);
__l2cap_state_change(chan, BT_CONFIG);
res = L2CAP_CR_SUCCESS;
stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse and schedule disconnect. */
__l2cap_state_change(chan, BT_DISCONN);
__set_chan_timer(chan,
msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
res = L2CAP_CR_SEC_BLOCK;
stat = L2CAP_CS_NO_INFO;
rsp.scid = cpu_to_le16(chan->dcid);
rsp.dcid = cpu_to_le16(chan->scid);
rsp.result = cpu_to_le16(res);
rsp.status = cpu_to_le16(stat);
l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
mutex_unlock(&conn->chan_lock);
/* Receive one ACL fragment from the HCI layer and reassemble complete
 * L2CAP frames in conn->rx_skb.
 *
 * Start fragments (!(flags & ACL_CONT)) must carry at least the Basic
 * L2CAP header, from which the total frame length is read. A frame that
 * fits in a single fragment is dispatched to l2cap_recv_frame() directly;
 * otherwise an rx_skb of the full length is allocated and conn->rx_len
 * tracks the bytes still missing. Continuation fragments are appended
 * until rx_len reaches zero, at which point the completed frame is
 * dispatched. Any protocol violation (unexpected start/continuation,
 * short header, oversized frame or fragment, frame exceeding the
 * channel's receive MTU) marks the connection unreliable with ECOMM.
 *
 * NOTE(review): several guard/cleanup lines (NULL checks, gotos, the
 * final kfree_skb/return) are missing from this extract — confirm
 * against the full file. */
4641 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4643 	struct l2cap_conn *conn = hcon->l2cap_data;
/* First data on this link: create the L2CAP connection lazily */
4646 		conn = l2cap_conn_add(hcon, 0);
4651 	BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4653 	if (!(flags & ACL_CONT)) {
4654 		struct l2cap_hdr *hdr;
4655 		struct l2cap_chan *chan;
/* A start fragment arrived while a reassembly was in progress —
 * drop the partial frame and flag the connection */
4660 			BT_ERR("Unexpected start frame (len %d)", skb->len);
4661 			kfree_skb(conn->rx_skb);
4662 			conn->rx_skb = NULL;
4664 			l2cap_conn_unreliable(conn, ECOMM);
4667 		/* A start fragment always begins with the Basic L2CAP header */
4668 		if (skb->len < L2CAP_HDR_SIZE) {
4669 			BT_ERR("Frame is too short (len %d)", skb->len);
4670 			l2cap_conn_unreliable(conn, ECOMM);
4674 		hdr = (struct l2cap_hdr *) skb->data;
/* Total frame length = payload length from header + header itself */
4675 		len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4676 		cid = __le16_to_cpu(hdr->cid);
4678 		if (len == skb->len) {
4679 			/* Complete frame received in a single fragment */
4680 			l2cap_recv_frame(conn, skb);
4684 		BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4686 		if (skb->len > len) {
4687 			BT_ERR("Frame is too long (len %d, expected len %d)",
4689 			l2cap_conn_unreliable(conn, ECOMM);
4693 		chan = l2cap_get_chan_by_scid(conn, cid);
4695 		if (chan && chan->sk) {
4696 			struct sock *sk = chan->sk;
/* Reject frames larger than the channel's receive MTU up front */
4699 			if (chan->imtu < len - L2CAP_HDR_SIZE) {
4700 				BT_ERR("Frame exceeding recv MTU (len %d, "
4704 				l2cap_conn_unreliable(conn, ECOMM);
4710 		/* Allocate skb for the complete frame (with header) */
4711 		conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4715 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Bytes still expected from continuation fragments */
4717 		conn->rx_len = len - skb->len;
/* Continuation fragment path */
4719 		BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4721 		if (!conn->rx_len) {
4722 			BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4723 			l2cap_conn_unreliable(conn, ECOMM);
4727 		if (skb->len > conn->rx_len) {
4728 			BT_ERR("Fragment is too long (len %d, expected %d)",
4729 						skb->len, conn->rx_len);
4730 			kfree_skb(conn->rx_skb);
4731 			conn->rx_skb = NULL;
4733 			l2cap_conn_unreliable(conn, ECOMM);
4737 		skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4739 		conn->rx_len -= skb->len;
4741 		if (!conn->rx_len) {
4742 			/* Complete frame received */
4743 			l2cap_recv_frame(conn, conn->rx_skb);
4744 			conn->rx_skb = NULL;
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: print one
 * line per registered channel (addresses, state, PSM, CIDs, MTUs, security
 * level and mode) while holding chan_list_lock for the traversal. */
4753 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4755 	struct l2cap_chan *c;
4757 	read_lock(&chan_list_lock);
4759 	list_for_each_entry(c, &chan_list, global_l) {
4760 		struct sock *sk = c->sk;
/* psm is stored little-endian on the wire format, hence the conversion */
4762 		seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4763 					batostr(&bt_sk(sk)->src),
4764 					batostr(&bt_sk(sk)->dst),
4765 					c->state, __le16_to_cpu(c->psm),
4766 					c->scid, c->dcid, c->imtu, c->omtu,
4767 					c->sec_level, c->mode);
4770 	read_unlock(&chan_list_lock);
/* debugfs open: bind the single-shot seq_file show routine to the file */
4775 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4777 	return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry; read/llseek/release are the
 * standard seq_file single_* helpers */
4780 static const struct file_operations l2cap_debugfs_fops = {
4781 	.open		= l2cap_debugfs_open,
4783 	.llseek		= seq_lseek,
4784 	.release	= single_release,
/* Dentry for the debugfs entry, kept so l2cap_exit() can remove it */
4787 static struct dentry *l2cap_debugfs;
/* Module init: register the L2CAP socket family and create the read-only
 * debugfs channel listing (failure to create it is logged, not fatal).
 * NOTE(review): the error-return path after l2cap_init_sockets() is missing
 * from this extract — confirm against the full file. */
4789 int __init l2cap_init(void)
4793 	err = l2cap_init_sockets();
4798 	l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4799 				bt_debugfs, NULL, &l2cap_debugfs_fops);
4801 		BT_ERR("Failed to create L2CAP debug file");
/* Module teardown: remove the debugfs entry, then unregister the sockets.
 * Mirrors l2cap_init() in reverse order. */
4807 void l2cap_exit(void)
4809 	debugfs_remove(l2cap_debugfs);
4810 	l2cap_cleanup_sockets();
/* Runtime-tunable knob (0644: root-writable via /sys/module) to disable
 * Enhanced Retransmission Mode channel negotiation */
4813 module_param(disable_ertm, bool, 0644);
4814 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");