2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 /* ---- L2CAP channels ---- */
/* Look up a channel on @conn by destination CID (dcid).
 * NOTE(review): non-contiguous extract — comparison/return lines are
 * missing here; presumably walks chan_l under RCU and returns the match. */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
80 struct l2cap_chan *c, *r = NULL;
84 list_for_each_entry_rcu(c, &conn->chan_l, list) {
/* Look up a channel on @conn by source CID (scid); RCU list walk.
 * NOTE(review): extract — the match bookkeeping/return lines are missing. */
95 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
97 struct l2cap_chan *c, *r = NULL;
101 list_for_each_entry_rcu(c, &conn->chan_l, list) {
102 if (c->scid == cid) {
112 /* Find channel with given SCID.
113 * Returns locked socket */
/* Public wrapper around __l2cap_get_chan_by_scid(); locking lines not
 * visible in this extract — TODO confirm against full source. */
114 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
116 struct l2cap_chan *c;
118 c = __l2cap_get_chan_by_scid(conn, cid);
/* Look up a channel on @conn by pending signalling command identifier. */
124 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
126 struct l2cap_chan *c, *r = NULL;
130 list_for_each_entry_rcu(c, &conn->chan_l, list) {
131 if (c->ident == ident) {
/* Wrapper around __l2cap_get_chan_by_ident(); surrounding locking lines
 * are not visible in this extract. */
141 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
143 struct l2cap_chan *c;
145 c = __l2cap_get_chan_by_ident(conn, ident);
/* Find a channel in the global chan_list bound to source port @psm on
 * source address @src. Caller must hold chan_list_lock (see callers). */
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
153 struct l2cap_chan *c;
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on @src, or — when no PSM is given — auto-allocate
 * an odd dynamic PSM from 0x1001..0x10ff. Fails if the (psm, src) pair is
 * already taken. Error-return lines are missing from this extract. */
162 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
166 write_lock(&chan_list_lock);
168 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Dynamic PSMs must be odd (LSB of low byte set, LSB of high byte clear). */
181 for (p = 0x1001; p < 0x1100; p += 2)
182 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
183 chan->psm = cpu_to_le16(p);
184 chan->sport = cpu_to_le16(p);
191 write_unlock(&chan_list_lock);
/* Record a fixed source CID on @chan under chan_list_lock; the assignment
 * line itself is missing from this extract. */
195 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
197 write_lock(&chan_list_lock);
201 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic CID on @conn by linear scan of
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). Return lines not visible. */
206 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
208 u16 cid = L2CAP_CID_DYN_START;
210 for (; cid < L2CAP_CID_DYN_END; cid++) {
211 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Human-readable name for a BT_* channel state, for debug output.
 * Most case labels are missing from this extract. */
218 static char *state_to_string(int state)
222 return "BT_CONNECTED";
232 return "BT_CONNECT2";
241 return "invalid state";
/* Move @chan to @state, logging the transition and notifying the owner
 * via the ops->state_change callback. */
244 static void l2cap_state_change(struct l2cap_chan *chan, int state)
246 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
247 state_to_string(state));
250 chan->ops->state_change(chan->data, state);
/* Delayed-work handler for the per-channel timer: pick a close reason
 * from the current state, close the channel, notify the socket layer and
 * drop the work's channel reference. */
253 static void l2cap_chan_timeout(struct work_struct *work)
255 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
257 struct sock *sk = chan->sk;
260 BT_DBG("chan %p state %d", chan, chan->state);
/* Timed out while connected/configuring or while connecting with real
 * security -> report ECONNREFUSED; other branches are not visible here. */
264 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
265 reason = ECONNREFUSED;
266 else if (chan->state == BT_CONNECT &&
267 chan->sec_level != BT_SECURITY_SDP)
268 reason = ECONNREFUSED;
272 l2cap_chan_close(chan, reason);
276 chan->ops->close(chan->data);
277 l2cap_chan_put(chan);
/* Allocate a new channel attached to socket @sk, register it on the
 * global chan_list, arm its timeout work and give it an initial refcount
 * of 1. NULL-check on the allocation is missing from this extract. */
280 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
282 struct l2cap_chan *chan;
284 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
290 write_lock(&chan_list_lock);
291 list_add(&chan->global_l, &chan_list);
292 write_unlock(&chan_list_lock);
294 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
296 chan->state = BT_OPEN;
298 atomic_set(&chan->refcnt, 1);
300 BT_DBG("sk %p chan %p", sk, chan);
/* Unregister @chan from the global list and drop the creation reference
 * taken in l2cap_chan_create(). */
305 void l2cap_chan_destroy(struct l2cap_chan *chan)
307 write_lock(&chan_list_lock);
308 list_del(&chan->global_l);
309 write_unlock(&chan_list_lock);
311 l2cap_chan_put(chan);
/* Attach @chan to @conn: pick scid/dcid/omtu according to the channel
 * type (LE data, dynamic ACL CID, connectionless, or raw/signalling),
 * seed the default QoS/flush parameters, take a channel reference and
 * link it into the connection's RCU channel list. */
314 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
316 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
317 chan->psm, chan->dcid);
319 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
323 switch (chan->chan_type) {
324 case L2CAP_CHAN_CONN_ORIENTED:
325 if (conn->hcon->type == LE_LINK) {
/* LE links use the fixed LE data CID in both directions. */
327 chan->omtu = L2CAP_LE_DEFAULT_MTU;
328 chan->scid = L2CAP_CID_LE_DATA;
329 chan->dcid = L2CAP_CID_LE_DATA;
331 /* Alloc CID for connection-oriented socket */
332 chan->scid = l2cap_alloc_cid(conn);
333 chan->omtu = L2CAP_DEFAULT_MTU;
337 case L2CAP_CHAN_CONN_LESS:
338 /* Connectionless socket */
339 chan->scid = L2CAP_CID_CONN_LESS;
340 chan->dcid = L2CAP_CID_CONN_LESS;
341 chan->omtu = L2CAP_DEFAULT_MTU;
345 /* Raw socket can send/recv signalling messages only */
346 chan->scid = L2CAP_CID_SIGNALING;
347 chan->dcid = L2CAP_CID_SIGNALING;
348 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default extended-flow-spec / QoS parameters. */
351 chan->local_id = L2CAP_BESTEFFORT_ID;
352 chan->local_stype = L2CAP_SERV_BESTEFFORT;
353 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
354 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
355 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
356 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
358 l2cap_chan_hold(chan);
360 list_add_rcu(&chan->list, &conn->chan_l);
/* Detach @chan from its connection and tear down its state.
364 * Must be called on the locked socket. */
365 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
367 struct sock *sk = chan->sk;
368 struct l2cap_conn *conn = chan->conn;
369 struct sock *parent = bt_sk(sk)->parent;
371 __clear_chan_timer(chan);
373 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
376 /* Delete from channel list */
377 list_del_rcu(&chan->list);
380 l2cap_chan_put(chan);
383 hci_conn_put(conn->hcon);
386 l2cap_state_change(chan, BT_CLOSED);
387 sock_set_flag(sk, SOCK_ZAPPED);
/* Notify an accepting parent, or the socket itself, of the state change. */
393 bt_accept_unlink(sk);
394 parent->sk_data_ready(parent, 0);
396 sk->sk_state_change(sk);
398 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
399 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
402 skb_queue_purge(&chan->tx_q);
/* ERTM cleanup: stop all retransmission machinery and free SREJ state. */
404 if (chan->mode == L2CAP_MODE_ERTM) {
405 struct srej_list *l, *tmp;
407 __clear_retrans_timer(chan);
408 __clear_monitor_timer(chan);
409 __clear_ack_timer(chan);
411 skb_queue_purge(&chan->srej_q);
413 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel queued on listening socket
 * @parent. */
420 static void l2cap_chan_cleanup_listen(struct sock *parent)
424 BT_DBG("parent %p", parent);
426 /* Close not yet accepted channels */
427 while ((sk = bt_accept_dequeue(parent, NULL))) {
428 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
429 __clear_chan_timer(chan);
431 l2cap_chan_close(chan, ECONNRESET);
433 chan->ops->close(chan->data);
/* Close @chan according to its current state: clean up listeners, send a
 * Disconnect Request from connected/config states on ACL links, answer a
 * pending Connect Request with a rejection from BT_CONNECT2, or just
 * delete the channel. Case labels are missing from this extract. */
437 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
439 struct l2cap_conn *conn = chan->conn;
440 struct sock *sk = chan->sk;
442 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
444 switch (chan->state) {
446 l2cap_chan_cleanup_listen(sk);
448 l2cap_state_change(chan, BT_CLOSED);
449 sock_set_flag(sk, SOCK_ZAPPED);
/* Connection-oriented channel on an ACL link: request disconnection and
 * re-arm the timer while waiting for the response. */
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) {
456 __clear_chan_timer(chan);
457 __set_chan_timer(chan, sk->sk_sndtimeo);
458 l2cap_send_disconn_req(conn, chan, reason);
460 l2cap_chan_del(chan, reason);
/* BT_CONNECT2 (presumably): reject the pending incoming connection. */
464 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
465 conn->hcon->type == ACL_LINK) {
466 struct l2cap_conn_rsp rsp;
/* Deferred setup means security blocked it; otherwise report bad PSM. */
469 if (bt_sk(sk)->defer_setup)
470 result = L2CAP_CR_SEC_BLOCK;
472 result = L2CAP_CR_BAD_PSM;
473 l2cap_state_change(chan, BT_DISCONN);
475 rsp.scid = cpu_to_le16(chan->dcid);
476 rsp.dcid = cpu_to_le16(chan->scid);
477 rsp.result = cpu_to_le16(result);
478 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
479 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
483 l2cap_chan_del(chan, reason);
488 l2cap_chan_del(chan, reason);
492 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel's security level to an HCI authentication type:
 * dedicated bonding for raw channels, no bonding for SDP (PSM 0x0001,
 * which is also capped at BT_SECURITY_SDP), general bonding otherwise. */
497 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
499 if (chan->chan_type == L2CAP_CHAN_RAW) {
500 switch (chan->sec_level) {
501 case BT_SECURITY_HIGH:
502 return HCI_AT_DEDICATED_BONDING_MITM;
503 case BT_SECURITY_MEDIUM:
504 return HCI_AT_DEDICATED_BONDING;
506 return HCI_AT_NO_BONDING;
508 } else if (chan->psm == cpu_to_le16(0x0001)) {
509 if (chan->sec_level == BT_SECURITY_LOW)
510 chan->sec_level = BT_SECURITY_SDP;
512 if (chan->sec_level == BT_SECURITY_HIGH)
513 return HCI_AT_NO_BONDING_MITM;
515 return HCI_AT_NO_BONDING;
517 switch (chan->sec_level) {
518 case BT_SECURITY_HIGH:
519 return HCI_AT_GENERAL_BONDING_MITM;
520 case BT_SECURITY_MEDIUM:
521 return HCI_AT_GENERAL_BONDING;
523 return HCI_AT_NO_BONDING;
528 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying ACL/LE connection. */
529 int l2cap_chan_check_security(struct l2cap_chan *chan)
531 struct l2cap_conn *conn = chan->conn;
534 auth_type = l2cap_get_auth_type(chan);
536 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for @conn, wrapping
 * within the kernel-reserved range under conn->lock. */
539 static u8 l2cap_get_ident(struct l2cap_conn *conn)
543 /* Get next available identificator.
544 * 1 - 128 are used by kernel.
545 * 129 - 199 are reserved.
546 * 200 - 254 are used by utilities like l2ping, etc.
549 spin_lock(&conn->lock);
551 if (++conn->tx_ident > 128)
556 spin_unlock(&conn->lock);
/* Build a signalling command skb and push it out over the HCI channel at
 * maximum priority, using non-flushable ACL start if the controller
 * supports it. */
561 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
563 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
566 BT_DBG("code 0x%2.2x", code);
571 if (lmp_no_flush_capable(conn->hcon->hdev))
572 flags = ACL_START_NO_FLUSH;
576 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
577 skb->priority = HCI_PRIO_MAX;
579 hci_send_acl(conn->hchan, skb, flags);
/* Transmit a data skb for @chan over HCI, honouring the channel's
 * flushable and force-active flags. */
582 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
584 struct hci_conn *hcon = chan->conn->hcon;
587 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
590 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
591 lmp_no_flush_capable(hcon->hdev))
592 flags = ACL_START_NO_FLUSH;
596 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
597 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM supervisory (S-)frame carrying @control, with
 * enhanced or extended control field per FLAG_EXT_CTRL and an optional
 * trailing CRC16 FCS. No-op unless the channel is connected. */
600 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
603 struct l2cap_hdr *lh;
604 struct l2cap_conn *conn = chan->conn;
607 if (chan->state != BT_CONNECTED)
610 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
611 hlen = L2CAP_EXT_HDR_SIZE;
613 hlen = L2CAP_ENH_HDR_SIZE;
615 if (chan->fcs == L2CAP_FCS_CRC16)
616 hlen += L2CAP_FCS_SIZE;
618 BT_DBG("chan %p, control 0x%8.8x", chan, control);
620 count = min_t(unsigned int, conn->mtu, hlen);
622 control |= __set_sframe(chan);
/* Fold pending Final / Poll bits into the outgoing control field. */
624 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
625 control |= __set_ctrl_final(chan);
627 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
628 control |= __set_ctrl_poll(chan);
630 skb = bt_skb_alloc(count, GFP_ATOMIC);
634 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
635 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
636 lh->cid = cpu_to_le16(chan->dcid);
638 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
640 if (chan->fcs == L2CAP_FCS_CRC16) {
641 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
642 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
645 skb->priority = HCI_PRIO_MAX;
646 l2cap_do_send(chan, skb);
/* Send RNR when the local side is busy (remembering that we sent it),
 * otherwise RR, always acknowledging up to buffer_seq. */
649 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
651 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
652 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
653 set_bit(CONN_RNR_SENT, &chan->conn_state);
655 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
657 control |= __set_reqseq(chan, chan->buffer_seq);
659 l2cap_send_sframe(chan, control);
/* True when no L2CAP Connect Request is outstanding for this channel. */
662 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
664 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off connection establishment for @chan. If the remote feature
 * mask is already known, send a Connect Request (subject to security);
 * otherwise first issue an Information Request for the feature mask and
 * arm the info timer. */
667 static void l2cap_do_start(struct l2cap_chan *chan)
669 struct l2cap_conn *conn = chan->conn;
671 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
672 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
675 if (l2cap_chan_check_security(chan) &&
676 __l2cap_no_conn_pending(chan)) {
677 struct l2cap_conn_req req;
678 req.scid = cpu_to_le16(chan->scid);
681 chan->ident = l2cap_get_ident(conn);
682 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
684 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
/* Feature mask unknown: query it before connecting. */
688 struct l2cap_info_req req;
689 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
691 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
692 conn->info_ident = l2cap_get_ident(conn);
694 schedule_delayed_work(&conn->info_timer,
695 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
697 l2cap_send_cmd(conn, conn->info_ident,
698 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether @mode (ERTM/streaming) is supported by both the local
 * and the remote feature masks. */
702 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
704 u32 local_feat_mask = l2cap_feat_mask;
706 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
709 case L2CAP_MODE_ERTM:
710 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
711 case L2CAP_MODE_STREAMING:
712 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP Disconnect Request for @chan, stopping all ERTM timers
 * first, and move the channel into BT_DISCONN. */
718 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
721 struct l2cap_disconn_req req;
728 if (chan->mode == L2CAP_MODE_ERTM) {
729 __clear_retrans_timer(chan);
730 __clear_monitor_timer(chan);
731 __clear_ack_timer(chan);
734 req.dcid = cpu_to_le16(chan->dcid);
735 req.scid = cpu_to_le16(chan->scid);
736 l2cap_send_cmd(conn, l2cap_get_ident(conn),
737 L2CAP_DISCONN_REQ, sizeof(req), &req);
739 l2cap_state_change(chan, BT_DISCONN);
743 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward: send Connect
 * Requests for channels in BT_CONNECT (closing those whose mode the peer
 * cannot support), and answer pending incoming connections (BT_CONNECT2)
 * with success, authorization-pending or authentication-pending. */
744 static void l2cap_conn_start(struct l2cap_conn *conn)
746 struct l2cap_chan *chan;
748 BT_DBG("conn %p", conn);
752 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
753 struct sock *sk = chan->sk;
757 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
762 if (chan->state == BT_CONNECT) {
763 struct l2cap_conn_req req;
765 if (!l2cap_chan_check_security(chan) ||
766 !__l2cap_no_conn_pending(chan)) {
/* Mode unsupported and state-2 device: give up on this channel. */
771 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
772 && test_bit(CONF_STATE2_DEVICE,
773 &chan->conf_state)) {
774 /* l2cap_chan_close() calls list_del(chan)
775 * so release the lock */
776 l2cap_chan_close(chan, ECONNRESET);
781 req.scid = cpu_to_le16(chan->scid);
784 chan->ident = l2cap_get_ident(conn);
785 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
787 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
790 } else if (chan->state == BT_CONNECT2) {
791 struct l2cap_conn_rsp rsp;
793 rsp.scid = cpu_to_le16(chan->dcid);
794 rsp.dcid = cpu_to_le16(chan->scid);
796 if (l2cap_chan_check_security(chan)) {
797 if (bt_sk(sk)->defer_setup) {
798 struct sock *parent = bt_sk(sk)->parent;
799 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
800 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
802 parent->sk_data_ready(parent, 0);
805 l2cap_state_change(chan, BT_CONFIG);
806 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
807 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
810 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
811 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
814 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only start configuration once, and only on success. */
817 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
818 rsp.result != L2CAP_CR_SUCCESS) {
823 set_bit(CONF_REQ_SENT, &chan->conf_state);
824 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
825 l2cap_build_conf_req(chan, buf), buf);
826 chan->num_conf_req++;
835 /* Find socket with cid and source bdaddr.
836 * Returns closest match, locked.
/* Exact source-address match wins immediately; a BDADDR_ANY binding is
 * kept as the fallback candidate. */
838 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
840 struct l2cap_chan *c, *c1 = NULL;
842 read_lock(&chan_list_lock);
844 list_for_each_entry(c, &chan_list, global_l) {
845 struct sock *sk = c->sk;
847 if (state && c->state != state)
850 if (c->scid == cid) {
852 if (!bacmp(&bt_sk(sk)->src, src)) {
853 read_unlock(&chan_list_lock);
858 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
863 read_unlock(&chan_list_lock);
/* Handle an incoming LE link: find a listener on the LE data CID, spawn
 * a child channel via ops->new_connection, enqueue it on the parent's
 * accept queue and mark it connected. */
868 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
870 struct sock *parent, *sk;
871 struct l2cap_chan *chan, *pchan;
875 /* Check if we have socket listening on cid */
876 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
885 /* Check for backlog size */
886 if (sk_acceptq_is_full(parent)) {
887 BT_DBG("backlog full %d", parent->sk_ack_backlog);
891 chan = pchan->ops->new_connection(pchan->data);
897 hci_conn_hold(conn->hcon);
899 bacpy(&bt_sk(sk)->src, conn->src);
900 bacpy(&bt_sk(sk)->dst, conn->dst);
902 bt_accept_enqueue(parent, sk);
904 l2cap_chan_add(conn, chan);
906 __set_chan_timer(chan, sk->sk_sndtimeo);
908 l2cap_state_change(chan, BT_CONNECTED);
909 parent->sk_data_ready(parent, 0);
912 release_sock(parent);
/* Mark @chan fully connected: reset configuration state, stop the timer
 * and wake the socket (and parent, when accepting). */
915 static void l2cap_chan_ready(struct l2cap_chan *chan)
917 struct sock *sk = chan->sk;
918 struct sock *parent = bt_sk(sk)->parent;
920 BT_DBG("sk %p, parent %p", sk, parent);
922 chan->conf_state = 0;
923 __clear_chan_timer(chan);
925 l2cap_state_change(chan, BT_CONNECTED);
926 sk->sk_state_change(sk);
929 parent->sk_data_ready(parent, 0);
/* HCI link is up: handle LE incoming/outgoing security, then bring each
 * channel on the connection forward (ready, connected, or start the
 * connect procedure). */
932 static void l2cap_conn_ready(struct l2cap_conn *conn)
934 struct l2cap_chan *chan;
936 BT_DBG("conn %p", conn);
938 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
939 l2cap_le_conn_ready(conn);
941 if (conn->hcon->out && conn->hcon->type == LE_LINK)
942 smp_conn_security(conn, conn->hcon->pending_sec_level);
946 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
947 struct sock *sk = chan->sk;
951 if (conn->hcon->type == LE_LINK) {
952 if (smp_conn_security(conn, chan->sec_level))
953 l2cap_chan_ready(chan);
955 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
956 __clear_chan_timer(chan);
957 l2cap_state_change(chan, BT_CONNECTED);
958 sk->sk_state_change(sk);
960 } else if (chan->state == BT_CONNECT)
961 l2cap_do_start(chan);
969 /* Notify sockets that we cannot guaranty reliability anymore */
/* Walks the channel list and flags force-reliable channels with @err;
 * the error-reporting lines are missing from this extract. */
970 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
972 struct l2cap_chan *chan;
974 BT_DBG("conn %p", conn);
978 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
979 struct sock *sk = chan->sk;
981 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
/* Information Request timed out: treat the feature-mask exchange as done
 * and proceed with connection startup anyway. */
988 static void l2cap_info_timeout(struct work_struct *work)
990 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
993 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
994 conn->info_ident = 0;
996 l2cap_conn_start(conn);
/* Tear down the L2CAP connection attached to @hcon: delete every
 * channel with @err, release the HCI channel, cancel pending timers and
 * SMP state, and detach from the hci_conn. */
999 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1001 struct l2cap_conn *conn = hcon->l2cap_data;
1002 struct l2cap_chan *chan, *l;
1008 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1010 kfree_skb(conn->rx_skb);
1013 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1016 l2cap_chan_del(chan, err);
1018 chan->ops->close(chan->data);
1021 hci_chan_del(conn->hchan);
1023 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1024 cancel_delayed_work_sync(&conn->info_timer);
1026 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1027 cancel_delayed_work_sync(&conn->security_timer);
1028 smp_chan_destroy(conn);
1031 hcon->l2cap_data = NULL;
/* SMP security procedure timed out: drop the whole connection. */
1035 static void security_timeout(struct work_struct *work)
1037 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1038 security_timer.work);
1040 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocate an HCI
 * channel, pick the MTU from the link type, and initialise locks, the
 * channel list and the type-appropriate delayed work. */
1043 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1045 struct l2cap_conn *conn = hcon->l2cap_data;
1046 struct hci_chan *hchan;
1051 hchan = hci_chan_create(hcon);
1055 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1057 hci_chan_del(hchan);
1061 hcon->l2cap_data = conn;
1063 conn->hchan = hchan;
1065 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1067 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1068 conn->mtu = hcon->hdev->le_mtu;
1070 conn->mtu = hcon->hdev->acl_mtu;
1072 conn->src = &hcon->hdev->bdaddr;
1073 conn->dst = &hcon->dst;
1075 conn->feat_mask = 0;
1077 spin_lock_init(&conn->lock);
1079 INIT_LIST_HEAD(&conn->chan_l);
/* LE links use the security timer; BR/EDR links use the info timer. */
1081 if (hcon->type == LE_LINK)
1082 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1084 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1086 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1091 /* ---- Socket interface ---- */
1093 /* Find socket with psm and source bdaddr.
1094 * Returns closest match.
/* Exact source-address match wins; a BDADDR_ANY binding is kept as the
 * fallback candidate (mirrors l2cap_global_chan_by_scid). */
1096 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1098 struct l2cap_chan *c, *c1 = NULL;
1100 read_lock(&chan_list_lock);
1102 list_for_each_entry(c, &chan_list, global_l) {
1103 struct sock *sk = c->sk;
1105 if (state && c->state != state)
1108 if (c->psm == psm) {
1110 if (!bacmp(&bt_sk(sk)->src, src)) {
1111 read_unlock(&chan_list_lock);
1116 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1121 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan to @dst on @psm/@cid:
 * validate PSM/mode, create or reuse the ACL/LE hci_conn, attach the
 * channel and either complete immediately (link already up) or wait for
 * l2cap_conn_ready(). Several error/validation lines are missing from
 * this extract. */
1126 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1128 struct sock *sk = chan->sk;
1129 bdaddr_t *src = &bt_sk(sk)->src;
1130 struct l2cap_conn *conn;
1131 struct hci_conn *hcon;
1132 struct hci_dev *hdev;
1136 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1139 hdev = hci_get_route(dst, src);
1141 return -EHOSTUNREACH;
1147 /* PSM must be odd and lsb of upper byte must be 0 */
1148 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1149 chan->chan_type != L2CAP_CHAN_RAW) {
1154 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1159 switch (chan->mode) {
1160 case L2CAP_MODE_BASIC:
1162 case L2CAP_MODE_ERTM:
1163 case L2CAP_MODE_STREAMING:
1172 switch (sk->sk_state) {
1176 /* Already connecting */
1181 /* Already connected */
1195 /* Set destination address and psm */
1196 bacpy(&bt_sk(sk)->dst, dst);
1200 auth_type = l2cap_get_auth_type(chan);
/* LE data CID selects an LE link; everything else goes over ACL. */
1202 if (chan->dcid == L2CAP_CID_LE_DATA)
1203 hcon = hci_connect(hdev, LE_LINK, dst,
1204 chan->sec_level, auth_type);
1206 hcon = hci_connect(hdev, ACL_LINK, dst,
1207 chan->sec_level, auth_type);
1210 err = PTR_ERR(hcon);
1214 conn = l2cap_conn_add(hcon, 0);
1221 /* Update source addr of the socket */
1222 bacpy(src, conn->src);
1224 l2cap_chan_add(conn, chan);
1226 l2cap_state_change(chan, BT_CONNECT);
1227 __set_chan_timer(chan, sk->sk_sndtimeo);
1229 if (hcon->state == BT_CONNECTED) {
1230 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1231 __clear_chan_timer(chan);
1232 if (l2cap_chan_check_security(chan))
1233 l2cap_state_change(chan, BT_CONNECTED);
1235 l2cap_do_start(chan);
1241 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until every outstanding ERTM frame on the
 * socket's channel has been acknowledged, a signal arrives, or a socket
 * error occurs. */
1246 int __l2cap_wait_ack(struct sock *sk)
1248 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1249 DECLARE_WAITQUEUE(wait, current);
1253 add_wait_queue(sk_sleep(sk), &wait);
1254 set_current_state(TASK_INTERRUPTIBLE);
1255 while (chan->unacked_frames > 0 && chan->conn) {
1259 if (signal_pending(current)) {
1260 err = sock_intr_errno(timeo);
1265 timeo = schedule_timeout(timeo);
1267 set_current_state(TASK_INTERRUPTIBLE);
1269 err = sock_error(sk);
1273 set_current_state(TASK_RUNNING);
1274 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer expired: disconnect after max retries, otherwise
 * re-arm the timer and poll the peer with RR/RNR (P-bit). */
1278 static void l2cap_monitor_timeout(struct work_struct *work)
1280 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1281 monitor_timer.work);
1282 struct sock *sk = chan->sk;
1284 BT_DBG("chan %p", chan);
1287 if (chan->retry_count >= chan->remote_max_tx) {
1288 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1293 chan->retry_count++;
1294 __set_monitor_timer(chan);
1296 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer expired: start the monitor cycle, enter the
 * WAIT_F state and poll the peer. */
1300 static void l2cap_retrans_timeout(struct work_struct *work)
1302 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1303 retrans_timer.work);
1304 struct sock *sk = chan->sk;
1306 BT_DBG("chan %p", chan);
1309 chan->retry_count = 1;
1310 __set_monitor_timer(chan);
1312 set_bit(CONN_WAIT_F, &chan->conn_state);
1314 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Free frames at the head of the tx queue that the peer has acknowledged
 * (up to, but not including, expected_ack_seq); stop the retransmission
 * timer once nothing is unacked. */
1318 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1320 struct sk_buff *skb;
1322 while ((skb = skb_peek(&chan->tx_q)) &&
1323 chan->unacked_frames) {
1324 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1327 skb = skb_dequeue(&chan->tx_q);
1330 chan->unacked_frames--;
1333 if (!chan->unacked_frames)
1334 __clear_retrans_timer(chan);
/* Streaming mode transmit: drain the tx queue, stamping each frame's
 * control field with the next TxSeq and recomputing the trailing CRC16
 * FCS when enabled. No retransmission state is kept. */
1337 static void l2cap_streaming_send(struct l2cap_chan *chan)
1339 struct sk_buff *skb;
1343 while ((skb = skb_dequeue(&chan->tx_q))) {
1344 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1345 control |= __set_txseq(chan, chan->next_tx_seq);
1346 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1348 if (chan->fcs == L2CAP_FCS_CRC16) {
1349 fcs = crc16(0, (u8 *)skb->data,
1350 skb->len - L2CAP_FCS_SIZE);
1351 put_unaligned_le16(fcs,
1352 skb->data + skb->len - L2CAP_FCS_SIZE);
1355 l2cap_do_send(chan, skb);
1357 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the queued I-frame with sequence number @tx_seq: locate it
 * in the tx queue, clone it, rebuild the control field (F-bit, ReqSeq,
 * TxSeq) and FCS, and send. Disconnects if the frame has already hit
 * remote_max_tx retries. */
1361 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1363 struct sk_buff *skb, *tx_skb;
1367 skb = skb_peek(&chan->tx_q);
1371 while (bt_cb(skb)->tx_seq != tx_seq) {
1372 if (skb_queue_is_last(&chan->tx_q, skb))
1375 skb = skb_queue_next(&chan->tx_q, skb);
1378 if (chan->remote_max_tx &&
1379 bt_cb(skb)->retries == chan->remote_max_tx) {
1380 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Clone so the original stays queued for further retransmissions. */
1384 tx_skb = skb_clone(skb, GFP_ATOMIC);
1385 bt_cb(skb)->retries++;
1387 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1388 control &= __get_sar_mask(chan);
1390 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1391 control |= __set_ctrl_final(chan);
1393 control |= __set_reqseq(chan, chan->buffer_seq);
1394 control |= __set_txseq(chan, tx_seq);
1396 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1398 if (chan->fcs == L2CAP_FCS_CRC16) {
1399 fcs = crc16(0, (u8 *)tx_skb->data,
1400 tx_skb->len - L2CAP_FCS_SIZE);
1401 put_unaligned_le16(fcs,
1402 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1405 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send clones of queued frames from tx_send_head
 * while the transmit window has room, stamping control fields and FCS,
 * arming the retransmission timer and tracking unacked frames.
 * NOTE(review): at 1442-1445 the FCS is computed over and written into
 * the ORIGINAL skb (skb->data) rather than the clone (tx_skb->data),
 * unlike l2cap_retransmit_one_frame() above — looks suspicious; compare
 * against the full upstream source before changing. */
1408 static int l2cap_ertm_send(struct l2cap_chan *chan)
1410 struct sk_buff *skb, *tx_skb;
1415 if (chan->state != BT_CONNECTED)
1418 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1420 if (chan->remote_max_tx &&
1421 bt_cb(skb)->retries == chan->remote_max_tx) {
1422 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1426 tx_skb = skb_clone(skb, GFP_ATOMIC);
1428 bt_cb(skb)->retries++;
1430 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1431 control &= __get_sar_mask(chan);
1433 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1434 control |= __set_ctrl_final(chan);
1436 control |= __set_reqseq(chan, chan->buffer_seq);
1437 control |= __set_txseq(chan, chan->next_tx_seq);
1439 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1441 if (chan->fcs == L2CAP_FCS_CRC16) {
1442 fcs = crc16(0, (u8 *)skb->data,
1443 tx_skb->len - L2CAP_FCS_SIZE);
1444 put_unaligned_le16(fcs, skb->data +
1445 tx_skb->len - L2CAP_FCS_SIZE);
1448 l2cap_do_send(chan, tx_skb);
1450 __set_retrans_timer(chan);
1452 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1454 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* First transmission of this frame: it is now outstanding. */
1456 if (bt_cb(skb)->retries == 1) {
1457 chan->unacked_frames++;
1460 __clear_ack_timer(chan);
1463 chan->frames_sent++;
1465 if (skb_queue_is_last(&chan->tx_q, skb))
1466 chan->tx_send_head = NULL;
1468 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the head of the tx queue and resume
 * transmission from the last acknowledged sequence number. */
1474 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1478 if (!skb_queue_empty(&chan->tx_q))
1479 chan->tx_send_head = chan->tx_q.next;
1481 chan->next_tx_seq = chan->expected_ack_seq;
1482 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy, otherwise
 * piggyback the ack on pending I-frames if any can be sent, falling back
 * to an explicit RR. */
1486 static void __l2cap_send_ack(struct l2cap_chan *chan)
1490 control |= __set_reqseq(chan, chan->buffer_seq);
1492 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1493 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1494 set_bit(CONN_RNR_SENT, &chan->conn_state);
1495 l2cap_send_sframe(chan, control);
1499 if (l2cap_ertm_send(chan) > 0)
1502 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1503 l2cap_send_sframe(chan, control);
/* Cancel the pending delayed ack and send it immediately. */
1506 static void l2cap_send_ack(struct l2cap_chan *chan)
1508 __clear_ack_timer(chan);
1509 __l2cap_send_ack(chan);
/* Send an SREJ S-frame with the F-bit set for the last (tail) entry on
 * the SREJ list. */
1512 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1514 struct srej_list *tail;
1517 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1518 control |= __set_ctrl_final(chan);
1520 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1521 control |= __set_reqseq(chan, tail->tx_seq);
1523 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb: @count bytes into the
 * head, then MTU-sized continuation fragments chained on frag_list, each
 * allocated via ops->alloc_skb and inheriting the head's priority. */
1526 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1528 struct l2cap_conn *conn = chan->conn;
1529 struct sk_buff **frag;
1532 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1538 /* Continuation fragments (no L2CAP header) */
1539 frag = &skb_shinfo(skb)->frag_list;
1541 count = min_t(unsigned int, conn->mtu, len);
1543 *frag = chan->ops->alloc_skb(chan, count,
1544 msg->msg_flags & MSG_DONTWAIT, &err);
1548 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1551 (*frag)->priority = skb->priority;
1556 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * payload copied from the user iovec via l2cap_skbuff_fromiovec().
 * Returns ERR_PTR on allocation or copy failure. */
1562 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1563 struct msghdr *msg, size_t len,
1566 struct l2cap_conn *conn = chan->conn;
1567 struct sk_buff *skb;
1568 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1569 struct l2cap_hdr *lh;
1571 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1573 count = min_t(unsigned int, (conn->mtu - hlen), len);
1575 skb = chan->ops->alloc_skb(chan, count + hlen,
1576 msg->msg_flags & MSG_DONTWAIT, &err);
1579 return ERR_PTR(err);
1581 skb->priority = priority;
1583 /* Create L2CAP header */
1584 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1585 lh->cid = cpu_to_le16(chan->dcid);
1586 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1587 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1589 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1590 if (unlikely(err < 0)) {
1592 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus payload
 * copied from the user iovec. Returns ERR_PTR on failure. */
1597 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1598 struct msghdr *msg, size_t len,
1601 struct l2cap_conn *conn = chan->conn;
1602 struct sk_buff *skb;
1603 int err, count, hlen = L2CAP_HDR_SIZE;
1604 struct l2cap_hdr *lh;
1606 BT_DBG("chan %p len %d", chan, (int)len);
1608 count = min_t(unsigned int, (conn->mtu - hlen), len);
1610 skb = chan->ops->alloc_skb(chan, count + hlen,
1611 msg->msg_flags & MSG_DONTWAIT, &err);
1614 return ERR_PTR(err);
1616 skb->priority = priority;
1618 /* Create L2CAP header */
1619 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1620 lh->cid = cpu_to_le16(chan->dcid);
1621 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1623 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1624 if (unlikely(err < 0)) {
1626 return ERR_PTR(err);
/* Build an ERTM/Streaming I-frame PDU: L2CAP header, control field
 * (enhanced or extended per FLAG_EXT_CTRL), optional SDU length (when
 * @sdulen is used for a segmented SDU's START frame), payload, and an FCS
 * placeholder when CRC16 is enabled. Returns skb or ERR_PTR on failure.
 */
1631 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1632 struct msghdr *msg, size_t len,
1633 u32 control, u16 sdulen)
1635 struct l2cap_conn *conn = chan->conn;
1636 struct sk_buff *skb;
1637 int err, count, hlen;
1638 struct l2cap_hdr *lh;
1640 BT_DBG("chan %p len %d", chan, (int)len);
1643 return ERR_PTR(-ENOTCONN);
/* Header size depends on whether the extended control field is in use. */
1645 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1646 hlen = L2CAP_EXT_HDR_SIZE;
1648 hlen = L2CAP_ENH_HDR_SIZE;
/* Room for the SDU length field (start-of-SDU frames). */
1651 hlen += L2CAP_SDULEN_SIZE;
1653 if (chan->fcs == L2CAP_FCS_CRC16)
1654 hlen += L2CAP_FCS_SIZE;
1656 count = min_t(unsigned int, (conn->mtu - hlen), len);
1658 skb = chan->ops->alloc_skb(chan, count + hlen,
1659 msg->msg_flags & MSG_DONTWAIT, &err);
1662 return ERR_PTR(err);
1664 /* Create L2CAP header */
1665 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1666 lh->cid = cpu_to_le16(chan->dcid);
1667 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Control field width (2 or 4 bytes) is chosen by __ctrl_size(). */
1669 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1672 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1674 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1675 if (unlikely(err < 0)) {
1677 return ERR_PTR(err);
/* FCS is filled in at transmit time; reserve the space with zeros. */
1680 if (chan->fcs == L2CAP_FCS_CRC16)
1681 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
/* Fresh frame: no retransmissions yet. */
1683 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than remote_mps into a START frame (carrying the
 * total SDU length), zero or more CONTINUE frames, and an END frame, then
 * splice the whole segment list onto the channel's tx queue. On any PDU
 * build failure the partially built list is purged and the error returned.
 */
1687 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1689 struct sk_buff *skb;
1690 struct sk_buff_head sar_queue;
1694 skb_queue_head_init(&sar_queue);
/* First PDU: SAR=START, and sdulen carries the full SDU length. */
1695 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1696 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1698 return PTR_ERR(skb);
1700 __skb_queue_tail(&sar_queue, skb);
1701 len -= chan->remote_mps;
1702 size += chan->remote_mps;
/* Middle vs. last segment: CONTINUE while more than one MPS remains,
 * END for the final (possibly short) segment. */
1707 if (len > chan->remote_mps) {
1708 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1709 buflen = chan->remote_mps;
1711 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
/* Non-START frames carry no SDU-length field (sdulen = 0). */
1715 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1717 skb_queue_purge(&sar_queue);
1718 return PTR_ERR(skb);
1721 __skb_queue_tail(&sar_queue, skb);
/* Append all segments atomically to the channel tx queue; if nothing was
 * pending, transmission starts from the first new segment. */
1725 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1726 if (chan->tx_send_head == NULL)
1727 chan->tx_send_head = sar_queue.next;
/* Entry point for sending user data on a channel. Dispatches on channel
 * type/mode: connectionless channels and Basic mode send immediately;
 * ERTM/Streaming queue I-frames (segmenting if the SDU exceeds the
 * remote MPS) and then kick the appropriate transmit engine.
 * Returns bytes sent/queued or a negative error.
 */
1732 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1735 struct sk_buff *skb;
1739 /* Connectionless channel */
1740 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1741 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1743 return PTR_ERR(skb);
1745 l2cap_do_send(chan, skb);
1749 switch (chan->mode) {
1750 case L2CAP_MODE_BASIC:
1751 /* Check outgoing MTU */
1752 if (len > chan->omtu)
1755 /* Create a basic PDU */
1756 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1758 return PTR_ERR(skb);
1760 l2cap_do_send(chan, skb);
1764 case L2CAP_MODE_ERTM:
1765 case L2CAP_MODE_STREAMING:
1766 /* Entire SDU fits into one PDU */
1767 if (len <= chan->remote_mps) {
1768 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1769 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1772 return PTR_ERR(skb);
1774 __skb_queue_tail(&chan->tx_q, skb);
/* If nothing was pending, this frame becomes the next to send. */
1776 if (chan->tx_send_head == NULL)
1777 chan->tx_send_head = skb;
1780 /* Segment SDU into multiples PDUs */
1781 err = l2cap_sar_segment_sdu(chan, msg, len);
1786 if (chan->mode == L2CAP_MODE_STREAMING) {
1787 l2cap_streaming_send(chan);
/* ERTM: while remote is busy and we await an F-bit response, frames
 * stay queued; otherwise push via the ERTM engine. */
1792 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1793 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1798 err = l2cap_ertm_send(chan);
/* NOTE(review): message says "bad state" but prints chan->mode —
 * presumably intentional shorthand; confirm. */
1805 BT_DBG("bad state %1.1x", chan->mode);
1812 /* Copy frame to all raw sockets on that connection */
/* Clone the incoming frame to every RAW-type channel on @conn so raw
 * listeners (e.g. monitors) see signalling traffic. Iteration is
 * RCU-protected; clones use GFP_ATOMIC since this runs in RX context.
 */
1813 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1815 struct sk_buff *nskb;
1816 struct l2cap_chan *chan;
1818 BT_DBG("conn %p", conn);
1822 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1823 struct sock *sk = chan->sk;
/* Only RAW channels receive copies. */
1824 if (chan->chan_type != L2CAP_CHAN_RAW)
1827 /* Don't send frame to the socket it came from */
1830 nskb = skb_clone(skb, GFP_ATOMIC);
/* Channel recv op consumes nskb on success; visible code does not
 * show the failure handling (elided). */
1834 if (chan->ops->recv(chan->data, nskb))
1841 /* ---- L2CAP signalling commands ---- */
/* Build a signalling command skb: L2CAP header (CID = signalling channel,
 * LE or BR/EDR variant), command header (code/ident/len), then @dlen bytes
 * of @data — fragmented across frag_list skbs when larger than the MTU.
 * Returns the skb or NULL on allocation failure (elided from this view).
 */
1842 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1843 u8 code, u8 ident, u16 dlen, void *data)
1845 struct sk_buff *skb, **frag;
1846 struct l2cap_cmd_hdr *cmd;
1847 struct l2cap_hdr *lh;
1850 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1851 conn, code, ident, dlen);
1853 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1854 count = min_t(unsigned int, conn->mtu, len);
1856 skb = bt_skb_alloc(count, GFP_ATOMIC);
1860 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1861 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links. */
1863 if (conn->hcon->type == LE_LINK)
1864 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1866 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1868 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1871 cmd->len = cpu_to_le16(dlen);
/* Copy as much of the payload as fits in the head skb. */
1874 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1875 memcpy(skb_put(skb, count), data, count);
1881 /* Continuation fragments (no L2CAP header) */
1882 frag = &skb_shinfo(skb)->frag_list;
1884 count = min_t(unsigned int, conn->mtu, len);
1886 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1890 memcpy(skb_put(*frag, count), data, count);
1895 frag = &(*frag)->next;
/* Decode one configuration option at *@ptr. Returns the consumed length
 * (header + value) and fills @type, @olen and @val; scalar values (1/2/4
 * bytes) are decoded, anything else is returned as a pointer in @val.
 */
1905 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1907 struct l2cap_conf_opt *opt = *ptr;
1910 len = L2CAP_CONF_OPT_SIZE + opt->len;
1918 *val = *((u8 *) opt->val);
1922 *val = get_unaligned_le16(opt->val);
1926 *val = get_unaligned_le32(opt->val);
/* Non-scalar option: hand back a pointer to the raw value bytes. */
1930 *val = (unsigned long) opt->val;
1934 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *@ptr and advance
 * the cursor past it. Scalar lengths 1/2/4 are encoded little-endian;
 * other lengths treat @val as a pointer and memcpy the value.
 */
1938 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1940 struct l2cap_conf_opt *opt = *ptr;
1942 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1949 *((u8 *) opt->val) = val;
1953 put_unaligned_le16(val, opt->val);
1957 put_unaligned_le32(val, opt->val);
/* Non-scalar: val is actually a pointer to the value buffer. */
1961 memcpy(opt->val, (void *) val, len);
/* Advance write cursor past header + value. */
1965 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters. ERTM uses the channel's service type;
 * Streaming forces best-effort. Fields are little-endian on the wire.
 */
1968 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1970 struct l2cap_conf_efs efs;
1972 switch (chan->mode) {
1973 case L2CAP_MODE_ERTM:
1974 efs.id = chan->local_id;
1975 efs.stype = chan->local_stype;
1976 efs.msdu = cpu_to_le16(chan->local_msdu);
1977 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
/* ERTM advertises default access latency and flush timeout. */
1978 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1979 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1982 case L2CAP_MODE_STREAMING:
/* Streaming mode is always best-effort service. */
1984 efs.stype = L2CAP_SERV_BESTEFFORT;
1985 efs.msdu = cpu_to_le16(chan->local_msdu);
1986 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1995 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1996 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: send a pending
 * acknowledgement under the socket lock, then drop the timer's channel
 * reference taken when the work was scheduled.
 */
1999 static void l2cap_ack_timeout(struct work_struct *work)
2001 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2004 BT_DBG("chan %p", chan);
2006 lock_sock(chan->sk);
2007 __l2cap_send_ack(chan);
2008 release_sock(chan->sk);
/* Balance the reference held while the work was pending. */
2010 l2cap_chan_put(chan);
/* Reset ERTM per-channel transmit/receive state and initialize the three
 * ERTM timers (retransmission, monitor, ack) plus the SREJ queue/list.
 * Called when a channel enters connected state in ERTM mode.
 */
2013 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2015 chan->expected_ack_seq = 0;
2016 chan->unacked_frames = 0;
2017 chan->buffer_seq = 0;
2018 chan->num_acked = 0;
2019 chan->frames_sent = 0;
2021 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2022 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2023 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
/* Queue of out-of-sequence frames held for selective reject recovery. */
2025 skb_queue_head_init(&chan->srej_q);
2027 INIT_LIST_HEAD(&chan->srej_l);
/* Pick the channel mode to use: keep ERTM/Streaming only if the remote's
 * feature mask supports it, otherwise fall back to Basic mode.
 */
2030 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2033 case L2CAP_MODE_STREAMING:
2034 case L2CAP_MODE_ERTM:
2035 if (l2cap_mode_supported(mode, remote_feat_mask))
/* Remote lacks support (or mode unknown): use Basic mode. */
2039 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled
 * and the remote advertising the EXT_WINDOW feature. */
2043 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2045 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the remote advertising the EXT_FLOW feature. */
2048 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2050 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Configure the transmit window: switch to the extended control field
 * (larger window) when the requested window exceeds the default and the
 * peer supports extended window size; otherwise clamp to the default.
 */
2053 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2055 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2056 __l2cap_ews_supported(chan)) {
2057 /* use extended control field */
2058 set_bit(FLAG_EXT_CTRL, &chan->flags);
2059 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
/* Enhanced (2-byte) control field: cap window at the default max. */
2061 chan->tx_win = min_t(u16, chan->tx_win,
2062 L2CAP_DEFAULT_TX_WINDOW);
2063 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request into @data: MTU (if non-default),
 * then per-mode RFC/EFS/FCS/EWS options. On the first request, may
 * downgrade chan->mode to what the remote's feature mask supports.
 * Returns the total request length (tail of function elided from view).
 */
2067 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2069 struct l2cap_conf_req *req = data;
2070 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2071 void *ptr = req->data;
2074 BT_DBG("chan %p", chan);
/* Mode (re)selection happens only on the very first config exchange. */
2076 if (chan->num_conf_req || chan->num_conf_rsp)
2079 switch (chan->mode) {
2080 case L2CAP_MODE_STREAMING:
2081 case L2CAP_MODE_ERTM:
/* A state-2 device keeps its configured mode unconditionally. */
2082 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2085 if (__l2cap_efs_supported(chan))
2086 set_bit(FLAG_EFS_ENABLE, &chan->flags)
2090 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2095 if (chan->imtu != L2CAP_DEFAULT_MTU)
2096 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2098 switch (chan->mode) {
2099 case L2CAP_MODE_BASIC:
/* If the remote supports neither ERTM nor Streaming there is no point
 * sending an RFC option at all. */
2100 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2101 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2104 rfc.mode = L2CAP_MODE_BASIC;
2106 rfc.max_transmit = 0;
2107 rfc.retrans_timeout = 0;
2108 rfc.monitor_timeout = 0;
2109 rfc.max_pdu_size = 0;
2111 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2112 (unsigned long) &rfc);
2115 case L2CAP_MODE_ERTM:
2116 rfc.mode = L2CAP_MODE_ERTM;
2117 rfc.max_transmit = chan->max_tx;
/* Timeouts are filled in by the acceptor side; request sends zero. */
2118 rfc.retrans_timeout = 0;
2119 rfc.monitor_timeout = 0;
/* Advertised PDU size must fit the link MTU minus worst-case
 * (extended) header overhead. */
2121 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2122 L2CAP_EXT_HDR_SIZE -
2125 rfc.max_pdu_size = cpu_to_le16(size);
2127 l2cap_txwin_setup(chan);
/* RFC txwin is capped at the enhanced-field default even when the
 * extended window (sent separately via EWS) is larger. */
2129 rfc.txwin_size = min_t(u16, chan->tx_win,
2130 L2CAP_DEFAULT_TX_WINDOW);
2132 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2133 (unsigned long) &rfc);
2135 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2136 l2cap_add_opt_efs(&ptr, chan);
2138 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Request FCS off when we don't want it or remote already said none. */
2141 if (chan->fcs == L2CAP_FCS_NONE ||
2142 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2143 chan->fcs = L2CAP_FCS_NONE;
2144 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2147 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2148 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2152 case L2CAP_MODE_STREAMING:
2153 rfc.mode = L2CAP_MODE_STREAMING;
/* Streaming: no retransmission parameters. */
2155 rfc.max_transmit = 0;
2156 rfc.retrans_timeout = 0;
2157 rfc.monitor_timeout = 0;
2159 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2160 L2CAP_EXT_HDR_SIZE -
2163 rfc.max_pdu_size = cpu_to_le16(size);
2165 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2166 (unsigned long) &rfc);
2168 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2169 l2cap_add_opt_efs(&ptr, chan);
2171 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2174 if (chan->fcs == L2CAP_FCS_NONE ||
2175 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2176 chan->fcs = L2CAP_FCS_NONE;
2177 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2182 req->dcid = cpu_to_le16(chan->dcid);
2183 req->flags = cpu_to_le16(0);
/* Parse the accumulated Configure Request (chan->conf_req/conf_len) and
 * build the Configure Response into @data. Walks each option, negotiates
 * mode/MTU/RFC/FCS/EFS/EWS, records remote parameters, and sets the
 * result (SUCCESS / UNACCEPT / UNKNOWN / PENDING). Returns the response
 * length (tail elided from view); -ECONNREFUSED aborts negotiation.
 */
2188 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2190 struct l2cap_conf_rsp *rsp = data;
2191 void *ptr = rsp->data;
2192 void *req = chan->conf_req;
2193 int len = chan->conf_len;
2194 int type, hint, olen;
2196 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2197 struct l2cap_conf_efs efs;
2199 u16 mtu = L2CAP_DEFAULT_MTU;
2200 u16 result = L2CAP_CONF_SUCCESS;
2203 BT_DBG("chan %p", chan);
/* First pass: decode every option the remote sent. */
2205 while (len >= L2CAP_CONF_OPT_SIZE) {
2206 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; non-hints must be understood. */
2208 hint = type & L2CAP_CONF_HINT;
2209 type &= L2CAP_CONF_MASK;
2212 case L2CAP_CONF_MTU:
2216 case L2CAP_CONF_FLUSH_TO:
2217 chan->flush_to = val;
2220 case L2CAP_CONF_QOS:
2223 case L2CAP_CONF_RFC:
2224 if (olen == sizeof(rfc))
2225 memcpy(&rfc, (void *) val, olen);
2228 case L2CAP_CONF_FCS:
2229 if (val == L2CAP_FCS_NONE)
2230 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2233 case L2CAP_CONF_EFS:
2235 if (olen == sizeof(efs))
2236 memcpy(&efs, (void *) val, olen);
2239 case L2CAP_CONF_EWS:
/* EWS without high-speed support is refused outright (elided
 * condition presumably checks enable_hs — confirm). */
2241 return -ECONNREFUSED;
2243 set_bit(FLAG_EXT_CTRL, &chan->flags);
2244 set_bit(CONF_EWS_RECV, &chan->conf_state);
2245 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2246 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN. */
2253 result = L2CAP_CONF_UNKNOWN;
2254 *((u8 *) ptr++) = type;
/* Mode negotiation happens only on the first request/response round. */
2259 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2262 switch (chan->mode) {
2263 case L2CAP_MODE_STREAMING:
2264 case L2CAP_MODE_ERTM:
2265 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2266 chan->mode = l2cap_select_mode(rfc.mode,
2267 chan->conn->feat_mask);
2272 if (__l2cap_efs_supported(chan))
2273 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2275 return -ECONNREFUSED;
/* State-2 device: remote must match our fixed mode exactly. */
2278 if (chan->mode != rfc.mode)
2279 return -ECONNREFUSED;
/* Mode mismatch: reject this mode, propose ours; give up after the
 * second attempt. */
2285 if (chan->mode != rfc.mode) {
2286 result = L2CAP_CONF_UNACCEPT;
2287 rfc.mode = chan->mode;
2289 if (chan->num_conf_rsp == 1)
2290 return -ECONNREFUSED;
2292 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2293 sizeof(rfc), (unsigned long) &rfc);
2296 if (result == L2CAP_CONF_SUCCESS) {
2297 /* Configure output options and let the other side know
2298 * which ones we don't like. */
2300 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2301 result = L2CAP_CONF_UNACCEPT;
2304 set_bit(CONF_MTU_DONE, &chan->conf_state);
2306 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service type must be compatible with our local one (or be
 * no-traffic on either side). */
2309 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2310 efs.stype != L2CAP_SERV_NOTRAFIC &&
2311 efs.stype != chan->local_stype) {
2313 result = L2CAP_CONF_UNACCEPT;
2315 if (chan->num_conf_req >= 1)
2316 return -ECONNREFUSED;
2318 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2320 (unsigned long) &efs);
2322 /* Send PENDING Conf Rsp */
2323 result = L2CAP_CONF_PENDING;
2324 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2329 case L2CAP_MODE_BASIC:
2330 chan->fcs = L2CAP_FCS_NONE;
2331 set_bit(CONF_MODE_DONE, &chan->conf_state);
2334 case L2CAP_MODE_ERTM:
/* Remote window: from EWS if received, else from the RFC option. */
2335 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2336 chan->remote_tx_win = rfc.txwin_size;
2338 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2340 chan->remote_max_tx = rfc.max_transmit;
/* Clamp remote MPS to what our link MTU can actually carry. */
2342 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2344 L2CAP_EXT_HDR_SIZE -
2347 rfc.max_pdu_size = cpu_to_le16(size);
2348 chan->remote_mps = size;
/* NOTE(review): le16_to_cpu on host-order default constants looks
 * like it should be cpu_to_le16 (rfc fields are wire-endian) —
 * confirm against endianness annotations. */
2350 rfc.retrans_timeout =
2351 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2352 rfc.monitor_timeout =
2353 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2355 set_bit(CONF_MODE_DONE, &chan->conf_state);
2357 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2358 sizeof(rfc), (unsigned long) &rfc);
/* Record the remote's EFS parameters and echo the option back. */
2360 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2361 chan->remote_id = efs.id;
2362 chan->remote_stype = efs.stype;
2363 chan->remote_msdu = le16_to_cpu(efs.msdu);
2364 chan->remote_flush_to =
2365 le32_to_cpu(efs.flush_to);
2366 chan->remote_acc_lat =
2367 le32_to_cpu(efs.acc_lat);
2368 chan->remote_sdu_itime =
2369 le32_to_cpu(efs.sdu_itime);
2370 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2371 sizeof(efs), (unsigned long) &efs);
2375 case L2CAP_MODE_STREAMING:
2376 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2378 L2CAP_EXT_HDR_SIZE -
2381 rfc.max_pdu_size = cpu_to_le16(size);
2382 chan->remote_mps = size;
2384 set_bit(CONF_MODE_DONE, &chan->conf_state);
2386 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2387 sizeof(rfc), (unsigned long) &rfc);
/* Any other mode: unacceptable; answer with our mode in the RFC. */
2392 result = L2CAP_CONF_UNACCEPT;
2394 memset(&rfc, 0, sizeof(rfc));
2395 rfc.mode = chan->mode;
2398 if (result == L2CAP_CONF_SUCCESS)
2399 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2401 rsp->scid = cpu_to_le16(chan->dcid);
2402 rsp->result = cpu_to_le16(result);
2403 rsp->flags = cpu_to_le16(0x0000);
/* Parse a Configure Response (@rsp, @len) and build a follow-up request
 * into @data, adjusting channel parameters to what the remote accepted.
 * @result may be downgraded (e.g. MTU too small → UNACCEPT). Applies
 * negotiated ERTM/Streaming parameters on SUCCESS or PENDING. Returns
 * the new request length (tail elided from view); -ECONNREFUSED aborts.
 */
2408 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2410 struct l2cap_conf_req *req = data;
2411 void *ptr = req->data;
2414 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2415 struct l2cap_conf_efs efs;
2417 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2419 while (len >= L2CAP_CONF_OPT_SIZE) {
2420 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2423 case L2CAP_CONF_MTU:
/* Remote proposed an MTU below the spec minimum: refuse it and
 * counter with the minimum. */
2424 if (val < L2CAP_DEFAULT_MIN_MTU) {
2425 *result = L2CAP_CONF_UNACCEPT;
2426 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2429 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2432 case L2CAP_CONF_FLUSH_TO:
2433 chan->flush_to = val;
2434 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2438 case L2CAP_CONF_RFC:
2439 if (olen == sizeof(rfc))
2440 memcpy(&rfc, (void *)val, olen);
/* A state-2 device cannot change modes mid-negotiation. */
2442 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2443 rfc.mode != chan->mode)
2444 return -ECONNREFUSED;
2448 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2449 sizeof(rfc), (unsigned long) &rfc);
2452 case L2CAP_CONF_EWS:
/* Accept the remote's window, capped at the extended maximum. */
2453 chan->tx_win = min_t(u16, val,
2454 L2CAP_DEFAULT_EXT_WINDOW);
2455 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2459 case L2CAP_CONF_EFS:
2460 if (olen == sizeof(efs))
2461 memcpy(&efs, (void *)val, olen);
/* Incompatible service type cannot be reconciled: refuse. */
2463 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2464 efs.stype != L2CAP_SERV_NOTRAFIC &&
2465 efs.stype != chan->local_stype)
2466 return -ECONNREFUSED;
2468 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2469 sizeof(efs), (unsigned long) &efs);
/* Basic mode cannot be silently upgraded by the remote. */
2474 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2475 return -ECONNREFUSED;
2477 chan->mode = rfc.mode;
/* On SUCCESS/PENDING, latch the negotiated timers and PDU size. */
2479 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2481 case L2CAP_MODE_ERTM:
2482 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2483 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2484 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2486 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2487 chan->local_msdu = le16_to_cpu(efs.msdu);
2488 chan->local_sdu_itime =
2489 le32_to_cpu(efs.sdu_itime);
2490 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2491 chan->local_flush_to =
2492 le32_to_cpu(efs.flush_to);
2496 case L2CAP_MODE_STREAMING:
2497 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2501 req->dcid = cpu_to_le16(chan->dcid);
2502 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal Configure Response header (scid/result/flags) with no
 * options. Return value (response length) is elided from this view.
 */
2507 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2509 struct l2cap_conf_rsp *rsp = data;
2510 void *ptr = rsp->data;
2512 BT_DBG("chan %p", chan);
/* scid in the response is the peer's source CID, i.e. our dcid. */
2514 rsp->scid = cpu_to_le16(chan->dcid);
2515 rsp->result = cpu_to_le16(result);
2516 rsp->flags = cpu_to_le16(flags);
/* Complete a deferred connection accept: send the success Connect
 * Response for the pending ident, then (once only, guarded by
 * CONF_REQ_SENT) kick off configuration with a Configure Request.
 */
2521 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2523 struct l2cap_conn_rsp rsp;
2524 struct l2cap_conn *conn = chan->conn;
2527 rsp.scid = cpu_to_le16(chan->dcid);
2528 rsp.dcid = cpu_to_le16(chan->scid);
2529 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2530 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2531 l2cap_send_cmd(conn, chan->ident,
2532 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only send the initial Configure Request once. */
2534 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2537 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2538 l2cap_build_conf_req(chan, buf), buf);
2539 chan->num_conf_req++;
/* Extract the RFC option from a successful Configure Response and apply
 * its timeouts/MPS to the channel. If the remote omitted the RFC option,
 * fall back to sane defaults rather than leaving fields uninitialized.
 * Only meaningful for ERTM/Streaming channels.
 */
2542 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2546 struct l2cap_conf_rfc rfc;
2548 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2550 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2553 while (len >= L2CAP_CONF_OPT_SIZE) {
2554 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2557 case L2CAP_CONF_RFC:
2558 if (olen == sizeof(rfc))
2559 memcpy(&rfc, (void *)val, olen);
2564 /* Use sane default values in case a misbehaving remote device
2565 * did not send an RFC option.
2567 rfc.mode = chan->mode;
2568 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2569 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2570 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2572 BT_ERR("Expected RFC option was not found, using defaults");
2576 case L2CAP_MODE_ERTM:
2577 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2578 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2579 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2581 case L2CAP_MODE_STREAMING:
2582 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject. If it rejects our outstanding Information
 * Request (matched by ident), treat feature discovery as done and let
 * pending channels proceed.
 */
2586 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2588 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Only "command not understood" rejections are handled here. */
2590 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2593 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2594 cmd->ident == conn->info_ident) {
2595 cancel_delayed_work(&conn->info_timer);
2597 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2598 conn->info_ident = 0;
/* Feature exchange settled (even if by rejection): start channels. */
2600 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listening channel for the
 * PSM, check security and backlog, create the new child channel, and
 * answer with SUCCESS/PENDING/refusal. Kicks off the information exchange
 * and first Configure Request when appropriate.
 */
2606 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2608 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2609 struct l2cap_conn_rsp rsp;
2610 struct l2cap_chan *chan = NULL, *pchan;
2611 struct sock *parent, *sk = NULL;
2612 int result, status = L2CAP_CS_NO_INFO;
2614 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2615 __le16 psm = req->psm;
/* NOTE(review): psm is __le16 printed without byte-swapping — debug
 * output may be byte-reversed on big-endian; confirm. */
2617 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2619 /* Check if we have socket listening on psm */
2620 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2622 result = L2CAP_CR_BAD_PSM;
2630 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP and is exempt from the link-mode check. */
2631 if (psm != cpu_to_le16(0x0001) &&
2632 !hci_conn_check_link_mode(conn->hcon)) {
2633 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2634 result = L2CAP_CR_SEC_BLOCK;
2638 result = L2CAP_CR_NO_MEM;
2640 /* Check for backlog size */
2641 if (sk_acceptq_is_full(parent)) {
2642 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2646 chan = pchan->ops->new_connection(pchan->data);
2652 /* Check if we already have channel with that dcid */
2653 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2654 sock_set_flag(sk, SOCK_ZAPPED);
2655 chan->ops->close(chan->data);
2659 hci_conn_hold(conn->hcon);
2661 bacpy(&bt_sk(sk)->src, conn->src);
2662 bacpy(&bt_sk(sk)->dst, conn->dst);
2666 bt_accept_enqueue(parent, sk);
2668 l2cap_chan_add(conn, chan);
2672 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident for a possibly deferred response. */
2674 chan->ident = cmd->ident;
2676 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2677 if (l2cap_chan_check_security(chan)) {
/* Userspace asked to defer accept: report authorization pending
 * and wake the listener. */
2678 if (bt_sk(sk)->defer_setup) {
2679 l2cap_state_change(chan, BT_CONNECT2);
2680 result = L2CAP_CR_PEND;
2681 status = L2CAP_CS_AUTHOR_PEND;
2682 parent->sk_data_ready(parent, 0);
2684 l2cap_state_change(chan, BT_CONFIG);
2685 result = L2CAP_CR_SUCCESS;
2686 status = L2CAP_CS_NO_INFO;
/* Security elevation in progress: authentication pending. */
2689 l2cap_state_change(chan, BT_CONNECT2);
2690 result = L2CAP_CR_PEND;
2691 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask not yet known: answer PENDING, finish info first. */
2694 l2cap_state_change(chan, BT_CONNECT2);
2695 result = L2CAP_CR_PEND;
2696 status = L2CAP_CS_NO_INFO;
2700 release_sock(parent);
2703 rsp.scid = cpu_to_le16(scid);
2704 rsp.dcid = cpu_to_le16(dcid);
2705 rsp.result = cpu_to_le16(result);
2706 rsp.status = cpu_to_le16(status);
2707 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* If we answered PENDING because features are unknown, start the
 * information exchange now. */
2709 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2710 struct l2cap_info_req info;
2711 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2713 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2714 conn->info_ident = l2cap_get_ident(conn);
2716 schedule_delayed_work(&conn->info_timer,
2717 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2719 l2cap_send_cmd(conn, conn->info_ident,
2720 L2CAP_INFO_REQ, sizeof(info), &info);
/* Successful accept: immediately begin configuration (once). */
2723 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2724 result == L2CAP_CR_SUCCESS) {
2726 set_bit(CONF_REQ_SENT, &chan->conf_state);
2727 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2728 l2cap_build_conf_req(chan, buf), buf);
2729 chan->num_conf_req++;
/* Handle a Connection Response: locate the channel by scid (or by ident
 * when no dcid was assigned yet) and act on the result — SUCCESS moves to
 * BT_CONFIG and sends the first Configure Request, PEND just records the
 * pending state, anything else tears the channel down.
 */
2735 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2737 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2738 u16 scid, dcid, result, status;
2739 struct l2cap_chan *chan;
2743 scid = __le16_to_cpu(rsp->scid);
2744 dcid = __le16_to_cpu(rsp->dcid);
2745 result = __le16_to_cpu(rsp->result);
2746 status = __le16_to_cpu(rsp->status);
2748 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2751 chan = l2cap_get_chan_by_scid(conn, scid);
/* Fall back to ident lookup (response before dcid assignment). */
2755 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2763 case L2CAP_CR_SUCCESS:
2764 l2cap_state_change(chan, BT_CONFIG);
2767 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send the initial Configure Request exactly once. */
2769 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2772 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2773 l2cap_build_conf_req(chan, req), req);
2774 chan->num_conf_req++;
2778 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Refusal or error: delete the channel with ECONNREFUSED. */
2782 l2cap_chan_del(chan, ECONNREFUSED);
/* Choose the channel's FCS setting after configuration: no FCS outside
 * ERTM/Streaming; within those modes, default to CRC16 unless the remote
 * explicitly requested no FCS during negotiation.
 */
2790 static inline void set_default_fcs(struct l2cap_chan *chan)
2792 /* FCS is enabled only in ERTM or streaming mode, if one or both
2795 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2796 chan->fcs = L2CAP_FCS_NONE;
2797 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2798 chan->fcs = L2CAP_FCS_CRC16;
/* Handle a Configure Request: validate channel state, accumulate option
 * data across continuation packets (flags bit 0), then parse the complete
 * request and reply. When both directions are configured, transition the
 * channel to connected and initialize ERTM if needed.
 */
2801 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2803 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2806 struct l2cap_chan *chan;
2810 dcid = __le16_to_cpu(req->dcid);
2811 flags = __le16_to_cpu(req->flags);
2813 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2815 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject
 * with "invalid CID". */
2821 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2822 struct l2cap_cmd_rej_cid rej;
2824 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2825 rej.scid = cpu_to_le16(chan->scid);
2826 rej.dcid = cpu_to_le16(chan->dcid);
2828 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2833 /* Reject if config buffer is too small. */
2834 len = cmd_len - sizeof(*req);
2835 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2836 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2837 l2cap_build_conf_rsp(chan, rsp,
2838 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment into the per-channel config buffer. */
2843 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2844 chan->conf_len += len;
2846 if (flags & 0x0001) {
2847 /* Incomplete config. Send empty response. */
2848 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2849 l2cap_build_conf_rsp(chan, rsp,
2850 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2854 /* Complete config. */
2855 len = l2cap_parse_conf_req(chan, rsp);
2857 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2861 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2862 chan->num_conf_rsp++;
2864 /* Reset config buffer. */
2867 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both sides done configuring: channel becomes operational. */
2870 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2871 set_default_fcs(chan);
2873 l2cap_state_change(chan, BT_CONNECTED);
2875 chan->next_tx_seq = 0;
2876 chan->expected_tx_seq = 0;
2877 skb_queue_head_init(&chan->tx_q);
2878 if (chan->mode == L2CAP_MODE_ERTM)
2879 l2cap_ertm_init(chan);
2881 l2cap_chan_ready(chan);
/* If we never sent our own Configure Request, send it now (once). */
2885 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2887 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2888 l2cap_build_conf_req(chan, buf), buf);
2889 chan->num_conf_req++;
2892 /* Got Conf Rsp PENDING from remote side and asume we sent
2893 Conf Rsp PENDING in the code above */
2894 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2895 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2897 /* check compatibility */
2899 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2900 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2902 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2903 l2cap_build_conf_rsp(chan, rsp,
2904 L2CAP_CONF_SUCCESS, 0x0000), rsp);
/* Handle a Configure Response: on SUCCESS latch the RFC parameters; on
 * PENDING resolve a mutual-pending state; on UNACCEPT re-negotiate with a
 * new request (bounded by L2CAP_CONF_MAX_CONF_RSP); anything else
 * disconnects. When both directions finish, bring the channel up.
 */
2912 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2914 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2915 u16 scid, flags, result;
2916 struct l2cap_chan *chan;
2918 int len = cmd->len - sizeof(*rsp);
2920 scid = __le16_to_cpu(rsp->scid);
2921 flags = __le16_to_cpu(rsp->flags);
2922 result = __le16_to_cpu(rsp->result);
2924 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2925 scid, flags, result);
2927 chan = l2cap_get_chan_by_scid(conn, scid);
2934 case L2CAP_CONF_SUCCESS:
2935 l2cap_conf_rfc_get(chan, rsp->data, len);
2936 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2939 case L2CAP_CONF_PENDING:
2940 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
/* Both sides pending: parse the response, then confirm success. */
2942 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2945 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2948 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2952 /* check compatibility */
2954 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2955 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2957 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2958 l2cap_build_conf_rsp(chan, buf,
2959 L2CAP_CONF_SUCCESS, 0x0000), buf);
2963 case L2CAP_CONF_UNACCEPT:
2964 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Response options must fit our request buffer. */
2967 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2968 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2972 /* throw out any old stored conf requests */
2973 result = L2CAP_CONF_SUCCESS;
2974 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2977 l2cap_send_disconn_req(conn, chan, ECONNRESET);
/* Counter-propose with an updated Configure Request. */
2981 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2982 L2CAP_CONF_REQ, len, req);
2983 chan->num_conf_req++;
2984 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: error the socket and disconnect. */
2990 sk->sk_err = ECONNRESET;
2991 __set_chan_timer(chan,
2992 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
2993 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3000 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Both directions configured: channel becomes operational. */
3002 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3003 set_default_fcs(chan);
3005 l2cap_state_change(chan, BT_CONNECTED);
3006 chan->next_tx_seq = 0;
3007 chan->expected_tx_seq = 0;
3008 skb_queue_head_init(&chan->tx_q);
3009 if (chan->mode == L2CAP_MODE_ERTM)
3010 l2cap_ertm_init(chan);
3012 l2cap_chan_ready(chan);
/* Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, shut down the socket, and delete/close the channel.
 */
3020 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3022 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3023 struct l2cap_disconn_rsp rsp;
3025 struct l2cap_chan *chan;
3028 scid = __le16_to_cpu(req->scid);
3029 dcid = __le16_to_cpu(req->dcid);
3031 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The request's dcid is our local (source) CID. */
3033 chan = l2cap_get_chan_by_scid(conn, dcid);
3039 rsp.dcid = cpu_to_le16(chan->scid);
3040 rsp.scid = cpu_to_le16(chan->dcid);
3041 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3043 sk->sk_shutdown = SHUTDOWN_MASK;
3045 l2cap_chan_del(chan, ECONNRESET);
3048 chan->ops->close(chan->data);
/* Handle a Disconnection Response: our earlier disconnect completed, so
 * delete the channel (no error) and close it.
 */
3052 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3054 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3056 struct l2cap_chan *chan;
3059 scid = __le16_to_cpu(rsp->scid);
3060 dcid = __le16_to_cpu(rsp->dcid);
3062 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3064 chan = l2cap_get_chan_by_scid(conn, scid);
/* Clean teardown: err 0 since we initiated the disconnect. */
3070 l2cap_chan_del(chan, 0);
3073 chan->ops->close(chan->data);
/* Handle an Information Request: answer FEAT_MASK with our feature mask
 * (extended features included when high-speed is on), FIXED_CHAN with the
 * fixed-channel map (A2MP bit per enable_hs), anything else NOTSUPP.
 */
3077 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3079 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3082 type = __le16_to_cpu(req->type);
3084 BT_DBG("type 0x%4.4x", type);
3086 if (type == L2CAP_IT_FEAT_MASK) {
3088 u32 feat_mask = l2cap_feat_mask;
3089 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3090 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3091 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3093 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow-spec / window only when high-speed is enabled
 * (guarding condition elided from this view — confirm). */
3096 feat_mask |= L2CAP_FEAT_EXT_FLOW
3097 | L2CAP_FEAT_EXT_WINDOW;
3099 put_unaligned_le32(feat_mask, rsp->data);
3100 l2cap_send_cmd(conn, cmd->ident,
3101 L2CAP_INFO_RSP, sizeof(buf), buf);
3102 } else if (type == L2CAP_IT_FIXED_CHAN) {
3104 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only with high-speed support. */
3107 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3109 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3111 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3112 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3113 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3114 l2cap_send_cmd(conn, cmd->ident,
3115 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: reply "not supported". */
3117 struct l2cap_info_rsp rsp;
3118 rsp.type = cpu_to_le16(type);
3119 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3120 l2cap_send_cmd(conn, cmd->ident,
3121 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3127 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3129 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3132 type = __le16_to_cpu(rsp->type);
3133 result = __le16_to_cpu(rsp->result);
3135 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3137 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3138 if (cmd->ident != conn->info_ident ||
3139 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3142 cancel_delayed_work(&conn->info_timer);
3144 if (result != L2CAP_IR_SUCCESS) {
3145 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3146 conn->info_ident = 0;
3148 l2cap_conn_start(conn);
3153 if (type == L2CAP_IT_FEAT_MASK) {
3154 conn->feat_mask = get_unaligned_le32(rsp->data);
3156 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3157 struct l2cap_info_req req;
3158 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3160 conn->info_ident = l2cap_get_ident(conn);
3162 l2cap_send_cmd(conn, conn->info_ident,
3163 L2CAP_INFO_REQ, sizeof(req), &req);
3165 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3166 conn->info_ident = 0;
3168 l2cap_conn_start(conn);
3170 } else if (type == L2CAP_IT_FIXED_CHAN) {
3171 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3172 conn->info_ident = 0;
3174 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  AMP channel creation is not
 * implemented yet, so every request is rejected with "no resources".
 */
3180 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3181 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3184 struct l2cap_create_chan_req *req = data;
3185 struct l2cap_create_chan_rsp rsp;
/* Malformed length: reject (return path not visible in this extract) */
3188 if (cmd_len != sizeof(*req))
3194 psm = le16_to_cpu(req->psm);
3195 scid = le16_to_cpu(req->scid);
3197 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3199 /* Placeholder: Always reject */
3201 rsp.scid = cpu_to_le16(scid);
/* NOTE(review): result/status are u16 on the wire elsewhere but are
 * assigned without cpu_to_le16 here — matches upstream at this point */
3202 rsp.result = L2CAP_CR_NO_MEM;
3203 rsp.status = L2CAP_CS_NO_INFO;
3205 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response is handled identically to an ordinary
 * Connection Response. */
3211 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3212 struct l2cap_cmd_hdr *cmd, void *data)
3214 BT_DBG("conn %p", conn);
3216 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for the given initiator CID, echoing
 * the supplied result code, reusing the request's ident. */
3219 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3220 u16 icid, u16 result)
3222 struct l2cap_move_chan_rsp rsp;
3224 BT_DBG("icid %d, result %d", icid, result);
3226 rsp.icid = cpu_to_le16(icid);
3227 rsp.result = cpu_to_le16(result);
3229 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirmation under a freshly allocated ident and
 * remember that ident on the channel so the cfm-rsp can be matched.
 * NOTE(review): l2cap_move_channel_rsp() calls this with chan == NULL;
 * the full source guards the chan->ident store with "if (chan)" — that
 * guard line appears to have been lost in this extract. Verify upstream.
 */
3232 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3233 struct l2cap_chan *chan, u16 icid, u16 result)
3235 struct l2cap_move_chan_cfm cfm;
3238 BT_DBG("icid %d, result %d", icid, result);
3240 ident = l2cap_get_ident(conn);
3242 chan->ident = ident;
3244 cfm.icid = cpu_to_le16(icid);
3245 cfm.result = cpu_to_le16(result);
3247 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirmation with a cfm-response carrying
 * the same icid, under the confirmation's ident. */
3250 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3253 struct l2cap_move_chan_cfm_rsp rsp;
3255 BT_DBG("icid %d", icid);
3257 rsp.icid = cpu_to_le16(icid);
3258 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  Channel moves between controllers are
 * not implemented, so every request is refused (L2CAP_MR_NOT_ALLOWED).
 */
3261 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3262 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3264 struct l2cap_move_chan_req *req = data;
3266 u16 result = L2CAP_MR_NOT_ALLOWED;
/* Malformed length: reject (return path not visible in this extract) */
3268 if (cmd_len != sizeof(*req))
3271 icid = le16_to_cpu(req->icid);
3273 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3278 /* Placeholder: Always refuse */
3279 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  As moves are unimplemented we always
 * answer with an UNCONFIRMED confirmation (and deliberately pass a NULL
 * channel to the cfm sender — see the note there).
 */
3284 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3285 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3287 struct l2cap_move_chan_rsp *rsp = data;
3290 if (cmd_len != sizeof(*rsp))
3293 icid = le16_to_cpu(rsp->icid);
3294 result = le16_to_cpu(rsp->result);
3296 BT_DBG("icid %d, result %d", icid, result);
3298 /* Placeholder: Always unconfirmed */
3299 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirmation: simply acknowledge it.  The result
 * is parsed for the debug trace but otherwise unused here. */
3304 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3305 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3307 struct l2cap_move_chan_cfm *cfm = data;
3310 if (cmd_len != sizeof(*cfm))
3313 icid = le16_to_cpu(cfm->icid);
3314 result = le16_to_cpu(cfm->result);
3316 BT_DBG("icid %d, result %d", icid, result);
3318 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirmation Response — nothing to do yet beyond
 * validating the length and logging the icid. */
3323 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3324 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3326 struct l2cap_move_chan_cfm_rsp *rsp = data;
3329 if (cmd_len != sizeof(*rsp))
3332 icid = le16_to_cpu(rsp->icid);
3334 BT_DBG("icid %d", icid);
/* Validate proposed LE connection parameters (per the Bluetooth spec:
 * intervals in 1.25 ms units, supervision timeout in 10 ms units).
 * NOTE(review): lossy extract — the return statements are not visible.
 */
3339 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
/* Interval: min <= max, within 7.5 ms .. 4 s */
3344 if (min > max || min < 6 || max > 3200)
/* Supervision timeout: 100 ms .. 32 s */
3347 if (to_multiplier < 10 || to_multiplier > 3200)
/* Timeout must strictly exceed the effective connection interval */
3350 if (max >= to_multiplier * 8)
/* Largest slave latency that still fits inside the timeout window */
3353 max_latency = (to_multiplier * 8 / max) - 1;
/* 499 is the spec's absolute ceiling for connSlaveLatency */
3354 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request from the slave:
 * only the master may process it; validate the proposed parameters,
 * answer accepted/rejected, and on acceptance push the new parameters
 * to the controller via HCI.
 */
3360 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3361 struct l2cap_cmd_hdr *cmd, u8 *data)
3363 struct hci_conn *hcon = conn->hcon;
3364 struct l2cap_conn_param_update_req *req;
3365 struct l2cap_conn_param_update_rsp rsp;
3366 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the link master may act on this request */
3369 if (!(hcon->link_mode & HCI_LM_MASTER))
3372 cmd_len = __le16_to_cpu(cmd->len);
3373 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3376 req = (struct l2cap_conn_param_update_req *) data;
3377 min = __le16_to_cpu(req->min);
3378 max = __le16_to_cpu(req->max);
3379 latency = __le16_to_cpu(req->latency);
3380 to_multiplier = __le16_to_cpu(req->to_multiplier);
3382 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3383 min, max, latency, to_multiplier);
3385 memset(&rsp, 0, sizeof(rsp));
3387 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3389 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3391 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3393 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* On success, actually update the link (guard line not visible) */
3397 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler.  Echo requests
 * are answered inline; unknown opcodes are logged (the caller sends a
 * Command Reject when err indicates failure).
 */
3402 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3403 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3407 switch (cmd->code) {
3408 case L2CAP_COMMAND_REJ:
3409 l2cap_command_rej(conn, cmd, data);
3412 case L2CAP_CONN_REQ:
3413 err = l2cap_connect_req(conn, cmd, data);
3416 case L2CAP_CONN_RSP:
3417 err = l2cap_connect_rsp(conn, cmd, data);
3420 case L2CAP_CONF_REQ:
3421 err = l2cap_config_req(conn, cmd, cmd_len, data);
3424 case L2CAP_CONF_RSP:
3425 err = l2cap_config_rsp(conn, cmd, data);
3428 case L2CAP_DISCONN_REQ:
3429 err = l2cap_disconnect_req(conn, cmd, data);
3432 case L2CAP_DISCONN_RSP:
3433 err = l2cap_disconnect_rsp(conn, cmd, data);
3436 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back under the same ident */
3437 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3440 case L2CAP_ECHO_RSP:
3443 case L2CAP_INFO_REQ:
3444 err = l2cap_information_req(conn, cmd, data);
3447 case L2CAP_INFO_RSP:
3448 err = l2cap_information_rsp(conn, cmd, data);
3451 case L2CAP_CREATE_CHAN_REQ:
3452 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3455 case L2CAP_CREATE_CHAN_RSP:
3456 err = l2cap_create_channel_rsp(conn, cmd, data);
3459 case L2CAP_MOVE_CHAN_REQ:
3460 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3463 case L2CAP_MOVE_CHAN_RSP:
3464 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3467 case L2CAP_MOVE_CHAN_CFM:
3468 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3471 case L2CAP_MOVE_CHAN_CFM_RSP:
3472 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3476 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command.  Only the connection parameter
 * update request is handled; rejects and update responses are ignored.
 */
3484 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3485 struct l2cap_cmd_hdr *cmd, u8 *data)
3487 switch (cmd->code) {
3488 case L2CAP_COMMAND_REJ:
3491 case L2CAP_CONN_PARAM_UPDATE_REQ:
3492 return l2cap_conn_param_update_req(conn, cmd, data);
3494 case L2CAP_CONN_PARAM_UPDATE_RSP:
3498 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signaling channel payload: iterate over the concatenated
 * command headers, dispatch each to the LE or BR/EDR handler depending
 * on the link type, and send a Command Reject on handler failure.
 */
3503 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3504 struct sk_buff *skb)
3506 u8 *data = skb->data;
3508 struct l2cap_cmd_hdr cmd;
/* Mirror raw signaling traffic to any raw sockets first */
3511 l2cap_raw_recv(conn, skb);
3513 while (len >= L2CAP_CMD_HDR_SIZE) {
3515 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3516 data += L2CAP_CMD_HDR_SIZE;
3517 len -= L2CAP_CMD_HDR_SIZE;
3519 cmd_len = le16_to_cpu(cmd.len);
3521 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command must fit in the remaining payload and carry a valid ident */
3523 if (cmd_len > len || !cmd.ident) {
3524 BT_DBG("corrupted command");
3528 if (conn->hcon->type == LE_LINK)
3529 err = l2cap_le_sig_cmd(conn, &cmd, data);
3531 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
/* Handler failed: tell the peer we did not understand the command */
3534 struct l2cap_cmd_rej_unk rej;
3536 BT_ERR("Wrong link type (%d)", err);
3538 /* FIXME: Map err to a valid reason */
3539 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3540 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of an ERTM/streaming frame.  The CRC
 * covers the already-pulled L2CAP header plus the payload, hence the
 * negative hdr_size offset from skb->data.
 */
3550 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3552 u16 our_fcs, rcv_fcs;
/* Header size depends on whether extended control fields are in use */
3555 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3556 hdr_size = L2CAP_EXT_HDR_SIZE;
3558 hdr_size = L2CAP_ENH_HDR_SIZE;
3560 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the 2-byte FCS off, then read it from just past the new tail */
3561 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3562 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3563 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3565 if (our_fcs != rcv_fcs)
/* Reply to a poll (P-bit) from the peer: send RNR if we're locally busy,
 * otherwise flush pending I-frames, and fall back to an RR if nothing
 * was actually transmitted so the poll still gets its final response.
 */
3571 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3575 chan->frames_sent = 0;
3577 control |= __set_reqseq(chan, chan->buffer_seq);
3579 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3580 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3581 l2cap_send_sframe(chan, control);
3582 set_bit(CONN_RNR_SENT, &chan->conn_state);
/* If the remote was busy, retransmit what it missed first */
3585 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3586 l2cap_retransmit_frames(chan);
3588 l2cap_ertm_send(chan);
/* Nothing went out and we're not busy: answer the poll with a plain RR */
3590 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3591 chan->frames_sent == 0) {
3592 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3593 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq distance from buffer_seq (modulo the sequence space).
 * Duplicate sequence numbers are rejected (error return not visible).
 */
3597 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3599 struct sk_buff *next_skb;
3600 int tx_seq_offset, next_tx_seq_offset;
/* Stash reassembly metadata in the skb control block */
3602 bt_cb(skb)->tx_seq = tx_seq;
3603 bt_cb(skb)->sar = sar;
3605 next_skb = skb_peek(&chan->srej_q);
3607 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
/* Already queued: duplicate */
3610 if (bt_cb(next_skb)->tx_seq == tx_seq)
3613 next_tx_seq_offset = __seq_offset(chan,
3614 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
/* Found the first queued frame that sorts after us: insert before it */
3616 if (next_tx_seq_offset > tx_seq_offset) {
3617 __skb_queue_before(&chan->srej_q, next_skb, skb);
3621 if (skb_queue_is_last(&chan->srej_q, next_skb))
3624 next_skb = skb_queue_next(&chan->srej_q, next_skb);
/* Largest offset so far: append at the tail */
3627 __skb_queue_tail(&chan->srej_q, skb);
/* Chain a continuation fragment onto the SDU being reassembled, using
 * the skb frag_list, and keep the head skb's accounting fields in sync.
 */
3632 static void append_skb_frag(struct sk_buff *skb,
3633 struct sk_buff *new_frag, struct sk_buff **last_frag)
3635 /* skb->len reflects data in skb as well as all fragments
3636 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones chain off *last_frag */
3638 if (!skb_has_frag_list(skb))
3639 skb_shinfo(skb)->frag_list = new_frag;
3641 new_frag->next = NULL;
3643 (*last_frag)->next = new_frag;
3644 *last_frag = new_frag;
3646 skb->len += new_frag->len;
3647 skb->data_len += new_frag->len;
3648 skb->truesize += new_frag->truesize;
/* Reassemble a (possibly segmented) SDU from I-frames according to the
 * SAR bits: unsegmented frames go straight up; START records the SDU
 * length; CONTINUE/END append fragments; END additionally checks the
 * total length and delivers the SDU.  On error the partial SDU is freed.
 * NOTE(review): lossy extract — several guard/cleanup lines are missing.
 */
3651 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3655 switch (__get_ctrl_sar(chan, control)) {
3656 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame: hand it to the upper layer directly */
3660 err = chan->ops->recv(chan->data, skb);
3663 case L2CAP_SAR_START:
/* First fragment carries the total SDU length prefix */
3667 chan->sdu_len = get_unaligned_le16(skb->data);
3668 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Advertised SDU larger than our MTU: protocol error */
3670 if (chan->sdu_len > chan->imtu) {
/* A START fragment must not already contain the full SDU */
3675 if (skb->len >= chan->sdu_len)
3679 chan->sdu_last_frag = skb;
3685 case L2CAP_SAR_CONTINUE:
3689 append_skb_frag(chan->sdu, skb,
3690 &chan->sdu_last_frag);
/* A CONTINUE fragment must not complete or overrun the SDU */
3693 if (chan->sdu->len >= chan->sdu_len)
/* L2CAP_SAR_END path (case label not visible in this extract) */
3703 append_skb_frag(chan->sdu, skb,
3704 &chan->sdu_last_frag);
/* Final length must match exactly what START announced */
3707 if (chan->sdu->len != chan->sdu_len)
3710 err = chan->ops->recv(chan->data, chan->sdu);
3713 /* Reassembly complete */
3715 chan->sdu_last_frag = NULL;
/* Error path: discard the partially assembled SDU */
3723 kfree_skb(chan->sdu);
3725 chan->sdu_last_frag = NULL;
/* Enter local-busy state: mark the flag and arm the ack timer so an RNR
 * gets sent to throttle the peer. */
3732 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3734 BT_DBG("chan %p, Enter local busy", chan);
3736 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3738 __set_ack_timer(chan);
/* Leave local-busy state.  If we previously sent an RNR, poll the peer
 * with an RR+P so it resumes transmission, and wait for the F-bit reply.
 */
3741 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3745 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3748 control = __set_reqseq(chan, chan->buffer_seq);
3749 control |= __set_ctrl_poll(chan);
3750 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3751 l2cap_send_sframe(chan, control);
3752 chan->retry_count = 1;
/* Switch from retransmission to monitor timer while polling */
3754 __clear_retrans_timer(chan);
3755 __set_monitor_timer(chan);
3757 set_bit(CONN_WAIT_F, &chan->conn_state);
3760 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3761 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3763 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer: toggle ERTM local-busy handling
 * when the receive path backs up (busy != 0) or drains (busy == 0).
 * Only meaningful in ERTM mode. */
3766 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3768 if (chan->mode == L2CAP_MODE_ERTM) {
3770 l2cap_ertm_enter_local_busy(chan);
3772 l2cap_ertm_exit_local_busy(chan);
/* After a missing frame arrives, drain the SREJ queue of frames that are
 * now in order, reassembling each one, until we hit the next gap or go
 * locally busy.  A reassembly failure tears the connection down.
 */
3776 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3778 struct sk_buff *skb;
3781 while ((skb = skb_peek(&chan->srej_q)) &&
3782 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Queue head is not the next expected frame: still a gap */
3785 if (bt_cb(skb)->tx_seq != tx_seq)
3788 skb = skb_dequeue(&chan->srej_q);
3789 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3790 err = l2cap_reassemble_sdu(chan, skb, control);
3793 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Advance both the SREJ bookkeeping and the expected sequence */
3797 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3798 tx_seq = __next_seq(chan, tx_seq);
/* Resend SREJ frames for every sequence number still outstanding after
 * tx_seq arrived: the matching list entry is retired, the rest are
 * re-requested and moved to the tail of the pending-SREJ list.
 */
3802 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3804 struct srej_list *l, *tmp;
3807 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* This entry has been satisfied: drop it (del/free lines not visible) */
3808 if (l->tx_seq == tx_seq) {
3813 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3814 control |= __set_reqseq(chan, l->tx_seq);
3815 l2cap_send_sframe(chan, control);
/* Re-queue at the tail to preserve request order */
3817 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing sequence number between expected_tx_seq and
 * the frame that actually arrived (tx_seq), recording each request in
 * the srej list.  Returns -ENOMEM on allocation failure (line not shown).
 */
3821 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3823 struct srej_list *new;
3826 while (tx_seq != chan->expected_tx_seq) {
3827 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3828 control |= __set_reqseq(chan, chan->expected_tx_seq);
3829 l2cap_send_sframe(chan, control);
/* GFP_ATOMIC: we may be called from the receive path */
3831 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3835 new->tx_seq = chan->expected_tx_seq;
3837 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3839 list_add_tail(&new->list, &chan->srej_l);
/* Step past the frame that just arrived */
3842 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive state machine: handles F-bit poll completion,
 * acked-frame cleanup, invalid/duplicate sequence numbers, SREJ recovery
 * (queueing out-of-order frames and requesting the missing ones), in-order
 * delivery via l2cap_reassemble_sdu(), and periodic acking.
 * NOTE(review): lossy extract — gotos, labels and early returns between
 * the visible lines are missing; read against the full source.
 */
3847 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3849 u16 tx_seq = __get_txseq(chan, rx_control);
3850 u16 req_seq = __get_reqseq(chan, rx_control);
3851 u8 sar = __get_ctrl_sar(chan, rx_control);
3852 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every tx_win/6 frames */
3853 int num_to_ack = (chan->tx_win/6) + 1;
3856 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3857 tx_seq, rx_control);
/* F-bit answers our poll: stop the monitor timer, resume retransmits */
3859 if (__is_ctrl_final(chan, rx_control) &&
3860 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3861 __clear_monitor_timer(chan);
3862 if (chan->unacked_frames > 0)
3863 __set_retrans_timer(chan);
3864 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* ReqSeq acknowledges our outbound frames */
3867 chan->expected_ack_seq = req_seq;
3868 l2cap_drop_acked_frames(chan);
3870 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3872 /* invalid tx_seq */
3873 if (tx_seq_offset >= chan->tx_win) {
3874 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Locally busy: just keep acking, don't accept new data */
3878 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3879 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3880 l2cap_send_ack(chan);
3884 if (tx_seq == chan->expected_tx_seq)
/* SREJ recovery in progress */
3887 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3888 struct srej_list *first;
3890 first = list_first_entry(&chan->srej_l,
3891 struct srej_list, list);
/* The oldest missing frame arrived: queue it and drain the gap */
3892 if (tx_seq == first->tx_seq) {
3893 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3894 l2cap_check_srej_gap(chan, tx_seq);
3896 list_del(&first->list);
/* All gaps filled: leave SREJ_SENT state */
3899 if (list_empty(&chan->srej_l)) {
3900 chan->buffer_seq = chan->buffer_seq_srej;
3901 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3902 l2cap_send_ack(chan);
3903 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3906 struct srej_list *l;
3908 /* duplicated tx_seq */
3909 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
/* If this seq was already SREJ'd, just re-request the rest */
3912 list_for_each_entry(l, &chan->srej_l, list) {
3913 if (l->tx_seq == tx_seq) {
3914 l2cap_resend_srejframe(chan, tx_seq);
3919 err = l2cap_send_srejframe(chan, tx_seq);
3921 l2cap_send_disconn_req(chan->conn, chan, -err);
/* Not yet in SREJ state: first out-of-order frame */
3926 expected_tx_seq_offset = __seq_offset(chan,
3927 chan->expected_tx_seq, chan->buffer_seq);
3929 /* duplicated tx_seq */
3930 if (tx_seq_offset < expected_tx_seq_offset)
3933 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3935 BT_DBG("chan %p, Enter SREJ", chan);
3937 INIT_LIST_HEAD(&chan->srej_l);
3938 chan->buffer_seq_srej = chan->buffer_seq;
3940 __skb_queue_head_init(&chan->srej_q);
3941 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3943 /* Set P-bit only if there are some I-frames to ack. */
3944 if (__clear_ack_timer(chan))
3945 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3947 err = l2cap_send_srejframe(chan, tx_seq);
3949 l2cap_send_disconn_req(chan->conn, chan, -err);
/* expected_frame path: in-order delivery */
3956 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* During SREJ recovery, even in-order frames are buffered */
3958 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3959 bt_cb(skb)->tx_seq = tx_seq;
3960 bt_cb(skb)->sar = sar;
3961 __skb_queue_tail(&chan->srej_q, skb);
3965 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3966 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3969 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3973 if (__is_ctrl_final(chan, rx_control)) {
3974 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3975 l2cap_retransmit_frames(chan);
/* Ack every num_to_ack frames, otherwise (re)arm the ack timer */
3979 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3980 if (chan->num_acked == num_to_ack - 1)
3981 l2cap_send_ack(chan);
3983 __set_ack_timer(chan);
/* Handle a Receiver Ready S-frame: process the acknowledgement and react
 * to the P/F bits (answer polls, resume after the peer's busy condition,
 * and flush our transmit queue).
 */
3992 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3994 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3995 __get_reqseq(chan, rx_control), rx_control);
3997 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3998 l2cap_drop_acked_frames(chan);
/* P-bit: peer polls us, next response must carry the F-bit */
4000 if (__is_ctrl_poll(chan, rx_control)) {
4001 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4002 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4003 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4004 (chan->unacked_frames > 0))
4005 __set_retrans_timer(chan);
4007 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* In SREJ state, answer the poll with the tail SREJ */
4008 l2cap_send_srejtail(chan);
4010 l2cap_send_i_or_rr_or_rnr(chan);
/* F-bit: reply to our earlier poll; retransmit unless REJ already did */
4013 } else if (__is_ctrl_final(chan, rx_control)) {
4014 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4016 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4017 l2cap_retransmit_frames(chan);
/* Plain RR: peer is no longer busy; resume sending */
4020 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4021 (chan->unacked_frames > 0))
4022 __set_retrans_timer(chan);
4024 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4025 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4026 l2cap_send_ack(chan);
4028 l2cap_ertm_send(chan);
/* Handle a Reject S-frame: ack up to reqseq, then retransmit everything
 * from there.  With the F-bit set, only retransmit if a REJ wasn't
 * already acted on; otherwise remember the REJ while a poll is pending.
 */
4032 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4034 u16 tx_seq = __get_reqseq(chan, rx_control);
4036 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4038 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4040 chan->expected_ack_seq = tx_seq;
4041 l2cap_drop_acked_frames(chan);
4043 if (__is_ctrl_final(chan, rx_control)) {
4044 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4045 l2cap_retransmit_frames(chan);
4047 l2cap_retransmit_frames(chan);
/* Poll outstanding: note the REJ so the F-bit reply doesn't re-send */
4049 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4050 set_bit(CONN_REJ_ACT, &chan->conn_state);
4053 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4055 u16 tx_seq = __get_reqseq(chan, rx_control);
4057 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4059 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4061 if (__is_ctrl_poll(chan, rx_control)) {
4062 chan->expected_ack_seq = tx_seq;
4063 l2cap_drop_acked_frames(chan);
4065 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4066 l2cap_retransmit_one_frame(chan, tx_seq);
4068 l2cap_ertm_send(chan);
4070 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4071 chan->srej_save_reqseq = tx_seq;
4072 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4074 } else if (__is_ctrl_final(chan, rx_control)) {
4075 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4076 chan->srej_save_reqseq == tx_seq)
4077 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4079 l2cap_retransmit_one_frame(chan, tx_seq);
4081 l2cap_retransmit_one_frame(chan, tx_seq);
4082 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4083 chan->srej_save_reqseq = tx_seq;
4084 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a Receiver Not Ready S-frame: mark the peer busy, ack what it
 * acknowledged, stop retransmitting, and answer a poll appropriately
 * (RR with F-bit, or the tail SREJ when in SREJ recovery).
 */
4089 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4091 u16 tx_seq = __get_reqseq(chan, rx_control);
4093 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4095 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4096 chan->expected_ack_seq = tx_seq;
4097 l2cap_drop_acked_frames(chan);
4099 if (__is_ctrl_poll(chan, rx_control))
4100 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4102 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* Peer can't receive: no point running the retransmission timer */
4103 __clear_retrans_timer(chan);
4104 if (__is_ctrl_poll(chan, rx_control))
4105 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
/* In SREJ recovery: a poll is answered with the tail SREJ */
4109 if (__is_ctrl_poll(chan, rx_control)) {
4110 l2cap_send_srejtail(chan);
4112 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4113 l2cap_send_sframe(chan, rx_control);
/* Dispatch an S-frame to the RR/REJ/SREJ/RNR handler, after first
 * processing an F-bit that answers our outstanding poll.
 */
4117 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4119 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4121 if (__is_ctrl_final(chan, rx_control) &&
4122 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4123 __clear_monitor_timer(chan);
4124 if (chan->unacked_frames > 0)
4125 __set_retrans_timer(chan);
4126 clear_bit(CONN_WAIT_F, &chan->conn_state);
4129 switch (__get_ctrl_super(chan, rx_control)) {
4130 case L2CAP_SUPER_RR:
4131 l2cap_data_channel_rrframe(chan, rx_control);
4134 case L2CAP_SUPER_REJ:
4135 l2cap_data_channel_rejframe(chan, rx_control);
4138 case L2CAP_SUPER_SREJ:
4139 l2cap_data_channel_srejframe(chan, rx_control);
4142 case L2CAP_SUPER_RNR:
4143 l2cap_data_channel_rnrframe(chan, rx_control);
/* ERTM receive entry point: strip the control field, verify the FCS,
 * sanity-check the payload length and ReqSeq, then route the frame to
 * the I-frame or S-frame handler.  Invalid frames tear the link down.
 */
4151 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4155 int len, next_tx_seq_offset, req_seq_offset;
4157 control = __get_control(chan, skb->data);
4158 skb_pull(skb, __ctrl_size(chan));
4162 * We can just drop the corrupted I-frame here.
4163 * Receiver will miss it and start proper recovery
4164 * procedures and ask retransmission.
4166 if (l2cap_check_fcs(chan, skb))
/* Payload length excludes the SDU-length prefix of START frames */
4169 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4170 len -= L2CAP_SDULEN_SIZE;
4172 if (chan->fcs == L2CAP_FCS_CRC16)
4173 len -= L2CAP_FCS_SIZE;
/* Frame larger than the negotiated MPS: protocol violation */
4175 if (len > chan->mps) {
4176 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4180 req_seq = __get_reqseq(chan, control);
4182 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4184 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4185 chan->expected_ack_seq);
4187 /* check for invalid req-seq */
/* ReqSeq may not acknowledge frames we never sent */
4188 if (req_seq_offset > next_tx_seq_offset) {
4189 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4193 if (!__is_sframe(chan, control)) {
/* I-frame with no payload is invalid (length guard not visible) */
4195 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4199 l2cap_data_channel_iframe(chan, control, skb);
/* S-frame must carry no payload */
4203 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4207 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver an inbound data frame to the channel identified by CID,
 * handling each channel mode: basic (direct delivery), ERTM (full state
 * machine) and streaming (no retransmission; missing frames just drop
 * the partial SDU).
 * NOTE(review): lossy extract — locking and drop/done labels not shown.
 */
4217 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4219 struct l2cap_chan *chan;
4220 struct sock *sk = NULL;
4225 chan = l2cap_get_chan_by_scid(conn, cid);
4227 BT_DBG("unknown cid 0x%4.4x", cid);
4233 BT_DBG("chan %p, len %d", chan, skb->len);
/* Data only flows on fully connected channels */
4235 if (chan->state != BT_CONNECTED)
4238 switch (chan->mode) {
4239 case L2CAP_MODE_BASIC:
4240 /* If socket recv buffers overflows we drop data here
4241 * which is *bad* because L2CAP has to be reliable.
4242 * But we don't have any other choice. L2CAP doesn't
4243 * provide flow control mechanism. */
4245 if (chan->imtu < skb->len)
4248 if (!chan->ops->recv(chan->data, skb))
4252 case L2CAP_MODE_ERTM:
4253 l2cap_ertm_data_rcv(chan, skb);
4257 case L2CAP_MODE_STREAMING:
4258 control = __get_control(chan, skb->data);
4259 skb_pull(skb, __ctrl_size(chan));
4262 if (l2cap_check_fcs(chan, skb))
4265 if (__is_sar_start(chan, control))
4266 len -= L2CAP_SDULEN_SIZE;
4268 if (chan->fcs == L2CAP_FCS_CRC16)
4269 len -= L2CAP_FCS_SIZE;
/* Streaming mode carries I-frames only, bounded by MPS */
4271 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4274 tx_seq = __get_txseq(chan, control);
4276 if (chan->expected_tx_seq != tx_seq) {
4277 /* Frame(s) missing - must discard partial SDU */
4278 kfree_skb(chan->sdu);
4280 chan->sdu_last_frag = NULL;
4283 /* TODO: Notify userland of missing data */
4286 chan->expected_tx_seq = __next_seq(chan, tx_seq);
/* Oversized SDU is the one unrecoverable streaming error */
4288 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4289 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4294 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (group) frame: find a channel bound to the
 * PSM on our source address and pass the skb up if it fits the MTU.
 */
4308 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4310 struct sock *sk = NULL;
4311 struct l2cap_chan *chan;
4313 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4321 BT_DBG("sk %p, len %d", sk, skb->len);
4323 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4326 if (chan->imtu < skb->len)
4329 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE fixed channel) frame: find a channel bound to the
 * fixed CID on our source address and pass the skb up if it fits the MTU.
 */
4341 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4343 struct sock *sk = NULL;
4344 struct l2cap_chan *chan;
4346 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4354 BT_DBG("sk %p, len %d", sk, skb->len);
4356 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4359 if (chan->imtu < skb->len)
4362 if (!chan->ops->recv(chan->data, skb))
/* Top-level demultiplexer for a complete L2CAP frame: strip the basic
 * header and route by CID to the signaling, connectionless, ATT/SMP or
 * regular data channel paths.
 */
4374 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4376 struct l2cap_hdr *lh = (void *) skb->data;
4380 skb_pull(skb, L2CAP_HDR_SIZE);
4381 cid = __le16_to_cpu(lh->cid);
4382 len = __le16_to_cpu(lh->len);
/* Header length must match the payload exactly (drop path not shown) */
4384 if (len != skb->len) {
4389 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4392 case L2CAP_CID_LE_SIGNALING:
4393 case L2CAP_CID_SIGNALING:
4394 l2cap_sig_channel(conn, skb);
4397 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM prefix in the payload */
4398 psm = get_unaligned_le16(skb->data);
4400 l2cap_conless_channel(conn, psm, skb);
4403 case L2CAP_CID_LE_DATA:
4404 l2cap_att_channel(conn, cid, skb);
/* SMP fixed channel: a failure kills the whole connection */
4408 if (smp_sig_channel(conn, skb))
4409 l2cap_conn_del(conn->hcon, EACCES);
/* Everything else is a dynamically allocated data channel */
4413 l2cap_data_channel(conn, cid, skb);
4418 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback for an incoming connection: scan listening channels and
 * build the accept/master link-mode mask.  A channel bound to this
 * adapter's own address (exact) takes precedence over wildcard binds.
 */
4420 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4422 int exact = 0, lm1 = 0, lm2 = 0;
4423 struct l2cap_chan *c;
4425 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4427 /* Find listening sockets and check their link_mode */
4428 read_lock(&chan_list_lock);
4429 list_for_each_entry(c, &chan_list, global_l) {
4430 struct sock *sk = c->sk;
4432 if (c->state != BT_LISTEN)
/* Exact bind to this adapter's address */
4435 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4436 lm1 |= HCI_LM_ACCEPT;
4437 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4438 lm1 |= HCI_LM_MASTER;
/* Wildcard bind (BDADDR_ANY) */
4440 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4441 lm2 |= HCI_LM_ACCEPT;
4442 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4443 lm2 |= HCI_LM_MASTER;
4446 read_unlock(&chan_list_lock);
4448 return exact ? lm1 : lm2;
/* HCI callback when a link comes up: on success attach an l2cap_conn
 * and mark it ready; on failure tear down any state with the mapped errno.
 */
4451 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4453 struct l2cap_conn *conn;
4455 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4458 conn = l2cap_conn_add(hcon, status);
4460 l2cap_conn_ready(conn);
4462 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback asking why we want to disconnect: report the reason
 * recorded on the l2cap_conn, defaulting to "remote user terminated"
 * when no L2CAP state exists. */
4467 int l2cap_disconn_ind(struct hci_conn *hcon)
4469 struct l2cap_conn *conn = hcon->l2cap_data;
4471 BT_DBG("hcon %p", hcon);
4474 return HCI_ERROR_REMOTE_USER_TERM;
4475 return conn->disc_reason;
/* HCI callback when the link went down: tear down the whole l2cap_conn,
 * translating the HCI reason code to an errno for the sockets. */
4478 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4480 BT_DBG("hcon %p reason %d", hcon, reason);
4482 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption starts a grace timer (MEDIUM security) or closes the
 * channel outright (HIGH security); regaining it cancels the timer.
 */
4486 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4488 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4491 if (encrypt == 0x00) {
/* MEDIUM: allow a short window for re-encryption before giving up */
4492 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4493 __clear_chan_timer(chan);
4494 __set_chan_timer(chan,
4495 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4496 } else if (chan->sec_level == BT_SECURITY_HIGH)
4497 l2cap_chan_close(chan, ECONNREFUSED);
4499 if (chan->sec_level == BT_SECURITY_MEDIUM)
4500 __clear_chan_timer(chan);
/* HCI callback after an authentication/encryption attempt completes.
 * LE links hand off to SMP key distribution; for BR/EDR we walk every
 * channel and either continue a pending connect, answer a deferred
 * incoming connect, or apply the encryption-change policy.
 * NOTE(review): lossy extract — rcu_read_lock/unlock, continue/returns
 * and several braces between the visible lines are missing.
 */
4504 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4506 struct l2cap_conn *conn = hcon->l2cap_data;
4507 struct l2cap_chan *chan;
4512 BT_DBG("conn %p", conn);
4514 if (hcon->type == LE_LINK) {
4515 smp_distribute_keys(conn, 0);
4516 cancel_delayed_work(&conn->security_timer);
4521 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4522 struct sock *sk = chan->sk;
4526 BT_DBG("chan->scid %d", chan->scid);
/* LE data channel: security success makes the channel ready */
4528 if (chan->scid == L2CAP_CID_LE_DATA) {
4529 if (!status && encrypt) {
4530 chan->sec_level = hcon->sec_level;
4531 l2cap_chan_ready(chan);
/* Channels not waiting on this security procedure: just apply
 * the encryption-change policy if they are already up */
4538 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4543 if (!status && (chan->state == BT_CONNECTED ||
4544 chan->state == BT_CONFIG)) {
4545 l2cap_check_encryption(chan, encrypt);
/* Outgoing connect was waiting on security: send CONN_REQ now */
4550 if (chan->state == BT_CONNECT) {
4552 struct l2cap_conn_req req;
4553 req.scid = cpu_to_le16(chan->scid);
4554 req.psm = chan->psm;
4556 chan->ident = l2cap_get_ident(conn);
4557 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4559 l2cap_send_cmd(conn, chan->ident,
4560 L2CAP_CONN_REQ, sizeof(req), &req);
/* Security failed: schedule disconnect */
4562 __clear_chan_timer(chan);
4563 __set_chan_timer(chan,
4564 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
/* Incoming connect was deferred pending security: answer it */
4566 } else if (chan->state == BT_CONNECT2) {
4567 struct l2cap_conn_rsp rsp;
/* With defer_setup, keep the peer pending and wake the listener */
4571 if (bt_sk(sk)->defer_setup) {
4572 struct sock *parent = bt_sk(sk)->parent;
4573 res = L2CAP_CR_PEND;
4574 stat = L2CAP_CS_AUTHOR_PEND;
4576 parent->sk_data_ready(parent, 0);
4578 l2cap_state_change(chan, BT_CONFIG);
4579 res = L2CAP_CR_SUCCESS;
4580 stat = L2CAP_CS_NO_INFO;
/* Security failure: refuse the connection */
4583 l2cap_state_change(chan, BT_DISCONN);
4584 __set_chan_timer(chan,
4585 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4586 res = L2CAP_CR_SEC_BLOCK;
4587 stat = L2CAP_CS_NO_INFO;
4590 rsp.scid = cpu_to_le16(chan->dcid);
4591 rsp.dcid = cpu_to_le16(chan->scid);
4592 rsp.result = cpu_to_le16(res);
4593 rsp.status = cpu_to_le16(stat);
4594 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* HCI callback delivering ACL data.  Start fragments (no ACL_CONT flag)
 * either contain a complete L2CAP frame, which is processed immediately,
 * or open a reassembly buffer sized from the L2CAP header; continuation
 * fragments are appended until rx_len reaches zero, then the frame is
 * processed.  Malformed sequences mark the connection unreliable.
 * NOTE(review): lossy extract — gotos/returns, kfree_skb calls and some
 * braces between the visible lines are missing.
 */
4606 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4608 struct l2cap_conn *conn = hcon->l2cap_data;
/* First data on this link: create the l2cap_conn lazily */
4611 conn = l2cap_conn_add(hcon, 0);
4616 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4618 if (!(flags & ACL_CONT)) {
4619 struct l2cap_hdr *hdr;
4620 struct l2cap_chan *chan;
/* A new start while reassembly is in progress: drop the old frame */
4625 BT_ERR("Unexpected start frame (len %d)", skb->len);
4626 kfree_skb(conn->rx_skb);
4627 conn->rx_skb = NULL;
4629 l2cap_conn_unreliable(conn, ECOMM);
4632 /* Start fragment always begin with Basic L2CAP header */
4633 if (skb->len < L2CAP_HDR_SIZE) {
4634 BT_ERR("Frame is too short (len %d)", skb->len);
4635 l2cap_conn_unreliable(conn, ECOMM);
4639 hdr = (struct l2cap_hdr *) skb->data;
4640 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4641 cid = __le16_to_cpu(hdr->cid);
4643 if (len == skb->len) {
4644 /* Complete frame received */
4645 l2cap_recv_frame(conn, skb);
4649 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4651 if (skb->len > len) {
4652 BT_ERR("Frame is too long (len %d, expected len %d)",
4654 l2cap_conn_unreliable(conn, ECOMM);
/* Pre-check the target channel's MTU before buffering the frame */
4658 chan = l2cap_get_chan_by_scid(conn, cid);
4660 if (chan && chan->sk) {
4661 struct sock *sk = chan->sk;
4663 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4664 BT_ERR("Frame exceeding recv MTU (len %d, "
4668 l2cap_conn_unreliable(conn, ECOMM);
4674 /* Allocate skb for the complete frame (with header) */
4675 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4679 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many payload bytes are still expected */
4681 conn->rx_len = len - skb->len;
/* Continuation fragment path */
4683 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4685 if (!conn->rx_len) {
4686 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4687 l2cap_conn_unreliable(conn, ECOMM);
4691 if (skb->len > conn->rx_len) {
4692 BT_ERR("Fragment is too long (len %d, expected %d)",
4693 skb->len, conn->rx_len);
4694 kfree_skb(conn->rx_skb);
4695 conn->rx_skb = NULL;
4697 l2cap_conn_unreliable(conn, ECOMM);
4701 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4703 conn->rx_len -= skb->len;
4705 if (!conn->rx_len) {
4706 /* Complete frame received */
4707 l2cap_recv_frame(conn, conn->rx_skb);
4708 conn->rx_skb = NULL;
/*
 * seq_file show callback for the "l2cap" debugfs entry: prints one
 * line per channel on the global chan_list -- source/destination
 * addresses, state, PSM (converted from little-endian), SCID/DCID,
 * incoming/outgoing MTU, security level, and mode -- while holding
 * chan_list_lock for read.
 */
4717 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4719 struct l2cap_chan *c;
4721 read_lock(&chan_list_lock);
4723 list_for_each_entry(c, &chan_list, global_l) {
4724 struct sock *sk = c->sk;
4726 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4727 batostr(&bt_sk(sk)->src),
4728 batostr(&bt_sk(sk)->dst),
4729 c->state, __le16_to_cpu(c->psm),
4730 c->scid, c->dcid, c->imtu, c->omtu,
4731 c->sec_level, c->mode),
4734 read_unlock(&chan_list_lock);
/* debugfs open callback: wire the file to the single-record seq_file
 * show handler above. */
4739 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4741 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs file; standard seq_file
 * single_open/seq_lseek/single_release trio.
 * NOTE(review): the usual `.read = seq_read,` member appears to be
 * elided from this excerpt -- confirm against the full file. */
4744 static const struct file_operations l2cap_debugfs_fops = {
4745 .open = l2cap_debugfs_open,
4747 .llseek = seq_lseek,
4748 .release = single_release,
/* Dentry for the debugfs "l2cap" file; created in l2cap_init() and
 * removed in l2cap_exit(). */
4751 static struct dentry *l2cap_debugfs;
/*
 * Module/subsystem init: register the L2CAP socket family, then create
 * the read-only (0444) debugfs file under bt_debugfs.  Failure to
 * create the debugfs file is logged but -- per the visible code -- not
 * treated as fatal.
 * NOTE(review): the error-check lines for `err` are elided in this
 * excerpt.
 */
4753 int __init l2cap_init(void)
4757 err = l2cap_init_sockets();
4762 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4763 bt_debugfs, NULL, &l2cap_debugfs_fops);
4765 BT_ERR("Failed to create L2CAP debug file");
/* Teardown counterpart of l2cap_init(): remove the debugfs entry
 * (debugfs_remove is a no-op on NULL) and unregister the sockets. */
4771 void l2cap_exit(void)
4773 debugfs_remove(l2cap_debugfs);
4774 l2cap_cleanup_sockets();
/* Module parameter: lets the administrator disable Enhanced
 * Retransmission Mode at load time (writable at runtime, mode 0644). */
4777 module_param(disable_ertm, bool, 0644);
4778 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");