2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 /* ---- L2CAP channels ---- */
/*
 * NOTE(review): this excerpt is decimated -- the leading number on each
 * line is the original file's line number and many interior lines are
 * elided, so the function bodies below are incomplete as shown.
 */
/* Walk this connection's channel list (RCU) looking up a channel;
 * presumably matches c->dcid == cid -- comparison line elided, confirm. */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
80 struct l2cap_chan *c, *r = NULL;
84 list_for_each_entry_rcu(c, &conn->chan_l, list) {
/* Same walk keyed on the source CID (c->scid). */
95 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
97 struct l2cap_chan *c, *r = NULL;
101 list_for_each_entry_rcu(c, &conn->chan_l, list) {
102 if (c->scid == cid) {
112 /* Find channel with given SCID.
113 * Returns locked socket */
/* Public wrapper around __l2cap_get_chan_by_scid(); locking of the
 * result happens in the elided lines -- see header comment above. */
114 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
116 struct l2cap_chan *c;
118 c = __l2cap_get_chan_by_scid(conn, cid);
/* Look up a channel by the signalling command identifier it used. */
124 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
126 struct l2cap_chan *c, *r = NULL;
130 list_for_each_entry_rcu(c, &conn->chan_l, list) {
131 if (c->ident == ident) {
/* Locking wrapper for the ident lookup above. */
141 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
143 struct l2cap_chan *c;
145 c = __l2cap_get_chan_by_ident(conn, ident);
/* Global (cross-connection) lookup: channel bound to this source PSM
 * and source bdaddr.  Caller must hold chan_list_lock. */
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
153 struct l2cap_chan *c;
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM.  If psm is nonzero it must be free for this
 * source address; otherwise an odd dynamic PSM in 0x1001..0x10ff is
 * auto-allocated (step 2 keeps the LSB odd per the L2CAP spec). */
162 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
166 write_lock(&chan_list_lock);
168 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
181 for (p = 0x1001; p < 0x1100; p += 2)
182 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
183 chan->psm = cpu_to_le16(p);
184 chan->sport = cpu_to_le16(p);
191 write_unlock(&chan_list_lock);
/* Record a fixed SCID on the channel; body elided apart from locking. */
195 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
197 write_lock(&chan_list_lock);
201 write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic CID on this connection by probing
 * the SCID lookup over [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
206 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
208 u16 cid = L2CAP_CID_DYN_START;
210 for (; cid < L2CAP_CID_DYN_END; cid++) {
211 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Map a BT_* channel state to a printable name for debug output.
 * (Most case labels elided in this excerpt.) */
218 static char *state_to_string(int state)
222 return "BT_CONNECTED";
232 return "BT_CONNECT2";
241 return "invalid state";
/* Transition the channel state and notify the owner via the
 * ops->state_change callback. */
244 static void l2cap_state_change(struct l2cap_chan *chan, int state)
246 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
247 state_to_string(state));
250 chan->ops->state_change(chan->data, state);
/* Delayed-work handler for the channel timer: pick an errno based on
 * the current state (ECONNREFUSED while connecting/configuring), close
 * the channel, notify the owner and drop the work's channel reference. */
253 static void l2cap_chan_timeout(struct work_struct *work)
255 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
257 struct sock *sk = chan->sk;
260 BT_DBG("chan %p state %d", chan, chan->state);
264 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
265 reason = ECONNREFUSED;
266 else if (chan->state == BT_CONNECT &&
267 chan->sec_level != BT_SECURITY_SDP)
268 reason = ECONNREFUSED;
272 l2cap_chan_close(chan, reason);
276 chan->ops->close(chan->data);
277 l2cap_chan_put(chan);
/* Allocate a channel for @sk, register it on the global chan_list,
 * arm its timer work and take the initial refcount.
 * NOTE(review): GFP_ATOMIC here -- presumably callable from atomic
 * context; confirm against callers before relaxing to GFP_KERNEL. */
280 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
282 struct l2cap_chan *chan;
284 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
290 write_lock(&chan_list_lock);
291 list_add(&chan->global_l, &chan_list);
292 write_unlock(&chan_list_lock);
294 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
296 chan->state = BT_OPEN;
298 atomic_set(&chan->refcnt, 1);
300 BT_DBG("sk %p chan %p", sk, chan);
/* Unregister from the global list and drop the creation reference;
 * the channel is freed once the refcount reaches zero. */
305 void l2cap_chan_destroy(struct l2cap_chan *chan)
307 write_lock(&chan_list_lock);
308 list_del(&chan->global_l);
309 write_unlock(&chan_list_lock);
311 l2cap_chan_put(chan);
/* Attach @chan to @conn: pick SCID/DCID and MTU defaults per channel
 * type (LE data, dynamically allocated CID, connectionless, or raw
 * signalling), seed the best-effort QoS defaults, take a channel
 * reference and link it onto the connection's RCU channel list. */
314 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
316 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
317 chan->psm, chan->dcid);
319 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
323 switch (chan->chan_type) {
324 case L2CAP_CHAN_CONN_ORIENTED:
325 if (conn->hcon->type == LE_LINK) {
327 chan->omtu = L2CAP_LE_DEFAULT_MTU;
328 chan->scid = L2CAP_CID_LE_DATA;
329 chan->dcid = L2CAP_CID_LE_DATA;
331 /* Alloc CID for connection-oriented socket */
332 chan->scid = l2cap_alloc_cid(conn);
333 chan->omtu = L2CAP_DEFAULT_MTU;
337 case L2CAP_CHAN_CONN_LESS:
338 /* Connectionless socket */
339 chan->scid = L2CAP_CID_CONN_LESS;
340 chan->dcid = L2CAP_CID_CONN_LESS;
341 chan->omtu = L2CAP_DEFAULT_MTU;
345 /* Raw socket can send/recv signalling messages only */
346 chan->scid = L2CAP_CID_SIGNALING;
347 chan->dcid = L2CAP_CID_SIGNALING;
348 chan->omtu = L2CAP_DEFAULT_MTU;
351 chan->local_id = L2CAP_BESTEFFORT_ID;
352 chan->local_stype = L2CAP_SERV_BESTEFFORT;
353 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
354 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
355 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
356 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
358 l2cap_chan_hold(chan);
360 list_add_rcu(&chan->list, &conn->chan_l);
364 * Must be called on the locked socket. */
/* Detach @chan from its connection with error @err: stop the timer,
 * unlink from the connection list, drop references, mark the socket
 * closed/zapped, wake any accepting parent, and -- unless configuration
 * completed on both sides -- purge ERTM state (queues, SREJ list,
 * retransmit/monitor/ack timers). */
365 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
367 struct sock *sk = chan->sk;
368 struct l2cap_conn *conn = chan->conn;
369 struct sock *parent = bt_sk(sk)->parent;
371 __clear_chan_timer(chan);
373 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
376 /* Delete from channel list */
377 list_del_rcu(&chan->list);
380 l2cap_chan_put(chan);
383 hci_conn_put(conn->hcon);
386 l2cap_state_change(chan, BT_CLOSED);
387 sock_set_flag(sk, SOCK_ZAPPED);
393 bt_accept_unlink(sk);
394 parent->sk_data_ready(parent, 0);
396 sk->sk_state_change(sk);
398 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
399 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
402 skb_queue_purge(&chan->tx_q);
404 if (chan->mode == L2CAP_MODE_ERTM) {
405 struct srej_list *l, *tmp;
407 __clear_retrans_timer(chan);
408 __clear_monitor_timer(chan);
409 __clear_ack_timer(chan);
411 skb_queue_purge(&chan->srej_q);
413 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Force-close every not-yet-accepted child channel of a listening
 * socket with ECONNRESET. */
420 static void l2cap_chan_cleanup_listen(struct sock *parent)
424 BT_DBG("parent %p", parent);
426 /* Close not yet accepted channels */
427 while ((sk = bt_accept_dequeue(parent, NULL))) {
428 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
429 __clear_chan_timer(chan);
431 l2cap_chan_close(chan, ECONNRESET);
433 chan->ops->close(chan->data);
/* Close @chan according to its current state:
 *  - listening: tear down pending children, mark closed/zapped;
 *  - connected/config on ACL: send a Disconnect Request and re-arm the
 *    channel timer, else delete immediately;
 *  - connect2 on ACL: answer the pending Connect Request with
 *    SEC_BLOCK (deferred setup) or BAD_PSM, then delete;
 *  - default: just zap the socket.
 * (Case labels and some closing braces elided in this excerpt.) */
437 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
439 struct l2cap_conn *conn = chan->conn;
440 struct sock *sk = chan->sk;
442 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
444 switch (chan->state) {
446 l2cap_chan_cleanup_listen(sk);
448 l2cap_state_change(chan, BT_CLOSED);
449 sock_set_flag(sk, SOCK_ZAPPED);
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) {
456 __clear_chan_timer(chan);
457 __set_chan_timer(chan, sk->sk_sndtimeo);
458 l2cap_send_disconn_req(conn, chan, reason);
460 l2cap_chan_del(chan, reason);
464 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
465 conn->hcon->type == ACL_LINK) {
466 struct l2cap_conn_rsp rsp;
469 if (bt_sk(sk)->defer_setup)
470 result = L2CAP_CR_SEC_BLOCK;
472 result = L2CAP_CR_BAD_PSM;
473 l2cap_state_change(chan, BT_DISCONN);
475 rsp.scid = cpu_to_le16(chan->dcid);
476 rsp.dcid = cpu_to_le16(chan->scid);
477 rsp.result = cpu_to_le16(result);
478 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
479 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
483 l2cap_chan_del(chan, reason);
488 l2cap_chan_del(chan, reason);
492 sock_set_flag(sk, SOCK_ZAPPED);
/* Translate the channel's security level into an HCI authentication
 * requirement.  Raw channels request dedicated bonding; PSM 0x0001
 * (SDP) is downgraded to BT_SECURITY_SDP and never bonds; everything
 * else requests general bonding per sec_level. */
497 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
499 if (chan->chan_type == L2CAP_CHAN_RAW) {
500 switch (chan->sec_level) {
501 case BT_SECURITY_HIGH:
502 return HCI_AT_DEDICATED_BONDING_MITM;
503 case BT_SECURITY_MEDIUM:
504 return HCI_AT_DEDICATED_BONDING;
506 return HCI_AT_NO_BONDING;
508 } else if (chan->psm == cpu_to_le16(0x0001)) {
509 if (chan->sec_level == BT_SECURITY_LOW)
510 chan->sec_level = BT_SECURITY_SDP;
512 if (chan->sec_level == BT_SECURITY_HIGH)
513 return HCI_AT_NO_BONDING_MITM;
515 return HCI_AT_NO_BONDING;
517 switch (chan->sec_level) {
518 case BT_SECURITY_HIGH:
519 return HCI_AT_GENERAL_BONDING_MITM;
520 case BT_SECURITY_MEDIUM:
521 return HCI_AT_GENERAL_BONDING;
523 return HCI_AT_NO_BONDING;
528 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying link. */
529 int l2cap_chan_check_security(struct l2cap_chan *chan)
531 struct l2cap_conn *conn = chan->conn;
534 auth_type = l2cap_get_auth_type(chan);
536 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling identifier for this connection; wraps
 * within 1..128 (the kernel-reserved range) under conn->lock. */
539 static u8 l2cap_get_ident(struct l2cap_conn *conn)
543 /* Get next available identificator.
544 * 1 - 128 are used by kernel.
545 * 129 - 199 are reserved.
546 * 200 - 254 are used by utilities like l2ping, etc.
549 spin_lock(&conn->lock);
551 if (++conn->tx_ident > 128)
556 spin_unlock(&conn->lock);
/* Build a signalling command skb and push it out over the HCI channel
 * at maximum priority; uses non-flushable ACL start when the adapter
 * supports it. */
561 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
563 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
566 BT_DBG("code 0x%2.2x", code);
571 if (lmp_no_flush_capable(conn->hcon->hdev))
572 flags = ACL_START_NO_FLUSH;
576 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
577 skb->priority = HCI_PRIO_MAX;
579 hci_send_acl(conn->hchan, skb, flags);
/* Transmit one data skb for @chan over ACL, honouring the channel's
 * flushable and force-active flags. */
582 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
584 struct hci_conn *hcon = chan->conn->hcon;
587 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
590 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
591 lmp_no_flush_capable(hcon->hdev))
592 flags = ACL_START_NO_FLUSH;
596 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
597 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM supervisory frame carrying @control.  Picks
 * enhanced vs extended control field size, appends CRC16 FCS when
 * negotiated, and folds in pending F-bit/P-bit state. */
600 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
603 struct l2cap_hdr *lh;
604 struct l2cap_conn *conn = chan->conn;
607 if (chan->state != BT_CONNECTED)
610 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
611 hlen = L2CAP_EXT_HDR_SIZE;
613 hlen = L2CAP_ENH_HDR_SIZE;
615 if (chan->fcs == L2CAP_FCS_CRC16)
616 hlen += L2CAP_FCS_SIZE;
618 BT_DBG("chan %p, control 0x%8.8x", chan, control);
620 count = min_t(unsigned int, conn->mtu, hlen);
622 control |= __set_sframe(chan);
624 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
625 control |= __set_ctrl_final(chan);
627 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
628 control |= __set_ctrl_poll(chan);
630 skb = bt_skb_alloc(count, GFP_ATOMIC);
634 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
635 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
636 lh->cid = cpu_to_le16(chan->dcid);
638 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
640 if (chan->fcs == L2CAP_FCS_CRC16) {
641 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
642 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
645 skb->priority = HCI_PRIO_MAX;
646 l2cap_do_send(chan, skb);
/* Send RR, or RNR while locally busy (recording that RNR went out),
 * always acknowledging up to buffer_seq. */
649 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
651 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
652 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
653 set_bit(CONN_RNR_SENT, &chan->conn_state);
655 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
657 control |= __set_reqseq(chan, chan->buffer_seq);
659 l2cap_send_sframe(chan, control);
/* True when no Connect Request from us is outstanding on this channel. */
662 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
664 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off connection establishment for @chan.  If the remote feature
 * mask is already known (or being fetched), send a Connect Request once
 * security passes; otherwise first issue an Information Request for the
 * feature mask and arm the info timer. */
667 static void l2cap_do_start(struct l2cap_chan *chan)
669 struct l2cap_conn *conn = chan->conn;
671 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
672 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
675 if (l2cap_chan_check_security(chan) &&
676 __l2cap_no_conn_pending(chan)) {
677 struct l2cap_conn_req req;
678 req.scid = cpu_to_le16(chan->scid);
681 chan->ident = l2cap_get_ident(conn);
682 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
684 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
688 struct l2cap_info_req req;
689 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
691 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
692 conn->info_ident = l2cap_get_ident(conn);
694 schedule_delayed_work(&conn->info_timer,
695 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
697 l2cap_send_cmd(conn, conn->info_ident,
698 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check that @mode (ERTM/streaming) is supported by both the local
 * feature mask and the remote's @feat_mask. */
702 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
704 u32 local_feat_mask = l2cap_feat_mask;
706 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
709 case L2CAP_MODE_ERTM:
710 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
711 case L2CAP_MODE_STREAMING:
712 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request for @chan, first stopping ERTM timers, and
 * move the channel to BT_DISCONN. */
718 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
721 struct l2cap_disconn_req req;
728 if (chan->mode == L2CAP_MODE_ERTM) {
729 __clear_retrans_timer(chan);
730 __clear_monitor_timer(chan);
731 __clear_ack_timer(chan);
734 req.dcid = cpu_to_le16(chan->dcid);
735 req.scid = cpu_to_le16(chan->scid);
736 l2cap_send_cmd(conn, l2cap_get_ident(conn),
737 L2CAP_DISCONN_REQ, sizeof(req), &req);
739 l2cap_state_change(chan, BT_DISCONN);
743 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward:
 *  - BT_CONNECT: send a Connect Request once security clears, or close
 *    the channel if its mode is unsupported and state-2 config applies;
 *  - BT_CONNECT2: answer the pending Connect Request (success, or
 *    pending with authorization/authentication status), then start
 *    configuration by sending a Config Request. */
744 static void l2cap_conn_start(struct l2cap_conn *conn)
746 struct l2cap_chan *chan;
748 BT_DBG("conn %p", conn);
752 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
753 struct sock *sk = chan->sk;
757 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
762 if (chan->state == BT_CONNECT) {
763 struct l2cap_conn_req req;
765 if (!l2cap_chan_check_security(chan) ||
766 !__l2cap_no_conn_pending(chan)) {
771 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
772 && test_bit(CONF_STATE2_DEVICE,
773 &chan->conf_state)) {
774 /* l2cap_chan_close() calls list_del(chan)
775 * so release the lock */
776 l2cap_chan_close(chan, ECONNRESET);
781 req.scid = cpu_to_le16(chan->scid);
784 chan->ident = l2cap_get_ident(conn);
785 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
787 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
790 } else if (chan->state == BT_CONNECT2) {
791 struct l2cap_conn_rsp rsp;
793 rsp.scid = cpu_to_le16(chan->dcid);
794 rsp.dcid = cpu_to_le16(chan->scid);
796 if (l2cap_chan_check_security(chan)) {
797 if (bt_sk(sk)->defer_setup) {
798 struct sock *parent = bt_sk(sk)->parent;
799 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
800 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
802 parent->sk_data_ready(parent, 0);
805 l2cap_state_change(chan, BT_CONFIG);
806 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
807 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
810 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
811 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
814 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
817 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
818 rsp.result != L2CAP_CR_SUCCESS) {
823 set_bit(CONF_REQ_SENT, &chan->conf_state);
824 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
825 l2cap_build_conf_req(chan, buf), buf);
826 chan->num_conf_req++;
835 /* Find socket with cid and source bdaddr.
836 * Returns closest match, locked.
/* Global lookup by fixed CID: prefer an exact source-address match
 * (returned immediately), remember a BDADDR_ANY listener as fallback. */
838 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
840 struct l2cap_chan *c, *c1 = NULL;
842 read_lock(&chan_list_lock);
844 list_for_each_entry(c, &chan_list, global_l) {
845 struct sock *sk = c->sk;
847 if (state && c->state != state)
850 if (c->scid == cid) {
852 if (!bacmp(&bt_sk(sk)->src, src)) {
853 read_unlock(&chan_list_lock);
858 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
863 read_unlock(&chan_list_lock);
/* Incoming LE connection: find a listener on the LE data CID, spawn a
 * child channel (respecting the accept backlog), copy addresses, attach
 * it to the connection and mark it connected. */
868 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
870 struct sock *parent, *sk;
871 struct l2cap_chan *chan, *pchan;
875 /* Check if we have socket listening on cid */
876 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
885 /* Check for backlog size */
886 if (sk_acceptq_is_full(parent)) {
887 BT_DBG("backlog full %d", parent->sk_ack_backlog);
891 chan = pchan->ops->new_connection(pchan->data);
897 hci_conn_hold(conn->hcon);
899 bacpy(&bt_sk(sk)->src, conn->src);
900 bacpy(&bt_sk(sk)->dst, conn->dst);
902 bt_accept_enqueue(parent, sk);
904 l2cap_chan_add(conn, chan);
906 __set_chan_timer(chan, sk->sk_sndtimeo);
908 l2cap_state_change(chan, BT_CONNECTED);
909 parent->sk_data_ready(parent, 0);
912 release_sock(parent);
/* Mark @chan fully connected: clear config state and timer, wake the
 * socket and, if present, the accepting parent. */
915 static void l2cap_chan_ready(struct l2cap_chan *chan)
917 struct sock *sk = chan->sk;
918 struct sock *parent = bt_sk(sk)->parent;
920 BT_DBG("sk %p, parent %p", sk, parent);
922 chan->conf_state = 0;
923 __clear_chan_timer(chan);
925 l2cap_state_change(chan, BT_CONNECTED);
926 sk->sk_state_change(sk);
929 parent->sk_data_ready(parent, 0);
/* Link is up: handle LE accept/security, then walk all channels --
 * LE channels become ready once SMP security passes, non-connection-
 * oriented channels go straight to BT_CONNECTED, and BT_CONNECT
 * channels (re)start the connect procedure. */
932 static void l2cap_conn_ready(struct l2cap_conn *conn)
934 struct l2cap_chan *chan;
936 BT_DBG("conn %p", conn);
938 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
939 l2cap_le_conn_ready(conn);
941 if (conn->hcon->out && conn->hcon->type == LE_LINK)
942 smp_conn_security(conn, conn->hcon->pending_sec_level);
946 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
947 struct sock *sk = chan->sk;
951 if (conn->hcon->type == LE_LINK) {
952 if (smp_conn_security(conn, chan->sec_level))
953 l2cap_chan_ready(chan);
955 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
956 __clear_chan_timer(chan);
957 l2cap_state_change(chan, BT_CONNECTED);
958 sk->sk_state_change(sk);
960 } else if (chan->state == BT_CONNECT)
961 l2cap_do_start(chan);
969 /* Notify sockets that we cannot guaranty reliability anymore */
/* Report @err on every channel flagged FORCE_RELIABLE (error
 * propagation lines elided in this excerpt). */
970 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
972 struct l2cap_chan *chan;
974 BT_DBG("conn %p", conn);
978 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
979 struct sock *sk = chan->sk;
981 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
/* Information Request timed out: treat the feature-mask exchange as
 * done and proceed with channel establishment anyway. */
988 static void l2cap_info_timeout(struct work_struct *work)
990 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
993 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
994 conn->info_ident = 0;
996 l2cap_conn_start(conn);
/* Tear down the L2CAP connection attached to @hcon with error @err:
 * free any partial reassembly skb, delete every channel, drop the HCI
 * channel, cancel the info timer and any pending SMP work, then detach
 * from the hci_conn. */
999 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1001 struct l2cap_conn *conn = hcon->l2cap_data;
1002 struct l2cap_chan *chan, *l;
1008 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1010 kfree_skb(conn->rx_skb);
1013 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1016 l2cap_chan_del(chan, err);
1018 chan->ops->close(chan->data);
1021 hci_chan_del(conn->hchan);
1023 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1024 cancel_delayed_work_sync(&conn->info_timer);
1026 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1027 cancel_delayed_work_sync(&conn->security_timer);
1028 smp_chan_destroy(conn);
1031 hcon->l2cap_data = NULL;
/* SMP security procedure timed out: drop the whole connection. */
1035 static void security_timeout(struct work_struct *work)
1037 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1038 security_timer.work);
1040 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocate an
 * HCI channel, pick the MTU from the link type (LE vs ACL), record
 * source/destination addresses, init the channel list and the timer
 * appropriate to the link type (security timer for LE, info timer
 * otherwise). */
1043 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1045 struct l2cap_conn *conn = hcon->l2cap_data;
1046 struct hci_chan *hchan;
1051 hchan = hci_chan_create(hcon);
1055 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1057 hci_chan_del(hchan);
1061 hcon->l2cap_data = conn;
1063 conn->hchan = hchan;
1065 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1067 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1068 conn->mtu = hcon->hdev->le_mtu;
1070 conn->mtu = hcon->hdev->acl_mtu;
1072 conn->src = &hcon->hdev->bdaddr;
1073 conn->dst = &hcon->dst;
1075 conn->feat_mask = 0;
1077 spin_lock_init(&conn->lock);
1079 INIT_LIST_HEAD(&conn->chan_l);
1081 if (hcon->type == LE_LINK)
1082 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1084 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1086 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1091 /* ---- Socket interface ---- */
1093 /* Find socket with psm and source bdaddr.
1094 * Returns closest match.
/* Global lookup by PSM: exact source-address match wins, a BDADDR_ANY
 * listener is kept as closest fallback. */
1096 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1098 struct l2cap_chan *c, *c1 = NULL;
1100 read_lock(&chan_list_lock);
1102 list_for_each_entry(c, &chan_list, global_l) {
1103 struct sock *sk = c->sk;
1105 if (state && c->state != state)
1108 if (c->psm == psm) {
1110 if (!bacmp(&bt_sk(sk)->src, src)) {
1111 read_unlock(&chan_list_lock);
1116 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1121 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection on @chan toward @dst: validate the
 * PSM (must be odd with clear LSB of the upper byte) and channel mode,
 * route to an adapter, create the HCI link (LE for the LE data CID,
 * ACL otherwise), attach the channel and either start the L2CAP
 * connect procedure or wait for the link to come up. */
1126 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1128 struct sock *sk = chan->sk;
1129 bdaddr_t *src = &bt_sk(sk)->src;
1130 struct l2cap_conn *conn;
1131 struct hci_conn *hcon;
1132 struct hci_dev *hdev;
1136 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1139 hdev = hci_get_route(dst, src);
1141 return -EHOSTUNREACH;
1147 /* PSM must be odd and lsb of upper byte must be 0 */
1148 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1149 chan->chan_type != L2CAP_CHAN_RAW) {
1154 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1159 switch (chan->mode) {
1160 case L2CAP_MODE_BASIC:
1162 case L2CAP_MODE_ERTM:
1163 case L2CAP_MODE_STREAMING:
1172 switch (sk->sk_state) {
1176 /* Already connecting */
1181 /* Already connected */
1195 /* Set destination address and psm */
1196 bacpy(&bt_sk(sk)->dst, dst);
1200 auth_type = l2cap_get_auth_type(chan);
1202 if (chan->dcid == L2CAP_CID_LE_DATA)
1203 hcon = hci_connect(hdev, LE_LINK, dst,
1204 chan->sec_level, auth_type);
1206 hcon = hci_connect(hdev, ACL_LINK, dst,
1207 chan->sec_level, auth_type);
1210 err = PTR_ERR(hcon);
1214 conn = l2cap_conn_add(hcon, 0);
1221 /* Update source addr of the socket */
1222 bacpy(src, conn->src);
1224 l2cap_chan_add(conn, chan);
1226 l2cap_state_change(chan, BT_CONNECT);
1227 __set_chan_timer(chan, sk->sk_sndtimeo);
1229 if (hcon->state == BT_CONNECTED) {
1230 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1231 __clear_chan_timer(chan);
1232 if (l2cap_chan_check_security(chan))
1233 l2cap_state_change(chan, BT_CONNECTED);
1235 l2cap_do_start(chan);
1241 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged or the connection goes away; returns a sock error,
 * -EINTR-style signal errno, or 0. */
1246 int __l2cap_wait_ack(struct sock *sk)
1248 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1249 DECLARE_WAITQUEUE(wait, current);
1253 add_wait_queue(sk_sleep(sk), &wait);
1254 set_current_state(TASK_INTERRUPTIBLE);
1255 while (chan->unacked_frames > 0 && chan->conn) {
1259 if (signal_pending(current)) {
1260 err = sock_intr_errno(timeo);
1265 timeo = schedule_timeout(timeo);
1267 set_current_state(TASK_INTERRUPTIBLE);
1269 err = sock_error(sk);
1273 set_current_state(TASK_RUNNING);
1274 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer fired: give up and disconnect after
 * remote_max_tx retries, otherwise poll the peer again (RR/RNR with
 * P-bit) and re-arm. */
1278 static void l2cap_monitor_timeout(struct work_struct *work)
1280 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1281 monitor_timer.work);
1282 struct sock *sk = chan->sk;
1284 BT_DBG("chan %p", chan);
1287 if (chan->retry_count >= chan->remote_max_tx) {
1288 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1293 chan->retry_count++;
1294 __set_monitor_timer(chan);
1296 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer fired: start the monitor timer, flag that
 * we are waiting for an F-bit, and poll the peer. */
1300 static void l2cap_retrans_timeout(struct work_struct *work)
1302 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1303 retrans_timer.work);
1304 struct sock *sk = chan->sk;
1306 BT_DBG("chan %p", chan);
1309 chan->retry_count = 1;
1310 __set_monitor_timer(chan);
1312 set_bit(CONN_WAIT_F, &chan->conn_state);
1314 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Pop acknowledged I-frames off the head of tx_q up to (but not
 * including) expected_ack_seq, and stop the retransmission timer once
 * nothing is outstanding. */
1318 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1320 struct sk_buff *skb;
1322 while ((skb = skb_peek(&chan->tx_q)) &&
1323 chan->unacked_frames) {
1324 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1327 skb = skb_dequeue(&chan->tx_q);
1330 chan->unacked_frames--;
1333 if (!chan->unacked_frames)
1334 __clear_retrans_timer(chan);
/* Streaming mode: drain tx_q, stamping each frame's control field with
 * the next TX sequence number and (if enabled) recomputing the CRC16
 * FCS in place before sending.  No retransmission state is kept. */
1337 static void l2cap_streaming_send(struct l2cap_chan *chan)
1339 struct sk_buff *skb;
1343 while ((skb = skb_dequeue(&chan->tx_q))) {
1344 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1345 control |= __set_txseq(chan, chan->next_tx_seq);
1346 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1348 if (chan->fcs == L2CAP_FCS_CRC16) {
1349 fcs = crc16(0, (u8 *)skb->data,
1350 skb->len - L2CAP_FCS_SIZE);
1351 put_unaligned_le16(fcs,
1352 skb->data + skb->len - L2CAP_FCS_SIZE);
1355 l2cap_do_send(chan, skb);
1357 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single I-frame with sequence number @tx_seq: locate
 * it in tx_q, enforce remote_max_tx (disconnect when exhausted), clone
 * the skb, rebuild its control field (keeping only SAR bits, adding
 * F-bit if pending, current reqseq and @tx_seq) and refresh the FCS. */
1361 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1363 struct sk_buff *skb, *tx_skb;
1367 skb = skb_peek(&chan->tx_q);
1371 while (bt_cb(skb)->tx_seq != tx_seq) {
1372 if (skb_queue_is_last(&chan->tx_q, skb))
1375 skb = skb_queue_next(&chan->tx_q, skb);
1378 if (chan->remote_max_tx &&
1379 bt_cb(skb)->retries == chan->remote_max_tx) {
1380 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1384 tx_skb = skb_clone(skb, GFP_ATOMIC);
1385 bt_cb(skb)->retries++;
1387 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1388 control &= __get_sar_mask(chan);
1390 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1391 control |= __set_ctrl_final(chan);
1393 control |= __set_reqseq(chan, chan->buffer_seq);
1394 control |= __set_txseq(chan, tx_seq);
1396 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1398 if (chan->fcs == L2CAP_FCS_CRC16) {
1399 fcs = crc16(0, (u8 *)tx_skb->data,
1400 tx_skb->len - L2CAP_FCS_SIZE);
1401 put_unaligned_le16(fcs,
1402 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1405 l2cap_do_send(chan, tx_skb);
/* ERTM send loop: transmit from tx_send_head while the TX window has
 * room, cloning each skb and stamping control/FCS like above; arms the
 * retransmission timer, tracks unacked_frames/frames_sent and advances
 * tx_send_head.  Returns the number of frames sent (return statements
 * elided in this excerpt).
 * NOTE(review): the FCS here is computed from and written into
 * skb->data (the original), while the clone tx_skb is what gets sent
 * -- verify against the full source whether this relies on skb_clone's
 * shared data area. */
1408 static int l2cap_ertm_send(struct l2cap_chan *chan)
1410 struct sk_buff *skb, *tx_skb;
1415 if (chan->state != BT_CONNECTED)
1418 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1420 if (chan->remote_max_tx &&
1421 bt_cb(skb)->retries == chan->remote_max_tx) {
1422 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1426 tx_skb = skb_clone(skb, GFP_ATOMIC);
1428 bt_cb(skb)->retries++;
1430 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1431 control &= __get_sar_mask(chan);
1433 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1434 control |= __set_ctrl_final(chan);
1436 control |= __set_reqseq(chan, chan->buffer_seq);
1437 control |= __set_txseq(chan, chan->next_tx_seq);
1439 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1441 if (chan->fcs == L2CAP_FCS_CRC16) {
1442 fcs = crc16(0, (u8 *)skb->data,
1443 tx_skb->len - L2CAP_FCS_SIZE);
1444 put_unaligned_le16(fcs, skb->data +
1445 tx_skb->len - L2CAP_FCS_SIZE);
1448 l2cap_do_send(chan, tx_skb);
1450 __set_retrans_timer(chan);
1452 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1454 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1456 if (bt_cb(skb)->retries == 1)
1457 chan->unacked_frames++;
1459 chan->frames_sent++;
1461 if (skb_queue_is_last(&chan->tx_q, skb))
1462 chan->tx_send_head = NULL;
1464 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the start of tx_q and resend everything
 * from expected_ack_seq via l2cap_ertm_send(). */
1472 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1476 if (!skb_queue_empty(&chan->tx_q))
1477 chan->tx_send_head = chan->tx_q.next;
1479 chan->next_tx_seq = chan->expected_ack_seq;
1480 ret = l2cap_ertm_send(chan);
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * try to piggyback the ack on pending I-frames (l2cap_ertm_send > 0)
 * and fall back to an explicit RR. */
1484 static void __l2cap_send_ack(struct l2cap_chan *chan)
1488 control |= __set_reqseq(chan, chan->buffer_seq);
1490 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1491 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1492 set_bit(CONN_RNR_SENT, &chan->conn_state);
1493 l2cap_send_sframe(chan, control);
1497 if (l2cap_ertm_send(chan) > 0)
1500 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1501 l2cap_send_sframe(chan, control);
/* Cancel the delayed-ack timer and send the ack immediately. */
1504 static void l2cap_send_ack(struct l2cap_chan *chan)
1506 __clear_ack_timer(chan);
1507 __l2cap_send_ack(chan);
/* Send an SREJ S-frame (with F-bit) for the last entry on the SREJ
 * list. */
1510 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1512 struct srej_list *tail;
1515 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1516 control |= __set_ctrl_final(chan);
1518 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1519 control |= __set_reqseq(chan, tail->tx_seq);
1521 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb, then into MTU-sized
 * continuation fragments chained on frag_list, each allocated via the
 * owner's alloc_skb callback and inheriting the head skb's priority. */
1524 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1526 struct l2cap_conn *conn = chan->conn;
1527 struct sk_buff **frag;
1530 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1536 /* Continuation fragments (no L2CAP header) */
1537 frag = &skb_shinfo(skb)->frag_list;
1539 count = min_t(unsigned int, conn->mtu, len);
1541 *frag = chan->ops->alloc_skb(chan, count,
1542 msg->msg_flags & MSG_DONTWAIT, &err);
1546 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1549 (*frag)->priority = skb->priority;
1554 frag = &(*frag)->next;
1560 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1561 struct msghdr *msg, size_t len,
1564 struct sock *sk = chan->sk;
1565 struct l2cap_conn *conn = chan->conn;
1566 struct sk_buff *skb;
1567 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1568 struct l2cap_hdr *lh;
1570 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1572 count = min_t(unsigned int, (conn->mtu - hlen), len);
1574 skb = chan->ops->alloc_skb(chan, count + hlen,
1575 msg->msg_flags & MSG_DONTWAIT, &err);
1578 return ERR_PTR(err);
1580 skb->priority = priority;
1582 /* Create L2CAP header */
1583 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1584 lh->cid = cpu_to_le16(chan->dcid);
1585 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1586 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1588 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1589 if (unlikely(err < 0)) {
1591 return ERR_PTR(err);
/*
 * Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * payload copied from the user iovec. Returns the skb or ERR_PTR.
 * NOTE(review): extract is line-decimated; intermediate lines missing.
 */
1596 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1597 struct msghdr *msg, size_t len,
1600 struct sock *sk = chan->sk;
1601 struct l2cap_conn *conn = chan->conn;
1602 struct sk_buff *skb;
1603 int err, count, hlen = L2CAP_HDR_SIZE;
1604 struct l2cap_hdr *lh;
1606 BT_DBG("sk %p len %d", sk, (int)len);
1608 count = min_t(unsigned int, (conn->mtu - hlen), len);
1610 skb = chan->ops->alloc_skb(chan, count + hlen,
1611 msg->msg_flags & MSG_DONTWAIT, &err);
1614 return ERR_PTR(err);
1616 skb->priority = priority;
1618 /* Create L2CAP header */
1619 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1620 lh->cid = cpu_to_le16(chan->dcid);
1621 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Remaining payload is chained as frag_list continuation skbs. */
1623 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1624 if (unlikely(err < 0)) {
1626 return ERR_PTR(err);
/*
 * Build an ERTM/streaming-mode I-frame PDU: L2CAP header, enhanced or
 * extended control field, optional SDU-length field (for SAR start
 * frames, when @sdulen is used), payload, and a zeroed FCS placeholder
 * when CRC16 is enabled (filled in at transmit time).
 * NOTE(review): extract is line-decimated — e.g. the condition guarding
 * the -ENOTCONN return at 1644 is not visible here.
 */
1631 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1632 struct msghdr *msg, size_t len,
1633 u32 control, u16 sdulen)
1635 struct sock *sk = chan->sk;
1636 struct l2cap_conn *conn = chan->conn;
1637 struct sk_buff *skb;
1638 int err, count, hlen;
1639 struct l2cap_hdr *lh;
1641 BT_DBG("sk %p len %d", sk, (int)len);
1644 return ERR_PTR(-ENOTCONN);
/* Header size depends on whether the extended (4-byte) control field is on. */
1646 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1647 hlen = L2CAP_EXT_HDR_SIZE;
1649 hlen = L2CAP_ENH_HDR_SIZE;
1652 hlen += L2CAP_SDULEN_SIZE;
1654 if (chan->fcs == L2CAP_FCS_CRC16)
1655 hlen += L2CAP_FCS_SIZE;
1657 count = min_t(unsigned int, (conn->mtu - hlen), len);
1659 skb = chan->ops->alloc_skb(chan, count + hlen,
1660 msg->msg_flags & MSG_DONTWAIT, &err);
1663 return ERR_PTR(err);
1665 /* Create L2CAP header */
1666 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1667 lh->cid = cpu_to_le16(chan->dcid);
1668 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Control field width (2 or 4 bytes) chosen by __ctrl_size(). */
1670 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1673 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1675 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1676 if (unlikely(err < 0)) {
1678 return ERR_PTR(err);
/* FCS placeholder: actual CRC16 is computed when the frame is sent. */
1681 if (chan->fcs == L2CAP_FCS_CRC16)
1682 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1684 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the remote MPS into a SAR sequence
 * (START, zero or more CONTINUE, END I-frames), building them on a
 * local queue first so a mid-sequence failure never leaves a partial
 * SDU on the channel tx queue. Only the START frame carries the total
 * SDU length. On success the whole queue is spliced onto chan->tx_q.
 * NOTE(review): extract is line-decimated; loop brackets not visible.
 */
1688 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1690 struct sk_buff *skb;
1691 struct sk_buff_head sar_queue;
1695 skb_queue_head_init(&sar_queue);
1696 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
/* START frame: payload = remote MPS, sdulen = total SDU length. */
1697 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1699 return PTR_ERR(skb);
1701 __skb_queue_tail(&sar_queue, skb);
1702 len -= chan->remote_mps;
1703 size += chan->remote_mps;
1708 if (len > chan->remote_mps) {
1709 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1710 buflen = chan->remote_mps;
1712 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
/* CONTINUE/END frames carry no SDU-length field (sdulen = 0). */
1716 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure drop everything built so far — all-or-nothing semantics. */
1718 skb_queue_purge(&sar_queue);
1719 return PTR_ERR(skb);
1722 __skb_queue_tail(&sar_queue, skb);
1726 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1727 if (chan->tx_send_head == NULL)
1728 chan->tx_send_head = sar_queue.next;
/*
 * Top-level channel send path. Dispatches on channel type/mode:
 * connectionless channels and basic mode send immediately; ERTM and
 * streaming modes queue I-frames (segmenting via SAR when the SDU
 * exceeds the remote MPS) and then kick the per-mode transmit engine.
 * NOTE(review): extract is line-decimated; several case endings and
 * brackets are not visible here.
 */
1733 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1736 struct sk_buff *skb;
1740 /* Connectionless channel */
1741 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1742 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1744 return PTR_ERR(skb);
1746 l2cap_do_send(chan, skb);
1750 switch (chan->mode) {
1751 case L2CAP_MODE_BASIC:
1752 /* Check outgoing MTU */
1753 if (len > chan->omtu)
1756 /* Create a basic PDU */
1757 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1759 return PTR_ERR(skb);
1761 l2cap_do_send(chan, skb);
1765 case L2CAP_MODE_ERTM:
1766 case L2CAP_MODE_STREAMING:
1767 /* Entire SDU fits into one PDU */
1768 if (len <= chan->remote_mps) {
1769 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1770 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1773 return PTR_ERR(skb);
1775 __skb_queue_tail(&chan->tx_q, skb);
1777 if (chan->tx_send_head == NULL)
1778 chan->tx_send_head = skb;
1781 /* Segment SDU into multiples PDUs */
1782 err = l2cap_sar_segment_sdu(chan, msg, len);
1787 if (chan->mode == L2CAP_MODE_STREAMING) {
1788 l2cap_streaming_send(chan);
/* ERTM: defer transmission while the remote is busy or we await an F-bit. */
1793 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1794 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1799 err = l2cap_ertm_send(chan);
1806 BT_DBG("bad state %1.1x", chan->mode);
1813 /* Copy frame to all raw sockets on that connection */
/*
 * Deliver a clone of @skb to every raw (L2CAP_CHAN_RAW) channel on the
 * connection, skipping the socket the frame originated from. Iterates
 * the channel list under RCU; clones use GFP_ATOMIC since this runs in
 * the receive path.
 * NOTE(review): extract is line-decimated; the origin-socket skip test
 * near 1828 is not visible here.
 */
1814 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1816 struct sk_buff *nskb;
1817 struct l2cap_chan *chan;
1819 BT_DBG("conn %p", conn);
1823 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1824 struct sock *sk = chan->sk;
1825 if (chan->chan_type != L2CAP_CHAN_RAW)
1828 /* Don't send frame to the socket it came from */
1831 nskb = skb_clone(skb, GFP_ATOMIC);
/* ops->recv consumes nskb on success; presumably freed here on failure
 * — TODO confirm against the elided lines. */
1835 if (chan->ops->recv(chan->data, nskb))
1842 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling command skb: L2CAP header addressed to the
 * (LE or BR/EDR) signalling CID, command header, then @dlen bytes of
 * @data, fragmented across frag_list continuation skbs when the total
 * exceeds the connection MTU. GFP_ATOMIC throughout.
 * NOTE(review): extract is line-decimated; NULL-check/cleanup paths
 * for the allocations are not visible here.
 */
1843 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1844 u8 code, u8 ident, u16 dlen, void *data)
1846 struct sk_buff *skb, **frag;
1847 struct l2cap_cmd_hdr *cmd;
1848 struct l2cap_hdr *lh;
1851 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1852 conn, code, ident, dlen);
1854 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1855 count = min_t(unsigned int, conn->mtu, len);
1857 skb = bt_skb_alloc(count, GFP_ATOMIC);
1861 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1862 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and classic links. */
1864 if (conn->hcon->type == LE_LINK)
1865 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1867 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1869 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1872 cmd->len = cpu_to_le16(dlen);
/* First chunk of payload fits in the head skb after both headers. */
1875 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1876 memcpy(skb_put(skb, count), data, count);
1882 /* Continuation fragments (no L2CAP header) */
1883 frag = &skb_shinfo(skb)->frag_list;
1885 count = min_t(unsigned int, conn->mtu, len);
1887 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1891 memcpy(skb_put(*frag, count), data, count);
1896 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr. Returns the consumed length
 * (option header + value) via the function result and reports the
 * option type, length, and value through the out-parameters. 1-, 2- and
 * 4-byte values are read inline; anything else is returned as a pointer
 * to the raw value bytes cast into *val.
 * NOTE(review): extract is line-decimated; the switch skeleton and the
 * *ptr advance are not visible here.
 */
1906 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1908 struct l2cap_conf_opt *opt = *ptr;
1911 len = L2CAP_CONF_OPT_SIZE + opt->len;
1919 *val = *((u8 *) opt->val);
1923 *val = get_unaligned_le16(opt->val);
1927 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the value, not a copy. */
1931 *val = (unsigned long) opt->val;
1935 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it. 1-, 2- and 4-byte values are stored inline
 * (unaligned-safe); larger values are memcpy'd from the pointer passed
 * in @val. Mirror of l2cap_get_conf_opt().
 */
1939 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1941 struct l2cap_conf_opt *opt = *ptr;
1943 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1950 *((u8 *) opt->val) = val;
1954 put_unaligned_le16(val, opt->val);
1958 put_unaligned_le32(val, opt->val);
/* @val is a pointer for any length other than 1, 2 or 4. */
1962 memcpy(opt->val, (void *) val, len);
1966 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Append an Extended Flow Specification (EFS) option describing this
 * channel's local QoS parameters. ERTM advertises the channel's
 * configured service type; streaming mode advertises best-effort.
 * NOTE(review): extract is line-decimated; the default case and some
 * streaming-mode field assignments are not visible.
 */
1969 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1971 struct l2cap_conf_efs efs;
1973 switch (chan->mode) {
1974 case L2CAP_MODE_ERTM:
1975 efs.id = chan->local_id;
1976 efs.stype = chan->local_stype;
1977 efs.msdu = cpu_to_le16(chan->local_msdu);
1978 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1979 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1980 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1983 case L2CAP_MODE_STREAMING:
1985 efs.stype = L2CAP_SERV_BESTEFFORT;
1986 efs.msdu = cpu_to_le16(chan->local_msdu);
1987 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1996 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1997 (unsigned long) &efs);
/*
 * Delayed-work handler for the ERTM ack timer: send any pending
 * acknowledgement under the socket lock, then drop the reference the
 * timer held on the channel.
 */
2000 static void l2cap_ack_timeout(struct work_struct *work)
2002 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2005 BT_DBG("chan %p", chan);
2007 lock_sock(chan->sk);
2008 __l2cap_send_ack(chan);
2009 release_sock(chan->sk);
/* Balances the reference taken when the ack timer was armed. */
2011 l2cap_chan_put(chan);
/*
 * Reset per-channel ERTM state: sequence counters to zero, the three
 * ERTM timers (retransmission, monitor, ack) initialised as delayed
 * work, and the SREJ queue/list emptied. Called when a channel enters
 * the connected state in ERTM mode.
 */
2014 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2016 chan->expected_ack_seq = 0;
2017 chan->unacked_frames = 0;
2018 chan->buffer_seq = 0;
2019 chan->num_acked = 0;
2020 chan->frames_sent = 0;
2022 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2023 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2024 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2026 skb_queue_head_init(&chan->srej_q);
2028 INIT_LIST_HEAD(&chan->srej_l);
/*
 * Pick an operating mode: keep ERTM/streaming only if the remote's
 * feature mask supports it, otherwise fall back to basic mode.
 * NOTE(review): the switch header and the supported-mode return are in
 * elided lines; presumably `return mode` when supported — confirm
 * against upstream.
 */
2031 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2034 case L2CAP_MODE_STREAMING:
2035 case L2CAP_MODE_ERTM:
2036 if (l2cap_mode_supported(mode, remote_feat_mask))
2040 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only when high-speed support is enabled
 * and the remote advertised L2CAP_FEAT_EXT_WINDOW. */
2044 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2046 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only when high-speed support is
 * enabled and the remote advertised L2CAP_FEAT_EXT_FLOW. */
2049 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2051 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/*
 * Choose the tx window representation: if the requested window exceeds
 * the default and both sides can do extended windows, switch the
 * channel to the extended control field; otherwise clamp the window to
 * the classic default.
 */
2054 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2056 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2057 __l2cap_ews_supported(chan)) {
2058 /* use extended control field */
2059 set_bit(FLAG_EXT_CTRL, &chan->flags);
2060 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
/* else-branch (elided brace): clamp to the classic 63-frame window. */
2062 chan->tx_win = min_t(u16, chan->tx_win,
2063 L2CAP_DEFAULT_TX_WINDOW);
2064 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/*
 * Build an outgoing Configuration Request into @data: optional MTU
 * option, then a mode-specific RFC option (basic / ERTM / streaming)
 * plus, where applicable, EFS, FCS and EWS options. Returns the total
 * request length (via the elided tail). On the first request the mode
 * may be downgraded based on the remote feature mask.
 * NOTE(review): extract is line-decimated; several braces, fallthroughs
 * and the final return are not visible here.
 */
2068 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2070 struct l2cap_conf_req *req = data;
2071 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2072 void *ptr = req->data;
2075 BT_DBG("chan %p", chan);
/* Mode (re)selection only happens on the very first config exchange. */
2077 if (chan->num_conf_req || chan->num_conf_rsp)
2080 switch (chan->mode) {
2081 case L2CAP_MODE_STREAMING:
2082 case L2CAP_MODE_ERTM:
2083 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2086 if (__l2cap_efs_supported(chan))
2087 set_bit(FLAG_EFS_ENABLE, &chan->flags)
2091 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise MTU when it differs from the spec default. */
2096 if (chan->imtu != L2CAP_DEFAULT_MTU)
2097 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2099 switch (chan->mode) {
2100 case L2CAP_MODE_BASIC:
/* Basic mode: RFC option only needed if remote knows ERTM/streaming. */
2101 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2102 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2105 rfc.mode = L2CAP_MODE_BASIC;
2107 rfc.max_transmit = 0;
2108 rfc.retrans_timeout = 0;
2109 rfc.monitor_timeout = 0;
2110 rfc.max_pdu_size = 0;
2112 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2113 (unsigned long) &rfc);
2116 case L2CAP_MODE_ERTM:
2117 rfc.mode = L2CAP_MODE_ERTM;
2118 rfc.max_transmit = chan->max_tx;
/* Timeouts are set by the acceptor side; requester leaves them zero. */
2119 rfc.retrans_timeout = 0;
2120 rfc.monitor_timeout = 0;
2122 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2123 L2CAP_EXT_HDR_SIZE -
2126 rfc.max_pdu_size = cpu_to_le16(size);
2128 l2cap_txwin_setup(chan);
2130 rfc.txwin_size = min_t(u16, chan->tx_win,
2131 L2CAP_DEFAULT_TX_WINDOW);
2133 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2134 (unsigned long) &rfc);
2136 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2137 l2cap_add_opt_efs(&ptr, chan);
2139 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2142 if (chan->fcs == L2CAP_FCS_NONE ||
2143 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2144 chan->fcs = L2CAP_FCS_NONE;
2145 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
/* EWS option carries the full (extended) tx window when in use. */
2148 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2149 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2153 case L2CAP_MODE_STREAMING:
2154 rfc.mode = L2CAP_MODE_STREAMING;
2156 rfc.max_transmit = 0;
2157 rfc.retrans_timeout = 0;
2158 rfc.monitor_timeout = 0;
2160 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2161 L2CAP_EXT_HDR_SIZE -
2164 rfc.max_pdu_size = cpu_to_le16(size);
2166 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2167 (unsigned long) &rfc);
2169 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2170 l2cap_add_opt_efs(&ptr, chan);
2172 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2175 if (chan->fcs == L2CAP_FCS_NONE ||
2176 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2177 chan->fcs = L2CAP_FCS_NONE;
2178 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2183 req->dcid = cpu_to_le16(chan->dcid);
2184 req->flags = cpu_to_le16(0);
/*
 * Parse the buffered remote Configuration Request (chan->conf_req /
 * conf_len) and build the Configuration Response into @data. Walks each
 * option, records MTU/flush-timeout/RFC/FCS/EFS/EWS, echoes unknown
 * non-hint options back with L2CAP_CONF_UNKNOWN, then negotiates the
 * channel mode and fills the response options. Returns -ECONNREFUSED on
 * irreconcilable settings; otherwise the response length via the elided
 * tail.
 * NOTE(review): extract is line-decimated; many braces, `break`s and
 * intermediate checks are not visible — verify against upstream before
 * changing anything here.
 */
2189 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2191 struct l2cap_conf_rsp *rsp = data;
2192 void *ptr = rsp->data;
2193 void *req = chan->conf_req;
2194 int len = chan->conf_len;
2195 int type, hint, olen;
2197 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2198 struct l2cap_conf_efs efs;
2200 u16 mtu = L2CAP_DEFAULT_MTU;
2201 u16 result = L2CAP_CONF_SUCCESS;
2204 BT_DBG("chan %p", chan);
/* Pass 1: decode every option in the buffered request. */
2206 while (len >= L2CAP_CONF_OPT_SIZE) {
2207 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit: unknown hints are ignored, unknown non-hints are rejected. */
2209 hint = type & L2CAP_CONF_HINT;
2210 type &= L2CAP_CONF_MASK;
2213 case L2CAP_CONF_MTU:
2217 case L2CAP_CONF_FLUSH_TO:
2218 chan->flush_to = val;
2221 case L2CAP_CONF_QOS:
2224 case L2CAP_CONF_RFC:
2225 if (olen == sizeof(rfc))
2226 memcpy(&rfc, (void *) val, olen);
2229 case L2CAP_CONF_FCS:
2230 if (val == L2CAP_FCS_NONE)
2231 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2234 case L2CAP_CONF_EFS:
2236 if (olen == sizeof(efs))
2237 memcpy(&efs, (void *) val, olen);
2240 case L2CAP_CONF_EWS:
/* EWS without high-speed support is a hard refusal (elided condition). */
2242 return -ECONNREFUSED;
2244 set_bit(FLAG_EXT_CTRL, &chan->flags);
2245 set_bit(CONF_EWS_RECV, &chan->conf_state);
2246 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2247 chan->remote_tx_win = val;
2254 result = L2CAP_CONF_UNKNOWN;
2255 *((u8 *) ptr++) = type;
/* Mode negotiation only on the first exchange. */
2260 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2263 switch (chan->mode) {
2264 case L2CAP_MODE_STREAMING:
2265 case L2CAP_MODE_ERTM:
2266 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2267 chan->mode = l2cap_select_mode(rfc.mode,
2268 chan->conn->feat_mask);
2273 if (__l2cap_efs_supported(chan))
2274 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2276 return -ECONNREFUSED;
2279 if (chan->mode != rfc.mode)
2280 return -ECONNREFUSED;
2286 if (chan->mode != rfc.mode) {
2287 result = L2CAP_CONF_UNACCEPT;
2288 rfc.mode = chan->mode;
/* Second disagreement on mode is fatal. */
2290 if (chan->num_conf_rsp == 1)
2291 return -ECONNREFUSED;
2293 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2294 sizeof(rfc), (unsigned long) &rfc);
2297 if (result == L2CAP_CONF_SUCCESS) {
2298 /* Configure output options and let the other side know
2299 * which ones we don't like. */
2301 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2302 result = L2CAP_CONF_UNACCEPT;
2305 set_bit(CONF_MTU_DONE, &chan->conf_state);
2307 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service-type mismatch (neither side no-traffic) is unacceptable. */
2310 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2311 efs.stype != L2CAP_SERV_NOTRAFIC &&
2312 efs.stype != chan->local_stype) {
2314 result = L2CAP_CONF_UNACCEPT;
2316 if (chan->num_conf_req >= 1)
2317 return -ECONNREFUSED;
2319 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2321 (unsigned long) &efs);
2323 /* Send PENDING Conf Rsp */
2324 result = L2CAP_CONF_PENDING;
2325 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2330 case L2CAP_MODE_BASIC:
2331 chan->fcs = L2CAP_FCS_NONE;
2332 set_bit(CONF_MODE_DONE, &chan->conf_state);
2335 case L2CAP_MODE_ERTM:
2336 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2337 chan->remote_tx_win = rfc.txwin_size;
2339 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2341 chan->remote_max_tx = rfc.max_transmit;
2343 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2345 L2CAP_EXT_HDR_SIZE -
2348 rfc.max_pdu_size = cpu_to_le16(size);
2349 chan->remote_mps = size;
/* Acceptor fills in the retrans/monitor timeouts with defaults. */
2351 rfc.retrans_timeout =
2352 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2353 rfc.monitor_timeout =
2354 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2356 set_bit(CONF_MODE_DONE, &chan->conf_state);
2358 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2359 sizeof(rfc), (unsigned long) &rfc);
2361 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2362 chan->remote_id = efs.id;
2363 chan->remote_stype = efs.stype;
2364 chan->remote_msdu = le16_to_cpu(efs.msdu);
2365 chan->remote_flush_to =
2366 le32_to_cpu(efs.flush_to);
2367 chan->remote_acc_lat =
2368 le32_to_cpu(efs.acc_lat);
2369 chan->remote_sdu_itime =
2370 le32_to_cpu(efs.sdu_itime);
2371 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2372 sizeof(efs), (unsigned long) &efs);
2376 case L2CAP_MODE_STREAMING:
2377 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2379 L2CAP_EXT_HDR_SIZE -
2382 rfc.max_pdu_size = cpu_to_le16(size);
2383 chan->remote_mps = size;
2385 set_bit(CONF_MODE_DONE, &chan->conf_state);
2387 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2388 sizeof(rfc), (unsigned long) &rfc);
2393 result = L2CAP_CONF_UNACCEPT;
2395 memset(&rfc, 0, sizeof(rfc));
2396 rfc.mode = chan->mode;
2399 if (result == L2CAP_CONF_SUCCESS)
2400 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2402 rsp->scid = cpu_to_le16(chan->dcid);
2403 rsp->result = cpu_to_le16(result);
2404 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse a remote Configuration Response (@rsp/@len) and build our
 * follow-up Configuration Request into @data, adjusting local channel
 * state (imtu, flush timeout, mode, tx window, ERTM timers, EFS) to the
 * values the remote accepted or proposed. May downgrade *result and
 * returns -ECONNREFUSED on irreconcilable settings.
 * NOTE(review): extract is line-decimated; switch skeletons, `break`s
 * and the final return are not visible here.
 */
2409 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2411 struct l2cap_conf_req *req = data;
2412 void *ptr = req->data;
2415 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2416 struct l2cap_conf_efs efs;
2418 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2420 while (len >= L2CAP_CONF_OPT_SIZE) {
2421 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2424 case L2CAP_CONF_MTU:
/* Remote proposed an MTU below the legal minimum: reject but echo the
 * minimum so the exchange can converge. */
2425 if (val < L2CAP_DEFAULT_MIN_MTU) {
2426 *result = L2CAP_CONF_UNACCEPT;
2427 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2430 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2433 case L2CAP_CONF_FLUSH_TO:
2434 chan->flush_to = val;
2435 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2439 case L2CAP_CONF_RFC:
2440 if (olen == sizeof(rfc))
2441 memcpy(&rfc, (void *)val, olen);
/* A state-2 device must not let the remote change the mode. */
2443 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2444 rfc.mode != chan->mode)
2445 return -ECONNREFUSED;
2449 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2450 sizeof(rfc), (unsigned long) &rfc);
2453 case L2CAP_CONF_EWS:
2454 chan->tx_win = min_t(u16, val,
2455 L2CAP_DEFAULT_EXT_WINDOW);
2456 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2460 case L2CAP_CONF_EFS:
2461 if (olen == sizeof(efs))
2462 memcpy(&efs, (void *)val, olen);
2464 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2465 efs.stype != L2CAP_SERV_NOTRAFIC &&
2466 efs.stype != chan->local_stype)
2467 return -ECONNREFUSED;
2469 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2470 sizeof(efs), (unsigned long) &efs);
/* Basic mode cannot be renegotiated away by the remote. */
2475 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2476 return -ECONNREFUSED;
2478 chan->mode = rfc.mode;
2480 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2482 case L2CAP_MODE_ERTM:
2483 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2484 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2485 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2487 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2488 chan->local_msdu = le16_to_cpu(efs.msdu);
2489 chan->local_sdu_itime =
2490 le32_to_cpu(efs.sdu_itime);
2491 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2492 chan->local_flush_to =
2493 le32_to_cpu(efs.flush_to);
2497 case L2CAP_MODE_STREAMING:
2498 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2502 req->dcid = cpu_to_le16(chan->dcid);
2503 req->flags = cpu_to_le16(0x0000);
/*
 * Fill a minimal Configuration Response header (scid/result/flags) into
 * @data; the elided tail presumably returns the response length.
 */
2508 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2510 struct l2cap_conf_rsp *rsp = data;
2511 void *ptr = rsp->data;
2513 BT_DBG("chan %p", chan);
/* The response's "source CID" is the remote's CID, i.e. our dcid. */
2515 rsp->scid = cpu_to_le16(chan->dcid);
2516 rsp->result = cpu_to_le16(result);
2517 rsp->flags = cpu_to_le16(flags);
/*
 * Send the deferred Connection Response (success) for a channel whose
 * acceptance was postponed (defer_setup), then kick off configuration
 * by sending our Configuration Request — unless one was already sent
 * (CONF_REQ_SENT test-and-set).
 */
2522 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2524 struct l2cap_conn_rsp rsp;
2525 struct l2cap_conn *conn = chan->conn;
2528 rsp.scid = cpu_to_le16(chan->dcid);
2529 rsp.dcid = cpu_to_le16(chan->scid);
2530 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2531 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* chan->ident is the ident of the original Connection Request. */
2532 l2cap_send_cmd(conn, chan->ident,
2533 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2535 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2538 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2539 l2cap_build_conf_req(chan, buf), buf);
2540 chan->num_conf_req++;
/*
 * Extract the RFC option from a successful Configuration Response and
 * apply its timers/MPS to the channel. If the remote misbehaved and
 * sent no RFC option, fall back to sane defaults (and log it). Only
 * meaningful for ERTM/streaming channels.
 * NOTE(review): extract is line-decimated; the "option found" shortcut
 * around 2560-2565 is not fully visible.
 */
2543 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2547 struct l2cap_conf_rfc rfc;
2549 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2551 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2554 while (len >= L2CAP_CONF_OPT_SIZE) {
2555 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2558 case L2CAP_CONF_RFC:
2559 if (olen == sizeof(rfc))
2560 memcpy(&rfc, (void *)val, olen);
2565 /* Use sane default values in case a misbehaving remote device
2566 * did not send an RFC option.
2568 rfc.mode = chan->mode;
2569 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2570 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2571 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2573 BT_ERR("Expected RFC option was not found, using defaults");
2577 case L2CAP_MODE_ERTM:
2578 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2579 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2580 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2582 case L2CAP_MODE_STREAMING:
2583 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle a Command Reject. If it rejects our outstanding Information
 * Request (matching ident), treat the feature-mask exchange as done and
 * resume starting any pending connections.
 */
2587 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2589 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2591 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2594 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2595 cmd->ident == conn->info_ident) {
2596 cancel_delayed_work(&conn->info_timer);
2598 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2599 conn->info_ident = 0;
2601 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request. Finds a listening channel for
 * the PSM, applies security/backlog/duplicate-dcid checks, creates the
 * new child channel, and replies with success, pending (authentication
 * or deferred accept), or an error result. If the feature-mask exchange
 * has not happened yet, it is started here and the response stays
 * pending.
 * NOTE(review): extract is heavily line-decimated — error-path labels,
 * lock/unlock pairs and several `goto`s are not visible; be very
 * careful comparing against upstream before modifying.
 */
2607 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2609 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2610 struct l2cap_conn_rsp rsp;
2611 struct l2cap_chan *chan = NULL, *pchan;
2612 struct sock *parent, *sk = NULL;
2613 int result, status = L2CAP_CS_NO_INFO;
2615 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2616 __le16 psm = req->psm;
2618 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2620 /* Check if we have socket listening on psm */
2621 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2623 result = L2CAP_CR_BAD_PSM;
2631 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2632 if (psm != cpu_to_le16(0x0001) &&
2633 !hci_conn_check_link_mode(conn->hcon)) {
2634 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2635 result = L2CAP_CR_SEC_BLOCK;
2639 result = L2CAP_CR_NO_MEM;
2641 /* Check for backlog size */
2642 if (sk_acceptq_is_full(parent)) {
2643 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2647 chan = pchan->ops->new_connection(pchan->data);
2653 /* Check if we already have channel with that dcid */
2654 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2655 sock_set_flag(sk, SOCK_ZAPPED);
2656 chan->ops->close(chan->data);
2660 hci_conn_hold(conn->hcon);
2662 bacpy(&bt_sk(sk)->src, conn->src);
2663 bacpy(&bt_sk(sk)->dst, conn->dst);
2667 bt_accept_enqueue(parent, sk);
2669 l2cap_chan_add(conn, chan);
2673 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident so a deferred response can reuse it. */
2675 chan->ident = cmd->ident;
2677 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2678 if (l2cap_chan_check_security(chan)) {
2679 if (bt_sk(sk)->defer_setup) {
2680 l2cap_state_change(chan, BT_CONNECT2);
2681 result = L2CAP_CR_PEND;
2682 status = L2CAP_CS_AUTHOR_PEND;
/* Wake the listener so userspace can accept() and decide. */
2683 parent->sk_data_ready(parent, 0);
2685 l2cap_state_change(chan, BT_CONFIG);
2686 result = L2CAP_CR_SUCCESS;
2687 status = L2CAP_CS_NO_INFO;
2690 l2cap_state_change(chan, BT_CONNECT2);
2691 result = L2CAP_CR_PEND;
2692 status = L2CAP_CS_AUTHEN_PEND;
2695 l2cap_state_change(chan, BT_CONNECT2);
2696 result = L2CAP_CR_PEND;
2697 status = L2CAP_CS_NO_INFO;
2701 release_sock(parent);
2704 rsp.scid = cpu_to_le16(scid);
2705 rsp.dcid = cpu_to_le16(dcid);
2706 rsp.result = cpu_to_le16(result);
2707 rsp.status = cpu_to_le16(status);
2708 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending without info: start the feature-mask exchange now. */
2710 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2711 struct l2cap_info_req info;
2712 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2714 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2715 conn->info_ident = l2cap_get_ident(conn);
2717 schedule_delayed_work(&conn->info_timer,
2718 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2720 l2cap_send_cmd(conn, conn->info_ident,
2721 L2CAP_INFO_REQ, sizeof(info), &info);
2724 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2725 result == L2CAP_CR_SUCCESS) {
2727 set_bit(CONF_REQ_SENT, &chan->conf_state);
2728 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2729 l2cap_build_conf_req(chan, buf), buf);
2730 chan->num_conf_req++;
/*
 * Handle a Connection Response. Locates our channel by scid (or by the
 * command ident while still pending), then: on success, moves to
 * BT_CONFIG and sends our Configuration Request; on pending, marks the
 * connect as pending; otherwise tears the channel down with
 * ECONNREFUSED.
 * NOTE(review): extract is line-decimated; switch skeleton and lock
 * handling are not visible.
 */
2736 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2738 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2739 u16 scid, dcid, result, status;
2740 struct l2cap_chan *chan;
2744 scid = __le16_to_cpu(rsp->scid);
2745 dcid = __le16_to_cpu(rsp->dcid);
2746 result = __le16_to_cpu(rsp->result);
2747 status = __le16_to_cpu(rsp->status);
2749 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2752 chan = l2cap_get_chan_by_scid(conn, scid);
/* scid may be 0 while pending; fall back to matching the request ident. */
2756 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2764 case L2CAP_CR_SUCCESS:
2765 l2cap_state_change(chan, BT_CONFIG);
2768 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2770 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2773 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2774 l2cap_build_conf_req(chan, req), req);
2775 chan->num_conf_req++;
2779 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2783 l2cap_chan_del(chan, ECONNREFUSED);
/*
 * Choose the final FCS setting after configuration: FCS is only
 * meaningful in ERTM/streaming mode, and defaults to CRC16 unless the
 * remote explicitly asked for none.
 */
2791 static inline void set_default_fcs(struct l2cap_chan *chan)
2793 /* FCS is enabled only in ERTM or streaming mode, if one or both
2796 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2797 chan->fcs = L2CAP_FCS_NONE;
2798 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2799 chan->fcs = L2CAP_FCS_CRC16;
/*
 * Handle a Configuration Request. Rejects requests for channels not in
 * BT_CONFIG/BT_CONNECT2 (invalid CID) and oversized option buffers;
 * accumulates fragmented requests (continuation flag) into
 * chan->conf_req; on the final fragment parses the options, sends the
 * response, and — once both directions are configured — moves the
 * channel to BT_CONNECTED (initialising ERTM state if needed).
 * NOTE(review): extract is line-decimated; gotos, labels, unlocks and
 * the request-buffer reset are not visible here.
 */
2802 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2804 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2807 struct l2cap_chan *chan;
2811 dcid = __le16_to_cpu(req->dcid);
2812 flags = __le16_to_cpu(req->flags);
2814 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2816 chan = l2cap_get_chan_by_scid(conn, dcid);
2822 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2823 struct l2cap_cmd_rej_cid rej;
2825 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2826 rej.scid = cpu_to_le16(chan->scid);
2827 rej.dcid = cpu_to_le16(chan->dcid);
2829 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2834 /* Reject if config buffer is too small. */
2835 len = cmd_len - sizeof(*req);
2836 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2837 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2838 l2cap_build_conf_rsp(chan, rsp,
2839 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment into the per-channel buffer. */
2844 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2845 chan->conf_len += len;
2847 if (flags & 0x0001) {
2848 /* Incomplete config. Send empty response. */
2849 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2850 l2cap_build_conf_rsp(chan, rsp,
2851 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2855 /* Complete config. */
2856 len = l2cap_parse_conf_req(chan, rsp);
2858 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2862 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2863 chan->num_conf_rsp++;
2865 /* Reset config buffer. */
2868 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: channel goes live. */
2871 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2872 set_default_fcs(chan);
2874 l2cap_state_change(chan, BT_CONNECTED);
2876 chan->next_tx_seq = 0;
2877 chan->expected_tx_seq = 0;
2878 skb_queue_head_init(&chan->tx_q);
2879 if (chan->mode == L2CAP_MODE_ERTM)
2880 l2cap_ertm_init(chan);
2882 l2cap_chan_ready(chan);
2886 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2888 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2889 l2cap_build_conf_req(chan, buf), buf);
2890 chan->num_conf_req++;
2893 /* Got Conf Rsp PENDING from remote side and asume we sent
2894 Conf Rsp PENDING in the code above */
2895 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2896 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2898 /* check compatibility */
2900 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2901 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2903 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2904 l2cap_build_conf_rsp(chan, rsp,
2905 L2CAP_CONF_SUCCESS, 0x0000), rsp);
/*
 * Handle a Configuration Response. On success, apply the RFC settings;
 * on pending, possibly resolve a mutual-pending state; on unaccept,
 * re-parse and retry a bounded number of times; anything else
 * disconnects the channel. When input and output configuration are both
 * done, move the channel to BT_CONNECTED.
 * NOTE(review): extract is line-decimated; locks, gotos and the
 * continuation-flag check before CONF_INPUT_DONE are not visible here.
 */
2913 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2915 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2916 u16 scid, flags, result;
2917 struct l2cap_chan *chan;
2919 int len = cmd->len - sizeof(*rsp);
2921 scid = __le16_to_cpu(rsp->scid);
2922 flags = __le16_to_cpu(rsp->flags);
2923 result = __le16_to_cpu(rsp->result);
2925 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2926 scid, flags, result);
2928 chan = l2cap_get_chan_by_scid(conn, scid);
2935 case L2CAP_CONF_SUCCESS:
2936 l2cap_conf_rfc_get(chan, rsp->data, len);
2937 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2940 case L2CAP_CONF_PENDING:
2941 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
/* Both sides pending: parse the remote's view and answer success. */
2943 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2946 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2949 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2953 /* check compatibility */
2955 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2956 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2958 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2959 l2cap_build_conf_rsp(chan, buf,
2960 L2CAP_CONF_SUCCESS, 0x0000), buf);
2964 case L2CAP_CONF_UNACCEPT:
/* Bounded retry: renegotiate until the response cap is hit. */
2965 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2968 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2969 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2973 /* throw out any old stored conf requests */
2974 result = L2CAP_CONF_SUCCESS;
2975 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2978 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2982 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2983 L2CAP_CONF_REQ, len, req);
2984 chan->num_conf_req++;
2985 if (result != L2CAP_CONF_SUCCESS)
/* Default/failure: flag the socket and disconnect. */
2991 sk->sk_err = ECONNRESET;
2992 __set_chan_timer(chan,
2993 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
2994 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3001 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3003 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3004 set_default_fcs(chan);
3006 l2cap_state_change(chan, BT_CONNECTED);
3007 chan->next_tx_seq = 0;
3008 chan->expected_tx_seq = 0;
3009 skb_queue_head_init(&chan->tx_q);
3010 if (chan->mode == L2CAP_MODE_ERTM)
3011 l2cap_ertm_init(chan);
3013 l2cap_chan_ready(chan);
/*
 * Handle a Disconnection Request: acknowledge with a Disconnection
 * Response (CIDs swapped to our perspective), mark the socket shut
 * down, remove the channel, and invoke its close callback.
 * NOTE(review): extract is line-decimated; channel lookup failure path
 * and lock handling are not visible.
 */
3021 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3023 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3024 struct l2cap_disconn_rsp rsp;
3026 struct l2cap_chan *chan;
3029 scid = __le16_to_cpu(req->scid);
3030 dcid = __le16_to_cpu(req->dcid);
3032 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The request's dcid is our scid — look it up from our side. */
3034 chan = l2cap_get_chan_by_scid(conn, dcid);
3040 rsp.dcid = cpu_to_le16(chan->scid);
3041 rsp.scid = cpu_to_le16(chan->dcid);
3042 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3044 sk->sk_shutdown = SHUTDOWN_MASK;
3046 l2cap_chan_del(chan, ECONNRESET);
3049 chan->ops->close(chan->data);
/*
 * Handle a Disconnection Response to our earlier request: find the
 * channel by scid, remove it (no error), and invoke its close callback.
 * NOTE(review): extract is line-decimated; lookup-failure and lock
 * handling paths are not visible.
 */
3053 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3055 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3057 struct l2cap_chan *chan;
3060 scid = __le16_to_cpu(rsp->scid);
3061 dcid = __le16_to_cpu(rsp->dcid);
3063 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3065 chan = l2cap_get_chan_by_scid(conn, scid);
3071 l2cap_chan_del(chan, 0);
3074 chan->ops->close(chan->data);
/*
 * Handle an Information Request. Answers the feature mask (including
 * ERTM/streaming and, with high-speed enabled, extended flow/window
 * bits), the fixed-channels map (A2MP bit conditional on high-speed),
 * or NOT_SUPPORTED for anything else.
 * NOTE(review): extract is line-decimated; local buffer declarations
 * and the enable_hs conditionals' structure are partly elided.
 */
3078 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3080 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3083 type = __le16_to_cpu(req->type);
3085 BT_DBG("type 0x%4.4x", type);
3087 if (type == L2CAP_IT_FEAT_MASK) {
3089 u32 feat_mask = l2cap_feat_mask;
3090 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3091 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3092 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3094 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow/window bits only advertised with high-speed support
 * (elided enable_hs condition). */
3097 feat_mask |= L2CAP_FEAT_EXT_FLOW
3098 | L2CAP_FEAT_EXT_WINDOW;
3100 put_unaligned_le32(feat_mask, rsp->data);
3101 l2cap_send_cmd(conn, cmd->ident,
3102 L2CAP_INFO_RSP, sizeof(buf), buf);
3103 } else if (type == L2CAP_IT_FIXED_CHAN) {
3105 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3108 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3110 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3112 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3113 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3114 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3115 l2cap_send_cmd(conn, cmd->ident,
3116 L2CAP_INFO_RSP, sizeof(buf), buf);
3118 struct l2cap_info_rsp rsp;
3119 rsp.type = cpu_to_le16(type);
3120 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3121 l2cap_send_cmd(conn, cmd->ident,
3122 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Process an Information Response we solicited. On the feature-mask reply
 * we may chase it with a fixed-channel query; once the info exchange is
 * done (success, failure, or fixed-chan reply) mark it complete and kick
 * off any connections that were waiting on it. */
3128 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3130 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3133 type = __le16_to_cpu(rsp->type);
3134 result = __le16_to_cpu(rsp->result);
3136 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3138 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
/* Drop stale/unsolicited responses: ident must match our outstanding
 * request and the exchange must not already be complete. */
3139 if (cmd->ident != conn->info_ident ||
3140 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
/* A response arrived, so the info-request timeout is no longer needed. */
3143 cancel_delayed_work(&conn->info_timer);
3145 if (result != L2CAP_IR_SUCCESS) {
/* Peer refused; treat the exchange as finished and proceed anyway. */
3146 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3147 conn->info_ident = 0;
3149 l2cap_conn_start(conn);
3154 if (type == L2CAP_IT_FEAT_MASK) {
3155 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, query those too before
 * declaring the info exchange done. */
3157 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3158 struct l2cap_info_req req;
3159 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3161 conn->info_ident = l2cap_get_ident(conn);
3163 l2cap_send_cmd(conn, conn->info_ident,
3164 L2CAP_INFO_REQ, sizeof(req), &req);
3166 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3167 conn->info_ident = 0;
3169 l2cap_conn_start(conn);
3171 } else if (type == L2CAP_IT_FIXED_CHAN) {
/* Fixed-channel reply ends the exchange. */
3172 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3173 conn->info_ident = 0;
3175 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request. Currently a stub: validates the
 * command length, logs the request, and always rejects with
 * L2CAP_CR_NO_MEM (no AMP channel creation implemented). */
3181 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3182 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3185 struct l2cap_create_chan_req *req = data;
3186 struct l2cap_create_chan_rsp rsp;
/* Malformed length → reject (return path not visible in this excerpt). */
3189 if (cmd_len != sizeof(*req))
3195 psm = le16_to_cpu(req->psm);
3196 scid = le16_to_cpu(req->scid);
3198 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3200 /* Placeholder: Always reject */
3202 rsp.scid = cpu_to_le16(scid);
3203 rsp.result = L2CAP_CR_NO_MEM;
3204 rsp.status = L2CAP_CS_NO_INFO;
3206 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response shares the wire format of a Connection
 * Response, so it is delegated wholesale to l2cap_connect_rsp(). */
3212 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3213 struct l2cap_cmd_hdr *cmd, void *data)
3215 BT_DBG("conn %p", conn);
3217 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for the given initiator CID with the given
 * result, reusing the ident of the request being answered. */
3220 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3221 u16 icid, u16 result)
3223 struct l2cap_move_chan_rsp rsp;
3225 BT_DBG("icid %d, result %d", icid, result);
3227 rsp.icid = cpu_to_le16(icid);
3228 rsp.result = cpu_to_le16(result);
3230 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirmation. A fresh ident is allocated (this
 * starts a new exchange) and remembered on the channel so the matching
 * confirmation-response can be correlated. */
3233 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3234 struct l2cap_chan *chan, u16 icid, u16 result)
3236 struct l2cap_move_chan_cfm cfm;
3239 BT_DBG("icid %d, result %d", icid, result);
3241 ident = l2cap_get_ident(conn);
/* NOTE(review): callers may pass chan == NULL (see the placeholder in
 * l2cap_move_channel_rsp); this store is presumably guarded by a line
 * missing from this excerpt — verify in the full file. */
3243 chan->ident = ident;
3245 cfm.icid = cpu_to_le16(icid);
3246 cfm.result = cpu_to_le16(result);
3248 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirmation with a confirmation-response
 * carrying the same ident and initiator CID. */
3251 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3254 struct l2cap_move_chan_cfm_rsp rsp;
3256 BT_DBG("icid %d", icid);
3258 rsp.icid = cpu_to_le16(icid);
3259 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request. Stub implementation: after a length
 * check it always refuses the move with L2CAP_MR_NOT_ALLOWED. */
3262 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3263 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3265 struct l2cap_move_chan_req *req = data;
3267 u16 result = L2CAP_MR_NOT_ALLOWED;
3269 if (cmd_len != sizeof(*req))
3272 icid = le16_to_cpu(req->icid);
3274 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3279 /* Placeholder: Always refuse */
3280 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response. Stub: regardless of the result it
 * sends an UNCONFIRMED confirmation (no actual channel move yet). */
3285 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3286 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3288 struct l2cap_move_chan_rsp *rsp = data;
3291 if (cmd_len != sizeof(*rsp))
3294 icid = le16_to_cpu(rsp->icid);
3295 result = le16_to_cpu(rsp->result);
3297 BT_DBG("icid %d, result %d", icid, result);
3299 /* Placeholder: Always unconfirmed */
3300 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirmation: validate length, then simply
 * acknowledge it with a confirmation-response (no move logic yet). */
3305 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3306 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3308 struct l2cap_move_chan_cfm *cfm = data;
3311 if (cmd_len != sizeof(*cfm))
3314 icid = le16_to_cpu(cfm->icid);
3315 result = le16_to_cpu(cfm->result);
3317 BT_DBG("icid %d, result %d", icid, result);
3319 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirmation Response. Currently only validates
 * the length and logs the ICID; nothing else to do in the stub. */
3324 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3325 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3327 struct l2cap_move_chan_cfm_rsp *rsp = data;
3330 if (cmd_len != sizeof(*rsp))
3333 icid = le16_to_cpu(rsp->icid);
3335 BT_DBG("icid %d", icid);
/* Validate LE connection parameters from a Connection Parameter Update
 * Request: interval bounds (min 6..max 3200, min <= max), supervision
 * timeout multiplier 10..3200, timeout strictly larger than 8*interval,
 * and slave latency within both the spec cap (499) and what the timeout
 * allows. Non-zero return on rejection (returns omitted in excerpt). */
3340 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3345 if (min > max || min < 6 || max > 3200)
3348 if (to_multiplier < 10 || to_multiplier > 3200)
/* Supervision timeout must exceed the maximum connection interval;
 * units differ, hence the factor of 8 (10ms vs 1.25ms steps). */
3351 if (max >= to_multiplier * 8)
3354 max_latency = (to_multiplier * 8 / max) - 1;
3355 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request from the slave: only a
 * master may serve it; validate length and parameters, send accept/reject,
 * and on acceptance push the new parameters to the controller. */
3361 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3362 struct l2cap_cmd_hdr *cmd, u8 *data)
3364 struct hci_conn *hcon = conn->hcon;
3365 struct l2cap_conn_param_update_req *req;
3366 struct l2cap_conn_param_update_rsp rsp;
3367 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the connection master may process this request. */
3370 if (!(hcon->link_mode & HCI_LM_MASTER))
3373 cmd_len = __le16_to_cpu(cmd->len);
3374 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3377 req = (struct l2cap_conn_param_update_req *) data;
3378 min = __le16_to_cpu(req->min);
3379 max = __le16_to_cpu(req->max);
3380 latency = __le16_to_cpu(req->latency);
3381 to_multiplier = __le16_to_cpu(req->to_multiplier);
3383 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3384 min, max, latency, to_multiplier);
3386 memset(&rsp, 0, sizeof(rsp));
3388 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3390 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3392 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3394 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: ask the controller to apply the new LE parameters
 * (the guard on err is on a line missing from this excerpt). */
3398 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler based on the
 * command code. Echo requests are answered inline by reflecting the
 * payload; unknown codes are logged (and rejected via the caller path). */
3403 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3404 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3408 switch (cmd->code) {
3409 case L2CAP_COMMAND_REJ:
3410 l2cap_command_rej(conn, cmd, data);
3413 case L2CAP_CONN_REQ:
3414 err = l2cap_connect_req(conn, cmd, data);
3417 case L2CAP_CONN_RSP:
3418 err = l2cap_connect_rsp(conn, cmd, data);
3421 case L2CAP_CONF_REQ:
3422 err = l2cap_config_req(conn, cmd, cmd_len, data);
3425 case L2CAP_CONF_RSP:
3426 err = l2cap_config_rsp(conn, cmd, data);
3429 case L2CAP_DISCONN_REQ:
3430 err = l2cap_disconnect_req(conn, cmd, data);
3433 case L2CAP_DISCONN_RSP:
3434 err = l2cap_disconnect_rsp(conn, cmd, data);
3437 case L2CAP_ECHO_REQ:
/* Echo: reply with the request payload verbatim, same ident. */
3438 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3441 case L2CAP_ECHO_RSP:
3444 case L2CAP_INFO_REQ:
3445 err = l2cap_information_req(conn, cmd, data);
3448 case L2CAP_INFO_RSP:
3449 err = l2cap_information_rsp(conn, cmd, data);
3452 case L2CAP_CREATE_CHAN_REQ:
3453 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3456 case L2CAP_CREATE_CHAN_RSP:
3457 err = l2cap_create_channel_rsp(conn, cmd, data);
3460 case L2CAP_MOVE_CHAN_REQ:
3461 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3464 case L2CAP_MOVE_CHAN_RSP:
3465 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3468 case L2CAP_MOVE_CHAN_CFM:
3469 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3472 case L2CAP_MOVE_CHAN_CFM_RSP:
3473 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3477 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command. Only the connection-parameter update
 * request is actively handled; command-reject and update-response are
 * accepted silently, anything else is logged as unknown. */
3485 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3486 struct l2cap_cmd_hdr *cmd, u8 *data)
3488 switch (cmd->code) {
3489 case L2CAP_COMMAND_REJ:
3492 case L2CAP_CONN_PARAM_UPDATE_REQ:
3493 return l2cap_conn_param_update_req(conn, cmd, data);
3495 case L2CAP_CONN_PARAM_UPDATE_RSP:
3499 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signaling channel payload: iterate over the concatenated
 * command headers in the skb, validating each length/ident, dispatch to
 * the LE or BR/EDR handler by link type, and send a Command Reject when
 * a handler fails. The raw skb is also offered to raw listeners first. */
3504 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3505 struct sk_buff *skb)
3507 u8 *data = skb->data;
3509 struct l2cap_cmd_hdr cmd;
/* Let raw sockets snoop the full signaling PDU before parsing. */
3512 l2cap_raw_recv(conn, skb);
3514 while (len >= L2CAP_CMD_HDR_SIZE) {
/* Copy the header out — 'data' may be unaligned within the skb. */
3516 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3517 data += L2CAP_CMD_HDR_SIZE;
3518 len -= L2CAP_CMD_HDR_SIZE;
3520 cmd_len = le16_to_cpu(cmd.len);
3522 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject truncated commands and the reserved ident value 0. */
3524 if (cmd_len > len || !cmd.ident) {
3525 BT_DBG("corrupted command");
3529 if (conn->hcon->type == LE_LINK)
3530 err = l2cap_le_sig_cmd(conn, &cmd, data);
3532 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3535 struct l2cap_cmd_rej_unk rej;
3537 BT_ERR("Wrong link type (%d)", err);
3539 /* FIXME: Map err to a valid reason */
3540 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3541 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the CRC16 FCS trailer of an ERTM/streaming frame.
 * The CRC covers the L2CAP header (enhanced or extended size, depending
 * on the extended-control flag) plus the payload. Returns non-zero on
 * mismatch (return statements omitted in this excerpt). */
3551 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3553 u16 our_fcs, rcv_fcs;
3556 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3557 hdr_size = L2CAP_EXT_HDR_SIZE;
3559 hdr_size = L2CAP_ENH_HDR_SIZE;
3561 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off first; the received FCS then sits just past the
 * (shortened) payload, and the header precedes skb->data. */
3562 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3563 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3564 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3566 if (our_fcs != rcv_fcs)
/* Respond to a poll while acknowledging received frames: send RNR if we
 * are locally busy, otherwise (re)transmit pending I-frames; if nothing
 * got sent and we are not busy, fall back to an RR so the peer still
 * receives an acknowledgment with the final bit. */
3572 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3576 chan->frames_sent = 0;
3578 control |= __set_reqseq(chan, chan->buffer_seq);
3580 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3581 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3582 l2cap_send_sframe(chan, control);
3583 set_bit(CONN_RNR_SENT, &chan->conn_state);
3586 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3587 l2cap_retransmit_frames(chan);
3589 l2cap_ertm_send(chan);
/* Nothing was sent above and we are not busy: ack with a plain RR. */
3591 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3592 chan->frames_sent == 0) {
3593 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3594 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq offset from buffer_seq. Duplicate sequence numbers are
 * detected (the error-return line is missing from this excerpt). */
3598 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3600 struct sk_buff *next_skb;
3601 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequence number and SAR bits in the skb control block for
 * later reassembly. */
3603 bt_cb(skb)->tx_seq = tx_seq;
3604 bt_cb(skb)->sar = sar;
3606 next_skb = skb_peek(&chan->srej_q);
/* Offsets are computed modulo the sequence space, relative to
 * buffer_seq, so wrap-around compares correctly. */
3608 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3611 if (bt_cb(next_skb)->tx_seq == tx_seq)
3614 next_tx_seq_offset = __seq_offset(chan,
3615 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3617 if (next_tx_seq_offset > tx_seq_offset) {
3618 __skb_queue_before(&chan->srej_q, next_skb, skb);
3622 if (skb_queue_is_last(&chan->srej_q, next_skb))
3625 next_skb = skb_queue_next(&chan->srej_q, next_skb);
/* Largest offset so far: append at the tail. */
3628 __skb_queue_tail(&chan->srej_q, skb);
/* Append new_frag to skb's frag_list, tracking the current tail via
 * *last_frag so appends stay O(1), and keep skb's aggregate length,
 * data_len and truesize accounting consistent. */
3633 static void append_skb_frag(struct sk_buff *skb,
3634 struct sk_buff *new_frag, struct sk_buff **last_frag)
3636 /* skb->len reflects data in skb as well as all fragments
3637 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones chain off the
 * remembered tail. */
3639 if (!skb_has_frag_list(skb))
3640 skb_shinfo(skb)->frag_list = new_frag;
3642 new_frag->next = NULL;
3644 (*last_frag)->next = new_frag;
3645 *last_frag = new_frag;
3647 skb->len += new_frag->len;
3648 skb->data_len += new_frag->len;
3649 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the frame's SAR bits:
 * unsegmented frames are delivered directly; START frames record the SDU
 * length and start a fragment chain; CONTINUE/END frames are appended,
 * and the complete SDU is delivered when its announced length is reached.
 * Oversized or inconsistent SDUs are discarded (error paths are partly
 * missing from this excerpt). */
3652 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3656 switch (__get_ctrl_sar(chan, control)) {
3657 case L2CAP_SAR_UNSEGMENTED:
3661 err = chan->ops->recv(chan->data, skb);
3664 case L2CAP_SAR_START:
/* START frame carries the total SDU length before the payload. */
3668 chan->sdu_len = get_unaligned_le16(skb->data);
3669 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Reject SDUs larger than our receive MTU. */
3671 if (chan->sdu_len > chan->imtu) {
3676 if (skb->len >= chan->sdu_len)
3680 chan->sdu_last_frag = skb;
3686 case L2CAP_SAR_CONTINUE:
3690 append_skb_frag(chan->sdu, skb,
3691 &chan->sdu_last_frag);
/* Middle fragments must not already reach the announced length. */
3694 if (chan->sdu->len >= chan->sdu_len)
3704 append_skb_frag(chan->sdu, skb,
3705 &chan->sdu_last_frag);
/* END: total length must match exactly what START announced. */
3708 if (chan->sdu->len != chan->sdu_len)
3711 err = chan->ops->recv(chan->data, chan->sdu);
3714 /* Reassembly complete */
3716 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and reset reassembly state. */
3724 kfree_skb(chan->sdu);
3726 chan->sdu_last_frag = NULL;
/* Mark the channel locally busy (receiver cannot accept I-frames) and arm
 * the ack timer so an RNR gets sent to the peer. */
3733 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3735 BT_DBG("chan %p, Enter local busy", chan);
3737 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3739 __set_ack_timer(chan);
/* Leave the local-busy state. If we previously told the peer we were busy
 * (RNR sent), send an RR with the poll bit and enter the WAIT_F recovery
 * handshake; in any case clear the busy/RNR flags. */
3742 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
/* If no RNR went out, the peer never saw us busy — nothing to undo. */
3746 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3749 control = __set_reqseq(chan, chan->buffer_seq);
3750 control |= __set_ctrl_poll(chan);
3751 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3752 l2cap_send_sframe(chan, control);
3753 chan->retry_count = 1;
/* Poll sent: stop retransmission timer, start the monitor timer and
 * wait for the peer's F-bit response. */
3755 __clear_retrans_timer(chan);
3756 __set_monitor_timer(chan);
3758 set_bit(CONN_WAIT_F, &chan->conn_state);
3761 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3762 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3764 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer to toggle receive-side busy state;
 * only meaningful in ERTM mode. */
3767 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3769 if (chan->mode == L2CAP_MODE_ERTM) {
3771 l2cap_ertm_enter_local_busy(chan);
3773 l2cap_ertm_exit_local_busy(chan);
/* After a missing frame arrives, drain the SREJ queue: deliver queued
 * frames in sequence order for as long as they are contiguous with
 * tx_seq and we are not locally busy, advancing buffer_seq_srej. */
3777 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3779 struct sk_buff *skb;
3782 while ((skb = skb_peek(&chan->srej_q)) &&
3783 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Stop at the first gap: queue head is not the next expected seq. */
3786 if (bt_cb(skb)->tx_seq != tx_seq)
3789 skb = skb_dequeue(&chan->srej_q);
3790 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3791 err = l2cap_reassemble_sdu(chan, skb, control);
/* Reassembly failure is unrecoverable — disconnect. */
3794 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3798 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3799 tx_seq = __next_seq(chan, tx_seq);
/* Re-send SREJ frames for every entry in the srej list up to (and
 * stopping at) tx_seq, rotating each re-requested entry to the list tail.
 * NOTE(review): the entry-removal/free for the matched tx_seq appears on
 * lines missing from this excerpt — verify against the full file. */
3803 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3805 struct srej_list *l, *tmp;
3808 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3809 if (l->tx_seq == tx_seq) {
3814 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3815 control |= __set_reqseq(chan, l->tx_seq);
3816 l2cap_send_sframe(chan, control);
/* Keep the entry but move it to the back of the pending list. */
3818 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing sequence number between expected_tx_seq and
 * the received tx_seq, recording each request in the srej list
 * (GFP_ATOMIC allocation; failure return is on a missing line). Finally
 * advance expected_tx_seq past the received frame. */
3822 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3824 struct srej_list *new;
3827 while (tx_seq != chan->expected_tx_seq) {
3828 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3829 control |= __set_reqseq(chan, chan->expected_tx_seq);
3830 l2cap_send_sframe(chan, control);
3832 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3836 new->tx_seq = chan->expected_tx_seq;
3838 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3840 list_add_tail(&new->list, &chan->srej_l);
/* Skip over the frame we actually received. */
3843 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive path. Handles: F-bit resolution of a WAIT_F
 * poll, acking of outstanding frames via req_seq, invalid/duplicate
 * tx_seq detection, the SREJ_SENT recovery state (queueing out-of-order
 * frames and filling gaps), entering SREJ on a fresh gap, and finally
 * in-order delivery with periodic acknowledgment. Several branch/return
 * and goto lines are missing from this excerpt. */
3848 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3850 u16 tx_seq = __get_txseq(chan, rx_control);
3851 u16 req_seq = __get_reqseq(chan, rx_control);
3852 u8 sar = __get_ctrl_sar(chan, rx_control);
3853 int tx_seq_offset, expected_tx_seq_offset;
/* Heuristic ack threshold: ack after roughly 1/6 of the tx window. */
3854 int num_to_ack = (chan->tx_win/6) + 1;
3857 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3858 tx_seq, rx_control);
/* F-bit answers our earlier poll: leave WAIT_F, restart retransmit
 * timer if frames are still unacked. */
3860 if (__is_ctrl_final(chan, rx_control) &&
3861 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3862 __clear_monitor_timer(chan);
3863 if (chan->unacked_frames > 0)
3864 __set_retrans_timer(chan);
3865 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* Piggybacked req_seq acknowledges our transmitted frames. */
3868 chan->expected_ack_seq = req_seq;
3869 l2cap_drop_acked_frames(chan);
3871 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3873 /* invalid tx_seq */
3874 if (tx_seq_offset >= chan->tx_win) {
3875 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Locally busy: do not accept the frame, but keep acking. */
3879 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3880 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3881 l2cap_send_ack(chan);
3885 if (tx_seq == chan->expected_tx_seq)
/* SREJ recovery in progress: either this frame fills the oldest
 * gap (deliver and drain), or it is another out-of-order frame. */
3888 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3889 struct srej_list *first;
3891 first = list_first_entry(&chan->srej_l,
3892 struct srej_list, list);
3893 if (tx_seq == first->tx_seq) {
3894 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3895 l2cap_check_srej_gap(chan, tx_seq);
3897 list_del(&first->list);
/* All gaps filled: leave SREJ_SENT and resync buffer_seq. */
3900 if (list_empty(&chan->srej_l)) {
3901 chan->buffer_seq = chan->buffer_seq_srej;
3902 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3903 l2cap_send_ack(chan);
3904 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3907 struct srej_list *l;
3909 /* duplicated tx_seq */
3910 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
/* Already SREJ'd this seq? Then resend the pending SREJs. */
3913 list_for_each_entry(l, &chan->srej_l, list) {
3914 if (l->tx_seq == tx_seq) {
3915 l2cap_resend_srejframe(chan, tx_seq);
3920 err = l2cap_send_srejframe(chan, tx_seq);
3922 l2cap_send_disconn_req(chan->conn, chan, -err);
/* Not in SREJ yet: a gap was just detected. */
3927 expected_tx_seq_offset = __seq_offset(chan,
3928 chan->expected_tx_seq, chan->buffer_seq);
3930 /* duplicated tx_seq */
3931 if (tx_seq_offset < expected_tx_seq_offset)
3934 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3936 BT_DBG("chan %p, Enter SREJ", chan);
3938 INIT_LIST_HEAD(&chan->srej_l);
3939 chan->buffer_seq_srej = chan->buffer_seq;
3941 __skb_queue_head_init(&chan->srej_q);
3942 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3944 /* Set P-bit only if there are some I-frames to ack. */
3945 if (__clear_ack_timer(chan))
3946 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3948 err = l2cap_send_srejframe(chan, tx_seq);
3950 l2cap_send_disconn_req(chan->conn, chan, -err);
/* In-order frame: advance expectation and deliver. */
3957 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3959 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3960 bt_cb(skb)->tx_seq = tx_seq;
3961 bt_cb(skb)->sar = sar;
3962 __skb_queue_tail(&chan->srej_q, skb);
3966 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3967 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3970 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3974 if (__is_ctrl_final(chan, rx_control)) {
3975 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3976 l2cap_retransmit_frames(chan);
/* Ack every num_to_ack frames; otherwise defer via the ack timer. */
3980 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3981 if (chan->num_acked == num_to_ack - 1)
3982 l2cap_send_ack(chan);
3984 __set_ack_timer(chan);
/* Handle a received RR (Receiver Ready) S-frame: ack our frames via
 * req_seq, then branch on poll/final bits — a poll demands an immediate
 * F-bit response (SREJ tail or RR/RNR/I), a final bit resolves a pending
 * REJ, and a plain RR clears remote-busy and resumes transmission. */
3993 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3995 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3996 __get_reqseq(chan, rx_control), rx_control);
3998 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3999 l2cap_drop_acked_frames(chan);
4001 if (__is_ctrl_poll(chan, rx_control)) {
/* Peer polled us: the next frame we send must carry the F-bit. */
4002 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4003 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4004 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4005 (chan->unacked_frames > 0))
4006 __set_retrans_timer(chan);
4008 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4009 l2cap_send_srejtail(chan);
4011 l2cap_send_i_or_rr_or_rnr(chan);
4014 } else if (__is_ctrl_final(chan, rx_control)) {
4015 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F-bit: retransmit unless a REJ already triggered it. */
4017 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4018 l2cap_retransmit_frames(chan);
4021 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4022 (chan->unacked_frames > 0))
4023 __set_retrans_timer(chan);
4025 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4026 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4027 l2cap_send_ack(chan);
4029 l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: the peer rejects everything from req_seq
 * onward. Ack up to that point, then retransmit; if we are mid-poll
 * (WAIT_F) remember the REJ so the F-bit response does not retransmit a
 * second time. */
4033 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4035 u16 tx_seq = __get_reqseq(chan, rx_control);
4037 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4039 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4041 chan->expected_ack_seq = tx_seq;
4042 l2cap_drop_acked_frames(chan);
4044 if (__is_ctrl_final(chan, rx_control)) {
/* F-bit REJ: retransmit only if a REJ did not already do so. */
4045 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4046 l2cap_retransmit_frames(chan);
4048 l2cap_retransmit_frames(chan);
4050 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4051 set_bit(CONN_REJ_ACT, &chan->conn_state);
4054 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4056 u16 tx_seq = __get_reqseq(chan, rx_control);
4058 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4060 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4062 if (__is_ctrl_poll(chan, rx_control)) {
4063 chan->expected_ack_seq = tx_seq;
4064 l2cap_drop_acked_frames(chan);
4066 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4067 l2cap_retransmit_one_frame(chan, tx_seq);
4069 l2cap_ertm_send(chan);
4071 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4072 chan->srej_save_reqseq = tx_seq;
4073 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4075 } else if (__is_ctrl_final(chan, rx_control)) {
4076 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4077 chan->srej_save_reqseq == tx_seq)
4078 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4080 l2cap_retransmit_one_frame(chan, tx_seq);
4082 l2cap_retransmit_one_frame(chan, tx_seq);
4083 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4084 chan->srej_save_reqseq = tx_seq;
4085 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a received RNR S-frame: the peer is busy. Mark remote-busy, ack
 * up to req_seq, and if the peer polled respond with the F-bit (RR or
 * SREJ tail depending on SREJ state). */
4090 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4092 u16 tx_seq = __get_reqseq(chan, rx_control);
4094 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4096 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4097 chan->expected_ack_seq = tx_seq;
4098 l2cap_drop_acked_frames(chan);
4100 if (__is_ctrl_poll(chan, rx_control))
4101 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4103 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* Peer is busy: no point retransmitting until it recovers. */
4104 __clear_retrans_timer(chan);
4105 if (__is_ctrl_poll(chan, rx_control))
4106 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4110 if (__is_ctrl_poll(chan, rx_control)) {
4111 l2cap_send_srejtail(chan);
4113 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4114 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler after first
 * resolving an outstanding WAIT_F poll via the F-bit (same logic as the
 * I-frame path). */
4118 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4120 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4122 if (__is_ctrl_final(chan, rx_control) &&
4123 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4124 __clear_monitor_timer(chan);
4125 if (chan->unacked_frames > 0)
4126 __set_retrans_timer(chan);
4127 clear_bit(CONN_WAIT_F, &chan->conn_state);
4130 switch (__get_ctrl_super(chan, rx_control)) {
4131 case L2CAP_SUPER_RR:
4132 l2cap_data_channel_rrframe(chan, rx_control);
4135 case L2CAP_SUPER_REJ:
4136 l2cap_data_channel_rejframe(chan, rx_control);
4139 case L2CAP_SUPER_SREJ:
4140 l2cap_data_channel_srejframe(chan, rx_control);
4143 case L2CAP_SUPER_RNR:
4144 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for an ERTM-mode PDU: strip the control field, verify FCS,
 * sanity-check the payload length against MPS and the req_seq against
 * the unacked window, then route to the I-frame or S-frame handler. */
4152 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4156 int len, next_tx_seq_offset, req_seq_offset;
4158 control = __get_control(chan, skb->data);
4159 skb_pull(skb, __ctrl_size(chan));
4163 * We can just drop the corrupted I-frame here.
4164 * Receiver will miss it and start proper recovery
4165 * procedures and ask retransmission.
4167 if (l2cap_check_fcs(chan, skb))
/* Compute the pure payload length: SDU-length header on SAR-start
 * I-frames and the FCS trailer do not count. */
4170 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4171 len -= L2CAP_SDULEN_SIZE;
4173 if (chan->fcs == L2CAP_FCS_CRC16)
4174 len -= L2CAP_FCS_SIZE;
4176 if (len > chan->mps) {
4177 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4181 req_seq = __get_reqseq(chan, control);
4183 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4185 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4186 chan->expected_ack_seq);
4188 /* check for invalid req-seq */
4189 if (req_seq_offset > next_tx_seq_offset) {
4190 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4194 if (!__is_sframe(chan, control)) {
/* I-frame with zero payload is a protocol violation. */
4196 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4200 l2cap_data_channel_iframe(chan, control, skb);
/* S-frame must carry no payload. */
4204 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4208 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a connection-oriented data PDU to the channel identified by
 * cid, dispatching by channel mode: basic (direct delivery with MTU
 * check), ERTM (full state machine), or streaming (in-sequence
 * reassembly, silently discarding partial SDUs across gaps). */
4218 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4220 struct l2cap_chan *chan;
4221 struct sock *sk = NULL;
4226 chan = l2cap_get_chan_by_scid(conn, cid);
4228 BT_DBG("unknown cid 0x%4.4x", cid);
4234 BT_DBG("chan %p, len %d", chan, skb->len);
4236 if (chan->state != BT_CONNECTED)
4239 switch (chan->mode) {
4240 case L2CAP_MODE_BASIC:
4241 /* If socket recv buffers overflows we drop data here
4242 * which is *bad* because L2CAP has to be reliable.
4243 * But we don't have any other choice. L2CAP doesn't
4244 * provide flow control mechanism. */
4246 if (chan->imtu < skb->len)
4249 if (!chan->ops->recv(chan->data, skb))
4253 case L2CAP_MODE_ERTM:
4254 l2cap_ertm_data_rcv(chan, skb);
4258 case L2CAP_MODE_STREAMING:
4259 control = __get_control(chan, skb->data);
4260 skb_pull(skb, __ctrl_size(chan));
4263 if (l2cap_check_fcs(chan, skb))
4266 if (__is_sar_start(chan, control))
4267 len -= L2CAP_SDULEN_SIZE;
4269 if (chan->fcs == L2CAP_FCS_CRC16)
4270 len -= L2CAP_FCS_SIZE;
/* Streaming frames carry no S-frames and must fit in the MPS. */
4272 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4275 tx_seq = __get_txseq(chan, control);
4277 if (chan->expected_tx_seq != tx_seq) {
4278 /* Frame(s) missing - must discard partial SDU */
4279 kfree_skb(chan->sdu);
4281 chan->sdu_last_frag = NULL;
4284 /* TODO: Notify userland of missing data */
4287 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4289 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4290 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4295 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (G-frame) PDU: find a global channel bound to
 * the PSM on our source address, then hand the skb up if the channel is
 * bound/connected and the payload fits the receive MTU. */
4309 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4311 struct sock *sk = NULL;
4312 struct l2cap_chan *chan;
4314 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4322 BT_DBG("sk %p, len %d", sk, skb->len);
4324 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4327 if (chan->imtu < skb->len)
4330 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT fixed-channel PDU (LE): same delivery rules as the
 * connectionless path, but the global lookup is by channel ID. */
4342 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4344 struct sock *sk = NULL;
4345 struct l2cap_chan *chan;
4347 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4355 BT_DBG("sk %p, len %d", sk, skb->len);
4357 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4360 if (chan->imtu < skb->len)
4363 if (!chan->ops->recv(chan->data, skb))
/* Demultiplex one complete L2CAP frame by channel ID: signaling (BR/EDR
 * or LE), connectionless (by PSM), ATT, SMP, or a regular data channel.
 * The basic header is stripped before dispatch; a length mismatch drops
 * the frame (drop path on lines missing from this excerpt). */
4375 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4377 struct l2cap_hdr *lh = (void *) skb->data;
4381 skb_pull(skb, L2CAP_HDR_SIZE);
4382 cid = __le16_to_cpu(lh->cid);
4383 len = __le16_to_cpu(lh->len);
/* Header length must exactly match the remaining payload. */
4385 if (len != skb->len) {
4390 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4393 case L2CAP_CID_LE_SIGNALING:
4394 case L2CAP_CID_SIGNALING:
4395 l2cap_sig_channel(conn, skb);
4398 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in the first two octets. */
4399 psm = get_unaligned_le16(skb->data);
4401 l2cap_conless_channel(conn, psm, skb);
4404 case L2CAP_CID_LE_DATA:
4405 l2cap_att_channel(conn, cid, skb);
/* SMP fixed channel: a failure there kills the whole connection. */
4409 if (smp_sig_channel(conn, skb))
4410 l2cap_conn_del(conn->hcon, EACCES);
4414 l2cap_data_channel(conn, cid, skb);
4419 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from bdaddr?
 * Scan listening channels; an exact local-address match (lm1) takes
 * precedence over wildcard BDADDR_ANY listeners (lm2). The returned link
 * mode may include HCI_LM_MASTER when role switch is requested. */
4421 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4423 int exact = 0, lm1 = 0, lm2 = 0;
4424 struct l2cap_chan *c;
4426 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4428 /* Find listening sockets and check their link_mode */
4429 read_lock(&chan_list_lock);
4430 list_for_each_entry(c, &chan_list, global_l) {
4431 struct sock *sk = c->sk;
4433 if (c->state != BT_LISTEN)
/* Listener bound specifically to this adapter's address. */
4436 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4437 lm1 |= HCI_LM_ACCEPT;
4438 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4439 lm1 |= HCI_LM_MASTER;
/* Wildcard listener — used only if no exact match exists. */
4441 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4442 lm2 |= HCI_LM_ACCEPT;
4443 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4444 lm2 |= HCI_LM_MASTER;
4447 read_unlock(&chan_list_lock);
4449 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed. On success create the
 * L2CAP connection object and mark it ready; on failure tear down any
 * state with the HCI status mapped to an errno. */
4452 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4454 struct l2cap_conn *conn;
4456 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4459 conn = l2cap_conn_add(hcon, status);
4461 l2cap_conn_ready(conn);
4463 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the reason to use when disconnecting this link;
 * defaults to "remote user terminated" when no L2CAP state exists. */
4468 int l2cap_disconn_ind(struct hci_conn *hcon)
4470 struct l2cap_conn *conn = hcon->l2cap_data;
4472 BT_DBG("hcon %p", hcon);
4475 return HCI_ERROR_REMOTE_USER_TERM;
4476 return conn->disc_reason;
/* HCI callback: the link is gone — tear down the L2CAP connection,
 * translating the HCI reason code to an errno for the channels. */
4479 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4481 BT_DBG("hcon %p reason %d", hcon, reason);
4483 l2cap_conn_del(hcon, bt_to_errno(reason));
4487 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4489 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4492 if (encrypt == 0x00) {
4493 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4494 __clear_chan_timer(chan);
4495 __set_chan_timer(chan,
4496 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4497 } else if (chan->sec_level == BT_SECURITY_HIGH)
4498 l2cap_chan_close(chan, ECONNREFUSED);
4500 if (chan->sec_level == BT_SECURITY_MEDIUM)
4501 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption procedure completed. For LE,
 * continue SMP key distribution. For each channel: LE data channels
 * become ready on success; established channels get their encryption
 * state re-checked; channels waiting in BT_CONNECT now send their
 * Connection Request; channels in BT_CONNECT2 send the pending
 * Connection Response (pending/success/security-block depending on the
 * outcome and defer_setup). Several guard/locking lines are missing
 * from this excerpt. */
4505 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4507 struct l2cap_conn *conn = hcon->l2cap_data;
4508 struct l2cap_chan *chan;
4513 BT_DBG("conn %p", conn);
4515 if (hcon->type == LE_LINK) {
4516 smp_distribute_keys(conn, 0);
4517 cancel_delayed_work(&conn->security_timer);
4522 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4523 struct sock *sk = chan->sk;
4527 BT_DBG("chan->scid %d", chan->scid);
4529 if (chan->scid == L2CAP_CID_LE_DATA) {
4530 if (!status && encrypt) {
/* LE security complete: inherit link security level. */
4531 chan->sec_level = hcon->sec_level;
4532 l2cap_chan_ready(chan);
/* Channel not waiting on a security procedure — skip it
 * (the skip itself is on a line missing here). */
4539 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4544 if (!status && (chan->state == BT_CONNECTED ||
4545 chan->state == BT_CONFIG)) {
4546 l2cap_check_encryption(chan, encrypt);
4551 if (chan->state == BT_CONNECT) {
/* Security done for an outgoing channel: send CONN_REQ now. */
4553 struct l2cap_conn_req req;
4554 req.scid = cpu_to_le16(chan->scid);
4555 req.psm = chan->psm;
4557 chan->ident = l2cap_get_ident(conn);
4558 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4560 l2cap_send_cmd(conn, chan->ident,
4561 L2CAP_CONN_REQ, sizeof(req), &req);
/* Security failed: schedule disconnect of the attempt. */
4563 __clear_chan_timer(chan);
4564 __set_chan_timer(chan,
4565 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4567 } else if (chan->state == BT_CONNECT2) {
4568 struct l2cap_conn_rsp rsp;
/* Incoming channel held for security: answer now. With
 * defer_setup, report "authorization pending" and wake the
 * listening parent; otherwise proceed to BT_CONFIG. */
4572 if (bt_sk(sk)->defer_setup) {
4573 struct sock *parent = bt_sk(sk)->parent;
4574 res = L2CAP_CR_PEND;
4575 stat = L2CAP_CS_AUTHOR_PEND;
4577 parent->sk_data_ready(parent, 0);
4579 l2cap_state_change(chan, BT_CONFIG);
4580 res = L2CAP_CR_SUCCESS;
4581 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the connection as blocked. */
4584 l2cap_state_change(chan, BT_DISCONN);
4585 __set_chan_timer(chan,
4586 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4587 res = L2CAP_CR_SEC_BLOCK;
4588 stat = L2CAP_CS_NO_INFO;
4591 rsp.scid = cpu_to_le16(chan->dcid);
4592 rsp.dcid = cpu_to_le16(chan->scid);
4593 rsp.result = cpu_to_le16(res);
4594 rsp.status = cpu_to_le16(stat);
4595 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4607 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4609 struct l2cap_conn *conn = hcon->l2cap_data;
4612 conn = l2cap_conn_add(hcon, 0);
4617 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4619 if (!(flags & ACL_CONT)) {
4620 struct l2cap_hdr *hdr;
4621 struct l2cap_chan *chan;
4626 BT_ERR("Unexpected start frame (len %d)", skb->len);
4627 kfree_skb(conn->rx_skb);
4628 conn->rx_skb = NULL;
4630 l2cap_conn_unreliable(conn, ECOMM);
4633 /* Start fragment always begin with Basic L2CAP header */
4634 if (skb->len < L2CAP_HDR_SIZE) {
4635 BT_ERR("Frame is too short (len %d)", skb->len);
4636 l2cap_conn_unreliable(conn, ECOMM);
4640 hdr = (struct l2cap_hdr *) skb->data;
4641 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4642 cid = __le16_to_cpu(hdr->cid);
4644 if (len == skb->len) {
4645 /* Complete frame received */
4646 l2cap_recv_frame(conn, skb);
4650 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4652 if (skb->len > len) {
4653 BT_ERR("Frame is too long (len %d, expected len %d)",
4655 l2cap_conn_unreliable(conn, ECOMM);
4659 chan = l2cap_get_chan_by_scid(conn, cid);
4661 if (chan && chan->sk) {
4662 struct sock *sk = chan->sk;
4664 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4665 BT_ERR("Frame exceeding recv MTU (len %d, "
4669 l2cap_conn_unreliable(conn, ECOMM);
4675 /* Allocate skb for the complete frame (with header) */
4676 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4680 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4682 conn->rx_len = len - skb->len;
4684 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4686 if (!conn->rx_len) {
4687 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4688 l2cap_conn_unreliable(conn, ECOMM);
4692 if (skb->len > conn->rx_len) {
4693 BT_ERR("Fragment is too long (len %d, expected %d)",
4694 skb->len, conn->rx_len);
4695 kfree_skb(conn->rx_skb);
4696 conn->rx_skb = NULL;
4698 l2cap_conn_unreliable(conn, ECOMM);
4702 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4704 conn->rx_len -= skb->len;
4706 if (!conn->rx_len) {
4707 /* Complete frame received */
4708 l2cap_recv_frame(conn, conn->rx_skb);
4709 conn->rx_skb = NULL;
4718 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4720 struct l2cap_chan *c;
4722 read_lock(&chan_list_lock);
4724 list_for_each_entry(c, &chan_list, global_l) {
4725 struct sock *sk = c->sk;
4727 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4728 batostr(&bt_sk(sk)->src),
4729 batostr(&bt_sk(sk)->dst),
4730 c->state, __le16_to_cpu(c->psm),
4731 c->scid, c->dcid, c->imtu, c->omtu,
4732 c->sec_level, c->mode);
4735 read_unlock(&chan_list_lock);
4740 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4742 return single_open(file, l2cap_debugfs_show, inode->i_private);
4745 static const struct file_operations l2cap_debugfs_fops = {
4746 .open = l2cap_debugfs_open,
4748 .llseek = seq_lseek,
4749 .release = single_release,
4752 static struct dentry *l2cap_debugfs;
4754 int __init l2cap_init(void)
4758 err = l2cap_init_sockets();
4763 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4764 bt_debugfs, NULL, &l2cap_debugfs_fops);
4766 BT_ERR("Failed to create L2CAP debug file");
4772 void l2cap_exit(void)
4774 debugfs_remove(l2cap_debugfs);
4775 l2cap_cleanup_sockets();
4778 module_param(disable_ertm, bool, 0644);
4779 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");