2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
/* Module-wide state: the feature mask advertised in Information responses,
 * the fixed-channel bitmap, and a global list of all L2CAP channels guarded
 * by an rwlock. Forward declarations follow for routines used before their
 * definitions. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb);
78 /* ---- L2CAP channels ---- */
/* Look up a channel on @conn by destination CID. Walks the per-connection
 * channel list under RCU; returns NULL when no channel matches. */
80 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 struct l2cap_chan *c, *r = NULL;
86 list_for_each_entry_rcu(c, &conn->chan_l, list) {
/* Look up a channel on @conn by source CID (our side's CID). RCU walk,
 * NULL when not found. */
97 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
99 struct l2cap_chan *c, *r = NULL;
103 list_for_each_entry_rcu(c, &conn->chan_l, list) {
104 if (c->scid == cid) {
114 /* Find channel with given SCID.
115 * Returns locked socket */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
118 struct l2cap_chan *c;
120 c = __l2cap_get_chan_by_scid(conn, cid);
/* Look up a channel on @conn by the signalling command identifier it is
 * currently waiting on. RCU walk, NULL when not found. */
126 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
128 struct l2cap_chan *c, *r = NULL;
132 list_for_each_entry_rcu(c, &conn->chan_l, list) {
133 if (c->ident == ident) {
/* Locking wrapper around __l2cap_get_chan_by_ident() — presumably locks the
 * found channel's socket, mirroring l2cap_get_chan_by_scid(); interior lines
 * not visible here, confirm against full source. */
143 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
145 struct l2cap_chan *c;
147 c = __l2cap_get_chan_by_ident(conn, ident);
/* Find a channel in the global list bound to source address @src with source
 * port @psm. Caller must hold chan_list_lock. */
153 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
155 struct l2cap_chan *c;
157 list_for_each_entry(c, &chan_list, global_l) {
158 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on @src. A non-zero @psm that is already taken fails;
 * otherwise an odd PSM in the dynamic range 0x1001..0x10ff is auto-allocated
 * (PSMs must be odd per the L2CAP spec — hence the += 2 stride). */
164 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
168 write_lock(&chan_list_lock);
170 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
183 for (p = 0x1001; p < 0x1100; p += 2)
184 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
185 chan->psm = cpu_to_le16(p);
186 chan->sport = cpu_to_le16(p);
193 write_unlock(&chan_list_lock);
/* Assign a fixed source CID to @chan under the global list lock. */
197 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
199 write_lock(&chan_list_lock);
203 write_unlock(&chan_list_lock);
/* Allocate the first unused dynamic source CID on @conn by linear scan of
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
208 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
210 u16 cid = L2CAP_CID_DYN_START;
212 for (; cid < L2CAP_CID_DYN_END; cid++) {
213 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Map a BT_* channel state to a human-readable name for debug output. */
220 static char *state_to_string(int state)
224 return "BT_CONNECTED";
234 return "BT_CONNECT2";
243 return "invalid state";
/* Transition @chan to @state, logging the change and notifying the channel's
 * owner through the state_change callback. */
246 static void l2cap_state_change(struct l2cap_chan *chan, int state)
248 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
249 state_to_string(state));
252 chan->ops->state_change(chan->data, state);
/* Delayed-work handler for the per-channel timer: picks an errno based on the
 * channel state, closes the channel, notifies the owner, and drops the timer's
 * reference. */
255 static void l2cap_chan_timeout(struct work_struct *work)
257 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
259 struct sock *sk = chan->sk;
262 BT_DBG("chan %p state %d", chan, chan->state);
/* NOTE(review): ECONNREFUSED for an already CONNECTED/CONFIG channel looks
 * odd — upstream uses ECONNRESET here; verify against the full source. */
266 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
267 reason = ECONNREFUSED;
268 else if (chan->state == BT_CONNECT &&
269 chan->sec_level != BT_SECURITY_SDP)
270 reason = ECONNREFUSED;
274 l2cap_chan_close(chan, reason);
278 chan->ops->close(chan->data);
/* Balance the reference taken when the timer was armed. */
279 l2cap_chan_put(chan);
/* Allocate and initialize a new channel bound to socket @sk: linked into the
 * global channel list, timer work initialized, state BT_OPEN, refcount 1.
 * Returns the channel (NULL-check on allocation failure is in the elided
 * lines). */
282 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
284 struct l2cap_chan *chan;
286 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
292 write_lock(&chan_list_lock);
293 list_add(&chan->global_l, &chan_list);
294 write_unlock(&chan_list_lock);
296 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
298 chan->state = BT_OPEN;
300 atomic_set(&chan->refcnt, 1);
302 BT_DBG("sk %p chan %p", sk, chan);
/* Unlink @chan from the global list and drop the creation reference; the
 * channel is freed once the refcount hits zero. */
307 void l2cap_chan_destroy(struct l2cap_chan *chan)
309 write_lock(&chan_list_lock);
310 list_del(&chan->global_l);
311 write_unlock(&chan_list_lock);
313 l2cap_chan_put(chan);
/* Attach @chan to connection @conn: pick CIDs/MTU by channel type (LE data,
 * dynamically allocated CID for ACL connection-oriented, fixed CIDs for
 * connectionless and raw), seed default QoS parameters, take a channel
 * reference, and link into the connection's RCU channel list. */
316 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
318 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
319 chan->psm, chan->dcid);
321 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
325 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
326 if (conn->hcon->type == LE_LINK) {
/* LE links use the fixed LE data CID on both sides. */
328 chan->omtu = L2CAP_LE_DEFAULT_MTU;
329 chan->scid = L2CAP_CID_LE_DATA;
330 chan->dcid = L2CAP_CID_LE_DATA;
332 /* Alloc CID for connection-oriented socket */
333 chan->scid = l2cap_alloc_cid(conn);
334 chan->omtu = L2CAP_DEFAULT_MTU;
336 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
337 /* Connectionless socket */
338 chan->scid = L2CAP_CID_CONN_LESS;
339 chan->dcid = L2CAP_CID_CONN_LESS;
340 chan->omtu = L2CAP_DEFAULT_MTU;
342 /* Raw socket can send/recv signalling messages only */
343 chan->scid = L2CAP_CID_SIGNALING;
344 chan->dcid = L2CAP_CID_SIGNALING;
345 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort QoS service parameters. */
348 chan->local_id = L2CAP_BESTEFFORT_ID;
349 chan->local_stype = L2CAP_SERV_BESTEFFORT;
350 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
351 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
352 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
353 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Reference held for the connection's list membership. */
355 l2cap_chan_hold(chan);
357 list_add_rcu(&chan->list, &conn->chan_l);
/* Detach @chan from its connection and tear down ERTM state.
361 * Must be called on the locked socket. */
362 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
364 struct sock *sk = chan->sk;
365 struct l2cap_conn *conn = chan->conn;
366 struct sock *parent = bt_sk(sk)->parent;
368 __clear_chan_timer(chan);
370 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
373 /* Delete from channel list */
374 list_del_rcu(&chan->list);
/* Drop the list reference taken in l2cap_chan_add(), then the hci_conn
 * reference held by the channel. */
377 l2cap_chan_put(chan);
380 hci_conn_put(conn->hcon);
383 l2cap_state_change(chan, BT_CLOSED);
384 sock_set_flag(sk, SOCK_ZAPPED);
/* If still queued on a listening parent, unlink and wake the parent;
 * otherwise wake this socket's own waiters. */
390 bt_accept_unlink(sk);
391 parent->sk_data_ready(parent, 0);
393 sk->sk_state_change(sk);
/* Skip the ERTM purge if configuration never fully completed both ways. */
395 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
396 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
399 skb_queue_purge(&chan->tx_q);
401 if (chan->mode == L2CAP_MODE_ERTM) {
402 struct srej_list *l, *tmp;
404 __clear_retrans_timer(chan);
405 __clear_monitor_timer(chan);
406 __clear_ack_timer(chan);
408 skb_queue_purge(&chan->srej_q);
/* Free any pending SREJ list entries. */
410 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel queued on listening socket
 * @parent, notifying each owner via the close callback. */
417 static void l2cap_chan_cleanup_listen(struct sock *parent)
421 BT_DBG("parent %p", parent);
423 /* Close not yet accepted channels */
424 while ((sk = bt_accept_dequeue(parent, NULL))) {
425 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
426 __clear_chan_timer(chan);
428 l2cap_chan_close(chan, ECONNRESET);
430 chan->ops->close(chan->data);
/* Close @chan with @reason, choosing the shutdown path by current state:
 * listening sockets clean up their accept queue; connected/config channels on
 * ACL links send a Disconnect request and wait; half-open inbound channels
 * (BT_CONNECT2) answer the pending Connect request with a rejection before
 * deletion; everything else is deleted or just zapped. */
434 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
436 struct l2cap_conn *conn = chan->conn;
437 struct sock *sk = chan->sk;
439 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
441 switch (chan->state) {
443 l2cap_chan_cleanup_listen(sk);
445 l2cap_state_change(chan, BT_CLOSED);
446 sock_set_flag(sk, SOCK_ZAPPED);
/* Connected/config on ACL: request disconnection and re-arm the timer so
 * an unresponsive peer can't stall the close. */
451 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
452 conn->hcon->type == ACL_LINK) {
453 __clear_chan_timer(chan);
454 __set_chan_timer(chan, sk->sk_sndtimeo);
455 l2cap_send_disconn_req(conn, chan, reason);
457 l2cap_chan_del(chan, reason);
/* BT_CONNECT2: reject the pending inbound Connect request. A deferred
 * setup is refused with "security block", otherwise "bad PSM". */
461 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
462 conn->hcon->type == ACL_LINK) {
463 struct l2cap_conn_rsp rsp;
466 if (bt_sk(sk)->defer_setup)
467 result = L2CAP_CR_SEC_BLOCK;
469 result = L2CAP_CR_BAD_PSM;
470 l2cap_state_change(chan, BT_DISCONN);
472 rsp.scid = cpu_to_le16(chan->dcid);
473 rsp.dcid = cpu_to_le16(chan->scid);
474 rsp.result = cpu_to_le16(result);
475 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
476 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
480 l2cap_chan_del(chan, reason);
485 l2cap_chan_del(chan, reason);
/* Default: nothing to signal, just mark the socket dead. */
489 sock_set_flag(sk, SOCK_ZAPPED);
/* Derive the HCI authentication requirement for @chan from its type, PSM and
 * security level: raw channels use dedicated bonding, SDP (PSM 0x0001) never
 * bonds (and LOW is downgraded to the SDP level), everything else uses
 * general bonding. */
494 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
496 if (chan->chan_type == L2CAP_CHAN_RAW) {
497 switch (chan->sec_level) {
498 case BT_SECURITY_HIGH:
499 return HCI_AT_DEDICATED_BONDING_MITM;
500 case BT_SECURITY_MEDIUM:
501 return HCI_AT_DEDICATED_BONDING;
503 return HCI_AT_NO_BONDING;
505 } else if (chan->psm == cpu_to_le16(0x0001)) {
506 if (chan->sec_level == BT_SECURITY_LOW)
507 chan->sec_level = BT_SECURITY_SDP;
509 if (chan->sec_level == BT_SECURITY_HIGH)
510 return HCI_AT_NO_BONDING_MITM;
512 return HCI_AT_NO_BONDING;
514 switch (chan->sec_level) {
515 case BT_SECURITY_HIGH:
516 return HCI_AT_GENERAL_BONDING_MITM;
517 case BT_SECURITY_MEDIUM:
518 return HCI_AT_GENERAL_BONDING;
520 return HCI_AT_NO_BONDING;
525 /* Service level security */
/* Ask the HCI layer to enforce @chan's security level on the underlying
 * ACL/LE connection. */
526 int l2cap_chan_check_security(struct l2cap_chan *chan)
528 struct l2cap_conn *conn = chan->conn;
531 auth_type = l2cap_get_auth_type(chan);
533 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for @conn, wrapping within
 * the kernel-reserved range under conn->lock. */
536 static u8 l2cap_get_ident(struct l2cap_conn *conn)
540 /* Get next available identificator.
541 * 1 - 128 are used by kernel.
542 * 129 - 199 are reserved.
543 * 200 - 254 are used by utilities like l2ping, etc.
546 spin_lock(&conn->lock);
548 if (++conn->tx_ident > 128)
553 spin_unlock(&conn->lock);
/* Build and transmit a signalling command on @conn's signalling channel at
 * maximum priority, forcing the link active and marking it non-flushable when
 * the controller supports it. */
558 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
560 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
563 BT_DBG("code 0x%2.2x", code);
568 if (lmp_no_flush_capable(conn->hcon->hdev))
569 flags = ACL_START_NO_FLUSH;
573 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
574 skb->priority = HCI_PRIO_MAX;
576 hci_send_acl(conn->hchan, skb, flags);
/* Queue @skb for transmission on @chan's HCI channel, honoring the channel's
 * flushable and force-active flags. */
579 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
581 struct hci_conn *hcon = chan->conn->hcon;
584 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
587 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
588 lmp_no_flush_capable(hcon->hdev))
589 flags = ACL_START_NO_FLUSH;
593 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
594 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM supervisory frame carrying @control on a connected
 * channel. Header size depends on extended vs. enhanced control fields; an
 * FCS trailer is appended when CRC16 is negotiated. Pending F-bit and P-bit
 * requests are consumed into the control field here. */
597 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
600 struct l2cap_hdr *lh;
601 struct l2cap_conn *conn = chan->conn;
604 if (chan->state != BT_CONNECTED)
607 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
608 hlen = L2CAP_EXT_HDR_SIZE;
610 hlen = L2CAP_ENH_HDR_SIZE;
612 if (chan->fcs == L2CAP_FCS_CRC16)
613 hlen += L2CAP_FCS_SIZE;
615 BT_DBG("chan %p, control 0x%8.8x", chan, control);
617 count = min_t(unsigned int, conn->mtu, hlen);
619 control |= __set_sframe(chan);
621 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
622 control |= __set_ctrl_final(chan);
624 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
625 control |= __set_ctrl_poll(chan);
627 skb = bt_skb_alloc(count, GFP_ATOMIC);
631 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
632 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
633 lh->cid = cpu_to_le16(chan->dcid);
635 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
/* FCS covers the L2CAP header and control field (everything before it). */
637 if (chan->fcs == L2CAP_FCS_CRC16) {
638 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
639 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
642 skb->priority = HCI_PRIO_MAX;
643 l2cap_do_send(chan, skb);
/* Send an RNR S-frame when we are locally busy (recording that RNR was sent),
 * otherwise an RR, always acknowledging up to buffer_seq. */
646 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
648 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
649 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
650 set_bit(CONN_RNR_SENT, &chan->conn_state);
652 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
654 control |= __set_reqseq(chan, chan->buffer_seq);
656 l2cap_send_sframe(chan, control);
/* True when no Connect request is outstanding for @chan. */
659 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
661 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Drive the outgoing connection procedure for @chan. If the peer's feature
 * mask is already known (or being fetched), send a Connect request once
 * security allows; otherwise first issue an Information request for the
 * feature mask and arm the info timer. */
664 static void l2cap_do_start(struct l2cap_chan *chan)
666 struct l2cap_conn *conn = chan->conn;
668 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
669 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
672 if (l2cap_chan_check_security(chan) &&
673 __l2cap_no_conn_pending(chan)) {
674 struct l2cap_conn_req req;
675 req.scid = cpu_to_le16(chan->scid);
678 chan->ident = l2cap_get_ident(conn);
679 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
681 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
/* Feature mask not yet requested: do that first. */
685 struct l2cap_info_req req;
686 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
688 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
689 conn->info_ident = l2cap_get_ident(conn);
691 schedule_delayed_work(&conn->info_timer,
692 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
694 l2cap_send_cmd(conn, conn->info_ident,
695 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether @mode (ERTM/streaming) is supported by both the local feature
 * mask and the remote @feat_mask. */
699 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
701 u32 local_feat_mask = l2cap_feat_mask;
703 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
706 case L2CAP_MODE_ERTM:
707 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
708 case L2CAP_MODE_STREAMING:
709 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP Disconnect request for @chan and move it to BT_DISCONN,
 * stopping all ERTM timers first so nothing retransmits during teardown. */
715 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
718 struct l2cap_disconn_req req;
725 if (chan->mode == L2CAP_MODE_ERTM) {
726 __clear_retrans_timer(chan);
727 __clear_monitor_timer(chan);
728 __clear_ack_timer(chan);
731 req.dcid = cpu_to_le16(chan->dcid);
732 req.scid = cpu_to_le16(chan->scid);
733 l2cap_send_cmd(conn, l2cap_get_ident(conn),
734 L2CAP_DISCONN_REQ, sizeof(req), &req);
736 l2cap_state_change(chan, BT_DISCONN);
740 /* ---- L2CAP connections ---- */
/* Kick all connection-oriented channels on @conn once the link is usable:
 * BT_CONNECT channels send their Connect request (or are closed if their mode
 * is unsupported by the peer), BT_CONNECT2 channels answer the pending inbound
 * Connect request — success, pending-authorization (deferred setup), or
 * pending-authentication — and successful ones immediately start
 * configuration. */
741 static void l2cap_conn_start(struct l2cap_conn *conn)
743 struct l2cap_chan *chan;
745 BT_DBG("conn %p", conn);
749 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
750 struct sock *sk = chan->sk;
754 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
759 if (chan->state == BT_CONNECT) {
760 struct l2cap_conn_req req;
762 if (!l2cap_chan_check_security(chan) ||
763 !__l2cap_no_conn_pending(chan)) {
/* Peer doesn't support the required mode and we can't fall back:
 * abort this channel. */
768 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
769 && test_bit(CONF_STATE2_DEVICE,
770 &chan->conf_state)) {
771 /* l2cap_chan_close() calls list_del(chan)
772 * so release the lock */
773 l2cap_chan_close(chan, ECONNRESET);
778 req.scid = cpu_to_le16(chan->scid);
781 chan->ident = l2cap_get_ident(conn);
782 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
784 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
787 } else if (chan->state == BT_CONNECT2) {
788 struct l2cap_conn_rsp rsp;
790 rsp.scid = cpu_to_le16(chan->dcid);
791 rsp.dcid = cpu_to_le16(chan->scid);
793 if (l2cap_chan_check_security(chan)) {
794 if (bt_sk(sk)->defer_setup) {
795 struct sock *parent = bt_sk(sk)->parent;
796 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
797 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
799 parent->sk_data_ready(parent, 0);
802 l2cap_state_change(chan, BT_CONFIG);
803 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
804 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
807 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
808 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
811 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Don't send a Config request twice or after a non-success reply. */
814 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
815 rsp.result != L2CAP_CR_SUCCESS) {
820 set_bit(CONF_REQ_SENT, &chan->conf_state);
821 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
822 l2cap_build_conf_req(chan, buf), buf);
823 chan->num_conf_req++;
832 /* Find socket with cid and source bdaddr.
833 * Returns closest match, locked.
/* An exact source-address match wins immediately; a wildcard (BDADDR_ANY)
 * binding is remembered as the fallback. @state of 0 matches any state. */
835 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
837 struct l2cap_chan *c, *c1 = NULL;
839 read_lock(&chan_list_lock);
841 list_for_each_entry(c, &chan_list, global_l) {
842 struct sock *sk = c->sk;
844 if (state && c->state != state)
847 if (c->scid == cid) {
849 if (!bacmp(&bt_sk(sk)->src, src)) {
850 read_unlock(&chan_list_lock);
855 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
860 read_unlock(&chan_list_lock);
/* Handle an incoming LE connection: find a listener on the LE data CID,
 * spawn a child channel via the owner's new_connection callback, copy the
 * link addresses, enqueue the child on the parent's accept queue and mark it
 * connected. */
865 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
867 struct sock *parent, *sk;
868 struct l2cap_chan *chan, *pchan;
872 /* Check if we have socket listening on cid */
873 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
882 /* Check for backlog size */
883 if (sk_acceptq_is_full(parent)) {
884 BT_DBG("backlog full %d", parent->sk_ack_backlog);
888 chan = pchan->ops->new_connection(pchan->data);
/* The channel keeps a reference on the underlying hci_conn. */
894 hci_conn_hold(conn->hcon);
896 bacpy(&bt_sk(sk)->src, conn->src);
897 bacpy(&bt_sk(sk)->dst, conn->dst);
899 bt_accept_enqueue(parent, sk);
901 l2cap_chan_add(conn, chan);
903 __set_chan_timer(chan, sk->sk_sndtimeo);
905 l2cap_state_change(chan, BT_CONNECTED);
906 parent->sk_data_ready(parent, 0);
909 release_sock(parent);
/* Mark @sk's channel fully connected: reset configuration state, stop the
 * channel timer, wake this socket and (if accepted from a listener) the
 * parent. */
912 static void l2cap_chan_ready(struct sock *sk)
914 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
915 struct sock *parent = bt_sk(sk)->parent;
917 BT_DBG("sk %p, parent %p", sk, parent);
919 chan->conf_state = 0;
920 __clear_chan_timer(chan);
922 l2cap_state_change(chan, BT_CONNECTED);
923 sk->sk_state_change(sk);
926 parent->sk_data_ready(parent, 0);
/* Called when the underlying link comes up. Inbound LE links get the LE
 * accept path; outbound LE links start SMP security. Then every channel on
 * the connection is advanced: LE channels become ready once SMP allows,
 * non-connection-oriented channels are connected immediately, and pending
 * BT_CONNECT channels start the connect procedure. */
929 static void l2cap_conn_ready(struct l2cap_conn *conn)
931 struct l2cap_chan *chan;
933 BT_DBG("conn %p", conn);
935 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
936 l2cap_le_conn_ready(conn);
938 if (conn->hcon->out && conn->hcon->type == LE_LINK)
939 smp_conn_security(conn, conn->hcon->pending_sec_level);
943 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
944 struct sock *sk = chan->sk;
948 if (conn->hcon->type == LE_LINK) {
949 if (smp_conn_security(conn, chan->sec_level))
950 l2cap_chan_ready(sk);
952 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
953 __clear_chan_timer(chan);
954 l2cap_state_change(chan, BT_CONNECTED);
955 sk->sk_state_change(sk);
957 } else if (chan->state == BT_CONNECT)
958 l2cap_do_start(chan);
966 /* Notify sockets that we cannot guaranty reliability anymore */
/* Walk @conn's channels under RCU and flag the error on every channel that
 * asked for forced reliability. */
967 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
969 struct l2cap_chan *chan;
971 BT_DBG("conn %p", conn);
975 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
976 struct sock *sk = chan->sk;
978 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
/* Information-request timeout: give up on learning the peer's feature mask,
 * mark the exchange done and start any channels that were waiting on it. */
985 static void l2cap_info_timeout(struct work_struct *work)
987 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
990 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
991 conn->info_ident = 0;
993 l2cap_conn_start(conn);
/* Tear down the L2CAP connection attached to @hcon: close every channel with
 * @err, free the partially-reassembled rx skb, delete the HCI channel, cancel
 * any pending info/SMP timers, and detach from the hci_conn. */
996 static void l2cap_conn_del(struct hci_conn *hcon, int err)
998 struct l2cap_conn *conn = hcon->l2cap_data;
999 struct l2cap_chan *chan, *l;
1005 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1007 kfree_skb(conn->rx_skb);
1010 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1013 l2cap_chan_del(chan, err);
1015 chan->ops->close(chan->data);
1018 hci_chan_del(conn->hchan);
1020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1021 __cancel_delayed_work(&conn->info_timer);
1023 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1024 __cancel_delayed_work(&conn->security_timer);
1025 smp_chan_destroy(conn);
1028 hcon->l2cap_data = NULL;
/* SMP security-procedure timeout on an LE link: drop the whole connection
 * with ETIMEDOUT. */
1032 static void security_timeout(struct work_struct *work)
1034 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1035 security_timer.work);
1037 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocate an HCI
 * channel, pick the MTU from the link type (LE vs. ACL), record the link
 * addresses, and initialize the channel list plus the appropriate timer
 * (security timer for LE, info timer otherwise). */
1040 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1042 struct l2cap_conn *conn = hcon->l2cap_data;
1043 struct hci_chan *hchan;
1048 hchan = hci_chan_create(hcon);
1052 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
/* Allocation failed: undo the HCI channel before bailing. */
1054 hci_chan_del(hchan);
1058 hcon->l2cap_data = conn;
1060 conn->hchan = hchan;
1062 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1064 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1065 conn->mtu = hcon->hdev->le_mtu;
1067 conn->mtu = hcon->hdev->acl_mtu;
1069 conn->src = &hcon->hdev->bdaddr;
1070 conn->dst = &hcon->dst;
1072 conn->feat_mask = 0;
1074 spin_lock_init(&conn->lock);
1076 INIT_LIST_HEAD(&conn->chan_l);
1078 if (hcon->type == LE_LINK)
1079 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1081 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1083 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1088 /* ---- Socket interface ---- */
1090 /* Find socket with psm and source bdaddr.
1091 * Returns closest match.
/* Same matching policy as l2cap_global_chan_by_scid(): exact source address
 * wins, a BDADDR_ANY binding is kept as fallback; @state 0 matches any. */
1093 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1095 struct l2cap_chan *c, *c1 = NULL;
1097 read_lock(&chan_list_lock);
1099 list_for_each_entry(c, &chan_list, global_l) {
1100 struct sock *sk = c->sk;
1102 if (state && c->state != state)
1105 if (c->psm == psm) {
1107 if (!bacmp(&bt_sk(sk)->src, src)) {
1108 read_unlock(&chan_list_lock);
1113 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1118 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan to @dst on PSM @psm / CID @cid.
 * Validates the PSM (must be odd with a zero LSB in the upper byte per the
 * L2CAP spec), checks the channel mode and socket state, creates or reuses
 * the HCI link (LE vs. ACL chosen by the destination CID), attaches the
 * channel and either completes immediately (link already up) or waits on the
 * channel timer. Returns 0 or a negative errno. */
1123 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1125 struct sock *sk = chan->sk;
1126 bdaddr_t *src = &bt_sk(sk)->src;
1127 struct l2cap_conn *conn;
1128 struct hci_conn *hcon;
1129 struct hci_dev *hdev;
1133 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1136 hdev = hci_get_route(dst, src);
1138 return -EHOSTUNREACH;
1144 /* PSM must be odd and lsb of upper byte must be 0 */
1145 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1146 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a fixed CID. */
1151 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1156 switch (chan->mode) {
1157 case L2CAP_MODE_BASIC:
1159 case L2CAP_MODE_ERTM:
1160 case L2CAP_MODE_STREAMING:
1169 switch (sk->sk_state) {
1173 /* Already connecting */
1178 /* Already connected */
1192 /* Set destination address and psm */
1193 bacpy(&bt_sk(sk)->dst, dst);
1197 auth_type = l2cap_get_auth_type(chan);
/* The LE data CID selects an LE link; everything else uses ACL. */
1199 if (chan->dcid == L2CAP_CID_LE_DATA)
1200 hcon = hci_connect(hdev, LE_LINK, dst,
1201 chan->sec_level, auth_type);
1203 hcon = hci_connect(hdev, ACL_LINK, dst,
1204 chan->sec_level, auth_type);
1207 err = PTR_ERR(hcon);
1211 conn = l2cap_conn_add(hcon, 0);
1218 /* Update source addr of the socket */
1219 bacpy(src, conn->src);
1221 l2cap_chan_add(conn, chan);
1223 l2cap_state_change(chan, BT_CONNECT);
1224 __set_chan_timer(chan, sk->sk_sndtimeo);
1226 if (hcon->state == BT_CONNECTED) {
1227 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1228 __clear_chan_timer(chan);
1229 if (l2cap_chan_check_security(chan))
1230 l2cap_state_change(chan, BT_CONNECTED);
1232 l2cap_do_start(chan);
1238 hci_dev_unlock(hdev);
/* Sleep (interruptibly) until all ERTM frames on @sk's channel have been
 * acknowledged or the connection goes away; returns 0, a signal errno, or a
 * pending socket error. */
1243 int __l2cap_wait_ack(struct sock *sk)
1245 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1246 DECLARE_WAITQUEUE(wait, current);
1250 add_wait_queue(sk_sleep(sk), &wait);
1251 set_current_state(TASK_INTERRUPTIBLE);
1252 while (chan->unacked_frames > 0 && chan->conn) {
1256 if (signal_pending(current)) {
1257 err = sock_intr_errno(timeo);
1262 timeo = schedule_timeout(timeo);
/* Re-arm the state before re-checking the loop condition. */
1264 set_current_state(TASK_INTERRUPTIBLE);
1266 err = sock_error(sk);
1270 set_current_state(TASK_RUNNING);
1271 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: if the peer failed to respond within remote_max_tx
 * retries, abort the connection; otherwise poll again (RR/RNR with P-bit) and
 * re-arm. */
1275 static void l2cap_monitor_timeout(struct work_struct *work)
1277 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1278 monitor_timer.work);
1279 struct sock *sk = chan->sk;
1281 BT_DBG("chan %p", chan);
1284 if (chan->retry_count >= chan->remote_max_tx) {
1285 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1290 chan->retry_count++;
1291 __set_monitor_timer(chan);
1293 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: start the monitor sequence — first retry, enter
 * WAIT_F, and poll the peer. */
1297 static void l2cap_retrans_timeout(struct work_struct *work)
1299 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1300 retrans_timer.work);
1301 struct sock *sk = chan->sk;
1303 BT_DBG("chan %p", chan);
1306 chan->retry_count = 1;
1307 __set_monitor_timer(chan);
1309 set_bit(CONN_WAIT_F, &chan->conn_state);
1311 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Release acknowledged frames from the head of the tx queue, stopping at
 * expected_ack_seq; clear the retransmission timer once nothing is
 * outstanding. */
1315 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1317 struct sk_buff *skb;
1319 while ((skb = skb_peek(&chan->tx_q)) &&
1320 chan->unacked_frames) {
1321 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1324 skb = skb_dequeue(&chan->tx_q);
1327 chan->unacked_frames--;
1330 if (!chan->unacked_frames)
1331 __clear_retrans_timer(chan);
/* Streaming mode: drain the tx queue, stamping each frame's control field
 * with the next tx sequence number and rewriting the FCS trailer (which the
 * control-field update invalidated) before sending. No retransmission state
 * is kept. */
1334 static void l2cap_streaming_send(struct l2cap_chan *chan)
1336 struct sk_buff *skb;
1340 while ((skb = skb_dequeue(&chan->tx_q))) {
1341 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1342 control |= __set_txseq(chan, chan->next_tx_seq);
1343 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1345 if (chan->fcs == L2CAP_FCS_CRC16) {
1346 fcs = crc16(0, (u8 *)skb->data,
1347 skb->len - L2CAP_FCS_SIZE);
1348 put_unaligned_le16(fcs,
1349 skb->data + skb->len - L2CAP_FCS_SIZE);
1352 l2cap_do_send(chan, skb);
1354 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single queued I-frame with sequence number @tx_seq: locate
 * it in the tx queue, abort the link if its retry budget is exhausted,
 * otherwise clone it, refresh the control field (F-bit if pending, current
 * reqseq, original txseq) and FCS, and send the clone. */
1358 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1360 struct sk_buff *skb, *tx_skb;
1364 skb = skb_peek(&chan->tx_q);
1368 while (bt_cb(skb)->tx_seq != tx_seq) {
1369 if (skb_queue_is_last(&chan->tx_q, skb))
1372 skb = skb_queue_next(&chan->tx_q, skb);
1375 if (chan->remote_max_tx &&
1376 bt_cb(skb)->retries == chan->remote_max_tx) {
1377 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Send a clone so the original stays queued for further retransmits. */
1381 tx_skb = skb_clone(skb, GFP_ATOMIC);
1382 bt_cb(skb)->retries++;
1384 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1385 control &= __get_sar_mask(chan);
1387 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1388 control |= __set_ctrl_final(chan);
1390 control |= __set_reqseq(chan, chan->buffer_seq);
1391 control |= __set_txseq(chan, tx_seq);
1393 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1395 if (chan->fcs == L2CAP_FCS_CRC16) {
1396 fcs = crc16(0, (u8 *)tx_skb->data,
1397 tx_skb->len - L2CAP_FCS_SIZE);
1398 put_unaligned_le16(fcs,
1399 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1402 l2cap_do_send(chan, tx_skb);
/* ERTM transmit pump: send frames from tx_send_head while the transmit window
 * has room. Each frame is cloned, its control field refreshed (F-bit if
 * pending, current reqseq, next txseq) and FCS recomputed, then sent; the
 * retransmission timer is armed and the bookkeeping (unacked/sent counters,
 * tx_send_head advance) updated. Aborts the link when a frame has exhausted
 * remote_max_tx retries. */
1405 static int l2cap_ertm_send(struct l2cap_chan *chan)
1407 struct sk_buff *skb, *tx_skb;
1412 if (chan->state != BT_CONNECTED)
1415 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1417 if (chan->remote_max_tx &&
1418 bt_cb(skb)->retries == chan->remote_max_tx) {
1419 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1423 tx_skb = skb_clone(skb, GFP_ATOMIC);
1425 bt_cb(skb)->retries++;
1427 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1428 control &= __get_sar_mask(chan);
1430 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1431 control |= __set_ctrl_final(chan);
1433 control |= __set_reqseq(chan, chan->buffer_seq);
1434 control |= __set_txseq(chan, chan->next_tx_seq);
1436 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed from and written through skb->data while
 * the clone tx_skb is what gets sent; this works only because skb_clone()
 * shares the data buffer. l2cap_retransmit_one_frame() uses tx_skb->data
 * for the same job — confirm intent / align the two paths upstream. */
1438 if (chan->fcs == L2CAP_FCS_CRC16) {
1439 fcs = crc16(0, (u8 *)skb->data,
1440 tx_skb->len - L2CAP_FCS_SIZE);
1441 put_unaligned_le16(fcs, skb->data +
1442 tx_skb->len - L2CAP_FCS_SIZE);
1445 l2cap_do_send(chan, tx_skb);
1447 __set_retrans_timer(chan);
1449 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1451 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Count a frame as unacked only on its first transmission. */
1453 if (bt_cb(skb)->retries == 1)
1454 chan->unacked_frames++;
1456 chan->frames_sent++;
1458 if (skb_queue_is_last(&chan->tx_q, skb))
1459 chan->tx_send_head = NULL;
1461 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the start of the tx queue and the sequence
 * counter to the last acknowledged frame, then re-run the ERTM send pump. */
1469 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1473 if (!skb_queue_empty(&chan->tx_q))
1474 chan->tx_send_head = chan->tx_q.next;
1476 chan->next_tx_seq = chan->expected_ack_seq;
1477 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy, otherwise try to
 * piggyback the ack on pending I-frames and fall back to an explicit RR. */
1481 static void __l2cap_send_ack(struct l2cap_chan *chan)
1485 control |= __set_reqseq(chan, chan->buffer_seq);
1487 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1488 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1489 set_bit(CONN_RNR_SENT, &chan->conn_state);
1490 l2cap_send_sframe(chan, control);
/* I-frames went out — they carry the ack, no S-frame needed. */
1494 if (l2cap_ertm_send(chan) > 0)
1497 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1498 l2cap_send_sframe(chan, control);
/* Cancel the delayed ack timer and acknowledge immediately. */
1501 static void l2cap_send_ack(struct l2cap_chan *chan)
1503 __clear_ack_timer(chan);
1504 __l2cap_send_ack(chan);
/* Send an SREJ S-frame with the F-bit set, requesting the sequence number of
 * the last entry on the SREJ list. */
1507 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1509 struct srej_list *tail;
1512 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1513 control |= __set_ctrl_final(chan);
1515 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1516 control |= __set_reqseq(chan, tail->tx_seq);
1518 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user data from @msg into @skb: the first @count bytes go
 * into the skb itself, the remainder into a chain of MTU-sized fragment skbs
 * hung off the frag_list. Returns 0 or a negative errno (error paths are in
 * the elided lines). */
1521 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1523 struct sock *sk = chan->sk;
1524 struct l2cap_conn *conn = chan->conn;
1525 struct sk_buff **frag;
1528 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1534 /* Continuation fragments (no L2CAP header) */
1535 frag = &skb_shinfo(skb)->frag_list;
1537 count = min_t(unsigned int, conn->mtu, len);
1539 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1542 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
/* Fragments inherit the head skb's queueing priority. */
1545 (*frag)->priority = skb->priority;
1550 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM, then the
 * user payload copied from @msg (fragmented to the link MTU). Returns the skb
 * or an ERR_PTR. */
1556 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1557 struct msghdr *msg, size_t len,
1560 struct sock *sk = chan->sk;
1561 struct l2cap_conn *conn = chan->conn;
1562 struct sk_buff *skb;
1563 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1564 struct l2cap_hdr *lh;
1566 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1568 count = min_t(unsigned int, (conn->mtu - hlen), len);
1569 skb = bt_skb_send_alloc(sk, count + hlen,
1570 msg->msg_flags & MSG_DONTWAIT, &err);
1572 return ERR_PTR(err);
1574 skb->priority = priority;
1576 /* Create L2CAP header */
1577 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1578 lh->cid = cpu_to_le16(chan->dcid);
1579 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1580 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1582 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1583 if (unlikely(err < 0)) {
1585 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the user
 * payload from @msg, fragmented to the link MTU. Returns the skb or an
 * ERR_PTR. */
1590 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1591 struct msghdr *msg, size_t len,
1594 struct sock *sk = chan->sk;
1595 struct l2cap_conn *conn = chan->conn;
1596 struct sk_buff *skb;
1597 int err, count, hlen = L2CAP_HDR_SIZE;
1598 struct l2cap_hdr *lh;
1600 BT_DBG("sk %p len %d", sk, (int)len);
1602 count = min_t(unsigned int, (conn->mtu - hlen), len);
1603 skb = bt_skb_send_alloc(sk, count + hlen,
1604 msg->msg_flags & MSG_DONTWAIT, &err);
1606 return ERR_PTR(err);
1608 skb->priority = priority;
1610 /* Create L2CAP header */
1611 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1612 lh->cid = cpu_to_le16(chan->dcid);
1613 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1615 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1616 if (unlikely(err < 0)) {
1618 return ERR_PTR(err);
/* Build an I-frame PDU for ERTM/streaming mode: L2CAP header, the
 * (enhanced or extended) control field, an optional SDU-length field
 * for segmented SDUs, the payload, and an FCS placeholder when CRC16
 * is in use.  Returns the skb or an ERR_PTR.
 * NOTE(review): elided excerpt - the connection check guarding the
 * -ENOTCONN return, the sdulen test before writing the SDULEN field,
 * and the error-path kfree_skb are not visible here.
 */
1623 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1624 struct msghdr *msg, size_t len,
1625 u32 control, u16 sdulen)
1627 struct sock *sk = chan->sk;
1628 struct l2cap_conn *conn = chan->conn;
1629 struct sk_buff *skb;
1630 int err, count, hlen;
1631 struct l2cap_hdr *lh;
1633 BT_DBG("sk %p len %d", sk, (int)len);
1636 return ERR_PTR(-ENOTCONN);
/* Extended control field (4 bytes) vs. enhanced (2 bytes). */
1638 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1639 hlen = L2CAP_EXT_HDR_SIZE;
1641 hlen = L2CAP_ENH_HDR_SIZE;
/* Room for the SDU length field (start-of-SDU frames only). */
1644 hlen += L2CAP_SDULEN_SIZE;
1646 if (chan->fcs == L2CAP_FCS_CRC16)
1647 hlen += L2CAP_FCS_SIZE;
1649 count = min_t(unsigned int, (conn->mtu - hlen), len);
1650 skb = bt_skb_send_alloc(sk, count + hlen,
1651 msg->msg_flags & MSG_DONTWAIT, &err);
1653 return ERR_PTR(err);
1655 /* Create L2CAP header */
1656 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1657 lh->cid = cpu_to_le16(chan->dcid);
1658 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Control field width depends on FLAG_EXT_CTRL (__ctrl_size). */
1660 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1663 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1665 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1666 if (unlikely(err < 0)) {
1668 return ERR_PTR(err);
/* FCS placeholder; the real CRC is filled in at transmit time. */
1671 if (chan->fcs == L2CAP_FCS_CRC16)
1672 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1674 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame (which
 * carries the total SDU length), CONTINUE frames, and a final END
 * frame, building each on a temporary queue that is spliced onto
 * chan->tx_q only if every segment was created successfully.
 * NOTE(review): elided excerpt - the IS_ERR checks, the loop while
 * (len > 0), and the per-iteration len/size accounting after the
 * first segment are not fully visible here.
 */
1678 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1680 struct sk_buff *skb;
1681 struct sk_buff_head sar_queue;
1685 skb_queue_head_init(&sar_queue);
/* First segment: SAR=START, sdulen carries the full SDU length. */
1686 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1687 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1689 return PTR_ERR(skb);
1691 __skb_queue_tail(&sar_queue, skb);
1692 len -= chan->remote_mps;
1693 size += chan->remote_mps;
/* Middle vs. last segment selection. */
1698 if (len > chan->remote_mps) {
1699 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1700 buflen = chan->remote_mps;
1702 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
/* Continuation frames carry no sdulen field (0 here). */
1706 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure, drop every segment built so far. */
1708 skb_queue_purge(&sar_queue);
1709 return PTR_ERR(skb);
1712 __skb_queue_tail(&sar_queue, skb);
/* Commit: append all segments atomically to the channel tx queue. */
1716 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1717 if (chan->tx_send_head == NULL)
1718 chan->tx_send_head = sar_queue.next;
/* Entry point for sending user data on a channel.  Dispatches on
 * channel type and mode: connectionless channels and basic mode send
 * immediately; ERTM/streaming queue I-frames (segmenting SDUs larger
 * than the remote MPS) and then kick the appropriate transmit engine.
 * Returns bytes sent / queued or a negative errno.
 * NOTE(review): elided excerpt - several error assignments (e.g.
 * -EMSGSIZE for oversized basic-mode SDUs), break statements and the
 * final return are not visible here.
 */
1723 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1726 struct sk_buff *skb;
1730 /* Connectionless channel */
1731 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1732 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1734 return PTR_ERR(skb);
1736 l2cap_do_send(chan, skb);
1740 switch (chan->mode) {
1741 case L2CAP_MODE_BASIC:
1742 /* Check outgoing MTU */
1743 if (len > chan->omtu)
1746 /* Create a basic PDU */
1747 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1749 return PTR_ERR(skb);
1751 l2cap_do_send(chan, skb);
1755 case L2CAP_MODE_ERTM:
1756 case L2CAP_MODE_STREAMING:
1757 /* Entire SDU fits into one PDU */
1758 if (len <= chan->remote_mps) {
1759 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1760 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1763 return PTR_ERR(skb);
1765 __skb_queue_tail(&chan->tx_q, skb);
1767 if (chan->tx_send_head == NULL)
1768 chan->tx_send_head = skb;
1771 /* Segment SDU into multiples PDUs */
1772 err = l2cap_sar_segment_sdu(chan, msg, len);
/* Streaming mode transmits immediately, no acknowledgements. */
1777 if (chan->mode == L2CAP_MODE_STREAMING) {
1778 l2cap_streaming_send(chan);
/* ERTM: hold off while the remote is busy or we await an F-bit. */
1783 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1784 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1789 err = l2cap_ertm_send(chan);
1796 BT_DBG("bad state %1.1x", chan->mode);
1803 /* Copy frame to all raw sockets on that connection */
/* Deliver a clone of @skb to every raw (L2CAP_CHAN_RAW) channel on
 * @conn, except the socket the frame originated from.
 * NOTE(review): elided excerpt - the RCU read-lock pair, the
 * continue statements and the clone NULL-check are not visible here.
 */
1804 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1806 struct sk_buff *nskb;
1807 struct l2cap_chan *chan;
1809 BT_DBG("conn %p", conn);
1813 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
1814 struct sock *sk = chan->sk;
1815 if (chan->chan_type != L2CAP_CHAN_RAW)
1818 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: may run in a context that cannot sleep. */
1821 nskb = skb_clone(skb, GFP_ATOMIC);
/* If the channel's recv op rejects the clone, it must be freed -
 * presumably on the elided line that follows; verify in full source. */
1825 if (chan->ops->recv(chan->data, nskb))
1832 /* ---- L2CAP signalling commands ---- */
/* Allocate and assemble an L2CAP signalling command skb: L2CAP header
 * (CID = signalling channel, LE or BR/EDR), command header, then @dlen
 * bytes of @data, fragmenting over conn->mtu via frag_list if needed.
 * NOTE(review): elided excerpt - allocation NULL-checks, the
 * code/ident assignments into cmd, the fragment loop condition, and
 * the error-path kfree_skb are not visible here.
 */
1833 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1834 u8 code, u8 ident, u16 dlen, void *data)
1836 struct sk_buff *skb, **frag;
1837 struct l2cap_cmd_hdr *cmd;
1838 struct l2cap_hdr *lh;
1841 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1842 conn, code, ident, dlen);
1844 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1845 count = min_t(unsigned int, conn->mtu, len);
1847 skb = bt_skb_alloc(count, GFP_ATOMIC);
1851 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1852 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* Signalling CID differs between LE and BR/EDR links. */
1854 if (conn->hcon->type == LE_LINK)
1855 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1857 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1859 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1862 cmd->len = cpu_to_le16(dlen);
/* Copy the part of the payload that fits in the head skb. */
1865 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1866 memcpy(skb_put(skb, count), data, count);
1872 /* Continuation fragments (no L2CAP header) */
1873 frag = &skb_shinfo(skb)->frag_list;
1875 count = min_t(unsigned int, conn->mtu, len);
1877 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1881 memcpy(skb_put(*frag, count), data, count);
1886 frag = &(*frag)->next;
/* Decode one configuration option at *@ptr: fills @type, @olen and
 * @val (1/2/4-byte values are read inline; anything else returns a
 * pointer to the raw option data) and returns the total bytes
 * consumed so the caller can advance through the option list.
 * NOTE(review): elided excerpt - the switch on opt->len and the
 * *ptr advance are not visible here.
 */
1896 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1898 struct l2cap_conf_opt *opt = *ptr;
1901 len = L2CAP_CONF_OPT_SIZE + opt->len;
1909 *val = *((u8 *) opt->val);
1913 *val = get_unaligned_le16(opt->val);
1917 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer instead of an inline value. */
1921 *val = (unsigned long) opt->val;
1925 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *@ptr and
 * advance the pointer past it.  1/2/4-byte values are stored inline;
 * longer values are memcpy'd from the address passed in @val.
 * NOTE(review): elided excerpt - the type/len stores and the switch
 * header are not visible here.
 */
1929 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1931 struct l2cap_conf_opt *opt = *ptr;
1933 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1940 *((u8 *) opt->val) = val;
1944 put_unaligned_le16(val, opt->val);
1948 put_unaligned_le32(val, opt->val);
/* For longer options @val is actually a pointer to the data. */
1952 memcpy(opt->val, (void *) val, len);
1956 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters; field values depend on the channel
 * mode (ERTM uses the negotiated service type, streaming forces
 * best-effort).
 * NOTE(review): elided excerpt - the streaming-case id assignment,
 * the default-case return, and break statements are not visible here.
 */
1959 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1961 struct l2cap_conf_efs efs;
1963 switch (chan->mode) {
1964 case L2CAP_MODE_ERTM:
1965 efs.id = chan->local_id;
1966 efs.stype = chan->local_stype;
1967 efs.msdu = cpu_to_le16(chan->local_msdu);
1968 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1969 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1970 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1973 case L2CAP_MODE_STREAMING:
/* Streaming mode always advertises best-effort service. */
1975 efs.stype = L2CAP_SERV_BESTEFFORT;
1976 efs.msdu = cpu_to_le16(chan->local_msdu);
1977 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1986 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1987 (unsigned long) &efs);
/* Delayed-work handler for the ERTM ack timer: send any pending
 * acknowledgement under the socket lock, then drop the channel
 * reference taken when the timer was armed.
 */
1990 static void l2cap_ack_timeout(struct work_struct *work)
1992 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1995 BT_DBG("chan %p", chan);
/* __l2cap_send_ack expects the socket locked. */
1997 lock_sock(chan->sk);
1998 __l2cap_send_ack(chan);
1999 release_sock(chan->sk);
/* Balance the reference held while the work was queued. */
2001 l2cap_chan_put(chan);
/* Reset ERTM sequencing state and initialise the retransmission,
 * monitor and acknowledgement timers plus the SREJ bookkeeping
 * structures.  Called when a channel enters ERTM operation.
 */
2004 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2006 chan->expected_ack_seq = 0;
2007 chan->unacked_frames = 0;
2008 chan->buffer_seq = 0;
2009 chan->num_acked = 0;
2010 chan->frames_sent = 0;
2012 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2013 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2014 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
/* Queue of out-of-sequence frames held for selective reject. */
2016 skb_queue_head_init(&chan->srej_q);
/* List of outstanding SREJ requests we have sent. */
2018 INIT_LIST_HEAD(&chan->srej_l);
/* Pick the channel mode to request: keep ERTM/streaming only if the
 * remote's feature mask supports it, otherwise fall back to basic
 * mode.
 * NOTE(review): elided excerpt - the return of the supported mode
 * inside the if is not visible here.
 */
2021 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2024 case L2CAP_MODE_STREAMING:
2025 case L2CAP_MODE_ERTM:
2026 if (l2cap_mode_supported(mode, remote_feat_mask))
2030 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only when high-speed support is
 * enabled and the remote advertised the feature. */
2034 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2036 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only when high-speed support
 * is enabled and the remote advertised the feature. */
2039 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2041 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Choose the transmit window: switch to the extended (32-bit) control
 * field when the requested window exceeds the default and the remote
 * supports extended windows; otherwise clamp to the enhanced-control
 * default.
 */
2044 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2046 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2047 __l2cap_ews_supported(chan)) {
2048 /* use extended control field */
2049 set_bit(FLAG_EXT_CTRL, &chan->flags);
2050 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
/* else-branch (elided brace): clamp to the enhanced default. */
2052 chan->tx_win = min_t(u16, chan->tx_win,
2053 L2CAP_DEFAULT_TX_WINDOW);
2054 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configuration Request into @data: MTU option if
 * non-default, then mode-specific RFC / FCS / EFS / EWS options.  On
 * the first request, downgrade the desired mode to one the remote
 * supports.  Returns the total request length (ptr - data, on an
 * elided line).
 * NOTE(review): elided excerpt - 'done' labels/gotos, break
 * statements and the final return are not visible here.
 */
2058 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2060 struct l2cap_conf_req *req = data;
2061 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2062 void *ptr = req->data;
2065 BT_DBG("chan %p", chan);
/* Mode selection only happens on the very first config exchange. */
2067 if (chan->num_conf_req || chan->num_conf_rsp)
2070 switch (chan->mode) {
2071 case L2CAP_MODE_STREAMING:
2072 case L2CAP_MODE_ERTM:
2073 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2076 if (__l2cap_efs_supported(chan))
2077 set_bit(FLAG_EFS_ENABLE, &chan->flags)
2081 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise the MTU when it differs from the spec default. */
2086 if (chan->imtu != L2CAP_DEFAULT_MTU)
2087 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2089 switch (chan->mode) {
2090 case L2CAP_MODE_BASIC:
/* If the remote supports neither ERTM nor streaming there is no
 * point sending an RFC option at all. */
2091 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2092 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2095 rfc.mode = L2CAP_MODE_BASIC;
2097 rfc.max_transmit = 0;
2098 rfc.retrans_timeout = 0;
2099 rfc.monitor_timeout = 0;
2100 rfc.max_pdu_size = 0;
2102 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2103 (unsigned long) &rfc);
2106 case L2CAP_MODE_ERTM:
2107 rfc.mode = L2CAP_MODE_ERTM;
2108 rfc.max_transmit = chan->max_tx;
/* Timeouts are set by the responder, so request zeros. */
2109 rfc.retrans_timeout = 0;
2110 rfc.monitor_timeout = 0;
/* Cap the PDU size so a full I-frame fits in the ACL MTU. */
2112 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2113 L2CAP_EXT_HDR_SIZE -
2116 rfc.max_pdu_size = cpu_to_le16(size);
2118 l2cap_txwin_setup(chan);
2120 rfc.txwin_size = min_t(u16, chan->tx_win,
2121 L2CAP_DEFAULT_TX_WINDOW);
2123 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2124 (unsigned long) &rfc);
2126 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2127 l2cap_add_opt_efs(&ptr, chan);
2129 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to disable FCS when allowed and the remote agreed. */
2132 if (chan->fcs == L2CAP_FCS_NONE ||
2133 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2134 chan->fcs = L2CAP_FCS_NONE;
2135 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
/* Full (non-truncated) window goes in a separate EWS option. */
2138 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2139 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2143 case L2CAP_MODE_STREAMING:
2144 rfc.mode = L2CAP_MODE_STREAMING;
2146 rfc.max_transmit = 0;
2147 rfc.retrans_timeout = 0;
2148 rfc.monitor_timeout = 0;
2150 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2151 L2CAP_EXT_HDR_SIZE -
2154 rfc.max_pdu_size = cpu_to_le16(size);
2156 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2157 (unsigned long) &rfc);
2159 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2160 l2cap_add_opt_efs(&ptr, chan);
2162 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2165 if (chan->fcs == L2CAP_FCS_NONE ||
2166 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2167 chan->fcs = L2CAP_FCS_NONE;
2168 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2173 req->dcid = cpu_to_le16(chan->dcid);
2174 req->flags = cpu_to_le16(0);
2179 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2181 struct l2cap_conf_rsp *rsp = data;
2182 void *ptr = rsp->data;
2183 void *req = chan->conf_req;
2184 int len = chan->conf_len;
2185 int type, hint, olen;
2187 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2188 struct l2cap_conf_efs efs;
2190 u16 mtu = L2CAP_DEFAULT_MTU;
2191 u16 result = L2CAP_CONF_SUCCESS;
2194 BT_DBG("chan %p", chan);
2196 while (len >= L2CAP_CONF_OPT_SIZE) {
2197 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2199 hint = type & L2CAP_CONF_HINT;
2200 type &= L2CAP_CONF_MASK;
2203 case L2CAP_CONF_MTU:
2207 case L2CAP_CONF_FLUSH_TO:
2208 chan->flush_to = val;
2211 case L2CAP_CONF_QOS:
2214 case L2CAP_CONF_RFC:
2215 if (olen == sizeof(rfc))
2216 memcpy(&rfc, (void *) val, olen);
2219 case L2CAP_CONF_FCS:
2220 if (val == L2CAP_FCS_NONE)
2221 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2224 case L2CAP_CONF_EFS:
2226 if (olen == sizeof(efs))
2227 memcpy(&efs, (void *) val, olen);
2230 case L2CAP_CONF_EWS:
2232 return -ECONNREFUSED;
2234 set_bit(FLAG_EXT_CTRL, &chan->flags);
2235 set_bit(CONF_EWS_RECV, &chan->conf_state);
2236 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2237 chan->remote_tx_win = val;
2244 result = L2CAP_CONF_UNKNOWN;
2245 *((u8 *) ptr++) = type;
2250 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2253 switch (chan->mode) {
2254 case L2CAP_MODE_STREAMING:
2255 case L2CAP_MODE_ERTM:
2256 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2257 chan->mode = l2cap_select_mode(rfc.mode,
2258 chan->conn->feat_mask);
2263 if (__l2cap_efs_supported(chan))
2264 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2266 return -ECONNREFUSED;
2269 if (chan->mode != rfc.mode)
2270 return -ECONNREFUSED;
2276 if (chan->mode != rfc.mode) {
2277 result = L2CAP_CONF_UNACCEPT;
2278 rfc.mode = chan->mode;
2280 if (chan->num_conf_rsp == 1)
2281 return -ECONNREFUSED;
2283 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2284 sizeof(rfc), (unsigned long) &rfc);
2287 if (result == L2CAP_CONF_SUCCESS) {
2288 /* Configure output options and let the other side know
2289 * which ones we don't like. */
2291 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2292 result = L2CAP_CONF_UNACCEPT;
2295 set_bit(CONF_MTU_DONE, &chan->conf_state);
2297 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2300 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2301 efs.stype != L2CAP_SERV_NOTRAFIC &&
2302 efs.stype != chan->local_stype) {
2304 result = L2CAP_CONF_UNACCEPT;
2306 if (chan->num_conf_req >= 1)
2307 return -ECONNREFUSED;
2309 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2311 (unsigned long) &efs);
2313 /* Send PENDING Conf Rsp */
2314 result = L2CAP_CONF_PENDING;
2315 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2320 case L2CAP_MODE_BASIC:
2321 chan->fcs = L2CAP_FCS_NONE;
2322 set_bit(CONF_MODE_DONE, &chan->conf_state);
2325 case L2CAP_MODE_ERTM:
2326 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2327 chan->remote_tx_win = rfc.txwin_size;
2329 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2331 chan->remote_max_tx = rfc.max_transmit;
2333 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2335 L2CAP_EXT_HDR_SIZE -
2338 rfc.max_pdu_size = cpu_to_le16(size);
2339 chan->remote_mps = size;
2341 rfc.retrans_timeout =
2342 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2343 rfc.monitor_timeout =
2344 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2346 set_bit(CONF_MODE_DONE, &chan->conf_state);
2348 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2349 sizeof(rfc), (unsigned long) &rfc);
2351 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2352 chan->remote_id = efs.id;
2353 chan->remote_stype = efs.stype;
2354 chan->remote_msdu = le16_to_cpu(efs.msdu);
2355 chan->remote_flush_to =
2356 le32_to_cpu(efs.flush_to);
2357 chan->remote_acc_lat =
2358 le32_to_cpu(efs.acc_lat);
2359 chan->remote_sdu_itime =
2360 le32_to_cpu(efs.sdu_itime);
2361 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2362 sizeof(efs), (unsigned long) &efs);
2366 case L2CAP_MODE_STREAMING:
2367 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2369 L2CAP_EXT_HDR_SIZE -
2372 rfc.max_pdu_size = cpu_to_le16(size);
2373 chan->remote_mps = size;
2375 set_bit(CONF_MODE_DONE, &chan->conf_state);
2377 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2378 sizeof(rfc), (unsigned long) &rfc);
2383 result = L2CAP_CONF_UNACCEPT;
2385 memset(&rfc, 0, sizeof(rfc));
2386 rfc.mode = chan->mode;
2389 if (result == L2CAP_CONF_SUCCESS)
2390 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2392 rsp->scid = cpu_to_le16(chan->dcid);
2393 rsp->result = cpu_to_le16(result);
2394 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configuration Response (@rsp/@len) and build a new
 * Configuration Request into @data incorporating the peer's adjusted
 * values; @result may be rewritten (e.g. to UNACCEPT for a too-small
 * MTU).  On success, commits the negotiated ERTM/streaming parameters
 * to the channel.  Returns the new request length (elided) or
 * -ECONNREFUSED.
 * NOTE(review): elided excerpt - several break lines and the final
 * return are not visible here.
 */
2399 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2401 struct l2cap_conf_req *req = data;
2402 void *ptr = req->data;
2405 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2406 struct l2cap_conf_efs efs;
2408 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2410 while (len >= L2CAP_CONF_OPT_SIZE) {
2411 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2414 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum - unacceptable,
 * fall back to the minimum ourselves. */
2415 if (val < L2CAP_DEFAULT_MIN_MTU) {
2416 *result = L2CAP_CONF_UNACCEPT;
2417 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2420 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2423 case L2CAP_CONF_FLUSH_TO:
2424 chan->flush_to = val;
2425 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2429 case L2CAP_CONF_RFC:
2430 if (olen == sizeof(rfc))
2431 memcpy(&rfc, (void *)val, olen);
/* STATE2 devices may not change mode after the fact. */
2433 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2434 rfc.mode != chan->mode)
2435 return -ECONNREFUSED;
2439 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2440 sizeof(rfc), (unsigned long) &rfc);
2443 case L2CAP_CONF_EWS:
2444 chan->tx_win = min_t(u16, val,
2445 L2CAP_DEFAULT_EXT_WINDOW);
2446 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2450 case L2CAP_CONF_EFS:
2451 if (olen == sizeof(efs))
2452 memcpy(&efs, (void *)val, olen);
2454 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2455 efs.stype != L2CAP_SERV_NOTRAFIC &&
2456 efs.stype != chan->local_stype)
2457 return -ECONNREFUSED;
2459 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2460 sizeof(efs), (unsigned long) &efs);
/* A basic-mode channel cannot be coerced into another mode. */
2465 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2466 return -ECONNREFUSED;
2468 chan->mode = rfc.mode;
/* Commit negotiated parameters once the exchange is (near) final. */
2470 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2472 case L2CAP_MODE_ERTM:
2473 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2474 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2475 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2477 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2478 chan->local_msdu = le16_to_cpu(efs.msdu);
2479 chan->local_sdu_itime =
2480 le32_to_cpu(efs.sdu_itime);
2481 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2482 chan->local_flush_to =
2483 le32_to_cpu(efs.flush_to);
2487 case L2CAP_MODE_STREAMING:
2488 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2492 req->dcid = cpu_to_le16(chan->dcid);
2493 req->flags = cpu_to_le16(0x0000);
/* Fill in a bare Configuration Response header (scid/result/flags)
 * with no options.  Returns the response length (on an elided line).
 */
2498 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2500 struct l2cap_conf_rsp *rsp = data;
2501 void *ptr = rsp->data;
2503 BT_DBG("chan %p", chan);
/* Our response identifies the channel by the peer's CID (our dcid). */
2505 rsp->scid = cpu_to_le16(chan->dcid);
2506 rsp->result = cpu_to_le16(result);
2507 rsp->flags = cpu_to_le16(flags);
/* Complete a previously deferred incoming connection: send the
 * successful Connection Response and, if we have not already done so,
 * kick off configuration by sending our Configuration Request.
 */
2512 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2514 struct l2cap_conn_rsp rsp;
2515 struct l2cap_conn *conn = chan->conn;
2518 rsp.scid = cpu_to_le16(chan->dcid);
2519 rsp.dcid = cpu_to_le16(chan->scid);
2520 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2521 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reuse the ident from the peer's original Connection Request. */
2522 l2cap_send_cmd(conn, chan->ident,
2523 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only the first caller sends the Configuration Request. */
2525 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2528 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2529 l2cap_build_conf_req(chan, buf), buf);
2530 chan->num_conf_req++;
/* Extract the RFC option from a successful Configuration Response and
 * commit the negotiated timeouts / MPS to the channel.  If the remote
 * omitted the RFC option (misbehaving device), substitute sane
 * defaults rather than leaving the fields uninitialised.
 * NOTE(review): elided excerpt - the goto-done control flow that
 * distinguishes "option found" from the default path is not visible
 * here.
 */
2533 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2537 struct l2cap_conf_rfc rfc;
2539 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming modes carry RFC parameters. */
2541 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2544 while (len >= L2CAP_CONF_OPT_SIZE) {
2545 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2548 case L2CAP_CONF_RFC:
2549 if (olen == sizeof(rfc))
2550 memcpy(&rfc, (void *)val, olen);
2555 /* Use sane default values in case a misbehaving remote device
2556 * did not send an RFC option.
2558 rfc.mode = chan->mode;
2559 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2560 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2561 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2563 BT_ERR("Expected RFC option was not found, using defaults");
2567 case L2CAP_MODE_ERTM:
2568 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2569 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2570 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2572 case L2CAP_MODE_STREAMING:
2573 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject.  If it rejects our outstanding Information
 * Request (command-not-understood from an old stack), cancel the info
 * timer, mark the feature-mask exchange done and resume pending
 * connections anyway.
 */
2577 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2579 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2581 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
/* Match the reject against our in-flight Information Request. */
2584 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2585 cmd->ident == conn->info_ident) {
2586 __cancel_delayed_work(&conn->info_timer);
2588 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2589 conn->info_ident = 0;
/* Proceed with channels that were waiting on the feature mask. */
2591 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: locate a listening channel
 * for the PSM, run security / backlog / duplicate-CID checks, create
 * the child channel, and answer with SUCCESS, PEND (authentication or
 * deferred accept) or an error result.  May also trigger the initial
 * Information Request and, on success, our Configuration Request.
 * NOTE(review): elided excerpt - several goto labels (response /
 * sendresp), lock/unlock pairs and the dcid assignment from the new
 * channel are not visible here.
 */
2597 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2599 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2600 struct l2cap_conn_rsp rsp;
2601 struct l2cap_chan *chan = NULL, *pchan;
2602 struct sock *parent, *sk = NULL;
2603 int result, status = L2CAP_CS_NO_INFO;
2605 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2606 __le16 psm = req->psm;
2608 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2610 /* Check if we have socket listening on psm */
2611 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2613 result = L2CAP_CR_BAD_PSM;
2621 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2622 if (psm != cpu_to_le16(0x0001) &&
2623 !hci_conn_check_link_mode(conn->hcon)) {
2624 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2625 result = L2CAP_CR_SEC_BLOCK;
2629 result = L2CAP_CR_NO_MEM;
2631 /* Check for backlog size */
2632 if (sk_acceptq_is_full(parent)) {
2633 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2637 chan = pchan->ops->new_connection(pchan->data);
2643 /* Check if we already have channel with that dcid */
2644 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2645 sock_set_flag(sk, SOCK_ZAPPED);
2646 chan->ops->close(chan->data);
2650 hci_conn_hold(conn->hcon);
2652 bacpy(&bt_sk(sk)->src, conn->src);
2653 bacpy(&bt_sk(sk)->dst, conn->dst);
2657 bt_accept_enqueue(parent, sk);
2659 l2cap_chan_add(conn, chan);
2663 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident for a possible deferred response. */
2665 chan->ident = cmd->ident;
2667 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2668 if (l2cap_chan_check_security(chan)) {
/* Userspace asked to defer accept decisions. */
2669 if (bt_sk(sk)->defer_setup) {
2670 l2cap_state_change(chan, BT_CONNECT2);
2671 result = L2CAP_CR_PEND;
2672 status = L2CAP_CS_AUTHOR_PEND;
2673 parent->sk_data_ready(parent, 0);
2675 l2cap_state_change(chan, BT_CONFIG);
2676 result = L2CAP_CR_SUCCESS;
2677 status = L2CAP_CS_NO_INFO;
2680 l2cap_state_change(chan, BT_CONNECT2);
2681 result = L2CAP_CR_PEND;
2682 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask not yet known: answer PEND until it arrives. */
2685 l2cap_state_change(chan, BT_CONNECT2);
2686 result = L2CAP_CR_PEND;
2687 status = L2CAP_CS_NO_INFO;
2691 release_sock(parent);
2694 rsp.scid = cpu_to_le16(scid);
2695 rsp.dcid = cpu_to_le16(dcid);
2696 rsp.result = cpu_to_le16(result);
2697 rsp.status = cpu_to_le16(status);
2698 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connection on this link: query the peer's feature mask. */
2700 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2701 struct l2cap_info_req info;
2702 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2704 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2705 conn->info_ident = l2cap_get_ident(conn);
2707 schedule_delayed_work(&conn->info_timer,
2708 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2710 l2cap_send_cmd(conn, conn->info_ident,
2711 L2CAP_INFO_REQ, sizeof(info), &info);
/* Connection accepted immediately: start configuration now. */
2714 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2715 result == L2CAP_CR_SUCCESS) {
2717 set_bit(CONF_REQ_SENT, &chan->conf_state);
2718 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2719 l2cap_build_conf_req(chan, buf), buf);
2720 chan->num_conf_req++;
/* Handle a Connection Response to our Connection Request: look up the
 * channel by scid (or by the command ident if no dcid was assigned
 * yet) and act on the result - SUCCESS moves to BT_CONFIG and sends
 * our Configuration Request, PEND just marks the channel, anything
 * else tears the channel down.
 * NOTE(review): elided excerpt - the switch header on result, the
 * L2CAP_CR_PEND case label, the dcid assignment and break lines are
 * not visible here.
 */
2726 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2728 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2729 u16 scid, dcid, result, status;
2730 struct l2cap_chan *chan;
2734 scid = __le16_to_cpu(rsp->scid);
2735 dcid = __le16_to_cpu(rsp->dcid);
2736 result = __le16_to_cpu(rsp->result);
2737 status = __le16_to_cpu(rsp->status);
2739 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2742 chan = l2cap_get_chan_by_scid(conn, scid);
/* Fallback: the peer may not have echoed our scid - match by ident. */
2746 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2754 case L2CAP_CR_SUCCESS:
2755 l2cap_state_change(chan, BT_CONFIG);
2758 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2760 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2763 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2764 l2cap_build_conf_req(chan, req), req);
2765 chan->num_conf_req++;
/* (elided case, presumably L2CAP_CR_PEND) just note the wait. */
2769 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Any other result: refuse and delete the channel. */
2773 l2cap_chan_del(chan, ECONNREFUSED);
/* Apply the FCS default: basic mode never uses FCS; ERTM/streaming
 * use CRC16 unless the peer explicitly asked to disable it (the
 * CONF_NO_FCS_RECV bit set while parsing its config request).
 */
2781 static inline void set_default_fcs(struct l2cap_chan *chan)
2783 /* FCS is enabled only in ERTM or streaming mode, if one or both
2786 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2787 chan->fcs = L2CAP_FCS_NONE;
2788 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2789 chan->fcs = L2CAP_FCS_CRC16;
/* Handle a Configuration Request: validate channel state, accumulate
 * the (possibly multi-fragment) option data into chan->conf_req, and
 * when complete, parse it, send our response, and - once both
 * directions are configured - bring the channel up (BT_CONNECTED,
 * ERTM init).  May also send our own Configuration Request if not
 * yet sent.
 * NOTE(review): elided excerpt - unlock/goto labels, the
 * continuation-flag 0x0001 handling boundaries and several returns
 * are not visible here.
 */
2792 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2794 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2797 struct l2cap_chan *chan;
2801 dcid = __le16_to_cpu(req->dcid);
2802 flags = __le16_to_cpu(req->flags);
2804 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2806 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG / BT_CONNECT2; otherwise reject
 * with INVALID_CID. */
2812 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2813 struct l2cap_cmd_rej_cid rej;
2815 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2816 rej.scid = cpu_to_le16(chan->scid);
2817 rej.dcid = cpu_to_le16(chan->dcid);
2819 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2824 /* Reject if config buffer is too small. */
2825 len = cmd_len - sizeof(*req);
2826 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2827 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2828 l2cap_build_conf_rsp(chan, rsp,
2829 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the accumulated request. */
2834 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2835 chan->conf_len += len;
2837 if (flags & 0x0001) {
2838 /* Incomplete config. Send empty response. */
2839 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2840 l2cap_build_conf_rsp(chan, rsp,
2841 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2845 /* Complete config. */
2846 len = l2cap_parse_conf_req(chan, rsp);
/* Negative len = unrecoverable disagreement: disconnect. */
2848 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2852 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2853 chan->num_conf_rsp++;
2855 /* Reset config buffer. */
2858 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: the channel is ready for data. */
2861 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2862 set_default_fcs(chan);
2864 l2cap_state_change(chan, BT_CONNECTED);
2866 chan->next_tx_seq = 0;
2867 chan->expected_tx_seq = 0;
2868 skb_queue_head_init(&chan->tx_q);
2869 if (chan->mode == L2CAP_MODE_ERTM)
2870 l2cap_ertm_init(chan);
2872 l2cap_chan_ready(sk);
2876 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2878 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2879 l2cap_build_conf_req(chan, buf), buf);
2880 chan->num_conf_req++;
2883 /* Got Conf Rsp PENDING from remote side and asume we sent
2884 Conf Rsp PENDING in the code above */
2885 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2886 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2888 /* check compatibility */
2890 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2891 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2893 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2894 l2cap_build_conf_rsp(chan, rsp,
2895 L2CAP_CONF_SUCCESS, 0x0000), rsp);
/* Handle a Configuration Response to our Configuration Request.
 * SUCCESS commits the negotiated RFC values; PENDING may trigger a
 * final SUCCESS response of our own; UNACCEPT re-negotiates with the
 * peer's counter-proposal (bounded by L2CAP_CONF_MAX_CONF_RSP);
 * anything else disconnects.  When both config directions are done,
 * bring the channel up.
 * NOTE(review): elided excerpt - the switch header on result, goto
 * labels, the continuation-flag check and several returns are not
 * visible here.
 */
2903 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2905 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2906 u16 scid, flags, result;
2907 struct l2cap_chan *chan;
2909 int len = cmd->len - sizeof(*rsp);
2911 scid = __le16_to_cpu(rsp->scid);
2912 flags = __le16_to_cpu(rsp->flags);
2913 result = __le16_to_cpu(rsp->result);
2915 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2916 scid, flags, result);
2918 chan = l2cap_get_chan_by_scid(conn, scid);
2925 case L2CAP_CONF_SUCCESS:
2926 l2cap_conf_rfc_get(chan, rsp->data, len);
2927 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2930 case L2CAP_CONF_PENDING:
2931 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
/* If we also answered PENDING, finish the exchange now. */
2933 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2936 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2939 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2943 /* check compatibility */
2945 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2946 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2948 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2949 l2cap_build_conf_rsp(chan, buf,
2950 L2CAP_CONF_SUCCESS, 0x0000), buf);
2954 case L2CAP_CONF_UNACCEPT:
/* Retry negotiation while under the response limit. */
2955 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Peer's counter-options must fit our request buffer. */
2958 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2959 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2963 /* throw out any old stored conf requests */
2964 result = L2CAP_CONF_SUCCESS;
2965 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2968 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2972 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2973 L2CAP_CONF_REQ, len, req);
2974 chan->num_conf_req++;
2975 if (result != L2CAP_CONF_SUCCESS)
/* Default: unrecoverable failure - flag error and disconnect. */
2981 sk->sk_err = ECONNRESET;
2982 __set_chan_timer(chan,
2983 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
2984 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2991 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Both directions configured: the channel is ready for data. */
2993 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2994 set_default_fcs(chan);
2996 l2cap_state_change(chan, BT_CONNECTED);
2997 chan->next_tx_seq = 0;
2998 chan->expected_tx_seq = 0;
2999 skb_queue_head_init(&chan->tx_q);
3000 if (chan->mode == L2CAP_MODE_ERTM)
3001 l2cap_ertm_init(chan);
3003 l2cap_chan_ready(sk);
/* Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, shut the socket down, delete the channel and close it via
 * the channel ops.
 * NOTE(review): elided excerpt - channel lookup failure path and
 * lock/unlock lines are not visible here.
 */
3011 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3013 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3014 struct l2cap_disconn_rsp rsp;
3016 struct l2cap_chan *chan;
3019 scid = __le16_to_cpu(req->scid);
3020 dcid = __le16_to_cpu(req->dcid);
3022 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* Their dcid is our scid - look the channel up by it. */
3024 chan = l2cap_get_chan_by_scid(conn, dcid);
3030 rsp.dcid = cpu_to_le16(chan->scid);
3031 rsp.scid = cpu_to_le16(chan->dcid);
3032 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3034 sk->sk_shutdown = SHUTDOWN_MASK;
3036 l2cap_chan_del(chan, ECONNRESET);
3039 chan->ops->close(chan->data);
/* Handle a Disconnection Response to our request: delete the channel
 * (err 0 - this is a clean, locally initiated disconnect) and close
 * it via the channel ops.
 * NOTE(review): elided excerpt - lookup failure path and lock lines
 * are not visible here.
 */
3043 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3045 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3047 struct l2cap_chan *chan;
3050 scid = __le16_to_cpu(rsp->scid);
3051 dcid = __le16_to_cpu(rsp->dcid);
3053 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3055 chan = l2cap_get_chan_by_scid(conn, scid);
3061 l2cap_chan_del(chan, 0);
3064 chan->ops->close(chan->data);
/* Handle an Information Request: answer feature-mask and
 * fixed-channels queries with our capabilities (the advertised
 * feature set depends on the compile/runtime high-speed and ERTM
 * options, on elided conditionals), and NOTSUPP for anything else.
 * NOTE(review): elided excerpt - the buf declarations, the #ifdef /
 * enable_hs conditionals guarding the feature-mask additions, and
 * the A2MP condition are not fully visible here.
 */
3068 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3070 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3073 type = __le16_to_cpu(req->type);
3075 BT_DBG("type 0x%4.4x", type);
3077 if (type == L2CAP_IT_FEAT_MASK) {
3079 u32 feat_mask = l2cap_feat_mask;
3080 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3081 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3082 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3084 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3087 feat_mask |= L2CAP_FEAT_EXT_FLOW
3088 | L2CAP_FEAT_EXT_WINDOW;
3090 put_unaligned_le32(feat_mask, rsp->data);
3091 l2cap_send_cmd(conn, cmd->ident,
3092 L2CAP_INFO_RSP, sizeof(buf), buf);
3093 } else if (type == L2CAP_IT_FIXED_CHAN) {
3095 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Advertise the A2MP fixed channel only when enabled (condition
 * elided above). */
3098 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3100 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3102 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3103 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3104 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3105 l2cap_send_cmd(conn, cmd->ident,
3106 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: reply NOTSUPP. */
3108 struct l2cap_info_rsp rsp;
3109 rsp.type = cpu_to_le16(type);
3110 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3111 l2cap_send_cmd(conn, cmd->ident,
3112 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Process an L2CAP Information Response to a request we issued.
 * Info exchanges are not tied to a channel, so validate the ident and
 * the not-yet-done state first. On a FEAT_MASK answer that advertises
 * fixed channels, chain a second request for the fixed-channel map;
 * otherwise mark the exchange done and kick pending connections.
 * NOTE(review): excerpt omits early-return lines after the checks. */
3118 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3120 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3123 type = __le16_to_cpu(rsp->type);
3124 result = __le16_to_cpu(rsp->result);
3126 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3128 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3129 if (cmd->ident != conn->info_ident ||
3130 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
/* A valid answer arrived; stop the info-request timeout. */
3133 __cancel_delayed_work(&conn->info_timer);
3135 if (result != L2CAP_IR_SUCCESS) {
3136 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3137 conn->info_ident = 0;
/* Even on failure, unblock channels waiting on the info exchange. */
3139 l2cap_conn_start(conn);
3144 if (type == L2CAP_IT_FEAT_MASK) {
3145 conn->feat_mask = get_unaligned_le32(rsp->data);
3147 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Peer supports fixed channels: ask for the fixed-channel map next. */
3148 struct l2cap_info_req req;
3149 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3151 conn->info_ident = l2cap_get_ident(conn);
3153 l2cap_send_cmd(conn, conn->info_ident,
3154 L2CAP_INFO_REQ, sizeof(req), &req);
3156 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3157 conn->info_ident = 0;
3159 l2cap_conn_start(conn);
3161 } else if (type == L2CAP_IT_FIXED_CHAN) {
/* Fixed-channel answer completes the whole exchange. */
3162 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3163 conn->info_ident = 0;
3165 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request. Currently a stub that always
 * refuses with L2CAP_CR_NO_MEM after validating the command length.
 * NOTE(review): unlike other responses in this file, result/status are
 * assigned without cpu_to_le16 — verify endianness on big-endian hosts. */
3171 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3172 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3175 struct l2cap_create_chan_req *req = data;
3176 struct l2cap_create_chan_rsp rsp;
/* Reject malformed requests whose length does not match the PDU. */
3179 if (cmd_len != sizeof(*req))
3185 psm = le16_to_cpu(req->psm);
3186 scid = le16_to_cpu(req->scid);
3188 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3190 /* Placeholder: Always reject */
3192 rsp.scid = cpu_to_le16(scid);
3193 rsp.result = L2CAP_CR_NO_MEM;
3194 rsp.status = L2CAP_CS_NO_INFO;
3196 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response has the same layout and semantics as a
 * regular Connection Response, so delegate to l2cap_connect_rsp(). */
3202 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3203 struct l2cap_cmd_hdr *cmd, void *data)
3205 BT_DBG("conn %p", conn);
3207 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for @icid with the given @result, reusing
 * the requester's @ident so the peer can match it to its request. */
3210 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3211 u16 icid, u16 result)
3213 struct l2cap_move_chan_rsp rsp;
3215 BT_DBG("icid %d, result %d", icid, result);
3217 rsp.icid = cpu_to_le16(icid);
3218 rsp.result = cpu_to_le16(result);
3220 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirm for @icid. A fresh ident is allocated and
 * remembered on the channel so the later Confirm Response can be matched.
 * NOTE(review): callers may pass chan == NULL (see the move-rsp handler
 * below), yet chan->ident is written unconditionally — verify. */
3223 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3224 struct l2cap_chan *chan, u16 icid, u16 result)
3226 struct l2cap_move_chan_cfm cfm;
3229 BT_DBG("icid %d, result %d", icid, result);
3231 ident = l2cap_get_ident(conn);
3233 chan->ident = ident;
3235 cfm.icid = cpu_to_le16(icid);
3236 cfm.result = cpu_to_le16(result);
3238 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Acknowledge a Move Channel Confirm: echo @icid back using the
 * confirmer's @ident. This PDU carries no result field. */
3241 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3244 struct l2cap_move_chan_cfm_rsp rsp;
3246 BT_DBG("icid %d", icid);
3248 rsp.icid = cpu_to_le16(icid);
3249 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request. Channel moves are not implemented yet,
 * so after validating the PDU length we always answer NOT_ALLOWED. */
3252 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3253 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3255 struct l2cap_move_chan_req *req = data;
3257 u16 result = L2CAP_MR_NOT_ALLOWED;
3259 if (cmd_len != sizeof(*req))
3262 icid = le16_to_cpu(req->icid);
3264 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3269 /* Placeholder: Always refuse */
3270 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response. Since moves are unimplemented, always
 * send back an UNCONFIRMED Move Channel Confirm.
 * NOTE(review): NULL is passed as the chan argument while the cfm helper
 * dereferences chan->ident — confirm against the cfm sender above. */
3275 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3276 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3278 struct l2cap_move_chan_rsp *rsp = data;
3281 if (cmd_len != sizeof(*rsp))
3284 icid = le16_to_cpu(rsp->icid);
3285 result = le16_to_cpu(rsp->result);
3287 BT_DBG("icid %d, result %d", icid, result);
3289 /* Placeholder: Always unconfirmed */
3290 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirm: validate the length and acknowledge
 * with a Confirm Response. The result itself is only logged for now. */
3295 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3296 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3298 struct l2cap_move_chan_cfm *cfm = data;
3301 if (cmd_len != sizeof(*cfm))
3304 icid = le16_to_cpu(cfm->icid);
3305 result = le16_to_cpu(cfm->result);
3307 BT_DBG("icid %d, result %d", icid, result);
3309 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirm Response. Nothing to do yet beyond the
 * length check and a debug trace of the icid. */
3314 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3315 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3317 struct l2cap_move_chan_cfm_rsp *rsp = data;
3320 if (cmd_len != sizeof(*rsp))
3323 icid = le16_to_cpu(rsp->icid);
3325 BT_DBG("icid %d", icid);
/* Validate LE connection parameters against the spec-mandated ranges:
 * interval min/max within [6, 3200] and ordered, supervision timeout
 * multiplier within [10, 3200], timeout strictly larger than 8*max
 * intervals, and slave latency capped both absolutely (499) and so the
 * link cannot outlive the supervision timeout. */
3330 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3335 if (min > max || min < 6 || max > 3200)
3338 if (to_multiplier < 10 || to_multiplier > 3200)
/* The supervision timeout must exceed the maximum connection interval. */
3341 if (max >= to_multiplier * 8)
3344 max_latency = (to_multiplier * 8 / max) - 1;
3345 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request (slave -> master).
 * Only a master may act on it; the parameters are range-checked and the
 * accept/reject verdict is returned, then the controller is asked to
 * apply accepted parameters via hci_le_conn_update().
 * NOTE(review): excerpt omits the early-return / err lines. */
3351 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3352 struct l2cap_cmd_hdr *cmd, u8 *data)
3354 struct hci_conn *hcon = conn->hcon;
3355 struct l2cap_conn_param_update_req *req;
3356 struct l2cap_conn_param_update_rsp rsp;
3357 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master of the link may update connection parameters. */
3360 if (!(hcon->link_mode & HCI_LM_MASTER))
3363 cmd_len = __le16_to_cpu(cmd->len);
3364 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3367 req = (struct l2cap_conn_param_update_req *) data;
3368 min = __le16_to_cpu(req->min);
3369 max = __le16_to_cpu(req->max);
3370 latency = __le16_to_cpu(req->latency);
3371 to_multiplier = __le16_to_cpu(req->to_multiplier);
3373 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3374 min, max, latency, to_multiplier);
3376 memset(&rsp, 0, sizeof(rsp));
3378 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3380 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3382 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3384 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Apply accepted parameters at the HCI level. */
3388 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler based on the
 * command code. Echo requests are answered inline by reflecting the
 * payload; unknown codes are logged as errors. */
3393 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3394 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3398 switch (cmd->code) {
3399 case L2CAP_COMMAND_REJ:
3400 l2cap_command_rej(conn, cmd, data);
3403 case L2CAP_CONN_REQ:
3404 err = l2cap_connect_req(conn, cmd, data);
3407 case L2CAP_CONN_RSP:
3408 err = l2cap_connect_rsp(conn, cmd, data);
3411 case L2CAP_CONF_REQ:
3412 err = l2cap_config_req(conn, cmd, cmd_len, data);
3415 case L2CAP_CONF_RSP:
3416 err = l2cap_config_rsp(conn, cmd, data);
3419 case L2CAP_DISCONN_REQ:
3420 err = l2cap_disconnect_req(conn, cmd, data);
3423 case L2CAP_DISCONN_RSP:
3424 err = l2cap_disconnect_rsp(conn, cmd, data);
3427 case L2CAP_ECHO_REQ:
/* Echo: simply mirror the received payload back to the sender. */
3428 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3431 case L2CAP_ECHO_RSP:
3434 case L2CAP_INFO_REQ:
3435 err = l2cap_information_req(conn, cmd, data);
3438 case L2CAP_INFO_RSP:
3439 err = l2cap_information_rsp(conn, cmd, data);
3442 case L2CAP_CREATE_CHAN_REQ:
3443 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3446 case L2CAP_CREATE_CHAN_RSP:
3447 err = l2cap_create_channel_rsp(conn, cmd, data);
3450 case L2CAP_MOVE_CHAN_REQ:
3451 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3454 case L2CAP_MOVE_CHAN_RSP:
3455 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3458 case L2CAP_MOVE_CHAN_CFM:
3459 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3462 case L2CAP_MOVE_CHAN_CFM_RSP:
3463 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3467 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command. Only the connection-parameter
 * update request is handled; rejects and update responses are ignored,
 * and anything else is logged as an unknown command. */
3475 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3476 struct l2cap_cmd_hdr *cmd, u8 *data)
3478 switch (cmd->code) {
3479 case L2CAP_COMMAND_REJ:
3482 case L2CAP_CONN_PARAM_UPDATE_REQ:
3483 return l2cap_conn_param_update_req(conn, cmd, data);
3485 case L2CAP_CONN_PARAM_UPDATE_RSP:
3489 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signaling channel payload: iterate over the packed command
 * PDUs in @skb, validate each header (length fits, ident non-zero),
 * route to the LE or BR/EDR dispatcher depending on link type, and send
 * a Command Reject when a handler reports an error.
 * NOTE(review): excerpt omits the loop body's len/data advance after
 * each command and the trailing cleanup lines. */
3494 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3495 struct sk_buff *skb)
3497 u8 *data = skb->data;
3499 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signaling traffic. */
3502 l2cap_raw_recv(conn, skb);
3504 while (len >= L2CAP_CMD_HDR_SIZE) {
3506 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3507 data += L2CAP_CMD_HDR_SIZE;
3508 len -= L2CAP_CMD_HDR_SIZE;
3510 cmd_len = le16_to_cpu(cmd.len);
3512 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* An ident of 0 is invalid per spec; also reject truncated payloads. */
3514 if (cmd_len > len || !cmd.ident) {
3515 BT_DBG("corrupted command");
3519 if (conn->hcon->type == LE_LINK)
3520 err = l2cap_le_sig_cmd(conn, &cmd, data);
3522 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3525 struct l2cap_cmd_rej_unk rej;
3527 BT_ERR("Wrong link type (%d)", err);
3529 /* FIXME: Map err to a valid reason */
3530 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3531 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the trailing CRC16 FCS of an ERTM/streaming frame.
 * The CRC covers the L2CAP header (enhanced or extended control size)
 * plus the payload; the FCS bytes are trimmed off before comparing. */
3541 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3543 u16 our_fcs, rcv_fcs;
/* Header size depends on whether extended (32-bit) control is in use. */
3546 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3547 hdr_size = L2CAP_EXT_HDR_SIZE;
3549 hdr_size = L2CAP_ENH_HDR_SIZE;
3551 if (chan->fcs == L2CAP_FCS_CRC16) {
/* After the trim, skb->len points exactly at the received FCS bytes. */
3552 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3553 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3554 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3556 if (our_fcs != rcv_fcs)
/* Answer a poll by sending whichever frame fits the current state:
 * an RNR when locally busy, pending I-frames otherwise, and a plain RR
 * if nothing was sent so the peer still gets its F-bit acknowledgment. */
3562 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3566 chan->frames_sent = 0;
3568 control |= __set_reqseq(chan, chan->buffer_seq);
3570 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3571 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3572 l2cap_send_sframe(chan, control);
3573 set_bit(CONN_RNR_SENT, &chan->conn_state);
3576 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3577 l2cap_retransmit_frames(chan);
3579 l2cap_ertm_send(chan);
/* Nothing queued and not busy: send an RR so the poll is still acked. */
3581 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3582 chan->frames_sent == 0) {
3583 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3584 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq offset relative to buffer_seq. Duplicate sequence
 * numbers are detected via the equality check; otherwise the frame is
 * placed before the first queued frame with a larger offset, or appended
 * at the tail. The skb's control-block caches tx_seq and SAR bits. */
3588 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3590 struct sk_buff *next_skb;
3591 int tx_seq_offset, next_tx_seq_offset;
3593 bt_cb(skb)->tx_seq = tx_seq;
3594 bt_cb(skb)->sar = sar;
3596 next_skb = skb_peek(&chan->srej_q);
/* Offsets are computed modulo the sequence space, anchored at buffer_seq. */
3598 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3601 if (bt_cb(next_skb)->tx_seq == tx_seq)
3604 next_tx_seq_offset = __seq_offset(chan,
3605 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3607 if (next_tx_seq_offset > tx_seq_offset) {
3608 __skb_queue_before(&chan->srej_q, next_skb, skb);
3612 if (skb_queue_is_last(&chan->srej_q, next_skb))
3615 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3618 __skb_queue_tail(&chan->srej_q, skb);
/* Append @new_frag to @skb's frag_list, tracking the tail in *last_frag
 * to avoid walking the list, and keep skb's len/data_len/truesize
 * accounting consistent with the added fragment. */
3623 static void append_skb_frag(struct sk_buff *skb,
3624 struct sk_buff *new_frag, struct sk_buff **last_frag)
3626 /* skb->len reflects data in skb as well as all fragments
3627 * skb->data_len reflects only data in fragments
3629 if (!skb_has_frag_list(skb))
3630 skb_shinfo(skb)->frag_list = new_frag;
3632 new_frag->next = NULL;
3634 (*last_frag)->next = new_frag;
3635 *last_frag = new_frag;
3637 skb->len += new_frag->len;
3638 skb->data_len += new_frag->len;
3639 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from its SAR-tagged segments. Unsegmented frames go
 * straight to ops->recv. A START segment carries the total SDU length
 * (checked against the channel's imtu) and seeds chan->sdu; CONTINUE and
 * END segments are appended via append_skb_frag, with size bounds
 * enforced at each step. On END, the complete SDU is delivered and the
 * reassembly state reset; the error path frees the partial SDU.
 * NOTE(review): excerpt omits several guard/else/return lines between
 * the visible statements. */
3642 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3646 switch (__get_ctrl_sar(chan, control)) {
3647 case L2CAP_SAR_UNSEGMENTED:
3651 err = chan->ops->recv(chan->data, skb);
3654 case L2CAP_SAR_START:
/* First segment carries the total SDU length ahead of the payload. */
3658 chan->sdu_len = get_unaligned_le16(skb->data);
3659 skb_pull(skb, L2CAP_SDULEN_SIZE);
3661 if (chan->sdu_len > chan->imtu) {
3666 if (skb->len >= chan->sdu_len)
3670 chan->sdu_last_frag = skb;
3676 case L2CAP_SAR_CONTINUE:
3680 append_skb_frag(chan->sdu, skb,
3681 &chan->sdu_last_frag);
/* A middle segment must not already complete (or exceed) the SDU. */
3684 if (chan->sdu->len >= chan->sdu_len)
3694 append_skb_frag(chan->sdu, skb,
3695 &chan->sdu_last_frag);
/* The END segment must make the SDU exactly the announced length. */
3698 if (chan->sdu->len != chan->sdu_len)
3701 err = chan->ops->recv(chan->data, chan->sdu);
3704 /* Reassembly complete */
3706 chan->sdu_last_frag = NULL;
/* Error path: drop any partially assembled SDU and reset state. */
3714 kfree_skb(chan->sdu);
3716 chan->sdu_last_frag = NULL;
/* Enter the ERTM local-busy state: flag the condition and arm the ack
 * timer so an RNR gets sent to throttle the peer. */
3723 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3725 BT_DBG("chan %p, Enter local busy", chan);
3727 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3729 __set_ack_timer(chan);
/* Leave the ERTM local-busy state. If we previously sent an RNR, poll
 * the peer with an RR (P-bit set), start the monitor timer and wait for
 * the F-bit before clearing the busy/RNR flags.
 * NOTE(review): excerpt omits a goto/label between the two halves. */
3732 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3736 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
/* Poll the peer: RR with P-bit, ack'ing up to buffer_seq. */
3739 control = __set_reqseq(chan, chan->buffer_seq);
3740 control |= __set_ctrl_poll(chan);
3741 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3742 l2cap_send_sframe(chan, control);
3743 chan->retry_count = 1;
3745 __clear_retrans_timer(chan);
3746 __set_monitor_timer(chan);
3748 set_bit(CONN_WAIT_F, &chan->conn_state);
3751 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3752 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3754 BT_DBG("chan %p, Exit local busy", chan);
/* Public entry for the socket layer to toggle receive-side busy state;
 * only meaningful in ERTM mode. */
3757 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3759 if (chan->mode == L2CAP_MODE_ERTM) {
3761 l2cap_ertm_enter_local_busy(chan);
3763 l2cap_ertm_exit_local_busy(chan);
/* After a selectively-rejected frame arrives, drain the SREJ queue:
 * deliver consecutive in-order frames (starting at @tx_seq) to the SDU
 * reassembler until a gap is hit or we go locally busy, advancing
 * buffer_seq_srej as each frame is consumed. A reassembly error tears
 * the connection down with ECONNRESET. */
3767 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3769 struct sk_buff *skb;
3772 while ((skb = skb_peek(&chan->srej_q)) &&
3773 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Queue is sorted; a mismatch means the next expected frame is missing. */
3776 if (bt_cb(skb)->tx_seq != tx_seq)
3779 skb = skb_dequeue(&chan->srej_q);
3780 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3781 err = l2cap_reassemble_sdu(chan, skb, control);
3784 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3788 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3789 tx_seq = __next_seq(chan, tx_seq);
/* Walk the pending-SREJ list up to (and including) @tx_seq, re-sending
 * an SREJ S-frame for each entry and rotating the entries to the tail
 * of the list. Stops once the matching tx_seq entry is processed. */
3793 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3795 struct srej_list *l, *tmp;
3798 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3799 if (l->tx_seq == tx_seq) {
3804 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3805 control |= __set_reqseq(chan, l->tx_seq);
3806 l2cap_send_sframe(chan, control);
/* Keep the entry, but move it to the tail to preserve order. */
3808 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing frame between expected_tx_seq and @tx_seq,
 * recording each requested sequence number in chan->srej_l so the gap
 * can later be verified as filled. Advances expected_tx_seq past the
 * received frame when done. GFP_ATOMIC because this runs in the receive
 * path. Returns an error if the bookkeeping allocation fails. */
3812 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3814 struct srej_list *new;
3817 while (tx_seq != chan->expected_tx_seq) {
3818 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3819 control |= __set_reqseq(chan, chan->expected_tx_seq);
3820 l2cap_send_sframe(chan, control);
3822 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3826 new->tx_seq = chan->expected_tx_seq;
3828 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3830 list_add_tail(&new->list, &chan->srej_l);
/* Skip over the frame that actually arrived. */
3833 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive path. Handles, in order: F-bit processing
 * while waiting for a poll answer; acking the peer's req_seq; rejecting
 * frames outside the tx window; local-busy suppression; the in-order
 * fast path; and the two SREJ flows (already-in-recovery vs. entering
 * recovery on a new gap). In-order frames are reassembled and acked in
 * batches of tx_win/6 + 1.
 * NOTE(review): excerpt omits gotos/labels and several return lines;
 * comments follow the visible control flow only. */
3838 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3840 u16 tx_seq = __get_txseq(chan, rx_control);
3841 u16 req_seq = __get_reqseq(chan, rx_control);
3842 u8 sar = __get_ctrl_sar(chan, rx_control);
3843 int tx_seq_offset, expected_tx_seq_offset;
/* Send an ack after roughly a sixth of the window has accumulated. */
3844 int num_to_ack = (chan->tx_win/6) + 1;
3847 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3848 tx_seq, rx_control);
/* F-bit answers our earlier poll: stop the monitor timer. */
3850 if (__is_ctrl_final(chan, rx_control) &&
3851 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3852 __clear_monitor_timer(chan);
3853 if (chan->unacked_frames > 0)
3854 __set_retrans_timer(chan);
3855 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* req_seq acknowledges our transmitted frames up to that point. */
3858 chan->expected_ack_seq = req_seq;
3859 l2cap_drop_acked_frames(chan);
3861 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3863 /* invalid tx_seq */
3864 if (tx_seq_offset >= chan->tx_win) {
3865 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3869 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3870 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3871 l2cap_send_ack(chan);
/* Fast path: exactly the frame we were expecting. */
3875 if (tx_seq == chan->expected_tx_seq)
3878 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3879 struct srej_list *first;
3881 first = list_first_entry(&chan->srej_l,
3882 struct srej_list, list);
/* The oldest outstanding SREJ was just satisfied. */
3883 if (tx_seq == first->tx_seq) {
3884 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3885 l2cap_check_srej_gap(chan, tx_seq);
3887 list_del(&first->list);
/* All gaps filled: leave SREJ_SENT recovery. */
3890 if (list_empty(&chan->srej_l)) {
3891 chan->buffer_seq = chan->buffer_seq_srej;
3892 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3893 l2cap_send_ack(chan);
3894 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3897 struct srej_list *l;
3899 /* duplicated tx_seq */
3900 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3903 list_for_each_entry(l, &chan->srej_l, list) {
3904 if (l->tx_seq == tx_seq) {
3905 l2cap_resend_srejframe(chan, tx_seq);
/* A new, larger gap appeared while already in recovery. */
3910 err = l2cap_send_srejframe(chan, tx_seq);
3912 l2cap_send_disconn_req(chan->conn, chan, -err);
3917 expected_tx_seq_offset = __seq_offset(chan,
3918 chan->expected_tx_seq, chan->buffer_seq);
3920 /* duplicated tx_seq */
3921 if (tx_seq_offset < expected_tx_seq_offset)
/* First gap detected: enter SREJ recovery. */
3924 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3926 BT_DBG("chan %p, Enter SREJ", chan);
3928 INIT_LIST_HEAD(&chan->srej_l);
3929 chan->buffer_seq_srej = chan->buffer_seq;
3931 __skb_queue_head_init(&chan->srej_q);
3932 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3934 /* Set P-bit only if there are some I-frames to ack. */
3935 if (__clear_ack_timer(chan))
3936 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3938 err = l2cap_send_srejframe(chan, tx_seq);
3940 l2cap_send_disconn_req(chan->conn, chan, -err);
/* In-order processing of the expected frame begins here. */
3947 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3949 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3950 bt_cb(skb)->tx_seq = tx_seq;
3951 bt_cb(skb)->sar = sar;
3952 __skb_queue_tail(&chan->srej_q, skb);
3956 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3957 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3960 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3964 if (__is_ctrl_final(chan, rx_control)) {
3965 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3966 l2cap_retransmit_frames(chan);
/* Batch acks: count frames and ack every num_to_ack-th one. */
3970 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3971 if (chan->num_acked == num_to_ack - 1)
3972 l2cap_send_ack(chan);
3974 __set_ack_timer(chan);
/* Handle a received RR (Receiver Ready) S-frame: drop frames the
 * req_seq acknowledges, then branch on the P/F bits — a poll demands a
 * final-bit response (SREJ tail or I/RR/RNR), a final bit answers our
 * own poll (possibly triggering retransmission), and an ordinary RR
 * clears remote-busy and resumes transmission. */
3983 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3985 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3986 __get_reqseq(chan, rx_control), rx_control);
3988 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3989 l2cap_drop_acked_frames(chan);
/* P-bit: peer is polling us and expects an F-bit reply. */
3991 if (__is_ctrl_poll(chan, rx_control)) {
3992 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3993 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3994 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3995 (chan->unacked_frames > 0))
3996 __set_retrans_timer(chan);
3998 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3999 l2cap_send_srejtail(chan);
4001 l2cap_send_i_or_rr_or_rnr(chan);
/* F-bit: answer to our earlier poll. */
4004 } else if (__is_ctrl_final(chan, rx_control)) {
4005 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4007 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4008 l2cap_retransmit_frames(chan);
/* Plain RR: restart the retransmission timer if frames are unacked. */
4011 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4012 (chan->unacked_frames > 0))
4013 __set_retrans_timer(chan);
4015 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4016 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4017 l2cap_send_ack(chan);
4019 l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: the peer rejects everything from
 * req_seq onward. Ack what it did receive, then retransmit — except
 * when the F-bit answers a poll during which a REJ was already acted
 * on (CONN_REJ_ACT), to avoid duplicate retransmission. */
4023 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4025 u16 tx_seq = __get_reqseq(chan, rx_control);
4027 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4029 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4031 chan->expected_ack_seq = tx_seq;
4032 l2cap_drop_acked_frames(chan);
4034 if (__is_ctrl_final(chan, rx_control)) {
4035 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4036 l2cap_retransmit_frames(chan);
4038 l2cap_retransmit_frames(chan);
/* Remember we already retransmitted while a poll is outstanding. */
4040 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4041 set_bit(CONN_REJ_ACT, &chan->conn_state);
4044 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4046 u16 tx_seq = __get_reqseq(chan, rx_control);
4048 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4050 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4052 if (__is_ctrl_poll(chan, rx_control)) {
4053 chan->expected_ack_seq = tx_seq;
4054 l2cap_drop_acked_frames(chan);
4056 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4057 l2cap_retransmit_one_frame(chan, tx_seq);
4059 l2cap_ertm_send(chan);
4061 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4062 chan->srej_save_reqseq = tx_seq;
4063 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4065 } else if (__is_ctrl_final(chan, rx_control)) {
4066 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4067 chan->srej_save_reqseq == tx_seq)
4068 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4070 l2cap_retransmit_one_frame(chan, tx_seq);
4072 l2cap_retransmit_one_frame(chan, tx_seq);
4073 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4074 chan->srej_save_reqseq = tx_seq;
4075 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a received RNR (Receiver Not Ready): mark the peer busy, ack
 * up to req_seq, and answer a poll with either an RR/RNR carrying the
 * F-bit (normal case) or the SREJ tail / plain RR when we are in SREJ
 * recovery ourselves. */
4080 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4082 u16 tx_seq = __get_reqseq(chan, rx_control);
4084 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4086 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4087 chan->expected_ack_seq = tx_seq;
4088 l2cap_drop_acked_frames(chan);
4090 if (__is_ctrl_poll(chan, rx_control))
4091 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Not in SREJ recovery: stop retransmitting and answer the poll. */
4093 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4094 __clear_retrans_timer(chan);
4095 if (__is_ctrl_poll(chan, rx_control))
4096 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4100 if (__is_ctrl_poll(chan, rx_control)) {
4101 l2cap_send_srejtail(chan);
4103 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4104 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received supervisory frame (RR/REJ/SREJ/RNR) to the
 * matching handler, after common F-bit processing that ends a pending
 * poll cycle (stop monitor timer, rearm retransmission if needed). */
4108 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4110 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4112 if (__is_ctrl_final(chan, rx_control) &&
4113 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4114 __clear_monitor_timer(chan);
4115 if (chan->unacked_frames > 0)
4116 __set_retrans_timer(chan);
4117 clear_bit(CONN_WAIT_F, &chan->conn_state);
4120 switch (__get_ctrl_super(chan, rx_control)) {
4121 case L2CAP_SUPER_RR:
4122 l2cap_data_channel_rrframe(chan, rx_control);
4125 case L2CAP_SUPER_REJ:
4126 l2cap_data_channel_rejframe(chan, rx_control);
4129 case L2CAP_SUPER_SREJ:
4130 l2cap_data_channel_srejframe(chan, rx_control);
4133 case L2CAP_SUPER_RNR:
4134 l2cap_data_channel_rnrframe(chan, rx_control);
/* ERTM receive entry point for one frame. Pulls the control field,
 * verifies the FCS (silently dropping corrupted I-frames so normal
 * recovery kicks in), validates the payload length against MPS and the
 * req_seq against the unacked window, then routes to the I-frame or
 * S-frame handler. Protocol violations tear the link down.
 * NOTE(review): excerpt omits drop/return lines between the checks. */
4142 int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4146 int len, next_tx_seq_offset, req_seq_offset;
4148 control = __get_control(chan, skb->data);
4149 skb_pull(skb, __ctrl_size(chan));
4153 * We can just drop the corrupted I-frame here.
4154 * Receiver will miss it and start proper recovery
4155 * procedures and ask retransmission.
4157 if (l2cap_check_fcs(chan, skb))
/* SDU-length field is present only on a START segment of an I-frame. */
4160 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4161 len -= L2CAP_SDULEN_SIZE;
4163 if (chan->fcs == L2CAP_FCS_CRC16)
4164 len -= L2CAP_FCS_SIZE;
4166 if (len > chan->mps) {
4167 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4171 req_seq = __get_reqseq(chan, control);
4173 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4175 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4176 chan->expected_ack_seq);
4178 /* check for invalid req-seq */
4179 if (req_seq_offset > next_tx_seq_offset) {
4180 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4184 if (!__is_sframe(chan, control)) {
/* An I-frame must carry a payload. */
4186 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4190 l2cap_data_channel_iframe(chan, control, skb);
/* An S-frame must carry no payload. */
4194 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4198 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver one data frame to the channel identified by @cid, dispatching
 * by channel mode: basic mode passes the skb straight to the owner
 * (bounded by imtu), ERTM goes through l2cap_ertm_data_rcv, and
 * streaming mode strips control/FCS, discards on any sequence gap, and
 * reassembles in order without retransmission.
 * NOTE(review): excerpt omits drop/done labels and return lines. */
4208 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4210 struct l2cap_chan *chan;
4211 struct sock *sk = NULL;
4216 chan = l2cap_get_chan_by_scid(conn, cid);
4218 BT_DBG("unknown cid 0x%4.4x", cid);
4224 BT_DBG("chan %p, len %d", chan, skb->len);
4226 if (chan->state != BT_CONNECTED)
4229 switch (chan->mode) {
4230 case L2CAP_MODE_BASIC:
4231 /* If socket recv buffers overflows we drop data here
4232 * which is *bad* because L2CAP has to be reliable.
4233 * But we don't have any other choice. L2CAP doesn't
4234 * provide flow control mechanism. */
4236 if (chan->imtu < skb->len)
4239 if (!chan->ops->recv(chan->data, skb))
4243 case L2CAP_MODE_ERTM:
4244 l2cap_ertm_data_rcv(chan, skb);
4248 case L2CAP_MODE_STREAMING:
4249 control = __get_control(chan, skb->data);
4250 skb_pull(skb, __ctrl_size(chan));
4253 if (l2cap_check_fcs(chan, skb))
4256 if (__is_sar_start(chan, control))
4257 len -= L2CAP_SDULEN_SIZE;
4259 if (chan->fcs == L2CAP_FCS_CRC16)
4260 len -= L2CAP_FCS_SIZE;
/* Streaming mode carries only I-frames within the MPS. */
4262 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4265 tx_seq = __get_txseq(chan, control);
4267 if (chan->expected_tx_seq != tx_seq) {
4268 /* Frame(s) missing - must discard partial SDU */
4269 kfree_skb(chan->sdu);
4271 chan->sdu_last_frag = NULL;
4274 /* TODO: Notify userland of missing data */
/* No retransmission in streaming mode: just resync and continue. */
4277 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4279 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4280 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4285 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (G-frame) payload to the channel registered
 * globally for @psm on our local address; it must be bound or connected
 * and the payload must fit its imtu. */
4299 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4301 struct sock *sk = NULL;
4302 struct l2cap_chan *chan;
4304 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4312 BT_DBG("sk %p, len %d", sk, skb->len);
4314 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4317 if (chan->imtu < skb->len)
4320 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE fixed channel) payload to the globally registered
 * channel for @cid on our local address; same bound/connected and imtu
 * checks as the connectionless path. */
4332 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4334 struct sock *sk = NULL;
4335 struct l2cap_chan *chan;
4337 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4345 BT_DBG("sk %p, len %d", sk, skb->len);
4347 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4350 if (chan->imtu < skb->len)
4353 if (!chan->ops->recv(chan->data, skb))
/* Top-level demultiplexer for one complete L2CAP frame: strip the basic
 * header, validate the announced length against the skb, then route by
 * CID — signaling, connectionless (keyed by PSM), LE ATT, SMP (an SMP
 * failure drops the whole connection), or a regular data channel. */
4365 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4367 struct l2cap_hdr *lh = (void *) skb->data;
4371 skb_pull(skb, L2CAP_HDR_SIZE);
4372 cid = __le16_to_cpu(lh->cid);
4373 len = __le16_to_cpu(lh->len);
/* Header length must match what actually arrived. */
4375 if (len != skb->len) {
4380 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4383 case L2CAP_CID_LE_SIGNALING:
4384 case L2CAP_CID_SIGNALING:
4385 l2cap_sig_channel(conn, skb);
4388 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM ahead of the payload. */
4389 psm = get_unaligned_le16(skb->data);
4391 l2cap_conless_channel(conn, psm, skb);
4394 case L2CAP_CID_LE_DATA:
4395 l2cap_att_channel(conn, cid, skb);
/* Security Manager failure terminates the connection. */
4399 if (smp_sig_channel(conn, skb))
4400 l2cap_conn_del(conn->hcon, EACCES);
4404 l2cap_data_channel(conn, cid, skb);
4409 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI asks whether we accept an incoming ACL connection from @bdaddr.
 * Scan listening channels: those bound to this adapter's own address
 * (exact match, lm1) take precedence over wildcard BDADDR_ANY listeners
 * (lm2). Each match contributes ACCEPT and, if the channel allows role
 * switching, MASTER to the returned link-mode bits. */
4411 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4413 int exact = 0, lm1 = 0, lm2 = 0;
4414 struct l2cap_chan *c;
4416 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4418 /* Find listening sockets and check their link_mode */
4419 read_lock(&chan_list_lock);
4420 list_for_each_entry(c, &chan_list, global_l) {
4421 struct sock *sk = c->sk;
4423 if (c->state != BT_LISTEN)
/* Exact match on the adapter's own source address. */
4426 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4427 lm1 |= HCI_LM_ACCEPT;
4428 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4429 lm1 |= HCI_LM_MASTER;
/* Wildcard listener; only used when no exact match exists. */
4431 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4432 lm2 |= HCI_LM_ACCEPT;
4433 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4434 lm2 |= HCI_LM_MASTER;
4437 read_unlock(&chan_list_lock);
4439 return exact ? lm1 : lm2;
/* HCI connection-complete callback: on success create the L2CAP
 * connection object and mark it ready; on failure tear it down with the
 * HCI status translated to an errno. */
4442 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4444 struct l2cap_conn *conn;
4446 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4449 conn = l2cap_conn_add(hcon, status);
4451 l2cap_conn_ready(conn);
4453 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI asks for the reason to put in an outgoing Disconnect: report the
 * stored disc_reason, defaulting to remote-user-terminated when no
 * L2CAP connection state exists. */
4458 int l2cap_disconn_ind(struct hci_conn *hcon)
4460 struct l2cap_conn *conn = hcon->l2cap_data;
4462 BT_DBG("hcon %p", hcon);
4465 return HCI_ERROR_REMOTE_USER_TERM;
4466 return conn->disc_reason;
/* HCI disconnection-complete callback: destroy the L2CAP connection,
 * converting the HCI reason code to an errno for the channels. */
4469 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4471 BT_DBG("hcon %p reason %d", hcon, reason);
4473 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption gives MEDIUM-security channels a grace timer and
 * closes HIGH-security ones outright; regaining encryption cancels the
 * pending MEDIUM-security timer. */
4477 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4479 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4482 if (encrypt == 0x00) {
4483 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4484 __clear_chan_timer(chan);
4485 __set_chan_timer(chan,
4486 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4487 } else if (chan->sec_level == BT_SECURITY_HIGH)
4488 l2cap_chan_close(chan, ECONNREFUSED);
4490 if (chan->sec_level == BT_SECURITY_MEDIUM)
4491 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) event callback. For LE links
 * this kicks SMP key distribution and cancels the security timer. For
 * BR/EDR it walks every channel on the connection: LE-data channels are
 * promoted to ready, established channels get the encryption check, a
 * channel mid-connect sends its deferred Connection Request, and a
 * CONNECT2 channel answers the peer (pending if the owner uses
 * defer_setup, success otherwise, or SEC_BLOCK on failure).
 * NOTE(review): excerpt omits locking and several branch/return lines. */
4495 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4497 struct l2cap_conn *conn = hcon->l2cap_data;
4498 struct l2cap_chan *chan;
4503 BT_DBG("conn %p", conn);
4505 if (hcon->type == LE_LINK) {
4506 smp_distribute_keys(conn, 0);
4507 __cancel_delayed_work(&conn->security_timer);
4512 list_for_each_entry_rcu(chan, &conn->chan_l, list) {
4513 struct sock *sk = chan->sk;
4517 BT_DBG("chan->scid %d", chan->scid);
4519 if (chan->scid == L2CAP_CID_LE_DATA) {
4520 if (!status && encrypt) {
/* Encrypted LE link: adopt the link's security level, go ready. */
4521 chan->sec_level = hcon->sec_level;
4522 l2cap_chan_ready(sk);
4529 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4534 if (!status && (chan->state == BT_CONNECTED ||
4535 chan->state == BT_CONFIG)) {
4536 l2cap_check_encryption(chan, encrypt);
/* Security completed for an outgoing connect: send the request now. */
4541 if (chan->state == BT_CONNECT) {
4543 struct l2cap_conn_req req;
4544 req.scid = cpu_to_le16(chan->scid);
4545 req.psm = chan->psm;
4547 chan->ident = l2cap_get_ident(conn);
4548 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4550 l2cap_send_cmd(conn, chan->ident,
4551 L2CAP_CONN_REQ, sizeof(req), &req);
4553 __clear_chan_timer(chan);
4554 __set_chan_timer(chan,
4555 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
/* Incoming connect awaiting security: answer the peer now. */
4557 } else if (chan->state == BT_CONNECT2) {
4558 struct l2cap_conn_rsp rsp;
4562 if (bt_sk(sk)->defer_setup) {
4563 struct sock *parent = bt_sk(sk)->parent;
4564 res = L2CAP_CR_PEND;
4565 stat = L2CAP_CS_AUTHOR_PEND;
/* Wake the listening socket so the owner can authorize. */
4567 parent->sk_data_ready(parent, 0);
4569 l2cap_state_change(chan, BT_CONFIG);
4570 res = L2CAP_CR_SUCCESS;
4571 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse and schedule disconnect. */
4574 l2cap_state_change(chan, BT_DISCONN);
4575 __set_chan_timer(chan,
4576 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4577 res = L2CAP_CR_SEC_BLOCK;
4578 stat = L2CAP_CS_NO_INFO;
4581 rsp.scid = cpu_to_le16(chan->dcid);
4582 rsp.dcid = cpu_to_le16(chan->scid);
4583 rsp.result = cpu_to_le16(res);
4584 rsp.status = cpu_to_le16(stat);
4585 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/*
 * HCI callback: an ACL data packet arrived for this connection.
 * Reassembles L2CAP PDUs that the controller fragmented across multiple
 * ACL packets (start fragment carries the Basic L2CAP header with the
 * total length; continuation fragments are appended into conn->rx_skb).
 *
 * NOTE(review): lossy extraction -- error-path gotos / `drop:` labels and
 * several braces are not visible in this excerpt.
 */
4597 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4599 struct l2cap_conn *conn = hcon->l2cap_data;
/* No l2cap_data yet: lazily create the connection object. */
4602 conn = l2cap_conn_add(hcon, 0);
4607 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
/* --- Start fragment (ACL_CONT not set) --- */
4609 if (!(flags & ACL_CONT)) {
4610 struct l2cap_hdr *hdr;
4611 struct l2cap_chan *chan;
/* A start frame while reassembly is in progress means we lost a
 * fragment: discard the partial PDU and flag the link unreliable. */
4616 BT_ERR("Unexpected start frame (len %d)", skb->len);
4617 kfree_skb(conn->rx_skb);
4618 conn->rx_skb = NULL;
4620 l2cap_conn_unreliable(conn, ECOMM);
4623 /* Start fragment always begin with Basic L2CAP header */
4624 if (skb->len < L2CAP_HDR_SIZE) {
4625 BT_ERR("Frame is too short (len %d)", skb->len);
4626 l2cap_conn_unreliable(conn, ECOMM);
/* Parse total PDU length (payload + header) and destination CID. */
4630 hdr = (struct l2cap_hdr *) skb->data;
4631 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4632 cid = __le16_to_cpu(hdr->cid);
4634 if (len == skb->len) {
4635 /* Complete frame received */
4636 l2cap_recv_frame(conn, skb);
4640 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4642 if (skb->len > len) {
4643 BT_ERR("Frame is too long (len %d, expected len %d)",
4645 l2cap_conn_unreliable(conn, ECOMM);
/* Early MTU check against the target channel so an oversized PDU is
 * rejected before we allocate the reassembly buffer. */
4649 chan = l2cap_get_chan_by_scid(conn, cid);
4651 if (chan && chan->sk) {
4652 struct sock *sk = chan->sk;
4654 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4655 BT_ERR("Frame exceeding recv MTU (len %d, "
4659 l2cap_conn_unreliable(conn, ECOMM);
4665 /* Allocate skb for the complete frame (with header) */
4666 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
/* Copy the start fragment in; rx_len tracks bytes still expected. */
4670 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4672 conn->rx_len = len - skb->len;
/* --- Continuation fragment --- */
4674 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no reassembly in progress: protocol violation. */
4676 if (!conn->rx_len) {
4677 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4678 l2cap_conn_unreliable(conn, ECOMM);
/* Fragment would overflow the announced PDU length: abandon it. */
4682 if (skb->len > conn->rx_len) {
4683 BT_ERR("Fragment is too long (len %d, expected %d)",
4684 skb->len, conn->rx_len);
4685 kfree_skb(conn->rx_skb);
4686 conn->rx_skb = NULL;
4688 l2cap_conn_unreliable(conn, ECOMM);
/* Append this fragment and decrement the outstanding byte count. */
4692 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4694 conn->rx_len -= skb->len;
4696 if (!conn->rx_len) {
4697 /* Complete frame received */
4698 l2cap_recv_frame(conn, conn->rx_skb);
4699 conn->rx_skb = NULL;
/*
 * seq_file show callback for /sys/kernel/debug/bluetooth/l2cap:
 * prints one line per L2CAP channel (addresses, state, PSM, CIDs,
 * MTUs, security level and mode).
 */
4708 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4710 struct l2cap_chan *c;
/* chan_list is a global list of all channels; hold its rwlock for read. */
4712 read_lock(&chan_list_lock);
4714 list_for_each_entry(c, &chan_list, global_l) {
4715 struct sock *sk = c->sk;
4717 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4718 batostr(&bt_sk(sk)->src),
4719 batostr(&bt_sk(sk)->dst),
/* psm is stored little-endian on the wire; convert for display. */
4720 c->state, __le16_to_cpu(c->psm),
4721 c->scid, c->dcid, c->imtu, c->omtu,
4722 c->sec_level, c->mode),
4725 read_unlock(&chan_list_lock);
/* debugfs open: bind the single-record seq_file to l2cap_debugfs_show. */
4730 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4732 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry; reading is delegated to
 * the seq_file machinery set up by l2cap_debugfs_open.
 * NOTE(review): the usual `.read = seq_read,` line is not visible in this
 * excerpt -- confirm against the full file. */
4735 static const struct file_operations l2cap_debugfs_fops = {
4736 .open = l2cap_debugfs_open,
4738 .llseek = seq_lseek,
4739 .release = single_release,
/* Dentry for the debugfs file; created in l2cap_init, removed in l2cap_exit. */
4742 static struct dentry *l2cap_debugfs;
/*
 * Module init: register the L2CAP socket family, then create the
 * debugfs file. A debugfs failure is only logged, not fatal.
 * NOTE(review): the error-return path after l2cap_init_sockets() is not
 * visible in this excerpt.
 */
4744 int __init l2cap_init(void)
4748 err = l2cap_init_sockets();
4753 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4754 bt_debugfs, NULL, &l2cap_debugfs_fops);
4756 BT_ERR("Failed to create L2CAP debug file");
/* Module teardown: remove the debugfs entry (safe on NULL) and
 * unregister the L2CAP socket family -- reverse order of l2cap_init. */
4762 void l2cap_exit(void)
4764 debugfs_remove(l2cap_debugfs);
4765 l2cap_cleanup_sockets();
/* Writable (0644) module parameter to disable ERTM (Enhanced
 * Retransmission Mode) and fall back to basic mode. */
4768 module_param(disable_ertm, bool, 0644);
4769 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");