2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
/* Core feature mask and fixed-channel map advertised in L2CAP
 * Information responses. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of every L2CAP channel, guarded by chan_list_lock. */
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations of signalling helpers used before their
 * definitions. NOTE(review): some declaration lines are elided in
 * this extract (e.g. the tail of the l2cap_send_cmd prototype). */
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 /* ---- L2CAP channels ---- */
/* Walk conn->chan_l looking up a channel; presumably matches on
 * c->dcid == cid — the comparison and return lines are elided in this
 * extract. Caller must hold conn->chan_lock. */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 list_for_each_entry(c, &conn->chan_l, list) {
/* Walk conn->chan_l looking up a channel; presumably matches on
 * c->scid == cid — the comparison and return lines are elided in this
 * extract. Caller must hold conn->chan_lock. */
89 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101  * Returns locked socket */
/* Locking wrapper: performs the SCID lookup under conn->chan_lock.
 * The return statement is elided in this extract. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 struct l2cap_chan *c;
106 mutex_lock(&conn->chan_lock);
107 c = __l2cap_get_chan_by_scid(conn, cid);
108 mutex_unlock(&conn->chan_lock);
/* Find the channel on @conn whose pending signalling identifier matches
 * @ident. Caller must hold conn->chan_lock; the return lines are elided
 * in this extract. */
113 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
115 struct l2cap_chan *c;
117 list_for_each_entry(c, &conn->chan_l, list) {
118 if (c->ident == ident)
/* Locking wrapper: looks up a channel by signalling ident under
 * conn->chan_lock. The return statement is elided in this extract. */
124 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
126 struct l2cap_chan *c;
128 mutex_lock(&conn->chan_lock);
129 c = __l2cap_get_chan_by_ident(conn, ident);
130 mutex_unlock(&conn->chan_lock);
/* Find a channel in the global chan_list bound to source address @src
 * with source port @psm. Caller must hold chan_list_lock; the return
 * lines are elided in this extract. */
135 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
137 struct l2cap_chan *c;
139 list_for_each_entry(c, &chan_list, global_l) {
140 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on @src. With an explicit non-zero PSM, fails if it
 * is already taken; with psm == 0, scans the dynamic range 0x1001-0x10ff
 * (odd values only) for a free PSM. Error-path and return lines are
 * elided in this extract. */
146 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
150 write_lock(&chan_list_lock);
152 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* No PSM given: auto-allocate from the dynamic range. */
165 for (p = 0x1001; p < 0x1100; p += 2)
166 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
167 chan->psm = cpu_to_le16(p);
168 chan->sport = cpu_to_le16(p);
175 write_unlock(&chan_list_lock);
/* Bind @chan to a fixed source CID; the assignment lines between the
 * lock/unlock pair are elided in this extract. */
179 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
181 write_lock(&chan_list_lock);
185 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic source CID on @conn by linear scan of
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). Return lines (including the
 * "none free" fallback) are elided in this extract. */
190 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
192 u16 cid = L2CAP_CID_DYN_START;
194 for (; cid < L2CAP_CID_DYN_END; cid++) {
195 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Move @chan to @state and notify the owner through the state_change
 * callback. Lock-free variant; presumably the caller holds the socket
 * lock — elided lines prevent confirming. */
202 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
204 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
205 state_to_string(state));
208 chan->ops->state_change(chan->data, state);
/* Locked wrapper for __l2cap_state_change(); the lock/unlock lines on
 * chan->sk are elided in this extract. */
211 static void l2cap_state_change(struct l2cap_chan *chan, int state)
213 struct sock *sk = chan->sk;
216 __l2cap_state_change(chan, state);
/* Record error @err on the channel's socket (lock-free variant); the
 * sk->sk_err assignment is elided in this extract. */
220 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
222 struct sock *sk = chan->sk;
/* Locked wrapper for __l2cap_chan_set_err(); the socket lock/unlock
 * lines are elided in this extract. */
227 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
229 struct sock *sk = chan->sk;
232 __l2cap_chan_set_err(chan, err);
/* Delayed-work handler for chan->chan_timer. Chooses an errno from the
 * channel state, closes the channel under conn->chan_lock, notifies the
 * owner via ops->close, and drops the reference the timer held. The
 * default-reason branch is elided in this extract. */
236 static void l2cap_chan_timeout(struct work_struct *work)
238 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
240 struct l2cap_conn *conn = chan->conn;
243 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
245 mutex_lock(&conn->chan_lock);
246 l2cap_chan_lock(chan);
248 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
249 reason = ECONNREFUSED;
250 else if (chan->state == BT_CONNECT &&
251 chan->sec_level != BT_SECURITY_SDP)
252 reason = ECONNREFUSED;
256 l2cap_chan_close(chan, reason);
258 l2cap_chan_unlock(chan);
/* Owner callback runs after the channel lock is dropped. */
260 chan->ops->close(chan->data);
261 mutex_unlock(&conn->chan_lock);
/* Balance the hold taken when the timer was armed. */
263 l2cap_chan_put(chan);
/* Allocate and initialise a new channel bound to socket @sk, add it to
 * the global chan_list, arm its timeout work and take the initial
 * refcount. Allocation-failure and return lines are elided in this
 * extract. */
266 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
268 struct l2cap_chan *chan;
270 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
274 mutex_init(&chan->lock);
278 write_lock(&chan_list_lock);
279 list_add(&chan->global_l, &chan_list);
280 write_unlock(&chan_list_lock);
282 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
284 chan->state = BT_OPEN;
/* Initial reference owned by the creator. */
286 atomic_set(&chan->refcnt, 1);
288 BT_DBG("sk %p chan %p", sk, chan);
/* Remove @chan from the global list and drop the creator's reference;
 * the channel is freed when the refcount reaches zero. */
293 void l2cap_chan_destroy(struct l2cap_chan *chan)
295 write_lock(&chan_list_lock);
296 list_del(&chan->global_l);
297 write_unlock(&chan_list_lock);
299 l2cap_chan_put(chan);
/* Attach @chan to @conn: assign CIDs and default MTU according to the
 * channel type (LE data, dynamically-allocated connection-oriented,
 * connectionless, or raw/signalling), seed the default QoS parameters,
 * take a channel reference, and link it into conn->chan_l. Caller is
 * expected to hold conn->chan_lock (see l2cap_chan_add). Some lines
 * (e.g. case terminators) are elided in this extract. */
302 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
304 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
305 chan->psm, chan->dcid);
307 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
311 switch (chan->chan_type) {
312 case L2CAP_CHAN_CONN_ORIENTED:
313 if (conn->hcon->type == LE_LINK) {
/* LE links use the fixed LE data channel in both directions. */
315 chan->omtu = L2CAP_LE_DEFAULT_MTU;
316 chan->scid = L2CAP_CID_LE_DATA;
317 chan->dcid = L2CAP_CID_LE_DATA;
319 /* Alloc CID for connection-oriented socket */
320 chan->scid = l2cap_alloc_cid(conn);
321 chan->omtu = L2CAP_DEFAULT_MTU;
325 case L2CAP_CHAN_CONN_LESS:
326 /* Connectionless socket */
327 chan->scid = L2CAP_CID_CONN_LESS;
328 chan->dcid = L2CAP_CID_CONN_LESS;
329 chan->omtu = L2CAP_DEFAULT_MTU;
333 /* Raw socket can send/recv signalling messages only */
334 chan->scid = L2CAP_CID_SIGNALING;
335 chan->dcid = L2CAP_CID_SIGNALING;
336 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort service parameters for the local side. */
339 chan->local_id = L2CAP_BESTEFFORT_ID;
340 chan->local_stype = L2CAP_SERV_BESTEFFORT;
341 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
342 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
343 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
344 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Reference held for the connection's channel list. */
346 l2cap_chan_hold(chan);
348 list_add(&chan->list, &conn->chan_l);
/* Attach @chan to @conn under conn->chan_lock. */
351 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
353 mutex_lock(&conn->chan_lock);
354 __l2cap_chan_add(conn, chan);
355 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection: stop the channel timer, unlink it
 * from conn->chan_l, drop the list reference and the hci_conn
 * reference, mark the socket closed/zapped (reporting @err if set),
 * wake the parent for accepted sockets, and purge ERTM state (queues,
 * timers, SREJ list). Several guard/cleanup lines are elided in this
 * extract. */
358 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
360 struct sock *sk = chan->sk;
361 struct l2cap_conn *conn = chan->conn;
362 struct sock *parent = bt_sk(sk)->parent;
364 __clear_chan_timer(chan);
366 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
369 /* Delete from channel list */
370 list_del(&chan->list);
372 l2cap_chan_put(chan);
375 hci_conn_put(conn->hcon);
380 __l2cap_state_change(chan, BT_CLOSED);
381 sock_set_flag(sk, SOCK_ZAPPED);
384 __l2cap_chan_set_err(chan, err);
/* Not-yet-accepted child: unlink and wake the listening parent. */
387 bt_accept_unlink(sk);
388 parent->sk_data_ready(parent, 0);
390 sk->sk_state_change(sk);
/* Skip ERTM teardown unless configuration completed in both
 * directions. */
394 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
395 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
398 skb_queue_purge(&chan->tx_q);
400 if (chan->mode == L2CAP_MODE_ERTM) {
401 struct srej_list *l, *tmp;
403 __clear_retrans_timer(chan);
404 __clear_monitor_timer(chan);
405 __clear_ack_timer(chan);
407 skb_queue_purge(&chan->srej_q);
409 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel queued on listening socket
 * @parent, releasing each via the owner's close callback. */
416 static void l2cap_chan_cleanup_listen(struct sock *parent)
420 BT_DBG("parent %p", parent);
422 /* Close not yet accepted channels */
423 while ((sk = bt_accept_dequeue(parent, NULL))) {
424 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
426 l2cap_chan_lock(chan);
427 __clear_chan_timer(chan);
428 l2cap_chan_close(chan, ECONNRESET);
429 l2cap_chan_unlock(chan);
/* Owner callback after the channel lock is released. */
431 chan->ops->close(chan->data);
/* Close @chan according to its current state: tear down a listening
 * socket's pending children; send a Disconnect request for established
 * ACL connection-oriented channels; answer a half-open incoming connect
 * (BT_CONNECT2) with a Connection Response carrying SEC_BLOCK or
 * BAD_PSM before deleting; otherwise just delete or zap. The switch's
 * case labels are elided in this extract — branch boundaries inferred
 * with care. */
435 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
437 struct l2cap_conn *conn = chan->conn;
438 struct sock *sk = chan->sk;
440 BT_DBG("chan %p state %s sk %p", chan,
441 state_to_string(chan->state), sk);
443 switch (chan->state) {
446 l2cap_chan_cleanup_listen(sk);
448 __l2cap_state_change(chan, BT_CLOSED);
449 sock_set_flag(sk, SOCK_ZAPPED);
/* Connected/config state on ACL: request disconnection and keep a
 * timer running while waiting for the response. */
455 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
456 conn->hcon->type == ACL_LINK) {
457 __clear_chan_timer(chan);
458 __set_chan_timer(chan, sk->sk_sndtimeo);
459 l2cap_send_disconn_req(conn, chan, reason);
461 l2cap_chan_del(chan, reason);
465 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
466 conn->hcon->type == ACL_LINK) {
467 struct l2cap_conn_rsp rsp;
470 if (bt_sk(sk)->defer_setup)
471 result = L2CAP_CR_SEC_BLOCK;
473 result = L2CAP_CR_BAD_PSM;
474 l2cap_state_change(chan, BT_DISCONN);
/* scid/dcid are swapped in the response: our dcid is the
 * remote's source CID. */
476 rsp.scid = cpu_to_le16(chan->dcid);
477 rsp.dcid = cpu_to_le16(chan->scid);
478 rsp.result = cpu_to_le16(result);
479 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
480 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
484 l2cap_chan_del(chan, reason);
489 l2cap_chan_del(chan, reason);
494 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel type / PSM / security level onto an HCI authentication
 * requirement: raw channels ask for dedicated bonding, SDP (PSM 0x0001)
 * never bonds (and is demoted from LOW to SDP security), everything
 * else requests general bonding. Some default-case lines are elided in
 * this extract. */
500 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
502 if (chan->chan_type == L2CAP_CHAN_RAW) {
503 switch (chan->sec_level) {
504 case BT_SECURITY_HIGH:
505 return HCI_AT_DEDICATED_BONDING_MITM;
506 case BT_SECURITY_MEDIUM:
507 return HCI_AT_DEDICATED_BONDING;
509 return HCI_AT_NO_BONDING;
511 } else if (chan->psm == cpu_to_le16(0x0001)) {
512 if (chan->sec_level == BT_SECURITY_LOW)
513 chan->sec_level = BT_SECURITY_SDP;
515 if (chan->sec_level == BT_SECURITY_HIGH)
516 return HCI_AT_NO_BONDING_MITM;
518 return HCI_AT_NO_BONDING;
520 switch (chan->sec_level) {
521 case BT_SECURITY_HIGH:
522 return HCI_AT_GENERAL_BONDING_MITM;
523 case BT_SECURITY_MEDIUM:
524 return HCI_AT_GENERAL_BONDING;
526 return HCI_AT_NO_BONDING;
531 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level with the
 * auth type derived from the channel; returns hci_conn_security()'s
 * verdict. */
532 int l2cap_chan_check_security(struct l2cap_chan *chan)
534 struct l2cap_conn *conn = chan->conn;
537 auth_type = l2cap_get_auth_type(chan);
539 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for @conn, wrapping
 * within the kernel-reserved 1-128 range under conn->lock. The wrap
 * assignment and return lines are elided in this extract. */
542 static u8 l2cap_get_ident(struct l2cap_conn *conn)
546 /* Get next available identificator.
547  * 1 - 128 are used by kernel.
548  * 129 - 199 are reserved.
549  * 200 - 254 are used by utilities like l2ping, etc.
552 spin_lock(&conn->lock);
554 if (++conn->tx_ident > 128)
559 spin_unlock(&conn->lock);
/* Build a signalling command skb and transmit it on the connection's
 * HCI channel at maximum priority, choosing non-flushable ACL framing
 * when the controller supports it. The NULL-skb guard line is elided in
 * this extract. */
564 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
566 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
569 BT_DBG("code 0x%2.2x", code);
574 if (lmp_no_flush_capable(conn->hcon->hdev))
575 flags = ACL_START_NO_FLUSH;
/* Signalling traffic keeps the link active and jumps the queue. */
579 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
580 skb->priority = HCI_PRIO_MAX;
582 hci_send_acl(conn->hchan, skb, flags);
/* Transmit a data skb for @chan over HCI, using non-flushable ACL
 * framing unless the channel is flagged flushable, and propagating the
 * channel's force-active power policy. */
585 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
587 struct hci_conn *hcon = chan->conn->hcon;
590 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
593 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
594 lmp_no_flush_capable(hcon->hdev))
595 flags = ACL_START_NO_FLUSH;
599 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
600 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM supervisory frame (S-frame) carrying @control:
 * sizes the header for enhanced vs extended control fields, folds in
 * pending Final/Poll bits, appends an FCS when CRC16 is in use, and
 * hands the skb to l2cap_do_send() at top priority. Allocation-failure
 * guard lines are elided in this extract. */
603 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
606 struct l2cap_hdr *lh;
607 struct l2cap_conn *conn = chan->conn;
610 if (chan->state != BT_CONNECTED)
613 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
614 hlen = L2CAP_EXT_HDR_SIZE;
616 hlen = L2CAP_ENH_HDR_SIZE;
618 if (chan->fcs == L2CAP_FCS_CRC16)
619 hlen += L2CAP_FCS_SIZE;
621 BT_DBG("chan %p, control 0x%8.8x", chan, control);
623 count = min_t(unsigned int, conn->mtu, hlen);
625 control |= __set_sframe(chan);
/* Consume any pending Final/Poll request exactly once. */
627 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
628 control |= __set_ctrl_final(chan);
630 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
631 control |= __set_ctrl_poll(chan);
633 skb = bt_skb_alloc(count, GFP_ATOMIC);
637 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
638 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
639 lh->cid = cpu_to_le16(chan->dcid);
641 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
643 if (chan->fcs == L2CAP_FCS_CRC16) {
/* FCS covers header + control, i.e. everything before itself. */
644 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
645 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
648 skb->priority = HCI_PRIO_MAX;
649 l2cap_do_send(chan, skb);
/* Send an RNR S-frame if we are locally busy (remembering that RNR was
 * sent), otherwise an RR, acknowledging up to chan->buffer_seq. */
652 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
654 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
655 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
656 set_bit(CONN_RNR_SENT, &chan->conn_state);
658 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
660 control |= __set_reqseq(chan, chan->buffer_seq);
662 l2cap_send_sframe(chan, control);
/* True when no Connection Request is outstanding for @chan. */
665 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
667 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send an L2CAP Connection Request for @chan with a fresh signalling
 * ident and mark the connect as pending. The req.psm assignment line is
 * elided in this extract. */
670 static void l2cap_send_conn_req(struct l2cap_chan *chan)
672 struct l2cap_conn *conn = chan->conn;
673 struct l2cap_conn_req req;
675 req.scid = cpu_to_le16(chan->scid);
678 chan->ident = l2cap_get_ident(conn);
680 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
682 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Start channel establishment: if the feature-mask exchange already
 * happened (or at least was requested and completed), send the
 * Connection Request once security passes; otherwise kick off an
 * Information Request for the feature mask first, with a timeout. */
685 static void l2cap_do_start(struct l2cap_chan *chan)
687 struct l2cap_conn *conn = chan->conn;
689 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
690 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
693 if (l2cap_chan_check_security(chan) &&
694 __l2cap_no_conn_pending(chan))
695 l2cap_send_conn_req(chan);
697 struct l2cap_info_req req;
698 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
700 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
701 conn->info_ident = l2cap_get_ident(conn);
/* Give the peer a bounded time to answer the info request. */
703 schedule_delayed_work(&conn->info_timer,
704 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
706 l2cap_send_cmd(conn, conn->info_ident,
707 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether @mode (ERTM/streaming) is supported by both the local
 * feature mask and the remote @feat_mask. Default-case lines are elided
 * in this extract. */
711 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
713 u32 local_feat_mask = l2cap_feat_mask;
715 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
718 case L2CAP_MODE_ERTM:
719 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
720 case L2CAP_MODE_STREAMING:
721 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP Disconnect Request for @chan: stop ERTM timers, emit
 * the request, then move the channel to BT_DISCONN and record @err on
 * its socket. Guard lines (e.g. NULL conn check) are elided in this
 * extract. */
727 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
729 struct sock *sk = chan->sk;
730 struct l2cap_disconn_req req;
735 if (chan->mode == L2CAP_MODE_ERTM) {
736 __clear_retrans_timer(chan);
737 __clear_monitor_timer(chan);
738 __clear_ack_timer(chan);
741 req.dcid = cpu_to_le16(chan->dcid);
742 req.scid = cpu_to_le16(chan->scid);
743 l2cap_send_cmd(conn, l2cap_get_ident(conn),
744 L2CAP_DISCONN_REQ, sizeof(req), &req);
747 __l2cap_state_change(chan, BT_DISCONN);
748 __l2cap_chan_set_err(chan, err);
752 /* ---- L2CAP connections ---- */
/* Drive all connection-oriented channels on @conn forward once the
 * connection is usable: for BT_CONNECT channels, (re)send the Connection
 * Request or close them if their mode is unsupported; for BT_CONNECT2
 * (incoming, awaiting our response), answer with success/pending/authen-
 * pending depending on security and defer_setup, then fire the first
 * Configure Request. Some continue/brace lines are elided in this
 * extract. */
753 static void l2cap_conn_start(struct l2cap_conn *conn)
755 struct l2cap_chan *chan, *tmp;
757 BT_DBG("conn %p", conn);
759 mutex_lock(&conn->chan_lock);
761 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
762 struct sock *sk = chan->sk;
764 l2cap_chan_lock(chan);
766 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
767 l2cap_chan_unlock(chan);
771 if (chan->state == BT_CONNECT) {
772 if (!l2cap_chan_check_security(chan) ||
773 !__l2cap_no_conn_pending(chan)) {
774 l2cap_chan_unlock(chan);
/* A state-2 device cannot fall back to basic mode:
 * drop the channel if the peer lacks the mode. */
778 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
779 && test_bit(CONF_STATE2_DEVICE,
780 &chan->conf_state)) {
781 l2cap_chan_close(chan, ECONNRESET);
782 l2cap_chan_unlock(chan);
786 l2cap_send_conn_req(chan);
788 } else if (chan->state == BT_CONNECT2) {
789 struct l2cap_conn_rsp rsp;
791 rsp.scid = cpu_to_le16(chan->dcid);
792 rsp.dcid = cpu_to_le16(chan->scid);
794 if (l2cap_chan_check_security(chan)) {
796 if (bt_sk(sk)->defer_setup) {
797 struct sock *parent = bt_sk(sk)->parent;
798 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
799 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
801 parent->sk_data_ready(parent, 0);
804 __l2cap_state_change(chan, BT_CONFIG);
805 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
806 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
810 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
811 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
814 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only the first successful response triggers config. */
817 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
818 rsp.result != L2CAP_CR_SUCCESS) {
819 l2cap_chan_unlock(chan);
823 set_bit(CONF_REQ_SENT, &chan->conf_state);
824 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
825 l2cap_build_conf_req(chan, buf), buf);
826 chan->num_conf_req++;
829 l2cap_chan_unlock(chan);
832 mutex_unlock(&conn->chan_lock);
835 /* Find socket with cid and source bdaddr.
836  * Returns closest match, locked.
/* Search the global channel list for a channel in @state with source
 * CID @cid: an exact source-address match wins immediately; otherwise a
 * BDADDR_ANY wildcard binding is remembered as the closest match.
 * Return lines are elided in this extract. */
838 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
840 struct l2cap_chan *c, *c1 = NULL;
842 read_lock(&chan_list_lock);
844 list_for_each_entry(c, &chan_list, global_l) {
845 struct sock *sk = c->sk;
847 if (state && c->state != state)
850 if (c->scid == cid) {
852 if (!bacmp(&bt_sk(sk)->src, src)) {
853 read_unlock(&chan_list_lock);
858 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
863 read_unlock(&chan_list_lock);
/* Handle an incoming LE connection: find a listener on the LE data CID,
 * spawn a child channel via ops->new_connection, copy the addresses,
 * enqueue the child on the parent's accept queue, attach it to @conn,
 * and mark it connected. Failure-path lines are elided in this
 * extract. */
868 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
870 struct sock *parent, *sk;
871 struct l2cap_chan *chan, *pchan;
875 /* Check if we have socket listening on cid */
876 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
885 /* Check for backlog size */
886 if (sk_acceptq_is_full(parent)) {
887 BT_DBG("backlog full %d", parent->sk_ack_backlog);
891 chan = pchan->ops->new_connection(pchan->data);
/* Keep the ACL/LE link alive while the channel exists. */
897 hci_conn_hold(conn->hcon);
899 bacpy(&bt_sk(sk)->src, conn->src);
900 bacpy(&bt_sk(sk)->dst, conn->dst);
902 bt_accept_enqueue(parent, sk);
904 l2cap_chan_add(conn, chan);
906 __set_chan_timer(chan, sk->sk_sndtimeo);
908 __l2cap_state_change(chan, BT_CONNECTED);
909 parent->sk_data_ready(parent, 0);
912 release_sock(parent);
/* Mark @chan fully established: clear configuration state and the
 * channel timer, enter BT_CONNECTED, wake the socket, and notify a
 * listening parent if this is an accepted child. Some lock/guard lines
 * are elided in this extract. */
915 static void l2cap_chan_ready(struct l2cap_chan *chan)
917 struct sock *sk = chan->sk;
922 parent = bt_sk(sk)->parent;
924 BT_DBG("sk %p, parent %p", sk, parent);
926 chan->conf_state = 0;
927 __clear_chan_timer(chan);
929 __l2cap_state_change(chan, BT_CONNECTED);
930 sk->sk_state_change(sk);
933 parent->sk_data_ready(parent, 0);
/* Called when the underlying link comes up: accept incoming LE
 * connections (or trigger SMP on outgoing ones), then walk every
 * channel — LE channels become ready once SMP security passes,
 * non-connection-oriented channels are marked connected immediately,
 * and BT_CONNECT channels proceed with l2cap_do_start(). */
938 static void l2cap_conn_ready(struct l2cap_conn *conn)
940 struct l2cap_chan *chan;
942 BT_DBG("conn %p", conn);
944 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
945 l2cap_le_conn_ready(conn);
947 if (conn->hcon->out && conn->hcon->type == LE_LINK)
948 smp_conn_security(conn, conn->hcon->pending_sec_level);
950 mutex_lock(&conn->chan_lock);
952 list_for_each_entry(chan, &conn->chan_l, list) {
954 l2cap_chan_lock(chan);
956 if (conn->hcon->type == LE_LINK) {
957 if (smp_conn_security(conn, chan->sec_level))
958 l2cap_chan_ready(chan);
960 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
961 struct sock *sk = chan->sk;
962 __clear_chan_timer(chan);
964 __l2cap_state_change(chan, BT_CONNECTED);
965 sk->sk_state_change(sk);
968 } else if (chan->state == BT_CONNECT)
969 l2cap_do_start(chan);
971 l2cap_chan_unlock(chan);
974 mutex_unlock(&conn->chan_lock);
977 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate @err to every channel on @conn that demanded reliable
 * delivery (FLAG_FORCE_RELIABLE). */
978 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
980 struct l2cap_chan *chan;
982 BT_DBG("conn %p", conn);
984 mutex_lock(&conn->chan_lock);
986 list_for_each_entry(chan, &conn->chan_l, list) {
987 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
988 __l2cap_chan_set_err(chan, err);
991 mutex_unlock(&conn->chan_lock);
/* Info-request timer expiry: give up waiting for the peer's feature
 * mask, mark the exchange done, and restart channel establishment. */
994 static void l2cap_info_timeout(struct work_struct *work)
996 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
999 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1000 conn->info_ident = 0;
1002 l2cap_conn_start(conn);
/* Tear down the L2CAP connection attached to @hcon: free any partial rx
 * skb, delete every channel (reporting @err), drop the HCI channel,
 * cancel info/security timers, destroy SMP state, and detach the conn
 * from the hci_conn. The final kfree of conn is elided in this
 * extract. */
1005 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1007 struct l2cap_conn *conn = hcon->l2cap_data;
1008 struct l2cap_chan *chan, *l;
1013 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1015 kfree_skb(conn->rx_skb);
1017 mutex_lock(&conn->chan_lock);
1020 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1021 l2cap_chan_lock(chan);
1023 l2cap_chan_del(chan, err);
1025 l2cap_chan_unlock(chan);
1027 chan->ops->close(chan->data);
1030 mutex_unlock(&conn->chan_lock);
1032 hci_chan_del(conn->hchan);
1034 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1035 cancel_delayed_work_sync(&conn->info_timer);
1037 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1038 cancel_delayed_work_sync(&conn->security_timer);
1039 smp_chan_destroy(conn);
1042 hcon->l2cap_data = NULL;
/* SMP security-procedure timeout: drop the whole connection with
 * ETIMEDOUT. */
1046 static void security_timeout(struct work_struct *work)
1048 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1049 security_timer.work);
1051 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for @hcon: allocate an HCI
 * channel and the conn object, pick the MTU from the link type, wire up
 * addresses, locks, the channel list, and the appropriate delayed work
 * (SMP security timer for LE, info timer for BR/EDR). Early-return and
 * final return lines are elided in this extract. */
1054 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1056 struct l2cap_conn *conn = hcon->l2cap_data;
1057 struct hci_chan *hchan;
1062 hchan = hci_chan_create(hcon);
1066 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
/* Allocation failed: release the HCI channel again. */
1068 hci_chan_del(hchan);
1072 hcon->l2cap_data = conn;
1074 conn->hchan = hchan;
1076 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1078 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1079 conn->mtu = hcon->hdev->le_mtu;
1081 conn->mtu = hcon->hdev->acl_mtu;
1083 conn->src = &hcon->hdev->bdaddr;
1084 conn->dst = &hcon->dst;
1086 conn->feat_mask = 0;
1088 spin_lock_init(&conn->lock);
1089 mutex_init(&conn->chan_lock);
1091 INIT_LIST_HEAD(&conn->chan_l);
1093 if (hcon->type == LE_LINK)
1094 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1096 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1098 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1103 /* ---- Socket interface ---- */
1105 /* Find socket with psm and source bdaddr.
1106  * Returns closest match.
/* Search the global channel list for a channel in @state bound to
 * @psm: an exact source-address match wins immediately; otherwise a
 * BDADDR_ANY wildcard binding is kept as the closest match. Return
 * lines are elided in this extract. */
1108 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1110 struct l2cap_chan *c, *c1 = NULL;
1112 read_lock(&chan_list_lock);
1114 list_for_each_entry(c, &chan_list, global_l) {
1115 struct sock *sk = c->sk;
1117 if (state && c->state != state)
1120 if (c->psm == psm) {
1122 if (!bacmp(&bt_sk(sk)->src, src)) {
1123 read_unlock(&chan_list_lock);
1128 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1133 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection on @chan to @dst (by PSM and/or fixed
 * CID): validate the PSM encoding and channel mode, resolve the HCI
 * route, create the ACL or LE link with the required security, attach
 * the channel to the resulting l2cap_conn, and either complete
 * immediately (link already up) or leave the channel in BT_CONNECT with
 * a timer running. Error-label and return lines are elided in this
 * extract. */
1138 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1140 struct sock *sk = chan->sk;
1141 bdaddr_t *src = &bt_sk(sk)->src;
1142 struct l2cap_conn *conn;
1143 struct hci_conn *hcon;
1144 struct hci_dev *hdev;
1148 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1151 hdev = hci_get_route(dst, src);
1153 return -EHOSTUNREACH;
1157 l2cap_chan_lock(chan);
1159 /* PSM must be odd and lsb of upper byte must be 0 */
1160 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1161 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need a PSM or a fixed CID. */
1166 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1171 switch (chan->mode) {
1172 case L2CAP_MODE_BASIC:
1174 case L2CAP_MODE_ERTM:
1175 case L2CAP_MODE_STREAMING:
1186 switch (sk->sk_state) {
1190 /* Already connecting */
1196 /* Already connected */
1212 /* Set destination address and psm */
1213 bacpy(&bt_sk(sk)->dst, dst);
1220 auth_type = l2cap_get_auth_type(chan);
1222 if (chan->dcid == L2CAP_CID_LE_DATA)
1223 hcon = hci_connect(hdev, LE_LINK, dst,
1224 chan->sec_level, auth_type);
1226 hcon = hci_connect(hdev, ACL_LINK, dst,
1227 chan->sec_level, auth_type);
1230 err = PTR_ERR(hcon);
1234 conn = l2cap_conn_add(hcon, 0);
1241 /* Update source addr of the socket */
1242 bacpy(src, conn->src);
/* chan_add takes conn->chan_lock, so drop the channel lock across
 * it to preserve lock ordering. */
1244 l2cap_chan_unlock(chan);
1245 l2cap_chan_add(conn, chan);
1246 l2cap_chan_lock(chan);
1248 l2cap_state_change(chan, BT_CONNECT);
1249 __set_chan_timer(chan, sk->sk_sndtimeo);
1251 if (hcon->state == BT_CONNECTED) {
1252 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1253 __clear_chan_timer(chan);
1254 if (l2cap_chan_check_security(chan))
1255 l2cap_state_change(chan, BT_CONNECTED);
1257 l2cap_do_start(chan);
1263 l2cap_chan_unlock(chan);
1264 hci_dev_unlock(hdev);
/* Block (interruptibly) until every outstanding ERTM frame on the
 * socket's channel has been acknowledged, the connection drops, a
 * signal arrives, or a socket error is raised. Timeout-initialisation
 * and return lines are elided in this extract. */
1269 int __l2cap_wait_ack(struct sock *sk)
1271 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1272 DECLARE_WAITQUEUE(wait, current);
1276 add_wait_queue(sk_sleep(sk), &wait);
1277 set_current_state(TASK_INTERRUPTIBLE);
1278 while (chan->unacked_frames > 0 && chan->conn) {
1282 if (signal_pending(current)) {
1283 err = sock_intr_errno(timeo);
1288 timeo = schedule_timeout(timeo);
1290 set_current_state(TASK_INTERRUPTIBLE);
1292 err = sock_error(sk);
1296 set_current_state(TASK_RUNNING);
1297 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor-timer expiry: if the retry budget (remote_max_tx) is
 * exhausted, disconnect with ECONNABORTED; otherwise bump the retry
 * count, re-arm the monitor timer, and poll the peer with RR/RNR. */
1301 static void l2cap_monitor_timeout(struct work_struct *work)
1303 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1304 monitor_timer.work);
1306 BT_DBG("chan %p", chan);
1308 l2cap_chan_lock(chan);
1310 if (chan->retry_count >= chan->remote_max_tx) {
1311 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1312 l2cap_chan_unlock(chan);
1316 chan->retry_count++;
1317 __set_monitor_timer(chan);
1319 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1320 l2cap_chan_unlock(chan);
/* ERTM retransmission-timer expiry: start the monitor phase (retry
 * count 1, monitor timer armed, WAIT_F set) and poll the peer with an
 * RR/RNR carrying the Poll bit. */
1323 static void l2cap_retrans_timeout(struct work_struct *work)
1325 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1326 retrans_timer.work);
1328 BT_DBG("chan %p", chan);
1330 l2cap_chan_lock(chan);
1332 chan->retry_count = 1;
1333 __set_monitor_timer(chan);
1335 set_bit(CONN_WAIT_F, &chan->conn_state);
1337 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1339 l2cap_chan_unlock(chan);
/* Drop acknowledged I-frames from the head of the tx queue, stopping at
 * expected_ack_seq, and clear the retransmission timer once nothing is
 * outstanding. The loop-break and kfree_skb lines are elided in this
 * extract. */
1342 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1344 struct sk_buff *skb;
1346 while ((skb = skb_peek(&chan->tx_q)) &&
1347 chan->unacked_frames) {
1348 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1351 skb = skb_dequeue(&chan->tx_q);
1354 chan->unacked_frames--;
1357 if (!chan->unacked_frames)
1358 __clear_retrans_timer(chan);
/* Streaming mode transmit: drain the tx queue, stamping each frame's
 * control field with the next tx sequence number, recomputing the FCS
 * when CRC16 is enabled, and sending without waiting for acks. */
1361 static void l2cap_streaming_send(struct l2cap_chan *chan)
1363 struct sk_buff *skb;
1367 while ((skb = skb_dequeue(&chan->tx_q))) {
1368 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1369 control |= __set_txseq(chan, chan->next_tx_seq);
1370 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1372 if (chan->fcs == L2CAP_FCS_CRC16) {
/* FCS trailer covers everything before itself. */
1373 fcs = crc16(0, (u8 *)skb->data,
1374 skb->len - L2CAP_FCS_SIZE);
1375 put_unaligned_le16(fcs,
1376 skb->data + skb->len - L2CAP_FCS_SIZE);
1379 l2cap_do_send(chan, skb);
1381 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single I-frame with sequence number @tx_seq: locate it
 * in the tx queue, abort the connection if its retry budget is already
 * spent, otherwise clone it, rebuild the control field (preserving SAR
 * bits, folding in a pending Final bit and the current reqseq), refresh
 * the FCS, and resend the clone. Early-return lines are elided in this
 * extract. */
1385 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1387 struct sk_buff *skb, *tx_skb;
1391 skb = skb_peek(&chan->tx_q);
1395 while (bt_cb(skb)->tx_seq != tx_seq) {
1396 if (skb_queue_is_last(&chan->tx_q, skb))
1399 skb = skb_queue_next(&chan->tx_q, skb);
1402 if (chan->remote_max_tx &&
1403 bt_cb(skb)->retries == chan->remote_max_tx) {
1404 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Clone so the original stays queued for future retransmits. */
1408 tx_skb = skb_clone(skb, GFP_ATOMIC);
1409 bt_cb(skb)->retries++;
1411 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1412 control &= __get_sar_mask(chan);
1414 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1415 control |= __set_ctrl_final(chan);
1417 control |= __set_reqseq(chan, chan->buffer_seq);
1418 control |= __set_txseq(chan, tx_seq);
1420 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1422 if (chan->fcs == L2CAP_FCS_CRC16) {
1423 fcs = crc16(0, (u8 *)tx_skb->data,
1424 tx_skb->len - L2CAP_FCS_SIZE);
1425 put_unaligned_le16(fcs,
1426 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1429 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send frames from tx_send_head while the transmit
 * window has room — clone each skb, stamp control (SAR preserved,
 * Final/reqseq/txseq filled in), refresh FCS, send, arm the retrans
 * timer, and advance sequence/window accounting. Returns the number of
 * frames sent (return lines elided in this extract).
 * NOTE(review): the FCS at lines 1466-1469 is computed over and written
 * into skb->data (the original) using tx_skb->len, while the clone
 * tx_skb is what gets sent — since skb_clone shares frame data this
 * presumably still updates the transmitted bytes, but confirm against
 * the full source. */
1432 static int l2cap_ertm_send(struct l2cap_chan *chan)
1434 struct sk_buff *skb, *tx_skb;
1439 if (chan->state != BT_CONNECTED)
1442 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1444 if (chan->remote_max_tx &&
1445 bt_cb(skb)->retries == chan->remote_max_tx) {
1446 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1450 tx_skb = skb_clone(skb, GFP_ATOMIC);
1452 bt_cb(skb)->retries++;
1454 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1455 control &= __get_sar_mask(chan);
1457 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1458 control |= __set_ctrl_final(chan);
1460 control |= __set_reqseq(chan, chan->buffer_seq);
1461 control |= __set_txseq(chan, chan->next_tx_seq);
1463 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1465 if (chan->fcs == L2CAP_FCS_CRC16) {
1466 fcs = crc16(0, (u8 *)skb->data,
1467 tx_skb->len - L2CAP_FCS_SIZE);
1468 put_unaligned_le16(fcs, skb->data +
1469 tx_skb->len - L2CAP_FCS_SIZE);
1472 l2cap_do_send(chan, tx_skb);
1474 __set_retrans_timer(chan);
1476 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1478 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* First transmission of this frame: it is now unacked. */
1480 if (bt_cb(skb)->retries == 1) {
1481 chan->unacked_frames++;
1484 __clear_ack_timer(chan);
1487 chan->frames_sent++;
1489 if (skb_queue_is_last(&chan->tx_q, skb))
1490 chan->tx_send_head = NULL;
1492 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the start of the tx queue and the sequence
 * counter to the last acknowledged frame, then resend via
 * l2cap_ertm_send(). */
1498 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1502 if (!skb_queue_empty(&chan->tx_q))
1503 chan->tx_send_head = chan->tx_q.next;
1505 chan->next_tx_seq = chan->expected_ack_seq;
1506 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR if locally busy; otherwise try
 * to piggyback the ack on pending I-frames (l2cap_ertm_send), and fall
 * back to an explicit RR S-frame when nothing was sent. Early-return
 * lines are elided in this extract. */
1510 static void __l2cap_send_ack(struct l2cap_chan *chan)
1514 control |= __set_reqseq(chan, chan->buffer_seq);
1516 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1517 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1518 set_bit(CONN_RNR_SENT, &chan->conn_state);
1519 l2cap_send_sframe(chan, control);
/* I-frames were sent: they already carried the ack. */
1523 if (l2cap_ertm_send(chan) > 0)
1526 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1527 l2cap_send_sframe(chan, control);
/* Cancel the pending delayed ack and acknowledge immediately. */
1530 static void l2cap_send_ack(struct l2cap_chan *chan)
1532 __clear_ack_timer(chan);
1533 __l2cap_send_ack(chan);
/* Send a SREJ S-frame with the Final bit set, requesting the sequence
 * number recorded in the tail entry of the SREJ list. */
1536 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1538 struct srej_list *tail;
1541 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1542 control |= __set_ctrl_final(chan);
1544 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1545 control |= __set_reqseq(chan, tail->tx_seq);
1547 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb: the first @count bytes
 * go into the skb's linear area, the remainder into a chain of
 * continuation fragments (each at most conn->mtu) allocated through the
 * owner's alloc_skb callback. Loop-control, length-accounting, and
 * return lines are elided in this extract. */
1550 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1552 struct l2cap_conn *conn = chan->conn;
1553 struct sk_buff **frag;
1556 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1562 /* Continuation fragments (no L2CAP header) */
1563 frag = &skb_shinfo(skb)->frag_list;
1565 count = min_t(unsigned int, conn->mtu, len);
1567 *frag = chan->ops->alloc_skb(chan, count,
1568 msg->msg_flags & MSG_DONTWAIT, &err);
1572 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1575 (*frag)->priority = skb->priority;
1580 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * followed by the payload copied from @msg.  Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
1586 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1587 struct msghdr *msg, size_t len,
1590 struct l2cap_conn *conn = chan->conn;
1591 struct sk_buff *skb;
/* Header is base L2CAP header plus the PSM field. */
1592 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1593 struct l2cap_hdr *lh;
1595 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
/* Head skb holds at most one connection-MTU worth of payload. */
1597 count = min_t(unsigned int, (conn->mtu - hlen), len);
1599 skb = chan->ops->alloc_skb(chan, count + hlen,
1600 msg->msg_flags & MSG_DONTWAIT, &err);
1603 return ERR_PTR(err);
1605 skb->priority = priority;
1607 /* Create L2CAP header */
1608 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1609 lh->cid = cpu_to_le16(chan->dcid);
1610 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1611 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1613 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1614 if (unlikely(err < 0)) {
1616 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header followed by the payload
 * copied from @msg.  Returns the skb or an ERR_PTR on failure.
 */
1621 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1622 struct msghdr *msg, size_t len,
1625 struct l2cap_conn *conn = chan->conn;
1626 struct sk_buff *skb;
1627 int err, count, hlen = L2CAP_HDR_SIZE;
1628 struct l2cap_hdr *lh;
1630 BT_DBG("chan %p len %d", chan, (int)len);
1632 count = min_t(unsigned int, (conn->mtu - hlen), len);
1634 skb = chan->ops->alloc_skb(chan, count + hlen,
1635 msg->msg_flags & MSG_DONTWAIT, &err);
1638 return ERR_PTR(err);
1640 skb->priority = priority;
1642 /* Create L2CAP header */
1643 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1644 lh->cid = cpu_to_le16(chan->dcid);
1645 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Overflow beyond the head skb goes into frag_list continuations. */
1647 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1648 if (unlikely(err < 0)) {
1650 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, control field
 * (enhanced or extended width), optional SDU-length field for the first
 * segment, payload, and a zeroed FCS placeholder when CRC16 is in use.
 * Returns the skb or an ERR_PTR on failure.
 */
1655 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1656 struct msghdr *msg, size_t len,
1657 u32 control, u16 sdulen)
1659 struct l2cap_conn *conn = chan->conn;
1660 struct sk_buff *skb;
1661 int err, count, hlen;
1662 struct l2cap_hdr *lh;
1664 BT_DBG("chan %p len %d", chan, (int)len);
1667 return ERR_PTR(-ENOTCONN);
/* Extended control field (4 bytes) vs enhanced (2 bytes). */
1669 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1670 hlen = L2CAP_EXT_HDR_SIZE;
1672 hlen = L2CAP_ENH_HDR_SIZE;
/* First segment of a segmented SDU carries the total SDU length. */
1675 hlen += L2CAP_SDULEN_SIZE;
1677 if (chan->fcs == L2CAP_FCS_CRC16)
1678 hlen += L2CAP_FCS_SIZE;
1680 count = min_t(unsigned int, (conn->mtu - hlen), len);
1682 skb = chan->ops->alloc_skb(chan, count + hlen,
1683 msg->msg_flags & MSG_DONTWAIT, &err);
1686 return ERR_PTR(err);
1688 /* Create L2CAP header */
1689 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1690 lh->cid = cpu_to_le16(chan->dcid);
1691 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1693 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1696 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1698 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1699 if (unlikely(err < 0)) {
1701 return ERR_PTR(err);
/* FCS placeholder; the real CRC is filled in at transmit time. */
1704 if (chan->fcs == L2CAP_FCS_CRC16)
1705 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1707 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START PDU (carrying
 * the total SDU length), CONTINUE PDUs, and a final END PDU, then splice
 * the whole batch onto the channel's tx queue.  On any allocation error
 * the partially built queue is purged.
 */
1711 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1713 struct sk_buff *skb;
1714 struct sk_buff_head sar_queue;
1718 skb_queue_head_init(&sar_queue);
/* First segment: SAR=START, sdulen = total SDU length. */
1719 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1720 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1722 return PTR_ERR(skb);
1724 __skb_queue_tail(&sar_queue, skb);
1725 len -= chan->remote_mps;
1726 size += chan->remote_mps;
/* Middle segments are MPS-sized CONTINUEs; the remainder is END. */
1731 if (len > chan->remote_mps) {
1732 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1733 buflen = chan->remote_mps;
1735 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1739 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1741 skb_queue_purge(&sar_queue);
1742 return PTR_ERR(skb);
1745 __skb_queue_tail(&sar_queue, skb);
1749 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1750 if (chan->tx_send_head == NULL)
1751 chan->tx_send_head = sar_queue.next;
/* Entry point for sending user data on a channel.  Dispatches on channel
 * type/mode: connectionless, basic (single PDU), or ERTM/streaming with
 * SAR segmentation and (for ERTM) retransmission bookkeeping.
 */
1756 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1759 struct sk_buff *skb;
1763 /* Connectionless channel */
1764 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1765 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1767 return PTR_ERR(skb);
1769 l2cap_do_send(chan, skb);
1773 switch (chan->mode) {
1774 case L2CAP_MODE_BASIC:
1775 /* Check outgoing MTU */
1776 if (len > chan->omtu)
1779 /* Create a basic PDU */
1780 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1782 return PTR_ERR(skb);
1784 l2cap_do_send(chan, skb);
1788 case L2CAP_MODE_ERTM:
1789 case L2CAP_MODE_STREAMING:
1790 /* Entire SDU fits into one PDU */
1791 if (len <= chan->remote_mps) {
1792 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1793 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1796 return PTR_ERR(skb);
1798 __skb_queue_tail(&chan->tx_q, skb);
1800 if (chan->tx_send_head == NULL)
1801 chan->tx_send_head = skb;
1804 /* Segment SDU into multiples PDUs */
1805 err = l2cap_sar_segment_sdu(chan, msg, len);
1810 if (chan->mode == L2CAP_MODE_STREAMING) {
1811 l2cap_streaming_send(chan);
/* Remote busy while waiting for an F-bit: defer transmission. */
1816 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1817 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1822 err = l2cap_ertm_send(chan);
/* Unknown mode; message says "state" but prints chan->mode. */
1829 BT_DBG("bad state %1.1x", chan->mode);
1836 /* Copy frame to all raw sockets on that connection */
1837 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1839 struct sk_buff *nskb;
1840 struct l2cap_chan *chan;
1842 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under chan_lock; only RAW
 * channels receive a clone of the frame. */
1844 mutex_lock(&conn->chan_lock);
1846 list_for_each_entry(chan, &conn->chan_l, list) {
1847 struct sock *sk = chan->sk;
1848 if (chan->chan_type != L2CAP_CHAN_RAW)
1851 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: called in a context where sleeping is not allowed. */
1854 nskb = skb_clone(skb, GFP_ATOMIC);
1858 if (chan->ops->recv(chan->data, nskb))
1862 mutex_unlock(&conn->chan_lock);
1865 /* ---- L2CAP signalling commands ---- */
/* Build a signalling PDU: L2CAP header (signalling CID, LE or BR/EDR),
 * command header (code/ident/len), then @dlen bytes of @data, fragmented
 * across frag_list skbs when it exceeds the connection MTU.
 */
1866 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1867 u8 code, u8 ident, u16 dlen, void *data)
1869 struct sk_buff *skb, **frag;
1870 struct l2cap_cmd_hdr *cmd;
1871 struct l2cap_hdr *lh;
1874 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1875 conn, code, ident, dlen);
1877 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1878 count = min_t(unsigned int, conn->mtu, len);
1880 skb = bt_skb_alloc(count, GFP_ATOMIC);
1884 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1885 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the dedicated LE signalling channel. */
1887 if (conn->hcon->type == LE_LINK)
1888 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1890 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1892 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1895 cmd->len = cpu_to_le16(dlen);
1898 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1899 memcpy(skb_put(skb, count), data, count);
1905 /* Continuation fragments (no L2CAP header) */
1906 frag = &skb_shinfo(skb)->frag_list;
1908 count = min_t(unsigned int, conn->mtu, len);
1910 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1914 memcpy(skb_put(*frag, count), data, count);
1919 frag = &(*frag)->next;
/* Decode one configuration option at *ptr into (*type, *olen, *val),
 * widening 1/2/4-byte values into *val and passing larger options by
 * pointer.  Returns the total encoded length consumed.
 */
1929 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1931 struct l2cap_conf_opt *opt = *ptr;
1934 len = L2CAP_CONF_OPT_SIZE + opt->len;
1942 *val = *((u8 *) opt->val);
1946 *val = get_unaligned_le16(opt->val);
1950 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw bytes. */
1954 *val = (unsigned long) opt->val;
1958 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr and advance
 * the cursor.  1/2/4-byte values are stored inline (little-endian);
 * larger values are memcpy'd from the pointer passed in @val.
 */
1962 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1964 struct l2cap_conf_opt *opt = *ptr;
1966 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1973 *((u8 *) opt->val) = val;
1977 put_unaligned_le16(val, opt->val);
1981 put_unaligned_le32(val, opt->val);
1985 memcpy(opt->val, (void *) val, len);
1989 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an EFS (Extended Flow Specification) option built from the
 * channel's local QoS parameters; the fields filled depend on the
 * channel mode (ERTM uses the full local spec, streaming best-effort).
 */
1992 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1994 struct l2cap_conf_efs efs;
1996 switch (chan->mode) {
1997 case L2CAP_MODE_ERTM:
1998 efs.id = chan->local_id;
1999 efs.stype = chan->local_stype;
2000 efs.msdu = cpu_to_le16(chan->local_msdu);
2001 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2002 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2003 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2006 case L2CAP_MODE_STREAMING:
2008 efs.stype = L2CAP_SERV_BESTEFFORT;
2009 efs.msdu = cpu_to_le16(chan->local_msdu);
2010 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2019 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2020 (unsigned long) &efs);
/* Delayed-work handler for the ack timer: send the pending ack under the
 * channel lock, then drop the reference taken when the work was queued.
 */
2023 static void l2cap_ack_timeout(struct work_struct *work)
2025 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2028 BT_DBG("chan %p", chan);
2030 l2cap_chan_lock(chan);
2032 __l2cap_send_ack(chan);
2034 l2cap_chan_unlock(chan);
2036 l2cap_chan_put(chan);
/* Reset ERTM per-channel state: sequence counters, the three ERTM timers
 * (retransmission, monitor, delayed-ack), the SREJ receive queue and the
 * SREJ list.
 */
2039 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2041 chan->expected_ack_seq = 0;
2042 chan->unacked_frames = 0;
2043 chan->buffer_seq = 0;
2044 chan->num_acked = 0;
2045 chan->frames_sent = 0;
2047 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2048 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2049 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2051 skb_queue_head_init(&chan->srej_q);
2053 INIT_LIST_HEAD(&chan->srej_l);
/* Pick the channel mode to negotiate: keep ERTM/streaming only if the
 * remote feature mask supports it, otherwise fall back to basic mode.
 */
2056 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2059 case L2CAP_MODE_STREAMING:
2060 case L2CAP_MODE_ERTM:
2061 if (l2cap_mode_supported(mode, remote_feat_mask))
2065 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled
 * and the remote advertising the feature. */
2069 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2071 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the remote advertising the feature. */
2074 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2076 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Choose the tx window: switch to the extended control field when the
 * requested window exceeds the default and EWS is supported; otherwise
 * clamp the window to the standard maximum.
 */
2079 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2081 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2082 __l2cap_ews_supported(chan)) {
2083 /* use extended control field */
2084 set_bit(FLAG_EXT_CTRL, &chan->flags);
2085 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2087 chan->tx_win = min_t(u16, chan->tx_win,
2088 L2CAP_DEFAULT_TX_WINDOW);
2089 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request into @data for the channel's
 * current mode: MTU option (if non-default), an RFC option describing
 * the mode, and optionally EFS / FCS / EWS options.  Returns the total
 * request length (tail elided in this extract).
 */
2093 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2095 struct l2cap_conf_req *req = data;
2096 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2097 void *ptr = req->data;
2100 BT_DBG("chan %p", chan);
/* Mode selection happens only on the first config exchange. */
2102 if (chan->num_conf_req || chan->num_conf_rsp)
2105 switch (chan->mode) {
2106 case L2CAP_MODE_STREAMING:
2107 case L2CAP_MODE_ERTM:
2108 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2111 if (__l2cap_efs_supported(chan))
2112 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2116 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2121 if (chan->imtu != L2CAP_DEFAULT_MTU)
2122 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2124 switch (chan->mode) {
2125 case L2CAP_MODE_BASIC:
/* Omit the RFC option when the remote knows neither ERTM nor
 * streaming mode. */
2126 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2127 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2130 rfc.mode = L2CAP_MODE_BASIC;
2132 rfc.max_transmit = 0;
2133 rfc.retrans_timeout = 0;
2134 rfc.monitor_timeout = 0;
2135 rfc.max_pdu_size = 0;
2137 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2138 (unsigned long) &rfc);
2141 case L2CAP_MODE_ERTM:
2142 rfc.mode = L2CAP_MODE_ERTM;
2143 rfc.max_transmit = chan->max_tx;
/* Timeouts are set by the acceptor side; 0 here. */
2144 rfc.retrans_timeout = 0;
2145 rfc.monitor_timeout = 0;
2147 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2148 L2CAP_EXT_HDR_SIZE -
2151 rfc.max_pdu_size = cpu_to_le16(size);
2153 l2cap_txwin_setup(chan);
2155 rfc.txwin_size = min_t(u16, chan->tx_win,
2156 L2CAP_DEFAULT_TX_WINDOW);
2158 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2159 (unsigned long) &rfc);
2161 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2162 l2cap_add_opt_efs(&ptr, chan);
2164 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2167 if (chan->fcs == L2CAP_FCS_NONE ||
2168 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2169 chan->fcs = L2CAP_FCS_NONE;
2170 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2173 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2174 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2178 case L2CAP_MODE_STREAMING:
2179 rfc.mode = L2CAP_MODE_STREAMING;
2181 rfc.max_transmit = 0;
2182 rfc.retrans_timeout = 0;
2183 rfc.monitor_timeout = 0;
2185 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2186 L2CAP_EXT_HDR_SIZE -
2189 rfc.max_pdu_size = cpu_to_le16(size);
2191 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2192 (unsigned long) &rfc);
2194 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2195 l2cap_add_opt_efs(&ptr, chan);
2197 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2200 if (chan->fcs == L2CAP_FCS_NONE ||
2201 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2202 chan->fcs = L2CAP_FCS_NONE;
2203 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2208 req->dcid = cpu_to_le16(chan->dcid);
2209 req->flags = cpu_to_le16(0);
/* Parse the peer's buffered Configure Request (chan->conf_req) and build
 * our Configure Response into @data.  First pass decodes the options
 * (MTU, flush timeout, RFC, FCS, EFS, EWS); then the requested mode is
 * validated/negotiated and the response options are emitted.  Returns
 * the response length, or -ECONNREFUSED when negotiation fails.
 */
2214 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2216 struct l2cap_conf_rsp *rsp = data;
2217 void *ptr = rsp->data;
2218 void *req = chan->conf_req;
2219 int len = chan->conf_len;
2220 int type, hint, olen;
2222 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2223 struct l2cap_conf_efs efs;
2225 u16 mtu = L2CAP_DEFAULT_MTU;
2226 u16 result = L2CAP_CONF_SUCCESS;
2229 BT_DBG("chan %p", chan);
/* --- Pass 1: decode each option from the request buffer --- */
2231 while (len >= L2CAP_CONF_OPT_SIZE) {
2232 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; mandatory ones must be understood. */
2234 hint = type & L2CAP_CONF_HINT;
2235 type &= L2CAP_CONF_MASK;
2238 case L2CAP_CONF_MTU:
2242 case L2CAP_CONF_FLUSH_TO:
2243 chan->flush_to = val;
2246 case L2CAP_CONF_QOS:
2249 case L2CAP_CONF_RFC:
2250 if (olen == sizeof(rfc))
2251 memcpy(&rfc, (void *) val, olen);
2254 case L2CAP_CONF_FCS:
2255 if (val == L2CAP_FCS_NONE)
2256 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2259 case L2CAP_CONF_EFS:
2261 if (olen == sizeof(efs))
2262 memcpy(&efs, (void *) val, olen);
2265 case L2CAP_CONF_EWS:
2267 return -ECONNREFUSED;
2269 set_bit(FLAG_EXT_CTRL, &chan->flags);
2270 set_bit(CONF_EWS_RECV, &chan->conf_state);
2271 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2272 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as UNKNOWN. */
2279 result = L2CAP_CONF_UNKNOWN;
2280 *((u8 *) ptr++) = type;
2285 if (chan->num_conf_rsp || chan->num_conf_req > 1)
/* --- Mode negotiation --- */
2288 switch (chan->mode) {
2289 case L2CAP_MODE_STREAMING:
2290 case L2CAP_MODE_ERTM:
2291 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2292 chan->mode = l2cap_select_mode(rfc.mode,
2293 chan->conn->feat_mask);
2298 if (__l2cap_efs_supported(chan))
2299 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2301 return -ECONNREFUSED;
2304 if (chan->mode != rfc.mode)
2305 return -ECONNREFUSED;
2311 if (chan->mode != rfc.mode) {
2312 result = L2CAP_CONF_UNACCEPT;
2313 rfc.mode = chan->mode;
/* Second refused round: give up on this channel. */
2315 if (chan->num_conf_rsp == 1)
2316 return -ECONNREFUSED;
2318 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2319 sizeof(rfc), (unsigned long) &rfc);
2322 if (result == L2CAP_CONF_SUCCESS) {
2323 /* Configure output options and let the other side know
2324 * which ones we don't like. */
2326 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2327 result = L2CAP_CONF_UNACCEPT;
2330 set_bit(CONF_MTU_DONE, &chan->conf_state);
2332 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service-type mismatch (neither side NO_TRAFFIC) is refused. */
2335 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2336 efs.stype != L2CAP_SERV_NOTRAFIC &&
2337 efs.stype != chan->local_stype) {
2339 result = L2CAP_CONF_UNACCEPT;
2341 if (chan->num_conf_req >= 1)
2342 return -ECONNREFUSED;
2344 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2346 (unsigned long) &efs);
2348 /* Send PENDING Conf Rsp */
2349 result = L2CAP_CONF_PENDING;
2350 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2355 case L2CAP_MODE_BASIC:
2356 chan->fcs = L2CAP_FCS_NONE;
2357 set_bit(CONF_MODE_DONE, &chan->conf_state);
2360 case L2CAP_MODE_ERTM:
2361 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2362 chan->remote_tx_win = rfc.txwin_size;
2364 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2366 chan->remote_max_tx = rfc.max_transmit;
2368 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2370 L2CAP_EXT_HDR_SIZE -
2373 rfc.max_pdu_size = cpu_to_le16(size);
2374 chan->remote_mps = size;
/* NOTE(review): rfc.retrans_timeout/monitor_timeout are little-endian
 * wire fields, yet host constants are converted with le16_to_cpu()
 * here; cpu_to_le16() looks intended (no-op on LE hosts) -- confirm. */
2376 rfc.retrans_timeout =
2377 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2378 rfc.monitor_timeout =
2379 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2381 set_bit(CONF_MODE_DONE, &chan->conf_state);
2383 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2384 sizeof(rfc), (unsigned long) &rfc);
2386 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2387 chan->remote_id = efs.id;
2388 chan->remote_stype = efs.stype;
2389 chan->remote_msdu = le16_to_cpu(efs.msdu);
2390 chan->remote_flush_to =
2391 le32_to_cpu(efs.flush_to);
2392 chan->remote_acc_lat =
2393 le32_to_cpu(efs.acc_lat);
2394 chan->remote_sdu_itime =
2395 le32_to_cpu(efs.sdu_itime);
2396 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2397 sizeof(efs), (unsigned long) &efs);
2401 case L2CAP_MODE_STREAMING:
2402 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2404 L2CAP_EXT_HDR_SIZE -
2407 rfc.max_pdu_size = cpu_to_le16(size);
2408 chan->remote_mps = size;
2410 set_bit(CONF_MODE_DONE, &chan->conf_state);
2412 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2413 sizeof(rfc), (unsigned long) &rfc);
2418 result = L2CAP_CONF_UNACCEPT;
2420 memset(&rfc, 0, sizeof(rfc));
2421 rfc.mode = chan->mode;
2424 if (result == L2CAP_CONF_SUCCESS)
2425 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2427 rsp->scid = cpu_to_le16(chan->dcid);
2428 rsp->result = cpu_to_le16(result);
2429 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response options and build a follow-up
 * Configure Request into @data, adjusting our channel parameters
 * (imtu, flush timeout, RFC, EWS, EFS) to what was accepted.  Returns
 * the new request length or -ECONNREFUSED on an unacceptable response.
 */
2434 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2436 struct l2cap_conf_req *req = data;
2437 void *ptr = req->data;
2440 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2441 struct l2cap_conf_efs efs;
2443 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2445 while (len >= L2CAP_CONF_OPT_SIZE) {
2446 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2449 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: reject but offer the
 * minimum in our next request. */
2450 if (val < L2CAP_DEFAULT_MIN_MTU) {
2451 *result = L2CAP_CONF_UNACCEPT;
2452 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2455 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2458 case L2CAP_CONF_FLUSH_TO:
2459 chan->flush_to = val;
2460 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2464 case L2CAP_CONF_RFC:
2465 if (olen == sizeof(rfc))
2466 memcpy(&rfc, (void *)val, olen);
/* A state-2 device must not be talked out of its chosen mode. */
2468 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2469 rfc.mode != chan->mode)
2470 return -ECONNREFUSED;
2474 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2475 sizeof(rfc), (unsigned long) &rfc);
2478 case L2CAP_CONF_EWS:
2479 chan->tx_win = min_t(u16, val,
2480 L2CAP_DEFAULT_EXT_WINDOW);
2481 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2485 case L2CAP_CONF_EFS:
2486 if (olen == sizeof(efs))
2487 memcpy(&efs, (void *)val, olen);
2489 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2490 efs.stype != L2CAP_SERV_NOTRAFIC &&
2491 efs.stype != chan->local_stype)
2492 return -ECONNREFUSED;
2494 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2495 sizeof(efs), (unsigned long) &efs);
/* Basic mode cannot be renegotiated to another mode. */
2500 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2501 return -ECONNREFUSED;
2503 chan->mode = rfc.mode;
2505 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2507 case L2CAP_MODE_ERTM:
2508 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2509 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2510 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2512 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2513 chan->local_msdu = le16_to_cpu(efs.msdu);
2514 chan->local_sdu_itime =
2515 le32_to_cpu(efs.sdu_itime);
2516 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2517 chan->local_flush_to =
2518 le32_to_cpu(efs.flush_to);
2522 case L2CAP_MODE_STREAMING:
2523 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2527 req->dcid = cpu_to_le16(chan->dcid);
2528 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal Configure Response header (scid/result/flags) with
 * no options; returns the response length (return elided in extract).
 */
2533 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2535 struct l2cap_conf_rsp *rsp = data;
2536 void *ptr = rsp->data;
2538 BT_DBG("chan %p", chan);
2540 rsp->scid = cpu_to_le16(chan->dcid);
2541 rsp->result = cpu_to_le16(result);
2542 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect Response (success) for a channel whose
 * acceptance was postponed, then kick off configuration by sending our
 * first Configure Request if one has not been sent yet.
 */
2547 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2549 struct l2cap_conn_rsp rsp;
2550 struct l2cap_conn *conn = chan->conn;
2553 rsp.scid = cpu_to_le16(chan->dcid);
2554 rsp.dcid = cpu_to_le16(chan->scid);
2555 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2556 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2557 l2cap_send_cmd(conn, chan->ident,
2558 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only the first caller to set CONF_REQ_SENT sends the request. */
2560 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2563 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2564 l2cap_build_conf_req(chan, buf), buf);
2565 chan->num_conf_req++;
/* Extract the RFC option from a Configure Response and apply its timer
 * and MPS values for ERTM/streaming channels; if the peer omitted the
 * RFC option, fall back to sane defaults.
 */
2568 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2572 struct l2cap_conf_rfc rfc;
2574 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM and streaming channels carry RFC parameters. */
2576 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2579 while (len >= L2CAP_CONF_OPT_SIZE) {
2580 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2583 case L2CAP_CONF_RFC:
2584 if (olen == sizeof(rfc))
2585 memcpy(&rfc, (void *)val, olen);
2590 /* Use sane default values in case a misbehaving remote device
2591 * did not send an RFC option.
2593 rfc.mode = chan->mode;
2594 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2595 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2596 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2598 BT_ERR("Expected RFC option was not found, using defaults");
2602 case L2CAP_MODE_ERTM:
2603 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2604 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2605 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2607 case L2CAP_MODE_STREAMING:
2608 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject.  If it rejects our outstanding Information
 * Request (feature mask probe), stop the info timer, mark the feature
 * exchange done and proceed with connection setup anyway.
 */
2612 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2614 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2616 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2619 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2620 cmd->ident == conn->info_ident) {
2621 cancel_delayed_work(&conn->info_timer);
2623 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2624 conn->info_ident = 0;
2626 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: find a listening channel for the
 * PSM, enforce link security and backlog limits, create the child
 * channel, reply with success/pending/refused, and trigger the feature
 * exchange or first Configure Request as appropriate.
 */
2632 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2634 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2635 struct l2cap_conn_rsp rsp;
2636 struct l2cap_chan *chan = NULL, *pchan;
2637 struct sock *parent, *sk = NULL;
2638 int result, status = L2CAP_CS_NO_INFO;
2640 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2641 __le16 psm = req->psm;
2643 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2645 /* Check if we have socket listening on psm */
2646 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2648 result = L2CAP_CR_BAD_PSM;
2654 mutex_lock(&conn->chan_lock);
2657 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2658 if (psm != cpu_to_le16(0x0001) &&
2659 !hci_conn_check_link_mode(conn->hcon)) {
2660 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2661 result = L2CAP_CR_SEC_BLOCK;
2665 result = L2CAP_CR_NO_MEM;
2667 /* Check for backlog size */
2668 if (sk_acceptq_is_full(parent)) {
2669 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2673 chan = pchan->ops->new_connection(pchan->data);
2679 /* Check if we already have channel with that dcid */
2680 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2681 sock_set_flag(sk, SOCK_ZAPPED);
2682 chan->ops->close(chan->data);
2686 hci_conn_hold(conn->hcon);
2688 bacpy(&bt_sk(sk)->src, conn->src);
2689 bacpy(&bt_sk(sk)->dst, conn->dst);
2693 bt_accept_enqueue(parent, sk);
2695 __l2cap_chan_add(conn, chan);
2699 __set_chan_timer(chan, sk->sk_sndtimeo);
2701 chan->ident = cmd->ident;
/* Feature exchange already done: decide between immediate config,
 * deferred accept (authorization pending) or security pending. */
2703 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2704 if (l2cap_chan_check_security(chan)) {
2705 if (bt_sk(sk)->defer_setup) {
2706 __l2cap_state_change(chan, BT_CONNECT2);
2707 result = L2CAP_CR_PEND;
2708 status = L2CAP_CS_AUTHOR_PEND;
2709 parent->sk_data_ready(parent, 0);
2711 __l2cap_state_change(chan, BT_CONFIG);
2712 result = L2CAP_CR_SUCCESS;
2713 status = L2CAP_CS_NO_INFO;
2716 __l2cap_state_change(chan, BT_CONNECT2);
2717 result = L2CAP_CR_PEND;
2718 status = L2CAP_CS_AUTHEN_PEND;
2721 __l2cap_state_change(chan, BT_CONNECT2);
2722 result = L2CAP_CR_PEND;
2723 status = L2CAP_CS_NO_INFO;
2727 release_sock(parent);
2728 mutex_unlock(&conn->chan_lock);
2731 rsp.scid = cpu_to_le16(scid);
2732 rsp.dcid = cpu_to_le16(dcid);
2733 rsp.result = cpu_to_le16(result);
2734 rsp.status = cpu_to_le16(status);
2735 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* First connection before the feature exchange: start it now. */
2737 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2738 struct l2cap_info_req info;
2739 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2741 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2742 conn->info_ident = l2cap_get_ident(conn);
2744 schedule_delayed_work(&conn->info_timer,
2745 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2747 l2cap_send_cmd(conn, conn->info_ident,
2748 L2CAP_INFO_REQ, sizeof(info), &info);
2751 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2752 result == L2CAP_CR_SUCCESS) {
2754 set_bit(CONF_REQ_SENT, &chan->conf_state);
2755 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2756 l2cap_build_conf_req(chan, buf), buf);
2757 chan->num_conf_req++;
/* Handle a Connect Response: locate the channel by scid (or by command
 * ident while still pending), then on success move to BT_CONFIG and
 * send our Configure Request; on refusal tear the channel down.
 */
2763 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2765 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2766 u16 scid, dcid, result, status;
2767 struct l2cap_chan *chan;
2771 scid = __le16_to_cpu(rsp->scid);
2772 dcid = __le16_to_cpu(rsp->dcid);
2773 result = __le16_to_cpu(rsp->result);
2774 status = __le16_to_cpu(rsp->status);
2776 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
2777 dcid, scid, result, status);
2779 mutex_lock(&conn->chan_lock);
2782 chan = __l2cap_get_chan_by_scid(conn, scid);
/* scid of 0 in a pending response: match on command ident instead. */
2788 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
2797 l2cap_chan_lock(chan);
2800 case L2CAP_CR_SUCCESS:
2801 l2cap_state_change(chan, BT_CONFIG);
2804 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2806 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2809 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2810 l2cap_build_conf_req(chan, req), req);
2811 chan->num_conf_req++;
2815 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2819 l2cap_chan_del(chan, ECONNREFUSED);
2823 l2cap_chan_unlock(chan);
2826 mutex_unlock(&conn->chan_lock);
2831 static inline void set_default_fcs(struct l2cap_chan *chan)
2833 /* FCS is enabled only in ERTM or streaming mode, if one or both
2836 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2837 chan->fcs = L2CAP_FCS_NONE;
/* Neither side asked for no-FCS, so CRC16 is the default. */
2838 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2839 chan->fcs = L2CAP_FCS_CRC16;
/* Handle a Configure Request: validate channel state, accumulate the
 * option bytes in chan->conf_req across continuation packets, and on
 * the final packet parse the options, send our response, and complete
 * channel setup once both directions are configured.
 */
2842 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2844 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2847 struct l2cap_chan *chan;
2850 dcid = __le16_to_cpu(req->dcid);
2851 flags = __le16_to_cpu(req->flags);
2853 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2855 chan = l2cap_get_chan_by_scid(conn, dcid);
2859 l2cap_chan_lock(chan);
/* Config only makes sense in BT_CONFIG/BT_CONNECT2; otherwise the
 * CID is rejected. */
2861 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2862 struct l2cap_cmd_rej_cid rej;
2864 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2865 rej.scid = cpu_to_le16(chan->scid);
2866 rej.dcid = cpu_to_le16(chan->dcid);
2868 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2873 /* Reject if config buffer is too small. */
2874 len = cmd_len - sizeof(*req);
2875 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2876 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2877 l2cap_build_conf_rsp(chan, rsp,
2878 L2CAP_CONF_REJECT, flags), rsp);
2883 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2884 chan->conf_len += len;
/* Continuation flag set: more option bytes follow in another PDU. */
2886 if (flags & 0x0001) {
2887 /* Incomplete config. Send empty response. */
2888 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2889 l2cap_build_conf_rsp(chan, rsp,
2890 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2894 /* Complete config. */
2895 len = l2cap_parse_conf_req(chan, rsp);
2897 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2901 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2902 chan->num_conf_rsp++;
2904 /* Reset config buffer. */
2907 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: channel becomes operational. */
2910 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2911 set_default_fcs(chan);
2913 l2cap_state_change(chan, BT_CONNECTED);
2915 chan->next_tx_seq = 0;
2916 chan->expected_tx_seq = 0;
2917 skb_queue_head_init(&chan->tx_q);
2918 if (chan->mode == L2CAP_MODE_ERTM)
2919 l2cap_ertm_init(chan);
2921 l2cap_chan_ready(chan);
2925 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2927 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2928 l2cap_build_conf_req(chan, buf), buf);
2929 chan->num_conf_req++;
2932 /* Got Conf Rsp PENDING from remote side and assume we sent
2933 Conf Rsp PENDING in the code above */
2934 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2935 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2937 /* check compatibility */
2939 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2940 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2942 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2943 l2cap_build_conf_rsp(chan, rsp,
2944 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2948 l2cap_chan_unlock(chan);
/* Handle a Configure Response: on SUCCESS apply the agreed RFC values,
 * on PENDING reply with our own response, on UNACCEPT renegotiate with
 * a new Configure Request (bounded retries), otherwise disconnect.
 * When both directions finish, bring the channel up.
 */
2952 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2954 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2955 u16 scid, flags, result;
2956 struct l2cap_chan *chan;
/* NOTE(review): cmd->len is a wire (__le16) field; used here without
 * le16_to_cpu() -- verify endianness handling on BE hosts. */
2957 int len = cmd->len - sizeof(*rsp);
2959 scid = __le16_to_cpu(rsp->scid);
2960 flags = __le16_to_cpu(rsp->flags);
2961 result = __le16_to_cpu(rsp->result);
2963 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2964 scid, flags, result);
2966 chan = l2cap_get_chan_by_scid(conn, scid);
2970 l2cap_chan_lock(chan);
2973 case L2CAP_CONF_SUCCESS:
2974 l2cap_conf_rfc_get(chan, rsp->data, len);
2975 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2978 case L2CAP_CONF_PENDING:
2979 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2981 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2984 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2987 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2991 /* check compatibility */
2993 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2994 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2996 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2997 l2cap_build_conf_rsp(chan, buf,
2998 L2CAP_CONF_SUCCESS, 0x0000), buf);
3002 case L2CAP_CONF_UNACCEPT:
/* Retry negotiation up to L2CAP_CONF_MAX_CONF_RSP times. */
3003 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3006 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3007 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3011 /* throw out any old stored conf requests */
3012 result = L2CAP_CONF_SUCCESS;
3013 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3016 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3020 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3021 L2CAP_CONF_REQ, len, req);
3022 chan->num_conf_req++;
3023 if (result != L2CAP_CONF_SUCCESS)
3029 l2cap_chan_set_err(chan, ECONNRESET);
3031 __set_chan_timer(chan,
3032 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
3033 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3040 set_bit(CONF_INPUT_DONE, &chan->conf_state);
/* Both directions configured: channel becomes operational. */
3042 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3043 set_default_fcs(chan);
3045 l2cap_state_change(chan, BT_CONNECTED);
3046 chan->next_tx_seq = 0;
3047 chan->expected_tx_seq = 0;
3048 skb_queue_head_init(&chan->tx_q);
3049 if (chan->mode == L2CAP_MODE_ERTM)
3050 l2cap_ertm_init(chan);
3052 l2cap_chan_ready(chan);
3056 l2cap_chan_unlock(chan);
3060 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3062 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3063 struct l2cap_disconn_rsp rsp;
3065 struct l2cap_chan *chan;
3068 scid = __le16_to_cpu(req->scid);
3069 dcid = __le16_to_cpu(req->dcid);
3071 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3073 mutex_lock(&conn->chan_lock);
3075 chan = __l2cap_get_chan_by_scid(conn, dcid);
3077 mutex_unlock(&conn->chan_lock);
3081 l2cap_chan_lock(chan);
3085 rsp.dcid = cpu_to_le16(chan->scid);
3086 rsp.scid = cpu_to_le16(chan->dcid);
3087 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3090 sk->sk_shutdown = SHUTDOWN_MASK;
3093 l2cap_chan_del(chan, ECONNRESET);
3095 l2cap_chan_unlock(chan);
3097 chan->ops->close(chan->data);
3099 mutex_unlock(&conn->chan_lock);
3104 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3106 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3108 struct l2cap_chan *chan;
3110 scid = __le16_to_cpu(rsp->scid);
3111 dcid = __le16_to_cpu(rsp->dcid);
3113 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3115 mutex_lock(&conn->chan_lock);
3117 chan = __l2cap_get_chan_by_scid(conn, scid);
3119 mutex_unlock(&conn->chan_lock);
3123 l2cap_chan_lock(chan);
3125 l2cap_chan_del(chan, 0);
3127 l2cap_chan_unlock(chan);
3129 chan->ops->close(chan->data);
3131 mutex_unlock(&conn->chan_lock);
3136 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3138 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3141 type = __le16_to_cpu(req->type);
3143 BT_DBG("type 0x%4.4x", type);
3145 if (type == L2CAP_IT_FEAT_MASK) {
3147 u32 feat_mask = l2cap_feat_mask;
3148 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3149 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3150 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3152 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3155 feat_mask |= L2CAP_FEAT_EXT_FLOW
3156 | L2CAP_FEAT_EXT_WINDOW;
3158 put_unaligned_le32(feat_mask, rsp->data);
3159 l2cap_send_cmd(conn, cmd->ident,
3160 L2CAP_INFO_RSP, sizeof(buf), buf);
3161 } else if (type == L2CAP_IT_FIXED_CHAN) {
3163 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3166 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3168 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3170 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3171 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3172 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3173 l2cap_send_cmd(conn, cmd->ident,
3174 L2CAP_INFO_RSP, sizeof(buf), buf);
3176 struct l2cap_info_rsp rsp;
3177 rsp.type = cpu_to_le16(type);
3178 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3179 l2cap_send_cmd(conn, cmd->ident,
3180 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3186 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3188 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3191 type = __le16_to_cpu(rsp->type);
3192 result = __le16_to_cpu(rsp->result);
3194 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3196 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3197 if (cmd->ident != conn->info_ident ||
3198 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3201 cancel_delayed_work(&conn->info_timer);
3203 if (result != L2CAP_IR_SUCCESS) {
3204 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3205 conn->info_ident = 0;
3207 l2cap_conn_start(conn);
3213 case L2CAP_IT_FEAT_MASK:
3214 conn->feat_mask = get_unaligned_le32(rsp->data);
3216 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3217 struct l2cap_info_req req;
3218 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3220 conn->info_ident = l2cap_get_ident(conn);
3222 l2cap_send_cmd(conn, conn->info_ident,
3223 L2CAP_INFO_REQ, sizeof(req), &req);
3225 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3226 conn->info_ident = 0;
3228 l2cap_conn_start(conn);
3232 case L2CAP_IT_FIXED_CHAN:
3233 conn->fixed_chan_mask = rsp->data[0];
3234 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3235 conn->info_ident = 0;
3237 l2cap_conn_start(conn);
3244 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3245 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3248 struct l2cap_create_chan_req *req = data;
3249 struct l2cap_create_chan_rsp rsp;
3252 if (cmd_len != sizeof(*req))
3258 psm = le16_to_cpu(req->psm);
3259 scid = le16_to_cpu(req->scid);
3261 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3263 /* Placeholder: Always reject */
3265 rsp.scid = cpu_to_le16(scid);
3266 rsp.result = L2CAP_CR_NO_MEM;
3267 rsp.status = L2CAP_CS_NO_INFO;
3269 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3275 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3276 struct l2cap_cmd_hdr *cmd, void *data)
3278 BT_DBG("conn %p", conn);
3280 return l2cap_connect_rsp(conn, cmd, data);
3283 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3284 u16 icid, u16 result)
3286 struct l2cap_move_chan_rsp rsp;
3288 BT_DBG("icid %d, result %d", icid, result);
3290 rsp.icid = cpu_to_le16(icid);
3291 rsp.result = cpu_to_le16(result);
3293 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3296 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3297 struct l2cap_chan *chan, u16 icid, u16 result)
3299 struct l2cap_move_chan_cfm cfm;
3302 BT_DBG("icid %d, result %d", icid, result);
3304 ident = l2cap_get_ident(conn);
3306 chan->ident = ident;
3308 cfm.icid = cpu_to_le16(icid);
3309 cfm.result = cpu_to_le16(result);
3311 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3314 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3317 struct l2cap_move_chan_cfm_rsp rsp;
3319 BT_DBG("icid %d", icid);
3321 rsp.icid = cpu_to_le16(icid);
3322 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3325 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3326 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3328 struct l2cap_move_chan_req *req = data;
3330 u16 result = L2CAP_MR_NOT_ALLOWED;
3332 if (cmd_len != sizeof(*req))
3335 icid = le16_to_cpu(req->icid);
3337 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3342 /* Placeholder: Always refuse */
3343 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3348 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3349 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3351 struct l2cap_move_chan_rsp *rsp = data;
3354 if (cmd_len != sizeof(*rsp))
3357 icid = le16_to_cpu(rsp->icid);
3358 result = le16_to_cpu(rsp->result);
3360 BT_DBG("icid %d, result %d", icid, result);
3362 /* Placeholder: Always unconfirmed */
3363 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3368 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3369 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3371 struct l2cap_move_chan_cfm *cfm = data;
3374 if (cmd_len != sizeof(*cfm))
3377 icid = le16_to_cpu(cfm->icid);
3378 result = le16_to_cpu(cfm->result);
3380 BT_DBG("icid %d, result %d", icid, result);
3382 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3387 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3388 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3390 struct l2cap_move_chan_cfm_rsp *rsp = data;
3393 if (cmd_len != sizeof(*rsp))
3396 icid = le16_to_cpu(rsp->icid);
3398 BT_DBG("icid %d", icid);
3403 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3408 if (min > max || min < 6 || max > 3200)
3411 if (to_multiplier < 10 || to_multiplier > 3200)
3414 if (max >= to_multiplier * 8)
3417 max_latency = (to_multiplier * 8 / max) - 1;
3418 if (latency > 499 || latency > max_latency)
3424 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3425 struct l2cap_cmd_hdr *cmd, u8 *data)
3427 struct hci_conn *hcon = conn->hcon;
3428 struct l2cap_conn_param_update_req *req;
3429 struct l2cap_conn_param_update_rsp rsp;
3430 u16 min, max, latency, to_multiplier, cmd_len;
3433 if (!(hcon->link_mode & HCI_LM_MASTER))
3436 cmd_len = __le16_to_cpu(cmd->len);
3437 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3440 req = (struct l2cap_conn_param_update_req *) data;
3441 min = __le16_to_cpu(req->min);
3442 max = __le16_to_cpu(req->max);
3443 latency = __le16_to_cpu(req->latency);
3444 to_multiplier = __le16_to_cpu(req->to_multiplier);
3446 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3447 min, max, latency, to_multiplier);
3449 memset(&rsp, 0, sizeof(rsp));
3451 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3453 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3455 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3457 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3461 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3466 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3467 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3471 switch (cmd->code) {
3472 case L2CAP_COMMAND_REJ:
3473 l2cap_command_rej(conn, cmd, data);
3476 case L2CAP_CONN_REQ:
3477 err = l2cap_connect_req(conn, cmd, data);
3480 case L2CAP_CONN_RSP:
3481 err = l2cap_connect_rsp(conn, cmd, data);
3484 case L2CAP_CONF_REQ:
3485 err = l2cap_config_req(conn, cmd, cmd_len, data);
3488 case L2CAP_CONF_RSP:
3489 err = l2cap_config_rsp(conn, cmd, data);
3492 case L2CAP_DISCONN_REQ:
3493 err = l2cap_disconnect_req(conn, cmd, data);
3496 case L2CAP_DISCONN_RSP:
3497 err = l2cap_disconnect_rsp(conn, cmd, data);
3500 case L2CAP_ECHO_REQ:
3501 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3504 case L2CAP_ECHO_RSP:
3507 case L2CAP_INFO_REQ:
3508 err = l2cap_information_req(conn, cmd, data);
3511 case L2CAP_INFO_RSP:
3512 err = l2cap_information_rsp(conn, cmd, data);
3515 case L2CAP_CREATE_CHAN_REQ:
3516 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3519 case L2CAP_CREATE_CHAN_RSP:
3520 err = l2cap_create_channel_rsp(conn, cmd, data);
3523 case L2CAP_MOVE_CHAN_REQ:
3524 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3527 case L2CAP_MOVE_CHAN_RSP:
3528 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3531 case L2CAP_MOVE_CHAN_CFM:
3532 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3535 case L2CAP_MOVE_CHAN_CFM_RSP:
3536 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3540 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3548 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3549 struct l2cap_cmd_hdr *cmd, u8 *data)
3551 switch (cmd->code) {
3552 case L2CAP_COMMAND_REJ:
3555 case L2CAP_CONN_PARAM_UPDATE_REQ:
3556 return l2cap_conn_param_update_req(conn, cmd, data);
3558 case L2CAP_CONN_PARAM_UPDATE_RSP:
3562 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3567 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3568 struct sk_buff *skb)
3570 u8 *data = skb->data;
3572 struct l2cap_cmd_hdr cmd;
3575 l2cap_raw_recv(conn, skb);
3577 while (len >= L2CAP_CMD_HDR_SIZE) {
3579 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3580 data += L2CAP_CMD_HDR_SIZE;
3581 len -= L2CAP_CMD_HDR_SIZE;
3583 cmd_len = le16_to_cpu(cmd.len);
3585 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3587 if (cmd_len > len || !cmd.ident) {
3588 BT_DBG("corrupted command");
3592 if (conn->hcon->type == LE_LINK)
3593 err = l2cap_le_sig_cmd(conn, &cmd, data);
3595 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3598 struct l2cap_cmd_rej_unk rej;
3600 BT_ERR("Wrong link type (%d)", err);
3602 /* FIXME: Map err to a valid reason */
3603 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3604 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3614 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3616 u16 our_fcs, rcv_fcs;
3619 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3620 hdr_size = L2CAP_EXT_HDR_SIZE;
3622 hdr_size = L2CAP_ENH_HDR_SIZE;
3624 if (chan->fcs == L2CAP_FCS_CRC16) {
3625 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3626 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3627 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3629 if (our_fcs != rcv_fcs)
3635 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3639 chan->frames_sent = 0;
3641 control |= __set_reqseq(chan, chan->buffer_seq);
3643 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3644 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3645 l2cap_send_sframe(chan, control);
3646 set_bit(CONN_RNR_SENT, &chan->conn_state);
3649 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3650 l2cap_retransmit_frames(chan);
3652 l2cap_ertm_send(chan);
3654 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3655 chan->frames_sent == 0) {
3656 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3657 l2cap_send_sframe(chan, control);
3661 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3663 struct sk_buff *next_skb;
3664 int tx_seq_offset, next_tx_seq_offset;
3666 bt_cb(skb)->tx_seq = tx_seq;
3667 bt_cb(skb)->sar = sar;
3669 next_skb = skb_peek(&chan->srej_q);
3671 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3674 if (bt_cb(next_skb)->tx_seq == tx_seq)
3677 next_tx_seq_offset = __seq_offset(chan,
3678 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3680 if (next_tx_seq_offset > tx_seq_offset) {
3681 __skb_queue_before(&chan->srej_q, next_skb, skb);
3685 if (skb_queue_is_last(&chan->srej_q, next_skb))
3688 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3691 __skb_queue_tail(&chan->srej_q, skb);
3696 static void append_skb_frag(struct sk_buff *skb,
3697 struct sk_buff *new_frag, struct sk_buff **last_frag)
3699 /* skb->len reflects data in skb as well as all fragments
3700 * skb->data_len reflects only data in fragments
3702 if (!skb_has_frag_list(skb))
3703 skb_shinfo(skb)->frag_list = new_frag;
3705 new_frag->next = NULL;
3707 (*last_frag)->next = new_frag;
3708 *last_frag = new_frag;
3710 skb->len += new_frag->len;
3711 skb->data_len += new_frag->len;
3712 skb->truesize += new_frag->truesize;
3715 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3719 switch (__get_ctrl_sar(chan, control)) {
3720 case L2CAP_SAR_UNSEGMENTED:
3724 err = chan->ops->recv(chan->data, skb);
3727 case L2CAP_SAR_START:
3731 chan->sdu_len = get_unaligned_le16(skb->data);
3732 skb_pull(skb, L2CAP_SDULEN_SIZE);
3734 if (chan->sdu_len > chan->imtu) {
3739 if (skb->len >= chan->sdu_len)
3743 chan->sdu_last_frag = skb;
3749 case L2CAP_SAR_CONTINUE:
3753 append_skb_frag(chan->sdu, skb,
3754 &chan->sdu_last_frag);
3757 if (chan->sdu->len >= chan->sdu_len)
3767 append_skb_frag(chan->sdu, skb,
3768 &chan->sdu_last_frag);
3771 if (chan->sdu->len != chan->sdu_len)
3774 err = chan->ops->recv(chan->data, chan->sdu);
3777 /* Reassembly complete */
3779 chan->sdu_last_frag = NULL;
3787 kfree_skb(chan->sdu);
3789 chan->sdu_last_frag = NULL;
3796 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3798 BT_DBG("chan %p, Enter local busy", chan);
3800 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3802 __set_ack_timer(chan);
3805 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3809 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3812 control = __set_reqseq(chan, chan->buffer_seq);
3813 control |= __set_ctrl_poll(chan);
3814 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3815 l2cap_send_sframe(chan, control);
3816 chan->retry_count = 1;
3818 __clear_retrans_timer(chan);
3819 __set_monitor_timer(chan);
3821 set_bit(CONN_WAIT_F, &chan->conn_state);
3824 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3825 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3827 BT_DBG("chan %p, Exit local busy", chan);
3830 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3832 if (chan->mode == L2CAP_MODE_ERTM) {
3834 l2cap_ertm_enter_local_busy(chan);
3836 l2cap_ertm_exit_local_busy(chan);
3840 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3842 struct sk_buff *skb;
3845 while ((skb = skb_peek(&chan->srej_q)) &&
3846 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3849 if (bt_cb(skb)->tx_seq != tx_seq)
3852 skb = skb_dequeue(&chan->srej_q);
3853 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3854 err = l2cap_reassemble_sdu(chan, skb, control);
3857 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3861 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3862 tx_seq = __next_seq(chan, tx_seq);
3866 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3868 struct srej_list *l, *tmp;
3871 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3872 if (l->tx_seq == tx_seq) {
3877 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3878 control |= __set_reqseq(chan, l->tx_seq);
3879 l2cap_send_sframe(chan, control);
3881 list_add_tail(&l->list, &chan->srej_l);
3885 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3887 struct srej_list *new;
3890 while (tx_seq != chan->expected_tx_seq) {
3891 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3892 control |= __set_reqseq(chan, chan->expected_tx_seq);
3893 l2cap_send_sframe(chan, control);
3895 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3899 new->tx_seq = chan->expected_tx_seq;
3901 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3903 list_add_tail(&new->list, &chan->srej_l);
3906 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3911 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3913 u16 tx_seq = __get_txseq(chan, rx_control);
3914 u16 req_seq = __get_reqseq(chan, rx_control);
3915 u8 sar = __get_ctrl_sar(chan, rx_control);
3916 int tx_seq_offset, expected_tx_seq_offset;
3917 int num_to_ack = (chan->tx_win/6) + 1;
3920 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3921 tx_seq, rx_control);
3923 if (__is_ctrl_final(chan, rx_control) &&
3924 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3925 __clear_monitor_timer(chan);
3926 if (chan->unacked_frames > 0)
3927 __set_retrans_timer(chan);
3928 clear_bit(CONN_WAIT_F, &chan->conn_state);
3931 chan->expected_ack_seq = req_seq;
3932 l2cap_drop_acked_frames(chan);
3934 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3936 /* invalid tx_seq */
3937 if (tx_seq_offset >= chan->tx_win) {
3938 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3942 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3943 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3944 l2cap_send_ack(chan);
3948 if (tx_seq == chan->expected_tx_seq)
3951 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3952 struct srej_list *first;
3954 first = list_first_entry(&chan->srej_l,
3955 struct srej_list, list);
3956 if (tx_seq == first->tx_seq) {
3957 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3958 l2cap_check_srej_gap(chan, tx_seq);
3960 list_del(&first->list);
3963 if (list_empty(&chan->srej_l)) {
3964 chan->buffer_seq = chan->buffer_seq_srej;
3965 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3966 l2cap_send_ack(chan);
3967 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3970 struct srej_list *l;
3972 /* duplicated tx_seq */
3973 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3976 list_for_each_entry(l, &chan->srej_l, list) {
3977 if (l->tx_seq == tx_seq) {
3978 l2cap_resend_srejframe(chan, tx_seq);
3983 err = l2cap_send_srejframe(chan, tx_seq);
3985 l2cap_send_disconn_req(chan->conn, chan, -err);
3990 expected_tx_seq_offset = __seq_offset(chan,
3991 chan->expected_tx_seq, chan->buffer_seq);
3993 /* duplicated tx_seq */
3994 if (tx_seq_offset < expected_tx_seq_offset)
3997 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3999 BT_DBG("chan %p, Enter SREJ", chan);
4001 INIT_LIST_HEAD(&chan->srej_l);
4002 chan->buffer_seq_srej = chan->buffer_seq;
4004 __skb_queue_head_init(&chan->srej_q);
4005 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4007 /* Set P-bit only if there are some I-frames to ack. */
4008 if (__clear_ack_timer(chan))
4009 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4011 err = l2cap_send_srejframe(chan, tx_seq);
4013 l2cap_send_disconn_req(chan->conn, chan, -err);
4020 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4022 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4023 bt_cb(skb)->tx_seq = tx_seq;
4024 bt_cb(skb)->sar = sar;
4025 __skb_queue_tail(&chan->srej_q, skb);
4029 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4030 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4033 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4037 if (__is_ctrl_final(chan, rx_control)) {
4038 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4039 l2cap_retransmit_frames(chan);
4043 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4044 if (chan->num_acked == num_to_ack - 1)
4045 l2cap_send_ack(chan);
4047 __set_ack_timer(chan);
4056 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4058 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4059 __get_reqseq(chan, rx_control), rx_control);
4061 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4062 l2cap_drop_acked_frames(chan);
4064 if (__is_ctrl_poll(chan, rx_control)) {
4065 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4066 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4067 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4068 (chan->unacked_frames > 0))
4069 __set_retrans_timer(chan);
4071 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4072 l2cap_send_srejtail(chan);
4074 l2cap_send_i_or_rr_or_rnr(chan);
4077 } else if (__is_ctrl_final(chan, rx_control)) {
4078 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4080 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4081 l2cap_retransmit_frames(chan);
4084 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4085 (chan->unacked_frames > 0))
4086 __set_retrans_timer(chan);
4088 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4089 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4090 l2cap_send_ack(chan);
4092 l2cap_ertm_send(chan);
4096 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4098 u16 tx_seq = __get_reqseq(chan, rx_control);
4100 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4102 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4104 chan->expected_ack_seq = tx_seq;
4105 l2cap_drop_acked_frames(chan);
4107 if (__is_ctrl_final(chan, rx_control)) {
4108 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4109 l2cap_retransmit_frames(chan);
4111 l2cap_retransmit_frames(chan);
4113 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4114 set_bit(CONN_REJ_ACT, &chan->conn_state);
4117 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4119 u16 tx_seq = __get_reqseq(chan, rx_control);
4121 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4123 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4125 if (__is_ctrl_poll(chan, rx_control)) {
4126 chan->expected_ack_seq = tx_seq;
4127 l2cap_drop_acked_frames(chan);
4129 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4130 l2cap_retransmit_one_frame(chan, tx_seq);
4132 l2cap_ertm_send(chan);
4134 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4135 chan->srej_save_reqseq = tx_seq;
4136 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4138 } else if (__is_ctrl_final(chan, rx_control)) {
4139 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4140 chan->srej_save_reqseq == tx_seq)
4141 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4143 l2cap_retransmit_one_frame(chan, tx_seq);
4145 l2cap_retransmit_one_frame(chan, tx_seq);
4146 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4147 chan->srej_save_reqseq = tx_seq;
4148 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4153 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4155 u16 tx_seq = __get_reqseq(chan, rx_control);
4157 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4159 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4160 chan->expected_ack_seq = tx_seq;
4161 l2cap_drop_acked_frames(chan);
4163 if (__is_ctrl_poll(chan, rx_control))
4164 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4166 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4167 __clear_retrans_timer(chan);
4168 if (__is_ctrl_poll(chan, rx_control))
4169 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4173 if (__is_ctrl_poll(chan, rx_control)) {
4174 l2cap_send_srejtail(chan);
4176 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4177 l2cap_send_sframe(chan, rx_control);
4181 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4183 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4185 if (__is_ctrl_final(chan, rx_control) &&
4186 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4187 __clear_monitor_timer(chan);
4188 if (chan->unacked_frames > 0)
4189 __set_retrans_timer(chan);
4190 clear_bit(CONN_WAIT_F, &chan->conn_state);
4193 switch (__get_ctrl_super(chan, rx_control)) {
4194 case L2CAP_SUPER_RR:
4195 l2cap_data_channel_rrframe(chan, rx_control);
4198 case L2CAP_SUPER_REJ:
4199 l2cap_data_channel_rejframe(chan, rx_control);
4202 case L2CAP_SUPER_SREJ:
4203 l2cap_data_channel_srejframe(chan, rx_control);
4206 case L2CAP_SUPER_RNR:
4207 l2cap_data_channel_rnrframe(chan, rx_control);
4215 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4219 int len, next_tx_seq_offset, req_seq_offset;
4221 control = __get_control(chan, skb->data);
4222 skb_pull(skb, __ctrl_size(chan));
4226 * We can just drop the corrupted I-frame here.
4227 * Receiver will miss it and start proper recovery
4228 * procedures and ask retransmission.
4230 if (l2cap_check_fcs(chan, skb))
4233 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4234 len -= L2CAP_SDULEN_SIZE;
4236 if (chan->fcs == L2CAP_FCS_CRC16)
4237 len -= L2CAP_FCS_SIZE;
4239 if (len > chan->mps) {
4240 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4244 req_seq = __get_reqseq(chan, control);
4246 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4248 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4249 chan->expected_ack_seq);
4251 /* check for invalid req-seq */
4252 if (req_seq_offset > next_tx_seq_offset) {
4253 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4257 if (!__is_sframe(chan, control)) {
4259 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4263 l2cap_data_channel_iframe(chan, control, skb);
4267 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4271 l2cap_data_channel_sframe(chan, control, skb);
4281 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4283 struct l2cap_chan *chan;
4288 chan = l2cap_get_chan_by_scid(conn, cid);
4290 BT_DBG("unknown cid 0x%4.4x", cid);
4291 /* Drop packet and return */
4296 l2cap_chan_lock(chan);
4298 BT_DBG("chan %p, len %d", chan, skb->len);
4300 if (chan->state != BT_CONNECTED)
4303 switch (chan->mode) {
4304 case L2CAP_MODE_BASIC:
4305 /* If socket recv buffers overflows we drop data here
4306 * which is *bad* because L2CAP has to be reliable.
4307 * But we don't have any other choice. L2CAP doesn't
4308 * provide flow control mechanism. */
4310 if (chan->imtu < skb->len)
4313 if (!chan->ops->recv(chan->data, skb))
4317 case L2CAP_MODE_ERTM:
4318 l2cap_ertm_data_rcv(chan, skb);
4322 case L2CAP_MODE_STREAMING:
4323 control = __get_control(chan, skb->data);
4324 skb_pull(skb, __ctrl_size(chan));
4327 if (l2cap_check_fcs(chan, skb))
4330 if (__is_sar_start(chan, control))
4331 len -= L2CAP_SDULEN_SIZE;
4333 if (chan->fcs == L2CAP_FCS_CRC16)
4334 len -= L2CAP_FCS_SIZE;
4336 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4339 tx_seq = __get_txseq(chan, control);
4341 if (chan->expected_tx_seq != tx_seq) {
4342 /* Frame(s) missing - must discard partial SDU */
4343 kfree_skb(chan->sdu);
4345 chan->sdu_last_frag = NULL;
4348 /* TODO: Notify userland of missing data */
4351 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4353 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4354 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4359 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4367 l2cap_chan_unlock(chan);
4372 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4374 struct l2cap_chan *chan;
4376 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4380 BT_DBG("chan %p, len %d", chan, skb->len);
4382 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4385 if (chan->imtu < skb->len)
4388 if (!chan->ops->recv(chan->data, skb))
4397 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4399 struct l2cap_chan *chan;
4401 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4405 BT_DBG("chan %p, len %d", chan, skb->len);
4407 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4410 if (chan->imtu < skb->len)
4413 if (!chan->ops->recv(chan->data, skb))
4422 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4424 struct l2cap_hdr *lh = (void *) skb->data;
4428 skb_pull(skb, L2CAP_HDR_SIZE);
4429 cid = __le16_to_cpu(lh->cid);
4430 len = __le16_to_cpu(lh->len);
4432 if (len != skb->len) {
4437 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4440 case L2CAP_CID_LE_SIGNALING:
4441 case L2CAP_CID_SIGNALING:
4442 l2cap_sig_channel(conn, skb);
4445 case L2CAP_CID_CONN_LESS:
4446 psm = get_unaligned_le16(skb->data);
4448 l2cap_conless_channel(conn, psm, skb);
4451 case L2CAP_CID_LE_DATA:
4452 l2cap_att_channel(conn, cid, skb);
4456 if (smp_sig_channel(conn, skb))
4457 l2cap_conn_del(conn->hcon, EACCES);
4461 l2cap_data_channel(conn, cid, skb);
4466 /* ---- L2CAP interface with lower layer (HCI) ---- */
4468 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4470 int exact = 0, lm1 = 0, lm2 = 0;
4471 struct l2cap_chan *c;
4473 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4475 /* Find listening sockets and check their link_mode */
4476 read_lock(&chan_list_lock);
4477 list_for_each_entry(c, &chan_list, global_l) {
4478 struct sock *sk = c->sk;
4480 if (c->state != BT_LISTEN)
4483 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4484 lm1 |= HCI_LM_ACCEPT;
4485 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4486 lm1 |= HCI_LM_MASTER;
4488 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4489 lm2 |= HCI_LM_ACCEPT;
4490 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4491 lm2 |= HCI_LM_MASTER;
4494 read_unlock(&chan_list_lock);
4496 return exact ? lm1 : lm2;
4499 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4501 struct l2cap_conn *conn;
4503 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4506 conn = l2cap_conn_add(hcon, status);
4508 l2cap_conn_ready(conn);
4510 l2cap_conn_del(hcon, bt_to_errno(status));
4515 int l2cap_disconn_ind(struct hci_conn *hcon)
4517 struct l2cap_conn *conn = hcon->l2cap_data;
4519 BT_DBG("hcon %p", hcon);
4522 return HCI_ERROR_REMOTE_USER_TERM;
4523 return conn->disc_reason;
4526 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4528 BT_DBG("hcon %p reason %d", hcon, reason);
4530 l2cap_conn_del(hcon, bt_to_errno(reason));
4534 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4536 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4539 if (encrypt == 0x00) {
4540 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4541 __clear_chan_timer(chan);
4542 __set_chan_timer(chan,
4543 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4544 } else if (chan->sec_level == BT_SECURITY_HIGH)
4545 l2cap_chan_close(chan, ECONNREFUSED);
4547 if (chan->sec_level == BT_SECURITY_MEDIUM)
4548 __clear_chan_timer(chan);
4552 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4554 struct l2cap_conn *conn = hcon->l2cap_data;
4555 struct l2cap_chan *chan;
4560 BT_DBG("conn %p", conn);
4562 if (hcon->type == LE_LINK) {
4563 smp_distribute_keys(conn, 0);
4564 cancel_delayed_work(&conn->security_timer);
4567 mutex_lock(&conn->chan_lock);
4569 list_for_each_entry(chan, &conn->chan_l, list) {
4570 l2cap_chan_lock(chan);
4572 BT_DBG("chan->scid %d", chan->scid);
4574 if (chan->scid == L2CAP_CID_LE_DATA) {
4575 if (!status && encrypt) {
4576 chan->sec_level = hcon->sec_level;
4577 l2cap_chan_ready(chan);
4580 l2cap_chan_unlock(chan);
4584 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4585 l2cap_chan_unlock(chan);
4589 if (!status && (chan->state == BT_CONNECTED ||
4590 chan->state == BT_CONFIG)) {
4591 l2cap_check_encryption(chan, encrypt);
4592 l2cap_chan_unlock(chan);
4596 if (chan->state == BT_CONNECT) {
4598 l2cap_send_conn_req(chan);
4600 __clear_chan_timer(chan);
4601 __set_chan_timer(chan,
4602 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4604 } else if (chan->state == BT_CONNECT2) {
4605 struct sock *sk = chan->sk;
4606 struct l2cap_conn_rsp rsp;
4612 if (bt_sk(sk)->defer_setup) {
4613 struct sock *parent = bt_sk(sk)->parent;
4614 res = L2CAP_CR_PEND;
4615 stat = L2CAP_CS_AUTHOR_PEND;
4617 parent->sk_data_ready(parent, 0);
4619 __l2cap_state_change(chan, BT_CONFIG);
4620 res = L2CAP_CR_SUCCESS;
4621 stat = L2CAP_CS_NO_INFO;
4624 __l2cap_state_change(chan, BT_DISCONN);
4625 __set_chan_timer(chan,
4626 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4627 res = L2CAP_CR_SEC_BLOCK;
4628 stat = L2CAP_CS_NO_INFO;
4633 rsp.scid = cpu_to_le16(chan->dcid);
4634 rsp.dcid = cpu_to_le16(chan->scid);
4635 rsp.result = cpu_to_le16(res);
4636 rsp.status = cpu_to_le16(stat);
4637 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4641 l2cap_chan_unlock(chan);
4644 mutex_unlock(&conn->chan_lock);
4649 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4651 struct l2cap_conn *conn = hcon->l2cap_data;
4654 conn = l2cap_conn_add(hcon, 0);
4659 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4661 if (!(flags & ACL_CONT)) {
4662 struct l2cap_hdr *hdr;
4663 struct l2cap_chan *chan;
4668 BT_ERR("Unexpected start frame (len %d)", skb->len);
4669 kfree_skb(conn->rx_skb);
4670 conn->rx_skb = NULL;
4672 l2cap_conn_unreliable(conn, ECOMM);
4675 /* Start fragment always begin with Basic L2CAP header */
4676 if (skb->len < L2CAP_HDR_SIZE) {
4677 BT_ERR("Frame is too short (len %d)", skb->len);
4678 l2cap_conn_unreliable(conn, ECOMM);
4682 hdr = (struct l2cap_hdr *) skb->data;
4683 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4684 cid = __le16_to_cpu(hdr->cid);
4686 if (len == skb->len) {
4687 /* Complete frame received */
4688 l2cap_recv_frame(conn, skb);
4692 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4694 if (skb->len > len) {
4695 BT_ERR("Frame is too long (len %d, expected len %d)",
4697 l2cap_conn_unreliable(conn, ECOMM);
4701 chan = l2cap_get_chan_by_scid(conn, cid);
4703 if (chan && chan->sk) {
4704 struct sock *sk = chan->sk;
4707 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4708 BT_ERR("Frame exceeding recv MTU (len %d, "
4712 l2cap_conn_unreliable(conn, ECOMM);
4718 /* Allocate skb for the complete frame (with header) */
4719 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4723 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4725 conn->rx_len = len - skb->len;
4727 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4729 if (!conn->rx_len) {
4730 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4731 l2cap_conn_unreliable(conn, ECOMM);
4735 if (skb->len > conn->rx_len) {
4736 BT_ERR("Fragment is too long (len %d, expected %d)",
4737 skb->len, conn->rx_len);
4738 kfree_skb(conn->rx_skb);
4739 conn->rx_skb = NULL;
4741 l2cap_conn_unreliable(conn, ECOMM);
4745 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4747 conn->rx_len -= skb->len;
4749 if (!conn->rx_len) {
4750 /* Complete frame received */
4751 l2cap_recv_frame(conn, conn->rx_skb);
4752 conn->rx_skb = NULL;
4761 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4763 struct l2cap_chan *c;
4765 read_lock(&chan_list_lock);
4767 list_for_each_entry(c, &chan_list, global_l) {
4768 struct sock *sk = c->sk;
4770 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4771 batostr(&bt_sk(sk)->src),
4772 batostr(&bt_sk(sk)->dst),
4773 c->state, __le16_to_cpu(c->psm),
4774 c->scid, c->dcid, c->imtu, c->omtu,
4775 c->sec_level, c->mode);
4778 read_unlock(&chan_list_lock);
4783 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4785 return single_open(file, l2cap_debugfs_show, inode->i_private);
4788 static const struct file_operations l2cap_debugfs_fops = {
4789 .open = l2cap_debugfs_open,
4791 .llseek = seq_lseek,
4792 .release = single_release,
4795 static struct dentry *l2cap_debugfs;
4797 int __init l2cap_init(void)
4801 err = l2cap_init_sockets();
4806 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4807 bt_debugfs, NULL, &l2cap_debugfs_fops);
4809 BT_ERR("Failed to create L2CAP debug file");
4815 void l2cap_exit(void)
4817 debugfs_remove(l2cap_debugfs);
4818 l2cap_cleanup_sockets();
4821 module_param(disable_ertm, bool, 0644);
4822 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");