2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
/* Feature mask advertised in L2CAP information responses; fixed-channel
 * support is always claimed. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Bitmap of supported fixed channels (signalling channel only here). */
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
/* Global list of all L2CAP channels in the system, guarded by the
 * rwlock below (readers: lookups; writers: add/remove). */
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for helpers defined later in this file. */
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 /* ---- L2CAP channels ---- */
/* Find a channel on @conn by destination CID.  Caller must hold
 * conn->chan_lock (the "__" prefix marks the unlocked variant). */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 list_for_each_entry(c, &conn->chan_l, list) {
/* Find a channel on @conn by source CID.  Caller must hold
 * conn->chan_lock. */
89 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked socket */
/* Locked wrapper around __l2cap_get_chan_by_scid().
 * NOTE(review): the lock is dropped before returning, so the result is
 * only safe as long as the channel cannot disappear concurrently. */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 struct l2cap_chan *c;
106 mutex_lock(&conn->chan_lock);
107 c = __l2cap_get_chan_by_scid(conn, cid);
108 mutex_unlock(&conn->chan_lock);
/* Find a channel on @conn by the signalling command identifier it is
 * currently using.  Caller must hold conn->chan_lock. */
113 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
115 struct l2cap_chan *c;
117 list_for_each_entry(c, &conn->chan_l, list) {
118 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
124 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
126 struct l2cap_chan *c;
128 mutex_lock(&conn->chan_lock);
129 c = __l2cap_get_chan_by_ident(conn, ident);
130 mutex_unlock(&conn->chan_lock);
/* Search the global channel list for a channel bound to @psm on source
 * address @src.  Caller must hold chan_list_lock. */
135 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
137 struct l2cap_chan *c;
139 list_for_each_entry(c, &chan_list, global_l) {
140 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on @src.  With psm == 0 an unused dynamic PSM is
 * allocated instead (odd values in the 0x1001-0x10ff range, per the
 * L2CAP spec's dynamic PSM rules). */
146 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
150 write_lock(&chan_list_lock);
/* Reject an explicit PSM that is already taken on this address. */
152 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
165 for (p = 0x1001; p < 0x1100; p += 2)
166 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
167 chan->psm = cpu_to_le16(p);
168 chan->sport = cpu_to_le16(p);
175 write_unlock(&chan_list_lock);
/* Bind @chan to a fixed source CID (used for raw/fixed channels). */
179 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
181 write_lock(&chan_list_lock);
185 write_unlock(&chan_list_lock);
/* Allocate the first free dynamic source CID on @conn by linear scan of
 * the dynamic range.  Caller must hold conn->chan_lock. */
190 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
192 u16 cid = L2CAP_CID_DYN_START;
194 for (; cid < L2CAP_CID_DYN_END; cid++) {
195 if (!__l2cap_get_chan_by_scid(conn, cid))
/* Transition @chan to @state and notify the owner through the
 * state_change callback.  "__" variant: caller holds the socket lock. */
202 static void __l2cap_state_change(struct l2cap_chan *chan, int state)
204 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
205 state_to_string(state));
208 chan->ops->state_change(chan->data, state);
/* Locking wrapper around __l2cap_state_change() (takes the sock lock;
 * the lock/unlock lines are not visible in this excerpt). */
211 static void l2cap_state_change(struct l2cap_chan *chan, int state)
213 struct sock *sk = chan->sk;
216 __l2cap_state_change(chan, state);
/* Record error @err on the channel's socket (sk->sk_err).  Caller holds
 * the socket lock. */
220 static inline void __l2cap_chan_set_err(struct l2cap_chan *chan, int err)
222 struct sock *sk = chan->sk;
/* Locking wrapper around __l2cap_chan_set_err(). */
227 static inline void l2cap_chan_set_err(struct l2cap_chan *chan, int err)
229 struct sock *sk = chan->sk;
232 __l2cap_chan_set_err(chan, err);
/* Delayed-work handler fired when chan_timer expires: close the channel
 * with a reason derived from its current state, then drop the reference
 * the timer held. */
236 static void l2cap_chan_timeout(struct work_struct *work)
238 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
240 struct l2cap_conn *conn = chan->conn;
243 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
/* Lock order: connection channel-list lock first, then the channel. */
245 mutex_lock(&conn->chan_lock);
246 l2cap_chan_lock(chan);
/* A timeout on an established/configuring channel, or on an outgoing
 * connect that already passed SDP security, is reported as refused. */
248 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
249 reason = ECONNREFUSED;
250 else if (chan->state == BT_CONNECT &&
251 chan->sec_level != BT_SECURITY_SDP)
252 reason = ECONNREFUSED;
256 l2cap_chan_close(chan, reason);
258 l2cap_chan_unlock(chan);
/* Let the owner (socket layer) release its resources. */
260 chan->ops->close(chan->data);
261 mutex_unlock(&conn->chan_lock);
/* Balance the hold taken when the timer was armed. */
263 l2cap_chan_put(chan);
/* Allocate a new channel bound to socket @sk, link it on the global
 * channel list and initialise its timer and refcount.
 * Returns the channel (error path with NULL return is not visible in
 * this excerpt). */
266 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
268 struct l2cap_chan *chan;
270 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
274 mutex_init(&chan->lock);
278 write_lock(&chan_list_lock);
279 list_add(&chan->global_l, &chan_list);
280 write_unlock(&chan_list_lock);
282 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
284 chan->state = BT_OPEN;
/* Creator's reference; dropped by l2cap_chan_destroy(). */
286 atomic_set(&chan->refcnt, 1);
288 BT_DBG("sk %p chan %p", sk, chan);
/* Unlink @chan from the global list and drop the creator's reference;
 * the memory is freed once the refcount reaches zero. */
293 void l2cap_chan_destroy(struct l2cap_chan *chan)
295 write_lock(&chan_list_lock);
296 list_del(&chan->global_l);
297 write_unlock(&chan_list_lock);
299 l2cap_chan_put(chan);
/* Attach @chan to connection @conn: assign CIDs/MTU according to the
 * channel type, set QoS defaults, take a channel reference and link it
 * on the connection's channel list.  Caller holds conn->chan_lock. */
302 void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
304 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
305 chan->psm, chan->dcid);
306
307 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
311 switch (chan->chan_type) {
312 case L2CAP_CHAN_CONN_ORIENTED:
/* LE links use the fixed LE data channel; BR/EDR gets a freshly
 * allocated dynamic CID. */
313 if (conn->hcon->type == LE_LINK) {
315 chan->omtu = L2CAP_LE_DEFAULT_MTU;
316 chan->scid = L2CAP_CID_LE_DATA;
317 chan->dcid = L2CAP_CID_LE_DATA;
319 /* Alloc CID for connection-oriented socket */
320 chan->scid = l2cap_alloc_cid(conn);
321 chan->omtu = L2CAP_DEFAULT_MTU;
325 case L2CAP_CHAN_CONN_LESS:
326 /* Connectionless socket */
327 chan->scid = L2CAP_CID_CONN_LESS;
328 chan->dcid = L2CAP_CID_CONN_LESS;
329 chan->omtu = L2CAP_DEFAULT_MTU;
333 /* Raw socket can send/recv signalling messages only */
334 chan->scid = L2CAP_CID_SIGNALING;
335 chan->dcid = L2CAP_CID_SIGNALING;
336 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default extended flow spec / QoS parameters (best effort). */
339 chan->local_id = L2CAP_BESTEFFORT_ID;
340 chan->local_stype = L2CAP_SERV_BESTEFFORT;
341 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
342 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
343 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
344 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* Reference held for membership on conn->chan_l; dropped in
 * l2cap_chan_del(). */
346 l2cap_chan_hold(chan);
348 list_add(&chan->list, &conn->chan_l);
/* Locked wrapper around __l2cap_chan_add(). */
351 void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
353 mutex_lock(&conn->chan_lock);
354 __l2cap_chan_add(conn, chan);
355 mutex_unlock(&conn->chan_lock);
/* Detach @chan from its connection and tear it down: stop timers,
 * unlink, drop the list reference, mark the socket closed/zapped with
 * @err, wake any accepting parent, and purge ERTM state.  Caller holds
 * conn->chan_lock. */
358 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
360 struct sock *sk = chan->sk;
361 struct l2cap_conn *conn = chan->conn;
362 struct sock *parent = bt_sk(sk)->parent;
364 __clear_chan_timer(chan);
366 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
369 /* Delete from channel list */
370 list_del(&chan->list);
/* Drop the reference taken in __l2cap_chan_add(). */
372 l2cap_chan_put(chan);
/* Release the HCI connection reference held for this channel. */
375 hci_conn_put(conn->hcon);
380 __l2cap_state_change(chan, BT_CLOSED);
381 sock_set_flag(sk, SOCK_ZAPPED);
384 __l2cap_chan_set_err(chan, err);
/* If still queued on a listening socket, unlink and wake the parent;
 * otherwise just notify this socket's waiters. */
387 bt_accept_unlink(sk);
388 parent->sk_data_ready(parent, 0);
390 sk->sk_state_change(sk);
/* Skip queue teardown when configuration never completed both ways
 * (per the CONF_*_DONE flags). */
394 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
395 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
398 skb_queue_purge(&chan->tx_q);
/* ERTM mode keeps extra state: kill its timers and flush the SREJ
 * queue/list. */
400 if (chan->mode == L2CAP_MODE_ERTM) {
401 struct srej_list *l, *tmp;
403 __clear_retrans_timer(chan);
404 __clear_monitor_timer(chan);
405 __clear_ack_timer(chan);
407 skb_queue_purge(&chan->srej_q);
409 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel queued on listening socket
 * @parent (used when the listener itself is being shut down). */
416 static void l2cap_chan_cleanup_listen(struct sock *parent)
420 BT_DBG("parent %p", parent);
422 /* Close not yet accepted channels */
423 while ((sk = bt_accept_dequeue(parent, NULL))) {
424 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
426 l2cap_chan_lock(chan);
427 __clear_chan_timer(chan);
428 l2cap_chan_close(chan, ECONNRESET);
429 l2cap_chan_unlock(chan);
/* Let the owner free its per-channel resources. */
431 chan->ops->close(chan->data);
/* Close @chan with @reason, choosing the shutdown path by state:
 * listeners clean their accept queue, established ACL channels send a
 * Disconnect request, half-open incoming channels reject the pending
 * Connect request, everything else is deleted directly. */
435 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
437 struct l2cap_conn *conn = chan->conn;
438 struct sock *sk = chan->sk;
440 BT_DBG("chan %p state %s sk %p", chan,
441 state_to_string(chan->state), sk);
443 switch (chan->state) {
/* BT_LISTEN: tear down queued children, then mark closed. */
446 l2cap_chan_cleanup_listen(sk);
448 __l2cap_state_change(chan, BT_CLOSED);
449 sock_set_flag(sk, SOCK_ZAPPED);
/* Connected/config states on ACL: start the disconnect handshake and
 * re-arm the channel timer to bound it. */
455 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
456 conn->hcon->type == ACL_LINK) {
457 __clear_chan_timer(chan);
458 __set_chan_timer(chan, sk->sk_sndtimeo);
459 l2cap_send_disconn_req(conn, chan, reason);
461 l2cap_chan_del(chan, reason);
/* BT_CONNECT2: an incoming Connect request is pending — answer it with
 * a rejection before deleting the channel. */
465 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
466 conn->hcon->type == ACL_LINK) {
467 struct l2cap_conn_rsp rsp;
/* defer_setup means authorisation was pending -> security block;
 * otherwise report the PSM as unacceptable. */
470 if (bt_sk(sk)->defer_setup)
471 result = L2CAP_CR_SEC_BLOCK;
473 result = L2CAP_CR_BAD_PSM;
474 l2cap_state_change(chan, BT_DISCONN);
/* Note the swap: our dcid is the remote's source CID and vice versa. */
476 rsp.scid = cpu_to_le16(chan->dcid);
477 rsp.dcid = cpu_to_le16(chan->scid);
478 rsp.result = cpu_to_le16(result);
479 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
480 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
484 l2cap_chan_del(chan, reason);
489 l2cap_chan_del(chan, reason);
/* Default/unbound states: just zap the socket. */
494 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel's security level to an HCI authentication type:
 * raw channels request dedicated bonding, SDP (PSM 0x0001) requests no
 * bonding, everything else requests general bonding; MITM variants are
 * used at BT_SECURITY_HIGH. */
500 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
502 if (chan->chan_type == L2CAP_CHAN_RAW) {
503 switch (chan->sec_level) {
504 case BT_SECURITY_HIGH:
505 return HCI_AT_DEDICATED_BONDING_MITM;
506 case BT_SECURITY_MEDIUM:
507 return HCI_AT_DEDICATED_BONDING;
509 return HCI_AT_NO_BONDING;
/* PSM 0x0001 is SDP: downgrade LOW to the dedicated SDP level. */
511 } else if (chan->psm == cpu_to_le16(0x0001)) {
512 if (chan->sec_level == BT_SECURITY_LOW)
513 chan->sec_level = BT_SECURITY_SDP;
515 if (chan->sec_level == BT_SECURITY_HIGH)
516 return HCI_AT_NO_BONDING_MITM;
518 return HCI_AT_NO_BONDING;
520 switch (chan->sec_level) {
521 case BT_SECURITY_HIGH:
522 return HCI_AT_GENERAL_BONDING_MITM;
523 case BT_SECURITY_MEDIUM:
524 return HCI_AT_GENERAL_BONDING;
526 return HCI_AT_NO_BONDING;
531 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying link, using the auth type derived above. */
532 int l2cap_chan_check_security(struct l2cap_chan *chan)
534 struct l2cap_conn *conn = chan->conn;
537 auth_type = l2cap_get_auth_type(chan);
539 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for @conn, wrapping
 * within the kernel-reserved range (see comment below).  Serialised by
 * conn->lock. */
542 static u8 l2cap_get_ident(struct l2cap_conn *conn)
546 /* Get next available identificator.
547 * 1 - 128 are used by kernel.
548 * 129 - 199 are reserved.
549 * 200 - 254 are used by utilities like l2ping, etc.
552 spin_lock(&conn->lock);
/* Wrap back to the start of the kernel range after 128
 * (reset statement not visible in this excerpt). */
554 if (++conn->tx_ident > 128)
559 spin_unlock(&conn->lock);
/* Build an L2CAP signalling command and queue it on the HCI channel at
 * maximum priority; non-flushable when the controller supports it. */
564 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
566 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
569 BT_DBG("code 0x%2.2x", code);
574 if (lmp_no_flush_capable(conn->hcon->hdev))
575 flags = ACL_START_NO_FLUSH;
/* Signalling traffic forces the link active and jumps the TX queue. */
579 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
580 skb->priority = HCI_PRIO_MAX;
582 hci_send_acl(conn->hchan, skb, flags);
/* Transmit a data skb on @chan's HCI channel, honouring the channel's
 * flushable and force-active flags. */
585 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
587 struct hci_conn *hcon = chan->conn->hcon;
590 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
593 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
594 lmp_no_flush_capable(hcon->hdev))
595 flags = ACL_START_NO_FLUSH;
599 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
600 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM supervisory frame (S-frame) carrying
 * @control, appending the F/P bits and an FCS when the channel uses
 * CRC16.  No-op unless the channel is connected. */
603 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
606 struct l2cap_hdr *lh;
607 struct l2cap_conn *conn = chan->conn;
610 if (chan->state != BT_CONNECTED)
/* Header size depends on whether extended control fields are in use. */
613 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
614 hlen = L2CAP_EXT_HDR_SIZE;
616 hlen = L2CAP_ENH_HDR_SIZE;
618 if (chan->fcs == L2CAP_FCS_CRC16)
619 hlen += L2CAP_FCS_SIZE;
621 BT_DBG("chan %p, control 0x%8.8x", chan, control);
623 count = min_t(unsigned int, conn->mtu, hlen);
625 control |= __set_sframe(chan);
/* Piggyback pending Final/Poll bits and clear their request flags. */
627 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
628 control |= __set_ctrl_final(chan);
630 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
631 control |= __set_ctrl_poll(chan);
633 skb = bt_skb_alloc(count, GFP_ATOMIC);
637 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
638 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
639 lh->cid = cpu_to_le16(chan->dcid);
641 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
/* FCS covers everything from the L2CAP header up to (not including)
 * the FCS field itself. */
643 if (chan->fcs == L2CAP_FCS_CRC16) {
644 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
645 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
648 skb->priority = HCI_PRIO_MAX;
649 l2cap_do_send(chan, skb);
/* Send RR, or RNR when we are locally busy (also records that an RNR
 * went out), acknowledging up to buffer_seq. */
652 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
654 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
655 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
656 set_bit(CONN_RNR_SENT, &chan->conn_state);
658 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
660 control |= __set_reqseq(chan, chan->buffer_seq);
662 l2cap_send_sframe(chan, control);
/* True when no Connect request is outstanding for this channel. */
665 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
667 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Send an L2CAP Connect request for @chan using a fresh command ident
 * and mark the connect as pending. */
670 static void l2cap_send_conn_req(struct l2cap_chan *chan)
672 struct l2cap_conn *conn = chan->conn;
673 struct l2cap_conn_req req;
675 req.scid = cpu_to_le16(chan->scid);
678 chan->ident = l2cap_get_ident(conn);
680 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
682 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ, sizeof(req), &req);
/* Kick off channel establishment: if the connection's feature mask is
 * already known, send the Connect request (once security passes);
 * otherwise first issue an Information request and start its timer. */
685 static void l2cap_do_start(struct l2cap_chan *chan)
687 struct l2cap_conn *conn = chan->conn;
689 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask exchange still in flight: wait for it to finish. */
690 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
693 if (l2cap_chan_check_security(chan) &&
694 __l2cap_no_conn_pending(chan))
695 l2cap_send_conn_req(chan);
697 struct l2cap_info_req req;
698 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
700 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
701 conn->info_ident = l2cap_get_ident(conn);
703 schedule_delayed_work(&conn->info_timer,
704 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
706 l2cap_send_cmd(conn, conn->info_ident,
707 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether @mode (ERTM/streaming) is supported by both the local
 * feature mask and the remote @feat_mask.  Basic mode falls through to
 * the default (not visible in this excerpt). */
711 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
713 u32 local_feat_mask = l2cap_feat_mask;
715 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
718 case L2CAP_MODE_ERTM:
719 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
720 case L2CAP_MODE_STREAMING:
721 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP Disconnect request for @chan, first stopping any ERTM
 * timers, then move the channel to BT_DISCONN with error @err. */
727 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
729 struct sock *sk = chan->sk;
730 struct l2cap_disconn_req req;
735 if (chan->mode == L2CAP_MODE_ERTM) {
736 __clear_retrans_timer(chan);
737 __clear_monitor_timer(chan);
738 __clear_ack_timer(chan);
741 req.dcid = cpu_to_le16(chan->dcid);
742 req.scid = cpu_to_le16(chan->scid);
743 l2cap_send_cmd(conn, l2cap_get_ident(conn),
744 L2CAP_DISCONN_REQ, sizeof(req), &req);
747 __l2cap_state_change(chan, BT_DISCONN);
748 __l2cap_chan_set_err(chan, err);
752 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its state machine: channels
 * in BT_CONNECT send their Connect request (or are closed if their mode
 * is unsupported); channels in BT_CONNECT2 answer the pending incoming
 * Connect request and, on success, start configuration. */
753 static void l2cap_conn_start(struct l2cap_conn *conn)
755 struct l2cap_chan *chan, *tmp;
757 BT_DBG("conn %p", conn);
759 mutex_lock(&conn->chan_lock);
/* _safe iteration: l2cap_chan_close() below may unlink entries. */
761 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
762 struct sock *sk = chan->sk;
764 l2cap_chan_lock(chan);
766 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
767 l2cap_chan_unlock(chan);
771 if (chan->state == BT_CONNECT) {
/* Security not settled or a connect already pending: skip. */
772 if (!l2cap_chan_check_security(chan) ||
773 !__l2cap_no_conn_pending(chan)) {
774 l2cap_chan_unlock(chan);
/* A state-2 device whose mode the peer lacks cannot fall back:
 * abort the channel. */
778 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
779 && test_bit(CONF_STATE2_DEVICE,
780 &chan->conf_state)) {
781 l2cap_chan_close(chan, ECONNRESET);
782 l2cap_chan_unlock(chan);
786 l2cap_send_conn_req(chan);
788 } else if (chan->state == BT_CONNECT2) {
789 struct l2cap_conn_rsp rsp;
791 rsp.scid = cpu_to_le16(chan->dcid);
792 rsp.dcid = cpu_to_le16(chan->scid);
794 if (l2cap_chan_check_security(chan)) {
/* Deferred setup: report authorisation pending and wake the
 * listening parent so userspace can decide. */
796 if (bt_sk(sk)->defer_setup) {
797 struct sock *parent = bt_sk(sk)->parent;
798 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
799 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
801 parent->sk_data_ready(parent, 0);
804 __l2cap_state_change(chan, BT_CONFIG);
805 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
806 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security still being raised: respond pending/authentication. */
810 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
811 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
814 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only start configuration once, and only after success. */
817 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
818 rsp.result != L2CAP_CR_SUCCESS) {
819 l2cap_chan_unlock(chan);
823 set_bit(CONF_REQ_SENT, &chan->conf_state);
824 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
825 l2cap_build_conf_req(chan, buf), buf);
826 chan->num_conf_req++;
829 l2cap_chan_unlock(chan);
832 mutex_unlock(&conn->chan_lock);
835 /* Find socket with cid and source bdaddr.
836 * Returns closest match, locked.
/* Global lookup by fixed CID and source address: an exact address
 * match wins immediately; a BDADDR_ANY binding is kept as the closest
 * fallback (returned via c1, not visible in this excerpt). */
838 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
840 struct l2cap_chan *c, *c1 = NULL;
842 read_lock(&chan_list_lock);
844 list_for_each_entry(c, &chan_list, global_l) {
845 struct sock *sk = c->sk;
/* state == 0 means "any state". */
847 if (state && c->state != state)
850 if (c->scid == cid) {
852 if (!bacmp(&bt_sk(sk)->src, src)) {
853 read_unlock(&chan_list_lock);
858 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
863 read_unlock(&chan_list_lock);
/* Handle an incoming LE link: find a listener on the LE data CID,
 * spawn a child channel/socket, attach it to the connection and mark
 * it connected (LE data channels need no config exchange). */
868 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
870 struct sock *parent, *sk;
871 struct l2cap_chan *chan, *pchan;
875 /* Check if we have socket listening on cid */
876 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
885 /* Check for backlog size */
886 if (sk_acceptq_is_full(parent)) {
887 BT_DBG("backlog full %d", parent->sk_ack_backlog);
/* Ask the owner to create the child channel (and its socket). */
891 chan = pchan->ops->new_connection(pchan->data);
/* Hold the HCI connection on behalf of the new channel. */
897 hci_conn_hold(conn->hcon);
899 bacpy(&bt_sk(sk)->src, conn->src);
900 bacpy(&bt_sk(sk)->dst, conn->dst);
902 bt_accept_enqueue(parent, sk);
904 l2cap_chan_add(conn, chan);
906 __set_chan_timer(chan, sk->sk_sndtimeo);
908 __l2cap_state_change(chan, BT_CONNECTED);
909 parent->sk_data_ready(parent, 0);
912 release_sock(parent);
/* Move @chan to BT_CONNECTED after configuration finishes: reset conf
 * state, stop the channel timer and wake the socket (and the accepting
 * parent, if any). */
915 static void l2cap_chan_ready(struct l2cap_chan *chan)
917 struct sock *sk = chan->sk;
922 parent = bt_sk(sk)->parent;
924 BT_DBG("sk %p, parent %p", sk, parent);
926 chan->conf_state = 0;
927 __clear_chan_timer(chan);
929 __l2cap_state_change(chan, BT_CONNECTED);
930 sk->sk_state_change(sk);
933 parent->sk_data_ready(parent, 0);
/* Called when the underlying HCI link comes up: dispatch LE incoming/
 * outgoing handling, then walk all channels and either complete them
 * (LE / non-connection-oriented) or continue establishment. */
938 static void l2cap_conn_ready(struct l2cap_conn *conn)
940 struct l2cap_chan *chan;
942 BT_DBG("conn %p", conn);
/* Incoming LE link: hand off to the LE accept path. */
944 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
945 l2cap_le_conn_ready(conn);
/* Outgoing LE link: start SMP security elevation. */
947 if (conn->hcon->out && conn->hcon->type == LE_LINK)
948 smp_conn_security(conn, conn->hcon->pending_sec_level);
950 mutex_lock(&conn->chan_lock);
952 list_for_each_entry(chan, &conn->chan_l, list) {
954 l2cap_chan_lock(chan);
956 if (conn->hcon->type == LE_LINK) {
957 if (smp_conn_security(conn, chan->sec_level))
958 l2cap_chan_ready(chan);
/* Connectionless/raw channels are ready as soon as the link is. */
960 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
961 struct sock *sk = chan->sk;
962 __clear_chan_timer(chan);
964 __l2cap_state_change(chan, BT_CONNECTED);
965 sk->sk_state_change(sk);
968 } else if (chan->state == BT_CONNECT)
969 l2cap_do_start(chan);
971 l2cap_chan_unlock(chan);
974 mutex_unlock(&conn->chan_lock);
977 /* Notify sockets that we cannot guaranty reliability anymore */
/* Raise @err on every channel that demanded reliable delivery
 * (FLAG_FORCE_RELIABLE). */
978 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
980 struct l2cap_chan *chan;
982 BT_DBG("conn %p", conn);
984 mutex_lock(&conn->chan_lock);
986 list_for_each_entry(chan, &conn->chan_l, list) {
987 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
988 __l2cap_chan_set_err(chan, err);
991 mutex_unlock(&conn->chan_lock);
/* Information-request timeout: give up on the feature-mask exchange,
 * mark it done and resume starting the queued channels. */
994 static void l2cap_info_timeout(struct work_struct *work)
996 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
999 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1000 conn->info_ident = 0;
1002 l2cap_conn_start(conn);
/* Tear down the whole L2CAP connection on @hcon: delete every channel
 * with error @err, release the HCI channel, cancel pending timers and
 * SMP state, and detach from the HCI connection. */
1005 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1007 struct l2cap_conn *conn = hcon->l2cap_data;
1008 struct l2cap_chan *chan, *l;
1013 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled inbound frame. */
1015 kfree_skb(conn->rx_skb);
1017 mutex_lock(&conn->chan_lock);
/* _safe iteration: l2cap_chan_del() unlinks each entry. */
1020 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1021 l2cap_chan_lock(chan);
1023 l2cap_chan_del(chan, err);
1025 l2cap_chan_unlock(chan);
1027 chan->ops->close(chan->data);
1030 mutex_unlock(&conn->chan_lock);
1032 hci_chan_del(conn->hchan);
1034 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1035 cancel_delayed_work_sync(&conn->info_timer);
/* LE links may have an SMP pairing in flight: cancel and destroy it. */
1037 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
1038 cancel_delayed_work_sync(&conn->security_timer);
1039 smp_chan_destroy(conn);
1042 hcon->l2cap_data = NULL;
/* SMP security-procedure timeout: drop the whole connection. */
1046 static void security_timeout(struct work_struct *work)
1048 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1049 security_timer.work);
1051 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) L2CAP connection object for @hcon:
 * allocate an HCI channel, set MTU/addresses from the device, and
 * initialise locks, channel list and the per-link-type timer. */
1054 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1056 struct l2cap_conn *conn = hcon->l2cap_data;
1057 struct hci_chan *hchan;
1062 hchan = hci_chan_create(hcon);
1066 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
/* Allocation failed: release the HCI channel created above. */
1068 hci_chan_del(hchan);
1072 hcon->l2cap_data = conn;
1074 conn->hchan = hchan;
1076 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
/* LE links use the controller's LE MTU when set; otherwise ACL MTU. */
1078 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1079 conn->mtu = hcon->hdev->le_mtu;
1081 conn->mtu = hcon->hdev->acl_mtu;
1083 conn->src = &hcon->hdev->bdaddr;
1084 conn->dst = &hcon->dst;
1086 conn->feat_mask = 0;
1088 spin_lock_init(&conn->lock);
1089 mutex_init(&conn->chan_lock);
1091 INIT_LIST_HEAD(&conn->chan_l);
/* LE connections get the SMP security timer; BR/EDR the info timer. */
1093 if (hcon->type == LE_LINK)
1094 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1096 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1098 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1103 /* ---- Socket interface ---- */
1105 /* Find socket with psm and source bdaddr.
1106 * Returns closest match.
/* Global lookup by PSM and source address, mirroring
 * l2cap_global_chan_by_scid(): exact address match returns at once,
 * a BDADDR_ANY binding is the fallback (via c1, return not visible). */
1108 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1110 struct l2cap_chan *c, *c1 = NULL;
1112 read_lock(&chan_list_lock);
1114 list_for_each_entry(c, &chan_list, global_l) {
1115 struct sock *sk = c->sk;
1117 if (state && c->state != state)
1120 if (c->psm == psm) {
1122 if (!bacmp(&bt_sk(sk)->src, src)) {
1123 read_unlock(&chan_list_lock);
1128 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1133 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan to @dst on PSM @psm (or
 * fixed CID @cid): validate PSM/mode, create the HCI link (LE or ACL),
 * attach the channel to the resulting L2CAP connection and start the
 * L2CAP connect sequence.  Returns 0 or a negative errno. */
1138 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1140 struct sock *sk = chan->sk;
1141 bdaddr_t *src = &bt_sk(sk)->src;
1142 struct l2cap_conn *conn;
1143 struct hci_conn *hcon;
1144 struct hci_dev *hdev;
1148 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
/* Pick the local adapter that routes to @dst. */
1151 hdev = hci_get_route(dst, src);
1153 return -EHOSTUNREACH;
1157 l2cap_chan_lock(chan);
1159 /* PSM must be odd and lsb of upper byte must be 0 */
1160 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1161 chan->chan_type != L2CAP_CHAN_RAW) {
/* Connection-oriented channels need either a PSM or a CID. */
1166 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1171 switch (chan->mode) {
1172 case L2CAP_MODE_BASIC:
1174 case L2CAP_MODE_ERTM:
1175 case L2CAP_MODE_STREAMING:
/* Reject calls while already connecting/connected (details of each
 * case are not visible in this excerpt). */
1186 switch (sk->sk_state) {
1190 /* Already connecting */
1196 /* Already connected */
1212 /* Set destination address and psm */
1213 bacpy(&bt_sk(sk)->dst, dst);
1220 auth_type = l2cap_get_auth_type(chan);
/* LE data CID selects an LE link; everything else uses ACL. */
1222 if (chan->dcid == L2CAP_CID_LE_DATA)
1223 hcon = hci_connect(hdev, LE_LINK, dst,
1224 chan->sec_level, auth_type);
1226 hcon = hci_connect(hdev, ACL_LINK, dst,
1227 chan->sec_level, auth_type);
1230 err = PTR_ERR(hcon);
1234 conn = l2cap_conn_add(hcon, 0);
1241 /* Update source addr of the socket */
1242 bacpy(src, conn->src);
/* chan_add takes conn->chan_lock, so drop the channel lock around it
 * to respect the conn-then-chan lock order. */
1244 l2cap_chan_unlock(chan);
1245 l2cap_chan_add(conn, chan);
1246 l2cap_chan_lock(chan);
1248 l2cap_state_change(chan, BT_CONNECT);
1249 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Link may already be up (reused HCI connection): continue without
 * waiting for the connect-complete event. */
1251 if (hcon->state == BT_CONNECTED) {
1252 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1253 __clear_chan_timer(chan);
1254 if (l2cap_chan_check_security(chan))
1255 l2cap_state_change(chan, BT_CONNECTED);
1257 l2cap_do_start(chan);
1263 l2cap_chan_unlock(chan);
1264 hci_dev_unlock(hdev);
/* Block (interruptibly) until all transmitted ERTM frames have been
 * acknowledged or the connection goes away.  Returns 0, a socket
 * error, or the signal-derived errno. */
1269 int __l2cap_wait_ack(struct sock *sk)
1271 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1272 DECLARE_WAITQUEUE(wait, current);
1276 add_wait_queue(sk_sleep(sk), &wait);
1277 set_current_state(TASK_INTERRUPTIBLE);
1278 while (chan->unacked_frames > 0 && chan->conn) {
/* Bail out on pending signals, translating via the timeout. */
1282 if (signal_pending(current)) {
1283 err = sock_intr_errno(timeo);
1288 timeo = schedule_timeout(timeo);
1290 set_current_state(TASK_INTERRUPTIBLE);
1292 err = sock_error(sk);
1296 set_current_state(TASK_RUNNING);
1297 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: fired while waiting for the peer's F-bit reply.
 * Disconnect after remote_max_tx retries; otherwise poll again with an
 * RR/RNR carrying the P bit and re-arm. */
1301 static void l2cap_monitor_timeout(struct work_struct *work)
1303 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1304 monitor_timer.work);
1306 BT_DBG("chan %p", chan);
1308 l2cap_chan_lock(chan);
1310 if (chan->retry_count >= chan->remote_max_tx) {
1311 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1312 l2cap_chan_unlock(chan);
1316 chan->retry_count++;
1317 __set_monitor_timer(chan);
1319 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1320 l2cap_chan_unlock(chan);
/* ERTM retransmission timer: no ack arrived in time — start the
 * monitor sequence (retry 1, WAIT_F) and poll the peer. */
1323 static void l2cap_retrans_timeout(struct work_struct *work)
1325 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1326 retrans_timer.work);
1328 BT_DBG("chan %p", chan);
1330 l2cap_chan_lock(chan);
1332 chan->retry_count = 1;
1333 __set_monitor_timer(chan);
1335 set_bit(CONN_WAIT_F, &chan->conn_state);
1337 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1339 l2cap_chan_unlock(chan);
/* Drop frames from the head of the ERTM transmit queue that the peer
 * has acknowledged (up to expected_ack_seq); stop the retransmission
 * timer once nothing is outstanding. */
1342 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1344 struct sk_buff *skb;
1346 while ((skb = skb_peek(&chan->tx_q)) &&
1347 chan->unacked_frames) {
/* Stop at the first frame not yet covered by the ack. */
1348 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1351 skb = skb_dequeue(&chan->tx_q);
1354 chan->unacked_frames--;
1357 if (!chan->unacked_frames)
1358 __clear_retrans_timer(chan);
/* Streaming mode transmit: drain the TX queue, stamping each frame
 * with the next TX sequence number (and FCS when enabled) before
 * sending.  No retransmission state is kept in this mode. */
1361 static void l2cap_streaming_send(struct l2cap_chan *chan)
1363 struct sk_buff *skb;
1367 while ((skb = skb_dequeue(&chan->tx_q))) {
/* Patch the sequence number into the already-built control field. */
1368 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1369 control |= __set_txseq(chan, chan->next_tx_seq);
1370 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
/* FCS must be recomputed after the control field changed. */
1372 if (chan->fcs == L2CAP_FCS_CRC16) {
1373 fcs = crc16(0, (u8 *)skb->data,
1374 skb->len - L2CAP_FCS_SIZE);
1375 put_unaligned_le16(fcs,
1376 skb->data + skb->len - L2CAP_FCS_SIZE);
1379 l2cap_do_send(chan, skb);
1381 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single queued I-frame with sequence number @tx_seq:
 * locate it in the TX queue, clone it, refresh its control field
 * (reqseq, F bit) and FCS, then send the clone. */
1385 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1387 struct sk_buff *skb, *tx_skb;
1391 skb = skb_peek(&chan->tx_q);
1395 while (bt_cb(skb)->tx_seq != tx_seq) {
1396 if (skb_queue_is_last(&chan->tx_q, skb))
1399 skb = skb_queue_next(&chan->tx_q, skb);
/* Too many retries for this frame: abort the channel. */
1402 if (chan->remote_max_tx &&
1403 bt_cb(skb)->retries == chan->remote_max_tx) {
1404 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Clone so the original stays queued for further retransmissions.
 * NOTE: skb_clone() shares the data buffer, so the control/FCS writes
 * below also update the queued original. */
1408 tx_skb = skb_clone(skb, GFP_ATOMIC);
1409 bt_cb(skb)->retries++;
1411 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1412 control &= __get_sar_mask(chan);
1414 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1415 control |= __set_ctrl_final(chan);
1417 control |= __set_reqseq(chan, chan->buffer_seq);
1418 control |= __set_txseq(chan, tx_seq);
1420 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1422 if (chan->fcs == L2CAP_FCS_CRC16) {
1423 fcs = crc16(0, (u8 *)tx_skb->data,
1424 tx_skb->len - L2CAP_FCS_SIZE);
1425 put_unaligned_le16(fcs,
1426 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1429 l2cap_do_send(chan, tx_skb);
/* ERTM transmit loop: send frames from tx_send_head while the transmit
 * window allows, cloning each one, stamping control/FCS, arming the
 * retransmission timer and tracking unacked frames.  Returns the
 * number of frames sent (return not visible in this excerpt). */
1432 static int l2cap_ertm_send(struct l2cap_chan *chan)
1434 struct sk_buff *skb, *tx_skb;
1439 if (chan->state != BT_CONNECTED)
1442 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
/* Frame exhausted its retry budget: abort the channel. */
1444 if (chan->remote_max_tx &&
1445 bt_cb(skb)->retries == chan->remote_max_tx) {
1446 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
/* Clone shares the data buffer with the queued original, so the
 * control/FCS writes below are visible through both skbs. */
1450 tx_skb = skb_clone(skb, GFP_ATOMIC);
1452 bt_cb(skb)->retries++;
1454 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1455 control &= __get_sar_mask(chan);
1457 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1458 control |= __set_ctrl_final(chan);
1460 control |= __set_reqseq(chan, chan->buffer_seq);
1461 control |= __set_txseq(chan, chan->next_tx_seq);
1463 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1465 if (chan->fcs == L2CAP_FCS_CRC16) {
1466 fcs = crc16(0, (u8 *)skb->data,
1467 tx_skb->len - L2CAP_FCS_SIZE);
1468 put_unaligned_le16(fcs, skb->data +
1469 tx_skb->len - L2CAP_FCS_SIZE);
1472 l2cap_do_send(chan, tx_skb);
1474 __set_retrans_timer(chan);
1476 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1478 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* First transmission of this frame: it is now awaiting an ack. */
1480 if (bt_cb(skb)->retries == 1) {
1481 chan->unacked_frames++;
1484 __clear_ack_timer(chan);
1487 chan->frames_sent++;
/* Advance the send pointer; NULL means the queue is drained. */
1489 if (skb_queue_is_last(&chan->tx_q, skb))
1490 chan->tx_send_head = NULL;
1492 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the ERTM send pointer to the oldest unacked frame and resend
 * everything from there. */
1498 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1502 if (!skb_queue_empty(&chan->tx_q))
1503 chan->tx_send_head = chan->tx_q.next;
1505 chan->next_tx_seq = chan->expected_ack_seq;
1506 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy, otherwise
 * try to piggyback the ack on pending I-frames and fall back to an
 * explicit RR. */
1510 static void __l2cap_send_ack(struct l2cap_chan *chan)
1514 control |= __set_reqseq(chan, chan->buffer_seq);
1516 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1517 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1518 set_bit(CONN_RNR_SENT, &chan->conn_state);
1519 l2cap_send_sframe(chan, control);
/* I-frames carried the ack implicitly; nothing more to send. */
1523 if (l2cap_ertm_send(chan) > 0)
1526 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1527 l2cap_send_sframe(chan, control);
/* Cancel the delayed-ack timer and ack immediately. */
1530 static void l2cap_send_ack(struct l2cap_chan *chan)
1532 __clear_ack_timer(chan);
1533 __l2cap_send_ack(chan);
/* Send an SREJ S-frame with the F bit set, requesting the sequence
 * number recorded at the tail of the SREJ list. */
1536 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1538 struct srej_list *tail;
1541 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1542 control |= __set_ctrl_final(chan);
1544 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1545 control |= __set_reqseq(chan, tail->tx_seq);
1547 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user data from @msg into @skb: the first @count
 * bytes go into the main skb, the remainder into a chain of MTU-sized
 * fragments hung off frag_list.  Returns 0 or a negative errno
 * (error paths not fully visible in this excerpt). */
1550 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1552 struct l2cap_conn *conn = chan->conn;
1553 struct sk_buff **frag;
1556 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1562 /* Continuation fragments (no L2CAP header) */
1563 frag = &skb_shinfo(skb)->frag_list;
/* Each fragment is capped at the connection MTU. */
1565 count = min_t(unsigned int, conn->mtu, len);
1567 *frag = chan->ops->alloc_skb(chan, count,
1568 msg->msg_flags & MSG_DONTWAIT, &err);
1572 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1575 (*frag)->priority = skb->priority;
1580 frag = &(*frag)->next;
1586 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1587 struct msghdr *msg, size_t len,
1590 struct l2cap_conn *conn = chan->conn;
1591 struct sk_buff *skb;
1592 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1593 struct l2cap_hdr *lh;
1595 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1597 count = min_t(unsigned int, (conn->mtu - hlen), len);
1599 skb = chan->ops->alloc_skb(chan, count + hlen,
1600 msg->msg_flags & MSG_DONTWAIT, &err);
1603 return ERR_PTR(err);
1605 skb->priority = priority;
1607 /* Create L2CAP header */
1608 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1609 lh->cid = cpu_to_le16(chan->dcid);
1610 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1611 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1613 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1614 if (unlikely(err < 0)) {
1616 return ERR_PTR(err);
1621 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1622 struct msghdr *msg, size_t len,
1625 struct l2cap_conn *conn = chan->conn;
1626 struct sk_buff *skb;
1627 int err, count, hlen = L2CAP_HDR_SIZE;
1628 struct l2cap_hdr *lh;
1630 BT_DBG("chan %p len %d", chan, (int)len);
1632 count = min_t(unsigned int, (conn->mtu - hlen), len);
1634 skb = chan->ops->alloc_skb(chan, count + hlen,
1635 msg->msg_flags & MSG_DONTWAIT, &err);
1638 return ERR_PTR(err);
1640 skb->priority = priority;
1642 /* Create L2CAP header */
1643 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1644 lh->cid = cpu_to_le16(chan->dcid);
1645 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1647 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1648 if (unlikely(err < 0)) {
1650 return ERR_PTR(err);
1655 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1656 struct msghdr *msg, size_t len,
1657 u32 control, u16 sdulen)
1659 struct l2cap_conn *conn = chan->conn;
1660 struct sk_buff *skb;
1661 int err, count, hlen;
1662 struct l2cap_hdr *lh;
1664 BT_DBG("chan %p len %d", chan, (int)len);
1667 return ERR_PTR(-ENOTCONN);
1669 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1670 hlen = L2CAP_EXT_HDR_SIZE;
1672 hlen = L2CAP_ENH_HDR_SIZE;
1675 hlen += L2CAP_SDULEN_SIZE;
1677 if (chan->fcs == L2CAP_FCS_CRC16)
1678 hlen += L2CAP_FCS_SIZE;
1680 count = min_t(unsigned int, (conn->mtu - hlen), len);
1682 skb = chan->ops->alloc_skb(chan, count + hlen,
1683 msg->msg_flags & MSG_DONTWAIT, &err);
1686 return ERR_PTR(err);
1688 /* Create L2CAP header */
1689 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1690 lh->cid = cpu_to_le16(chan->dcid);
1691 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1693 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1696 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1698 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1699 if (unlikely(err < 0)) {
1701 return ERR_PTR(err);
1704 if (chan->fcs == L2CAP_FCS_CRC16)
1705 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1707 bt_cb(skb)->retries = 0;
1711 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1713 struct sk_buff *skb;
1714 struct sk_buff_head sar_queue;
1718 skb_queue_head_init(&sar_queue);
1719 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1720 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1722 return PTR_ERR(skb);
1724 __skb_queue_tail(&sar_queue, skb);
1725 len -= chan->remote_mps;
1726 size += chan->remote_mps;
1731 if (len > chan->remote_mps) {
1732 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1733 buflen = chan->remote_mps;
1735 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1739 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1741 skb_queue_purge(&sar_queue);
1742 return PTR_ERR(skb);
1745 __skb_queue_tail(&sar_queue, skb);
1749 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1750 if (chan->tx_send_head == NULL)
1751 chan->tx_send_head = sar_queue.next;
1756 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1759 struct sk_buff *skb;
1763 /* Connectionless channel */
1764 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1765 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1767 return PTR_ERR(skb);
1769 l2cap_do_send(chan, skb);
1773 switch (chan->mode) {
1774 case L2CAP_MODE_BASIC:
1775 /* Check outgoing MTU */
1776 if (len > chan->omtu)
1779 /* Create a basic PDU */
1780 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1782 return PTR_ERR(skb);
1784 l2cap_do_send(chan, skb);
1788 case L2CAP_MODE_ERTM:
1789 case L2CAP_MODE_STREAMING:
1790 /* Entire SDU fits into one PDU */
1791 if (len <= chan->remote_mps) {
1792 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1793 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1796 return PTR_ERR(skb);
1798 __skb_queue_tail(&chan->tx_q, skb);
1800 if (chan->tx_send_head == NULL)
1801 chan->tx_send_head = skb;
1804 /* Segment SDU into multiples PDUs */
1805 err = l2cap_sar_segment_sdu(chan, msg, len);
1810 if (chan->mode == L2CAP_MODE_STREAMING) {
1811 l2cap_streaming_send(chan);
1816 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1817 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1822 err = l2cap_ertm_send(chan);
1829 BT_DBG("bad state %1.1x", chan->mode);
1836 /* Copy frame to all raw sockets on that connection */
1837 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1839 struct sk_buff *nskb;
1840 struct l2cap_chan *chan;
1842 BT_DBG("conn %p", conn);
1844 mutex_lock(&conn->chan_lock);
1846 list_for_each_entry(chan, &conn->chan_l, list) {
1847 struct sock *sk = chan->sk;
1848 if (chan->chan_type != L2CAP_CHAN_RAW)
1851 /* Don't send frame to the socket it came from */
1854 nskb = skb_clone(skb, GFP_ATOMIC);
1858 if (chan->ops->recv(chan->data, nskb))
1862 mutex_unlock(&conn->chan_lock);
1865 /* ---- L2CAP signalling commands ---- */
1866 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1867 u8 code, u8 ident, u16 dlen, void *data)
1869 struct sk_buff *skb, **frag;
1870 struct l2cap_cmd_hdr *cmd;
1871 struct l2cap_hdr *lh;
1874 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1875 conn, code, ident, dlen);
1877 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1878 count = min_t(unsigned int, conn->mtu, len);
1880 skb = bt_skb_alloc(count, GFP_ATOMIC);
1884 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1885 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1887 if (conn->hcon->type == LE_LINK)
1888 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1890 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1892 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1895 cmd->len = cpu_to_le16(dlen);
1898 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1899 memcpy(skb_put(skb, count), data, count);
1905 /* Continuation fragments (no L2CAP header) */
1906 frag = &skb_shinfo(skb)->frag_list;
1908 count = min_t(unsigned int, conn->mtu, len);
1910 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1914 memcpy(skb_put(*frag, count), data, count);
1919 frag = &(*frag)->next;
1929 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1931 struct l2cap_conf_opt *opt = *ptr;
1934 len = L2CAP_CONF_OPT_SIZE + opt->len;
1942 *val = *((u8 *) opt->val);
1946 *val = get_unaligned_le16(opt->val);
1950 *val = get_unaligned_le32(opt->val);
1954 *val = (unsigned long) opt->val;
1958 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1962 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1964 struct l2cap_conf_opt *opt = *ptr;
1966 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1973 *((u8 *) opt->val) = val;
1977 put_unaligned_le16(val, opt->val);
1981 put_unaligned_le32(val, opt->val);
1985 memcpy(opt->val, (void *) val, len);
1989 *ptr += L2CAP_CONF_OPT_SIZE + len;
1992 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1994 struct l2cap_conf_efs efs;
1996 switch (chan->mode) {
1997 case L2CAP_MODE_ERTM:
1998 efs.id = chan->local_id;
1999 efs.stype = chan->local_stype;
2000 efs.msdu = cpu_to_le16(chan->local_msdu);
2001 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2002 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
2003 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
2006 case L2CAP_MODE_STREAMING:
2008 efs.stype = L2CAP_SERV_BESTEFFORT;
2009 efs.msdu = cpu_to_le16(chan->local_msdu);
2010 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
2019 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
2020 (unsigned long) &efs);
2023 static void l2cap_ack_timeout(struct work_struct *work)
2025 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
2028 BT_DBG("chan %p", chan);
2030 l2cap_chan_lock(chan);
2032 __l2cap_send_ack(chan);
2034 l2cap_chan_unlock(chan);
2036 l2cap_chan_put(chan);
2039 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
2041 chan->expected_ack_seq = 0;
2042 chan->unacked_frames = 0;
2043 chan->buffer_seq = 0;
2044 chan->num_acked = 0;
2045 chan->frames_sent = 0;
2047 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
2048 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
2049 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
2051 skb_queue_head_init(&chan->srej_q);
2053 INIT_LIST_HEAD(&chan->srej_l);
2056 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2059 case L2CAP_MODE_STREAMING:
2060 case L2CAP_MODE_ERTM:
2061 if (l2cap_mode_supported(mode, remote_feat_mask))
2065 return L2CAP_MODE_BASIC;
2069 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2071 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2074 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2076 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2079 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2081 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2082 __l2cap_ews_supported(chan)) {
2083 /* use extended control field */
2084 set_bit(FLAG_EXT_CTRL, &chan->flags);
2085 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2087 chan->tx_win = min_t(u16, chan->tx_win,
2088 L2CAP_DEFAULT_TX_WINDOW);
2089 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request for @chan into @data: on the first
 * request, downgrade the desired mode via l2cap_select_mode() unless the
 * channel is pinned (CONF_STATE2_DEVICE); then emit MTU (if non-default),
 * an RFC option matching the chosen mode, and mode-dependent EFS/FCS/EWS
 * options.  Returns the built request length.
 * NOTE(review): this listing is a sampled excerpt -- intermediate lines
 * (closing braces, `break`s, the `done:` label and final return) are not
 * visible here; confirm against the full source before editing.
 */
2093 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2095 struct l2cap_conf_req *req = data;
2096 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2097 void *ptr = req->data;
2100 BT_DBG("chan %p", chan);
/* Mode selection happens only once, before any req/rsp was exchanged */
2102 if (chan->num_conf_req || chan->num_conf_rsp)
2105 switch (chan->mode) {
2106 case L2CAP_MODE_STREAMING:
2107 case L2CAP_MODE_ERTM:
2108 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2111 if (__l2cap_efs_supported(chan))
2112 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2116 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* MTU option only when it differs from the spec default */
2121 if (chan->imtu != L2CAP_DEFAULT_MTU)
2122 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2124 switch (chan->mode) {
2125 case L2CAP_MODE_BASIC:
/* Peers with no ERTM/streaming support need no RFC option at all */
2126 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2127 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2130 rfc.mode = L2CAP_MODE_BASIC;
2132 rfc.max_transmit = 0;
2133 rfc.retrans_timeout = 0;
2134 rfc.monitor_timeout = 0;
2135 rfc.max_pdu_size = 0;
2137 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2138 (unsigned long) &rfc);
2141 case L2CAP_MODE_ERTM:
2142 rfc.mode = L2CAP_MODE_ERTM;
2143 rfc.max_transmit = chan->max_tx;
/* Timeouts are set by the responder; requester sends zero */
2144 rfc.retrans_timeout = 0;
2145 rfc.monitor_timeout = 0;
2147 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2148 L2CAP_EXT_HDR_SIZE -
2151 rfc.max_pdu_size = cpu_to_le16(size);
2153 l2cap_txwin_setup(chan);
2155 rfc.txwin_size = min_t(u16, chan->tx_win,
2156 L2CAP_DEFAULT_TX_WINDOW);
2158 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2159 (unsigned long) &rfc);
2161 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2162 l2cap_add_opt_efs(&ptr, chan);
2164 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2167 if (chan->fcs == L2CAP_FCS_NONE ||
2168 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2169 chan->fcs = L2CAP_FCS_NONE;
2170 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
/* Extended window size option only with the extended control field */
2173 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2174 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2178 case L2CAP_MODE_STREAMING:
2179 rfc.mode = L2CAP_MODE_STREAMING;
2181 rfc.max_transmit = 0;
2182 rfc.retrans_timeout = 0;
2183 rfc.monitor_timeout = 0;
2185 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2186 L2CAP_EXT_HDR_SIZE -
2189 rfc.max_pdu_size = cpu_to_le16(size);
2191 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2192 (unsigned long) &rfc);
2194 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2195 l2cap_add_opt_efs(&ptr, chan);
2197 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2200 if (chan->fcs == L2CAP_FCS_NONE ||
2201 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2202 chan->fcs = L2CAP_FCS_NONE;
2203 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2208 req->dcid = cpu_to_le16(chan->dcid);
2209 req->flags = cpu_to_le16(0);
/* Parse the accumulated Configure Request in chan->conf_req and build the
 * Configure Response into @data: first loop decodes each option (MTU,
 * flush timeout, RFC, FCS, EFS, EWS), then the chosen mode is validated
 * and the response options (possibly with corrected values and result
 * UNACCEPT/PENDING) are emitted.  Returns the response length or
 * -ECONNREFUSED when the configuration cannot be negotiated.
 * NOTE(review): sampled excerpt -- `break`s, braces, `done:`/`unaccept`
 * style labels and the final return are missing from view.
 */
2214 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2216 struct l2cap_conf_rsp *rsp = data;
2217 void *ptr = rsp->data;
2218 void *req = chan->conf_req;
2219 int len = chan->conf_len;
2220 int type, hint, olen;
2222 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2223 struct l2cap_conf_efs efs;
2225 u16 mtu = L2CAP_DEFAULT_MTU;
2226 u16 result = L2CAP_CONF_SUCCESS;
2229 BT_DBG("chan %p", chan);
2231 while (len >= L2CAP_CONF_OPT_SIZE) {
2232 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit: unknown hinted options are ignored, unhinted ones rejected */
2234 hint = type & L2CAP_CONF_HINT;
2235 type &= L2CAP_CONF_MASK;
2238 case L2CAP_CONF_MTU:
2242 case L2CAP_CONF_FLUSH_TO:
2243 chan->flush_to = val;
2246 case L2CAP_CONF_QOS:
2249 case L2CAP_CONF_RFC:
2250 if (olen == sizeof(rfc))
2251 memcpy(&rfc, (void *) val, olen);
2254 case L2CAP_CONF_FCS:
2255 if (val == L2CAP_FCS_NONE)
2256 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2259 case L2CAP_CONF_EFS:
2261 if (olen == sizeof(efs))
2262 memcpy(&efs, (void *) val, olen);
2265 case L2CAP_CONF_EWS:
/* Peer asked for extended window we cannot do -> refuse connection */
2267 return -ECONNREFUSED;
2269 set_bit(FLAG_EXT_CTRL, &chan->flags);
2270 set_bit(CONF_EWS_RECV, &chan->conf_state);
2271 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2272 chan->remote_tx_win = val;
2279 result = L2CAP_CONF_UNKNOWN;
2280 *((u8 *) ptr++) = type;
2285 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2288 switch (chan->mode) {
2289 case L2CAP_MODE_STREAMING:
2290 case L2CAP_MODE_ERTM:
2291 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2292 chan->mode = l2cap_select_mode(rfc.mode,
2293 chan->conn->feat_mask);
2298 if (__l2cap_efs_supported(chan))
2299 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2301 return -ECONNREFUSED;
2304 if (chan->mode != rfc.mode)
2305 return -ECONNREFUSED;
2311 if (chan->mode != rfc.mode) {
2312 result = L2CAP_CONF_UNACCEPT;
2313 rfc.mode = chan->mode;
/* Second disagreement on mode means negotiation failed */
2315 if (chan->num_conf_rsp == 1)
2316 return -ECONNREFUSED;
2318 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2319 sizeof(rfc), (unsigned long) &rfc);
2322 if (result == L2CAP_CONF_SUCCESS) {
2323 /* Configure output options and let the other side know
2324 * which ones we don't like. */
2326 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2327 result = L2CAP_CONF_UNACCEPT;
2330 set_bit(CONF_MTU_DONE, &chan->conf_state);
2332 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2335 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2336 efs.stype != L2CAP_SERV_NOTRAFIC &&
2337 efs.stype != chan->local_stype) {
2339 result = L2CAP_CONF_UNACCEPT;
2341 if (chan->num_conf_req >= 1)
2342 return -ECONNREFUSED;
2344 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2346 (unsigned long) &efs);
2348 /* Send PENDING Conf Rsp */
2349 result = L2CAP_CONF_PENDING;
2350 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2355 case L2CAP_MODE_BASIC:
2356 chan->fcs = L2CAP_FCS_NONE;
2357 set_bit(CONF_MODE_DONE, &chan->conf_state);
2360 case L2CAP_MODE_ERTM:
2361 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2362 chan->remote_tx_win = rfc.txwin_size;
2364 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2366 chan->remote_max_tx = rfc.max_transmit;
2368 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2370 L2CAP_EXT_HDR_SIZE -
2373 rfc.max_pdu_size = cpu_to_le16(size);
2374 chan->remote_mps = size;
/* NOTE(review): le16_to_cpu() here looks like the wrong direction --
 * filling a wire-format field from host constants should use
 * cpu_to_le16(); harmless on little-endian, wrong on big-endian.
 * Confirm against upstream before changing.
 */
2376 rfc.retrans_timeout =
2377 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2378 rfc.monitor_timeout =
2379 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2381 set_bit(CONF_MODE_DONE, &chan->conf_state);
2383 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2384 sizeof(rfc), (unsigned long) &rfc);
2386 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2387 chan->remote_id = efs.id;
2388 chan->remote_stype = efs.stype;
2389 chan->remote_msdu = le16_to_cpu(efs.msdu);
2390 chan->remote_flush_to =
2391 le32_to_cpu(efs.flush_to);
2392 chan->remote_acc_lat =
2393 le32_to_cpu(efs.acc_lat);
2394 chan->remote_sdu_itime =
2395 le32_to_cpu(efs.sdu_itime);
2396 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2397 sizeof(efs), (unsigned long) &efs);
2401 case L2CAP_MODE_STREAMING:
2402 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2404 L2CAP_EXT_HDR_SIZE -
2407 rfc.max_pdu_size = cpu_to_le16(size);
2408 chan->remote_mps = size;
2410 set_bit(CONF_MODE_DONE, &chan->conf_state);
2412 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2413 sizeof(rfc), (unsigned long) &rfc);
2418 result = L2CAP_CONF_UNACCEPT;
2420 memset(&rfc, 0, sizeof(rfc));
2421 rfc.mode = chan->mode;
2424 if (result == L2CAP_CONF_SUCCESS)
2425 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2427 rsp->scid = cpu_to_le16(chan->dcid);
2428 rsp->result = cpu_to_le16(result);
2429 rsp->flags = cpu_to_le16(0x0000);
/* Parse a Configure Response from the peer and build our follow-up
 * Configure Request into @data, echoing (possibly adjusted) options back.
 * On success/pending, commit the negotiated ERTM/streaming parameters to
 * the channel.  Returns the new request length or -ECONNREFUSED when the
 * peer's counter-proposal is unacceptable.
 * NOTE(review): sampled excerpt -- `break`s, braces and the final return
 * are missing from view.
 */
2434 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2436 struct l2cap_conf_req *req = data;
2437 void *ptr = req->data;
2440 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2441 struct l2cap_conf_efs efs;
2443 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2445 while (len >= L2CAP_CONF_OPT_SIZE) {
2446 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2449 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the spec minimum: reject but offer the minimum */
2450 if (val < L2CAP_DEFAULT_MIN_MTU) {
2451 *result = L2CAP_CONF_UNACCEPT;
2452 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2455 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2458 case L2CAP_CONF_FLUSH_TO:
2459 chan->flush_to = val;
2460 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2464 case L2CAP_CONF_RFC:
2465 if (olen == sizeof(rfc))
2466 memcpy(&rfc, (void *)val, olen);
/* A mode-pinned channel cannot accept a different mode from the peer */
2468 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2469 rfc.mode != chan->mode)
2470 return -ECONNREFUSED;
2474 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2475 sizeof(rfc), (unsigned long) &rfc);
2478 case L2CAP_CONF_EWS:
2479 chan->tx_win = min_t(u16, val,
2480 L2CAP_DEFAULT_EXT_WINDOW);
2481 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2485 case L2CAP_CONF_EFS:
2486 if (olen == sizeof(efs))
2487 memcpy(&efs, (void *)val, olen);
2489 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2490 efs.stype != L2CAP_SERV_NOTRAFIC &&
2491 efs.stype != chan->local_stype)
2492 return -ECONNREFUSED;
2494 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2495 sizeof(efs), (unsigned long) &efs);
2500 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2501 return -ECONNREFUSED;
2503 chan->mode = rfc.mode;
2505 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2507 case L2CAP_MODE_ERTM:
2508 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2509 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2510 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2512 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2513 chan->local_msdu = le16_to_cpu(efs.msdu);
2514 chan->local_sdu_itime =
2515 le32_to_cpu(efs.sdu_itime);
2516 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2517 chan->local_flush_to =
2518 le32_to_cpu(efs.flush_to);
2522 case L2CAP_MODE_STREAMING:
2523 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2527 req->dcid = cpu_to_le16(chan->dcid);
2528 req->flags = cpu_to_le16(0x0000);
2533 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2535 struct l2cap_conf_rsp *rsp = data;
2536 void *ptr = rsp->data;
2538 BT_DBG("chan %p", chan);
2540 rsp->scid = cpu_to_le16(chan->dcid);
2541 rsp->result = cpu_to_le16(result);
2542 rsp->flags = cpu_to_le16(flags);
2547 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2549 struct l2cap_conn_rsp rsp;
2550 struct l2cap_conn *conn = chan->conn;
2553 rsp.scid = cpu_to_le16(chan->dcid);
2554 rsp.dcid = cpu_to_le16(chan->scid);
2555 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2556 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2557 l2cap_send_cmd(conn, chan->ident,
2558 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2560 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2563 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2564 l2cap_build_conf_req(chan, buf), buf);
2565 chan->num_conf_req++;
2568 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2572 struct l2cap_conf_rfc rfc;
2574 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2576 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2579 while (len >= L2CAP_CONF_OPT_SIZE) {
2580 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2583 case L2CAP_CONF_RFC:
2584 if (olen == sizeof(rfc))
2585 memcpy(&rfc, (void *)val, olen);
2590 /* Use sane default values in case a misbehaving remote device
2591 * did not send an RFC option.
2593 rfc.mode = chan->mode;
2594 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2595 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2596 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2598 BT_ERR("Expected RFC option was not found, using defaults");
2602 case L2CAP_MODE_ERTM:
2603 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2604 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2605 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2607 case L2CAP_MODE_STREAMING:
2608 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2612 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2614 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2616 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2619 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2620 cmd->ident == conn->info_ident) {
2621 cancel_delayed_work(&conn->info_timer);
2623 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2624 conn->info_ident = 0;
2626 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: find a listening channel for the
 * PSM, enforce link security (SDP exempt), check backlog, create the
 * child channel, reject duplicate remote CIDs, then answer with SUCCESS /
 * PEND / error depending on security and defer_setup state.  May also
 * trigger the initial Information Request and the first Configure
 * Request.
 * NOTE(review): sampled excerpt -- error labels (`sendresp` etc.),
 * lock/unlock pairing and several braces are not visible here; the
 * control flow is intricate, so edit only against the full source.
 */
2632 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2634 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2635 struct l2cap_conn_rsp rsp;
2636 struct l2cap_chan *chan = NULL, *pchan;
2637 struct sock *parent, *sk = NULL;
2638 int result, status = L2CAP_CS_NO_INFO;
2640 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2641 __le16 psm = req->psm;
2643 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2645 /* Check if we have socket listening on psm */
2646 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2648 result = L2CAP_CR_BAD_PSM;
2654 mutex_lock(&conn->chan_lock);
2657 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is allowed on an unauthenticated link */
2658 if (psm != cpu_to_le16(0x0001) &&
2659 !hci_conn_check_link_mode(conn->hcon)) {
2660 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2661 result = L2CAP_CR_SEC_BLOCK;
2665 result = L2CAP_CR_NO_MEM;
2667 /* Check for backlog size */
2668 if (sk_acceptq_is_full(parent)) {
2669 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2673 chan = pchan->ops->new_connection(pchan->data);
2679 /* Check if we already have channel with that dcid */
2680 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2681 sock_set_flag(sk, SOCK_ZAPPED);
2682 chan->ops->close(chan->data);
2686 hci_conn_hold(conn->hcon);
2688 bacpy(&bt_sk(sk)->src, conn->src);
2689 bacpy(&bt_sk(sk)->dst, conn->dst);
2693 bt_accept_enqueue(parent, sk);
2695 __l2cap_chan_add(conn, chan);
2699 __set_chan_timer(chan, sk->sk_sndtimeo);
2701 chan->ident = cmd->ident;
2703 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2704 if (l2cap_chan_check_security(chan)) {
/* defer_setup: hold the final response until userspace accepts */
2705 if (bt_sk(sk)->defer_setup) {
2706 __l2cap_state_change(chan, BT_CONNECT2);
2707 result = L2CAP_CR_PEND;
2708 status = L2CAP_CS_AUTHOR_PEND;
2709 parent->sk_data_ready(parent, 0);
2711 __l2cap_state_change(chan, BT_CONFIG);
2712 result = L2CAP_CR_SUCCESS;
2713 status = L2CAP_CS_NO_INFO;
2716 __l2cap_state_change(chan, BT_CONNECT2);
2717 result = L2CAP_CR_PEND;
2718 status = L2CAP_CS_AUTHEN_PEND;
2721 __l2cap_state_change(chan, BT_CONNECT2);
2722 result = L2CAP_CR_PEND;
2723 status = L2CAP_CS_NO_INFO;
2727 release_sock(parent);
2728 mutex_unlock(&conn->chan_lock);
2731 rsp.scid = cpu_to_le16(scid);
2732 rsp.dcid = cpu_to_le16(dcid);
2733 rsp.result = cpu_to_le16(result);
2734 rsp.status = cpu_to_le16(status);
2735 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask still unknown: probe it before configuration starts */
2737 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2738 struct l2cap_info_req info;
2739 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2741 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2742 conn->info_ident = l2cap_get_ident(conn);
2744 schedule_delayed_work(&conn->info_timer,
2745 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2747 l2cap_send_cmd(conn, conn->info_ident,
2748 L2CAP_INFO_REQ, sizeof(info), &info);
2751 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2752 result == L2CAP_CR_SUCCESS) {
2754 set_bit(CONF_REQ_SENT, &chan->conf_state);
2755 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2756 l2cap_build_conf_req(chan, buf), buf);
2757 chan->num_conf_req++;
2763 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2765 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2766 u16 scid, dcid, result, status;
2767 struct l2cap_chan *chan;
2771 scid = __le16_to_cpu(rsp->scid);
2772 dcid = __le16_to_cpu(rsp->dcid);
2773 result = __le16_to_cpu(rsp->result);
2774 status = __le16_to_cpu(rsp->status);
2776 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
2777 dcid, scid, result, status);
2779 mutex_lock(&conn->chan_lock);
2782 chan = __l2cap_get_chan_by_scid(conn, scid);
2788 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
2797 l2cap_chan_lock(chan);
2800 case L2CAP_CR_SUCCESS:
2801 l2cap_state_change(chan, BT_CONFIG);
2804 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2806 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2809 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2810 l2cap_build_conf_req(chan, req), req);
2811 chan->num_conf_req++;
2815 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2819 l2cap_chan_del(chan, ECONNREFUSED);
2823 l2cap_chan_unlock(chan);
2826 mutex_unlock(&conn->chan_lock);
2831 static inline void set_default_fcs(struct l2cap_chan *chan)
2833 /* FCS is enabled only in ERTM or streaming mode, if one or both
2836 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2837 chan->fcs = L2CAP_FCS_NONE;
2838 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2839 chan->fcs = L2CAP_FCS_CRC16;
/* Handle a Configure Request: validate channel state, accumulate
 * (possibly multi-fragment) option data into chan->conf_req, and once the
 * final fragment arrives parse it, answer, and -- when both directions are
 * configured -- move the channel to BT_CONNECTED (initialising ERTM state
 * if needed) or send our own Configure Request.
 * NOTE(review): sampled excerpt -- `goto unlock` paths, braces and the
 * final return are missing from view; do not restructure without the
 * full source.
 */
2842 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2844 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2847 struct l2cap_chan *chan;
2850 dcid = __le16_to_cpu(req->dcid);
2851 flags = __le16_to_cpu(req->flags);
2853 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2855 chan = l2cap_get_chan_by_scid(conn, dcid);
2859 l2cap_chan_lock(chan);
/* Config only makes sense while connecting/configuring; otherwise reject */
2861 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2862 struct l2cap_cmd_rej_cid rej;
2864 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2865 rej.scid = cpu_to_le16(chan->scid);
2866 rej.dcid = cpu_to_le16(chan->dcid);
2868 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2873 /* Reject if config buffer is too small. */
2874 len = cmd_len - sizeof(*req);
2875 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2876 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2877 l2cap_build_conf_rsp(chan, rsp,
2878 L2CAP_CONF_REJECT, flags), rsp);
2883 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2884 chan->conf_len += len;
/* Continuation flag (C-bit) set: more option fragments follow */
2886 if (flags & 0x0001) {
2887 /* Incomplete config. Send empty response. */
2888 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2889 l2cap_build_conf_rsp(chan, rsp,
2890 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2894 /* Complete config. */
2895 len = l2cap_parse_conf_req(chan, rsp);
2897 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2901 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2902 chan->num_conf_rsp++;
2904 /* Reset config buffer. */
2907 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2910 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2911 set_default_fcs(chan);
2913 l2cap_state_change(chan, BT_CONNECTED);
2915 chan->next_tx_seq = 0;
2916 chan->expected_tx_seq = 0;
2917 skb_queue_head_init(&chan->tx_q);
2918 if (chan->mode == L2CAP_MODE_ERTM)
2919 l2cap_ertm_init(chan);
2921 l2cap_chan_ready(chan);
2925 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2927 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2928 l2cap_build_conf_req(chan, buf), buf);
2929 chan->num_conf_req++;
/* Got a PENDING Conf Rsp from the remote side and assume we sent
 * a PENDING Conf Rsp in the code above.
 */
2934 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2935 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2937 /* check compatibility */
2939 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2940 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2942 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2943 l2cap_build_conf_rsp(chan, rsp,
2944 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2948 l2cap_chan_unlock(chan);
/* Handle a Configure Response: on SUCCESS commit the RFC parameters, on
 * PENDING reconcile both sides' pending state, on UNACCEPT re-negotiate
 * with an adjusted request (bounded by L2CAP_CONF_MAX_CONF_RSP attempts),
 * otherwise disconnect.  When both directions are done the channel goes
 * BT_CONNECTED (ERTM state initialised if applicable).
 * NOTE(review): sampled excerpt -- `goto done`/`default:` arms, braces
 * and the final return are missing from view.
 */
2952 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2954 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2955 u16 scid, flags, result;
2956 struct l2cap_chan *chan;
2957 int len = cmd->len - sizeof(*rsp);
2959 scid = __le16_to_cpu(rsp->scid);
2960 flags = __le16_to_cpu(rsp->flags);
2961 result = __le16_to_cpu(rsp->result);
2963 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2964 scid, flags, result);
2966 chan = l2cap_get_chan_by_scid(conn, scid);
2970 l2cap_chan_lock(chan);
2973 case L2CAP_CONF_SUCCESS:
2974 l2cap_conf_rfc_get(chan, rsp->data, len);
2975 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2978 case L2CAP_CONF_PENDING:
2979 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
/* Both sides pending: parse the peer's response and settle locally */
2981 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2984 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2987 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2991 /* check compatibility */
2993 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2994 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2996 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2997 l2cap_build_conf_rsp(chan, buf,
2998 L2CAP_CONF_SUCCESS, 0x0000), buf);
3002 case L2CAP_CONF_UNACCEPT:
3003 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Peer's counter-options would overflow our request buffer */
3006 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3007 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3011 /* throw out any old stored conf requests */
3012 result = L2CAP_CONF_SUCCESS;
3013 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
3016 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3020 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3021 L2CAP_CONF_REQ, len, req);
3022 chan->num_conf_req++;
3023 if (result != L2CAP_CONF_SUCCESS)
3029 l2cap_chan_set_err(chan, ECONNRESET);
3031 __set_chan_timer(chan,
3032 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
3033 l2cap_send_disconn_req(conn, chan, ECONNRESET);
3040 set_bit(CONF_INPUT_DONE, &chan->conf_state);
3042 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
3043 set_default_fcs(chan);
3045 l2cap_state_change(chan, BT_CONNECTED);
3046 chan->next_tx_seq = 0;
3047 chan->expected_tx_seq = 0;
3048 skb_queue_head_init(&chan->tx_q);
3049 if (chan->mode == L2CAP_MODE_ERTM)
3050 l2cap_ertm_init(chan);
3052 l2cap_chan_ready(chan);
3056 l2cap_chan_unlock(chan);
/* Handle a peer-initiated L2CAP Disconnection Request: acknowledge with
 * a DISCONN_RSP (dcid/scid swapped to our view of the channel), then
 * delete the channel. Channel lookup and deletion happen under
 * conn->chan_lock; ops->close() is called after dropping the channel lock.
 */
3060 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3062 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3063 struct l2cap_disconn_rsp rsp;
3065 struct l2cap_chan *chan;
3068 scid = __le16_to_cpu(req->scid);
3069 dcid = __le16_to_cpu(req->dcid);
3071 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3073 mutex_lock(&conn->chan_lock);
/* The peer's dcid is our scid, so look up by dcid */
3075 chan = __l2cap_get_chan_by_scid(conn, dcid);
3077 mutex_unlock(&conn->chan_lock);
3081 l2cap_chan_lock(chan);
3085 rsp.dcid = cpu_to_le16(chan->scid);
3086 rsp.scid = cpu_to_le16(chan->dcid);
3087 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
/* Stop further socket I/O in both directions */
3090 sk->sk_shutdown = SHUTDOWN_MASK;
3093 l2cap_chan_del(chan, ECONNRESET);
3095 l2cap_chan_unlock(chan);
3097 chan->ops->close(chan->data);
3099 mutex_unlock(&conn->chan_lock);
/* Handle a Disconnection Response to our own disconnect request:
 * find the channel by our scid and finish tearing it down (err 0 --
 * this is the expected, clean completion of a local disconnect).
 */
3104 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3106 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3108 struct l2cap_chan *chan;
3110 scid = __le16_to_cpu(rsp->scid);
3111 dcid = __le16_to_cpu(rsp->dcid);
3113 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3115 mutex_lock(&conn->chan_lock);
3117 chan = __l2cap_get_chan_by_scid(conn, scid);
3119 mutex_unlock(&conn->chan_lock);
3123 l2cap_chan_lock(chan);
3125 l2cap_chan_del(chan, 0);
3127 l2cap_chan_unlock(chan);
3129 chan->ops->close(chan->data);
3131 mutex_unlock(&conn->chan_lock);
/* Handle an Information Request. Supported queries: the extended
 * feature mask and the fixed-channels bitmap; anything else is
 * answered with L2CAP_IR_NOTSUPP.
 */
3136 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3138 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3141 type = __le16_to_cpu(req->type);
3143 BT_DBG("type 0x%4.4x", type);
3145 if (type == L2CAP_IT_FEAT_MASK) {
3147 u32 feat_mask = l2cap_feat_mask;
3148 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3149 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3150 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming (and, conditionally, extended flow
 * spec / extended window) on top of the base feature mask */
3152 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3155 feat_mask |= L2CAP_FEAT_EXT_FLOW
3156 | L2CAP_FEAT_EXT_WINDOW;
3158 put_unaligned_le32(feat_mask, rsp->data);
3159 l2cap_send_cmd(conn, cmd->ident,
3160 L2CAP_INFO_RSP, sizeof(buf), buf);
3161 } else if (type == L2CAP_IT_FIXED_CHAN) {
3163 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* Toggle the A2MP bit in the global fixed-channel map before
 * copying it out -- NOTE(review): condition selecting set vs.
 * clear is elsewhere; presumably gated on AMP availability. */
3166 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3168 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3170 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3171 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3172 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3173 l2cap_send_cmd(conn, cmd->ident,
3174 L2CAP_INFO_RSP, sizeof(buf), buf);
3176 struct l2cap_info_rsp rsp;
3177 rsp.type = cpu_to_le16(type);
3178 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3179 l2cap_send_cmd(conn, cmd->ident,
3180 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response. Drives the connection's info state
 * machine: a FEAT_MASK answer may chain into a FIXED_CHAN request;
 * any terminal answer marks FEAT_MASK_REQ_DONE and kicks
 * l2cap_conn_start() to resume pending channel setup.
 */
3186 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3188 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3191 type = __le16_to_cpu(rsp->type);
3192 result = __le16_to_cpu(rsp->result);
3194 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3196 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3197 if (cmd->ident != conn->info_ident ||
3198 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3201 cancel_delayed_work(&conn->info_timer);
3203 if (result != L2CAP_IR_SUCCESS) {
3204 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3205 conn->info_ident = 0;
3207 l2cap_conn_start(conn);
3212 if (type == L2CAP_IT_FEAT_MASK) {
3213 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Remote supports fixed channels: issue a follow-up query */
3215 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3216 struct l2cap_info_req req;
3217 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3219 conn->info_ident = l2cap_get_ident(conn);
3221 l2cap_send_cmd(conn, conn->info_ident,
3222 L2CAP_INFO_REQ, sizeof(req), &req);
3224 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3225 conn->info_ident = 0;
3227 l2cap_conn_start(conn);
3229 } else if (type == L2CAP_IT_FIXED_CHAN) {
3230 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3231 conn->info_ident = 0;
3233 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request. Currently a stub: after
 * validating the command length it always rejects the request with
 * L2CAP_CR_NO_MEM (see the placeholder comment below).
 */
3239 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3240 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3243 struct l2cap_create_chan_req *req = data;
3244 struct l2cap_create_chan_rsp rsp;
/* Malformed request: payload must be exactly sizeof(*req) */
3247 if (cmd_len != sizeof(*req))
3253 psm = le16_to_cpu(req->psm);
3254 scid = le16_to_cpu(req->scid);
3256 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3258 /* Placeholder: Always reject */
3260 rsp.scid = cpu_to_le16(scid);
3261 rsp.result = L2CAP_CR_NO_MEM;
3262 rsp.status = L2CAP_CS_NO_INFO;
3264 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Handle an AMP Create Channel Response by delegating to the regular
 * Connect Response handler -- the two responses share the same layout.
 */
3270 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3271 struct l2cap_cmd_hdr *cmd, void *data)
3273 BT_DBG("conn %p", conn);
3275 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response carrying the given icid and result. */
3278 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3279 u16 icid, u16 result)
3281 struct l2cap_move_chan_rsp rsp;
3283 BT_DBG("icid %d, result %d", icid, result);
3285 rsp.icid = cpu_to_le16(icid);
3286 rsp.result = cpu_to_le16(result);
3288 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirmation; allocates a fresh signaling ident
 * and records it on the channel so the later cfm-rsp can be matched.
 */
3291 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3292 struct l2cap_chan *chan, u16 icid, u16 result)
3294 struct l2cap_move_chan_cfm cfm;
3297 BT_DBG("icid %d, result %d", icid, result);
3299 ident = l2cap_get_ident(conn);
/* NOTE(review): chan may be NULL here (see l2cap_move_channel_rsp,
 * which passes NULL) -- confirm a NULL check exists in elided lines. */
3301 chan->ident = ident;
3303 cfm.icid = cpu_to_le16(icid);
3304 cfm.result = cpu_to_le16(result);
3306 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Send a Move Channel Confirmation Response for the given icid. */
3309 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3312 struct l2cap_move_chan_cfm_rsp rsp;
3314 BT_DBG("icid %d", icid);
3316 rsp.icid = cpu_to_le16(icid);
3317 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request. AMP channel moves are not implemented:
 * after a length check, the request is always refused with
 * L2CAP_MR_NOT_ALLOWED.
 */
3320 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3321 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3323 struct l2cap_move_chan_req *req = data;
3325 u16 result = L2CAP_MR_NOT_ALLOWED;
3327 if (cmd_len != sizeof(*req))
3330 icid = le16_to_cpu(req->icid);
3332 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3337 /* Placeholder: Always refuse */
3338 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response. Placeholder: always answers with an
 * UNCONFIRMED confirmation (no channel is associated, hence NULL chan).
 */
3343 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3344 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3346 struct l2cap_move_chan_rsp *rsp = data;
3349 if (cmd_len != sizeof(*rsp))
3352 icid = le16_to_cpu(rsp->icid);
3353 result = le16_to_cpu(rsp->result);
3355 BT_DBG("icid %d, result %d", icid, result);
3357 /* Placeholder: Always unconfirmed */
3358 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirmation: validate the length and always
 * acknowledge with a confirmation response (no move logic yet).
 */
3363 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3364 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3366 struct l2cap_move_chan_cfm *cfm = data;
3369 if (cmd_len != sizeof(*cfm))
3372 icid = le16_to_cpu(cfm->icid);
3373 result = le16_to_cpu(cfm->result);
3375 BT_DBG("icid %d, result %d", icid, result);
3377 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirmation Response: length check plus debug
 * logging only -- nothing else to do in this placeholder implementation.
 */
3382 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3383 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3385 struct l2cap_move_chan_cfm_rsp *rsp = data;
3388 if (cmd_len != sizeof(*rsp))
3391 icid = le16_to_cpu(rsp->icid);
3393 BT_DBG("icid %d", icid);
/* Validate LE connection parameters against the allowed ranges:
 * interval min/max in [6, 3200] with min <= max, supervision timeout
 * multiplier in [10, 3200], timeout strictly greater than max interval,
 * and slave latency capped both at 499 and at what the timeout allows.
 */
3398 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3403 if (min > max || min < 6 || max > 3200)
3406 if (to_multiplier < 10 || to_multiplier > 3200)
3409 if (max >= to_multiplier * 8)
/* Largest latency that still lets the link meet the timeout */
3412 max_latency = (to_multiplier * 8 / max) - 1;
3413 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request. Only valid when we
 * are the master; validates the proposed parameters, always answers
 * with ACCEPTED/REJECTED, and applies accepted parameters via
 * hci_le_conn_update().
 */
3419 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3420 struct l2cap_cmd_hdr *cmd, u8 *data)
3422 struct hci_conn *hcon = conn->hcon;
3423 struct l2cap_conn_param_update_req *req;
3424 struct l2cap_conn_param_update_rsp rsp;
3425 u16 min, max, latency, to_multiplier, cmd_len;
/* Slaves must not receive this request */
3428 if (!(hcon->link_mode & HCI_LM_MASTER))
3431 cmd_len = __le16_to_cpu(cmd->len);
3432 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3435 req = (struct l2cap_conn_param_update_req *) data;
3436 min = __le16_to_cpu(req->min);
3437 max = __le16_to_cpu(req->max);
3438 latency = __le16_to_cpu(req->latency);
3439 to_multiplier = __le16_to_cpu(req->to_multiplier);
3441 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3442 min, max, latency, to_multiplier);
3444 memset(&rsp, 0, sizeof(rsp));
3446 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3448 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3450 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3452 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters passed validation: push them down to the controller */
3456 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signaling command to its handler based on
 * cmd->code. ECHO_REQ is answered inline by mirroring the payload;
 * unknown codes are logged as errors.
 */
3461 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3462 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3466 switch (cmd->code) {
3467 case L2CAP_COMMAND_REJ:
3468 l2cap_command_rej(conn, cmd, data);
3471 case L2CAP_CONN_REQ:
3472 err = l2cap_connect_req(conn, cmd, data);
3475 case L2CAP_CONN_RSP:
3476 err = l2cap_connect_rsp(conn, cmd, data);
3479 case L2CAP_CONF_REQ:
3480 err = l2cap_config_req(conn, cmd, cmd_len, data);
3483 case L2CAP_CONF_RSP:
3484 err = l2cap_config_rsp(conn, cmd, data);
3487 case L2CAP_DISCONN_REQ:
3488 err = l2cap_disconnect_req(conn, cmd, data);
3491 case L2CAP_DISCONN_RSP:
3492 err = l2cap_disconnect_rsp(conn, cmd, data);
3495 case L2CAP_ECHO_REQ:
/* Echo the request payload straight back */
3496 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3499 case L2CAP_ECHO_RSP:
3502 case L2CAP_INFO_REQ:
3503 err = l2cap_information_req(conn, cmd, data);
3506 case L2CAP_INFO_RSP:
3507 err = l2cap_information_rsp(conn, cmd, data);
3510 case L2CAP_CREATE_CHAN_REQ:
3511 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3514 case L2CAP_CREATE_CHAN_RSP:
3515 err = l2cap_create_channel_rsp(conn, cmd, data);
3518 case L2CAP_MOVE_CHAN_REQ:
3519 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3522 case L2CAP_MOVE_CHAN_RSP:
3523 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3526 case L2CAP_MOVE_CHAN_CFM:
3527 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3530 case L2CAP_MOVE_CHAN_CFM_RSP:
3531 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3535 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signaling command. Only the connection
 * parameter update request needs handling here; rejects and update
 * responses fall through, and unknown codes are logged.
 */
3543 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3544 struct l2cap_cmd_hdr *cmd, u8 *data)
3546 switch (cmd->code) {
3547 case L2CAP_COMMAND_REJ:
3550 case L2CAP_CONN_PARAM_UPDATE_REQ:
3551 return l2cap_conn_param_update_req(conn, cmd, data);
3553 case L2CAP_CONN_PARAM_UPDATE_RSP:
3557 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process a signaling-channel skb: iterate over the concatenated
 * command headers it contains, dispatching each to the LE or BR/EDR
 * handler depending on the link type. A handler error triggers a
 * COMMAND_REJ (reason currently hard-wired to NOT_UNDERSTOOD).
 */
3562 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3563 struct sk_buff *skb)
3565 u8 *data = skb->data;
3567 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic first */
3570 l2cap_raw_recv(conn, skb);
3572 while (len >= L2CAP_CMD_HDR_SIZE) {
3574 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3575 data += L2CAP_CMD_HDR_SIZE;
3576 len -= L2CAP_CMD_HDR_SIZE;
3578 cmd_len = le16_to_cpu(cmd.len);
3580 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Payload overruns the skb, or ident 0 (reserved): bail out */
3582 if (cmd_len > len || !cmd.ident) {
3583 BT_DBG("corrupted command");
3587 if (conn->hcon->type == LE_LINK)
3588 err = l2cap_le_sig_cmd(conn, &cmd, data);
3590 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3593 struct l2cap_cmd_rej_unk rej;
3595 BT_ERR("Wrong link type (%d)", err);
3597 /* FIXME: Map err to a valid reason */
3598 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3599 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the CRC16 FCS trailer of a received frame when the
 * channel uses L2CAP_FCS_CRC16. The CRC covers the L2CAP header (which
 * has already been pulled, hence skb->data - hdr_size) plus the payload.
 */
3609 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3611 u16 our_fcs, rcv_fcs;
/* Extended vs. enhanced control field changes the header size */
3614 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3615 hdr_size = L2CAP_EXT_HDR_SIZE;
3617 hdr_size = L2CAP_ENH_HDR_SIZE;
3619 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Drop the 2-byte FCS from the skb, then read it from just
 * past the (now shortened) payload */
3620 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3621 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3622 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3624 if (our_fcs != rcv_fcs)
/* Answer a poll by sending whichever of I-frames, RR or RNR is
 * appropriate: RNR if we are locally busy, otherwise (re)transmit
 * pending I-frames, falling back to a plain RR if nothing was sent.
 */
3630 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3634 chan->frames_sent = 0;
3636 control |= __set_reqseq(chan, chan->buffer_seq);
3638 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3639 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3640 l2cap_send_sframe(chan, control);
3641 set_bit(CONN_RNR_SENT, &chan->conn_state);
3644 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3645 l2cap_retransmit_frames(chan);
3647 l2cap_ertm_send(chan);
/* Nothing went out and we are not busy: acknowledge with an RR */
3649 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3650 chan->frames_sent == 0) {
3651 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3652 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq distance from buffer_seq. Duplicates (same tx_seq)
 * are detected via the peek; otherwise the skb is placed before the
 * first queued frame with a larger offset, or appended at the tail.
 */
3656 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3658 struct sk_buff *next_skb;
3659 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequencing metadata in the skb control block */
3661 bt_cb(skb)->tx_seq = tx_seq;
3662 bt_cb(skb)->sar = sar;
3664 next_skb = skb_peek(&chan->srej_q);
3666 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3669 if (bt_cb(next_skb)->tx_seq == tx_seq)
3672 next_tx_seq_offset = __seq_offset(chan,
3673 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3675 if (next_tx_seq_offset > tx_seq_offset) {
3676 __skb_queue_before(&chan->srej_q, next_skb, skb);
3680 if (skb_queue_is_last(&chan->srej_q, next_skb))
3683 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3686 __skb_queue_tail(&chan->srej_q, skb);
/* Append new_frag to skb's frag_list (tracked via *last_frag to avoid
 * walking the list) and account its length into skb->len, data_len and
 * truesize so the parent skb reflects the whole reassembled SDU.
 */
3691 static void append_skb_frag(struct sk_buff *skb,
3692 struct sk_buff *new_frag, struct sk_buff **last_frag)
3694 /* skb->len reflects data in skb as well as all fragments
3695 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones chain off the
 * previously appended fragment */
3697 if (!skb_has_frag_list(skb))
3698 skb_shinfo(skb)->frag_list = new_frag;
3700 new_frag->next = NULL;
3702 (*last_frag)->next = new_frag;
3703 *last_frag = new_frag;
3705 skb->len += new_frag->len;
3706 skb->data_len += new_frag->len;
3707 skb->truesize += new_frag->truesize;
/* SAR reassembly state machine. UNSEGMENTED frames are delivered
 * directly; START records sdu_len (first two bytes of the payload) and
 * begins collecting fragments; CONTINUE/END append via append_skb_frag
 * and END delivers the completed SDU to the channel's recv op.
 * Error paths free the partial SDU and reset the reassembly state.
 */
3710 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3714 switch (__get_ctrl_sar(chan, control)) {
3715 case L2CAP_SAR_UNSEGMENTED:
3719 err = chan->ops->recv(chan->data, skb);
3722 case L2CAP_SAR_START:
/* SDU length prefix precedes the data in the first segment */
3726 chan->sdu_len = get_unaligned_le16(skb->data);
3727 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Announced SDU would overflow our MTU: reject */
3729 if (chan->sdu_len > chan->imtu) {
3734 if (skb->len >= chan->sdu_len)
3738 chan->sdu_last_frag = skb;
3744 case L2CAP_SAR_CONTINUE:
3748 append_skb_frag(chan->sdu, skb,
3749 &chan->sdu_last_frag);
/* Middle fragment must not complete (or overrun) the SDU */
3752 if (chan->sdu->len >= chan->sdu_len)
3762 append_skb_frag(chan->sdu, skb,
3763 &chan->sdu_last_frag);
/* Final fragment must make the total exactly sdu_len */
3766 if (chan->sdu->len != chan->sdu_len)
3769 err = chan->ops->recv(chan->data, chan->sdu);
3772 /* Reassembly complete */
3774 chan->sdu_last_frag = NULL;
/* Failure path: discard whatever was collected so far */
3782 kfree_skb(chan->sdu);
3784 chan->sdu_last_frag = NULL;
/* Enter ERTM local-busy state: flag it and arm the ack timer so the
 * peer eventually gets an RNR.
 */
3791 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3793 BT_DBG("chan %p, Enter local busy", chan);
3795 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3797 __set_ack_timer(chan);
/* Leave ERTM local-busy state. If we previously told the peer RNR, send
 * an RR with the poll bit set and wait for its F-bit response (monitor
 * timer armed, CONN_WAIT_F set); then clear the busy/RNR flags.
 */
3800 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3804 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3807 control = __set_reqseq(chan, chan->buffer_seq);
3808 control |= __set_ctrl_poll(chan);
3809 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3810 l2cap_send_sframe(chan, control);
3811 chan->retry_count = 1;
/* Poll sent: switch from retransmission to monitor timing */
3813 __clear_retrans_timer(chan);
3814 __set_monitor_timer(chan);
3816 set_bit(CONN_WAIT_F, &chan->conn_state);
3819 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3820 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3822 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer to toggle local-busy flow control;
 * only meaningful in ERTM mode.
 */
3825 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3827 if (chan->mode == L2CAP_MODE_ERTM) {
3829 l2cap_ertm_enter_local_busy(chan);
3831 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ queue in-order starting at tx_seq: deliver each
 * consecutively numbered queued frame to reassembly, advancing
 * buffer_seq_srej, and stop at the first gap or if we become busy.
 */
3835 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3837 struct sk_buff *skb;
3840 while ((skb = skb_peek(&chan->srej_q)) &&
3841 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Queue head isn't the next expected frame: gap remains */
3844 if (bt_cb(skb)->tx_seq != tx_seq)
3847 skb = skb_dequeue(&chan->srej_q);
3848 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3849 err = l2cap_reassemble_sdu(chan, skb, control);
3852 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3856 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3857 tx_seq = __next_seq(chan, tx_seq);
/* Walk the outstanding-SREJ list up to (and including) tx_seq,
 * re-sending an SREJ S-frame for each entry and re-queueing it at the
 * tail; the entry matching tx_seq terminates the walk.
 */
3861 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3863 struct srej_list *l, *tmp;
3866 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3867 if (l->tx_seq == tx_seq) {
3872 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3873 control |= __set_reqseq(chan, l->tx_seq);
3874 l2cap_send_sframe(chan, control);
3876 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing sequence number between expected_tx_seq and
 * tx_seq, recording each in chan->srej_l (GFP_ATOMIC allocations), and
 * finally advance expected_tx_seq past the received frame.
 */
3880 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3882 struct srej_list *new;
3885 while (tx_seq != chan->expected_tx_seq) {
3886 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3887 control |= __set_reqseq(chan, chan->expected_tx_seq);
3888 l2cap_send_sframe(chan, control);
3890 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3894 new->tx_seq = chan->expected_tx_seq;
3896 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3898 list_add_tail(&new->list, &chan->srej_l);
/* Skip over the frame that triggered the SREJ burst */
3901 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive path. Handles F-bit poll completion, acks
 * frames up to req_seq, validates tx_seq against the window, and then
 * either: queues/reorders out-of-sequence frames via the SREJ machinery,
 * or delivers the expected in-sequence frame to reassembly, sending
 * acks every (tx_win/6)+1 frames.
 */
3906 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3908 u16 tx_seq = __get_txseq(chan, rx_control);
3909 u16 req_seq = __get_reqseq(chan, rx_control);
3910 u8 sar = __get_ctrl_sar(chan, rx_control);
3911 int tx_seq_offset, expected_tx_seq_offset;
/* Ack cadence: one ack per (tx_win/6)+1 received I-frames */
3912 int num_to_ack = (chan->tx_win/6) + 1;
3915 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3916 tx_seq, rx_control);
/* F-bit answers our earlier poll: stop the monitor timer */
3918 if (__is_ctrl_final(chan, rx_control) &&
3919 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3920 __clear_monitor_timer(chan);
3921 if (chan->unacked_frames > 0)
3922 __set_retrans_timer(chan);
3923 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* req_seq acknowledges our outbound frames */
3926 chan->expected_ack_seq = req_seq;
3927 l2cap_drop_acked_frames(chan);
3929 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3931 /* invalid tx_seq */
3932 if (tx_seq_offset >= chan->tx_win) {
3933 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3937 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3938 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3939 l2cap_send_ack(chan);
3943 if (tx_seq == chan->expected_tx_seq)
3946 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3947 struct srej_list *first;
3949 first = list_first_entry(&chan->srej_l,
3950 struct srej_list, list);
/* This frame fills the oldest outstanding SREJ gap */
3951 if (tx_seq == first->tx_seq) {
3952 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3953 l2cap_check_srej_gap(chan, tx_seq);
3955 list_del(&first->list);
/* All gaps filled: leave SREJ_SENT recovery */
3958 if (list_empty(&chan->srej_l)) {
3959 chan->buffer_seq = chan->buffer_seq_srej;
3960 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3961 l2cap_send_ack(chan);
3962 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3965 struct srej_list *l;
3967 /* duplicated tx_seq */
3968 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3971 list_for_each_entry(l, &chan->srej_l, list) {
3972 if (l->tx_seq == tx_seq) {
3973 l2cap_resend_srejframe(chan, tx_seq);
3978 err = l2cap_send_srejframe(chan, tx_seq);
3980 l2cap_send_disconn_req(chan->conn, chan, -err);
3985 expected_tx_seq_offset = __seq_offset(chan,
3986 chan->expected_tx_seq, chan->buffer_seq);
3988 /* duplicated tx_seq */
3989 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter SREJ recovery */
3992 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3994 BT_DBG("chan %p, Enter SREJ", chan);
3996 INIT_LIST_HEAD(&chan->srej_l);
3997 chan->buffer_seq_srej = chan->buffer_seq;
3999 __skb_queue_head_init(&chan->srej_q);
4000 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
4002 /* Set P-bit only if there are some I-frames to ack. */
4003 if (__clear_ack_timer(chan))
4004 set_bit(CONN_SEND_PBIT, &chan->conn_state);
4006 err = l2cap_send_srejframe(chan, tx_seq);
4008 l2cap_send_disconn_req(chan->conn, chan, -err);
/* In-sequence frame: consume it */
4015 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
4017 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4018 bt_cb(skb)->tx_seq = tx_seq;
4019 bt_cb(skb)->sar = sar;
4020 __skb_queue_tail(&chan->srej_q, skb);
4024 err = l2cap_reassemble_sdu(chan, skb, rx_control);
4025 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
4028 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4032 if (__is_ctrl_final(chan, rx_control)) {
4033 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4034 l2cap_retransmit_frames(chan);
4038 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
4039 if (chan->num_acked == num_to_ack - 1)
4040 l2cap_send_ack(chan);
4042 __set_ack_timer(chan);
/* Handle a received RR (Receiver Ready) S-frame: ack our frames up to
 * req_seq, answer a P-bit poll with the F-bit set, and resume or
 * retransmit transmission depending on REJ/SREJ/remote-busy state.
 */
4051 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4053 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4054 __get_reqseq(chan, rx_control), rx_control);
4056 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4057 l2cap_drop_acked_frames(chan);
4059 if (__is_ctrl_poll(chan, rx_control)) {
/* Peer polls us: our next frame must carry the F-bit */
4060 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4061 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4062 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4063 (chan->unacked_frames > 0))
4064 __set_retrans_timer(chan);
4066 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4067 l2cap_send_srejtail(chan);
4069 l2cap_send_i_or_rr_or_rnr(chan);
4072 } else if (__is_ctrl_final(chan, rx_control)) {
4073 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F-bit without a pending REJ means retransmit */
4075 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4076 l2cap_retransmit_frames(chan);
4079 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4080 (chan->unacked_frames > 0))
4081 __set_retrans_timer(chan);
4083 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4084 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4085 l2cap_send_ack(chan);
4087 l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: the peer rejects everything from
 * req_seq onward. Ack up to that point, then retransmit; if we are
 * waiting for an F-bit, remember the REJ via CONN_REJ_ACT so the
 * F-bit handler does not retransmit a second time.
 */
4091 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4093 u16 tx_seq = __get_reqseq(chan, rx_control);
4095 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4097 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4099 chan->expected_ack_seq = tx_seq;
4100 l2cap_drop_acked_frames(chan);
4102 if (__is_ctrl_final(chan, rx_control)) {
4103 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4104 l2cap_retransmit_frames(chan);
4106 l2cap_retransmit_frames(chan);
4108 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4109 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a received SREJ S-frame asking for selective retransmission of
 * the frame at req_seq. Behavior differs by P/F bits: a poll also acks
 * and resumes sending with the F-bit pending; an F-bit clears a matching
 * saved SREJ; a plain SREJ just retransmits the one frame, remembering
 * it in srej_save_reqseq if we are awaiting an F-bit.
 */
4112 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4114 u16 tx_seq = __get_reqseq(chan, rx_control);
4116 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4118 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4120 if (__is_ctrl_poll(chan, rx_control)) {
4121 chan->expected_ack_seq = tx_seq;
4122 l2cap_drop_acked_frames(chan);
4124 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4125 l2cap_retransmit_one_frame(chan, tx_seq);
4127 l2cap_ertm_send(chan);
4129 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4130 chan->srej_save_reqseq = tx_seq;
4131 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4133 } else if (__is_ctrl_final(chan, rx_control)) {
/* F-bit for the SREJ we already serviced: nothing to resend */
4134 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4135 chan->srej_save_reqseq == tx_seq)
4136 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4138 l2cap_retransmit_one_frame(chan, tx_seq);
4140 l2cap_retransmit_one_frame(chan, tx_seq);
4141 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4142 chan->srej_save_reqseq = tx_seq;
4143 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a received RNR (Receiver Not Ready): mark the peer busy, ack
 * up to req_seq, and stop the retransmission timer. A P-bit poll is
 * answered with either the SREJ tail (in SREJ recovery) or an RR/RNR.
 */
4148 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4150 u16 tx_seq = __get_reqseq(chan, rx_control);
4152 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4154 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4155 chan->expected_ack_seq = tx_seq;
4156 l2cap_drop_acked_frames(chan);
4158 if (__is_ctrl_poll(chan, rx_control))
4159 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4161 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* Peer can't receive: no point retransmitting for now */
4162 __clear_retrans_timer(chan);
4163 if (__is_ctrl_poll(chan, rx_control))
4164 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4168 if (__is_ctrl_poll(chan, rx_control)) {
4169 l2cap_send_srejtail(chan);
4171 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4172 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received S-frame by its supervisory function (RR / REJ /
 * SREJ / RNR). An F-bit while CONN_WAIT_F is set first completes the
 * outstanding poll (monitor timer cleared, retrans timer re-armed if
 * frames remain unacked).
 */
4176 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4178 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4180 if (__is_ctrl_final(chan, rx_control) &&
4181 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4182 __clear_monitor_timer(chan);
4183 if (chan->unacked_frames > 0)
4184 __set_retrans_timer(chan);
4185 clear_bit(CONN_WAIT_F, &chan->conn_state);
4188 switch (__get_ctrl_super(chan, rx_control)) {
4189 case L2CAP_SUPER_RR:
4190 l2cap_data_channel_rrframe(chan, rx_control);
4193 case L2CAP_SUPER_REJ:
4194 l2cap_data_channel_rejframe(chan, rx_control);
4197 case L2CAP_SUPER_SREJ:
4198 l2cap_data_channel_srejframe(chan, rx_control);
4201 case L2CAP_SUPER_RNR:
4202 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for a frame received on an ERTM channel: strip the
 * control field, verify the FCS, sanity-check payload length against
 * MPS and req_seq against the unacked window, then route the frame to
 * the I-frame or S-frame handler.
 */
4210 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4214 int len, next_tx_seq_offset, req_seq_offset;
4216 control = __get_control(chan, skb->data);
4217 skb_pull(skb, __ctrl_size(chan));
4221 * We can just drop the corrupted I-frame here.
4222 * Receiver will miss it and start proper recovery
4223 * procedures and ask retransmission.
4225 if (l2cap_check_fcs(chan, skb))
/* Discount the SDU-length prefix of a SAR start I-frame */
4228 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4229 len -= L2CAP_SDULEN_SIZE;
4231 if (chan->fcs == L2CAP_FCS_CRC16)
4232 len -= L2CAP_FCS_SIZE;
/* Payload bigger than the negotiated MPS: protocol violation */
4234 if (len > chan->mps) {
4235 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4239 req_seq = __get_reqseq(chan, control);
4241 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4243 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4244 chan->expected_ack_seq);
4246 /* check for invalid req-seq */
4247 if (req_seq_offset > next_tx_seq_offset) {
4248 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4252 if (!__is_sframe(chan, control)) {
4254 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4258 l2cap_data_channel_iframe(chan, control, skb);
/* S-frames must carry no payload */
4262 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4266 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data frame to the channel identified by cid, dispatching on
 * the channel mode: basic mode passes the skb straight to recv (dropping
 * oversized frames), ERTM goes through l2cap_ertm_data_rcv, and
 * streaming mode does FCS/length checks plus lossy in-order reassembly
 * (missing frames discard the partial SDU instead of recovering).
 */
4276 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4278 struct l2cap_chan *chan;
4283 chan = l2cap_get_chan_by_scid(conn, cid);
4285 BT_DBG("unknown cid 0x%4.4x", cid);
4286 /* Drop packet and return */
4291 l2cap_chan_lock(chan);
4293 BT_DBG("chan %p, len %d", chan, skb->len);
4295 if (chan->state != BT_CONNECTED)
4298 switch (chan->mode) {
4299 case L2CAP_MODE_BASIC:
4300 /* If socket recv buffers overflows we drop data here
4301 * which is *bad* because L2CAP has to be reliable.
4302 * But we don't have any other choice. L2CAP doesn't
4303 * provide flow control mechanism. */
4305 if (chan->imtu < skb->len)
4308 if (!chan->ops->recv(chan->data, skb))
4312 case L2CAP_MODE_ERTM:
4313 l2cap_ertm_data_rcv(chan, skb);
4317 case L2CAP_MODE_STREAMING:
4318 control = __get_control(chan, skb->data);
4319 skb_pull(skb, __ctrl_size(chan));
4322 if (l2cap_check_fcs(chan, skb))
4325 if (__is_sar_start(chan, control))
4326 len -= L2CAP_SDULEN_SIZE;
4328 if (chan->fcs == L2CAP_FCS_CRC16)
4329 len -= L2CAP_FCS_SIZE;
/* Streaming channels never carry S-frames */
4331 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4334 tx_seq = __get_txseq(chan, control);
4336 if (chan->expected_tx_seq != tx_seq) {
4337 /* Frame(s) missing - must discard partial SDU */
4338 kfree_skb(chan->sdu);
4340 chan->sdu_last_frag = NULL;
4343 /* TODO: Notify userland of missing data */
4346 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4348 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4349 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4354 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4362 l2cap_chan_unlock(chan);
/* Deliver a connectionless (G-frame) packet to the global channel
 * listening on the given PSM for our local address; drop it if the
 * channel is not bound/connected or the payload exceeds its MTU.
 */
4367 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4369 struct l2cap_chan *chan;
4371 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4375 BT_DBG("chan %p, len %d", chan, skb->len);
4377 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4380 if (chan->imtu < skb->len)
4383 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE data) packet to the global channel bound to the
 * fixed cid for our local address; same bound-state and MTU checks as
 * the connectionless path.
 */
4392 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4394 struct l2cap_chan *chan;
4396 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4400 BT_DBG("chan %p, len %d", chan, skb->len);
4402 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4405 if (chan->imtu < skb->len)
4408 if (!chan->ops->recv(chan->data, skb))
/* Top-level L2CAP receive demultiplexer: pull the basic header, verify
 * the advertised length matches the skb, and route by cid to signaling,
 * connectionless, ATT, SMP, or a connection-oriented data channel.
 */
4417 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4419 struct l2cap_hdr *lh = (void *) skb->data;
4423 skb_pull(skb, L2CAP_HDR_SIZE);
4424 cid = __le16_to_cpu(lh->cid);
4425 len = __le16_to_cpu(lh->len);
/* Truncated or padded frame: header length must equal payload */
4427 if (len != skb->len) {
4432 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4435 case L2CAP_CID_LE_SIGNALING:
4436 case L2CAP_CID_SIGNALING:
4437 l2cap_sig_channel(conn, skb);
4440 case L2CAP_CID_CONN_LESS:
4441 psm = get_unaligned_le16(skb->data);
4443 l2cap_conless_channel(conn, psm, skb);
4446 case L2CAP_CID_LE_DATA:
4447 l2cap_att_channel(conn, cid, skb);
/* SMP failure is fatal for the whole connection */
4451 if (smp_sig_channel(conn, skb))
4452 l2cap_conn_del(conn->hcon, EACCES);
4456 l2cap_data_channel(conn, cid, skb);
4461 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: decide whether to accept an incoming ACL connection.
 * Scans listening channels; an exact local-address match wins over a
 * BDADDR_ANY wildcard match. Returns the accept/role-switch link-mode
 * bits of the best match.
 */
4463 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4465 int exact = 0, lm1 = 0, lm2 = 0;
4466 struct l2cap_chan *c;
4468 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4470 /* Find listening sockets and check their link_mode */
4471 read_lock(&chan_list_lock);
4472 list_for_each_entry(c, &chan_list, global_l) {
4473 struct sock *sk = c->sk;
4475 if (c->state != BT_LISTEN)
/* Exact local-address match */
4478 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4479 lm1 |= HCI_LM_ACCEPT;
4480 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4481 lm1 |= HCI_LM_MASTER;
/* Wildcard listener */
4483 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4484 lm2 |= HCI_LM_ACCEPT;
4485 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4486 lm2 |= HCI_LM_MASTER;
4489 read_unlock(&chan_list_lock);
4491 return exact ? lm1 : lm2;
/* HCI callback: an outgoing/incoming connection completed. On success
 * attach an l2cap_conn and mark it ready; on failure tear it down with
 * the HCI status translated to an errno.
 */
4494 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4496 struct l2cap_conn *conn;
4498 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4501 conn = l2cap_conn_add(hcon, status);
4503 l2cap_conn_ready(conn);
4505 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: supply the disconnect reason to report for this link;
 * defaults to REMOTE_USER_TERM when no l2cap_conn state exists.
 */
4510 int l2cap_disconn_ind(struct hci_conn *hcon)
4512 struct l2cap_conn *conn = hcon->l2cap_data;
4514 BT_DBG("hcon %p", hcon);
4517 return HCI_ERROR_REMOTE_USER_TERM;
4518 return conn->disc_reason;
/* HCI callback: the link went down -- tear down all L2CAP state for it,
 * translating the HCI reason code to an errno for the channels.
 */
4521 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4523 BT_DBG("hcon %p reason %d", hcon, reason);
4525 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption arms a grace timer at MEDIUM security and closes
 * the channel outright at HIGH; (re)gaining it clears the MEDIUM timer.
 */
4529 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4531 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4534 if (encrypt == 0x00) {
4535 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4536 __clear_chan_timer(chan);
4537 __set_chan_timer(chan,
4538 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4539 } else if (chan->sec_level == BT_SECURITY_HIGH)
4540 l2cap_chan_close(chan, ECONNREFUSED);
4542 if (chan->sec_level == BT_SECURITY_MEDIUM)
4543 __clear_chan_timer(chan);
4547 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
/*
 * HCI callback: an authentication/encryption procedure on the link
 * completed.  Walks every channel on the connection (under chan_lock)
 * and advances each channel's state machine according to the result.
 *
 * @status:  HCI status of the security procedure (0 = success).
 * @encrypt: new link encryption state.
 *
 * NOTE(review): this view is elided (missing braces, 'continue'
 * statements, and the declarations of res/stat used in the BT_CONNECT2
 * arm); comments below describe only the visible lines.
 */
4549 struct l2cap_conn *conn = hcon->l2cap_data;
4550 struct l2cap_chan *chan;
4555 BT_DBG("conn %p", conn);
/* LE links use SMP: distribute keys and stop the security timer. */
4557 if (hcon->type == LE_LINK) {
4558 smp_distribute_keys(conn, 0);
4559 cancel_delayed_work(&conn->security_timer);
4562 mutex_lock(&conn->chan_lock);
4564 list_for_each_entry(chan, &conn->chan_l, list) {
4565 l2cap_chan_lock(chan);
4567 BT_DBG("chan->scid %d", chan->scid);
/* LE data (ATT) channel: on successful encryption, adopt the link's
 * security level and mark the channel ready. */
4569 if (chan->scid == L2CAP_CID_LE_DATA) {
4570 if (!status && encrypt) {
4571 chan->sec_level = hcon->sec_level;
4572 l2cap_chan_ready(chan);
4575 l2cap_chan_unlock(chan);
/* Channels still waiting for connect confirmation are skipped. */
4579 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4580 l2cap_chan_unlock(chan);
/* Established channels only need their encryption state re-checked. */
4584 if (!status && (chan->state == BT_CONNECTED ||
4585 chan->state == BT_CONFIG)) {
4586 l2cap_check_encryption(chan, encrypt);
4587 l2cap_chan_unlock(chan);
/* Outgoing connect blocked on security: proceed on success, otherwise
 * arm the disconnect timer. */
4591 if (chan->state == BT_CONNECT) {
4593 l2cap_send_conn_req(chan);
4595 __clear_chan_timer(chan);
4596 __set_chan_timer(chan,
4597 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
/* Incoming connect blocked on security: answer the pending connect
 * request now that the outcome is known. */
4599 } else if (chan->state == BT_CONNECT2) {
4600 struct sock *sk = chan->sk;
4601 struct l2cap_conn_rsp rsp;
/* Deferred setup: report pending/authorization-pending and wake the
 * listening parent socket so userspace can accept. */
4607 if (bt_sk(sk)->defer_setup) {
4608 struct sock *parent = bt_sk(sk)->parent;
4609 res = L2CAP_CR_PEND;
4610 stat = L2CAP_CS_AUTHOR_PEND;
4612 parent->sk_data_ready(parent, 0);
4614 __l2cap_state_change(chan, BT_CONFIG);
4615 res = L2CAP_CR_SUCCESS;
4616 stat = L2CAP_CS_NO_INFO;
/* Security failed: block the connection and schedule a disconnect. */
4619 __l2cap_state_change(chan, BT_DISCONN);
4620 __set_chan_timer(chan,
4621 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4622 res = L2CAP_CR_SEC_BLOCK;
4623 stat = L2CAP_CS_NO_INFO;
/* Note the swap: the response carries CIDs from the peer's viewpoint. */
4628 rsp.scid = cpu_to_le16(chan->dcid);
4629 rsp.dcid = cpu_to_le16(chan->scid);
4630 rsp.result = cpu_to_le16(res);
4631 rsp.status = cpu_to_le16(stat);
4632 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4636 l2cap_chan_unlock(chan);
4639 mutex_unlock(&conn->chan_lock);
4644 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
/*
 * HCI callback: an ACL data packet (possibly a fragment) arrived.
 * Start fragments (ACL_CONT clear) open a reassembly into conn->rx_skb;
 * continuation fragments are appended until conn->rx_len reaches zero,
 * at which point the complete frame is handed to l2cap_recv_frame().
 *
 * NOTE(review): this view is elided (missing braces, goto/return paths,
 * and local declarations such as len/cid); comments describe only the
 * visible lines.
 */
4646 struct l2cap_conn *conn = hcon->l2cap_data;
/* No connection object yet: create one on the fly (presumably behind an
 * elided NULL check -- confirm). */
4649 conn = l2cap_conn_add(hcon, 0);
4654 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
/* Start fragment (ACL_CONT not set). */
4656 if (!(flags & ACL_CONT)) {
4657 struct l2cap_hdr *hdr;
4658 struct l2cap_chan *chan;
/* A leftover partial frame means the previous reassembly never
 * completed: drop it and flag the connection unreliable. */
4663 BT_ERR("Unexpected start frame (len %d)", skb->len);
4664 kfree_skb(conn->rx_skb);
4665 conn->rx_skb = NULL;
4667 l2cap_conn_unreliable(conn, ECOMM);
4670 /* Start fragment always begin with Basic L2CAP header */
4671 if (skb->len < L2CAP_HDR_SIZE) {
4672 BT_ERR("Frame is too short (len %d)", skb->len);
4673 l2cap_conn_unreliable(conn, ECOMM);
4677 hdr = (struct l2cap_hdr *) skb->data;
/* Total frame length = header-declared payload + the header itself. */
4678 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4679 cid = __le16_to_cpu(hdr->cid);
4681 if (len == skb->len) {
4682 /* Complete frame received */
4683 l2cap_recv_frame(conn, skb);
4687 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
/* A start fragment cannot be longer than the declared frame. */
4689 if (skb->len > len) {
4690 BT_ERR("Frame is too long (len %d, expected len %d)",
4692 l2cap_conn_unreliable(conn, ECOMM);
4696 chan = l2cap_get_chan_by_scid(conn, cid);
/* Enforce the receiving channel's MTU before buffering the frame. */
4698 if (chan && chan->sk) {
4699 struct sock *sk = chan->sk;
4702 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4703 BT_ERR("Frame exceeding recv MTU (len %d, "
4707 l2cap_conn_unreliable(conn, ECOMM);
4713 /* Allocate skb for the complete frame (with header) */
4714 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4718 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes of the frame are still missing. */
4720 conn->rx_len = len - skb->len;
/* Continuation fragment (ACL_CONT set). */
4722 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with no pending reassembly is a protocol violation. */
4724 if (!conn->rx_len) {
4725 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4726 l2cap_conn_unreliable(conn, ECOMM);
/* Fragment overruns the expected remainder: abort the reassembly. */
4730 if (skb->len > conn->rx_len) {
4731 BT_ERR("Fragment is too long (len %d, expected %d)",
4732 skb->len, conn->rx_len);
4733 kfree_skb(conn->rx_skb);
4734 conn->rx_skb = NULL;
4736 l2cap_conn_unreliable(conn, ECOMM);
4740 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4742 conn->rx_len -= skb->len;
4744 if (!conn->rx_len) {
4745 /* Complete frame received */
4746 l2cap_recv_frame(conn, conn->rx_skb);
4747 conn->rx_skb = NULL;
4756 static int l2cap_debugfs_show(struct seq_file *f, void *p)
/*
 * seq_file show callback for the "l2cap" debugfs entry: prints one line
 * per channel (source/dest addresses, state, PSM, CIDs, MTUs, security
 * level, mode) while holding the global channel-list read lock.
 */
4758 struct l2cap_chan *c;
4760 read_lock(&chan_list_lock);
4762 list_for_each_entry(c, &chan_list, global_l) {
4763 struct sock *sk = c->sk;
4765 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4766 batostr(&bt_sk(sk)->src),
4767 batostr(&bt_sk(sk)->dst),
4768 c->state, __le16_to_cpu(c->psm),
4769 c->scid, c->dcid, c->imtu, c->omtu,
4770 c->sec_level, c->mode);
4773 read_unlock(&chan_list_lock);
4778 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
/* debugfs open: bind the single-record seq_file helper to
 * l2cap_debugfs_show. */
4780 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based).
 * NOTE(review): the customary ".read = seq_read" initializer appears
 * elided in this view -- confirm against full source. */
4783 static const struct file_operations l2cap_debugfs_fops = {
4784 .open = l2cap_debugfs_open,
4786 .llseek = seq_lseek,
4787 .release = single_release,
/* debugfs dentry created by l2cap_init() and removed by l2cap_exit(). */
4790 static struct dentry *l2cap_debugfs;
4792 int __init l2cap_init(void)
/*
 * Subsystem init: register the L2CAP socket layer, then create the
 * read-only "l2cap" debugfs file under bt_debugfs.
 * NOTE(review): the error-return path after l2cap_init_sockets() and the
 * NULL check guarding the BT_ERR below are elided in this view.
 */
4796 err = l2cap_init_sockets();
4801 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4802 bt_debugfs, NULL, &l2cap_debugfs_fops);
/* debugfs creation failure is logged. */
4804 BT_ERR("Failed to create L2CAP debug file");
4810 void l2cap_exit(void)
/* Subsystem teardown: remove the debugfs entry and unregister the
 * L2CAP socket layer. */
4812 debugfs_remove(l2cap_debugfs);
4813 l2cap_cleanup_sockets();
/* Module parameter (variable declared elsewhere in this file): allows
 * enhanced retransmission mode to be disabled at load time or via sysfs
 * (mode 0644). */
4816 module_param(disable_ertm, bool, 0644);
4817 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");