2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* Locally advertised L2CAP feature mask; only the fixed-channel feature
 * is unconditionally supported here. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap for information responses; 0x02 presumably marks
 * the L2CAP signalling channel -- confirm against the Core spec bits. */
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Global list of every L2CAP channel, guarded by chan_list_lock. */
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers defined later in the file.
 * NOTE(review): the trailing "void *data" line of the l2cap_send_cmd()
 * prototype appears to be missing from this extract. */
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
78 /* ---- L2CAP channels ---- */
80 static inline void chan_hold(struct l2cap_chan *c)
82 atomic_inc(&c->refcnt);
85 static inline void chan_put(struct l2cap_chan *c)
87 if (atomic_dec_and_test(&c->refcnt))
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
105 struct l2cap_chan *c;
107 list_for_each_entry(c, &conn->chan_l, list) {
114 /* Find channel with given SCID.
115 * Returns locked socket */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
118 struct l2cap_chan *c;
120 read_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
124 read_unlock(&conn->chan_lock);
128 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident)
139 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
141 struct l2cap_chan *c;
143 read_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_ident(conn, ident);
147 read_unlock(&conn->chan_lock);
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
153 struct l2cap_chan *c;
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
165 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
169 write_lock_bh(&chan_list_lock);
171 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
184 for (p = 0x1001; p < 0x1100; p += 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
186 chan->psm = cpu_to_le16(p);
187 chan->sport = cpu_to_le16(p);
194 write_unlock_bh(&chan_list_lock);
198 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
200 write_lock_bh(&chan_list_lock);
204 write_unlock_bh(&chan_list_lock);
209 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
211 u16 cid = L2CAP_CID_DYN_START;
213 for (; cid < L2CAP_CID_DYN_END; cid++) {
214 if (!__l2cap_get_chan_by_scid(conn, cid))
221 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
223 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
225 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
229 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
231 BT_DBG("chan %p state %d", chan, chan->state);
233 if (timer_pending(timer) && del_timer(timer))
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
240 chan->ops->state_change(chan->data, state);
/* chan_timer expiry callback; runs in timer (softirq) context.
 * Picks an error reason from the channel state, closes the channel and
 * tells the socket layer.  NOTE(review): socket locking, the default
 * ETIMEDOUT branch, unlock and the final chan_put are missing from this
 * extract. */
243 static void l2cap_chan_timeout(unsigned long arg)
245 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
246 struct sock *sk = chan->sk;
249 BT_DBG("chan %p state %d", chan, chan->state);
253 if (sock_owned_by_user(sk)) {
/* Socket currently owned by a process -- retry in 200ms instead of
 * racing with it. */
254 /* sk is owned by user. Try again later */
255 __set_chan_timer(chan, HZ / 5);
/* Established/configuring channels and secured outgoing connects time
 * out as "connection refused". */
261 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
262 reason = ECONNREFUSED;
263 else if (chan->state == BT_CONNECT &&
264 chan->sec_level != BT_SECURITY_SDP)
265 reason = ECONNREFUSED;
269 l2cap_chan_close(chan, reason);
273 chan->ops->close(chan->data);
/* Allocate a channel for socket sk, link it on the global chan_list and
 * set up its state timer.  NOTE(review): the NULL check after kzalloc
 * and the chan->sk assignment are missing from this extract. */
277 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
279 struct l2cap_chan *chan;
281 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
287 write_lock_bh(&chan_list_lock);
288 list_add(&chan->global_l, &chan_list);
289 write_unlock_bh(&chan_list_lock);
291 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
293 chan->state = BT_OPEN;
/* Initial reference; released via chan_put() in l2cap_chan_destroy(). */
295 atomic_set(&chan->refcnt, 1);
300 void l2cap_chan_destroy(struct l2cap_chan *chan)
302 write_lock_bh(&chan_list_lock);
303 list_del(&chan->global_l);
304 write_unlock_bh(&chan_list_lock);
/* Attach chan to conn: choose SCID/DCID and output MTU by channel type,
 * seed best-effort QoS defaults and add it to the connection's channel
 * list.  Caller must hold conn->chan_lock for writing.
 * NOTE(review): the chan->conn assignment and the else branches'
 * opening lines are missing from this extract. */
309 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
311 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
312 chan->psm, chan->dcid);
/* 0x13: "remote user terminated connection" -- default disconnect
 * reason until something more specific happens. */
314 conn->disc_reason = 0x13;
318 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
319 if (conn->hcon->type == LE_LINK) {
/* LE links use the fixed LE data CID on both sides. */
321 chan->omtu = L2CAP_LE_DEFAULT_MTU;
322 chan->scid = L2CAP_CID_LE_DATA;
323 chan->dcid = L2CAP_CID_LE_DATA;
325 /* Alloc CID for connection-oriented socket */
326 chan->scid = l2cap_alloc_cid(conn);
327 chan->omtu = L2CAP_DEFAULT_MTU;
329 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
330 /* Connectionless socket */
331 chan->scid = L2CAP_CID_CONN_LESS;
332 chan->dcid = L2CAP_CID_CONN_LESS;
333 chan->omtu = L2CAP_DEFAULT_MTU;
335 /* Raw socket can send/recv signalling messages only */
336 chan->scid = L2CAP_CID_SIGNALING;
337 chan->dcid = L2CAP_CID_SIGNALING;
338 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort extended flow spec. */
341 chan->local_id = L2CAP_BESTEFFORT_ID;
342 chan->local_stype = L2CAP_SERV_BESTEFFORT;
343 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
344 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
345 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
346 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
350 list_add(&chan->list, &conn->chan_l);
/* Detach chan from its connection and mark the socket dead/zapped.
354 * Must be called on the locked socket. */
355 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
357 struct sock *sk = chan->sk;
358 struct l2cap_conn *conn = chan->conn;
359 struct sock *parent = bt_sk(sk)->parent;
361 __clear_chan_timer(chan);
363 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
366 /* Delete from channel list */
367 write_lock_bh(&conn->chan_lock);
368 list_del(&chan->list);
369 write_unlock_bh(&conn->chan_lock);
/* Release the hci_conn reference taken when the channel was added. */
373 hci_conn_put(conn->hcon);
376 l2cap_state_change(chan, BT_CLOSED);
377 sock_set_flag(sk, SOCK_ZAPPED);
/* If this was a pending child of a listening socket, unlink it and wake
 * the parent; otherwise just signal the state change. */
383 bt_accept_unlink(sk);
384 parent->sk_data_ready(parent, 0);
386 sk->sk_state_change(sk);
388 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
389 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
/* ERTM teardown: stop all timers and flush pending queues.
 * NOTE(review): the srej_q busy-queue lines and the per-entry
 * list_del/kfree in the srej loop are missing from this extract. */
392 skb_queue_purge(&chan->tx_q);
394 if (chan->mode == L2CAP_MODE_ERTM) {
395 struct srej_list *l, *tmp;
397 __clear_retrans_timer(chan);
398 __clear_monitor_timer(chan);
399 __clear_ack_timer(chan);
401 skb_queue_purge(&chan->srej_q);
403 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Tear down every not-yet-accepted child channel of a listening socket.
 * NOTE(review): the per-iteration socket lock/unlock lines are missing
 * from this extract. */
410 static void l2cap_chan_cleanup_listen(struct sock *parent)
414 BT_DBG("parent %p", parent);
416 /* Close not yet accepted channels */
417 while ((sk = bt_accept_dequeue(parent, NULL))) {
418 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
419 __clear_chan_timer(chan);
421 l2cap_chan_close(chan, ECONNRESET);
423 chan->ops->close(chan->data);
/* Close a channel according to its current state: listening sockets
 * clean up their backlog, connected ACL channels send a disconnect
 * request, half-open incoming channels answer the pending connect
 * request, everything else is simply deleted.
 * NOTE(review): the case labels of the switch are missing from this
 * extract; the groups below correspond to BT_LISTEN, BT_CONNECTED/
 * BT_CONFIG, BT_CONNECT2, and the BT_CONNECT/BT_DISCONN + default
 * cases of the upstream code. */
427 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
429 struct l2cap_conn *conn = chan->conn;
430 struct sock *sk = chan->sk;
432 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
434 switch (chan->state) {
436 l2cap_chan_cleanup_listen(sk);
438 l2cap_state_change(chan, BT_CLOSED);
439 sock_set_flag(sk, SOCK_ZAPPED);
/* Connected/configuring ACL channel: arm the disconnect timer and send
 * an L2CAP disconnect request to the peer. */
444 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
445 conn->hcon->type == ACL_LINK) {
446 __clear_chan_timer(chan);
447 __set_chan_timer(chan, sk->sk_sndtimeo);
448 l2cap_send_disconn_req(conn, chan, reason);
450 l2cap_chan_del(chan, reason);
/* Incoming connection still pending our response: reject it with
 * SEC_BLOCK when setup was deferred, BAD_PSM otherwise. */
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) {
456 struct l2cap_conn_rsp rsp;
459 if (bt_sk(sk)->defer_setup)
460 result = L2CAP_CR_SEC_BLOCK;
462 result = L2CAP_CR_BAD_PSM;
463 l2cap_state_change(chan, BT_DISCONN);
/* In the response, scid/dcid are from the remote's point of view. */
465 rsp.scid = cpu_to_le16(chan->dcid);
466 rsp.dcid = cpu_to_le16(chan->scid);
467 rsp.result = cpu_to_le16(result);
468 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
469 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
473 l2cap_chan_del(chan, reason);
478 l2cap_chan_del(chan, reason);
482 sock_set_flag(sk, SOCK_ZAPPED);
487 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
489 if (chan->chan_type == L2CAP_CHAN_RAW) {
490 switch (chan->sec_level) {
491 case BT_SECURITY_HIGH:
492 return HCI_AT_DEDICATED_BONDING_MITM;
493 case BT_SECURITY_MEDIUM:
494 return HCI_AT_DEDICATED_BONDING;
496 return HCI_AT_NO_BONDING;
498 } else if (chan->psm == cpu_to_le16(0x0001)) {
499 if (chan->sec_level == BT_SECURITY_LOW)
500 chan->sec_level = BT_SECURITY_SDP;
502 if (chan->sec_level == BT_SECURITY_HIGH)
503 return HCI_AT_NO_BONDING_MITM;
505 return HCI_AT_NO_BONDING;
507 switch (chan->sec_level) {
508 case BT_SECURITY_HIGH:
509 return HCI_AT_GENERAL_BONDING_MITM;
510 case BT_SECURITY_MEDIUM:
511 return HCI_AT_GENERAL_BONDING;
513 return HCI_AT_NO_BONDING;
518 /* Service level security */
519 static inline int l2cap_check_security(struct l2cap_chan *chan)
521 struct l2cap_conn *conn = chan->conn;
524 auth_type = l2cap_get_auth_type(chan);
526 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
529 static u8 l2cap_get_ident(struct l2cap_conn *conn)
533 /* Get next available identificator.
534 * 1 - 128 are used by kernel.
535 * 129 - 199 are reserved.
536 * 200 - 254 are used by utilities like l2ping, etc.
539 spin_lock_bh(&conn->lock);
541 if (++conn->tx_ident > 128)
546 spin_unlock_bh(&conn->lock);
551 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
553 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
556 BT_DBG("code 0x%2.2x", code);
561 if (lmp_no_flush_capable(conn->hcon->hdev))
562 flags = ACL_START_NO_FLUSH;
566 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
568 hci_send_acl(conn->hcon, skb, flags);
/* Build and send an ERTM supervisory (S) frame carrying `control`.
 * Merges pending final/poll bits, appends the FCS when CRC16 is
 * negotiated.  NOTE(review): local declarations (skb, count, hlen,
 * flags), the early return, the skb NULL check and the ACL_START else
 * branch are missing from this extract. */
571 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
574 struct l2cap_hdr *lh;
575 struct l2cap_conn *conn = chan->conn;
579 if (chan->state != BT_CONNECTED)
/* Header size depends on whether extended control fields were
 * negotiated. */
582 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
583 hlen = L2CAP_EXT_HDR_SIZE;
585 hlen = L2CAP_ENH_HDR_SIZE;
587 if (chan->fcs == L2CAP_FCS_CRC16)
588 hlen += L2CAP_FCS_SIZE;
590 BT_DBG("chan %p, control 0x%8.8x", chan, control);
592 count = min_t(unsigned int, conn->mtu, hlen);
594 control |= __set_sframe(chan);
/* Consume pending F/P bits exactly once. */
596 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
597 control |= __set_ctrl_final(chan);
599 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
600 control |= __set_ctrl_poll(chan);
602 skb = bt_skb_alloc(count, GFP_ATOMIC);
606 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
607 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
608 lh->cid = cpu_to_le16(chan->dcid);
610 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
/* FCS covers the whole PDU except the FCS field itself. */
612 if (chan->fcs == L2CAP_FCS_CRC16) {
613 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
614 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
617 if (lmp_no_flush_capable(conn->hcon->hdev))
618 flags = ACL_START_NO_FLUSH;
622 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
624 hci_send_acl(chan->conn->hcon, skb, flags);
627 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
629 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
630 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
631 set_bit(CONN_RNR_SENT, &chan->conn_state);
633 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
635 control |= __set_reqseq(chan, chan->buffer_seq);
637 l2cap_send_sframe(chan, control);
640 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
642 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment: once the peer's feature mask is
 * known, send a connect request (if security allows and none is
 * pending); otherwise first query the features with an info request.
 * NOTE(review): the early return after the FEAT_MASK_REQ_DONE test,
 * req.psm assignment and the sizeof(req) argument lines are missing
 * from this extract. */
645 static void l2cap_do_start(struct l2cap_chan *chan)
647 struct l2cap_conn *conn = chan->conn;
649 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
650 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
653 if (l2cap_check_security(chan) &&
654 __l2cap_no_conn_pending(chan)) {
655 struct l2cap_conn_req req;
656 req.scid = cpu_to_le16(chan->scid);
659 chan->ident = l2cap_get_ident(conn);
660 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
662 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
/* Features unknown yet: ask for the peer's feature mask and arm the
 * info-response timeout. */
666 struct l2cap_info_req req;
667 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
669 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
670 conn->info_ident = l2cap_get_ident(conn);
672 mod_timer(&conn->info_timer, jiffies +
673 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
675 l2cap_send_cmd(conn, conn->info_ident,
676 L2CAP_INFO_REQ, sizeof(req), &req);
680 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
682 u32 local_feat_mask = l2cap_feat_mask;
684 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
687 case L2CAP_MODE_ERTM:
688 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
689 case L2CAP_MODE_STREAMING:
690 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
696 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
699 struct l2cap_disconn_req req;
706 if (chan->mode == L2CAP_MODE_ERTM) {
707 __clear_retrans_timer(chan);
708 __clear_monitor_timer(chan);
709 __clear_ack_timer(chan);
712 req.dcid = cpu_to_le16(chan->dcid);
713 req.scid = cpu_to_le16(chan->scid);
714 l2cap_send_cmd(conn, l2cap_get_ident(conn),
715 L2CAP_DISCONN_REQ, sizeof(req), &req);
717 l2cap_state_change(chan, BT_DISCONN);
721 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its state machine:
 * BT_CONNECT channels get a connect request (or are closed when their
 * mode is unsupported), BT_CONNECT2 channels get a connect response and,
 * when accepted, the first configuration request.
 * NOTE(review): per-channel socket lock/unlock, `continue` statements,
 * req.psm, the conf-req buffer declaration and several sizeof()
 * argument lines are missing from this extract. */
722 static void l2cap_conn_start(struct l2cap_conn *conn)
724 struct l2cap_chan *chan, *tmp;
726 BT_DBG("conn %p", conn);
728 read_lock(&conn->chan_lock);
730 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
731 struct sock *sk = chan->sk;
/* Only connection-oriented channels take part in this walk. */
735 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
740 if (chan->state == BT_CONNECT) {
741 struct l2cap_conn_req req;
743 if (!l2cap_check_security(chan) ||
744 !__l2cap_no_conn_pending(chan)) {
/* Unsupported mode on a state-2 device: give up on this channel. */
749 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
750 && test_bit(CONF_STATE2_DEVICE,
751 &chan->conf_state)) {
752 /* l2cap_chan_close() calls list_del(chan)
753 * so release the lock */
754 read_unlock(&conn->chan_lock);
755 l2cap_chan_close(chan, ECONNRESET);
756 read_lock(&conn->chan_lock);
761 req.scid = cpu_to_le16(chan->scid);
764 chan->ident = l2cap_get_ident(conn);
765 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
767 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
770 } else if (chan->state == BT_CONNECT2) {
771 struct l2cap_conn_rsp rsp;
/* scid/dcid in the response are from the remote's perspective. */
773 rsp.scid = cpu_to_le16(chan->dcid);
774 rsp.dcid = cpu_to_le16(chan->scid);
776 if (l2cap_check_security(chan)) {
777 if (bt_sk(sk)->defer_setup) {
778 struct sock *parent = bt_sk(sk)->parent;
779 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
780 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
782 parent->sk_data_ready(parent, 0);
785 l2cap_state_change(chan, BT_CONFIG);
786 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
787 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not satisfied yet: report authentication pending. */
790 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
791 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
794 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only start configuration after a successful response, and only
 * once. */
797 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
798 rsp.result != L2CAP_CR_SUCCESS) {
803 set_bit(CONF_REQ_SENT, &chan->conf_state);
804 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
805 l2cap_build_conf_req(chan, buf), buf);
806 chan->num_conf_req++;
812 read_unlock(&conn->chan_lock);
815 /* Find socket with cid and source bdaddr.
816 * Returns closest match, locked.
818 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
820 struct l2cap_chan *c, *c1 = NULL;
822 read_lock(&chan_list_lock);
824 list_for_each_entry(c, &chan_list, global_l) {
825 struct sock *sk = c->sk;
827 if (state && c->state != state)
830 if (c->scid == cid) {
832 if (!bacmp(&bt_sk(sk)->src, src)) {
833 read_unlock(&chan_list_lock);
838 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
843 read_unlock(&chan_list_lock);
/* Accept an incoming LE connection: find the listener on the LE data
 * CID, create a child channel, attach it to the connection and wake the
 * parent.  NOTE(review): the conn->src argument line of the listener
 * lookup, the NULL checks for pchan/chan, parent/sk assignments and the
 * clean_up/done labels are missing from this extract. */
848 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
850 struct sock *parent, *sk;
851 struct l2cap_chan *chan, *pchan;
855 /* Check if we have socket listening on cid */
856 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
863 bh_lock_sock(parent);
865 /* Check for backlog size */
866 if (sk_acceptq_is_full(parent)) {
867 BT_DBG("backlog full %d", parent->sk_ack_backlog);
/* Ask the socket layer to create the child channel/socket. */
871 chan = pchan->ops->new_connection(pchan->data);
877 write_lock_bh(&conn->chan_lock);
/* Keep the hci_conn alive for the lifetime of the channel. */
879 hci_conn_hold(conn->hcon);
881 bacpy(&bt_sk(sk)->src, conn->src);
882 bacpy(&bt_sk(sk)->dst, conn->dst);
884 bt_accept_enqueue(parent, sk);
886 __l2cap_chan_add(conn, chan);
888 __set_chan_timer(chan, sk->sk_sndtimeo);
890 l2cap_state_change(chan, BT_CONNECTED);
891 parent->sk_data_ready(parent, 0);
893 write_unlock_bh(&conn->chan_lock);
896 bh_unlock_sock(parent);
899 static void l2cap_chan_ready(struct sock *sk)
901 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
902 struct sock *parent = bt_sk(sk)->parent;
904 BT_DBG("sk %p, parent %p", sk, parent);
906 chan->conf_state = 0;
907 __clear_chan_timer(chan);
909 l2cap_state_change(chan, BT_CONNECTED);
910 sk->sk_state_change(sk);
913 parent->sk_data_ready(parent, 0);
/* Called when the underlying link comes up: handle LE accept/security,
 * then complete or start every channel on the connection.
 * NOTE(review): per-channel bh_lock_sock/bh_unlock_sock lines are
 * missing from this extract. */
916 static void l2cap_conn_ready(struct l2cap_conn *conn)
918 struct l2cap_chan *chan;
920 BT_DBG("conn %p", conn);
/* Incoming LE link: hand off to the LE accept path. */
922 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
923 l2cap_le_conn_ready(conn);
/* Outgoing LE link: elevate security before data flows. */
925 if (conn->hcon->out && conn->hcon->type == LE_LINK)
926 smp_conn_security(conn, conn->hcon->pending_sec_level);
928 read_lock(&conn->chan_lock);
930 list_for_each_entry(chan, &conn->chan_l, list) {
931 struct sock *sk = chan->sk;
935 if (conn->hcon->type == LE_LINK) {
936 if (smp_conn_security(conn, chan->sec_level))
937 l2cap_chan_ready(sk);
/* Non-connection-oriented channels are ready as soon as the link is. */
939 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
940 __clear_chan_timer(chan);
941 l2cap_state_change(chan, BT_CONNECTED);
942 sk->sk_state_change(sk);
944 } else if (chan->state == BT_CONNECT)
945 l2cap_do_start(chan);
950 read_unlock(&conn->chan_lock);
953 /* Notify sockets that we cannot guaranty reliability anymore */
954 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
956 struct l2cap_chan *chan;
958 BT_DBG("conn %p", conn);
960 read_lock(&conn->chan_lock);
962 list_for_each_entry(chan, &conn->chan_l, list) {
963 struct sock *sk = chan->sk;
965 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
969 read_unlock(&conn->chan_lock);
972 static void l2cap_info_timeout(unsigned long arg)
974 struct l2cap_conn *conn = (void *) arg;
976 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
977 conn->info_ident = 0;
979 l2cap_conn_start(conn);
/* Tear down an entire L2CAP connection: delete every channel, stop
 * connection timers and free the conn.  NOTE(review): the NULL-conn
 * guard, per-channel socket locking, and the final kfree(conn) are
 * missing from this extract. */
982 static void l2cap_conn_del(struct hci_conn *hcon, int err)
984 struct l2cap_conn *conn = hcon->l2cap_data;
985 struct l2cap_chan *chan, *l;
991 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled PDU. */
993 kfree_skb(conn->rx_skb);
996 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
999 l2cap_chan_del(chan, err);
1001 chan->ops->close(chan->data);
1004 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1005 del_timer_sync(&conn->info_timer);
/* Pending SMP exchange: stop its timer and free its context. */
1007 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1008 del_timer(&conn->security_timer);
1009 smp_chan_destroy(conn);
1012 hcon->l2cap_data = NULL;
1016 static void security_timeout(unsigned long arg)
1018 struct l2cap_conn *conn = (void *) arg;
1020 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Get-or-create the l2cap_conn attached to an hci_conn, initialising
 * MTU, addresses, locks and the per-type timer.  NOTE(review): the
 * early return when conn already exists, the kzalloc NULL check, the
 * conn->hcon assignment and the final return are missing from this
 * extract. */
1023 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1025 struct l2cap_conn *conn = hcon->l2cap_data;
1030 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1034 hcon->l2cap_data = conn;
1037 BT_DBG("hcon %p conn %p", hcon, conn);
/* LE links may advertise their own MTU; fall back to the ACL MTU. */
1039 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1040 conn->mtu = hcon->hdev->le_mtu;
1042 conn->mtu = hcon->hdev->acl_mtu;
1044 conn->src = &hcon->hdev->bdaddr;
1045 conn->dst = &hcon->dst;
1047 conn->feat_mask = 0;
1049 spin_lock_init(&conn->lock);
1050 rwlock_init(&conn->chan_lock);
1052 INIT_LIST_HEAD(&conn->chan_l);
/* LE uses the SMP security timer; BR/EDR uses the info-request timer. */
1054 if (hcon->type == LE_LINK)
1055 setup_timer(&conn->security_timer, security_timeout,
1056 (unsigned long) conn);
1058 setup_timer(&conn->info_timer, l2cap_info_timeout,
1059 (unsigned long) conn);
/* 0x13 = remote user terminated connection (default reason). */
1061 conn->disc_reason = 0x13;
1066 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1068 write_lock_bh(&conn->chan_lock);
1069 __l2cap_chan_add(conn, chan);
1070 write_unlock_bh(&conn->chan_lock);
1073 /* ---- Socket interface ---- */
1075 /* Find socket with psm and source bdaddr.
1076 * Returns closest match.
1078 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1080 struct l2cap_chan *c, *c1 = NULL;
1082 read_lock(&chan_list_lock);
1084 list_for_each_entry(c, &chan_list, global_l) {
1085 struct sock *sk = c->sk;
1087 if (state && c->state != state)
1090 if (c->psm == psm) {
1092 if (!bacmp(&bt_sk(sk)->src, src)) {
1093 read_unlock(&chan_list_lock);
1098 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1103 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for chan: route to a local adapter,
 * create/attach the HCI link (LE or ACL by destination CID) and either
 * start L2CAP setup immediately or wait for the link to come up.
 * Returns 0 or a negative errno.  NOTE(review): locals (err,
 * auth_type), the hdev NULL check, IS_ERR(hcon)/conn error paths with
 * hci_conn_put, the `err = 0` success path and `return err` are missing
 * from this extract. */
1108 int l2cap_chan_connect(struct l2cap_chan *chan)
1110 struct sock *sk = chan->sk;
1111 bdaddr_t *src = &bt_sk(sk)->src;
1112 bdaddr_t *dst = &bt_sk(sk)->dst;
1113 struct l2cap_conn *conn;
1114 struct hci_conn *hcon;
1115 struct hci_dev *hdev;
1119 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1122 hdev = hci_get_route(dst, src);
1124 return -EHOSTUNREACH;
1126 hci_dev_lock_bh(hdev);
1128 auth_type = l2cap_get_auth_type(chan);
/* The fixed LE data CID selects an LE link; everything else is ACL. */
1130 if (chan->dcid == L2CAP_CID_LE_DATA)
1131 hcon = hci_connect(hdev, LE_LINK, dst,
1132 chan->sec_level, auth_type);
1134 hcon = hci_connect(hdev, ACL_LINK, dst,
1135 chan->sec_level, auth_type);
1138 err = PTR_ERR(hcon);
1142 conn = l2cap_conn_add(hcon, 0);
1149 /* Update source addr of the socket */
1150 bacpy(src, conn->src);
1152 l2cap_chan_add(conn, chan);
1154 l2cap_state_change(chan, BT_CONNECT);
1155 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Link already up: finish (or begin) L2CAP-level setup right away. */
1157 if (hcon->state == BT_CONNECTED) {
1158 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1159 __clear_chan_timer(chan);
1160 if (l2cap_check_security(chan))
1161 l2cap_state_change(chan, BT_CONNECTED);
1163 l2cap_do_start(chan);
1169 hci_dev_unlock_bh(hdev);
1174 int __l2cap_wait_ack(struct sock *sk)
1176 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1177 DECLARE_WAITQUEUE(wait, current);
1181 add_wait_queue(sk_sleep(sk), &wait);
1182 set_current_state(TASK_INTERRUPTIBLE);
1183 while (chan->unacked_frames > 0 && chan->conn) {
1187 if (signal_pending(current)) {
1188 err = sock_intr_errno(timeo);
1193 timeo = schedule_timeout(timeo);
1195 set_current_state(TASK_INTERRUPTIBLE);
1197 err = sock_error(sk);
1201 set_current_state(TASK_RUNNING);
1202 remove_wait_queue(sk_sleep(sk), &wait);
1206 static void l2cap_monitor_timeout(unsigned long arg)
1208 struct l2cap_chan *chan = (void *) arg;
1209 struct sock *sk = chan->sk;
1211 BT_DBG("chan %p", chan);
1214 if (chan->retry_count >= chan->remote_max_tx) {
1215 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1220 chan->retry_count++;
1221 __set_monitor_timer(chan);
1223 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1227 static void l2cap_retrans_timeout(unsigned long arg)
1229 struct l2cap_chan *chan = (void *) arg;
1230 struct sock *sk = chan->sk;
1232 BT_DBG("chan %p", chan);
1235 chan->retry_count = 1;
1236 __set_monitor_timer(chan);
1238 set_bit(CONN_WAIT_F, &chan->conn_state);
1240 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1244 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1246 struct sk_buff *skb;
1248 while ((skb = skb_peek(&chan->tx_q)) &&
1249 chan->unacked_frames) {
1250 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1253 skb = skb_dequeue(&chan->tx_q);
1256 chan->unacked_frames--;
1259 if (!chan->unacked_frames)
1260 __clear_retrans_timer(chan);
1263 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1265 struct hci_conn *hcon = chan->conn->hcon;
1268 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1270 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1271 lmp_no_flush_capable(hcon->hdev))
1272 flags = ACL_START_NO_FLUSH;
1276 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1277 hci_send_acl(hcon, skb, flags);
1280 static void l2cap_streaming_send(struct l2cap_chan *chan)
1282 struct sk_buff *skb;
1286 while ((skb = skb_dequeue(&chan->tx_q))) {
1287 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1288 control |= __set_txseq(chan, chan->next_tx_seq);
1289 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1291 if (chan->fcs == L2CAP_FCS_CRC16) {
1292 fcs = crc16(0, (u8 *)skb->data,
1293 skb->len - L2CAP_FCS_SIZE);
1294 put_unaligned_le16(fcs,
1295 skb->data + skb->len - L2CAP_FCS_SIZE);
1298 l2cap_do_send(chan, skb);
1300 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1304 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1306 struct sk_buff *skb, *tx_skb;
1310 skb = skb_peek(&chan->tx_q);
1315 if (bt_cb(skb)->tx_seq == tx_seq)
1318 if (skb_queue_is_last(&chan->tx_q, skb))
1321 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1323 if (chan->remote_max_tx &&
1324 bt_cb(skb)->retries == chan->remote_max_tx) {
1325 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1329 tx_skb = skb_clone(skb, GFP_ATOMIC);
1330 bt_cb(skb)->retries++;
1332 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1333 control &= __get_sar_mask(chan);
1335 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1336 control |= __set_ctrl_final(chan);
1338 control |= __set_reqseq(chan, chan->buffer_seq);
1339 control |= __set_txseq(chan, tx_seq);
1341 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1343 if (chan->fcs == L2CAP_FCS_CRC16) {
1344 fcs = crc16(0, (u8 *)tx_skb->data,
1345 tx_skb->len - L2CAP_FCS_SIZE);
1346 put_unaligned_le16(fcs,
1347 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1350 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send frames from tx_send_head while the TX window
 * has room, cloning each skb so the original stays queued for possible
 * retransmission.  Returns the number of frames sent (negative on
 * error).  NOTE(review): locals (control, fcs, nsent), the -ENOTCONN
 * early return, the loop's closing brace and the final return are
 * missing from this extract. */
1353 static int l2cap_ertm_send(struct l2cap_chan *chan)
1355 struct sk_buff *skb, *tx_skb;
1360 if (chan->state != BT_CONNECTED)
1363 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1365 if (chan->remote_max_tx &&
1366 bt_cb(skb)->retries == chan->remote_max_tx) {
1367 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1371 tx_skb = skb_clone(skb, GFP_ATOMIC);
1373 bt_cb(skb)->retries++;
/* Rebuild the control field: keep SAR bits, merge final/reqseq/txseq. */
1375 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1376 control &= __get_sar_mask(chan);
1378 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1379 control |= __set_ctrl_final(chan);
1381 control |= __set_reqseq(chan, chan->buffer_seq);
1382 control |= __set_txseq(chan, chan->next_tx_seq);
1384 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* skb_clone() shares the data buffer, so writing the FCS through
 * skb->data updates tx_skb's payload as well. */
1386 if (chan->fcs == L2CAP_FCS_CRC16) {
1387 fcs = crc16(0, (u8 *)skb->data,
1388 tx_skb->len - L2CAP_FCS_SIZE);
1389 put_unaligned_le16(fcs, skb->data +
1390 tx_skb->len - L2CAP_FCS_SIZE);
1393 l2cap_do_send(chan, tx_skb);
1395 __set_retrans_timer(chan);
1397 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1399 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* First transmission of this frame counts toward unacked_frames. */
1401 if (bt_cb(skb)->retries == 1)
1402 chan->unacked_frames++;
1404 chan->frames_sent++;
1406 if (skb_queue_is_last(&chan->tx_q, skb))
1407 chan->tx_send_head = NULL;
1409 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1417 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1421 if (!skb_queue_empty(&chan->tx_q))
1422 chan->tx_send_head = chan->tx_q.next;
1424 chan->next_tx_seq = chan->expected_ack_seq;
1425 ret = l2cap_ertm_send(chan);
1429 static void l2cap_send_ack(struct l2cap_chan *chan)
1433 control |= __set_reqseq(chan, chan->buffer_seq);
1435 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1436 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1437 set_bit(CONN_RNR_SENT, &chan->conn_state);
1438 l2cap_send_sframe(chan, control);
1442 if (l2cap_ertm_send(chan) > 0)
1445 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1446 l2cap_send_sframe(chan, control);
1449 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1451 struct srej_list *tail;
1454 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1455 control |= __set_ctrl_final(chan);
1457 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1458 control |= __set_reqseq(chan, tail->tx_seq);
1460 l2cap_send_sframe(chan, control);
1463 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1465 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1466 struct sk_buff **frag;
1469 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1475 /* Continuation fragments (no L2CAP header) */
1476 frag = &skb_shinfo(skb)->frag_list;
1478 count = min_t(unsigned int, conn->mtu, len);
1480 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1483 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1489 frag = &(*frag)->next;
1495 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1497 struct sock *sk = chan->sk;
1498 struct l2cap_conn *conn = chan->conn;
1499 struct sk_buff *skb;
1500 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1501 struct l2cap_hdr *lh;
1503 BT_DBG("sk %p len %d", sk, (int)len);
1505 count = min_t(unsigned int, (conn->mtu - hlen), len);
1506 skb = bt_skb_send_alloc(sk, count + hlen,
1507 msg->msg_flags & MSG_DONTWAIT, &err);
1509 return ERR_PTR(err);
1511 /* Create L2CAP header */
1512 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1513 lh->cid = cpu_to_le16(chan->dcid);
1514 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1515 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1517 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1518 if (unlikely(err < 0)) {
1520 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header followed by 'len' bytes of
 * user data (no PSM, no control field). Returns the skb or
 * ERR_PTR(-errno) on allocation/copy failure.
 */
1525 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1527 struct sock *sk = chan->sk;
1528 struct l2cap_conn *conn = chan->conn;
1529 struct sk_buff *skb;
1530 int err, count, hlen = L2CAP_HDR_SIZE;
1531 struct l2cap_hdr *lh;
1533 BT_DBG("sk %p len %d", sk, (int)len);
1535 count = min_t(unsigned int, (conn->mtu - hlen), len);
1536 skb = bt_skb_send_alloc(sk, count + hlen,
1537 msg->msg_flags & MSG_DONTWAIT, &err);
1539 return ERR_PTR(err);
1541 /* Create L2CAP header */
1542 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1543 lh->cid = cpu_to_le16(chan->dcid);
1544 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1546 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1547 if (unlikely(err < 0)) {
1549 return ERR_PTR(err);
/* Build an ERTM/streaming-mode I-frame PDU:
 *   L2CAP header + control field (extended size when FLAG_EXT_CTRL,
 *   otherwise enhanced) + optional 2-byte SDU length (start-of-SAR) +
 *   payload + 2-byte FCS placeholder when CRC16 is in use.
 * 'control' is written verbatim; 'sdulen', when non-zero, is appended
 * after the control field. Returns skb or ERR_PTR(-errno).
 */
1554 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1555 struct msghdr *msg, size_t len,
1556 u32 control, u16 sdulen)
1558 struct sock *sk = chan->sk;
1559 struct l2cap_conn *conn = chan->conn;
1560 struct sk_buff *skb;
1561 int err, count, hlen;
1562 struct l2cap_hdr *lh;
1564 BT_DBG("sk %p len %d", sk, (int)len);
1567 return ERR_PTR(-ENOTCONN);
/* Header size depends on whether the extended control field is in use */
1569 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1570 hlen = L2CAP_EXT_HDR_SIZE;
1572 hlen = L2CAP_ENH_HDR_SIZE;
1575 hlen += L2CAP_SDULEN_SIZE;
1577 if (chan->fcs == L2CAP_FCS_CRC16)
1578 hlen += L2CAP_FCS_SIZE;
1580 count = min_t(unsigned int, (conn->mtu - hlen), len);
1581 skb = bt_skb_send_alloc(sk, count + hlen,
1582 msg->msg_flags & MSG_DONTWAIT, &err);
1584 return ERR_PTR(err);
1586 /* Create L2CAP header */
1587 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1588 lh->cid = cpu_to_le16(chan->dcid);
1589 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1591 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1594 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1596 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1597 if (unlikely(err < 0)) {
1599 return ERR_PTR(err);
/* FCS placeholder — real CRC is presumably filled in at transmit time;
 * TODO confirm against the send path (not visible in this extract). */
1602 if (chan->fcs == L2CAP_FCS_CRC16)
1603 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1605 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a chain of I-frames:
 * a SAR_START frame carrying the total SDU length, then SAR_CONTINUE
 * frames of remote_mps bytes, then a final SAR_END frame. All frames
 * are built on a local queue first so a mid-stream failure can purge
 * cleanly, then spliced onto chan->tx_q. Returns PTR_ERR on failure.
 */
1609 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1611 struct sk_buff *skb;
1612 struct sk_buff_head sar_queue;
1616 skb_queue_head_init(&sar_queue);
1617 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
/* Start frame: sdulen argument carries the full SDU length */
1618 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1620 return PTR_ERR(skb);
1622 __skb_queue_tail(&sar_queue, skb);
1623 len -= chan->remote_mps;
1624 size += chan->remote_mps;
1629 if (len > chan->remote_mps) {
1630 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1631 buflen = chan->remote_mps;
1633 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1637 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* Failure mid-SDU: drop everything queued so far */
1639 skb_queue_purge(&sar_queue);
1640 return PTR_ERR(skb);
1643 __skb_queue_tail(&sar_queue, skb);
1647 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1648 if (chan->tx_send_head == NULL)
1649 chan->tx_send_head = sar_queue.next;
/* Channel send entry point. Dispatches on channel type/mode:
 *  - connectionless channels: build a connless PDU and send immediately;
 *  - basic mode: check omtu, build one PDU, send immediately;
 *  - ERTM/streaming: build an unsegmented I-frame if the SDU fits in
 *    remote_mps, otherwise SAR-segment it; then kick the streaming or
 *    ERTM transmit machinery (unless remote-busy/wait-F defers it).
 * Returns bytes sent or negative errno (per the visible return paths).
 */
1654 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1656 struct sk_buff *skb;
1660 /* Connectionless channel */
1661 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1662 skb = l2cap_create_connless_pdu(chan, msg, len);
1664 return PTR_ERR(skb);
1666 l2cap_do_send(chan, skb);
1670 switch (chan->mode) {
1671 case L2CAP_MODE_BASIC:
1672 /* Check outgoing MTU */
1673 if (len > chan->omtu)
1676 /* Create a basic PDU */
1677 skb = l2cap_create_basic_pdu(chan, msg, len);
1679 return PTR_ERR(skb);
1681 l2cap_do_send(chan, skb);
1685 case L2CAP_MODE_ERTM:
1686 case L2CAP_MODE_STREAMING:
1687 /* Entire SDU fits into one PDU */
1688 if (len <= chan->remote_mps) {
1689 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1690 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1693 return PTR_ERR(skb);
1695 __skb_queue_tail(&chan->tx_q, skb);
1697 if (chan->tx_send_head == NULL)
1698 chan->tx_send_head = skb;
1701 /* Segment SDU into multiples PDUs */
1702 err = l2cap_sar_segment_sdu(chan, msg, len);
1707 if (chan->mode == L2CAP_MODE_STREAMING) {
1708 l2cap_streaming_send(chan);
/* ERTM: don't transmit while remote is busy and we await the F-bit */
1713 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1714 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1719 err = l2cap_ertm_send(chan);
1726 BT_DBG("bad state %1.1x", chan->mode);
1733 /* Copy frame to all raw sockets on that connection */
/* Walks conn->chan_l under the read lock, clones the skb with
 * GFP_ATOMIC for every RAW-type channel, and hands the clone to the
 * channel's ->recv() op. Non-RAW channels are skipped.
 */
1734 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1736 struct sk_buff *nskb;
1737 struct l2cap_chan *chan;
1739 BT_DBG("conn %p", conn);
1741 read_lock(&conn->chan_lock);
1742 list_for_each_entry(chan, &conn->chan_l, list) {
1743 struct sock *sk = chan->sk;
1744 if (chan->chan_type != L2CAP_CHAN_RAW)
1747 /* Don't send frame to the socket it came from */
1750 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv() failure: clone is presumably freed here — lines elided */
1754 if (chan->ops->recv(chan->data, nskb))
1757 read_unlock(&conn->chan_lock);
1760 /* ---- L2CAP signalling commands ---- */
/* Allocate and fill an skb carrying one signalling command:
 * L2CAP header (cid = LE or BR/EDR signalling channel depending on link
 * type) + command header (code/ident/len) + dlen bytes of payload.
 * Payload beyond the first fragment is chained on frag_list, each piece
 * capped at conn->mtu. Returns the skb (NULL on alloc failure, per the
 * elided error paths).
 */
1761 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1762 u8 code, u8 ident, u16 dlen, void *data)
1764 struct sk_buff *skb, **frag;
1765 struct l2cap_cmd_hdr *cmd;
1766 struct l2cap_hdr *lh;
1769 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1770 conn, code, ident, dlen);
1772 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1773 count = min_t(unsigned int, conn->mtu, len);
1775 skb = bt_skb_alloc(count, GFP_ATOMIC);
1779 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1780 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1782 if (conn->hcon->type == LE_LINK)
1783 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1785 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1787 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1790 cmd->len = cpu_to_le16(dlen);
1793 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1794 memcpy(skb_put(skb, count), data, count);
1800 /* Continuation fragments (no L2CAP header) */
1801 frag = &skb_shinfo(skb)->frag_list;
1803 count = min_t(unsigned int, conn->mtu, len);
1805 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1809 memcpy(skb_put(*frag, count), data, count);
1814 frag = &(*frag)->next;
/* Parse one configuration option at *ptr: returns option type/length
 * via *type/*olen, and the value via *val — read as u8/le16/le32 for
 * 1/2/4-byte options, or as a pointer into the buffer for anything
 * larger. Returns the total bytes consumed (header + value), which the
 * caller subtracts from its remaining length.
 */
1824 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1826 struct l2cap_conf_opt *opt = *ptr;
1829 len = L2CAP_CONF_OPT_SIZE + opt->len;
1837 *val = *((u8 *) opt->val);
1841 *val = get_unaligned_le16(opt->val);
1845 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer instead of a value */
1849 *val = (unsigned long) opt->val;
1853 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr (type, len, value encoded as
 * u8/le16/le32 by size, or memcpy'd from the pointer 'val' for larger
 * payloads) and advance *ptr past it. Caller guarantees buffer space.
 */
1857 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1859 struct l2cap_conf_opt *opt = *ptr;
1861 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1868 *((u8 *) opt->val) = val;
1872 put_unaligned_le16(val, opt->val);
1876 put_unaligned_le32(val, opt->val);
1880 memcpy(opt->val, (void *) val, len);
1884 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification (EFS) option from the channel's
 * local parameters and append it via l2cap_add_conf_opt(). ERTM uses the
 * channel's negotiated id/stype with default access latency and flush
 * timeout; streaming mode advertises best-effort service.
 */
1887 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1889 struct l2cap_conf_efs efs;
1891 switch(chan->mode) {
1892 case L2CAP_MODE_ERTM:
1893 efs.id = chan->local_id;
1894 efs.stype = chan->local_stype;
1895 efs.msdu = cpu_to_le16(chan->local_msdu);
1896 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1897 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1898 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1901 case L2CAP_MODE_STREAMING:
1903 efs.stype = L2CAP_SERV_BESTEFFORT;
1904 efs.msdu = cpu_to_le16(chan->local_msdu);
1905 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1914 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1915 (unsigned long) &efs);
/* Ack-timer callback: when the delayed-ack timer fires, send a pending
 * acknowledgement while holding the socket's bottom-half lock.
 */
1918 static void l2cap_ack_timeout(unsigned long arg)
1920 struct l2cap_chan *chan = (void *) arg;
1922 bh_lock_sock(chan->sk);
1923 l2cap_send_ack(chan);
1924 bh_unlock_sock(chan->sk);
/* Initialise per-channel ERTM state: zero all sequence/window counters,
 * arm the retransmission, monitor and ack timers, set up the SREJ
 * receive queue and list, and route socket backlog processing through
 * the ERTM data receiver.
 */
1927 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1929 struct sock *sk = chan->sk;
1931 chan->expected_ack_seq = 0;
1932 chan->unacked_frames = 0;
1933 chan->buffer_seq = 0;
1934 chan->num_acked = 0;
1935 chan->frames_sent = 0;
1937 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1938 (unsigned long) chan);
1939 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1940 (unsigned long) chan);
1941 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1943 skb_queue_head_init(&chan->srej_q);
1945 INIT_LIST_HEAD(&chan->srej_l);
1948 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick the effective channel mode: keep ERTM/streaming only when the
 * remote's feature mask advertises support, otherwise fall back to
 * basic mode.
 */
1951 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1954 case L2CAP_MODE_STREAMING:
1955 case L2CAP_MODE_ERTM:
1956 if (l2cap_mode_supported(mode, remote_feat_mask))
1960 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled
 * and the peer advertising L2CAP_FEAT_EXT_WINDOW. */
1964 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1966 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the peer advertising L2CAP_FEAT_EXT_FLOW. */
1969 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
1971 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Decide the transmit-window configuration: if the requested window
 * exceeds the enhanced default and EWS is supported, switch the channel
 * to the extended control field with the extended window maximum;
 * otherwise clamp tx_win to the enhanced default.
 */
1974 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1976 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1977 __l2cap_ews_supported(chan)) {
1978 /* use extended control field */
1979 set_bit(FLAG_EXT_CTRL, &chan->flags);
1980 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
1982 chan->tx_win = min_t(u16, chan->tx_win,
1983 L2CAP_DEFAULT_TX_WINDOW);
1984 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build the outgoing Configure Request for 'chan' into 'data'.
 * On the first request, optionally enables EFS and downgrades the mode
 * via l2cap_select_mode() when the peer lacks ERTM/streaming support.
 * Emits: MTU option (when not the default), an RFC option tailored to
 * the chosen mode, and — for ERTM/streaming — optional EFS, FCS and EWS
 * options. Returns the request length (per the elided return, presumably
 * ptr - data — TODO confirm).
 */
1988 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1990 struct l2cap_conf_req *req = data;
1991 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1992 void *ptr = req->data;
1995 BT_DBG("chan %p", chan);
/* Mode selection happens only on the very first config exchange */
1997 if (chan->num_conf_req || chan->num_conf_rsp)
2000 switch (chan->mode) {
2001 case L2CAP_MODE_STREAMING:
2002 case L2CAP_MODE_ERTM:
2003 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2006 if (__l2cap_efs_supported(chan))
2007 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2011 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2016 if (chan->imtu != L2CAP_DEFAULT_MTU)
2017 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2019 switch (chan->mode) {
2020 case L2CAP_MODE_BASIC:
/* Basic mode needs no RFC option unless peer knows ERTM/streaming */
2021 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2022 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2025 rfc.mode = L2CAP_MODE_BASIC;
2027 rfc.max_transmit = 0;
2028 rfc.retrans_timeout = 0;
2029 rfc.monitor_timeout = 0;
2030 rfc.max_pdu_size = 0;
2032 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2033 (unsigned long) &rfc);
2036 case L2CAP_MODE_ERTM:
2037 rfc.mode = L2CAP_MODE_ERTM;
2038 rfc.max_transmit = chan->max_tx;
2039 rfc.retrans_timeout = 0;
2040 rfc.monitor_timeout = 0;
/* PDU size bounded by default max and link MTU minus overheads */
2042 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2043 L2CAP_EXT_HDR_SIZE -
2046 rfc.max_pdu_size = cpu_to_le16(size);
2048 l2cap_txwin_setup(chan);
2050 rfc.txwin_size = min_t(u16, chan->tx_win,
2051 L2CAP_DEFAULT_TX_WINDOW);
2053 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2054 (unsigned long) &rfc);
2056 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2057 l2cap_add_opt_efs(&ptr, chan);
2059 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2062 if (chan->fcs == L2CAP_FCS_NONE ||
2063 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2064 chan->fcs = L2CAP_FCS_NONE;
2065 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2068 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2069 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2073 case L2CAP_MODE_STREAMING:
2074 rfc.mode = L2CAP_MODE_STREAMING;
2076 rfc.max_transmit = 0;
2077 rfc.retrans_timeout = 0;
2078 rfc.monitor_timeout = 0;
2080 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2081 L2CAP_EXT_HDR_SIZE -
2084 rfc.max_pdu_size = cpu_to_le16(size);
2086 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2087 (unsigned long) &rfc);
2089 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2090 l2cap_add_opt_efs(&ptr, chan);
2092 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2095 if (chan->fcs == L2CAP_FCS_NONE ||
2096 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2097 chan->fcs = L2CAP_FCS_NONE;
2098 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2103 req->dcid = cpu_to_le16(chan->dcid);
2104 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (chan->conf_req /
 * conf_len) and build our Configure Response into 'data'.
 * First pass: walk the option list recording MTU, flush timeout, RFC,
 * FCS and EWS values; unknown non-hint options are echoed back with
 * result L2CAP_CONF_UNKNOWN. Then reconcile the requested RFC mode with
 * ours (rejecting with -ECONNREFUSED when irreconcilable), and on
 * success emit the agreed MTU/RFC options and mark MTU/MODE/OUTPUT
 * configuration done. Returns response length or negative errno.
 */
2109 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2111 struct l2cap_conf_rsp *rsp = data;
2112 void *ptr = rsp->data;
2113 void *req = chan->conf_req;
2114 int len = chan->conf_len;
2115 int type, hint, olen;
2117 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2118 u16 mtu = L2CAP_DEFAULT_MTU;
2119 u16 result = L2CAP_CONF_SUCCESS;
2122 BT_DBG("chan %p", chan);
2124 while (len >= L2CAP_CONF_OPT_SIZE) {
2125 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2127 hint = type & L2CAP_CONF_HINT;
2128 type &= L2CAP_CONF_MASK;
2131 case L2CAP_CONF_MTU:
2135 case L2CAP_CONF_FLUSH_TO:
2136 chan->flush_to = val;
2139 case L2CAP_CONF_QOS:
2142 case L2CAP_CONF_RFC:
2143 if (olen == sizeof(rfc))
2144 memcpy(&rfc, (void *) val, olen);
2147 case L2CAP_CONF_FCS:
2148 if (val == L2CAP_FCS_NONE)
2149 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2153 case L2CAP_CONF_EWS:
/* EWS requires high-speed support; refuse otherwise (elided check) */
2155 return -ECONNREFUSED;
2157 set_bit(FLAG_EXT_CTRL, &chan->flags);
2158 set_bit(CONF_EWS_RECV, &chan->conf_state);
2159 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2160 chan->remote_tx_win = val;
/* Unknown option that is not a hint: echo its type back */
2167 result = L2CAP_CONF_UNKNOWN;
2168 *((u8 *) ptr++) = type;
2173 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2176 switch (chan->mode) {
2177 case L2CAP_MODE_STREAMING:
2178 case L2CAP_MODE_ERTM:
2179 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2180 chan->mode = l2cap_select_mode(rfc.mode,
2181 chan->conn->feat_mask);
2185 if (chan->mode != rfc.mode)
2186 return -ECONNREFUSED;
2192 if (chan->mode != rfc.mode) {
2193 result = L2CAP_CONF_UNACCEPT;
2194 rfc.mode = chan->mode;
/* Only one round of mode renegotiation is allowed */
2196 if (chan->num_conf_rsp == 1)
2197 return -ECONNREFUSED;
2199 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2200 sizeof(rfc), (unsigned long) &rfc);
2204 if (result == L2CAP_CONF_SUCCESS) {
2205 /* Configure output options and let the other side know
2206 * which ones we don't like. */
2208 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2209 result = L2CAP_CONF_UNACCEPT;
2212 set_bit(CONF_MTU_DONE, &chan->conf_state);
2214 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2217 case L2CAP_MODE_BASIC:
2218 chan->fcs = L2CAP_FCS_NONE;
2219 set_bit(CONF_MODE_DONE, &chan->conf_state);
2222 case L2CAP_MODE_ERTM:
2223 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2224 chan->remote_tx_win = rfc.txwin_size;
2226 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2228 chan->remote_max_tx = rfc.max_transmit;
2230 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2232 L2CAP_EXT_HDR_SIZE -
2235 rfc.max_pdu_size = cpu_to_le16(size);
2236 chan->remote_mps = size;
2238 rfc.retrans_timeout =
2239 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2240 rfc.monitor_timeout =
2241 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2243 set_bit(CONF_MODE_DONE, &chan->conf_state);
2245 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2246 sizeof(rfc), (unsigned long) &rfc);
2250 case L2CAP_MODE_STREAMING:
2251 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2253 L2CAP_EXT_HDR_SIZE -
2256 rfc.max_pdu_size = cpu_to_le16(size);
2257 chan->remote_mps = size;
2259 set_bit(CONF_MODE_DONE, &chan->conf_state);
2261 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2262 sizeof(rfc), (unsigned long) &rfc);
2267 result = L2CAP_CONF_UNACCEPT;
2269 memset(&rfc, 0, sizeof(rfc));
2270 rfc.mode = chan->mode;
2273 if (result == L2CAP_CONF_SUCCESS)
2274 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2276 rsp->scid = cpu_to_le16(chan->dcid);
2277 rsp->result = cpu_to_le16(result);
2278 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response ('rsp', 'len' bytes) and build a
 * follow-up Configure Request into 'data', adopting or re-proposing
 * each option (MTU, flush timeout, RFC, EWS). Refuses (-ECONNREFUSED)
 * when the peer's RFC mode conflicts with a fixed local mode. On overall
 * success, copies negotiated ERTM/streaming timeouts and MPS into the
 * channel. '*result' carries the running accept/unaccept status.
 * Returns the new request length (elided return line).
 */
2283 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2285 struct l2cap_conf_req *req = data;
2286 void *ptr = req->data;
2289 struct l2cap_conf_rfc rfc;
2291 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2293 while (len >= L2CAP_CONF_OPT_SIZE) {
2294 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2297 case L2CAP_CONF_MTU:
/* Peer proposed a too-small MTU: counter with our minimum */
2298 if (val < L2CAP_DEFAULT_MIN_MTU) {
2299 *result = L2CAP_CONF_UNACCEPT;
2300 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2303 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2306 case L2CAP_CONF_FLUSH_TO:
2307 chan->flush_to = val;
2308 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2312 case L2CAP_CONF_RFC:
2313 if (olen == sizeof(rfc))
2314 memcpy(&rfc, (void *)val, olen);
2316 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2317 rfc.mode != chan->mode)
2318 return -ECONNREFUSED;
2322 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2323 sizeof(rfc), (unsigned long) &rfc);
2326 case L2CAP_CONF_EWS:
2327 chan->tx_win = min_t(u16, val,
2328 L2CAP_DEFAULT_EXT_WINDOW);
2329 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS,
2335 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2336 return -ECONNREFUSED;
2338 chan->mode = rfc.mode;
2340 if (*result == L2CAP_CONF_SUCCESS) {
2342 case L2CAP_MODE_ERTM:
2343 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2344 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2345 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2347 case L2CAP_MODE_STREAMING:
2348 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2352 req->dcid = cpu_to_le16(chan->dcid);
2353 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal Configure Response header (scid = our dcid, result,
 * flags) into 'data'. Returns the response length (elided return line).
 */
2358 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2360 struct l2cap_conf_rsp *rsp = data;
2361 void *ptr = rsp->data;
2363 BT_DBG("chan %p", chan);
2365 rsp->scid = cpu_to_le16(chan->dcid);
2366 rsp->result = cpu_to_le16(result);
2367 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect Response (CR_SUCCESS) for a channel whose
 * acceptance was postponed (e.g. defer_setup), then kick off
 * configuration by sending our first Configure Request — unless one was
 * already sent (CONF_REQ_SENT test-and-set).
 */
2372 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2374 struct l2cap_conn_rsp rsp;
2375 struct l2cap_conn *conn = chan->conn;
2378 rsp.scid = cpu_to_le16(chan->dcid);
2379 rsp.dcid = cpu_to_le16(chan->scid);
2380 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2381 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2382 l2cap_send_cmd(conn, chan->ident,
2383 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2385 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2388 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2389 l2cap_build_conf_req(chan, buf), buf);
2390 chan->num_conf_req++;
/* Extract the RFC option from a successful Configure Response and copy
 * the agreed retransmission/monitor timeouts and max PDU size into the
 * channel. No-op for channels not in ERTM or streaming mode.
 */
2393 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2397 struct l2cap_conf_rfc rfc;
2399 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2401 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2404 while (len >= L2CAP_CONF_OPT_SIZE) {
2405 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2408 case L2CAP_CONF_RFC:
2409 if (olen == sizeof(rfc))
2410 memcpy(&rfc, (void *)val, olen);
2417 case L2CAP_MODE_ERTM:
2418 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2419 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2420 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2422 case L2CAP_MODE_STREAMING:
2423 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject. If it rejects our outstanding
 * Information Request (matching ident while the feature-mask request is
 * pending), cancel the info timer, mark feature discovery done, and
 * proceed with connection start-up anyway.
 */
2427 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2429 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2431 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2434 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2435 cmd->ident == conn->info_ident) {
2436 del_timer(&conn->info_timer);
2438 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2439 conn->info_ident = 0;
2441 l2cap_conn_start(conn);
/* Handle an incoming Connect Request.
 * Looks up a listening channel for the PSM, verifies link security
 * (except for SDP, PSM 0x0001), checks the accept backlog, creates a
 * new child channel, rejects duplicate remote CIDs, and enqueues the
 * child on the parent's accept queue. The response result/status depend
 * on security state and defer_setup. Always replies with a Connect
 * Response, may additionally trigger a feature-mask Information Request
 * and/or our first Configure Request.
 */
2447 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2449 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2450 struct l2cap_conn_rsp rsp;
2451 struct l2cap_chan *chan = NULL, *pchan;
2452 struct sock *parent, *sk = NULL;
2453 int result, status = L2CAP_CS_NO_INFO;
2455 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2456 __le16 psm = req->psm;
2458 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2460 /* Check if we have socket listening on psm */
2461 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2463 result = L2CAP_CR_BAD_PSM;
2469 bh_lock_sock(parent);
2471 /* Check if the ACL is secure enough (if not SDP) */
2472 if (psm != cpu_to_le16(0x0001) &&
2473 !hci_conn_check_link_mode(conn->hcon)) {
2474 conn->disc_reason = 0x05;
2475 result = L2CAP_CR_SEC_BLOCK;
2479 result = L2CAP_CR_NO_MEM;
2481 /* Check for backlog size */
2482 if (sk_acceptq_is_full(parent)) {
2483 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2487 chan = pchan->ops->new_connection(pchan->data);
2493 write_lock_bh(&conn->chan_lock);
2495 /* Check if we already have channel with that dcid */
2496 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2497 write_unlock_bh(&conn->chan_lock);
2498 sock_set_flag(sk, SOCK_ZAPPED);
2499 chan->ops->close(chan->data);
2503 hci_conn_hold(conn->hcon);
2505 bacpy(&bt_sk(sk)->src, conn->src);
2506 bacpy(&bt_sk(sk)->dst, conn->dst);
2510 bt_accept_enqueue(parent, sk);
2512 __l2cap_chan_add(conn, chan);
2516 __set_chan_timer(chan, sk->sk_sndtimeo);
2518 chan->ident = cmd->ident;
2520 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2521 if (l2cap_check_security(chan)) {
/* defer_setup: report authorization pending, wake the listener */
2522 if (bt_sk(sk)->defer_setup) {
2523 l2cap_state_change(chan, BT_CONNECT2);
2524 result = L2CAP_CR_PEND;
2525 status = L2CAP_CS_AUTHOR_PEND;
2526 parent->sk_data_ready(parent, 0);
2528 l2cap_state_change(chan, BT_CONFIG);
2529 result = L2CAP_CR_SUCCESS;
2530 status = L2CAP_CS_NO_INFO;
2533 l2cap_state_change(chan, BT_CONNECT2);
2534 result = L2CAP_CR_PEND;
2535 status = L2CAP_CS_AUTHEN_PEND;
2538 l2cap_state_change(chan, BT_CONNECT2);
2539 result = L2CAP_CR_PEND;
2540 status = L2CAP_CS_NO_INFO;
2543 write_unlock_bh(&conn->chan_lock);
2546 bh_unlock_sock(parent);
2549 rsp.scid = cpu_to_le16(scid);
2550 rsp.dcid = cpu_to_le16(dcid);
2551 rsp.result = cpu_to_le16(result);
2552 rsp.status = cpu_to_le16(status);
2553 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature discovery not done yet: start it now */
2555 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2556 struct l2cap_info_req info;
2557 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2559 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2560 conn->info_ident = l2cap_get_ident(conn);
2562 mod_timer(&conn->info_timer, jiffies +
2563 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2565 l2cap_send_cmd(conn, conn->info_ident,
2566 L2CAP_INFO_REQ, sizeof(info), &info);
2569 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2570 result == L2CAP_CR_SUCCESS) {
2572 set_bit(CONF_REQ_SENT, &chan->conf_state);
2573 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2574 l2cap_build_conf_req(chan, buf), buf);
2575 chan->num_conf_req++;
/* Handle an incoming Connect Response. Locates the channel by scid (or
 * by command ident for a pending result). On CR_SUCCESS, moves to
 * BT_CONFIG and sends our first Configure Request; on a pending result,
 * marks CONF_CONNECT_PEND; otherwise tears the channel down with
 * ECONNREFUSED (deferred via a short timer when the socket is
 * user-locked).
 */
2581 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2583 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2584 u16 scid, dcid, result, status;
2585 struct l2cap_chan *chan;
2589 scid = __le16_to_cpu(rsp->scid);
2590 dcid = __le16_to_cpu(rsp->dcid);
2591 result = __le16_to_cpu(rsp->result);
2592 status = __le16_to_cpu(rsp->status);
2594 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2597 chan = l2cap_get_chan_by_scid(conn, scid);
2601 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2609 case L2CAP_CR_SUCCESS:
2610 l2cap_state_change(chan, BT_CONFIG);
2613 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2615 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2618 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2619 l2cap_build_conf_req(chan, req), req);
2620 chan->num_conf_req++;
2624 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2628 /* don't delete l2cap channel if sk is owned by user */
2629 if (sock_owned_by_user(sk)) {
2630 l2cap_state_change(chan, BT_DISCONN);
2631 __clear_chan_timer(chan);
2632 __set_chan_timer(chan, HZ / 5);
2636 l2cap_chan_del(chan, ECONNREFUSED);
/* Choose the channel's FCS setting after configuration: FCS applies
 * only to ERTM/streaming, and defaults to CRC16 unless the peer asked
 * for no FCS (CONF_NO_FCS_RECV).
 */
2644 static inline void set_default_fcs(struct l2cap_chan *chan)
2646 /* FCS is enabled only in ERTM or streaming mode, if one or both
2649 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2650 chan->fcs = L2CAP_FCS_NONE;
2651 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2652 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request. Rejects with INVALID_CID when
 * the channel is in the wrong state, and with CONF_REJECT when the
 * accumulated request would overflow chan->conf_req. Accumulates the
 * option data; if the continuation flag (0x0001) is set, just sends an
 * empty success response. On a complete request, parses it, responds,
 * and — once both input and output configuration are done — finishes
 * ERTM init and marks the channel connected. Also sends our own first
 * Configure Request if not sent yet.
 */
2655 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2657 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2660 struct l2cap_chan *chan;
2664 dcid = __le16_to_cpu(req->dcid);
2665 flags = __le16_to_cpu(req->flags);
2667 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2669 chan = l2cap_get_chan_by_scid(conn, dcid);
2675 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2676 struct l2cap_cmd_rej_cid rej;
2678 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2679 rej.scid = cpu_to_le16(chan->scid);
2680 rej.dcid = cpu_to_le16(chan->dcid);
2682 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2687 /* Reject if config buffer is too small. */
2688 len = cmd_len - sizeof(*req);
2689 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2690 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2691 l2cap_build_conf_rsp(chan, rsp,
2692 L2CAP_CONF_REJECT, flags), rsp);
2697 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2698 chan->conf_len += len;
2700 if (flags & 0x0001) {
2701 /* Incomplete config. Send empty response. */
2702 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2703 l2cap_build_conf_rsp(chan, rsp,
2704 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2708 /* Complete config. */
2709 len = l2cap_parse_conf_req(chan, rsp);
2711 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2715 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2716 chan->num_conf_rsp++;
2718 /* Reset config buffer. */
2721 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2724 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2725 set_default_fcs(chan);
2727 l2cap_state_change(chan, BT_CONNECTED);
2729 chan->next_tx_seq = 0;
2730 chan->expected_tx_seq = 0;
2731 skb_queue_head_init(&chan->tx_q);
2732 if (chan->mode == L2CAP_MODE_ERTM)
2733 l2cap_ertm_init(chan);
2735 l2cap_chan_ready(sk);
2739 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2741 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2742 l2cap_build_conf_req(chan, buf), buf);
2743 chan->num_conf_req++;
/* Handle an incoming Configure Response. On success, read back the
 * negotiated RFC values. On UNACCEPT (within the retry limit), reparse
 * and resend a new Configure Request; otherwise disconnect with
 * ECONNRESET. Once input+output configuration are both done, finish
 * ERTM init and mark the channel connected.
 */
2751 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2753 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2754 u16 scid, flags, result;
2755 struct l2cap_chan *chan;
2757 int len = cmd->len - sizeof(*rsp);
2759 scid = __le16_to_cpu(rsp->scid);
2760 flags = __le16_to_cpu(rsp->flags);
2761 result = __le16_to_cpu(rsp->result);
2763 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2764 scid, flags, result);
2766 chan = l2cap_get_chan_by_scid(conn, scid);
2773 case L2CAP_CONF_SUCCESS:
2774 l2cap_conf_rfc_get(chan, rsp->data, len);
2777 case L2CAP_CONF_UNACCEPT:
2778 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard against an oversized response body overflowing 'req' */
2781 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2782 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2786 /* throw out any old stored conf requests */
2787 result = L2CAP_CONF_SUCCESS;
2788 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2791 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2795 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2796 L2CAP_CONF_REQ, len, req);
2797 chan->num_conf_req++;
2798 if (result != L2CAP_CONF_SUCCESS)
2804 sk->sk_err = ECONNRESET;
2805 __set_chan_timer(chan, HZ * 5);
2806 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2813 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2815 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2816 set_default_fcs(chan);
2818 l2cap_state_change(chan, BT_CONNECTED);
2819 chan->next_tx_seq = 0;
2820 chan->expected_tx_seq = 0;
2821 skb_queue_head_init(&chan->tx_q);
2822 if (chan->mode == L2CAP_MODE_ERTM)
2823 l2cap_ertm_init(chan);
2825 l2cap_chan_ready(sk);
/* Handle an incoming Disconnect Request: look up the channel by our
 * scid, echo a Disconnect Response, shut the socket down, and delete
 * the channel with ECONNRESET (deferred via a short timer when the
 * socket is user-locked).
 */
2833 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2835 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2836 struct l2cap_disconn_rsp rsp;
2838 struct l2cap_chan *chan;
2841 scid = __le16_to_cpu(req->scid);
2842 dcid = __le16_to_cpu(req->dcid);
2844 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2846 chan = l2cap_get_chan_by_scid(conn, dcid);
2852 rsp.dcid = cpu_to_le16(chan->scid);
2853 rsp.scid = cpu_to_le16(chan->dcid);
2854 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2856 sk->sk_shutdown = SHUTDOWN_MASK;
2858 /* don't delete l2cap channel if sk is owned by user */
2859 if (sock_owned_by_user(sk)) {
2860 l2cap_state_change(chan, BT_DISCONN);
2861 __clear_chan_timer(chan);
2862 __set_chan_timer(chan, HZ / 5);
2867 l2cap_chan_del(chan, ECONNRESET);
2870 chan->ops->close(chan->data);
/* Handle an incoming Disconnect Response: look up the channel by scid
 * and delete it (error 0 — a clean, locally-initiated disconnect),
 * deferring via a short timer when the socket is user-locked.
 */
2874 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2876 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2878 struct l2cap_chan *chan;
2881 scid = __le16_to_cpu(rsp->scid);
2882 dcid = __le16_to_cpu(rsp->dcid);
2884 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2886 chan = l2cap_get_chan_by_scid(conn, scid);
2892 /* don't delete l2cap channel if sk is owned by user */
2893 if (sock_owned_by_user(sk)) {
2894 l2cap_state_change(chan,BT_DISCONN);
2895 __clear_chan_timer(chan);
2896 __set_chan_timer(chan, HZ / 5);
2901 l2cap_chan_del(chan, 0);
2904 chan->ops->close(chan->data);
/* Handle an incoming Information Request. Answers:
 *  - IT_FEAT_MASK: our feature mask (base l2cap_feat_mask plus
 *    ERTM/streaming and, behind the elided conditionals, presumably the
 *    high-speed extended-flow/window bits — TODO confirm guards);
 *  - IT_FIXED_CHAN: the fixed-channel bitmap;
 *  - anything else: IR_NOTSUPP.
 */
2908 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2910 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2913 type = __le16_to_cpu(req->type);
2915 BT_DBG("type 0x%4.4x", type);
2917 if (type == L2CAP_IT_FEAT_MASK) {
2919 u32 feat_mask = l2cap_feat_mask;
2920 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2921 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2922 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2924 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2927 feat_mask |= L2CAP_FEAT_EXT_FLOW
2928 | L2CAP_FEAT_EXT_WINDOW;
2930 put_unaligned_le32(feat_mask, rsp->data);
2931 l2cap_send_cmd(conn, cmd->ident,
2932 L2CAP_INFO_RSP, sizeof(buf), buf);
2933 } else if (type == L2CAP_IT_FIXED_CHAN) {
2935 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2936 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2937 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2938 memcpy(buf + 4, l2cap_fixed_chan, 8);
2939 l2cap_send_cmd(conn, cmd->ident,
2940 L2CAP_INFO_RSP, sizeof(buf), buf);
2942 struct l2cap_info_rsp rsp;
2943 rsp.type = cpu_to_le16(type);
2944 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2945 l2cap_send_cmd(conn, cmd->ident,
2946 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming Information Response (ident must match our
 * outstanding request; ignored once feature discovery is done).
 * On a feature-mask answer, record conn->feat_mask and — if the peer
 * supports fixed channels — chain a fixed-channel Information Request;
 * otherwise (or after the fixed-channel answer) mark discovery done and
 * start pending connections.
 */
2952 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2954 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2957 type = __le16_to_cpu(rsp->type);
2958 result = __le16_to_cpu(rsp->result);
2960 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2962 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2963 if (cmd->ident != conn->info_ident ||
2964 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2967 del_timer(&conn->info_timer);
2969 if (result != L2CAP_IR_SUCCESS) {
2970 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2971 conn->info_ident = 0;
2973 l2cap_conn_start(conn);
2978 if (type == L2CAP_IT_FEAT_MASK) {
2979 conn->feat_mask = get_unaligned_le32(rsp->data);
2981 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2982 struct l2cap_info_req req;
2983 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2985 conn->info_ident = l2cap_get_ident(conn);
2987 l2cap_send_cmd(conn, conn->info_ident,
2988 L2CAP_INFO_REQ, sizeof(req), &req);
2990 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2991 conn->info_ident = 0;
2993 l2cap_conn_start(conn);
2995 } else if (type == L2CAP_IT_FIXED_CHAN) {
2996 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2997 conn->info_ident = 0;
2999 l2cap_conn_start(conn);
/* Validate LE connection-parameter-update values against the allowed
 * ranges: interval min/max (6..3200, min <= max), supervision timeout
 * multiplier (10..3200), timeout strictly greater than max interval *8,
 * and slave latency within both the absolute cap (499) and the value
 * derivable from the timeout. Returns 0 when acceptable (elided
 * returns presumably yield -ERANGE/-EINVAL — TODO confirm).
 */
3005 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3010 if (min > max || min < 6 || max > 3200)
3013 if (to_multiplier < 10 || to_multiplier > 3200)
3016 if (max >= to_multiplier * 8)
3019 max_latency = (to_multiplier * 8 / max) - 1;
3020 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request from the slave.
 * Only the master may act on it; the parameters are range-checked and a
 * response (accepted/rejected) is always sent back.  On acceptance the
 * new parameters are pushed down to the controller. */
3026 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3027 struct l2cap_cmd_hdr *cmd, u8 *data)
3029 struct hci_conn *hcon = conn->hcon;
3030 struct l2cap_conn_param_update_req *req;
3031 struct l2cap_conn_param_update_rsp rsp;
3032 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the connection master may apply parameter updates. */
3035 if (!(hcon->link_mode & HCI_LM_MASTER))
/* Reject malformed commands whose payload is not exactly one request. */
3038 cmd_len = __le16_to_cpu(cmd->len);
3039 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3042 req = (struct l2cap_conn_param_update_req *) data;
3043 min = __le16_to_cpu(req->min);
3044 max = __le16_to_cpu(req->max);
3045 latency = __le16_to_cpu(req->latency);
3046 to_multiplier = __le16_to_cpu(req->to_multiplier);
3048 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3049 min, max, latency, to_multiplier);
3051 memset(&rsp, 0, sizeof(rsp));
3053 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3055 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3057 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3059 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters were valid: ask the controller to apply them. */
3063 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch a single BR/EDR signalling command to its handler.
 * Echo requests are answered inline; unknown opcodes are logged and
 * (in the full function) rejected by the caller via the returned error. */
3068 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3069 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3073 switch (cmd->code) {
3074 case L2CAP_COMMAND_REJ:
3075 l2cap_command_rej(conn, cmd, data);
3078 case L2CAP_CONN_REQ:
3079 err = l2cap_connect_req(conn, cmd, data);
3082 case L2CAP_CONN_RSP:
3083 err = l2cap_connect_rsp(conn, cmd, data);
3086 case L2CAP_CONF_REQ:
3087 err = l2cap_config_req(conn, cmd, cmd_len, data);
3090 case L2CAP_CONF_RSP:
3091 err = l2cap_config_rsp(conn, cmd, data);
3094 case L2CAP_DISCONN_REQ:
3095 err = l2cap_disconnect_req(conn, cmd, data);
3098 case L2CAP_DISCONN_RSP:
3099 err = l2cap_disconnect_rsp(conn, cmd, data);
/* Echo: bounce the payload straight back with the same ident. */
3102 case L2CAP_ECHO_REQ:
3103 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3106 case L2CAP_ECHO_RSP:
3109 case L2CAP_INFO_REQ:
3110 err = l2cap_information_req(conn, cmd, data);
3113 case L2CAP_INFO_RSP:
3114 err = l2cap_information_rsp(conn, cmd, data);
3118 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signalling command.  Only the connection
 * parameter update request needs real handling here; rejects and
 * update responses are accepted silently, everything else is an error. */
3126 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3127 struct l2cap_cmd_hdr *cmd, u8 *data)
3129 switch (cmd->code) {
3130 case L2CAP_COMMAND_REJ:
3133 case L2CAP_CONN_PARAM_UPDATE_REQ:
3134 return l2cap_conn_param_update_req(conn, cmd, data);
3136 case L2CAP_CONN_PARAM_UPDATE_RSP:
3140 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signalling channel payload: an skb may carry several
 * concatenated commands, each prefixed by an l2cap_cmd_hdr.  Each command
 * is routed to the LE or BR/EDR dispatcher depending on the link type;
 * handler failures are answered with a Command Reject. */
3145 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3146 struct sk_buff *skb)
3148 u8 *data = skb->data;
3150 struct l2cap_cmd_hdr cmd;
/* Mirror raw signalling traffic to any raw sockets first. */
3153 l2cap_raw_recv(conn, skb);
3155 while (len >= L2CAP_CMD_HDR_SIZE) {
3157 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3158 data += L2CAP_CMD_HDR_SIZE;
3159 len -= L2CAP_CMD_HDR_SIZE;
3161 cmd_len = le16_to_cpu(cmd.len);
3163 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Drop commands whose declared length overruns the buffer or
 * whose ident is the reserved value 0. */
3165 if (cmd_len > len || !cmd.ident) {
3166 BT_DBG("corrupted command");
3170 if (conn->hcon->type == LE_LINK)
3171 err = l2cap_le_sig_cmd(conn, &cmd, data);
3173 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3176 struct l2cap_cmd_rej_unk rej;
3178 BT_ERR("Wrong link type (%d)", err);
3180 /* FIXME: Map err to a valid reason */
3181 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3182 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the trailing CRC16 FCS of an ERTM/streaming frame.
 * The CRC covers the L2CAP header (enhanced or extended control field)
 * plus the payload; nonzero result means the frame is corrupt. */
3192 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3194 u16 our_fcs, rcv_fcs;
3197 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3198 hdr_size = L2CAP_EXT_HDR_SIZE;
3200 hdr_size = L2CAP_ENH_HDR_SIZE;
3202 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off the payload; the two FCS bytes are still
 * physically present after skb_trim(), so they can be read at
 * the new tail (skb->data + skb->len). */
3203 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3204 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
/* Recompute over header + payload; skb->data was advanced past
 * the header by the caller, hence the negative offset. */
3205 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3207 if (our_fcs != rcv_fcs)
/* Answer a poll (P-bit) from the peer: send RNR if we are locally busy,
 * otherwise flush pending I-frames (or retransmissions), and fall back
 * to a plain RR if nothing at all was transmitted. */
3213 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3217 chan->frames_sent = 0;
3219 control |= __set_reqseq(chan, chan->buffer_seq);
3221 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3222 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3223 l2cap_send_sframe(chan, control);
3224 set_bit(CONN_RNR_SENT, &chan->conn_state);
3227 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3228 l2cap_retransmit_frames(chan);
3230 l2cap_ertm_send(chan);
/* Nothing went out and we are not busy: still must answer the
 * poll, so send a bare RR. */
3232 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3233 chan->frames_sent == 0) {
3234 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3235 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ reassembly queue, keeping
 * the queue sorted by tx_seq offset relative to buffer_seq.  Duplicate
 * sequence numbers are detected so the caller can drop the frame. */
3239 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3241 struct sk_buff *next_skb;
3242 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequence number and SAR bits in the skb control block for
 * later reassembly. */
3244 bt_cb(skb)->tx_seq = tx_seq;
3245 bt_cb(skb)->sar = sar;
/* Empty queue: trivial append. */
3247 next_skb = skb_peek(&chan->srej_q);
3249 __skb_queue_tail(&chan->srej_q, skb);
3253 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
/* Duplicate tx_seq already queued. */
3256 if (bt_cb(next_skb)->tx_seq == tx_seq)
3259 next_tx_seq_offset = __seq_offset(chan,
3260 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
/* Found the first queued frame with a larger offset: insert before it. */
3262 if (next_tx_seq_offset > tx_seq_offset) {
3263 __skb_queue_before(&chan->srej_q, next_skb, skb);
3267 if (skb_queue_is_last(&chan->srej_q, next_skb))
3270 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Largest offset seen so far: append at the tail. */
3272 __skb_queue_tail(&chan->srej_q, skb);
/* Chain a continuation fragment onto the SDU currently being
 * reassembled, using the skb frag_list, and keep the head skb's
 * accounting (len/data_len/truesize) consistent.  *last_frag tracks the
 * list tail so appends stay O(1). */
3277 static void append_skb_frag(struct sk_buff *skb,
3278 struct sk_buff *new_frag, struct sk_buff **last_frag)
3280 /* skb->len reflects data in skb as well as all fragments
3281 * skb->data_len reflects only data in fragments
3283 if (!skb_has_frag_list(skb))
3284 skb_shinfo(skb)->frag_list = new_frag;
3286 new_frag->next = NULL;
3288 (*last_frag)->next = new_frag;
3289 *last_frag = new_frag;
3291 skb->len += new_frag->len;
3292 skb->data_len += new_frag->len;
3293 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the SAR bits in the
 * control field.  Unsegmented frames are delivered straight to the
 * socket layer; START/CONTINUE/END frames are accumulated on chan->sdu
 * and delivered when the declared SDU length is reached.  Oversized or
 * inconsistent SDUs cause the partial buffer to be dropped. */
3296 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3300 switch (__get_ctrl_sar(chan, control)) {
3301 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame: hand it up directly. */
3305 err = chan->ops->recv(chan->data, skb);
3308 case L2CAP_SAR_START:
/* First fragment carries the total SDU length up front. */
3312 chan->sdu_len = get_unaligned_le16(skb->data);
3313 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Declared SDU larger than our MTU: refuse reassembly. */
3315 if (chan->sdu_len > chan->imtu) {
3320 if (skb->len >= chan->sdu_len)
3324 chan->sdu_last_frag = skb;
3330 case L2CAP_SAR_CONTINUE:
3334 append_skb_frag(chan->sdu, skb,
3335 &chan->sdu_last_frag);
/* Accumulated more than declared: protocol error. */
3338 if (chan->sdu->len >= chan->sdu_len)
3348 append_skb_frag(chan->sdu, skb,
3349 &chan->sdu_last_frag);
/* END fragment must make the SDU exactly the declared length. */
3352 if (chan->sdu->len != chan->sdu_len)
3355 err = chan->ops->recv(chan->data, chan->sdu);
3358 /* Reassembly complete */
3360 chan->sdu_last_frag = NULL;
/* Error path: discard the partially assembled SDU. */
3368 kfree_skb(chan->sdu);
3370 chan->sdu_last_frag = NULL;
/* Enter the ERTM local-busy state: tell the peer to stop sending by
 * emitting an RNR S-frame, remember that we sent it, and stop the
 * acknowledgement timer while receive is stalled. */
3377 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3381 BT_DBG("chan %p, Enter local busy", chan);
3383 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3385 control = __set_reqseq(chan, chan->buffer_seq);
3386 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3387 l2cap_send_sframe(chan, control);
3389 set_bit(CONN_RNR_SENT, &chan->conn_state);
3391 __clear_ack_timer(chan);
/* Leave the ERTM local-busy state: poll the peer with an RR (P=1) so it
 * resumes transmission, arm the monitor timer for the F-bit reply, and
 * clear the busy bookkeeping bits. */
3394 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
/* Nothing to undo if we never actually sent an RNR. */
3398 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3401 control = __set_reqseq(chan, chan->buffer_seq);
3402 control |= __set_ctrl_poll(chan);
3403 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3404 l2cap_send_sframe(chan, control);
3405 chan->retry_count = 1;
/* Switch from retransmission timing to monitor timing while we wait
 * for the peer's F-bit response. */
3407 __clear_retrans_timer(chan);
3408 __set_monitor_timer(chan);
3410 set_bit(CONN_WAIT_F, &chan->conn_state);
3413 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3414 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3416 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer to toggle receive-side busy state;
 * only meaningful for ERTM channels. */
3419 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3421 if (chan->mode == L2CAP_MODE_ERTM) {
3423 l2cap_ertm_enter_local_busy(chan);
3425 l2cap_ertm_exit_local_busy(chan);
/* After a selectively rejected frame arrives, drain the SREJ queue of
 * every frame that is now in sequence starting from tx_seq, pushing each
 * through SDU reassembly.  Stops when a gap remains or we go locally
 * busy; reassembly failure tears the channel down. */
3429 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3431 struct sk_buff *skb;
3434 while ((skb = skb_peek(&chan->srej_q)) &&
3435 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Head of queue is not the next expected frame: gap still open. */
3438 if (bt_cb(skb)->tx_seq != tx_seq)
3441 skb = skb_dequeue(&chan->srej_q);
3442 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3443 err = l2cap_reassemble_sdu(chan, skb, control);
3446 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3450 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3451 tx_seq = __next_seq(chan, tx_seq);
/* Re-send pending SREJ requests up to and including tx_seq.  Entries for
 * sequence numbers that have now arrived are removed; the rest are moved
 * to the tail of the list after retransmission. */
3455 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3457 struct srej_list *l, *tmp;
3460 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* This sequence number was just received: its request is done. */
3461 if (l->tx_seq == tx_seq) {
3466 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3467 control |= __set_reqseq(chan, l->tx_seq);
3468 l2cap_send_sframe(chan, control);
/* Requeue at the tail so list order matches request order. */
3470 list_add_tail(&l->list, &chan->srej_l);
3474 static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3476 struct srej_list *new;
3479 while (tx_seq != chan->expected_tx_seq) {
3480 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3481 control |= __set_reqseq(chan, chan->expected_tx_seq);
3482 l2cap_send_sframe(chan, control);
3484 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3485 new->tx_seq = chan->expected_tx_seq;
3487 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3489 list_add_tail(&new->list, &chan->srej_l);
3492 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM receive path for I-frames.  Handles F-bit processing,
 * acknowledgement of our outstanding frames via req_seq, out-of-window
 * rejection, SREJ-based recovery for out-of-order frames, in-order SDU
 * reassembly, and periodic acknowledgement of received frames. */
3495 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3497 u16 tx_seq = __get_txseq(chan, rx_control);
3498 u16 req_seq = __get_reqseq(chan, rx_control);
3499 u8 sar = __get_ctrl_sar(chan, rx_control);
3500 int tx_seq_offset, expected_tx_seq_offset;
/* Ack after roughly a sixth of the tx window has been received. */
3501 int num_to_ack = (chan->tx_win/6) + 1;
3504 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3505 tx_seq, rx_control);
/* F-bit answers our earlier poll: leave the WAIT_F state. */
3507 if (__is_ctrl_final(chan, rx_control) &&
3508 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3509 __clear_monitor_timer(chan);
3510 if (chan->unacked_frames > 0)
3511 __set_retrans_timer(chan);
3512 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* req_seq acknowledges frames we transmitted. */
3515 chan->expected_ack_seq = req_seq;
3516 l2cap_drop_acked_frames(chan);
3518 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3520 /* invalid tx_seq */
/* Outside the receive window: unrecoverable, disconnect. */
3521 if (tx_seq_offset >= chan->tx_win) {
3522 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3526 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3529 if (tx_seq == chan->expected_tx_seq)
/* Out-of-order frame while SREJ recovery is already in progress. */
3532 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3533 struct srej_list *first;
3535 first = list_first_entry(&chan->srej_l,
3536 struct srej_list, list);
/* This is the oldest missing frame: queue it and try to close
 * the gap from the SREJ queue. */
3537 if (tx_seq == first->tx_seq) {
3538 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3539 l2cap_check_srej_gap(chan, tx_seq);
3541 list_del(&first->list);
/* All outstanding SREJs satisfied: leave recovery mode. */
3544 if (list_empty(&chan->srej_l)) {
3545 chan->buffer_seq = chan->buffer_seq_srej;
3546 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3547 l2cap_send_ack(chan);
3548 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3551 struct srej_list *l;
3553 /* duplicated tx_seq */
3554 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
/* Already requested this frame: just repeat the SREJ. */
3557 list_for_each_entry(l, &chan->srej_l, list) {
3558 if (l->tx_seq == tx_seq) {
3559 l2cap_resend_srejframe(chan, tx_seq);
3563 l2cap_send_srejframe(chan, tx_seq);
/* First out-of-order frame: enter SREJ recovery mode. */
3566 expected_tx_seq_offset = __seq_offset(chan,
3567 chan->expected_tx_seq, chan->buffer_seq);
3569 /* duplicated tx_seq */
3570 if (tx_seq_offset < expected_tx_seq_offset)
3573 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3575 BT_DBG("chan %p, Enter SREJ", chan);
3577 INIT_LIST_HEAD(&chan->srej_l);
3578 chan->buffer_seq_srej = chan->buffer_seq;
3580 __skb_queue_head_init(&chan->srej_q);
3581 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3583 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3585 l2cap_send_srejframe(chan, tx_seq);
3587 __clear_ack_timer(chan);
/* In-sequence frame: advance the expectation and deliver. */
3592 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3594 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3595 bt_cb(skb)->tx_seq = tx_seq;
3596 bt_cb(skb)->sar = sar;
3597 __skb_queue_tail(&chan->srej_q, skb);
3601 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3602 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3605 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3609 if (__is_ctrl_final(chan, rx_control)) {
3610 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3611 l2cap_retransmit_frames(chan);
3614 __set_ack_timer(chan);
/* Proactively ack once enough frames have accumulated. */
3616 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3617 if (chan->num_acked == num_to_ack - 1)
3618 l2cap_send_ack(chan);
/* Handle an RR (Receiver Ready) S-frame: ack outstanding frames, answer
 * polls, clear remote-busy on F-bit, and resume transmission. */
3627 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3629 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3630 __get_reqseq(chan, rx_control), rx_control);
3632 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3633 l2cap_drop_acked_frames(chan);
/* Peer polls us (P=1): our reply must carry the F-bit. */
3635 if (__is_ctrl_poll(chan, rx_control)) {
3636 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3637 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3638 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3639 (chan->unacked_frames > 0))
3640 __set_retrans_timer(chan);
3642 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3643 l2cap_send_srejtail(chan);
3645 l2cap_send_i_or_rr_or_rnr(chan);
/* F=1 answers our poll: peer is no longer busy; retransmit if a
 * REJ was not already being acted upon. */
3648 } else if (__is_ctrl_final(chan, rx_control)) {
3649 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3651 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3652 l2cap_retransmit_frames(chan);
/* Plain RR: restart the retransmission timer if frames remain
 * unacked, then push any queued I-frames. */
3655 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3656 (chan->unacked_frames > 0))
3657 __set_retrans_timer(chan);
3659 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3660 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3661 l2cap_send_ack(chan);
3663 l2cap_ertm_send(chan);
/* Handle a REJ S-frame: peer rejects everything from req_seq onward, so
 * ack up to that point and retransmit the rest.  With the F-bit set we
 * avoid double-retransmitting if a REJ action is already pending. */
3667 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
3669 u16 tx_seq = __get_reqseq(chan, rx_control);
3671 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3673 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3675 chan->expected_ack_seq = tx_seq;
3676 l2cap_drop_acked_frames(chan);
3678 if (__is_ctrl_final(chan, rx_control)) {
3679 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3680 l2cap_retransmit_frames(chan);
3682 l2cap_retransmit_frames(chan);
/* If we are waiting for an F-bit, remember the REJ was already
 * handled so the F-bit path does not retransmit again. */
3684 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3685 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle an SREJ S-frame: retransmit only the single frame the peer
 * asks for.  The three branches cover P=1 (poll: also flush the send
 * queue with an F-bit reply), F=1 (answer to our poll), and the plain
 * case; SREJ_ACT/srej_save_reqseq suppress duplicate retransmissions
 * while waiting for an F-bit. */
3688 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
3690 u16 tx_seq = __get_reqseq(chan, rx_control);
3692 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3694 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3696 if (__is_ctrl_poll(chan, rx_control)) {
3697 chan->expected_ack_seq = tx_seq;
3698 l2cap_drop_acked_frames(chan);
3700 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3701 l2cap_retransmit_one_frame(chan, tx_seq);
3703 l2cap_ertm_send(chan);
3705 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3706 chan->srej_save_reqseq = tx_seq;
3707 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* F=1: only retransmit if this SREJ wasn't already acted upon. */
3709 } else if (__is_ctrl_final(chan, rx_control)) {
3710 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3711 chan->srej_save_reqseq == tx_seq)
3712 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3714 l2cap_retransmit_one_frame(chan, tx_seq);
3716 l2cap_retransmit_one_frame(chan, tx_seq);
3717 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3718 chan->srej_save_reqseq = tx_seq;
3719 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle an RNR (Receiver Not Ready) S-frame: mark the peer busy, ack
 * what it has confirmed, and answer a poll with either RR/RNR (normal
 * case) or the SREJ tail / RR (while in SREJ recovery). */
3724 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
3726 u16 tx_seq = __get_reqseq(chan, rx_control);
3728 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3730 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3731 chan->expected_ack_seq = tx_seq;
3732 l2cap_drop_acked_frames(chan);
3734 if (__is_ctrl_poll(chan, rx_control))
3735 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Not in SREJ recovery: stop retransmitting while the peer is busy
 * and answer a poll immediately. */
3737 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3738 __clear_retrans_timer(chan);
3739 if (__is_ctrl_poll(chan, rx_control))
3740 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3744 if (__is_ctrl_poll(chan, rx_control)) {
3745 l2cap_send_srejtail(chan);
3747 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
3748 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler, after
 * first processing a final (F-bit) answer to any outstanding poll. */
3752 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3754 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
/* F-bit closes the WAIT_F window opened by our poll. */
3756 if (__is_ctrl_final(chan, rx_control) &&
3757 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3758 __clear_monitor_timer(chan);
3759 if (chan->unacked_frames > 0)
3760 __set_retrans_timer(chan);
3761 clear_bit(CONN_WAIT_F, &chan->conn_state);
3764 switch (__get_ctrl_super(chan, rx_control)) {
3765 case L2CAP_SUPER_RR:
3766 l2cap_data_channel_rrframe(chan, rx_control);
3769 case L2CAP_SUPER_REJ:
3770 l2cap_data_channel_rejframe(chan, rx_control);
3773 case L2CAP_SUPER_SREJ:
3774 l2cap_data_channel_srejframe(chan, rx_control);
3777 case L2CAP_SUPER_RNR:
3778 l2cap_data_channel_rnrframe(chan, rx_control);
/* ERTM receive entry point for one frame: strip and validate the control
 * field, FCS and length, check req_seq sanity, then route the frame to
 * the I-frame or S-frame handler.  Invalid frames either get silently
 * dropped (FCS) or tear the channel down (length/sequence violations). */
3786 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3788 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3791 int len, next_tx_seq_offset, req_seq_offset;
3793 control = __get_control(chan, skb->data);
3794 skb_pull(skb, __ctrl_size(chan));
3798 * We can just drop the corrupted I-frame here.
3799 * Receiver will miss it and start proper recovery
3800 * procedures and ask retransmission.
3802 if (l2cap_check_fcs(chan, skb))
/* Compute payload length net of SDU-length and FCS fields. */
3805 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
3806 len -= L2CAP_SDULEN_SIZE;
3808 if (chan->fcs == L2CAP_FCS_CRC16)
3809 len -= L2CAP_FCS_SIZE;
/* Payload larger than the negotiated MPS: protocol violation. */
3811 if (len > chan->mps) {
3812 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3816 req_seq = __get_reqseq(chan, control);
3818 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
3820 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
3821 chan->expected_ack_seq);
3823 /* check for invalid req-seq */
/* Peer acked frames we never sent: unrecoverable. */
3824 if (req_seq_offset > next_tx_seq_offset) {
3825 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3829 if (!__is_sframe(chan, control)) {
3831 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3835 l2cap_data_channel_iframe(chan, control, skb);
/* S-frames must carry no payload. */
3839 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3843 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver an incoming data frame to the channel identified by its CID.
 * Dispatches per channel mode: basic mode hands the skb straight to the
 * socket, ERTM goes through the full state machine (or the socket
 * backlog if the socket is user-locked), and streaming mode does
 * lightweight in-order reassembly, discarding on any gap. */
3853 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3855 struct l2cap_chan *chan;
3856 struct sock *sk = NULL;
3861 chan = l2cap_get_chan_by_scid(conn, cid);
3863 BT_DBG("unknown cid 0x%4.4x", cid);
3869 BT_DBG("chan %p, len %d", chan, skb->len);
3871 if (chan->state != BT_CONNECTED)
3874 switch (chan->mode) {
3875 case L2CAP_MODE_BASIC:
3876 /* If socket recv buffers overflows we drop data here
3877 * which is *bad* because L2CAP has to be reliable.
3878 * But we don't have any other choice. L2CAP doesn't
3879 * provide flow control mechanism. */
3881 if (chan->imtu < skb->len)
3884 if (!chan->ops->recv(chan->data, skb))
3888 case L2CAP_MODE_ERTM:
/* If userspace holds the socket lock, defer processing via the
 * backlog instead of running the state machine concurrently. */
3889 if (!sock_owned_by_user(sk)) {
3890 l2cap_ertm_data_rcv(sk, skb);
3892 if (sk_add_backlog(sk, skb))
3898 case L2CAP_MODE_STREAMING:
3899 control = __get_control(chan, skb->data);
3900 skb_pull(skb, __ctrl_size(chan));
3903 if (l2cap_check_fcs(chan, skb))
3906 if (__is_sar_start(chan, control))
3907 len -= L2CAP_SDULEN_SIZE;
3909 if (chan->fcs == L2CAP_FCS_CRC16)
3910 len -= L2CAP_FCS_SIZE;
/* Streaming mode carries no S-frames and payload must fit MPS. */
3912 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
3915 tx_seq = __get_txseq(chan, control);
/* Gap in the sequence: streaming mode never retransmits, so the
 * partially assembled SDU must be thrown away. */
3917 if (chan->expected_tx_seq != tx_seq) {
3918 /* Frame(s) missing - must discard partial SDU */
3919 kfree_skb(chan->sdu);
3921 chan->sdu_last_frag = NULL;
3924 /* TODO: Notify userland of missing data */
3927 chan->expected_tx_seq = __next_seq(chan, tx_seq);
3929 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
3930 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3935 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (CID 0x0002) frame to a socket listening on
 * the PSM carried at the head of the payload; dropped if no suitable
 * channel exists, it is in the wrong state, or the frame exceeds its MTU. */
3949 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3951 struct sock *sk = NULL;
3952 struct l2cap_chan *chan;
3954 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3962 BT_DBG("sk %p, len %d", sk, skb->len);
3964 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3967 if (chan->imtu < skb->len)
3970 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT fixed-channel (LE data) frame to the channel bound to
 * that CID on our source address; same state/MTU filtering as the
 * connectionless path. */
3982 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3984 struct sock *sk = NULL;
3985 struct l2cap_chan *chan;
3987 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3995 BT_DBG("sk %p, len %d", sk, skb->len);
3997 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4000 if (chan->imtu < skb->len)
4003 if (!chan->ops->recv(chan->data, skb))
/* Top-level demultiplexer for a complete L2CAP frame: strip the basic
 * header and route by destination CID to the signalling, connectionless,
 * ATT, SMP, or per-connection data channel handlers. */
4015 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4017 struct l2cap_hdr *lh = (void *) skb->data;
4021 skb_pull(skb, L2CAP_HDR_SIZE);
4022 cid = __le16_to_cpu(lh->cid);
4023 len = __le16_to_cpu(lh->len);
/* Header length must match the payload actually received. */
4025 if (len != skb->len) {
4030 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4033 case L2CAP_CID_LE_SIGNALING:
4034 case L2CAP_CID_SIGNALING:
4035 l2cap_sig_channel(conn, skb);
4038 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the target PSM first in the payload. */
4039 psm = get_unaligned_le16(skb->data);
4041 l2cap_conless_channel(conn, psm, skb);
4044 case L2CAP_CID_LE_DATA:
4045 l2cap_att_channel(conn, cid, skb);
/* SMP fixed channel: a failure here drops the whole connection. */
4049 if (smp_sig_channel(conn, skb))
4050 l2cap_conn_del(conn->hcon, EACCES);
4054 l2cap_data_channel(conn, cid, skb);
4059 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * bdaddr?  Scans listening channels; an exact local-address match takes
 * precedence over BDADDR_ANY listeners.  Returns the combined link-mode
 * bits (accept / master) or 0 to refuse. */
4061 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4063 int exact = 0, lm1 = 0, lm2 = 0;
4064 struct l2cap_chan *c;
/* L2CAP only rides on ACL links here. */
4066 if (type != ACL_LINK)
4069 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4071 /* Find listening sockets and check their link_mode */
4072 read_lock(&chan_list_lock);
4073 list_for_each_entry(c, &chan_list, global_l) {
4074 struct sock *sk = c->sk;
4076 if (c->state != BT_LISTEN)
/* lm1 collects modes from exact local-address matches... */
4079 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4080 lm1 |= HCI_LM_ACCEPT;
4081 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4082 lm1 |= HCI_LM_MASTER;
/* ...lm2 from wildcard (BDADDR_ANY) listeners. */
4084 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4085 lm2 |= HCI_LM_ACCEPT;
4086 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4087 lm2 |= HCI_LM_MASTER;
4090 read_unlock(&chan_list_lock);
4092 return exact ? lm1 : lm2;
/* HCI callback: an ACL/LE link finished connecting.  On success create
 * the L2CAP connection object and kick pending channels; on failure
 * tear everything down with the translated error. */
4095 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4097 struct l2cap_conn *conn;
4099 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4101 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4105 conn = l2cap_conn_add(hcon, status);
4107 l2cap_conn_ready(conn);
4109 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: the controller asks for a disconnect reason code to
 * send to the peer; report the one recorded on the L2CAP connection. */
4114 static int l2cap_disconn_ind(struct hci_conn *hcon)
4116 struct l2cap_conn *conn = hcon->l2cap_data;
4118 BT_DBG("hcon %p", hcon);
4120 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4123 return conn->disc_reason;
/* HCI callback: the link went down; tear down the L2CAP connection and
 * all of its channels with the translated HCI reason. */
4126 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4128 BT_DBG("hcon %p reason %d", hcon, reason);
4130 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4133 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption gives a medium-security channel a short grace timer
 * and closes a high-security channel outright; regaining it cancels the
 * grace timer. */
4138 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4140 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4143 if (encrypt == 0x00) {
/* Medium security tolerates a brief unencrypted window (5 s). */
4144 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4145 __clear_chan_timer(chan);
4146 __set_chan_timer(chan, HZ * 5);
4147 } else if (chan->sec_level == BT_SECURITY_HIGH)
4148 l2cap_chan_close(chan, ECONNREFUSED);
4150 if (chan->sec_level == BT_SECURITY_MEDIUM)
4151 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption completed (status 0 = ok).
 * For LE links this finishes SMP key distribution; for each channel on
 * the connection it either advances a pending connect (BT_CONNECT sends
 * the deferred Connection Request, BT_CONNECT2 answers the peer) or
 * re-evaluates encryption requirements on established channels. */
4155 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4157 struct l2cap_conn *conn = hcon->l2cap_data;
4158 struct l2cap_chan *chan;
4163 BT_DBG("conn %p", conn);
4165 if (hcon->type == LE_LINK) {
4166 smp_distribute_keys(conn, 0);
4167 del_timer(&conn->security_timer);
4170 read_lock(&conn->chan_lock);
4172 list_for_each_entry(chan, &conn->chan_l, list) {
4173 struct sock *sk = chan->sk;
4177 BT_DBG("chan->scid %d", chan->scid);
/* ATT fixed channel becomes usable once the link is encrypted. */
4179 if (chan->scid == L2CAP_CID_LE_DATA) {
4180 if (!status && encrypt) {
4181 chan->sec_level = hcon->sec_level;
4182 l2cap_chan_ready(sk);
4189 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4194 if (!status && (chan->state == BT_CONNECTED ||
4195 chan->state == BT_CONFIG)) {
4196 l2cap_check_encryption(chan, encrypt);
/* Outgoing connect was waiting for security: send the request now. */
4201 if (chan->state == BT_CONNECT) {
4203 struct l2cap_conn_req req;
4204 req.scid = cpu_to_le16(chan->scid);
4205 req.psm = chan->psm;
4207 chan->ident = l2cap_get_ident(conn);
4208 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4210 l2cap_send_cmd(conn, chan->ident,
4211 L2CAP_CONN_REQ, sizeof(req), &req);
4213 __clear_chan_timer(chan);
4214 __set_chan_timer(chan, HZ / 10);
/* Incoming connect was waiting: answer with PEND (defer_setup),
 * SUCCESS, or SEC_BLOCK depending on outcome. */
4216 } else if (chan->state == BT_CONNECT2) {
4217 struct l2cap_conn_rsp rsp;
4221 if (bt_sk(sk)->defer_setup) {
4222 struct sock *parent = bt_sk(sk)->parent;
4223 res = L2CAP_CR_PEND;
4224 stat = L2CAP_CS_AUTHOR_PEND;
4226 parent->sk_data_ready(parent, 0);
4228 l2cap_state_change(chan, BT_CONFIG);
4229 res = L2CAP_CR_SUCCESS;
4230 stat = L2CAP_CS_NO_INFO;
4233 l2cap_state_change(chan, BT_DISCONN);
4234 __set_chan_timer(chan, HZ / 10);
4235 res = L2CAP_CR_SEC_BLOCK;
4236 stat = L2CAP_CS_NO_INFO;
4239 rsp.scid = cpu_to_le16(chan->dcid);
4240 rsp.dcid = cpu_to_le16(chan->scid);
4241 rsp.result = cpu_to_le16(res);
4242 rsp.status = cpu_to_le16(stat);
4243 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4250 read_unlock(&conn->chan_lock);
/* HCI callback: raw ACL data arrived.  Reassembles L2CAP frames that
 * span multiple ACL packets: a start fragment (no ACL_CONT flag) yields
 * the basic header and total length, continuation fragments are copied
 * into conn->rx_skb until rx_len reaches zero, then the complete frame
 * is handed to l2cap_recv_frame().  Any inconsistency marks the
 * connection unreliable and resets the reassembly state. */
4255 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4257 struct l2cap_conn *conn = hcon->l2cap_data;
4260 conn = l2cap_conn_add(hcon, 0);
4265 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4267 if (!(flags & ACL_CONT)) {
4268 struct l2cap_hdr *hdr;
4269 struct l2cap_chan *chan;
/* A new start fragment while reassembly is in progress means the
 * previous frame was truncated: drop it. */
4274 BT_ERR("Unexpected start frame (len %d)", skb->len);
4275 kfree_skb(conn->rx_skb);
4276 conn->rx_skb = NULL;
4278 l2cap_conn_unreliable(conn, ECOMM);
4281 /* Start fragment always begin with Basic L2CAP header */
4282 if (skb->len < L2CAP_HDR_SIZE) {
4283 BT_ERR("Frame is too short (len %d)", skb->len);
4284 l2cap_conn_unreliable(conn, ECOMM);
4288 hdr = (struct l2cap_hdr *) skb->data;
4289 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4290 cid = __le16_to_cpu(hdr->cid);
4292 if (len == skb->len) {
4293 /* Complete frame received */
4294 l2cap_recv_frame(conn, skb);
4298 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4300 if (skb->len > len) {
4301 BT_ERR("Frame is too long (len %d, expected len %d)",
4303 l2cap_conn_unreliable(conn, ECOMM);
/* Reject early if the destination channel's MTU is exceeded. */
4307 chan = l2cap_get_chan_by_scid(conn, cid);
4309 if (chan && chan->sk) {
4310 struct sock *sk = chan->sk;
4312 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4313 BT_ERR("Frame exceeding recv MTU (len %d, "
4317 l2cap_conn_unreliable(conn, ECOMM);
4323 /* Allocate skb for the complete frame (with header) */
4324 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4328 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4330 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4332 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4334 if (!conn->rx_len) {
4335 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4336 l2cap_conn_unreliable(conn, ECOMM);
4340 if (skb->len > conn->rx_len) {
4341 BT_ERR("Fragment is too long (len %d, expected %d)",
4342 skb->len, conn->rx_len);
4343 kfree_skb(conn->rx_skb);
4344 conn->rx_skb = NULL;
4346 l2cap_conn_unreliable(conn, ECOMM);
4350 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4352 conn->rx_len -= skb->len;
4354 if (!conn->rx_len) {
4355 /* Complete frame received */
4356 l2cap_recv_frame(conn, conn->rx_skb);
4357 conn->rx_skb = NULL;
/* seq_file show routine for /sys/kernel/debug/bluetooth/l2cap: dumps
 * one line per registered channel (addresses, state, PSM, CIDs, MTUs,
 * security level, mode) under the channel-list read lock. */
4366 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4368 struct l2cap_chan *c;
4370 read_lock_bh(&chan_list_lock);
4372 list_for_each_entry(c, &chan_list, global_l) {
4373 struct sock *sk = c->sk;
4375 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4376 batostr(&bt_sk(sk)->src),
4377 batostr(&bt_sk(sk)->dst),
4378 c->state, __le16_to_cpu(c->psm),
4379 c->scid, c->dcid, c->imtu, c->omtu,
4380 c->sec_level, c->mode);
4383 read_unlock_bh(&chan_list_lock);
4388 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4390 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry; read/seek/release are the
 * standard seq_file single-record handlers. */
4393 static const struct file_operations l2cap_debugfs_fops = {
4394 .open = l2cap_debugfs_open,
4396 .llseek = seq_lseek,
4397 .release = single_release,
4400 static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core: connection
 * accept/confirm, disconnect, security and ACL-data callbacks. */
4402 static struct hci_proto l2cap_hci_proto = {
4404 .id = HCI_PROTO_L2CAP,
4405 .connect_ind = l2cap_connect_ind,
4406 .connect_cfm = l2cap_connect_cfm,
4407 .disconn_ind = l2cap_disconn_ind,
4408 .disconn_cfm = l2cap_disconn_cfm,
4409 .security_cfm = l2cap_security_cfm,
4410 .recv_acldata = l2cap_recv_acldata
/* Module init: register the L2CAP socket family, hook into the HCI core
 * (unwinding the sockets on failure), and create the debugfs entry
 * (non-fatal if it cannot be created). */
4413 int __init l2cap_init(void)
4417 err = l2cap_init_sockets();
4421 err = hci_register_proto(&l2cap_hci_proto);
4423 BT_ERR("L2CAP protocol registration failed");
/* Roll back the socket registration before bailing out. */
4424 bt_sock_unregister(BTPROTO_L2CAP);
4429 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4430 bt_debugfs, NULL, &l2cap_debugfs_fops);
4432 BT_ERR("Failed to create L2CAP debug file");
4438 l2cap_cleanup_sockets();
/* Module exit: remove the debugfs entry, detach from the HCI core and
 * unregister the socket family (reverse of l2cap_init). */
4442 void l2cap_exit(void)
4444 debugfs_remove(l2cap_debugfs);
4446 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4447 BT_ERR("L2CAP protocol unregistration failed");
4449 l2cap_cleanup_sockets();
4452 module_param(disable_ertm, bool, 0644);
4453 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4455 module_param(enable_hs, bool, 0644);
4456 MODULE_PARM_DESC(enable_hs, "Enable High Speed");