2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
78 /* ---- L2CAP channels ---- */
/* Take one reference on an L2CAP channel; paired with chan_put().
 * The refcount is atomic, so no lock is needed just to pin the channel. */
80 static inline void chan_hold(struct l2cap_chan *c)
82 	atomic_inc(&c->refcnt);
/* Drop one reference on an L2CAP channel. When the count reaches zero the
 * channel is presumably freed on the elided following line — TODO confirm
 * against the full source. */
85 static inline void chan_put(struct l2cap_chan *c)
87 	if (atomic_dec_and_test(&c->refcnt))
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
105 struct l2cap_chan *c;
107 list_for_each_entry(c, &conn->chan_l, list) {
114 /* Find channel with given SCID.
115 * Returns locked socket */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
118 struct l2cap_chan *c;
120 read_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
124 read_unlock(&conn->chan_lock);
128 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident)
139 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
141 struct l2cap_chan *c;
143 read_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_ident(conn, ident);
147 read_unlock(&conn->chan_lock);
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
153 struct l2cap_chan *c;
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
165 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
169 write_lock_bh(&chan_list_lock);
171 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
184 for (p = 0x1001; p < 0x1100; p += 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
186 chan->psm = cpu_to_le16(p);
187 chan->sport = cpu_to_le16(p);
194 write_unlock_bh(&chan_list_lock);
198 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
200 write_lock_bh(&chan_list_lock);
204 write_unlock_bh(&chan_list_lock);
209 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
211 u16 cid = L2CAP_CID_DYN_START;
213 for (; cid < L2CAP_CID_DYN_END; cid++) {
214 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm a per-channel timer @timeout milliseconds from now.
 * mod_timer() returns 0 when the timer was not already pending; the elided
 * branch body presumably takes a channel reference for the timer callback
 * (balanced in l2cap_clear_timer) — TODO confirm.
 * NOTE(review): the format string says "chan %p" but passes chan->sk —
 * looks like it should print chan itself; verify against upstream. */
221 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
223 	BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
225 	if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
/* Cancel a per-channel timer if it is still pending. When del_timer()
 * actually removed it, the elided branch presumably drops the reference
 * taken when the timer was armed — TODO confirm against the full source. */
229 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
231 	BT_DBG("chan %p state %d", chan, chan->state);
233 	if (timer_pending(timer) && del_timer(timer))
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
240 chan->ops->state_change(chan->data, state);
/* Channel timer callback (runs in timer/softirq context; @arg is the
 * l2cap_chan pointer cast to unsigned long, as set up in l2cap_chan_create).
 * Picks an errno-style reason from the channel state, closes the channel,
 * and invokes the owner's close op. */
243 static void l2cap_chan_timeout(unsigned long arg)
245 	struct l2cap_chan *chan = (struct l2cap_chan *) arg;
246 	struct sock *sk = chan->sk;
249 	BT_DBG("chan %p state %d", chan, chan->state);
	/* Socket is locked by a user-context holder: we cannot tear it down
	 * from softirq now, so re-arm a short (HZ/5) retry timer and bail. */
253 	if (sock_owned_by_user(sk)) {
254 		/* sk is owned by user. Try again later */
255 		__set_chan_timer(chan, HZ / 5);
	/* Map state to a close reason; the final else (presumably ETIMEDOUT)
	 * is on an elided line — TODO confirm. */
261 	if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
262 		reason = ECONNREFUSED;
263 	else if (chan->state == BT_CONNECT &&
264 			chan->sec_level != BT_SECURITY_SDP)
265 		reason = ECONNREFUSED;
269 	l2cap_chan_close(chan, reason);
	/* Let the socket layer (or other owner) release its side. */
273 	chan->ops->close(chan->data);
277 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
279 struct l2cap_chan *chan;
281 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
287 write_lock_bh(&chan_list_lock);
288 list_add(&chan->global_l, &chan_list);
289 write_unlock_bh(&chan_list_lock);
291 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
293 chan->state = BT_OPEN;
295 atomic_set(&chan->refcnt, 1);
300 void l2cap_chan_destroy(struct l2cap_chan *chan)
302 write_lock_bh(&chan_list_lock);
303 list_del(&chan->global_l);
304 write_unlock_bh(&chan_list_lock);
309 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
311 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
312 chan->psm, chan->dcid);
314 conn->disc_reason = 0x13;
318 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
319 if (conn->hcon->type == LE_LINK) {
321 chan->omtu = L2CAP_LE_DEFAULT_MTU;
322 chan->scid = L2CAP_CID_LE_DATA;
323 chan->dcid = L2CAP_CID_LE_DATA;
325 /* Alloc CID for connection-oriented socket */
326 chan->scid = l2cap_alloc_cid(conn);
327 chan->omtu = L2CAP_DEFAULT_MTU;
329 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
330 /* Connectionless socket */
331 chan->scid = L2CAP_CID_CONN_LESS;
332 chan->dcid = L2CAP_CID_CONN_LESS;
333 chan->omtu = L2CAP_DEFAULT_MTU;
335 /* Raw socket can send/recv signalling messages only */
336 chan->scid = L2CAP_CID_SIGNALING;
337 chan->dcid = L2CAP_CID_SIGNALING;
338 chan->omtu = L2CAP_DEFAULT_MTU;
343 list_add(&chan->list, &conn->chan_l);
347 * Must be called on the locked socket. */
348 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
350 struct sock *sk = chan->sk;
351 struct l2cap_conn *conn = chan->conn;
352 struct sock *parent = bt_sk(sk)->parent;
354 __clear_chan_timer(chan);
356 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
359 /* Delete from channel list */
360 write_lock_bh(&conn->chan_lock);
361 list_del(&chan->list);
362 write_unlock_bh(&conn->chan_lock);
366 hci_conn_put(conn->hcon);
369 l2cap_state_change(chan, BT_CLOSED);
370 sock_set_flag(sk, SOCK_ZAPPED);
376 bt_accept_unlink(sk);
377 parent->sk_data_ready(parent, 0);
379 sk->sk_state_change(sk);
381 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
382 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
385 skb_queue_purge(&chan->tx_q);
387 if (chan->mode == L2CAP_MODE_ERTM) {
388 struct srej_list *l, *tmp;
390 __clear_retrans_timer(chan);
391 __clear_monitor_timer(chan);
392 __clear_ack_timer(chan);
394 skb_queue_purge(&chan->srej_q);
396 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
403 static void l2cap_chan_cleanup_listen(struct sock *parent)
407 BT_DBG("parent %p", parent);
409 /* Close not yet accepted channels */
410 while ((sk = bt_accept_dequeue(parent, NULL))) {
411 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
412 __clear_chan_timer(chan);
414 l2cap_chan_close(chan, ECONNRESET);
416 chan->ops->close(chan->data);
420 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
422 struct l2cap_conn *conn = chan->conn;
423 struct sock *sk = chan->sk;
425 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
427 switch (chan->state) {
429 l2cap_chan_cleanup_listen(sk);
431 l2cap_state_change(chan, BT_CLOSED);
432 sock_set_flag(sk, SOCK_ZAPPED);
437 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
438 conn->hcon->type == ACL_LINK) {
439 __clear_chan_timer(chan);
440 __set_chan_timer(chan, sk->sk_sndtimeo);
441 l2cap_send_disconn_req(conn, chan, reason);
443 l2cap_chan_del(chan, reason);
447 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
448 conn->hcon->type == ACL_LINK) {
449 struct l2cap_conn_rsp rsp;
452 if (bt_sk(sk)->defer_setup)
453 result = L2CAP_CR_SEC_BLOCK;
455 result = L2CAP_CR_BAD_PSM;
456 l2cap_state_change(chan, BT_DISCONN);
458 rsp.scid = cpu_to_le16(chan->dcid);
459 rsp.dcid = cpu_to_le16(chan->scid);
460 rsp.result = cpu_to_le16(result);
461 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
462 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
466 l2cap_chan_del(chan, reason);
471 l2cap_chan_del(chan, reason);
475 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel type and requested security level to the HCI
 * authentication requirement used when establishing the ACL link.
 *  - Raw (signalling-only) channels ask for dedicated bonding.
 *  - PSM 0x0001 is SDP: never bond; LOW is upgraded to the special
 *    BT_SECURITY_SDP level so later checks treat it distinctly.
 *  - Everything else (the elided else-arm around line 500) asks for
 *    general bonding. */
480 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
482 	if (chan->chan_type == L2CAP_CHAN_RAW) {
483 		switch (chan->sec_level) {
484 		case BT_SECURITY_HIGH:
485 			return HCI_AT_DEDICATED_BONDING_MITM;
486 		case BT_SECURITY_MEDIUM:
487 			return HCI_AT_DEDICATED_BONDING;
		/* default case (elided label) falls back to no bonding */
489 			return HCI_AT_NO_BONDING;
491 	} else if (chan->psm == cpu_to_le16(0x0001)) {
		/* SDP: side effect — sec_level LOW is rewritten to SDP here */
492 		if (chan->sec_level == BT_SECURITY_LOW)
493 			chan->sec_level = BT_SECURITY_SDP;
495 		if (chan->sec_level == BT_SECURITY_HIGH)
496 			return HCI_AT_NO_BONDING_MITM;
498 			return HCI_AT_NO_BONDING;
500 		switch (chan->sec_level) {
501 		case BT_SECURITY_HIGH:
502 			return HCI_AT_GENERAL_BONDING_MITM;
503 		case BT_SECURITY_MEDIUM:
504 			return HCI_AT_GENERAL_BONDING;
506 			return HCI_AT_NO_BONDING;
511 /* Service level security */
512 static inline int l2cap_check_security(struct l2cap_chan *chan)
514 struct l2cap_conn *conn = chan->conn;
517 auth_type = l2cap_get_auth_type(chan);
519 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for this connection,
 * serialized by conn->lock (bh-safe: also used from softirq context).
 * Values wrap within the kernel's 1..128 range; the reset-to-1 on
 * overflow is on an elided line — TODO confirm. */
522 static u8 l2cap_get_ident(struct l2cap_conn *conn)
526 	/* Get next available identificator.
527 	 * 1 - 128 are used by kernel.
528 	 * 129 - 199 are reserved.
529 	 * 200 - 254 are used by utilities like l2ping, etc.
532 	spin_lock_bh(&conn->lock);
534 	if (++conn->tx_ident > 128)
539 	spin_unlock_bh(&conn->lock);
544 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
546 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
549 BT_DBG("code 0x%2.2x", code);
554 if (lmp_no_flush_capable(conn->hcon->hdev))
555 flags = ACL_START_NO_FLUSH;
559 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
561 hci_send_acl(conn->hcon, skb, flags);
564 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u16 control)
567 struct l2cap_hdr *lh;
568 struct l2cap_conn *conn = chan->conn;
569 int count, hlen = L2CAP_HDR_SIZE + 2;
572 if (chan->state != BT_CONNECTED)
575 if (chan->fcs == L2CAP_FCS_CRC16)
578 BT_DBG("chan %p, control 0x%2.2x", chan, control);
580 count = min_t(unsigned int, conn->mtu, hlen);
582 control |= __set_sframe(chan);
584 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
585 control |= L2CAP_CTRL_FINAL;
587 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
588 control |= L2CAP_CTRL_POLL;
590 skb = bt_skb_alloc(count, GFP_ATOMIC);
594 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
595 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
596 lh->cid = cpu_to_le16(chan->dcid);
597 put_unaligned_le16(control, skb_put(skb, 2));
599 if (chan->fcs == L2CAP_FCS_CRC16) {
600 u16 fcs = crc16(0, (u8 *)lh, count - 2);
601 put_unaligned_le16(fcs, skb_put(skb, 2));
604 if (lmp_no_flush_capable(conn->hcon->hdev))
605 flags = ACL_START_NO_FLUSH;
609 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
611 hci_send_acl(chan->conn->hcon, skb, flags);
/* Send a supervisory frame acknowledging buffer_seq: RNR (receiver not
 * ready) while we are locally busy — remembering that we sent it via
 * CONN_RNR_SENT — otherwise RR (receiver ready). Caller may pre-set bits
 * (e.g. L2CAP_CTRL_POLL) in @control. */
614 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u16 control)
616 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
617 		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
618 		set_bit(CONN_RNR_SENT, &chan->conn_state);
	/* else-arm (elided brace): not busy, acknowledge with RR */
620 		control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
622 	control |= __set_reqseq(chan, chan->buffer_seq);
624 	l2cap_send_sframe(chan, control);
/* True when no L2CAP Connect Request is outstanding on this channel. */
627 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
629 	return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
632 static void l2cap_do_start(struct l2cap_chan *chan)
634 struct l2cap_conn *conn = chan->conn;
636 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
637 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
640 if (l2cap_check_security(chan) &&
641 __l2cap_no_conn_pending(chan)) {
642 struct l2cap_conn_req req;
643 req.scid = cpu_to_le16(chan->scid);
646 chan->ident = l2cap_get_ident(conn);
647 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
649 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
653 struct l2cap_info_req req;
654 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
656 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
657 conn->info_ident = l2cap_get_ident(conn);
659 mod_timer(&conn->info_timer, jiffies +
660 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
662 l2cap_send_cmd(conn, conn->info_ident,
663 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether @mode is usable given both the remote feature mask
 * (@feat_mask, learned via an information request) and our local one.
 * The ERTM/STREAMING local bits are added under a condition on an elided
 * line (presumably a !disable_ertm module-parameter check — TODO confirm).
 * Non-zero return means supported; the default case is elided. */
667 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
669 	u32 local_feat_mask = l2cap_feat_mask;
671 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
674 	case L2CAP_MODE_ERTM:
675 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
676 	case L2CAP_MODE_STREAMING:
677 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
683 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
686 struct l2cap_disconn_req req;
693 if (chan->mode == L2CAP_MODE_ERTM) {
694 __clear_retrans_timer(chan);
695 __clear_monitor_timer(chan);
696 __clear_ack_timer(chan);
699 req.dcid = cpu_to_le16(chan->dcid);
700 req.scid = cpu_to_le16(chan->scid);
701 l2cap_send_cmd(conn, l2cap_get_ident(conn),
702 L2CAP_DISCONN_REQ, sizeof(req), &req);
704 l2cap_state_change(chan, BT_DISCONN);
708 /* ---- L2CAP connections ---- */
709 static void l2cap_conn_start(struct l2cap_conn *conn)
711 struct l2cap_chan *chan, *tmp;
713 BT_DBG("conn %p", conn);
715 read_lock(&conn->chan_lock);
717 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
718 struct sock *sk = chan->sk;
722 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
727 if (chan->state == BT_CONNECT) {
728 struct l2cap_conn_req req;
730 if (!l2cap_check_security(chan) ||
731 !__l2cap_no_conn_pending(chan)) {
736 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
737 && test_bit(CONF_STATE2_DEVICE,
738 &chan->conf_state)) {
739 /* l2cap_chan_close() calls list_del(chan)
740 * so release the lock */
741 read_unlock(&conn->chan_lock);
742 l2cap_chan_close(chan, ECONNRESET);
743 read_lock(&conn->chan_lock);
748 req.scid = cpu_to_le16(chan->scid);
751 chan->ident = l2cap_get_ident(conn);
752 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
754 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
757 } else if (chan->state == BT_CONNECT2) {
758 struct l2cap_conn_rsp rsp;
760 rsp.scid = cpu_to_le16(chan->dcid);
761 rsp.dcid = cpu_to_le16(chan->scid);
763 if (l2cap_check_security(chan)) {
764 if (bt_sk(sk)->defer_setup) {
765 struct sock *parent = bt_sk(sk)->parent;
766 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
767 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
769 parent->sk_data_ready(parent, 0);
772 l2cap_state_change(chan, BT_CONFIG);
773 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
774 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
777 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
778 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
781 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
784 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
785 rsp.result != L2CAP_CR_SUCCESS) {
790 set_bit(CONF_REQ_SENT, &chan->conf_state);
791 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
792 l2cap_build_conf_req(chan, buf), buf);
793 chan->num_conf_req++;
799 read_unlock(&conn->chan_lock);
802 /* Find socket with cid and source bdaddr.
803 * Returns closest match, locked.
805 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
807 struct l2cap_chan *c, *c1 = NULL;
809 read_lock(&chan_list_lock);
811 list_for_each_entry(c, &chan_list, global_l) {
812 struct sock *sk = c->sk;
814 if (state && c->state != state)
817 if (c->scid == cid) {
819 if (!bacmp(&bt_sk(sk)->src, src)) {
820 read_unlock(&chan_list_lock);
825 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
830 read_unlock(&chan_list_lock);
835 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
837 struct sock *parent, *sk;
838 struct l2cap_chan *chan, *pchan;
842 /* Check if we have socket listening on cid */
843 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
850 bh_lock_sock(parent);
852 /* Check for backlog size */
853 if (sk_acceptq_is_full(parent)) {
854 BT_DBG("backlog full %d", parent->sk_ack_backlog);
858 chan = pchan->ops->new_connection(pchan->data);
864 write_lock_bh(&conn->chan_lock);
866 hci_conn_hold(conn->hcon);
868 bacpy(&bt_sk(sk)->src, conn->src);
869 bacpy(&bt_sk(sk)->dst, conn->dst);
871 bt_accept_enqueue(parent, sk);
873 __l2cap_chan_add(conn, chan);
875 __set_chan_timer(chan, sk->sk_sndtimeo);
877 l2cap_state_change(chan, BT_CONNECTED);
878 parent->sk_data_ready(parent, 0);
880 write_unlock_bh(&conn->chan_lock);
883 bh_unlock_sock(parent);
886 static void l2cap_chan_ready(struct sock *sk)
888 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
889 struct sock *parent = bt_sk(sk)->parent;
891 BT_DBG("sk %p, parent %p", sk, parent);
893 chan->conf_state = 0;
894 __clear_chan_timer(chan);
896 l2cap_state_change(chan, BT_CONNECTED);
897 sk->sk_state_change(sk);
900 parent->sk_data_ready(parent, 0);
903 static void l2cap_conn_ready(struct l2cap_conn *conn)
905 struct l2cap_chan *chan;
907 BT_DBG("conn %p", conn);
909 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
910 l2cap_le_conn_ready(conn);
912 if (conn->hcon->out && conn->hcon->type == LE_LINK)
913 smp_conn_security(conn, conn->hcon->pending_sec_level);
915 read_lock(&conn->chan_lock);
917 list_for_each_entry(chan, &conn->chan_l, list) {
918 struct sock *sk = chan->sk;
922 if (conn->hcon->type == LE_LINK) {
923 if (smp_conn_security(conn, chan->sec_level))
924 l2cap_chan_ready(sk);
926 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
927 __clear_chan_timer(chan);
928 l2cap_state_change(chan, BT_CONNECTED);
929 sk->sk_state_change(sk);
931 } else if (chan->state == BT_CONNECT)
932 l2cap_do_start(chan);
937 read_unlock(&conn->chan_lock);
940 /* Notify sockets that we cannot guaranty reliability anymore */
941 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
943 struct l2cap_chan *chan;
945 BT_DBG("conn %p", conn);
947 read_lock(&conn->chan_lock);
949 list_for_each_entry(chan, &conn->chan_l, list) {
950 struct sock *sk = chan->sk;
952 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
956 read_unlock(&conn->chan_lock);
959 static void l2cap_info_timeout(unsigned long arg)
961 struct l2cap_conn *conn = (void *) arg;
963 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
964 conn->info_ident = 0;
966 l2cap_conn_start(conn);
969 static void l2cap_conn_del(struct hci_conn *hcon, int err)
971 struct l2cap_conn *conn = hcon->l2cap_data;
972 struct l2cap_chan *chan, *l;
978 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
980 kfree_skb(conn->rx_skb);
983 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
986 l2cap_chan_del(chan, err);
988 chan->ops->close(chan->data);
991 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
992 del_timer_sync(&conn->info_timer);
994 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
995 del_timer(&conn->security_timer);
996 smp_chan_destroy(conn);
999 hcon->l2cap_data = NULL;
1003 static void security_timeout(unsigned long arg)
1005 struct l2cap_conn *conn = (void *) arg;
1007 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1010 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1012 struct l2cap_conn *conn = hcon->l2cap_data;
1017 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1021 hcon->l2cap_data = conn;
1024 BT_DBG("hcon %p conn %p", hcon, conn);
1026 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1027 conn->mtu = hcon->hdev->le_mtu;
1029 conn->mtu = hcon->hdev->acl_mtu;
1031 conn->src = &hcon->hdev->bdaddr;
1032 conn->dst = &hcon->dst;
1034 conn->feat_mask = 0;
1036 spin_lock_init(&conn->lock);
1037 rwlock_init(&conn->chan_lock);
1039 INIT_LIST_HEAD(&conn->chan_l);
1041 if (hcon->type == LE_LINK)
1042 setup_timer(&conn->security_timer, security_timeout,
1043 (unsigned long) conn);
1045 setup_timer(&conn->info_timer, l2cap_info_timeout,
1046 (unsigned long) conn);
1048 conn->disc_reason = 0x13;
1053 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1055 write_lock_bh(&conn->chan_lock);
1056 __l2cap_chan_add(conn, chan);
1057 write_unlock_bh(&conn->chan_lock);
1060 /* ---- Socket interface ---- */
1062 /* Find socket with psm and source bdaddr.
1063 * Returns closest match.
1065 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1067 struct l2cap_chan *c, *c1 = NULL;
1069 read_lock(&chan_list_lock);
1071 list_for_each_entry(c, &chan_list, global_l) {
1072 struct sock *sk = c->sk;
1074 if (state && c->state != state)
1077 if (c->psm == psm) {
1079 if (!bacmp(&bt_sk(sk)->src, src)) {
1080 read_unlock(&chan_list_lock);
1085 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1090 read_unlock(&chan_list_lock);
1095 int l2cap_chan_connect(struct l2cap_chan *chan)
1097 struct sock *sk = chan->sk;
1098 bdaddr_t *src = &bt_sk(sk)->src;
1099 bdaddr_t *dst = &bt_sk(sk)->dst;
1100 struct l2cap_conn *conn;
1101 struct hci_conn *hcon;
1102 struct hci_dev *hdev;
1106 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1109 hdev = hci_get_route(dst, src);
1111 return -EHOSTUNREACH;
1113 hci_dev_lock_bh(hdev);
1115 auth_type = l2cap_get_auth_type(chan);
1117 if (chan->dcid == L2CAP_CID_LE_DATA)
1118 hcon = hci_connect(hdev, LE_LINK, dst,
1119 chan->sec_level, auth_type);
1121 hcon = hci_connect(hdev, ACL_LINK, dst,
1122 chan->sec_level, auth_type);
1125 err = PTR_ERR(hcon);
1129 conn = l2cap_conn_add(hcon, 0);
1136 /* Update source addr of the socket */
1137 bacpy(src, conn->src);
1139 l2cap_chan_add(conn, chan);
1141 l2cap_state_change(chan, BT_CONNECT);
1142 __set_chan_timer(chan, sk->sk_sndtimeo);
1144 if (hcon->state == BT_CONNECTED) {
1145 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1146 __clear_chan_timer(chan);
1147 if (l2cap_check_security(chan))
1148 l2cap_state_change(chan, BT_CONNECTED);
1150 l2cap_do_start(chan);
1156 hci_dev_unlock_bh(hdev);
1161 int __l2cap_wait_ack(struct sock *sk)
1163 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1164 DECLARE_WAITQUEUE(wait, current);
1168 add_wait_queue(sk_sleep(sk), &wait);
1169 set_current_state(TASK_INTERRUPTIBLE);
1170 while (chan->unacked_frames > 0 && chan->conn) {
1174 if (signal_pending(current)) {
1175 err = sock_intr_errno(timeo);
1180 timeo = schedule_timeout(timeo);
1182 set_current_state(TASK_INTERRUPTIBLE);
1184 err = sock_error(sk);
1188 set_current_state(TASK_RUNNING);
1189 remove_wait_queue(sk_sleep(sk), &wait);
1193 static void l2cap_monitor_timeout(unsigned long arg)
1195 struct l2cap_chan *chan = (void *) arg;
1196 struct sock *sk = chan->sk;
1198 BT_DBG("chan %p", chan);
1201 if (chan->retry_count >= chan->remote_max_tx) {
1202 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1207 chan->retry_count++;
1208 __set_monitor_timer(chan);
1210 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1214 static void l2cap_retrans_timeout(unsigned long arg)
1216 struct l2cap_chan *chan = (void *) arg;
1217 struct sock *sk = chan->sk;
1219 BT_DBG("chan %p", chan);
1222 chan->retry_count = 1;
1223 __set_monitor_timer(chan);
1225 set_bit(CONN_WAIT_F, &chan->conn_state);
1227 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Release transmitted-but-unacked frames from the head of tx_q up to (but
 * not including) expected_ack_seq; the loop-exit on matching tx_seq is on
 * an elided line (presumably break). Once nothing is left unacked the
 * retransmission timer is stopped. */
1231 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1233 	struct sk_buff *skb;
1235 	while ((skb = skb_peek(&chan->tx_q)) &&
1236 			chan->unacked_frames) {
1237 		if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1240 		skb = skb_dequeue(&chan->tx_q);
1243 		chan->unacked_frames--;
1246 	if (!chan->unacked_frames)
1247 		__clear_retrans_timer(chan);
/* Hand a fully built L2CAP PDU to the HCI layer for this channel's ACL
 * link. Uses non-flushable ACL packets when the channel is not flagged
 * flushable and the controller supports it; the default flags assignment
 * (else-arm) is elided. force_active mirrors FLAG_FORCE_ACTIVE so the
 * baseband exits sniff mode for this packet if requested. */
1250 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1252 	struct hci_conn *hcon = chan->conn->hcon;
1255 	BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1257 	if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1258 					lmp_no_flush_capable(hcon->hdev))
1259 		flags = ACL_START_NO_FLUSH;
1263 	bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1264 	hci_send_acl(hcon, skb, flags);
/* Streaming mode transmit: drain tx_q, stamping each I-frame's control
 * field with the next TxSeq (modulo-64 sequence space) and, when FCS is
 * enabled, recomputing CRC-16 over everything but the 2 trailing FCS
 * bytes. No retransmission state is kept — frames are sent and forgotten. */
1267 static void l2cap_streaming_send(struct l2cap_chan *chan)
1269 	struct sk_buff *skb;
1272 	while ((skb = skb_dequeue(&chan->tx_q))) {
1273 		control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1274 		control |= __set_txseq(chan, chan->next_tx_seq);
1275 		put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1277 		if (chan->fcs == L2CAP_FCS_CRC16) {
			/* FCS covers the whole PDU except its own 2 bytes */
1278 			fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1279 			put_unaligned_le16(fcs, skb->data + skb->len - 2);
1282 		l2cap_do_send(chan, skb);
1284 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
/* ERTM: retransmit the single queued I-frame whose TxSeq equals @tx_seq
 * (e.g. in response to a SREJ). The frame is looked up in tx_q, cloned,
 * its control field rebuilt with the current ReqSeq/F-bit, its FCS
 * recomputed, and the clone sent — the original stays queued until acked. */
1288 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1290 	struct sk_buff *skb, *tx_skb;
	/* Walk tx_q from the head until the requested sequence number;
	 * the loop entry (do {) and break are on elided lines. */
1293 	skb = skb_peek(&chan->tx_q);
1298 		if (bt_cb(skb)->tx_seq == tx_seq)
1301 		if (skb_queue_is_last(&chan->tx_q, skb))
1304 	} while ((skb = skb_queue_next(&chan->tx_q, skb)));
	/* Give up and tear the channel down once MaxTransmit is exhausted */
1306 	if (chan->remote_max_tx &&
1307 			bt_cb(skb)->retries == chan->remote_max_tx) {
1308 		l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
	/* Clone shares the payload buffer; only the control/FCS fields are
	 * rewritten in place before sending. */
1312 	tx_skb = skb_clone(skb, GFP_ATOMIC);
1313 	bt_cb(skb)->retries++;
1314 	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1315 	control &= __get_sar_mask(chan);
1317 	if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1318 		control |= L2CAP_CTRL_FINAL;
1320 	control |= __set_reqseq(chan, chan->buffer_seq);
1321 	control |= __set_txseq(chan, tx_seq);
1323 	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1325 	if (chan->fcs == L2CAP_FCS_CRC16) {
1326 		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1327 		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1330 	l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send frames from tx_send_head while the transmit
 * window has room. Each queued frame is cloned, the clone's control field
 * is stamped with the current ReqSeq/TxSeq (and F-bit if pending), its FCS
 * recomputed, and the clone transmitted; the original stays on tx_q for
 * possible retransmission. Returns early (elided return) when the channel
 * is not connected; the count of frames sent is presumably returned at the
 * elided end of the function.
 *
 * Fix: compute and write the FCS through tx_skb->data (the clone being
 * transmitted), not skb->data. skb_clone() shares the data buffer, so the
 * old code happened to touch the same memory, but it was inconsistent with
 * l2cap_retransmit_one_frame() and would silently corrupt the wrong buffer
 * if the clone were ever made copy-on-write. */
1333 static int l2cap_ertm_send(struct l2cap_chan *chan)
1335 	struct sk_buff *skb, *tx_skb;
1339 	if (chan->state != BT_CONNECTED)
1342 	while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
		/* MaxTransmit exhausted: abort the channel instead of looping */
1344 		if (chan->remote_max_tx &&
1345 				bt_cb(skb)->retries == chan->remote_max_tx) {
1346 			l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1350 		tx_skb = skb_clone(skb, GFP_ATOMIC);
1352 		bt_cb(skb)->retries++;
		/* Rebuild the control word, keeping only the SAR bits */
1354 		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1355 		control &= __get_sar_mask(chan);
1357 		if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1358 			control |= L2CAP_CTRL_FINAL;
1360 		control |= __set_reqseq(chan, chan->buffer_seq);
1361 		control |= __set_txseq(chan, chan->next_tx_seq);
1362 		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
		/* FCS covers the whole PDU except its own trailing 2 bytes */
1365 		if (chan->fcs == L2CAP_FCS_CRC16) {
1366 			fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1367 			put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1370 		l2cap_do_send(chan, tx_skb);
1372 		__set_retrans_timer(chan);
		/* Bookkeeping on the ORIGINAL skb, which stays queued */
1374 		bt_cb(skb)->tx_seq = chan->next_tx_seq;
1375 		chan->next_tx_seq = (chan->next_tx_seq + 1) % 64;
		/* Only the first transmission counts toward unacked_frames */
1377 		if (bt_cb(skb)->retries == 1)
1378 			chan->unacked_frames++;
1380 		chan->frames_sent++;
		/* Advance tx_send_head; the else keyword is on an elided line */
1382 		if (skb_queue_is_last(&chan->tx_q, skb))
1383 			chan->tx_send_head = NULL;
1385 			chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* ERTM: retransmit everything still unacked. Rewinds tx_send_head to the
 * front of tx_q and next_tx_seq back to expected_ack_seq, then re-drives
 * the normal send path; returns l2cap_ertm_send()'s result (via the
 * elided return of ret). */
1393 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1397 	if (!skb_queue_empty(&chan->tx_q))
1398 		chan->tx_send_head = chan->tx_q.next;
1400 	chan->next_tx_seq = chan->expected_ack_seq;
1401 	ret = l2cap_ertm_send(chan);
/* Acknowledge received I-frames up to buffer_seq. If we are locally busy,
 * send RNR (and record it). Otherwise first try to piggyback the ack on
 * pending outgoing I-frames via l2cap_ertm_send(); only when nothing was
 * sent (the elided early return covers the >0 case) fall back to an
 * explicit RR S-frame. */
1405 static void l2cap_send_ack(struct l2cap_chan *chan)
1409 	control |= __set_reqseq(chan, chan->buffer_seq);
1411 	if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1412 		control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1413 		set_bit(CONN_RNR_SENT, &chan->conn_state);
1414 		l2cap_send_sframe(chan, control);
1418 	if (l2cap_ertm_send(chan) > 0)
1421 	control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1422 	l2cap_send_sframe(chan, control);
1425 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1427 struct srej_list *tail;
1430 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1431 control |= L2CAP_CTRL_FINAL;
1433 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1434 control |= __set_reqseq(chan, tail->tx_seq);
1436 l2cap_send_sframe(chan, control);
1439 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1441 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1442 struct sk_buff **frag;
1445 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1451 /* Continuation fragments (no L2CAP header) */
1452 frag = &skb_shinfo(skb)->frag_list;
1454 count = min_t(unsigned int, conn->mtu, len);
1456 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1459 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1465 frag = &(*frag)->next;
1471 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1473 struct sock *sk = chan->sk;
1474 struct l2cap_conn *conn = chan->conn;
1475 struct sk_buff *skb;
1476 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1477 struct l2cap_hdr *lh;
1479 BT_DBG("sk %p len %d", sk, (int)len);
1481 count = min_t(unsigned int, (conn->mtu - hlen), len);
1482 skb = bt_skb_send_alloc(sk, count + hlen,
1483 msg->msg_flags & MSG_DONTWAIT, &err);
1485 return ERR_PTR(err);
1487 /* Create L2CAP header */
1488 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1489 lh->cid = cpu_to_le16(chan->dcid);
1490 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1491 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1493 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1494 if (unlikely(err < 0)) {
1496 return ERR_PTR(err);
1501 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1503 struct sock *sk = chan->sk;
1504 struct l2cap_conn *conn = chan->conn;
1505 struct sk_buff *skb;
1506 int err, count, hlen = L2CAP_HDR_SIZE;
1507 struct l2cap_hdr *lh;
1509 BT_DBG("sk %p len %d", sk, (int)len);
1511 count = min_t(unsigned int, (conn->mtu - hlen), len);
1512 skb = bt_skb_send_alloc(sk, count + hlen,
1513 msg->msg_flags & MSG_DONTWAIT, &err);
1515 return ERR_PTR(err);
1517 /* Create L2CAP header */
1518 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1519 lh->cid = cpu_to_le16(chan->dcid);
1520 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1522 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1523 if (unlikely(err < 0)) {
1525 return ERR_PTR(err);
/*
 * Build one ERTM/streaming I-frame: L2CAP header, 16-bit control field,
 * optional 16-bit SAR SDU-length field, payload, and a zeroed FCS
 * placeholder when CRC16 is enabled (the real FCS is filled in at
 * transmit time).  Returns skb or ERR_PTR(-errno).
 * NOTE(review): listing elides the connection check behind the
 * -ENOTCONN return, the "hlen += 2" adjustments for sdulen/FCS, the
 * !skb check, and the error-path kfree_skb().
 */
1530 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1531 struct msghdr *msg, size_t len,
1532 u16 control, u16 sdulen)
1534 struct sock *sk = chan->sk;
1535 struct l2cap_conn *conn = chan->conn;
1536 struct sk_buff *skb;
1537 int err, count, hlen = L2CAP_HDR_SIZE + 2; /* +2 for the control field */
1538 struct l2cap_hdr *lh;
1540 BT_DBG("sk %p len %d", sk, (int)len);
1543 return ERR_PTR(-ENOTCONN);
1548 if (chan->fcs == L2CAP_FCS_CRC16)
1551 count = min_t(unsigned int, (conn->mtu - hlen), len);
1552 skb = bt_skb_send_alloc(sk, count + hlen,
1553 msg->msg_flags & MSG_DONTWAIT, &err);
1555 return ERR_PTR(err);
1557 /* Create L2CAP header */
1558 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1559 lh->cid = cpu_to_le16(chan->dcid);
1560 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1561 put_unaligned_le16(control, skb_put(skb, 2));
1563 put_unaligned_le16(sdulen, skb_put(skb, 2)); /* only for SAR start frames (guard elided) */
1565 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1566 if (unlikely(err < 0)) {
1568 return ERR_PTR(err);
1571 if (chan->fcs == L2CAP_FCS_CRC16)
1572 put_unaligned_le16(0, skb_put(skb, 2)); /* FCS placeholder, computed later */
1574 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the peer's MPS into a START frame carrying
 * the total SDU length, zero or more CONTINUE frames, and a final END
 * frame.  Frames are staged on a local queue so a mid-segmentation
 * failure can purge everything, then spliced onto chan->tx_q.
 * NOTE(review): the declarations of control/buflen/size/err, the
 * IS_ERR(skb) guards, the while-loop head and the final return are
 * elided from this listing.
 */
1578 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1580 struct sk_buff *skb;
1581 struct sk_buff_head sar_queue;
1585 skb_queue_head_init(&sar_queue);
1586 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1587 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len); /* sdulen = total SDU len */
1589 return PTR_ERR(skb);
1591 __skb_queue_tail(&sar_queue, skb);
1592 len -= chan->remote_mps;
1593 size += chan->remote_mps;
1598 if (len > chan->remote_mps) {
1599 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1600 buflen = chan->remote_mps;
1602 control = __set_ctrl_sar(chan, L2CAP_SAR_END); /* last fragment */
1606 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1608 skb_queue_purge(&sar_queue); /* drop already-built fragments on error */
1609 return PTR_ERR(skb);
1612 __skb_queue_tail(&sar_queue, skb);
1616 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1617 if (chan->tx_send_head == NULL)
1618 chan->tx_send_head = sar_queue.next;
/*
 * Entry point for sending user data on a channel.  Dispatches on
 * channel type/mode: connectionless channels and basic mode send a
 * single PDU immediately; ERTM/streaming queue either one unsegmented
 * I-frame or a SAR-segmented train, then kick the mode-specific
 * transmit engine.  Returns bytes sent / 0, or a negative errno
 * (elided returns between the visible lines).
 * NOTE(review): declarations of control/err, the EMSGSIZE path for an
 * oversized basic-mode SDU, several "break"s and the final return are
 * elided from this listing.
 */
1623 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1625 struct sk_buff *skb;
1629 /* Connectionless channel */
1630 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1631 skb = l2cap_create_connless_pdu(chan, msg, len);
1633 return PTR_ERR(skb);
1635 l2cap_do_send(chan, skb);
1639 switch (chan->mode) {
1640 case L2CAP_MODE_BASIC:
1641 /* Check outgoing MTU */
1642 if (len > chan->omtu)
1645 /* Create a basic PDU */
1646 skb = l2cap_create_basic_pdu(chan, msg, len);
1648 return PTR_ERR(skb);
1650 l2cap_do_send(chan, skb);
1654 case L2CAP_MODE_ERTM:
1655 case L2CAP_MODE_STREAMING:
1656 /* Entire SDU fits into one PDU */
1657 if (len <= chan->remote_mps) {
1658 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1659 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1662 return PTR_ERR(skb);
1664 __skb_queue_tail(&chan->tx_q, skb);
1666 if (chan->tx_send_head == NULL)
1667 chan->tx_send_head = skb;
1670 /* Segment SDU into multiples PDUs */
1671 err = l2cap_sar_segment_sdu(chan, msg, len);
1676 if (chan->mode == L2CAP_MODE_STREAMING) {
1677 l2cap_streaming_send(chan);
1682 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1683 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1688 err = l2cap_ertm_send(chan); /* only when not remote-busy + waiting for F-bit */
1695 BT_DBG("bad state %1.1x", chan->mode);
1702 /* Copy frame to all raw sockets on that connection */
/*
 * Clone the incoming frame to every RAW-type channel on this
 * connection (e.g. sniffing sockets), under the connection's channel
 * list read lock.  Clone failures and recv() rejections are handled on
 * elided lines (kfree_skb(nskb) is not visible here).
 */
1703 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1705 struct sk_buff *nskb;
1706 struct l2cap_chan *chan;
1708 BT_DBG("conn %p", conn);
1710 read_lock(&conn->chan_lock);
1711 list_for_each_entry(chan, &conn->chan_l, list) {
1712 struct sock *sk = chan->sk;
1713 if (chan->chan_type != L2CAP_CHAN_RAW)
1716 /* Don't send frame to the socket it came from */
1719 nskb = skb_clone(skb, GFP_ATOMIC);
1723 if (chan->ops->recv(chan->data, nskb))
1726 read_unlock(&conn->chan_lock);
1729 /* ---- L2CAP signalling commands ---- */
/*
 * Allocate and fill a signalling PDU: L2CAP header (CID chosen by link
 * type: LE vs BR/EDR signalling channel), command header (code, ident,
 * dlen), then the command payload, fragmented into an skb frag_list
 * when it exceeds the connection MTU.  Returns the skb, or NULL on
 * allocation failure (elided checks/kfree_skb between visible lines).
 */
1730 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1731 u8 code, u8 ident, u16 dlen, void *data)
1733 struct sk_buff *skb, **frag;
1734 struct l2cap_cmd_hdr *cmd;
1735 struct l2cap_hdr *lh;
1738 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1739 conn, code, ident, dlen);
1741 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1742 count = min_t(unsigned int, conn->mtu, len);
1744 skb = bt_skb_alloc(count, GFP_ATOMIC);
1748 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1749 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1751 if (conn->hcon->type == LE_LINK)
1752 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1754 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1756 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1759 cmd->len = cpu_to_le16(dlen); /* cmd->code / cmd->ident assignments elided */
1762 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1763 memcpy(skb_put(skb, count), data, count);
1769 /* Continuation fragments (no L2CAP header) */
1770 frag = &skb_shinfo(skb)->frag_list;
1772 count = min_t(unsigned int, conn->mtu, len);
1774 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1778 memcpy(skb_put(*frag, count), data, count);
1783 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr: returns its total size
 * (header + value, on an elided line), and writes the option type,
 * value length, and value (widened to unsigned long for 1/2/4-byte
 * options, or a pointer for larger ones) through the out-parameters.
 * *ptr is advanced past the option (advance statement elided).
 */
1793 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1795 struct l2cap_conf_opt *opt = *ptr;
1798 len = L2CAP_CONF_OPT_SIZE + opt->len;
1806 *val = *((u8 *) opt->val); /* 1-byte option */
1810 *val = get_unaligned_le16(opt->val); /* 2-byte option */
1814 *val = get_unaligned_le32(opt->val); /* 4-byte option */
1818 *val = (unsigned long) opt->val; /* larger option: hand back a pointer */
1822 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option to the buffer at *ptr (type/len
 * header stores elided) and advance *ptr past it.  For len 1/2/4 the
 * value is stored inline little-endian; larger values are memcpy'd
 * from the pointer smuggled through 'val'.
 */
1826 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1828 struct l2cap_conf_opt *opt = *ptr;
1830 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1837 *((u8 *) opt->val) = val;
1841 put_unaligned_le16(val, opt->val);
1845 put_unaligned_le32(val, opt->val);
1849 memcpy(opt->val, (void *) val, len); /* 'val' is actually a pointer here */
1853 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * ERTM ack-timer callback: when the timer fires, send any pending
 * acknowledgement for the channel, holding the socket bh lock.
 * 'arg' is the l2cap_chan pointer set up in l2cap_ertm_init().
 */
1856 static void l2cap_ack_timeout(unsigned long arg)
1858 struct l2cap_chan *chan = (void *) arg;
1860 bh_lock_sock(chan->sk);
1861 l2cap_send_ack(chan);
1862 bh_unlock_sock(chan->sk);
/*
 * Reset ERTM per-channel state for a fresh connection: zero the
 * sequence/ack counters, arm the three ERTM timers (retransmission,
 * monitor, ack), initialise the selective-reject queue and list, and
 * route socket backlog processing to the ERTM receive path.
 */
1865 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1867 struct sock *sk = chan->sk;
1869 chan->expected_ack_seq = 0;
1870 chan->unacked_frames = 0;
1871 chan->buffer_seq = 0;
1872 chan->num_acked = 0;
1873 chan->frames_sent = 0;
1875 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1876 (unsigned long) chan);
1877 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1878 (unsigned long) chan);
1879 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1881 skb_queue_head_init(&chan->srej_q);
1883 INIT_LIST_HEAD(&chan->srej_l);
1886 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/*
 * Pick the channel mode to actually use: keep ERTM/streaming only if
 * the remote's feature mask supports it (return of 'mode' is on an
 * elided line); otherwise fall back to basic mode.
 */
1889 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1892 case L2CAP_MODE_STREAMING:
1893 case L2CAP_MODE_ERTM:
1894 if (l2cap_mode_supported(mode, remote_feat_mask))
1898 return L2CAP_MODE_BASIC;
/*
 * True when extended window size may be used: high-speed support is
 * enabled and the connection advertises L2CAP_FEAT_EXT_WINDOW.
 */
1902 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1904 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/*
 * Decide the tx window representation: if the requested window exceeds
 * the default and extended window size is supported, switch the channel
 * to the extended control field; otherwise clamp tx_win to the default
 * (the else-branch linkage is on an elided line).
 */
1907 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1909 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1910 __l2cap_ews_supported(chan))
1911 /* use extended control field */
1912 set_bit(FLAG_EXT_CTRL, &chan->flags);
1914 chan->tx_win = min_t(u16, chan->tx_win,
1915 L2CAP_DEFAULT_TX_WINDOW);
/*
 * Build our outgoing Configure Request into 'data': MTU option when
 * not the default, then a mode-specific RFC option (basic/ERTM/
 * streaming), plus FCS and extended-window options where applicable.
 * Returns the total request length (return computed from ptr - data on
 * an elided line).  On the first request the mode may be downgraded to
 * whatever the remote's feature mask supports.
 * NOTE(review): several "break"s and the connectionless-channel
 * shortcut are elided between visible lines.
 */
1918 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1920 struct l2cap_conf_req *req = data;
1921 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1922 void *ptr = req->data;
1924 BT_DBG("chan %p", chan);
1926 if (chan->num_conf_req || chan->num_conf_rsp)
1929 switch (chan->mode) {
1930 case L2CAP_MODE_STREAMING:
1931 case L2CAP_MODE_ERTM:
1932 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
1937 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask); /* first request: negotiate mode down if unsupported */
1942 if (chan->imtu != L2CAP_DEFAULT_MTU)
1943 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
1945 switch (chan->mode) {
1946 case L2CAP_MODE_BASIC:
1947 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
1948 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
1951 rfc.mode = L2CAP_MODE_BASIC;
1953 rfc.max_transmit = 0;
1954 rfc.retrans_timeout = 0;
1955 rfc.monitor_timeout = 0;
1956 rfc.max_pdu_size = 0;
1958 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1959 (unsigned long) &rfc);
1962 case L2CAP_MODE_ERTM:
1963 rfc.mode = L2CAP_MODE_ERTM;
1964 rfc.max_transmit = chan->max_tx;
1965 rfc.retrans_timeout = 0;
1966 rfc.monitor_timeout = 0;
1967 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
1968 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
1969 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10); /* leave room for headers + FCS */
1971 l2cap_txwin_setup(chan);
1973 rfc.txwin_size = min_t(u16, chan->tx_win,
1974 L2CAP_DEFAULT_TX_WINDOW);
1976 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
1977 (unsigned long) &rfc);
1979 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
1982 if (chan->fcs == L2CAP_FCS_NONE ||
1983 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
1984 chan->fcs = L2CAP_FCS_NONE;
1985 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
1988 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1989 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
1993 case L2CAP_MODE_STREAMING:
1994 rfc.mode = L2CAP_MODE_STREAMING;
1996 rfc.max_transmit = 0;
1997 rfc.retrans_timeout = 0;
1998 rfc.monitor_timeout = 0;
1999 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2000 if (L2CAP_DEFAULT_MAX_PDU_SIZE > chan->conn->mtu - 10)
2001 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2003 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2004 (unsigned long) &rfc);
2006 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2009 if (chan->fcs == L2CAP_FCS_NONE ||
2010 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2011 chan->fcs = L2CAP_FCS_NONE;
2012 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2017 req->dcid = cpu_to_le16(chan->dcid);
2018 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated Configure Request (chan->conf_req /
 * conf_len) and build our Configure Response into 'data'.  Walks every
 * option, records MTU/flush-timeout/RFC/FCS/EWS values, rejects unknown
 * non-hint options with L2CAP_CONF_UNKNOWN, then validates the
 * requested mode against ours (refusing the connection after a second
 * disagreement).  On success fills in the negotiated MTU and a
 * mode-specific RFC echo; returns the response length (ptr - data, on
 * an elided line) or -ECONNREFUSED.
 * NOTE(review): declarations of 'val', several break/default lines and
 * the chan->omtu assignment from 'mtu' are elided from this listing.
 */
2023 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2025 struct l2cap_conf_rsp *rsp = data;
2026 void *ptr = rsp->data;
2027 void *req = chan->conf_req;
2028 int len = chan->conf_len;
2029 int type, hint, olen;
2031 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2032 u16 mtu = L2CAP_DEFAULT_MTU;
2033 u16 result = L2CAP_CONF_SUCCESS;
2035 BT_DBG("chan %p", chan);
2037 while (len >= L2CAP_CONF_OPT_SIZE) {
2038 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2040 hint = type & L2CAP_CONF_HINT; /* hint options may be silently ignored */
2041 type &= L2CAP_CONF_MASK;
2044 case L2CAP_CONF_MTU:
2048 case L2CAP_CONF_FLUSH_TO:
2049 chan->flush_to = val;
2052 case L2CAP_CONF_QOS:
2055 case L2CAP_CONF_RFC:
2056 if (olen == sizeof(rfc))
2057 memcpy(&rfc, (void *) val, olen);
2060 case L2CAP_CONF_FCS:
2061 if (val == L2CAP_FCS_NONE)
2062 set_bit(CONF_NO_FCS_RECV, &chan->conf_state)
2066 case L2CAP_CONF_EWS:
2068 return -ECONNREFUSED; /* EWS without high-speed support (guard elided) */
2070 set_bit(FLAG_EXT_CTRL, &chan->flags);
2071 set_bit(CONF_EWS_RECV, &chan->conf_state);
2072 chan->remote_tx_win = val;
2079 result = L2CAP_CONF_UNKNOWN;
2080 *((u8 *) ptr++) = type; /* list unknown option types in the response */
2085 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2088 switch (chan->mode) {
2089 case L2CAP_MODE_STREAMING:
2090 case L2CAP_MODE_ERTM:
2091 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2092 chan->mode = l2cap_select_mode(rfc.mode,
2093 chan->conn->feat_mask);
2097 if (chan->mode != rfc.mode)
2098 return -ECONNREFUSED; /* state-2 device can't change mode */
2104 if (chan->mode != rfc.mode) {
2105 result = L2CAP_CONF_UNACCEPT;
2106 rfc.mode = chan->mode;
2108 if (chan->num_conf_rsp == 1)
2109 return -ECONNREFUSED; /* second disagreement: give up */
2111 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2112 sizeof(rfc), (unsigned long) &rfc);
2116 if (result == L2CAP_CONF_SUCCESS) {
2117 /* Configure output options and let the other side know
2118 * which ones we don't like. */
2120 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2121 result = L2CAP_CONF_UNACCEPT;
2124 set_bit(CONF_MTU_DONE, &chan->conf_state);
2126 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2129 case L2CAP_MODE_BASIC:
2130 chan->fcs = L2CAP_FCS_NONE;
2131 set_bit(CONF_MODE_DONE, &chan->conf_state);
2134 case L2CAP_MODE_ERTM:
2135 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2136 chan->remote_tx_win = rfc.txwin_size;
2138 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2140 chan->remote_max_tx = rfc.max_transmit;
2142 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2143 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2145 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2147 rfc.retrans_timeout =
2148 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2149 rfc.monitor_timeout =
2150 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2152 set_bit(CONF_MODE_DONE, &chan->conf_state);
2154 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2155 sizeof(rfc), (unsigned long) &rfc);
2159 case L2CAP_MODE_STREAMING:
2160 if (le16_to_cpu(rfc.max_pdu_size) > chan->conn->mtu - 10)
2161 rfc.max_pdu_size = cpu_to_le16(chan->conn->mtu - 10);
2163 chan->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2165 set_bit(CONF_MODE_DONE, &chan->conf_state);
2167 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2168 sizeof(rfc), (unsigned long) &rfc);
2173 result = L2CAP_CONF_UNACCEPT;
2175 memset(&rfc, 0, sizeof(rfc));
2176 rfc.mode = chan->mode;
2179 if (result == L2CAP_CONF_SUCCESS)
2180 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2182 rsp->scid = cpu_to_le16(chan->dcid);
2183 rsp->result = cpu_to_le16(result);
2184 rsp->flags = cpu_to_le16(0x0000);
/*
 * Process the peer's Configure Response and build our follow-up
 * Configure Request into 'data'.  Re-proposes any option the peer
 * adjusted (MTU clamped to at least the minimum, flush timeout, RFC,
 * extended window) and adopts the accepted RFC timers/MPS when the
 * result is success.  Returns the new request length (ptr - data, on
 * an elided line) or -ECONNREFUSED on a mode conflict.
 * NOTE(review): declarations of type/olen/val and several break lines
 * are elided from this listing.
 */
2189 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2191 struct l2cap_conf_req *req = data;
2192 void *ptr = req->data;
2195 struct l2cap_conf_rfc rfc;
2197 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2199 while (len >= L2CAP_CONF_OPT_SIZE) {
2200 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2203 case L2CAP_CONF_MTU:
2204 if (val < L2CAP_DEFAULT_MIN_MTU) {
2205 *result = L2CAP_CONF_UNACCEPT;
2206 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2209 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2212 case L2CAP_CONF_FLUSH_TO:
2213 chan->flush_to = val;
2214 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2218 case L2CAP_CONF_RFC:
2219 if (olen == sizeof(rfc))
2220 memcpy(&rfc, (void *)val, olen);
2222 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2223 rfc.mode != chan->mode)
2224 return -ECONNREFUSED; /* state-2 device must keep its mode */
2228 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2229 sizeof(rfc), (unsigned long) &rfc);
2232 case L2CAP_CONF_EWS:
2233 chan->tx_win = min_t(u16, val,
2234 L2CAP_DEFAULT_EXT_WINDOW);
2235 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS,
2241 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2242 return -ECONNREFUSED;
2244 chan->mode = rfc.mode; /* adopt the negotiated mode */
2246 if (*result == L2CAP_CONF_SUCCESS) {
2248 case L2CAP_MODE_ERTM:
2249 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2250 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2251 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2253 case L2CAP_MODE_STREAMING:
2254 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2258 req->dcid = cpu_to_le16(chan->dcid);
2259 req->flags = cpu_to_le16(0x0000);
/*
 * Fill a minimal Configure Response header (scid/result/flags, no
 * options) into 'data'; the length return statement is on an elided
 * line.
 */
2264 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2266 struct l2cap_conf_rsp *rsp = data;
2267 void *ptr = rsp->data;
2269 BT_DBG("chan %p", chan);
2271 rsp->scid = cpu_to_le16(chan->dcid);
2272 rsp->result = cpu_to_le16(result);
2273 rsp->flags = cpu_to_le16(flags);
/*
 * Complete a deferred incoming connection: send the success Connection
 * Response that was held back (defer_setup), then kick off our
 * Configure Request unless one was already sent (CONF_REQ_SENT
 * test-and-set guards the duplicate; the 'buf' declaration is elided).
 */
2278 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2280 struct l2cap_conn_rsp rsp;
2281 struct l2cap_conn *conn = chan->conn;
2284 rsp.scid = cpu_to_le16(chan->dcid);
2285 rsp.dcid = cpu_to_le16(chan->scid);
2286 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2287 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2288 l2cap_send_cmd(conn, chan->ident,
2289 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2291 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2294 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2295 l2cap_build_conf_req(chan, buf), buf);
2296 chan->num_conf_req++;
/*
 * Extract the RFC option from an already-accepted Configure Response
 * and cache its timers/MPS on the channel.  Only meaningful for
 * ERTM/streaming channels; basic mode returns early (elided return).
 * NOTE(review): declarations of type/olen/val and the option-walk
 * switch framing are partially elided.
 */
2299 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2303 struct l2cap_conf_rfc rfc;
2305 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2307 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2310 while (len >= L2CAP_CONF_OPT_SIZE) {
2311 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2314 case L2CAP_CONF_RFC:
2315 if (olen == sizeof(rfc))
2316 memcpy(&rfc, (void *)val, olen);
2323 case L2CAP_MODE_ERTM:
2324 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2325 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2326 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2328 case L2CAP_MODE_STREAMING:
2329 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming Command Reject: if it answers our outstanding
 * Information Request (matching ident), stop the info timer, mark the
 * feature-mask exchange done, and continue bringing up pending
 * channels.  Other reject reasons are ignored (early return elided).
 */
2333 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2335 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2337 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2340 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2341 cmd->ident == conn->info_ident) {
2342 del_timer(&conn->info_timer);
2344 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2345 conn->info_ident = 0;
2347 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request.  Looks up a listening channel
 * for the PSM, enforces link security (except for SDP, PSM 0x0001) and
 * parent backlog limits, creates the child channel, registers it on the
 * connection (rejecting a duplicate remote scid), and decides the
 * result: success, pending (authentication or deferred accept), or a
 * failure code.  Always sends a Connection Response; may additionally
 * trigger the feature-mask Information Request and, on immediate
 * success, our first Configure Request.
 * NOTE(review): the listing elides several goto labels ("response",
 * "sendresp"), the dcid assignment from the new channel's scid, the
 * 'buf' declaration, and unlock/cleanup lines.
 */
2353 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2355 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2356 struct l2cap_conn_rsp rsp;
2357 struct l2cap_chan *chan = NULL, *pchan;
2358 struct sock *parent, *sk = NULL;
2359 int result, status = L2CAP_CS_NO_INFO;
2361 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2362 __le16 psm = req->psm;
2364 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2366 /* Check if we have socket listening on psm */
2367 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2369 result = L2CAP_CR_BAD_PSM;
2375 bh_lock_sock(parent);
2377 /* Check if the ACL is secure enough (if not SDP) */
2378 if (psm != cpu_to_le16(0x0001) &&
2379 !hci_conn_check_link_mode(conn->hcon)) {
2380 conn->disc_reason = 0x05; /* HCI reject reason: auth failure */
2381 result = L2CAP_CR_SEC_BLOCK;
2385 result = L2CAP_CR_NO_MEM;
2387 /* Check for backlog size */
2388 if (sk_acceptq_is_full(parent)) {
2389 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2393 chan = pchan->ops->new_connection(pchan->data);
2399 write_lock_bh(&conn->chan_lock);
2401 /* Check if we already have channel with that dcid */
2402 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2403 write_unlock_bh(&conn->chan_lock);
2404 sock_set_flag(sk, SOCK_ZAPPED);
2405 chan->ops->close(chan->data);
2409 hci_conn_hold(conn->hcon);
2411 bacpy(&bt_sk(sk)->src, conn->src);
2412 bacpy(&bt_sk(sk)->dst, conn->dst);
2416 bt_accept_enqueue(parent, sk);
2418 __l2cap_chan_add(conn, chan);
2422 __set_chan_timer(chan, sk->sk_sndtimeo);
2424 chan->ident = cmd->ident; /* remember ident for a deferred response */
2426 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2427 if (l2cap_check_security(chan)) {
2428 if (bt_sk(sk)->defer_setup) {
2429 l2cap_state_change(chan, BT_CONNECT2);
2430 result = L2CAP_CR_PEND;
2431 status = L2CAP_CS_AUTHOR_PEND;
2432 parent->sk_data_ready(parent, 0);
2434 l2cap_state_change(chan, BT_CONFIG);
2435 result = L2CAP_CR_SUCCESS;
2436 status = L2CAP_CS_NO_INFO;
2439 l2cap_state_change(chan, BT_CONNECT2);
2440 result = L2CAP_CR_PEND;
2441 status = L2CAP_CS_AUTHEN_PEND;
2444 l2cap_state_change(chan, BT_CONNECT2);
2445 result = L2CAP_CR_PEND;
2446 status = L2CAP_CS_NO_INFO; /* feature exchange still in flight */
2449 write_unlock_bh(&conn->chan_lock);
2452 bh_unlock_sock(parent);
2455 rsp.scid = cpu_to_le16(scid);
2456 rsp.dcid = cpu_to_le16(dcid);
2457 rsp.result = cpu_to_le16(result);
2458 rsp.status = cpu_to_le16(status);
2459 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2461 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2462 struct l2cap_info_req info;
2463 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2465 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2466 conn->info_ident = l2cap_get_ident(conn);
2468 mod_timer(&conn->info_timer, jiffies +
2469 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2471 l2cap_send_cmd(conn, conn->info_ident,
2472 L2CAP_INFO_REQ, sizeof(info), &info);
2475 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2476 result == L2CAP_CR_SUCCESS) {
2478 set_bit(CONF_REQ_SENT, &chan->conf_state);
2479 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2480 l2cap_build_conf_req(chan, buf), buf);
2481 chan->num_conf_req++;
/*
 * Handle a Connection Response to our outgoing request: find the
 * channel by scid (or by ident while still pending), then on SUCCESS
 * move to BT_CONFIG and send our first Configure Request; on PEND mark
 * the connect as pending; otherwise tear the channel down (deferring
 * via a short timer when the socket is locked by userspace).
 * NOTE(review): lookup-failure returns, the 'sk'/'req' declarations,
 * bh_lock/unlock lines and the default: label are elided.
 */
2487 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2489 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2490 u16 scid, dcid, result, status;
2491 struct l2cap_chan *chan;
2495 scid = __le16_to_cpu(rsp->scid);
2496 dcid = __le16_to_cpu(rsp->dcid);
2497 result = __le16_to_cpu(rsp->result);
2498 status = __le16_to_cpu(rsp->status);
2500 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2503 chan = l2cap_get_chan_by_scid(conn, scid);
2507 chan = l2cap_get_chan_by_ident(conn, cmd->ident); /* scid 0 while pending */
2515 case L2CAP_CR_SUCCESS:
2516 l2cap_state_change(chan, BT_CONFIG);
2519 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2521 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2524 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2525 l2cap_build_conf_req(chan, req), req);
2526 chan->num_conf_req++;
2530 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2534 /* don't delete l2cap channel if sk is owned by user */
2535 if (sock_owned_by_user(sk)) {
2536 l2cap_state_change(chan, BT_DISCONN);
2537 __clear_chan_timer(chan);
2538 __set_chan_timer(chan, HZ / 5); /* retry deletion shortly */
2542 l2cap_chan_del(chan, ECONNREFUSED);
/*
 * Finalise the FCS setting after configuration: only ERTM/streaming
 * may use a checksum, and CRC16 becomes the default unless either side
 * asked for no FCS during negotiation (CONF_NO_FCS_RECV).
 */
2550 static inline void set_default_fcs(struct l2cap_chan *chan)
2552 /* FCS is enabled only in ERTM or streaming mode, if one or both
2555 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2556 chan->fcs = L2CAP_FCS_NONE;
2557 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2558 chan->fcs = L2CAP_FCS_CRC16;
/*
 * Handle an incoming Configure Request.  Rejects requests for channels
 * not in BT_CONFIG/BT_CONNECT2 with an invalid-CID Command Reject, and
 * bounds-checks the accumulated option buffer.  Options are buffered
 * across continuation frames (flags & 0x0001); once complete, the
 * request is parsed, a response sent, and -- when both input and
 * output configuration are done -- the channel goes BT_CONNECTED
 * (initialising ERTM state if needed).  Also sends our own Configure
 * Request if we haven't yet.
 * NOTE(review): declarations of rsp/buf/len/sk, unlock/return lines,
 * the conf_len reset and several gotos are elided from this listing.
 */
2561 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2563 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2566 struct l2cap_chan *chan;
2570 dcid = __le16_to_cpu(req->dcid);
2571 flags = __le16_to_cpu(req->flags);
2573 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2575 chan = l2cap_get_chan_by_scid(conn, dcid);
2581 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2582 struct l2cap_cmd_rej_cid rej;
2584 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2585 rej.scid = cpu_to_le16(chan->scid);
2586 rej.dcid = cpu_to_le16(chan->dcid);
2588 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2593 /* Reject if config buffer is too small. */
2594 len = cmd_len - sizeof(*req);
2595 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2596 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2597 l2cap_build_conf_rsp(chan, rsp,
2598 L2CAP_CONF_REJECT, flags), rsp);
2603 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2604 chan->conf_len += len;
2606 if (flags & 0x0001) {
2607 /* Incomplete config. Send empty response. */
2608 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2609 l2cap_build_conf_rsp(chan, rsp,
2610 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2614 /* Complete config. */
2615 len = l2cap_parse_conf_req(chan, rsp);
2617 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2621 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2622 chan->num_conf_rsp++;
2624 /* Reset config buffer. */
2627 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2630 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2631 set_default_fcs(chan);
2633 l2cap_state_change(chan, BT_CONNECTED);
2635 chan->next_tx_seq = 0;
2636 chan->expected_tx_seq = 0;
2637 skb_queue_head_init(&chan->tx_q);
2638 if (chan->mode == L2CAP_MODE_ERTM)
2639 l2cap_ertm_init(chan);
2641 l2cap_chan_ready(sk);
2645 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2647 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2648 l2cap_build_conf_req(chan, buf), buf);
2649 chan->num_conf_req++;
/*
 * Handle a Configure Response to our request.  On SUCCESS cache the
 * accepted RFC values; on UNACCEPT (within the retry limit) parse the
 * peer's counter-proposal and re-send an adjusted request; any other
 * result disconnects the channel.  When both directions are configured
 * the channel goes BT_CONNECTED, initialising ERTM if selected.
 * NOTE(review): declarations of sk/req/buf, lookup-failure returns,
 * unlock lines and several gotos/breaks are elided from this listing.
 */
2657 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2659 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2660 u16 scid, flags, result;
2661 struct l2cap_chan *chan;
2663 int len = cmd->len - sizeof(*rsp);
2665 scid = __le16_to_cpu(rsp->scid);
2666 flags = __le16_to_cpu(rsp->flags);
2667 result = __le16_to_cpu(rsp->result);
2669 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2670 scid, flags, result);
2672 chan = l2cap_get_chan_by_scid(conn, scid);
2679 case L2CAP_CONF_SUCCESS:
2680 l2cap_conf_rfc_get(chan, rsp->data, len);
2683 case L2CAP_CONF_UNACCEPT:
2684 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2687 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2688 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2692 /* throw out any old stored conf requests */
2693 result = L2CAP_CONF_SUCCESS;
2694 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2697 l2cap_send_disconn_req(conn, chan, ECONNRESET); /* parse failed (guard elided) */
2701 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2702 L2CAP_CONF_REQ, len, req);
2703 chan->num_conf_req++;
2704 if (result != L2CAP_CONF_SUCCESS)
2710 sk->sk_err = ECONNRESET; /* default: any other result resets */
2711 __set_chan_timer(chan, HZ * 5);
2712 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2719 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2721 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2722 set_default_fcs(chan);
2724 l2cap_state_change(chan, BT_CONNECTED);
2725 chan->next_tx_seq = 0;
2726 chan->expected_tx_seq = 0;
2727 skb_queue_head_init(&chan->tx_q);
2728 if (chan->mode == L2CAP_MODE_ERTM)
2729 l2cap_ertm_init(chan);
2731 l2cap_chan_ready(sk);
/*
 * Handle an incoming Disconnection Request: look up the channel by our
 * scid (the peer's dcid), acknowledge with a Disconnection Response,
 * shut the socket down, and delete the channel -- deferred via a short
 * timer when userspace currently owns the socket lock.
 * NOTE(review): declarations of scid/dcid/sk, the lookup-failure
 * return, and bh_lock/unlock lines are elided.
 */
2739 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2741 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2742 struct l2cap_disconn_rsp rsp;
2744 struct l2cap_chan *chan;
2747 scid = __le16_to_cpu(req->scid);
2748 dcid = __le16_to_cpu(req->dcid);
2750 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2752 chan = l2cap_get_chan_by_scid(conn, dcid);
2758 rsp.dcid = cpu_to_le16(chan->scid);
2759 rsp.scid = cpu_to_le16(chan->dcid);
2760 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2762 sk->sk_shutdown = SHUTDOWN_MASK;
2764 /* don't delete l2cap channel if sk is owned by user */
2765 if (sock_owned_by_user(sk)) {
2766 l2cap_state_change(chan, BT_DISCONN);
2767 __clear_chan_timer(chan);
2768 __set_chan_timer(chan, HZ / 5);
2773 l2cap_chan_del(chan, ECONNRESET);
2776 chan->ops->close(chan->data);
/*
 * Handle a Disconnection Response to our request: find the channel by
 * scid and delete it (error code 0 -- this is a clean close), again
 * deferring via a short timer if userspace holds the socket lock.
 * NOTE(review): declarations of scid/dcid/sk, lookup-failure return
 * and bh_lock/unlock lines are elided.
 */
2780 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2782 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2784 struct l2cap_chan *chan;
2787 scid = __le16_to_cpu(rsp->scid);
2788 dcid = __le16_to_cpu(rsp->dcid);
2790 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2792 chan = l2cap_get_chan_by_scid(conn, scid);
2798 /* don't delete l2cap channel if sk is owned by user */
2799 if (sock_owned_by_user(sk)) {
2800 l2cap_state_change(chan,BT_DISCONN);
2801 __clear_chan_timer(chan);
2802 __set_chan_timer(chan, HZ / 5);
2807 l2cap_chan_del(chan, 0);
2810 chan->ops->close(chan->data);
/*
 * Answer an incoming Information Request: for IT_FEAT_MASK return our
 * feature mask (ERTM/streaming and, with high-speed, extended flow and
 * window bits -- the enable_hs guards are on elided lines); for
 * IT_FIXED_CHAN return the fixed-channel bitmap; anything else gets
 * IR_NOTSUPP.  The 'buf' declarations sized for each response are
 * elided between visible lines.
 */
2814 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2816 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2819 type = __le16_to_cpu(req->type);
2821 BT_DBG("type 0x%4.4x", type);
2823 if (type == L2CAP_IT_FEAT_MASK) {
2825 u32 feat_mask = l2cap_feat_mask;
2826 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2827 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2828 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2830 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2833 feat_mask |= L2CAP_FEAT_EXT_FLOW
2834 | L2CAP_FEAT_EXT_WINDOW;
2836 put_unaligned_le32(feat_mask, rsp->data);
2837 l2cap_send_cmd(conn, cmd->ident,
2838 L2CAP_INFO_RSP, sizeof(buf), buf);
2839 } else if (type == L2CAP_IT_FIXED_CHAN) {
2841 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2842 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2843 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2844 memcpy(buf + 4, l2cap_fixed_chan, 8); /* 8-byte fixed channel map after the 4-byte rsp header */
2845 l2cap_send_cmd(conn, cmd->ident,
2846 L2CAP_INFO_RSP, sizeof(buf), buf);
2848 struct l2cap_info_rsp rsp;
2849 rsp.type = cpu_to_le16(type);
2850 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2851 l2cap_send_cmd(conn, cmd->ident,
2852 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an Information Response to our request.  Ignores responses
 * with a stale ident or after the exchange is already done; otherwise
 * stops the info timer.  On a feature-mask answer, optionally chains a
 * fixed-channel query; once the exchange completes (failure, no
 * fixed-channel support, or fixed-channel answer) the connection's
 * pending channels are started.
 * NOTE(review): declarations of type/result and several return lines
 * are elided from this listing.
 */
2858 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2860 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2863 type = __le16_to_cpu(rsp->type);
2864 result = __le16_to_cpu(rsp->result);
2866 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
2868 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
2869 if (cmd->ident != conn->info_ident ||
2870 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
2873 del_timer(&conn->info_timer);
2875 if (result != L2CAP_IR_SUCCESS) {
2876 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2877 conn->info_ident = 0;
2879 l2cap_conn_start(conn);
2884 if (type == L2CAP_IT_FEAT_MASK) {
2885 conn->feat_mask = get_unaligned_le32(rsp->data);
2887 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
2888 struct l2cap_info_req req;
2889 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2891 conn->info_ident = l2cap_get_ident(conn);
2893 l2cap_send_cmd(conn, conn->info_ident,
2894 L2CAP_INFO_REQ, sizeof(req), &req);
2896 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2897 conn->info_ident = 0;
2899 l2cap_conn_start(conn);
2901 } else if (type == L2CAP_IT_FIXED_CHAN) {
2902 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2903 conn->info_ident = 0;
2905 l2cap_conn_start(conn);
/*
 * Validate LE connection-parameter update values (ranges per the spec:
 * interval 6..3200, supervision timeout 10..3200, and the timeout must
 * exceed 8x the max interval; latency capped at 499 and at the derived
 * max).  Error/success return values are on elided lines.
 */
2911 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
2916 if (min > max || min < 6 || max > 3200)
2919 if (to_multiplier < 10 || to_multiplier > 3200)
2922 if (max >= to_multiplier * 8)
2925 max_latency = (to_multiplier * 8 / max) - 1;
2926 if (latency > 499 || latency > max_latency)
/*
 * Handle an LE Connection Parameter Update Request (slave-initiated).
 * Only meaningful when we are master (rejection return elided) and the
 * command length matches.  Validates the parameters, always sends an
 * accept/reject response, and applies accepted parameters via
 * hci_le_conn_update().
 */
2932 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
2933 struct l2cap_cmd_hdr *cmd, u8 *data)
2935 struct hci_conn *hcon = conn->hcon;
2936 struct l2cap_conn_param_update_req *req;
2937 struct l2cap_conn_param_update_rsp rsp;
2938 u16 min, max, latency, to_multiplier, cmd_len;
2941 if (!(hcon->link_mode & HCI_LM_MASTER))
2944 cmd_len = __le16_to_cpu(cmd->len);
2945 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
2948 req = (struct l2cap_conn_param_update_req *) data;
2949 min = __le16_to_cpu(req->min);
2950 max = __le16_to_cpu(req->max);
2951 latency = __le16_to_cpu(req->latency);
2952 to_multiplier = __le16_to_cpu(req->to_multiplier);
2954 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
2955 min, max, latency, to_multiplier);
2957 memset(&rsp, 0, sizeof(rsp));
2959 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
2961 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
2963 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
2965 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
2969 hci_le_conn_update(hcon, min, max, latency, to_multiplier); /* only on accept (guard elided) */
/*
 * Dispatch one BR/EDR signalling command to its handler by opcode.
 * Echo requests are answered inline; unknown opcodes are logged and
 * return -EINVAL (on an elided line).  The aggregated 'err' is
 * returned at the end (also elided).
 */
2974 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
2975 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2979 switch (cmd->code) {
2980 case L2CAP_COMMAND_REJ:
2981 l2cap_command_rej(conn, cmd, data);
2984 case L2CAP_CONN_REQ:
2985 err = l2cap_connect_req(conn, cmd, data);
2988 case L2CAP_CONN_RSP:
2989 err = l2cap_connect_rsp(conn, cmd, data);
2992 case L2CAP_CONF_REQ:
2993 err = l2cap_config_req(conn, cmd, cmd_len, data);
2996 case L2CAP_CONF_RSP:
2997 err = l2cap_config_rsp(conn, cmd, data);
3000 case L2CAP_DISCONN_REQ:
3001 err = l2cap_disconnect_req(conn, cmd, data);
3004 case L2CAP_DISCONN_RSP:
3005 err = l2cap_disconnect_rsp(conn, cmd, data);
3008 case L2CAP_ECHO_REQ:
3009 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data); /* echo back payload as-is */
3012 case L2CAP_ECHO_RSP:
3015 case L2CAP_INFO_REQ:
3016 err = l2cap_information_req(conn, cmd, data);
3019 case L2CAP_INFO_RSP:
3020 err = l2cap_information_rsp(conn, cmd, data);
3024 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch a single LE signaling command.  Only the connection
 * parameter update request is actively handled; unknown opcodes are
 * logged so the caller can reject them.
 */
3032 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3033 struct l2cap_cmd_hdr *cmd, u8 *data)
3035 switch (cmd->code) {
3036 case L2CAP_COMMAND_REJ:
3039 case L2CAP_CONN_PARAM_UPDATE_REQ:
3040 return l2cap_conn_param_update_req(conn, cmd, data);
3042 case L2CAP_CONN_PARAM_UPDATE_RSP:
3046 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Process one skb received on the signaling CID.  Multiple commands may
 * be packed into a single C-frame; each is parsed, dispatched by link
 * type (LE vs BR/EDR) and, on handler failure, answered with a Command
 * Reject carrying the offending ident.
 */
3051 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3052 struct sk_buff *skb)
3054 u8 *data = skb->data;
3056 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signaling traffic. */
3059 l2cap_raw_recv(conn, skb);
3061 while (len >= L2CAP_CMD_HDR_SIZE) {
3063 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3064 data += L2CAP_CMD_HDR_SIZE;
3065 len -= L2CAP_CMD_HDR_SIZE;
3067 cmd_len = le16_to_cpu(cmd.len);
3069 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command claiming more payload than remains, or ident 0
 * (reserved), means the frame is corrupted: stop parsing. */
3071 if (cmd_len > len || !cmd.ident) {
3072 BT_DBG("corrupted command");
3076 if (conn->hcon->type == LE_LINK)
3077 err = l2cap_le_sig_cmd(conn, &cmd, data);
3079 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3082 struct l2cap_cmd_rej_unk rej;
3084 BT_ERR("Wrong link type (%d)", err);
3086 /* FIXME: Map err to a valid reason */
3087 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3088 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the 16-bit FCS trailing an ERTM/streaming frame.
 * The CRC covers the Basic L2CAP header plus the 2-byte control field
 * (hdr_size) in addition to the payload.  Returns non-zero on mismatch
 * (checked by callers), 0 when no FCS is in use or it matches.
 */
3098 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3100 u16 our_fcs, rcv_fcs;
3101 int hdr_size = L2CAP_HDR_SIZE + 2;
3103 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off the skb first; after the trim skb->len points
 * just past the payload, so the (still resident) FCS bytes can be
 * read at skb->data + skb->len. */
3104 skb_trim(skb, skb->len - 2);
3105 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3106 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3108 if (our_fcs != rcv_fcs)
/* After a poll, answer with whatever is appropriate: an RNR when we are
 * locally busy, pending I-frames (or retransmissions) otherwise, and a
 * plain RR only if nothing at all was sent so the peer still gets its
 * final/ack.
 */
3114 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3118 chan->frames_sent = 0;
3120 control |= __set_reqseq(chan, chan->buffer_seq);
3122 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Locally busy: tell the peer to stop sending (RNR). */
3123 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3124 l2cap_send_sframe(chan, control);
3125 set_bit(CONN_RNR_SENT, &chan->conn_state);
3128 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3129 l2cap_retransmit_frames(chan);
3131 l2cap_ertm_send(chan);
/* Nothing went out above, so an explicit RR is needed. */
3133 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3134 chan->frames_sent == 0) {
3135 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3136 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into chan->srej_q, keeping the
 * queue ordered by tx_seq distance from buffer_seq (modulo-64 sequence
 * space).  Duplicate tx_seq values are detected via the loop below and
 * rejected by the (elided) error path.
 */
3140 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3142 struct sk_buff *next_skb;
3143 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequencing metadata in the skb control block for reassembly. */
3145 bt_cb(skb)->tx_seq = tx_seq;
3146 bt_cb(skb)->sar = sar;
3148 next_skb = skb_peek(&chan->srej_q);
/* Empty queue: nothing to order against. */
3150 __skb_queue_tail(&chan->srej_q, skb);
/* Offsets are taken modulo the 64-entry sequence space and
 * normalized to be non-negative. */
3154 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3155 if (tx_seq_offset < 0)
3156 tx_seq_offset += 64;
/* Same tx_seq already queued: duplicate frame. */
3159 if (bt_cb(next_skb)->tx_seq == tx_seq)
3162 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3163 chan->buffer_seq) % 64;
3164 if (next_tx_seq_offset < 0)
3165 next_tx_seq_offset += 64;
/* Found the first queued frame that sorts after us: insert here. */
3167 if (next_tx_seq_offset > tx_seq_offset) {
3168 __skb_queue_before(&chan->srej_q, next_skb, skb);
3172 if (skb_queue_is_last(&chan->srej_q, next_skb))
3175 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Sorts after everything currently queued. */
3177 __skb_queue_tail(&chan->srej_q, skb);
/* Append new_frag to skb's frag_list, tracking the current tail in
 * *last_frag so each append is O(1), and keep the aggregate skb length
 * accounting (len / data_len / truesize) consistent.
 */
3182 static void append_skb_frag(struct sk_buff *skb,
3183 struct sk_buff *new_frag, struct sk_buff **last_frag)
3185 /* skb->len reflects data in skb as well as all fragments
3186 * skb->data_len reflects only data in fragments
/* First fragment: start the frag_list. */
3188 if (!skb_has_frag_list(skb))
3189 skb_shinfo(skb)->frag_list = new_frag;
3191 new_frag->next = NULL;
3193 (*last_frag)->next = new_frag;
3194 *last_frag = new_frag;
3196 skb->len += new_frag->len;
3197 skb->data_len += new_frag->len;
3198 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-tagged I-frames.  Unsegmented frames are
 * delivered immediately; START frames open a new partial SDU (sdu_len
 * taken from the first two payload bytes), CONTINUE/END frames are
 * appended via append_skb_frag(), and the completed SDU is handed to
 * chan->ops->recv().  Oversized or inconsistent SDUs fall through to
 * the cleanup path which frees the partial SDU.
 * NOTE(review): this listing is elided; several length checks and
 * return statements between the visible lines are not shown.
 */
3201 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u16 control)
3205 switch (__get_ctrl_sar(chan, control)) {
3206 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame: deliver directly. */
3210 err = chan->ops->recv(chan->data, skb);
3213 case L2CAP_SAR_START:
/* First two bytes of a START frame carry the total SDU length. */
3217 chan->sdu_len = get_unaligned_le16(skb->data);
/* SDU larger than our MTU: refuse it. */
3220 if (chan->sdu_len > chan->imtu) {
3225 if (skb->len >= chan->sdu_len)
3229 chan->sdu_last_frag = skb;
3235 case L2CAP_SAR_CONTINUE:
3239 append_skb_frag(chan->sdu, skb,
3240 &chan->sdu_last_frag);
/* A CONTINUE frame must not already complete the SDU. */
3243 if (chan->sdu->len >= chan->sdu_len)
3253 append_skb_frag(chan->sdu, skb,
3254 &chan->sdu_last_frag);
/* END frame: accumulated length must match the declared sdu_len. */
3257 if (chan->sdu->len != chan->sdu_len)
3260 err = chan->ops->recv(chan->data, chan->sdu);
3263 /* Reassembly complete */
3265 chan->sdu_last_frag = NULL;
/* Error path: drop the partially assembled SDU. */
3273 kfree_skb(chan->sdu);
3275 chan->sdu_last_frag = NULL;
/* Enter the ERTM local-busy state: flag it, send an RNR S-frame
 * acknowledging up to buffer_seq, remember that RNR was sent, and stop
 * the ack timer (no more acks until we exit busy).
 */
3282 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3286 BT_DBG("chan %p, Enter local busy", chan);
3288 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3290 control = __set_reqseq(chan, chan->buffer_seq);
3291 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3292 l2cap_send_sframe(chan, control);
3294 set_bit(CONN_RNR_SENT, &chan->conn_state);
3296 __clear_ack_timer(chan);
/* Leave the ERTM local-busy state.  If an RNR was sent, poll the peer
 * with an RR(P=1), arm the monitor timer and wait for the final bit;
 * in all cases clear the busy/RNR flags.
 */
3299 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3303 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
/* RNR went out earlier: poll so the peer resumes and acks. */
3306 control = __set_reqseq(chan, chan->buffer_seq)
3307 control |= L2CAP_CTRL_POLL;
3308 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3309 l2cap_send_sframe(chan, control);
3310 chan->retry_count = 1;
/* Poll sent: switch from retransmission to monitor timing. */
3312 __clear_retrans_timer(chan);
3313 __set_monitor_timer(chan);
3315 set_bit(CONN_WAIT_F, &chan->conn_state);
3318 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3319 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3321 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer to toggle local-busy flow control on
 * an ERTM channel (non-ERTM modes have no flow control to toggle).
 */
3324 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3326 if (chan->mode == L2CAP_MODE_ERTM) {
3328 l2cap_ertm_enter_local_busy(chan);
3330 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ queue after a gap closes: deliver queued in-order
 * frames (stopping while locally busy or at the next missing tx_seq),
 * advancing buffer_seq_srej for each one delivered.  A reassembly
 * failure tears the connection down.
 */
3334 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3336 struct sk_buff *skb;
3339 while ((skb = skb_peek(&chan->srej_q)) &&
3340 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Head of queue is not the next expected frame: still a gap. */
3343 if (bt_cb(skb)->tx_seq != tx_seq)
3346 skb = skb_dequeue(&chan->srej_q);
3347 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3348 err = l2cap_reassemble_sdu(chan, skb, control);
3351 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Sequence numbers live in a modulo-64 space. */
3355 chan->buffer_seq_srej =
3356 (chan->buffer_seq_srej + 1) % 64;
3357 tx_seq = (tx_seq + 1) % 64;
/* Re-send SREJ frames for every sequence number still outstanding on
 * chan->srej_l up to (and stopping at) tx_seq, re-queuing each entry at
 * the tail so the list keeps reflecting the outstanding order.
 */
3361 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3363 struct srej_list *l, *tmp;
3366 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Reached the frame that just arrived: done. */
3367 if (l->tx_seq == tx_seq) {
3372 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3373 control |= __set_reqseq(chan, l->tx_seq);
3374 l2cap_send_sframe(chan, control);
/* Still outstanding: move to the tail after resending. */
3376 list_add_tail(&l->list, &chan->srej_l);
3380 static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3382 struct srej_list *new;
3385 while (tx_seq != chan->expected_tx_seq) {
3386 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3387 control |= __set_reqseq(chan, chan->expected_tx_seq);
3388 l2cap_send_sframe(chan, control);
3390 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3391 new->tx_seq = chan->expected_tx_seq;
3392 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3393 list_add_tail(&new->list, &chan->srej_l);
3395 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
/* Core ERTM receive path for a single I-frame.  Handles, in order:
 * final-bit processing while waiting for F, acking via req_seq,
 * tx_seq window validation, SREJ bookkeeping (both while an SREJ
 * exchange is in progress and when a new gap is detected), in-order
 * delivery through l2cap_reassemble_sdu(), and ack scheduling.
 * NOTE(review): this listing is elided; gotos, labels and some error
 * paths between the visible lines are not shown.
 */
3398 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3400 u16 tx_seq = __get_txseq(chan, rx_control);
3401 u16 req_seq = __get_reqseq(chan, rx_control);
3402 u8 sar = __get_ctrl_sar(chan, rx_control);
3403 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every tx_win/6 frames so acks keep pace with traffic. */
3404 int num_to_ack = (chan->tx_win/6) + 1;
3407 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%4.4x", chan, skb->len,
3408 tx_seq, rx_control);
/* Final bit while we were waiting for it: stop the monitor timer
 * and fall back to the retransmission timer if frames are unacked. */
3410 if (L2CAP_CTRL_FINAL & rx_control &&
3411 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3412 __clear_monitor_timer(chan);
3413 if (chan->unacked_frames > 0)
3414 __set_retrans_timer(chan);
3415 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* The piggybacked req_seq acknowledges our outstanding I-frames. */
3418 chan->expected_ack_seq = req_seq;
3419 l2cap_drop_acked_frames(chan);
3421 tx_seq_offset = (tx_seq - chan->buffer_seq) % 64;
3422 if (tx_seq_offset < 0)
3423 tx_seq_offset += 64;
3425 /* invalid tx_seq */
3426 if (tx_seq_offset >= chan->tx_win) {
3427 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3431 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3434 if (tx_seq == chan->expected_tx_seq)
/* An SREJ exchange is in flight: this frame either fills the
 * oldest gap, duplicates a queued frame, or opens a new gap. */
3437 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3438 struct srej_list *first;
3440 first = list_first_entry(&chan->srej_l,
3441 struct srej_list, list);
3442 if (tx_seq == first->tx_seq) {
3443 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3444 l2cap_check_srej_gap(chan, tx_seq);
3446 list_del(&first->list);
/* All SREJs satisfied: leave the SREJ_SENT state. */
3449 if (list_empty(&chan->srej_l)) {
3450 chan->buffer_seq = chan->buffer_seq_srej;
3451 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3452 l2cap_send_ack(chan);
3453 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3456 struct srej_list *l;
3458 /* duplicated tx_seq */
3459 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3462 list_for_each_entry(l, &chan->srej_l, list) {
3463 if (l->tx_seq == tx_seq) {
3464 l2cap_resend_srejframe(chan, tx_seq);
3468 l2cap_send_srejframe(chan, tx_seq);
/* No SREJ in flight yet: decide duplicate vs. new gap. */
3471 expected_tx_seq_offset =
3472 (chan->expected_tx_seq - chan->buffer_seq) % 64;
3473 if (expected_tx_seq_offset < 0)
3474 expected_tx_seq_offset += 64;
3476 /* duplicated tx_seq */
3477 if (tx_seq_offset < expected_tx_seq_offset)
3480 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3482 BT_DBG("chan %p, Enter SREJ", chan);
3484 INIT_LIST_HEAD(&chan->srej_l);
3485 chan->buffer_seq_srej = chan->buffer_seq;
3487 __skb_queue_head_init(&chan->srej_q);
3488 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3490 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3492 l2cap_send_srejframe(chan, tx_seq);
3494 __clear_ack_timer(chan);
/* In-sequence frame: advance and deliver (or queue under SREJ). */
3499 chan->expected_tx_seq = (chan->expected_tx_seq + 1) % 64;
3501 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3502 bt_cb(skb)->tx_seq = tx_seq;
3503 bt_cb(skb)->sar = sar;
3504 __skb_queue_tail(&chan->srej_q, skb);
3508 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3509 chan->buffer_seq = (chan->buffer_seq + 1) % 64;
3511 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3515 if (rx_control & L2CAP_CTRL_FINAL) {
3516 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3517 l2cap_retransmit_frames(chan);
3520 __set_ack_timer(chan);
3522 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3523 if (chan->num_acked == num_to_ack - 1)
3524 l2cap_send_ack(chan);
/* Handle an RR (Receiver Ready) S-frame: ack outstanding I-frames via
 * req_seq, then react to the poll/final bits — a poll demands a reply
 * (SREJ tail or I/RR/RNR), a final clears remote-busy and may trigger
 * retransmission, and otherwise pending I-frames are (re)sent.
 */
3533 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u16 rx_control)
3535 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan,
3536 __get_reqseq(chan, rx_control), rx_control);
3538 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3539 l2cap_drop_acked_frames(chan);
3541 if (rx_control & L2CAP_CTRL_POLL) {
/* Peer polled us: our answer must carry the final bit. */
3542 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3543 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3544 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3545 (chan->unacked_frames > 0))
3546 __set_retrans_timer(chan);
3548 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3549 l2cap_send_srejtail(chan);
3551 l2cap_send_i_or_rr_or_rnr(chan);
3554 } else if (rx_control & L2CAP_CTRL_FINAL) {
3555 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* Final answers our earlier poll; retransmit unless a REJ
 * already caused the retransmission. */
3557 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3558 l2cap_retransmit_frames(chan);
3561 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3562 (chan->unacked_frames > 0))
3563 __set_retrans_timer(chan);
3565 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3566 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3567 l2cap_send_ack(chan);
3569 l2cap_ertm_send(chan);
/* Handle a REJ S-frame: the peer rejects everything from req_seq on.
 * Ack up to that point, then retransmit — guarded by CONN_REJ_ACT when
 * the final bit is set so the same REJ isn't acted on twice.
 */
3573 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u16 rx_control)
3575 u16 tx_seq = __get_reqseq(chan, rx_control);
3577 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3579 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3581 chan->expected_ack_seq = tx_seq;
3582 l2cap_drop_acked_frames(chan);
3584 if (rx_control & L2CAP_CTRL_FINAL) {
3585 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3586 l2cap_retransmit_frames(chan);
3588 l2cap_retransmit_frames(chan);
/* Remember the REJ while a poll is outstanding so the coming
 * final does not retransmit a second time. */
3590 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3591 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle an SREJ S-frame: selectively retransmit the single I-frame the
 * peer asked for.  The poll/final bits determine the extra work: poll
 * also acks and demands a final-bit reply, final may just acknowledge
 * an earlier SREJ we already acted on (CONN_SREJ_ACT / srej_save_reqseq
 * dedupe that).
 */
3594 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u16 rx_control)
3596 u16 tx_seq = __get_reqseq(chan, rx_control);
3598 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3600 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3602 if (rx_control & L2CAP_CTRL_POLL) {
3603 chan->expected_ack_seq = tx_seq;
3604 l2cap_drop_acked_frames(chan);
3606 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3607 l2cap_retransmit_one_frame(chan, tx_seq);
3609 l2cap_ertm_send(chan);
3611 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3612 chan->srej_save_reqseq = tx_seq;
3613 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3615 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* Final for an SREJ we already honored: only clear the flag. */
3616 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3617 chan->srej_save_reqseq == tx_seq)
3618 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3620 l2cap_retransmit_one_frame(chan, tx_seq);
3622 l2cap_retransmit_one_frame(chan, tx_seq);
3623 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3624 chan->srej_save_reqseq = tx_seq;
3625 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle an RNR (Receiver Not Ready) S-frame: mark the peer busy, ack
 * up to req_seq, stop retransmitting, and — if polled — reply with the
 * required final-bit frame (RR/RNR, or the SREJ tail when an SREJ
 * exchange is in progress).
 */
3630 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u16 rx_control)
3632 u16 tx_seq = __get_reqseq(chan, rx_control);
3634 BT_DBG("chan %p, req_seq %d ctrl 0x%4.4x", chan, tx_seq, rx_control);
3636 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3637 chan->expected_ack_seq = tx_seq;
3638 l2cap_drop_acked_frames(chan);
3640 if (rx_control & L2CAP_CTRL_POLL)
3641 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3643 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* Peer is busy: no point running the retransmission timer. */
3644 __clear_retrans_timer(chan);
3645 if (rx_control & L2CAP_CTRL_POLL)
3646 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3650 if (rx_control & L2CAP_CTRL_POLL) {
3651 l2cap_send_srejtail(chan);
3653 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
3654 l2cap_send_sframe(chan, rx_control);
/* Dispatch a supervisory (S) frame by its super bits after common
 * final-bit processing (stop the monitor timer, resume retransmission
 * timing if frames remain unacked).
 */
3658 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u16 rx_control, struct sk_buff *skb)
3660 BT_DBG("chan %p rx_control 0x%4.4x len %d", chan, rx_control, skb->len);
3662 if (L2CAP_CTRL_FINAL & rx_control &&
3663 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3664 __clear_monitor_timer(chan);
3665 if (chan->unacked_frames > 0)
3666 __set_retrans_timer(chan);
3667 clear_bit(CONN_WAIT_F, &chan->conn_state);
3670 switch (__get_ctrl_super(chan, rx_control)) {
3671 case L2CAP_SUPER_RR:
3672 l2cap_data_channel_rrframe(chan, rx_control);
3675 case L2CAP_SUPER_REJ:
3676 l2cap_data_channel_rejframe(chan, rx_control);
3679 case L2CAP_SUPER_SREJ:
3680 l2cap_data_channel_srejframe(chan, rx_control);
3683 case L2CAP_SUPER_RNR:
3684 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for a frame arriving on an ERTM channel (also called from
 * the socket backlog).  Validates FCS, control-field consistency,
 * payload length against MPS, and req_seq sanity, then routes the frame
 * to the I-frame or S-frame handler.  Protocol violations tear the
 * connection down with ECONNRESET.
 * NOTE(review): this listing is elided; len computation and several
 * drop/goto paths between the visible lines are not shown.
 */
3692 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3694 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3697 int len, next_tx_seq_offset, req_seq_offset;
3699 control = get_unaligned_le16(skb->data);
3704 * We can just drop the corrupted I-frame here.
3705 * Receiver will miss it and start proper recovery
3706 * procedures and ask retransmission.
3708 if (l2cap_check_fcs(chan, skb))
/* A SAR start flag on a supervisory frame is inconsistent. */
3711 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
3714 if (chan->fcs == L2CAP_FCS_CRC16)
3717 if (len > chan->mps) {
3718 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* req_seq must lie between expected_ack_seq and next_tx_seq
 * (offsets computed in the modulo-64 sequence space). */
3722 req_seq = __get_reqseq(chan, control);
3723 req_seq_offset = (req_seq - chan->expected_ack_seq) % 64;
3724 if (req_seq_offset < 0)
3725 req_seq_offset += 64;
3727 next_tx_seq_offset =
3728 (chan->next_tx_seq - chan->expected_ack_seq) % 64;
3729 if (next_tx_seq_offset < 0)
3730 next_tx_seq_offset += 64;
3732 /* check for invalid req-seq */
3733 if (req_seq_offset > next_tx_seq_offset) {
3734 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3738 if (!__is_sframe(chan, control)) {
3740 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3744 l2cap_data_channel_iframe(chan, control, skb);
/* S-frames must carry no payload beyond control/FCS. */
3748 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3752 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver an incoming data frame to the channel bound to this CID,
 * dispatching by channel mode: basic (direct delivery), ERTM (via
 * l2cap_ertm_data_rcv or the socket backlog when the socket is owned),
 * or streaming (sequence check + reassembly, missing frames discard
 * the partial SDU).
 * NOTE(review): this listing is elided; lock/unlock, drop labels and
 * several return paths between the visible lines are not shown.
 */
3762 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3764 struct l2cap_chan *chan;
3765 struct sock *sk = NULL;
3770 chan = l2cap_get_chan_by_scid(conn, cid);
3772 BT_DBG("unknown cid 0x%4.4x", cid);
3778 BT_DBG("chan %p, len %d", chan, skb->len);
3780 if (chan->state != BT_CONNECTED)
3783 switch (chan->mode) {
3784 case L2CAP_MODE_BASIC:
3785 /* If socket recv buffers overflows we drop data here
3786 * which is *bad* because L2CAP has to be reliable.
3787 * But we don't have any other choice. L2CAP doesn't
3788 * provide flow control mechanism. */
3790 if (chan->imtu < skb->len)
3793 if (!chan->ops->recv(chan->data, skb))
3797 case L2CAP_MODE_ERTM:
/* Socket busy in user context: defer via the backlog. */
3798 if (!sock_owned_by_user(sk)) {
3799 l2cap_ertm_data_rcv(sk, skb);
3801 if (sk_add_backlog(sk, skb))
3807 case L2CAP_MODE_STREAMING:
3808 control = get_unaligned_le16(skb->data);
3812 if (l2cap_check_fcs(chan, skb))
3815 if (__is_sar_start(chan, control))
3818 if (chan->fcs == L2CAP_FCS_CRC16)
3821 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
3824 tx_seq = __get_txseq(chan, control);
3826 if (chan->expected_tx_seq != tx_seq) {
3827 /* Frame(s) missing - must discard partial SDU */
3828 kfree_skb(chan->sdu);
3830 chan->sdu_last_frag = NULL;
3833 /* TODO: Notify userland of missing data */
3836 chan->expected_tx_seq = (tx_seq + 1) % 64;
3838 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
3839 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3844 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (G-frame) payload to the channel listening
 * on the given PSM, subject to state and incoming-MTU checks.
 */
3858 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3860 struct sock *sk = NULL;
3861 struct l2cap_chan *chan;
3863 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
3871 BT_DBG("sk %p, len %d", sk, skb->len);
3873 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3876 if (chan->imtu < skb->len)
3879 if (!chan->ops->recv(chan->data, skb))
/* Deliver an LE ATT-channel frame to the channel bound to the fixed
 * CID, mirroring the connectionless delivery checks (state and MTU).
 */
3891 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
3893 struct sock *sk = NULL;
3894 struct l2cap_chan *chan;
3896 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
3904 BT_DBG("sk %p, len %d", sk, skb->len);
3906 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
3909 if (chan->imtu < skb->len)
3912 if (!chan->ops->recv(chan->data, skb))
/* Route a complete L2CAP frame by destination CID: signaling (BR/EDR
 * and LE), connectionless (PSM read from the payload), LE data/ATT,
 * SMP, or an ordinary data channel.
 */
3924 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3926 struct l2cap_hdr *lh = (void *) skb->data;
/* Strip the Basic L2CAP header; cid/len already read from lh. */
3930 skb_pull(skb, L2CAP_HDR_SIZE);
3931 cid = __le16_to_cpu(lh->cid);
3932 len = __le16_to_cpu(lh->len);
/* Header length must match what actually arrived. */
3934 if (len != skb->len) {
3939 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3942 case L2CAP_CID_LE_SIGNALING:
3943 case L2CAP_CID_SIGNALING:
3944 l2cap_sig_channel(conn, skb);
3947 case L2CAP_CID_CONN_LESS:
3948 psm = get_unaligned_le16(skb->data);
3950 l2cap_conless_channel(conn, psm, skb);
3953 case L2CAP_CID_LE_DATA:
3954 l2cap_att_channel(conn, cid, skb);
/* SMP rejection is fatal for the connection. */
3958 if (smp_sig_channel(conn, skb))
3959 l2cap_conn_del(conn->hcon, EACCES);
3963 l2cap_data_channel(conn, cid, skb);
3968 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * bdaddr?  Scans listening channels; an exact local-address match (lm1)
 * takes precedence over BDADDR_ANY listeners (lm2).  The returned link
 * mode may include HCI_LM_MASTER when a listener requests role switch.
 */
3970 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3972 int exact = 0, lm1 = 0, lm2 = 0;
3973 struct l2cap_chan *c;
3975 if (type != ACL_LINK)
3978 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3980 /* Find listening sockets and check their link_mode */
3981 read_lock(&chan_list_lock);
3982 list_for_each_entry(c, &chan_list, global_l) {
3983 struct sock *sk = c->sk;
3985 if (c->state != BT_LISTEN)
/* Listener bound to this adapter's own address. */
3988 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3989 lm1 |= HCI_LM_ACCEPT;
3990 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
3991 lm1 |= HCI_LM_MASTER;
/* Wildcard listener: only used if no exact match exists. */
3993 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3994 lm2 |= HCI_LM_ACCEPT;
3995 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
3996 lm2 |= HCI_LM_MASTER;
3999 read_unlock(&chan_list_lock);
4001 return exact ? lm1 : lm2;
/* HCI callback: an ACL/LE link came up (or failed).  On success attach
 * an l2cap_conn and kick pending channels; on failure tear everything
 * down with the HCI status translated to an errno.
 */
4004 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4006 struct l2cap_conn *conn;
4008 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4010 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4014 conn = l2cap_conn_add(hcon, status);
4016 l2cap_conn_ready(conn);
4018 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the disconnect reason recorded on the l2cap_conn
 * so the controller can use it for the HCI Disconnect command.
 */
4023 static int l2cap_disconn_ind(struct hci_conn *hcon)
4025 struct l2cap_conn *conn = hcon->l2cap_data;
4027 BT_DBG("hcon %p", hcon);
4029 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4032 return conn->disc_reason;
/* HCI callback: the link went down — tear down the l2cap_conn and all
 * of its channels, translating the HCI reason to an errno.
 */
4035 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4037 BT_DBG("hcon %p reason %d", hcon, reason);
4039 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4042 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to a change in link encryption for one connection-oriented
 * channel: losing encryption gives MEDIUM-security channels a 5 s grace
 * timer and closes HIGH-security channels outright; (re)gaining it
 * cancels the grace timer for MEDIUM channels.
 */
4047 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4049 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4052 if (encrypt == 0x00) {
4053 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4054 __clear_chan_timer(chan);
4055 __set_chan_timer(chan, HZ * 5);
4056 } else if (chan->sec_level == BT_SECURITY_HIGH)
4057 l2cap_chan_close(chan, ECONNREFUSED);
4059 if (chan->sec_level == BT_SECURITY_MEDIUM)
4060 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption completed with the given
 * status.  For LE links, hand off to SMP key distribution.  For every
 * channel on the connection: LE data channels become ready on success;
 * established BR/EDR channels re-check encryption requirements;
 * channels mid-connect either proceed (send CONN_REQ), answer a held
 * CONN_RSP (possibly deferred to userspace), or are torn down on
 * security failure.
 * NOTE(review): this listing is elided; bh_lock/unlock pairs and some
 * branches between the visible lines are not shown.
 */
4064 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4066 struct l2cap_conn *conn = hcon->l2cap_data;
4067 struct l2cap_chan *chan;
4072 BT_DBG("conn %p", conn);
4074 if (hcon->type == LE_LINK) {
4075 smp_distribute_keys(conn, 0);
4076 del_timer(&conn->security_timer);
4079 read_lock(&conn->chan_lock);
4081 list_for_each_entry(chan, &conn->chan_l, list) {
4082 struct sock *sk = chan->sk;
4086 BT_DBG("chan->scid %d", chan->scid);
4088 if (chan->scid == L2CAP_CID_LE_DATA) {
4089 if (!status && encrypt) {
4090 chan->sec_level = hcon->sec_level;
4091 l2cap_chan_ready(sk);
4098 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4103 if (!status && (chan->state == BT_CONNECTED ||
4104 chan->state == BT_CONFIG)) {
4105 l2cap_check_encryption(chan, encrypt);
/* Security completed while we were waiting to send our
 * connect request: send it now. */
4110 if (chan->state == BT_CONNECT) {
4112 struct l2cap_conn_req req;
4113 req.scid = cpu_to_le16(chan->scid);
4114 req.psm = chan->psm;
4116 chan->ident = l2cap_get_ident(conn);
4117 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4119 l2cap_send_cmd(conn, chan->ident,
4120 L2CAP_CONN_REQ, sizeof(req), &req);
4122 __clear_chan_timer(chan);
4123 __set_chan_timer(chan, HZ / 10);
/* Incoming connection held for security: answer it. */
4125 } else if (chan->state == BT_CONNECT2) {
4126 struct l2cap_conn_rsp rsp;
4130 if (bt_sk(sk)->defer_setup) {
4131 struct sock *parent = bt_sk(sk)->parent;
4132 res = L2CAP_CR_PEND;
4133 stat = L2CAP_CS_AUTHOR_PEND;
/* Wake the listening socket so userspace can accept(). */
4135 parent->sk_data_ready(parent, 0);
4137 l2cap_state_change(chan, BT_CONFIG);
4138 res = L2CAP_CR_SUCCESS;
4139 stat = L2CAP_CS_NO_INFO;
/* Security failed: block the connection. */
4142 l2cap_state_change(chan, BT_DISCONN);
4143 __set_chan_timer(chan, HZ / 10);
4144 res = L2CAP_CR_SEC_BLOCK;
4145 stat = L2CAP_CS_NO_INFO;
4148 rsp.scid = cpu_to_le16(chan->dcid);
4149 rsp.dcid = cpu_to_le16(chan->scid);
4150 rsp.result = cpu_to_le16(res);
4151 rsp.status = cpu_to_le16(stat);
4152 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4159 read_unlock(&conn->chan_lock);
/* HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * A start fragment (no ACL_CONT) must begin with the Basic L2CAP
 * header; its declared length decides whether the frame is complete,
 * too long (drop), or needs an rx_skb accumulator.  Continuation
 * fragments are appended until rx_len reaches zero, then the whole
 * frame is delivered to l2cap_recv_frame().  Any inconsistency marks
 * the connection unreliable (ECOMM).
 * NOTE(review): this listing is elided; goto drop labels and some
 * cleanup between the visible lines are not shown.
 */
4164 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4166 struct l2cap_conn *conn = hcon->l2cap_data;
4169 conn = l2cap_conn_add(hcon, 0);
4174 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4176 if (!(flags & ACL_CONT)) {
4177 struct l2cap_hdr *hdr;
4178 struct l2cap_chan *chan;
/* New start fragment while one was pending: drop the old one. */
4183 BT_ERR("Unexpected start frame (len %d)", skb->len);
4184 kfree_skb(conn->rx_skb);
4185 conn->rx_skb = NULL;
4187 l2cap_conn_unreliable(conn, ECOMM);
4190 /* Start fragment always begin with Basic L2CAP header */
4191 if (skb->len < L2CAP_HDR_SIZE) {
4192 BT_ERR("Frame is too short (len %d)", skb->len);
4193 l2cap_conn_unreliable(conn, ECOMM);
4197 hdr = (struct l2cap_hdr *) skb->data;
4198 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4199 cid = __le16_to_cpu(hdr->cid);
4201 if (len == skb->len) {
4202 /* Complete frame received */
4203 l2cap_recv_frame(conn, skb);
4207 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4209 if (skb->len > len) {
4210 BT_ERR("Frame is too long (len %d, expected len %d)",
4212 l2cap_conn_unreliable(conn, ECOMM);
/* Early MTU check so we do not buffer a frame the channel
 * will refuse anyway. */
4216 chan = l2cap_get_chan_by_scid(conn, cid);
4218 if (chan && chan->sk) {
4219 struct sock *sk = chan->sk;
4221 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4222 BT_ERR("Frame exceeding recv MTU (len %d, "
4226 l2cap_conn_unreliable(conn, ECOMM);
4232 /* Allocate skb for the complete frame (with header) */
4233 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4237 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4239 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4241 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4243 if (!conn->rx_len) {
4244 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4245 l2cap_conn_unreliable(conn, ECOMM);
4249 if (skb->len > conn->rx_len) {
4250 BT_ERR("Fragment is too long (len %d, expected %d)",
4251 skb->len, conn->rx_len);
4252 kfree_skb(conn->rx_skb);
4253 conn->rx_skb = NULL;
4255 l2cap_conn_unreliable(conn, ECOMM);
4259 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4261 conn->rx_len -= skb->len;
4263 if (!conn->rx_len) {
4264 /* Complete frame received */
4265 l2cap_recv_frame(conn, conn->rx_skb);
4266 conn->rx_skb = NULL;
/* debugfs seq_file show: dump one line per channel on the global list
 * (addresses, state, PSM, CIDs, MTUs, security level, mode).
 */
4275 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4277 struct l2cap_chan *c;
4279 read_lock_bh(&chan_list_lock);
4281 list_for_each_entry(c, &chan_list, global_l) {
4282 struct sock *sk = c->sk;
4284 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4285 batostr(&bt_sk(sk)->src),
4286 batostr(&bt_sk(sk)->dst),
4287 c->state, __le16_to_cpu(c->psm),
4288 c->scid, c->dcid, c->imtu, c->omtu,
4289 c->sec_level, c->mode);
4292 read_unlock_bh(&chan_list_lock);
/* debugfs open: standard single_open wrapper around the show routine. */
4297 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4299 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based). */
4302 static const struct file_operations l2cap_debugfs_fops = {
4303 .open = l2cap_debugfs_open,
4305 .llseek = seq_lseek,
4306 .release = single_release,
/* Dentry of the debugfs file, kept for removal in l2cap_exit(). */
4309 static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core: connection
 * accept/confirm, disconnect, security and ACL data callbacks.
 */
4311 static struct hci_proto l2cap_hci_proto = {
4313 .id = HCI_PROTO_L2CAP,
4314 .connect_ind = l2cap_connect_ind,
4315 .connect_cfm = l2cap_connect_cfm,
4316 .disconn_ind = l2cap_disconn_ind,
4317 .disconn_cfm = l2cap_disconn_cfm,
4318 .security_cfm = l2cap_security_cfm,
4319 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket family, hook into HCI, and create
 * the debugfs entry (debugfs failure is only logged, not fatal).
 * NOTE(review): this listing is elided; the error unwind between the
 * visible lines is not fully shown.
 */
4322 int __init l2cap_init(void)
4326 err = l2cap_init_sockets();
4330 err = hci_register_proto(&l2cap_hci_proto);
/* HCI registration failed: undo the socket registration. */
4332 BT_ERR("L2CAP protocol registration failed");
4333 bt_sock_unregister(BTPROTO_L2CAP);
4338 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4339 bt_debugfs, NULL, &l2cap_debugfs_fops);
4341 BT_ERR("Failed to create L2CAP debug file");
4347 l2cap_cleanup_sockets();
/* Module exit: undo l2cap_init() in reverse order. */
4351 void l2cap_exit(void)
4353 debugfs_remove(l2cap_debugfs);
4355 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4356 BT_ERR("L2CAP protocol unregistration failed");
4358 l2cap_cleanup_sockets();
/* Runtime-tunable module parameters (visible under /sys/module). */
4361 module_param(disable_ertm, bool, 0644);
4362 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4364 module_param(enable_hs, bool, 0644);
4365 MODULE_PARM_DESC(enable_hs, "Enable High Speed");