2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
78 /* ---- L2CAP channels ---- */
/* ---- Channel refcounting and per-connection/global lookups ---- */

/* Take a reference on a channel. */
80 static inline void chan_hold(struct l2cap_chan *c)
82 atomic_inc(&c->refcnt);

/* Drop a reference; on the last put the channel is presumably freed
 * (free call not visible in this excerpt — confirm against full source). */
85 static inline void chan_put(struct l2cap_chan *c)
87 if (atomic_dec_and_test(&c->refcnt))

/* Find the channel on @conn with destination CID @cid.
 * Caller must hold conn->chan_lock. */
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {

/* Find the channel on @conn with source CID @cid.
 * Caller must hold conn->chan_lock. */
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
105 struct l2cap_chan *c;
107 list_for_each_entry(c, &conn->chan_l, list) {

114 /* Find channel with given SCID.
115 * Returns locked socket */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
118 struct l2cap_chan *c;
/* Lock-taking wrapper around __l2cap_get_chan_by_scid(). */
120 read_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
124 read_unlock(&conn->chan_lock);

/* Find the channel on @conn whose pending signalling command used @ident.
 * Caller must hold conn->chan_lock. */
128 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident)

/* Lock-taking wrapper around __l2cap_get_chan_by_ident(). */
139 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
141 struct l2cap_chan *c;
143 read_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_ident(conn, ident);
147 read_unlock(&conn->chan_lock);

/* Global (cross-connection) lookup: channel bound to source PSM @psm on
 * local address @src.  Caller must hold chan_list_lock. */
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
153 struct l2cap_chan *c;
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on @src, or auto-allocate a dynamic PSM when the
 * caller's request leaves room for it.  Serialized by chan_list_lock. */
165 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
169 write_lock_bh(&chan_list_lock);
/* Reject an explicit PSM that is already bound on this source address. */
171 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* Scan 0x1001..0x10ff stepping by 2, so candidate PSMs stay odd;
 * take the first value with no existing binding on @src. */
184 for (p = 0x1001; p < 0x1100; p += 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
186 chan->psm = cpu_to_le16(p);
187 chan->sport = cpu_to_le16(p);
194 write_unlock_bh(&chan_list_lock);

/* Record a fixed source CID on @chan (assignment elided in this excerpt),
 * under chan_list_lock. */
198 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
200 write_lock_bh(&chan_list_lock);
204 write_unlock_bh(&chan_list_lock);

/* Return the first unused dynamic CID on @conn.
 * Caller must hold conn->chan_lock. */
209 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
211 u16 cid = L2CAP_CID_DYN_START;
213 for (; cid < L2CAP_CID_DYN_END; cid++) {
214 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm @timer to fire @timeout ms from now. */
221 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
/* NOTE(review): debug line prints chan->sk under the "chan %p" label —
 * harmless, but inconsistent with l2cap_clear_timer() below. */
223 BT_DBG("chan %p state %d timeout %ld", chan->sk, chan->state, timeout);
225 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))

/* Cancel @timer if it is still pending; the elided branch body presumably
 * drops the reference the armed timer held — confirm against full source. */
229 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
231 BT_DBG("chan %p state %d", chan, chan->state);
233 if (timer_pending(timer) && del_timer(timer))

/* Move @chan to @state and notify the owner via its state_change callback. */
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
240 chan->ops->state_change(chan->data, state);
/* Channel timer expiry handler (runs in timer context).
 * Derives a close reason from the channel state and tears the channel
 * down; if user space currently owns the socket, retry shortly instead
 * of racing with it. */
243 static void l2cap_chan_timeout(unsigned long arg)
245 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
246 struct sock *sk = chan->sk;
249 BT_DBG("chan %p state %d", chan, chan->state);
253 if (sock_owned_by_user(sk)) {
254 /* sk is owned by user. Try again later */
255 __set_chan_timer(chan, HZ / 5);
/* Established/configuring channels and connect attempts beyond the SDP
 * security level time out as ECONNREFUSED. */
261 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
262 reason = ECONNREFUSED;
263 else if (chan->state == BT_CONNECT &&
264 chan->sec_level != BT_SECURITY_SDP)
265 reason = ECONNREFUSED;
269 l2cap_chan_close(chan, reason);
273 chan->ops->close(chan->data);
/* Allocate a channel bound to @sk, link it on the global channel list,
 * arm its state timer and take the initial reference.
 * GFP_ATOMIC: may be called from non-sleepable context. */
277 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
279 struct l2cap_chan *chan;
281 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
287 write_lock_bh(&chan_list_lock);
288 list_add(&chan->global_l, &chan_list);
289 write_unlock_bh(&chan_list_lock);
291 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
293 chan->state = BT_OPEN;
295 atomic_set(&chan->refcnt, 1);

/* Unlink @chan from the global list; the final reference drop is not
 * visible in this excerpt. */
300 void l2cap_chan_destroy(struct l2cap_chan *chan)
302 write_lock_bh(&chan_list_lock);
303 list_del(&chan->global_l);
304 write_unlock_bh(&chan_list_lock);
/* Attach @chan to @conn: pick SCID/DCID and default MTU by channel type,
 * seed best-effort extended-flowspec defaults, and link the channel into
 * the connection's list.  Caller must hold conn->chan_lock.
 * NOTE(review): 0x13 looks like the HCI "remote user terminated"
 * disconnect reason default — confirm. */
309 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
311 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
312 chan->psm, chan->dcid);
314 conn->disc_reason = 0x13;
318 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
/* LE links use the fixed LE data CID in both directions. */
319 if (conn->hcon->type == LE_LINK) {
321 chan->omtu = L2CAP_LE_DEFAULT_MTU;
322 chan->scid = L2CAP_CID_LE_DATA;
323 chan->dcid = L2CAP_CID_LE_DATA;
325 /* Alloc CID for connection-oriented socket */
326 chan->scid = l2cap_alloc_cid(conn);
327 chan->omtu = L2CAP_DEFAULT_MTU;
329 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
330 /* Connectionless socket */
331 chan->scid = L2CAP_CID_CONN_LESS;
332 chan->dcid = L2CAP_CID_CONN_LESS;
333 chan->omtu = L2CAP_DEFAULT_MTU;
335 /* Raw socket can send/recv signalling messages only */
336 chan->scid = L2CAP_CID_SIGNALING;
337 chan->dcid = L2CAP_CID_SIGNALING;
338 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default local QoS: best-effort service with spec default limits. */
341 chan->local_id = L2CAP_BESTEFFORT_ID;
342 chan->local_stype = L2CAP_SERV_BESTEFFORT;
343 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
344 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
345 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
346 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
350 list_add(&chan->list, &conn->chan_l);
354 * Must be called on the locked socket. */
/* Detach @chan from its connection and mark the socket closed:
 * stop the state timer, unlink from the connection list, drop the
 * hci_conn reference, notify the parent (for accepted children) and
 * purge ERTM queues/timers when applicable. */
355 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
357 struct sock *sk = chan->sk;
358 struct l2cap_conn *conn = chan->conn;
359 struct sock *parent = bt_sk(sk)->parent;
361 __clear_chan_timer(chan);
363 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
366 /* Delete from channel list */
367 write_lock_bh(&conn->chan_lock);
368 list_del(&chan->list);
369 write_unlock_bh(&conn->chan_lock);
373 hci_conn_put(conn->hcon);
376 l2cap_state_change(chan, BT_CLOSED);
377 sock_set_flag(sk, SOCK_ZAPPED);
/* Child of a listening socket: unlink from accept queue, wake parent. */
383 bt_accept_unlink(sk);
384 parent->sk_data_ready(parent, 0);
386 sk->sk_state_change(sk);
/* If configuration never completed, skip the ERTM teardown below. */
388 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
389 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
392 skb_queue_purge(&chan->tx_q);
394 if (chan->mode == L2CAP_MODE_ERTM) {
395 struct srej_list *l, *tmp;
397 __clear_retrans_timer(chan);
398 __clear_monitor_timer(chan);
399 __clear_ack_timer(chan);
401 skb_queue_purge(&chan->srej_q);
/* Free any outstanding SREJ bookkeeping entries. */
403 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel of listening socket @parent. */
410 static void l2cap_chan_cleanup_listen(struct sock *parent)
414 BT_DBG("parent %p", parent);
416 /* Close not yet accepted channels */
417 while ((sk = bt_accept_dequeue(parent, NULL))) {
418 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
419 __clear_chan_timer(chan);
421 l2cap_chan_close(chan, ECONNRESET);
423 chan->ops->close(chan->data);

/* Shut down @chan with error @reason, following the signalling protocol
 * appropriate to its current state (disconnect request when connected,
 * connect-reject when still in CONNECT2, plain delete otherwise). */
427 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
429 struct l2cap_conn *conn = chan->conn;
430 struct sock *sk = chan->sk;
432 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
434 switch (chan->state) {
436 l2cap_chan_cleanup_listen(sk);
438 l2cap_state_change(chan, BT_CLOSED);
439 sock_set_flag(sk, SOCK_ZAPPED);
/* Connected/configuring ACL channels: send a disconnect request and
 * rely on the channel timer for the peer's response. */
444 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
445 conn->hcon->type == ACL_LINK) {
446 __clear_chan_timer(chan);
447 __set_chan_timer(chan, sk->sk_sndtimeo);
448 l2cap_send_disconn_req(conn, chan, reason);
450 l2cap_chan_del(chan, reason);
/* Incoming connection still pending our response: reject it. */
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) {
456 struct l2cap_conn_rsp rsp;
/* defer_setup means we had signalled SEC_BLOCK; otherwise BAD_PSM. */
459 if (bt_sk(sk)->defer_setup)
460 result = L2CAP_CR_SEC_BLOCK;
462 result = L2CAP_CR_BAD_PSM;
463 l2cap_state_change(chan, BT_DISCONN);
465 rsp.scid = cpu_to_le16(chan->dcid);
466 rsp.dcid = cpu_to_le16(chan->scid);
467 rsp.result = cpu_to_le16(result);
468 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
469 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
473 l2cap_chan_del(chan, reason);
478 l2cap_chan_del(chan, reason);
/* Default: nothing to signal; just zap the socket. */
482 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel type and security level to the HCI authentication
 * requirement used when establishing the link. */
487 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
/* Raw channels (e.g. dedicated bonding) request dedicated bonding. */
489 if (chan->chan_type == L2CAP_CHAN_RAW) {
490 switch (chan->sec_level) {
491 case BT_SECURITY_HIGH:
492 return HCI_AT_DEDICATED_BONDING_MITM;
493 case BT_SECURITY_MEDIUM:
494 return HCI_AT_DEDICATED_BONDING;
496 return HCI_AT_NO_BONDING;
/* PSM 0x0001 is SDP: never require bonding, and downgrade LOW to the
 * special SDP security level. */
498 } else if (chan->psm == cpu_to_le16(0x0001)) {
499 if (chan->sec_level == BT_SECURITY_LOW)
500 chan->sec_level = BT_SECURITY_SDP;
502 if (chan->sec_level == BT_SECURITY_HIGH)
503 return HCI_AT_NO_BONDING_MITM;
505 return HCI_AT_NO_BONDING;
/* Everything else uses general bonding graded by security level. */
507 switch (chan->sec_level) {
508 case BT_SECURITY_HIGH:
509 return HCI_AT_GENERAL_BONDING_MITM;
510 case BT_SECURITY_MEDIUM:
511 return HCI_AT_GENERAL_BONDING;
513 return HCI_AT_NO_BONDING;

518 /* Service level security */
/* Ask the HCI layer to enforce @chan's security level on the link. */
519 static inline int l2cap_check_security(struct l2cap_chan *chan)
521 struct l2cap_conn *conn = chan->conn;
524 auth_type = l2cap_get_auth_type(chan);
526 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for @conn.
 * Wraps within the kernel-reserved 1..128 range (wrap handling elided
 * in this excerpt). */
529 static u8 l2cap_get_ident(struct l2cap_conn *conn)
533 /* Get next available identificator.
534 * 1 - 128 are used by kernel.
535 * 129 - 199 are reserved.
536 * 200 - 254 are used by utilities like l2ping, etc.
539 spin_lock_bh(&conn->lock);
541 if (++conn->tx_ident > 128)
546 spin_unlock_bh(&conn->lock);

/* Build a signalling command PDU and transmit it on the ACL link,
 * using no-flush semantics when the controller supports them. */
551 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
553 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
556 BT_DBG("code 0x%2.2x", code);
561 if (lmp_no_flush_capable(conn->hcon->hdev))
562 flags = ACL_START_NO_FLUSH;
/* Signalling traffic always forces the link active. */
566 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
568 hci_send_acl(conn->hcon, skb, flags);
/* Build and send an ERTM S-frame carrying @control on @chan.
 * Header size depends on whether extended control fields are in use;
 * an FCS trailer is appended when CRC16 is negotiated.  Pending F-bit /
 * P-bit requests are folded into the control field here. */
571 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
574 struct l2cap_hdr *lh;
575 struct l2cap_conn *conn = chan->conn;
/* Only meaningful on an established channel. */
579 if (chan->state != BT_CONNECTED)
582 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
583 hlen = L2CAP_EXT_HDR_SIZE;
585 hlen = L2CAP_ENH_HDR_SIZE;
587 if (chan->fcs == L2CAP_FCS_CRC16)
588 hlen += L2CAP_FCS_SIZE;
590 BT_DBG("chan %p, control 0x%8.8x", chan, control);
592 count = min_t(unsigned int, conn->mtu, hlen);
594 control |= __set_sframe(chan);
596 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
597 control |= __set_ctrl_final(chan);
599 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
600 control |= __set_ctrl_poll(chan);
602 skb = bt_skb_alloc(count, GFP_ATOMIC);
606 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
607 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
608 lh->cid = cpu_to_le16(chan->dcid);
610 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
/* FCS covers everything before the trailer, including the L2CAP header. */
612 if (chan->fcs == L2CAP_FCS_CRC16) {
613 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
614 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
617 if (lmp_no_flush_capable(conn->hcon->hdev))
618 flags = ACL_START_NO_FLUSH;
622 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
624 hci_send_acl(chan->conn->hcon, skb, flags);

/* Send RR, or RNR while locally busy; always acknowledges buffer_seq. */
627 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
629 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
630 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
631 set_bit(CONN_RNR_SENT, &chan->conn_state);
633 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
635 control |= __set_reqseq(chan, chan->buffer_seq);
637 l2cap_send_sframe(chan, control);

/* True while no connect request is outstanding for @chan. */
640 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
642 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment on @chan's connection.
 * If the peer's feature mask is already known (or being fetched), send a
 * connect request once security allows; otherwise first issue an
 * information request for the feature mask and arm the info timer. */
645 static void l2cap_do_start(struct l2cap_chan *chan)
647 struct l2cap_conn *conn = chan->conn;
649 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight: wait for it to finish. */
650 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
653 if (l2cap_check_security(chan) &&
654 __l2cap_no_conn_pending(chan)) {
655 struct l2cap_conn_req req;
656 req.scid = cpu_to_le16(chan->scid);
659 chan->ident = l2cap_get_ident(conn);
660 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
662 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
666 struct l2cap_info_req req;
667 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
669 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
670 conn->info_ident = l2cap_get_ident(conn);
672 mod_timer(&conn->info_timer, jiffies +
673 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
675 l2cap_send_cmd(conn, conn->info_ident,
676 L2CAP_INFO_REQ, sizeof(req), &req);

/* Check @mode against both the remote feature mask and our own
 * (ERTM/streaming are always enabled locally here). */
680 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
682 u32 local_feat_mask = l2cap_feat_mask;
684 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
687 case L2CAP_MODE_ERTM:
688 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
689 case L2CAP_MODE_STREAMING:
690 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP disconnect request for @chan and move it to BT_DISCONN.
 * ERTM timers are stopped first so no retransmissions race the teardown. */
696 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
699 struct l2cap_disconn_req req;
706 if (chan->mode == L2CAP_MODE_ERTM) {
707 __clear_retrans_timer(chan);
708 __clear_monitor_timer(chan);
709 __clear_ack_timer(chan);
712 req.dcid = cpu_to_le16(chan->dcid);
713 req.scid = cpu_to_le16(chan->scid);
714 l2cap_send_cmd(conn, l2cap_get_ident(conn),
715 L2CAP_DISCONN_REQ, sizeof(req), &req);
717 l2cap_state_change(chan, BT_DISCONN);
721 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its connection state machine:
 * send pending connect requests (BT_CONNECT) or answer pending incoming
 * connects (BT_CONNECT2), closing channels whose mode the peer cannot
 * support. */
722 static void l2cap_conn_start(struct l2cap_conn *conn)
724 struct l2cap_chan *chan, *tmp;
726 BT_DBG("conn %p", conn);
728 read_lock(&conn->chan_lock);
730 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
731 struct sock *sk = chan->sk;
/* Only connection-oriented channels participate here. */
735 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
740 if (chan->state == BT_CONNECT) {
741 struct l2cap_conn_req req;
743 if (!l2cap_check_security(chan) ||
744 !__l2cap_no_conn_pending(chan)) {
/* Peer lacks the required mode and we may not fall back: abort. */
749 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
750 && test_bit(CONF_STATE2_DEVICE,
751 &chan->conf_state)) {
752 /* l2cap_chan_close() calls list_del(chan)
753 * so release the lock */
754 read_unlock(&conn->chan_lock);
755 l2cap_chan_close(chan, ECONNRESET);
756 read_lock(&conn->chan_lock);
761 req.scid = cpu_to_le16(chan->scid);
764 chan->ident = l2cap_get_ident(conn);
765 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
767 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
770 } else if (chan->state == BT_CONNECT2) {
771 struct l2cap_conn_rsp rsp;
773 rsp.scid = cpu_to_le16(chan->dcid);
774 rsp.dcid = cpu_to_le16(chan->scid);
776 if (l2cap_check_security(chan)) {
/* Deferred setup: report PENDING and let the owner accept. */
777 if (bt_sk(sk)->defer_setup) {
778 struct sock *parent = bt_sk(sk)->parent;
779 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
780 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
782 parent->sk_data_ready(parent, 0);
785 l2cap_state_change(chan, BT_CONFIG);
786 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
787 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
790 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
791 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
794 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* On success, immediately follow up with our configure request. */
797 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
798 rsp.result != L2CAP_CR_SUCCESS) {
803 set_bit(CONF_REQ_SENT, &chan->conf_state);
804 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
805 l2cap_build_conf_req(chan, buf), buf);
806 chan->num_conf_req++;
812 read_unlock(&conn->chan_lock);
815 /* Find socket with cid and source bdaddr.
816 * Returns closest match, locked.
/* Exact source-address match wins immediately; a BDADDR_ANY binding is
 * remembered as a fallback (returned via the elided tail). */
818 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
820 struct l2cap_chan *c, *c1 = NULL;
822 read_lock(&chan_list_lock);
824 list_for_each_entry(c, &chan_list, global_l) {
825 struct sock *sk = c->sk;
/* state == 0 means "any state". */
827 if (state && c->state != state)
830 if (c->scid == cid) {
832 if (!bacmp(&bt_sk(sk)->src, src)) {
833 read_unlock(&chan_list_lock);
838 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
843 read_unlock(&chan_list_lock);
/* Handle a newly-ready LE link on the responder side: if a socket is
 * listening on the LE data CID, spawn a child channel, attach it to the
 * connection and signal the parent. */
848 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
850 struct sock *parent, *sk;
851 struct l2cap_chan *chan, *pchan;
855 /* Check if we have socket listening on cid */
856 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
863 bh_lock_sock(parent);
865 /* Check for backlog size */
866 if (sk_acceptq_is_full(parent)) {
867 BT_DBG("backlog full %d", parent->sk_ack_backlog);
871 chan = pchan->ops->new_connection(pchan->data);
877 write_lock_bh(&conn->chan_lock);
879 hci_conn_hold(conn->hcon);
881 bacpy(&bt_sk(sk)->src, conn->src);
882 bacpy(&bt_sk(sk)->dst, conn->dst);
884 bt_accept_enqueue(parent, sk);
886 __l2cap_chan_add(conn, chan);
888 __set_chan_timer(chan, sk->sk_sndtimeo);
890 l2cap_state_change(chan, BT_CONNECTED);
891 parent->sk_data_ready(parent, 0);
893 write_unlock_bh(&conn->chan_lock);
896 bh_unlock_sock(parent);

/* Mark @sk's channel fully connected: clear config state and timer,
 * then wake the socket (and its parent, if any). */
899 static void l2cap_chan_ready(struct sock *sk)
901 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
902 struct sock *parent = bt_sk(sk)->parent;
904 BT_DBG("sk %p, parent %p", sk, parent);
/* This clears all conf flags, including CONF_CONNECT_PEND. */
906 chan->conf_state = 0;
907 __clear_chan_timer(chan);
909 l2cap_state_change(chan, BT_CONNECTED);
910 sk->sk_state_change(sk);
913 parent->sk_data_ready(parent, 0);
/* HCI link is up: progress every channel on @conn.
 * LE responders get child channels created; LE initiators elevate SMP
 * security; non-connection-oriented channels become connected at once;
 * connection-oriented ones in BT_CONNECT start the connect sequence. */
916 static void l2cap_conn_ready(struct l2cap_conn *conn)
918 struct l2cap_chan *chan;
920 BT_DBG("conn %p", conn);
922 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
923 l2cap_le_conn_ready(conn);
925 if (conn->hcon->out && conn->hcon->type == LE_LINK)
926 smp_conn_security(conn, conn->hcon->pending_sec_level);
928 read_lock(&conn->chan_lock);
930 list_for_each_entry(chan, &conn->chan_l, list) {
931 struct sock *sk = chan->sk;
935 if (conn->hcon->type == LE_LINK) {
936 if (smp_conn_security(conn, chan->sec_level))
937 l2cap_chan_ready(sk);
939 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
940 __clear_chan_timer(chan);
941 l2cap_state_change(chan, BT_CONNECTED);
942 sk->sk_state_change(sk);
944 } else if (chan->state == BT_CONNECT)
945 l2cap_do_start(chan);
950 read_unlock(&conn->chan_lock);

953 /* Notify sockets that we cannot guaranty reliability anymore */
954 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
956 struct l2cap_chan *chan;
958 BT_DBG("conn %p", conn);
960 read_lock(&conn->chan_lock);
962 list_for_each_entry(chan, &conn->chan_l, list) {
963 struct sock *sk = chan->sk;
/* Only channels that demanded reliability are told (error delivery
 * elided in this excerpt). */
965 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
969 read_unlock(&conn->chan_lock);

/* Info-request timer expired: give up on the feature exchange and let
 * pending channels proceed with defaults. */
972 static void l2cap_info_timeout(unsigned long arg)
974 struct l2cap_conn *conn = (void *) arg;
976 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
977 conn->info_ident = 0;
979 l2cap_conn_start(conn);
/* Tear down the L2CAP connection attached to @hcon: delete every channel
 * with error @err, cancel the info/security timers and free the conn. */
982 static void l2cap_conn_del(struct hci_conn *hcon, int err)
984 struct l2cap_conn *conn = hcon->l2cap_data;
985 struct l2cap_chan *chan, *l;
991 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled frame. */
993 kfree_skb(conn->rx_skb);
996 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
999 l2cap_chan_del(chan, err);
1001 chan->ops->close(chan->data);
1004 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1005 del_timer_sync(&conn->info_timer);
1007 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1008 del_timer(&conn->security_timer);
1009 smp_chan_destroy(conn);
1012 hcon->l2cap_data = NULL;

/* SMP security procedure timed out: drop the whole connection. */
1016 static void security_timeout(unsigned long arg)
1018 struct l2cap_conn *conn = (void *) arg;
1020 l2cap_conn_del(conn->hcon, ETIMEDOUT);

/* Get-or-create the l2cap_conn for @hcon.  Returns the existing one if
 * already attached; otherwise allocates and initializes MTU, addresses,
 * locks, the channel list and the type-appropriate timer. */
1023 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1025 struct l2cap_conn *conn = hcon->l2cap_data;
1030 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1034 hcon->l2cap_data = conn;
1037 BT_DBG("hcon %p conn %p", hcon, conn);
/* LE links use the controller's LE MTU when it advertises one. */
1039 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1040 conn->mtu = hcon->hdev->le_mtu;
1042 conn->mtu = hcon->hdev->acl_mtu;
1044 conn->src = &hcon->hdev->bdaddr;
1045 conn->dst = &hcon->dst;
1047 conn->feat_mask = 0;
1049 spin_lock_init(&conn->lock);
1050 rwlock_init(&conn->chan_lock);
1052 INIT_LIST_HEAD(&conn->chan_l);
/* LE uses the SMP security timer; BR/EDR the info-request timer. */
1054 if (hcon->type == LE_LINK)
1055 setup_timer(&conn->security_timer, security_timeout,
1056 (unsigned long) conn);
1058 setup_timer(&conn->info_timer, l2cap_info_timeout,
1059 (unsigned long) conn);
1061 conn->disc_reason = 0x13;

/* Lock-taking wrapper around __l2cap_chan_add(). */
1066 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1068 write_lock_bh(&conn->chan_lock);
1069 __l2cap_chan_add(conn, chan);
1070 write_unlock_bh(&conn->chan_lock);
1073 /* ---- Socket interface ---- */
1075 /* Find socket with psm and source bdaddr.
1076 * Returns closest match.
/* Exact source-address match wins immediately; a BDADDR_ANY binding is
 * remembered as a fallback (returned via the elided tail). */
1078 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1080 struct l2cap_chan *c, *c1 = NULL;
1082 read_lock(&chan_list_lock);
1084 list_for_each_entry(c, &chan_list, global_l) {
1085 struct sock *sk = c->sk;
/* state == 0 means "any state". */
1087 if (state && c->state != state)
1090 if (c->psm == psm) {
1092 if (!bacmp(&bt_sk(sk)->src, src)) {
1093 read_unlock(&chan_list_lock);
1098 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1103 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan: resolve the route, create
 * (or reuse) the ACL/LE link with the required security, attach the
 * channel and start the L2CAP connect sequence.
 * Returns 0 or a negative errno. */
1108 int l2cap_chan_connect(struct l2cap_chan *chan)
1110 struct sock *sk = chan->sk;
1111 bdaddr_t *src = &bt_sk(sk)->src;
1112 bdaddr_t *dst = &bt_sk(sk)->dst;
1113 struct l2cap_conn *conn;
1114 struct hci_conn *hcon;
1115 struct hci_dev *hdev;
1119 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1122 hdev = hci_get_route(dst, src);
1124 return -EHOSTUNREACH;
1126 hci_dev_lock_bh(hdev);
1128 auth_type = l2cap_get_auth_type(chan);
/* The fixed LE data CID selects an LE link; everything else is ACL. */
1130 if (chan->dcid == L2CAP_CID_LE_DATA)
1131 hcon = hci_connect(hdev, LE_LINK, dst,
1132 chan->sec_level, auth_type);
1134 hcon = hci_connect(hdev, ACL_LINK, dst,
1135 chan->sec_level, auth_type);
1138 err = PTR_ERR(hcon);
1142 conn = l2cap_conn_add(hcon, 0);
1149 /* Update source addr of the socket */
1150 bacpy(src, conn->src);
1152 l2cap_chan_add(conn, chan);
1154 l2cap_state_change(chan, BT_CONNECT);
1155 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Link already up: skip the wait and start L2CAP-level setup now. */
1157 if (hcon->state == BT_CONNECTED) {
1158 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1159 __clear_chan_timer(chan);
1160 if (l2cap_check_security(chan))
1161 l2cap_state_change(chan, BT_CONNECTED);
1163 l2cap_do_start(chan);
1169 hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged or the connection goes away.  Returns 0, a signal errno,
 * or a pending socket error. */
1174 int __l2cap_wait_ack(struct sock *sk)
1176 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1177 DECLARE_WAITQUEUE(wait, current);
1181 add_wait_queue(sk_sleep(sk), &wait);
1182 set_current_state(TASK_INTERRUPTIBLE);
1183 while (chan->unacked_frames > 0 && chan->conn) {
1187 if (signal_pending(current)) {
1188 err = sock_intr_errno(timeo);
1193 timeo = schedule_timeout(timeo);
1195 set_current_state(TASK_INTERRUPTIBLE);
1197 err = sock_error(sk);
1201 set_current_state(TASK_RUNNING);
1202 remove_wait_queue(sk_sleep(sk), &wait);

/* ERTM monitor timer: poll the peer with an RR/RNR (P-bit) until
 * remote_max_tx retries are exhausted, then disconnect. */
1206 static void l2cap_monitor_timeout(unsigned long arg)
1208 struct l2cap_chan *chan = (void *) arg;
1209 struct sock *sk = chan->sk;
1211 BT_DBG("chan %p", chan);
1214 if (chan->retry_count >= chan->remote_max_tx) {
1215 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1220 chan->retry_count++;
1221 __set_monitor_timer(chan);
1223 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);

/* ERTM retransmission timer: first expiry starts the monitor sequence
 * by polling the peer and waiting for an F-bit response. */
1227 static void l2cap_retrans_timeout(unsigned long arg)
1229 struct l2cap_chan *chan = (void *) arg;
1230 struct sock *sk = chan->sk;
1232 BT_DBG("chan %p", chan);
1235 chan->retry_count = 1;
1236 __set_monitor_timer(chan);
1238 set_bit(CONN_WAIT_F, &chan->conn_state);
1240 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Free transmitted frames from the head of tx_q up to (not including)
 * expected_ack_seq; stop the retransmission timer once nothing remains
 * unacknowledged. */
1244 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1246 struct sk_buff *skb;
1248 while ((skb = skb_peek(&chan->tx_q)) &&
1249 chan->unacked_frames) {
1250 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1253 skb = skb_dequeue(&chan->tx_q);
1256 chan->unacked_frames--;
1259 if (!chan->unacked_frames)
1260 __clear_retrans_timer(chan);

/* Hand a fully built PDU to the HCI layer; a non-flushable channel on a
 * no-flush-capable controller uses ACL_START_NO_FLUSH. */
1263 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
1265 struct hci_conn *hcon = chan->conn->hcon;
1268 BT_DBG("chan %p, skb %p len %d", chan, skb, skb->len);
1270 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
1271 lmp_no_flush_capable(hcon->hdev))
1272 flags = ACL_START_NO_FLUSH;
1276 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
1277 hci_send_acl(hcon, skb, flags);

/* Streaming mode: drain tx_q, stamping each frame with the next tx_seq
 * and (optionally) a CRC16 FCS; no acknowledgements are tracked. */
1280 static void l2cap_streaming_send(struct l2cap_chan *chan)
1282 struct sk_buff *skb;
1286 while ((skb = skb_dequeue(&chan->tx_q))) {
1287 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1288 control |= __set_txseq(chan, chan->next_tx_seq);
1289 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1291 if (chan->fcs == L2CAP_FCS_CRC16) {
1292 fcs = crc16(0, (u8 *)skb->data,
1293 skb->len - L2CAP_FCS_SIZE);
1294 put_unaligned_le16(fcs,
1295 skb->data + skb->len - L2CAP_FCS_SIZE);
1298 l2cap_do_send(chan, skb);
1300 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single queued I-frame whose tx_seq equals @tx_seq
 * (SREJ recovery).  A clone is sent so the original stays queued; the
 * control field and FCS are rewritten in the clone. */
1304 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1306 struct sk_buff *skb, *tx_skb;
/* Linear search of tx_q for the requested sequence number. */
1310 skb = skb_peek(&chan->tx_q);
1315 if (bt_cb(skb)->tx_seq == tx_seq)
1318 if (skb_queue_is_last(&chan->tx_q, skb))
1321 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
/* Too many retransmissions of this frame: give up on the channel. */
1323 if (chan->remote_max_tx &&
1324 bt_cb(skb)->retries == chan->remote_max_tx) {
1325 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1329 tx_skb = skb_clone(skb, GFP_ATOMIC);
1330 bt_cb(skb)->retries++;
1332 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
/* Keep only the SAR bits; reqseq/txseq/F-bit are re-stamped below. */
1333 control &= __get_sar_mask(chan);
1335 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1336 control |= __set_ctrl_final(chan);
1338 control |= __set_reqseq(chan, chan->buffer_seq);
1339 control |= __set_txseq(chan, tx_seq);
1341 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS over the clone being sent. */
1343 if (chan->fcs == L2CAP_FCS_CRC16) {
1344 fcs = crc16(0, (u8 *)tx_skb->data,
1345 tx_skb->len - L2CAP_FCS_SIZE);
1346 put_unaligned_le16(fcs,
1347 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1350 l2cap_do_send(chan, tx_skb);
1353 static int l2cap_ertm_send(struct l2cap_chan *chan)
1355 struct sk_buff *skb, *tx_skb;
1360 if (chan->state != BT_CONNECTED)
1363 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1365 if (chan->remote_max_tx &&
1366 bt_cb(skb)->retries == chan->remote_max_tx) {
1367 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1371 tx_skb = skb_clone(skb, GFP_ATOMIC);
1373 bt_cb(skb)->retries++;
1375 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1376 control &= __get_sar_mask(chan);
1378 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1379 control |= __set_ctrl_final(chan);
1381 control |= __set_reqseq(chan, chan->buffer_seq);
1382 control |= __set_txseq(chan, chan->next_tx_seq);
1384 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1386 if (chan->fcs == L2CAP_FCS_CRC16) {
1387 fcs = crc16(0, (u8 *)skb->data,
1388 tx_skb->len - L2CAP_FCS_SIZE);
1389 put_unaligned_le16(fcs, skb->data +
1390 tx_skb->len - L2CAP_FCS_SIZE);
1393 l2cap_do_send(chan, tx_skb);
1395 __set_retrans_timer(chan);
1397 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1399 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1401 if (bt_cb(skb)->retries == 1)
1402 chan->unacked_frames++;
1404 chan->frames_sent++;
1406 if (skb_queue_is_last(&chan->tx_q, skb))
1407 chan->tx_send_head = NULL;
1409 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the oldest unacked frame and resend from
 * there (REJ recovery).  Returns l2cap_ertm_send()'s result. */
1417 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1421 if (!skb_queue_empty(&chan->tx_q))
1422 chan->tx_send_head = chan->tx_q.next;
1424 chan->next_tx_seq = chan->expected_ack_seq;
1425 ret = l2cap_ertm_send(chan);

/* Acknowledge buffer_seq to the peer: RNR while locally busy, otherwise
 * piggy-back on pending I-frames if possible, else send an RR. */
1429 static void l2cap_send_ack(struct l2cap_chan *chan)
1433 control |= __set_reqseq(chan, chan->buffer_seq);
1435 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1436 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1437 set_bit(CONN_RNR_SENT, &chan->conn_state);
1438 l2cap_send_sframe(chan, control);
/* I-frames carry the ack implicitly; only fall through to RR if none
 * were sent. */
1442 if (l2cap_ertm_send(chan) > 0)
1445 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1446 l2cap_send_sframe(chan, control);

/* Send an SREJ S-frame with the F-bit set for the last entry on the
 * SREJ list. */
1449 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1451 struct srej_list *tail;
1454 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1455 control |= __set_ctrl_final(chan);
1457 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1458 control |= __set_reqseq(chan, tail->tx_seq);
1460 l2cap_send_sframe(chan, control);

/* Copy @len bytes of user iovec data into @skb, spilling the remainder
 * into MTU-sized fragments chained on skb's frag_list.
 * @count is the number of bytes that fit in the head skb. */
1463 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1465 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1466 struct sk_buff **frag;
1469 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1475 /* Continuation fragments (no L2CAP header) */
1476 frag = &skb_shinfo(skb)->frag_list;
1478 count = min_t(unsigned int, conn->mtu, len);
1480 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1483 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1489 frag = &(*frag)->next;
1495 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1497 struct sock *sk = chan->sk;
1498 struct l2cap_conn *conn = chan->conn;
1499 struct sk_buff *skb;
1500 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1501 struct l2cap_hdr *lh;
1503 BT_DBG("sk %p len %d", sk, (int)len);
1505 count = min_t(unsigned int, (conn->mtu - hlen), len);
1506 skb = bt_skb_send_alloc(sk, count + hlen,
1507 msg->msg_flags & MSG_DONTWAIT, &err);
1509 return ERR_PTR(err);
1511 /* Create L2CAP header */
1512 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1513 lh->cid = cpu_to_le16(chan->dcid);
1514 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1515 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1517 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1518 if (unlikely(err < 0)) {
1520 return ERR_PTR(err);
/*
 * Build a basic-mode (B-frame) PDU: plain L2CAP header plus payload,
 * no control field, no FCS.  Returns skb or ERR_PTR(err).
 * NOTE(review): same truncation pattern as the connless variant (missing
 * !skb guard and trailing return) — code left byte-identical.
 */
1525 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1527 struct sock *sk = chan->sk;
1528 struct l2cap_conn *conn = chan->conn;
1529 struct sk_buff *skb;
1530 int err, count, hlen = L2CAP_HDR_SIZE;
1531 struct l2cap_hdr *lh;
1533 BT_DBG("sk %p len %d", sk, (int)len);
1535 count = min_t(unsigned int, (conn->mtu - hlen), len);
1536 skb = bt_skb_send_alloc(sk, count + hlen,
1537 msg->msg_flags & MSG_DONTWAIT, &err);
1539 return ERR_PTR(err);
1541 /* Create L2CAP header */
1542 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1543 lh->cid = cpu_to_le16(chan->dcid);
1544 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1546 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1547 if (unlikely(err < 0)) {
1549 return ERR_PTR(err);
/*
 * Build an ERTM/streaming I-frame PDU.
 *
 * Header layout: L2CAP header + control field (enhanced 2-byte or
 * extended 4-byte, per FLAG_EXT_CTRL) [+ 2-byte SDU length when @sdulen
 * is set for a SAR start frame] ... payload ... [+ 2-byte FCS
 * placeholder when CRC16 is enabled].  Returns skb or ERR_PTR(err).
 * NOTE(review): extraction dropped the connection-state check before the
 * -ENOTCONN return, the `if (sdulen)` guard and the final `return skb;`.
 */
1554 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1555 struct msghdr *msg, size_t len,
1556 u32 control, u16 sdulen)
1558 struct sock *sk = chan->sk;
1559 struct l2cap_conn *conn = chan->conn;
1560 struct sk_buff *skb;
1561 int err, count, hlen;
1562 struct l2cap_hdr *lh;
1564 BT_DBG("sk %p len %d", sk, (int)len);
1567 return ERR_PTR(-ENOTCONN);
/* Extended (4-byte) vs enhanced (2-byte) control field. */
1569 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1570 hlen = L2CAP_EXT_HDR_SIZE;
1572 hlen = L2CAP_ENH_HDR_SIZE;
/* Room for the SDU-length field (SAR start frames only). */
1575 hlen += L2CAP_SDULEN_SIZE;
1577 if (chan->fcs == L2CAP_FCS_CRC16)
1578 hlen += L2CAP_FCS_SIZE;
1580 count = min_t(unsigned int, (conn->mtu - hlen), len);
1581 skb = bt_skb_send_alloc(sk, count + hlen,
1582 msg->msg_flags & MSG_DONTWAIT, &err);
1584 return ERR_PTR(err);
1586 /* Create L2CAP header */
1587 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1588 lh->cid = cpu_to_le16(chan->dcid);
1589 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Control field width depends on the channel's ext-control flag. */
1591 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1594 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1596 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1597 if (unlikely(err < 0)) {
1599 return ERR_PTR(err);
/* FCS placeholder; the real CRC16 is filled in at transmit time. */
1602 if (chan->fcs == L2CAP_FCS_CRC16)
1603 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1605 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the remote MPS into a chain of I-frames:
 * one SAR_START frame (carrying the total SDU length), zero or more
 * SAR_CONTINUE frames, and a final SAR_END frame.  The frames are built
 * on a local queue and spliced onto chan->tx_q only once all segments
 * were created, so a mid-stream allocation failure leaves tx_q untouched.
 * NOTE(review): the surrounding while-loop, `size`/`buflen`/`control`
 * declarations and `return size;` were lost in extraction.
 */
1609 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1611 struct sk_buff *skb;
1612 struct sk_buff_head sar_queue;
1616 skb_queue_head_init(&sar_queue);
/* First segment: SAR_START, sdulen = total SDU length. */
1617 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1618 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1620 return PTR_ERR(skb);
1622 __skb_queue_tail(&sar_queue, skb);
1623 len -= chan->remote_mps;
1624 size += chan->remote_mps;
/* Middle vs last segment, chosen by how much payload remains. */
1629 if (len > chan->remote_mps) {
1630 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1631 buflen = chan->remote_mps;
1633 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
/* Continuation/end frames carry no SDU-length field (sdulen = 0). */
1637 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure drop everything queued so far; tx_q stays consistent. */
1639 skb_queue_purge(&sar_queue);
1640 return PTR_ERR(skb);
1643 __skb_queue_tail(&sar_queue, skb);
/* Commit the whole segment chain to the transmit queue atomically. */
1647 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1648 if (chan->tx_send_head == NULL)
1649 chan->tx_send_head = sar_queue.next;
/*
 * Entry point for sending user data on an L2CAP channel.
 *
 * Dispatches on channel type/mode:
 *  - connectionless channels: single connless PDU, sent immediately;
 *  - basic mode: one B-frame after an outgoing-MTU check;
 *  - ERTM/streaming: either a single unsegmented I-frame or SAR
 *    segmentation, then kicked via l2cap_streaming_send() or
 *    l2cap_ertm_send() (deferred while the remote is busy / waiting-F).
 * Returns bytes sent or a negative errno (per the visible PTR_ERR paths).
 * NOTE(review): several returns, `err`/`control` declarations and the
 * default-case `err = -EBADFD;` body were lost in extraction.
 */
1654 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1656 struct sk_buff *skb;
1660 /* Connectionless channel */
1661 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1662 skb = l2cap_create_connless_pdu(chan, msg, len);
1664 return PTR_ERR(skb);
1666 l2cap_do_send(chan, skb);
1670 switch (chan->mode) {
1671 case L2CAP_MODE_BASIC:
1672 /* Check outgoing MTU */
1673 if (len > chan->omtu)
1676 /* Create a basic PDU */
1677 skb = l2cap_create_basic_pdu(chan, msg, len);
1679 return PTR_ERR(skb);
1681 l2cap_do_send(chan, skb);
1685 case L2CAP_MODE_ERTM:
1686 case L2CAP_MODE_STREAMING:
1687 /* Entire SDU fits into one PDU */
1688 if (len <= chan->remote_mps) {
1689 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1690 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1693 return PTR_ERR(skb);
1695 __skb_queue_tail(&chan->tx_q, skb);
1697 if (chan->tx_send_head == NULL)
1698 chan->tx_send_head = skb;
1701 /* Segment SDU into multiples PDUs */
1702 err = l2cap_sar_segment_sdu(chan, msg, len);
1707 if (chan->mode == L2CAP_MODE_STREAMING) {
1708 l2cap_streaming_send(chan);
/* Remote busy while we await an F-bit: transmission stays deferred. */
1713 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1714 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1719 err = l2cap_ertm_send(chan);
1726 BT_DBG("bad state %1.1x", chan->mode);
1733 /* Copy frame to all raw sockets on that connection */
1734 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1736 struct sk_buff *nskb;
1737 struct l2cap_chan *chan;
1739 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock; clone the
 * frame for every raw channel and hand it to the channel's recv op. */
1741 read_lock(&conn->chan_lock);
1742 list_for_each_entry(chan, &conn->chan_l, list) {
1743 struct sock *sk = chan->sk;
1744 if (chan->chan_type != L2CAP_CHAN_RAW)
1747 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: may run in softirq context — no sleeping allocation. */
1750 nskb = skb_clone(skb, GFP_ATOMIC);
/* If recv() refuses the clone, presumably it is freed — the kfree_skb
 * line was lost in extraction; TODO confirm against upstream source. */
1754 if (chan->ops->recv(chan->data, nskb))
1757 read_unlock(&conn->chan_lock);
1760 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling-channel command skb: L2CAP header (CID 0x0001, or
 * the LE signalling CID on LE links) + command header (code/ident/len)
 * + @dlen bytes of @data.  Payload beyond the first skb's room is chained
 * on frag_list, fragments capped at conn->mtu.  Returns the skb
 * (NULL-on-failure paths were lost in extraction).
 */
1761 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1762 u8 code, u8 ident, u16 dlen, void *data)
1764 struct sk_buff *skb, **frag;
1765 struct l2cap_cmd_hdr *cmd;
1766 struct l2cap_hdr *lh;
1769 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1770 conn, code, ident, dlen);
1772 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1773 count = min_t(unsigned int, conn->mtu, len);
1775 skb = bt_skb_alloc(count, GFP_ATOMIC);
1779 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1780 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the dedicated LE signalling channel. */
1782 if (conn->hcon->type == LE_LINK)
1783 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1785 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1787 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1790 cmd->len = cpu_to_le16(dlen);
/* Fill the remainder of the first skb with command data. */
1793 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1794 memcpy(skb_put(skb, count), data, count);
1800 /* Continuation fragments (no L2CAP header) */
1801 frag = &skb_shinfo(skb)->frag_list;
1803 count = min_t(unsigned int, conn->mtu, len);
1805 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1809 memcpy(skb_put(*frag, count), data, count);
1814 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr.  Stores the option type and
 * value-length, and the value itself widened to unsigned long (1/2/4
 * byte options by value; larger options as a pointer to the raw bytes).
 * Returns the total consumed length (header + value) so the caller can
 * decrement its remaining-length counter.
 * NOTE(review): the switch framing, *ptr advance and return statement
 * were lost in extraction.
 */
1824 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1826 struct l2cap_conf_opt *opt = *ptr;
1829 len = L2CAP_CONF_OPT_SIZE + opt->len;
1837 *val = *((u8 *) opt->val);
1841 *val = get_unaligned_le16(opt->val);
1845 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the in-place value bytes. */
1849 *val = (unsigned long) opt->val;
1853 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option (type/len/value) at *ptr and advance
 * *ptr past it.  Values of 1, 2 or 4 bytes are stored by value
 * (little-endian); anything larger is memcpy'd from the pointer passed
 * in @val.  Counterpart of l2cap_get_conf_opt().
 */
1857 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1859 struct l2cap_conf_opt *opt = *ptr;
1861 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1868 *((u8 *) opt->val) = val;
1872 put_unaligned_le16(val, opt->val);
1876 put_unaligned_le32(val, opt->val);
/* len > 4: @val actually carries a pointer to the source bytes. */
1880 memcpy(opt->val, (void *) val, len);
1884 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters.  ERTM uses the channel's local
 * service type; streaming mode forces best-effort.
 * NOTE(review): the streaming-case acc_lat/flush_to assignments and the
 * default-case early return were lost in extraction.
 */
1887 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1889 struct l2cap_conf_efs efs;
1891 switch(chan->mode) {
1892 case L2CAP_MODE_ERTM:
1893 efs.id = chan->local_id;
1894 efs.stype = chan->local_stype;
1895 efs.msdu = cpu_to_le16(chan->local_msdu);
1896 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1897 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1898 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1901 case L2CAP_MODE_STREAMING:
1903 efs.stype = L2CAP_SERV_BESTEFFORT;
1904 efs.msdu = cpu_to_le16(chan->local_msdu);
1905 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1914 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1915 (unsigned long) &efs);
/*
 * ERTM ack-timer callback: when the timer fires, send a pending
 * acknowledgement for received I-frames.  Runs in timer (softirq)
 * context, hence the bh socket lock around the send.
 */
1918 static void l2cap_ack_timeout(unsigned long arg)
1920 struct l2cap_chan *chan = (void *) arg;
1922 bh_lock_sock(chan->sk);
1923 l2cap_send_ack(chan);
1924 bh_unlock_sock(chan->sk);
/*
 * Initialise per-channel ERTM state once the channel is configured:
 * reset sequence counters, arm the retransmission/monitor/ack timers,
 * set up the SREJ receive queue and list, and route backlogged packets
 * through the ERTM receive path.
 */
1927 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1929 struct sock *sk = chan->sk;
1931 chan->expected_ack_seq = 0;
1932 chan->unacked_frames = 0;
1933 chan->buffer_seq = 0;
1934 chan->num_acked = 0;
1935 chan->frames_sent = 0;
/* Timers take the channel pointer as their callback argument. */
1937 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1938 (unsigned long) chan);
1939 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1940 (unsigned long) chan);
1941 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1943 skb_queue_head_init(&chan->srej_q);
1945 INIT_LIST_HEAD(&chan->srej_l);
/* Frames queued on the socket backlog go through the ERTM state machine. */
1948 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/*
 * Pick the channel mode to request: keep ERTM/streaming only if the
 * remote's feature mask supports it, otherwise fall back to basic mode.
 * NOTE(review): the switch statement line and the supported-mode return
 * were lost in extraction.
 */
1951 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1954 case L2CAP_MODE_STREAMING:
1955 case L2CAP_MODE_ERTM:
1956 if (l2cap_mode_supported(mode, remote_feat_mask))
1960 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed (AMP) support
 * enabled and the remote advertising the EXT_WINDOW feature. */
1964 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1966 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed (AMP)
 * support enabled and the remote advertising the EXT_FLOW feature. */
1969 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
1971 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/*
 * Choose the transmit-window regime: if the requested window exceeds the
 * default and extended windows are supported, switch the channel to the
 * extended control field; otherwise clamp the window to the default.
 */
1974 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1976 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1977 __l2cap_ews_supported(chan)) {
1978 /* use extended control field */
1979 set_bit(FLAG_EXT_CTRL, &chan->flags);
1980 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
1982 chan->tx_win = min_t(u16, chan->tx_win,
1983 L2CAP_DEFAULT_TX_WINDOW);
1984 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/*
 * Build an outgoing Configuration Request for @chan into @data.
 *
 * On the first request, downgrade ERTM/streaming to a mode the remote
 * supports (and latch EFS if available).  Then emit options: MTU (if
 * non-default), an RFC option describing the chosen mode, optionally
 * EFS, FCS=none (when the remote supports FCS negotiation and we want
 * it off), and EWS for extended windows.
 * Returns the request length (ptr - data; the return line itself was
 * lost in extraction).
 */
1988 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
1990 struct l2cap_conf_req *req = data;
1991 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
1992 void *ptr = req->data;
1995 BT_DBG("chan %p", chan);
/* Only pick the mode once, before any conf req/rsp has been exchanged. */
1997 if (chan->num_conf_req || chan->num_conf_rsp)
2000 switch (chan->mode) {
2001 case L2CAP_MODE_STREAMING:
2002 case L2CAP_MODE_ERTM:
/* STATE2_DEVICE means the mode is mandated by the device; keep it. */
2003 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2006 if (__l2cap_efs_supported(chan))
2007 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2011 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2016 if (chan->imtu != L2CAP_DEFAULT_MTU)
2017 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2019 switch (chan->mode) {
2020 case L2CAP_MODE_BASIC:
/* Remote knows neither ERTM nor streaming: RFC option is pointless. */
2021 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2022 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2025 rfc.mode = L2CAP_MODE_BASIC;
2027 rfc.max_transmit = 0;
2028 rfc.retrans_timeout = 0;
2029 rfc.monitor_timeout = 0;
2030 rfc.max_pdu_size = 0;
2032 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2033 (unsigned long) &rfc);
2036 case L2CAP_MODE_ERTM:
2037 rfc.mode = L2CAP_MODE_ERTM;
2038 rfc.max_transmit = chan->max_tx;
2039 rfc.retrans_timeout = 0;
2040 rfc.monitor_timeout = 0;
/* Cap PDU size so a full frame (with headers) fits in the ACL MTU. */
2042 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2043 L2CAP_EXT_HDR_SIZE -
2046 rfc.max_pdu_size = cpu_to_le16(size);
2048 l2cap_txwin_setup(chan);
2050 rfc.txwin_size = min_t(u16, chan->tx_win,
2051 L2CAP_DEFAULT_TX_WINDOW);
2053 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2054 (unsigned long) &rfc);
2056 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2057 l2cap_add_opt_efs(&ptr, chan);
2059 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2062 if (chan->fcs == L2CAP_FCS_NONE ||
2063 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2064 chan->fcs = L2CAP_FCS_NONE;
2065 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2068 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2069 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2073 case L2CAP_MODE_STREAMING:
2074 rfc.mode = L2CAP_MODE_STREAMING;
2076 rfc.max_transmit = 0;
2077 rfc.retrans_timeout = 0;
2078 rfc.monitor_timeout = 0;
2080 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2081 L2CAP_EXT_HDR_SIZE -
2084 rfc.max_pdu_size = cpu_to_le16(size);
2086 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2087 (unsigned long) &rfc);
2089 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2090 l2cap_add_opt_efs(&ptr, chan);
2092 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2095 if (chan->fcs == L2CAP_FCS_NONE ||
2096 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2097 chan->fcs = L2CAP_FCS_NONE;
2098 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2103 req->dcid = cpu_to_le16(chan->dcid);
2104 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated Configuration Request (chan->conf_req /
 * conf_len) and build our Configuration Response into @data.
 *
 * Phase 1: walk the option list (MTU, flush timeout, QoS, RFC, FCS,
 * EFS, EWS), recording values and flagging unknown non-hint options.
 * Phase 2: reconcile the requested RFC mode with ours (possibly
 * reselecting on the first pass, refusing with -ECONNREFUSED when
 * irreconcilable).  Phase 3: on success, emit our output options
 * (MTU, RFC with clamped window/PDU size and default timeouts, EFS)
 * and mark MTU/MODE/OUTPUT done in conf_state.
 * Returns the response length (return line lost in extraction) or a
 * negative errno.
 */
2109 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2111 struct l2cap_conf_rsp *rsp = data;
2112 void *ptr = rsp->data;
2113 void *req = chan->conf_req;
2114 int len = chan->conf_len;
2115 int type, hint, olen;
2117 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2118 struct l2cap_conf_efs efs;
2120 u16 mtu = L2CAP_DEFAULT_MTU;
2121 u16 result = L2CAP_CONF_SUCCESS;
2124 BT_DBG("chan %p", chan);
2126 while (len >= L2CAP_CONF_OPT_SIZE) {
2127 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; others must be understood. */
2129 hint = type & L2CAP_CONF_HINT;
2130 type &= L2CAP_CONF_MASK;
2133 case L2CAP_CONF_MTU:
2137 case L2CAP_CONF_FLUSH_TO:
2138 chan->flush_to = val;
2141 case L2CAP_CONF_QOS:
2144 case L2CAP_CONF_RFC:
2145 if (olen == sizeof(rfc))
2146 memcpy(&rfc, (void *) val, olen);
2149 case L2CAP_CONF_FCS:
2150 if (val == L2CAP_FCS_NONE)
2151 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2154 case L2CAP_CONF_EFS:
2156 if (olen == sizeof(efs))
2157 memcpy(&efs, (void *) val, olen);
2160 case L2CAP_CONF_EWS:
/* Presumably refused when high-speed is disabled — the guarding
 * condition was lost in extraction; TODO confirm upstream. */
2162 return -ECONNREFUSED;
2164 set_bit(FLAG_EXT_CTRL, &chan->flags);
2165 set_bit(CONF_EWS_RECV, &chan->conf_state);
2166 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2167 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back with CONF_UNKNOWN. */
2174 result = L2CAP_CONF_UNKNOWN;
2175 *((u8 *) ptr++) = type;
2180 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2183 switch (chan->mode) {
2184 case L2CAP_MODE_STREAMING:
2185 case L2CAP_MODE_ERTM:
2186 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2187 chan->mode = l2cap_select_mode(rfc.mode,
2188 chan->conn->feat_mask);
2193 if (__l2cap_efs_supported(chan))
2194 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2196 return -ECONNREFUSED;
2199 if (chan->mode != rfc.mode)
2200 return -ECONNREFUSED;
/* Mode mismatch on later passes: reject once, refuse after that. */
2206 if (chan->mode != rfc.mode) {
2207 result = L2CAP_CONF_UNACCEPT;
2208 rfc.mode = chan->mode;
2210 if (chan->num_conf_rsp == 1)
2211 return -ECONNREFUSED;
2213 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2214 sizeof(rfc), (unsigned long) &rfc);
2217 if (result == L2CAP_CONF_SUCCESS) {
2218 /* Configure output options and let the other side know
2219 * which ones we don't like. */
2221 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2222 result = L2CAP_CONF_UNACCEPT;
2225 set_bit(CONF_MTU_DONE, &chan->conf_state);
2227 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service-type mismatch (and neither side is no-traffic). */
2230 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2231 efs.stype != L2CAP_SERV_NOTRAFIC &&
2232 efs.stype != chan->local_stype) {
2234 result = L2CAP_CONF_UNACCEPT;
2236 if (chan->num_conf_req >= 1)
2237 return -ECONNREFUSED;
2239 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2241 (unsigned long) &efs);
2246 case L2CAP_MODE_BASIC:
2247 chan->fcs = L2CAP_FCS_NONE;
2248 set_bit(CONF_MODE_DONE, &chan->conf_state);
2251 case L2CAP_MODE_ERTM:
2252 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2253 chan->remote_tx_win = rfc.txwin_size;
2255 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2257 chan->remote_max_tx = rfc.max_transmit;
/* Clamp the peer's max PDU so a full frame fits the ACL MTU. */
2259 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2261 L2CAP_EXT_HDR_SIZE -
2264 rfc.max_pdu_size = cpu_to_le16(size);
2265 chan->remote_mps = size;
2267 rfc.retrans_timeout =
2268 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2269 rfc.monitor_timeout =
2270 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2272 set_bit(CONF_MODE_DONE, &chan->conf_state);
2274 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2275 sizeof(rfc), (unsigned long) &rfc);
2277 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2278 chan->remote_id = efs.id;
2279 chan->remote_stype = efs.stype;
2280 chan->remote_msdu = le16_to_cpu(efs.msdu);
2281 chan->remote_flush_to =
2282 le32_to_cpu(efs.flush_to);
2283 chan->remote_acc_lat =
2284 le32_to_cpu(efs.acc_lat);
2285 chan->remote_sdu_itime =
2286 le32_to_cpu(efs.sdu_itime);
2287 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2288 sizeof(efs), (unsigned long) &efs);
2292 case L2CAP_MODE_STREAMING:
2293 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2295 L2CAP_EXT_HDR_SIZE -
2298 rfc.max_pdu_size = cpu_to_le16(size);
2299 chan->remote_mps = size;
2301 set_bit(CONF_MODE_DONE, &chan->conf_state);
2303 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2304 sizeof(rfc), (unsigned long) &rfc);
/* Any other mode: unacceptable; answer with basic-mode RFC. */
2309 result = L2CAP_CONF_UNACCEPT;
2311 memset(&rfc, 0, sizeof(rfc));
2312 rfc.mode = chan->mode;
2315 if (result == L2CAP_CONF_SUCCESS)
2316 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2318 rsp->scid = cpu_to_le16(chan->dcid);
2319 rsp->result = cpu_to_le16(result);
2320 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's Configuration Response (@rsp, @len) and build the
 * adjusted Configuration Request we will send next into @data.
 *
 * For each option the peer returned: re-propose MTU (raising ours to
 * the minimum if rejected), echo flush timeout and RFC, and clamp EWS
 * to the extended-window maximum.  On success, latch the negotiated
 * ERTM/streaming timeouts and MPS from the RFC option.
 * Returns the new request length (return line lost in extraction) or
 * -ECONNREFUSED on an impossible mode change.
 */
2325 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2327 struct l2cap_conf_req *req = data;
2328 void *ptr = req->data;
2331 struct l2cap_conf_rfc rfc;
2333 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2335 while (len >= L2CAP_CONF_OPT_SIZE) {
2336 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2339 case L2CAP_CONF_MTU:
/* Peer rejected our MTU as too small: retry with the minimum. */
2340 if (val < L2CAP_DEFAULT_MIN_MTU) {
2341 *result = L2CAP_CONF_UNACCEPT;
2342 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2345 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2348 case L2CAP_CONF_FLUSH_TO:
2349 chan->flush_to = val;
2350 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2354 case L2CAP_CONF_RFC:
2355 if (olen == sizeof(rfc))
2356 memcpy(&rfc, (void *)val, olen);
/* A device-mandated mode cannot be renegotiated by the peer. */
2358 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2359 rfc.mode != chan->mode)
2360 return -ECONNREFUSED;
2364 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2365 sizeof(rfc), (unsigned long) &rfc);
2368 case L2CAP_CONF_EWS:
2369 chan->tx_win = min_t(u16, val,
2370 L2CAP_DEFAULT_EXT_WINDOW);
2371 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS,
2377 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2378 return -ECONNREFUSED;
2380 chan->mode = rfc.mode;
2382 if (*result == L2CAP_CONF_SUCCESS) {
2384 case L2CAP_MODE_ERTM:
2385 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2386 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2387 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2389 case L2CAP_MODE_STREAMING:
2390 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2394 req->dcid = cpu_to_le16(chan->dcid);
2395 req->flags = cpu_to_le16(0x0000);
/*
 * Build a minimal Configuration Response (scid/result/flags, no
 * options) into @data.  Used for empty/rejecting responses.
 * Returns the response length (return line lost in extraction).
 */
2400 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2402 struct l2cap_conf_rsp *rsp = data;
2403 void *ptr = rsp->data;
2405 BT_DBG("chan %p", chan);
/* Our source CID is the peer's destination CID. */
2407 rsp->scid = cpu_to_le16(chan->dcid);
2408 rsp->result = cpu_to_le16(result);
2409 rsp->flags = cpu_to_le16(flags);
/*
 * Complete a deferred connection setup: send the pending successful
 * Connection Response, then (unless one was already sent) kick off
 * configuration with our Configuration Request.
 * NOTE(review): the `char buf[128]` declaration and the early return
 * after test_and_set_bit were lost in extraction.
 */
2414 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2416 struct l2cap_conn_rsp rsp;
2417 struct l2cap_conn *conn = chan->conn;
2420 rsp.scid = cpu_to_le16(chan->dcid);
2421 rsp.dcid = cpu_to_le16(chan->scid);
2422 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2423 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2424 l2cap_send_cmd(conn, chan->ident,
2425 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only send one Configuration Request; the bit guards against repeats. */
2427 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2430 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2431 l2cap_build_conf_req(chan, buf), buf);
2432 chan->num_conf_req++;
/*
 * Extract the RFC option from a successful Configuration Response and
 * latch the negotiated retrans/monitor timeouts and MPS on the channel.
 * No-op for channels that are neither ERTM nor streaming.
 */
2435 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2439 struct l2cap_conf_rfc rfc;
2441 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2443 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2446 while (len >= L2CAP_CONF_OPT_SIZE) {
2447 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2450 case L2CAP_CONF_RFC:
2451 if (olen == sizeof(rfc))
2452 memcpy(&rfc, (void *)val, olen);
2459 case L2CAP_MODE_ERTM:
2460 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2461 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2462 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2464 case L2CAP_MODE_STREAMING:
2465 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming Command Reject.  If it answers our outstanding
 * Information Request (matching ident), cancel the info timer, mark
 * feature discovery done and start pending channels anyway.
 */
2469 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2471 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2473 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2476 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2477 cmd->ident == conn->info_ident) {
2478 del_timer(&conn->info_timer);
2480 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2481 conn->info_ident = 0;
2483 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request.
 *
 * Flow: find a listening channel for the PSM; check link security (SDP,
 * PSM 0x0001, is exempt) and the accept backlog; create the child
 * channel; reject a duplicate remote CID; add the channel to the
 * connection and decide the result — success, security/authorization
 * pending, or pending until feature discovery completes.  Always sends
 * a Connection Response, may kick off feature discovery and/or our
 * first Configuration Request.
 * NOTE(review): many goto labels, dcid assignment and unlock paths were
 * lost in extraction; code left byte-identical.
 */
2489 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2491 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2492 struct l2cap_conn_rsp rsp;
2493 struct l2cap_chan *chan = NULL, *pchan;
2494 struct sock *parent, *sk = NULL;
2495 int result, status = L2CAP_CS_NO_INFO;
2497 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2498 __le16 psm = req->psm;
2500 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2502 /* Check if we have socket listening on psm */
2503 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2505 result = L2CAP_CR_BAD_PSM;
2511 bh_lock_sock(parent);
2513 /* Check if the ACL is secure enough (if not SDP) */
2514 if (psm != cpu_to_le16(0x0001) &&
2515 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05: authentication failure disconnect reason. */
2516 conn->disc_reason = 0x05;
2517 result = L2CAP_CR_SEC_BLOCK;
2521 result = L2CAP_CR_NO_MEM;
2523 /* Check for backlog size */
2524 if (sk_acceptq_is_full(parent)) {
2525 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2529 chan = pchan->ops->new_connection(pchan->data);
2535 write_lock_bh(&conn->chan_lock);
2537 /* Check if we already have channel with that dcid */
2538 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2539 write_unlock_bh(&conn->chan_lock);
2540 sock_set_flag(sk, SOCK_ZAPPED);
2541 chan->ops->close(chan->data);
2545 hci_conn_hold(conn->hcon);
2547 bacpy(&bt_sk(sk)->src, conn->src);
2548 bacpy(&bt_sk(sk)->dst, conn->dst);
2552 bt_accept_enqueue(parent, sk);
2554 __l2cap_chan_add(conn, chan);
2558 __set_chan_timer(chan, sk->sk_sndtimeo);
2560 chan->ident = cmd->ident;
2562 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2563 if (l2cap_check_security(chan)) {
/* defer_setup: wait for userspace accept before completing. */
2564 if (bt_sk(sk)->defer_setup) {
2565 l2cap_state_change(chan, BT_CONNECT2);
2566 result = L2CAP_CR_PEND;
2567 status = L2CAP_CS_AUTHOR_PEND;
2568 parent->sk_data_ready(parent, 0);
2570 l2cap_state_change(chan, BT_CONFIG);
2571 result = L2CAP_CR_SUCCESS;
2572 status = L2CAP_CS_NO_INFO;
2575 l2cap_state_change(chan, BT_CONNECT2);
2576 result = L2CAP_CR_PEND;
2577 status = L2CAP_CS_AUTHEN_PEND;
/* Feature discovery not done yet: answer "pending" for now. */
2580 l2cap_state_change(chan, BT_CONNECT2);
2581 result = L2CAP_CR_PEND;
2582 status = L2CAP_CS_NO_INFO;
2585 write_unlock_bh(&conn->chan_lock);
2588 bh_unlock_sock(parent);
2591 rsp.scid = cpu_to_le16(scid);
2592 rsp.dcid = cpu_to_le16(dcid);
2593 rsp.result = cpu_to_le16(result);
2594 rsp.status = cpu_to_le16(status);
2595 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2597 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2598 struct l2cap_info_req info;
2599 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2601 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2602 conn->info_ident = l2cap_get_ident(conn);
2604 mod_timer(&conn->info_timer, jiffies +
2605 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2607 l2cap_send_cmd(conn, conn->info_ident,
2608 L2CAP_INFO_REQ, sizeof(info), &info);
2611 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2612 result == L2CAP_CR_SUCCESS) {
2614 set_bit(CONF_REQ_SENT, &chan->conf_state);
2615 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2616 l2cap_build_conf_req(chan, buf), buf);
2617 chan->num_conf_req++;
/*
 * Handle an incoming Connection Response: look up the channel by our
 * scid (or by the command ident while still pending), then on SUCCESS
 * move to BT_CONFIG and send our first Configuration Request; on PEND
 * mark connect-pending; otherwise tear the channel down (deferred via
 * a short timer if the socket is locked by userspace).
 * NOTE(review): switch framing, dcid assignment, `struct sock *sk` and
 * several unlock/return lines were lost in extraction.
 */
2623 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2625 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2626 u16 scid, dcid, result, status;
2627 struct l2cap_chan *chan;
2631 scid = __le16_to_cpu(rsp->scid);
2632 dcid = __le16_to_cpu(rsp->dcid);
2633 result = __le16_to_cpu(rsp->result);
2634 status = __le16_to_cpu(rsp->status);
2636 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2639 chan = l2cap_get_chan_by_scid(conn, scid);
/* scid of 0 in pending responses: fall back to lookup by ident. */
2643 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2651 case L2CAP_CR_SUCCESS:
2652 l2cap_state_change(chan, BT_CONFIG);
2655 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2657 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2660 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2661 l2cap_build_conf_req(chan, req), req);
2662 chan->num_conf_req++;
2666 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2670 /* don't delete l2cap channel if sk is owned by user */
2671 if (sock_owned_by_user(sk)) {
2672 l2cap_state_change(chan, BT_DISCONN);
2673 __clear_chan_timer(chan);
/* Retry teardown shortly (HZ/5 = 200 ms) once the user releases sk. */
2674 __set_chan_timer(chan, HZ / 5);
2678 l2cap_chan_del(chan, ECONNREFUSED);
2686 static inline void set_default_fcs(struct l2cap_chan *chan)
2688 /* FCS is enabled only in ERTM or streaming mode, if one or both
/* ...sides request it; otherwise the channel runs without a checksum. */
2691 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2692 chan->fcs = L2CAP_FCS_NONE;
2693 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2694 chan->fcs = L2CAP_FCS_CRC16;
/*
 * Handle an incoming Configuration Request.
 *
 * Rejects requests for channels not in BT_CONFIG/BT_CONNECT2 with an
 * invalid-CID Command Reject, and oversized accumulated requests with
 * CONF_REJECT.  Options are accumulated in chan->conf_req across
 * continuation packets (flags bit 0); on the final packet the full
 * request is parsed, a response sent, and — once both directions are
 * configured — the channel is brought to BT_CONNECTED (with ERTM init
 * where applicable).  May also trigger our own first conf request.
 * NOTE(review): `u16 dcid, flags`, `u8 rsp[64]`, `struct sock *sk`,
 * unlock/return lines and the conf_len reset were lost in extraction.
 */
2697 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2699 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2702 struct l2cap_chan *chan;
2706 dcid = __le16_to_cpu(req->dcid);
2707 flags = __le16_to_cpu(req->flags);
2709 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2711 chan = l2cap_get_chan_by_scid(conn, dcid);
2717 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2718 struct l2cap_cmd_rej_cid rej;
2720 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2721 rej.scid = cpu_to_le16(chan->scid);
2722 rej.dcid = cpu_to_le16(chan->dcid);
2724 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2729 /* Reject if config buffer is too small. */
2730 len = cmd_len - sizeof(*req);
2731 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2732 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2733 l2cap_build_conf_rsp(chan, rsp,
2734 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this packet's options for later parsing. */
2739 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2740 chan->conf_len += len;
2742 if (flags & 0x0001) {
2743 /* Incomplete config. Send empty response. */
2744 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2745 l2cap_build_conf_rsp(chan, rsp,
2746 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2750 /* Complete config. */
2751 len = l2cap_parse_conf_req(chan, rsp);
2753 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2757 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2758 chan->num_conf_rsp++;
2760 /* Reset config buffer. */
2763 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: the channel becomes operational. */
2766 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2767 set_default_fcs(chan);
2769 l2cap_state_change(chan, BT_CONNECTED);
2771 chan->next_tx_seq = 0;
2772 chan->expected_tx_seq = 0;
2773 skb_queue_head_init(&chan->tx_q);
2774 if (chan->mode == L2CAP_MODE_ERTM)
2775 l2cap_ertm_init(chan);
2777 l2cap_chan_ready(sk);
2781 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2783 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2784 l2cap_build_conf_req(chan, buf), buf);
2785 chan->num_conf_req++;
/*
 * Handle an incoming Configuration Response.
 *
 * SUCCESS: latch the negotiated RFC parameters.  UNACCEPT: re-parse and
 * resend an adjusted Configuration Request, up to the retry limit;
 * anything else (or exhausted retries) disconnects the channel.  When
 * both directions complete, bring the channel to BT_CONNECTED.
 * NOTE(review): the switch framing, `struct sock *sk`, `char req[64]`
 * and the flags continuation check were lost in extraction.
 */
2793 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2795 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2796 u16 scid, flags, result;
2797 struct l2cap_chan *chan;
2799 int len = cmd->len - sizeof(*rsp);
2801 scid = __le16_to_cpu(rsp->scid);
2802 flags = __le16_to_cpu(rsp->flags);
2803 result = __le16_to_cpu(rsp->result);
2805 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2806 scid, flags, result);
2808 chan = l2cap_get_chan_by_scid(conn, scid);
2815 case L2CAP_CONF_SUCCESS:
2816 l2cap_conf_rfc_get(chan, rsp->data, len);
2819 case L2CAP_CONF_UNACCEPT:
2820 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Response options must fit our request buffer before re-parsing. */
2823 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2824 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2828 /* throw out any old stored conf requests */
2829 result = L2CAP_CONF_SUCCESS;
2830 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2833 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2837 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2838 L2CAP_CONF_REQ, len, req);
2839 chan->num_conf_req++;
2840 if (result != L2CAP_CONF_SUCCESS)
/* Fatal result (or retries exhausted): reset and disconnect. */
2846 sk->sk_err = ECONNRESET;
2847 __set_chan_timer(chan, HZ * 5);
2848 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2855 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2857 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2858 set_default_fcs(chan);
2860 l2cap_state_change(chan, BT_CONNECTED);
2861 chan->next_tx_seq = 0;
2862 chan->expected_tx_seq = 0;
2863 skb_queue_head_init(&chan->tx_q);
2864 if (chan->mode == L2CAP_MODE_ERTM)
2865 l2cap_ertm_init(chan);
2867 l2cap_chan_ready(sk);
/*
 * Handle an incoming Disconnection Request: look up the channel by our
 * scid (the peer's dcid), acknowledge with a Disconnection Response,
 * shut the socket down and delete the channel — deferred via a short
 * timer if userspace currently owns the socket lock.
 * NOTE(review): `u16 dcid, scid`, `struct sock *sk` and lock/unlock
 * lines were lost in extraction.
 */
2875 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2877 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2878 struct l2cap_disconn_rsp rsp;
2880 struct l2cap_chan *chan;
2883 scid = __le16_to_cpu(req->scid);
2884 dcid = __le16_to_cpu(req->dcid);
2886 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2888 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Echo the CID pair back, swapped to the peer's perspective. */
2894 rsp.dcid = cpu_to_le16(chan->scid);
2895 rsp.scid = cpu_to_le16(chan->dcid);
2896 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2898 sk->sk_shutdown = SHUTDOWN_MASK;
2900 /* don't delete l2cap channel if sk is owned by user */
2901 if (sock_owned_by_user(sk)) {
2902 l2cap_state_change(chan, BT_DISCONN);
2903 __clear_chan_timer(chan);
2904 __set_chan_timer(chan, HZ / 5);
2909 l2cap_chan_del(chan, ECONNRESET);
2912 chan->ops->close(chan->data);
/*
 * Handle an incoming Disconnection Response to our earlier request:
 * find the channel by scid and delete it (error 0 — clean close),
 * deferring via a short timer if userspace owns the socket lock.
 */
2916 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2918 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2920 struct l2cap_chan *chan;
2923 scid = __le16_to_cpu(rsp->scid);
2924 dcid = __le16_to_cpu(rsp->dcid);
2926 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2928 chan = l2cap_get_chan_by_scid(conn, scid);
2934 /* don't delete l2cap channel if sk is owned by user */
2935 if (sock_owned_by_user(sk)) {
2936 l2cap_state_change(chan,BT_DISCONN);
2937 __clear_chan_timer(chan);
2938 __set_chan_timer(chan, HZ / 5);
2943 l2cap_chan_del(chan, 0);
2946 chan->ops->close(chan->data);
/*
 * Handle an incoming Information Request.  Answers FEAT_MASK with our
 * feature mask (ERTM/streaming always; ext-flow/ext-window presumably
 * gated on enable_hs — the condition line was lost in extraction),
 * FIXED_CHAN with the fixed-channel bitmap, and anything else with
 * NOTSUPP.
 */
2950 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2952 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
2955 type = __le16_to_cpu(req->type);
2957 BT_DBG("type 0x%4.4x", type);
2959 if (type == L2CAP_IT_FEAT_MASK) {
2961 u32 feat_mask = l2cap_feat_mask;
2962 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2963 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2964 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
2966 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
2969 feat_mask |= L2CAP_FEAT_EXT_FLOW
2970 | L2CAP_FEAT_EXT_WINDOW;
2972 put_unaligned_le32(feat_mask, rsp->data);
2973 l2cap_send_cmd(conn, cmd->ident,
2974 L2CAP_INFO_RSP, sizeof(buf), buf);
2975 } else if (type == L2CAP_IT_FIXED_CHAN) {
2977 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
2978 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
2979 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel map follows the 4-byte info_rsp header. */
2980 memcpy(buf + 4, l2cap_fixed_chan, 8);
2981 l2cap_send_cmd(conn, cmd->ident,
2982 L2CAP_INFO_RSP, sizeof(buf), buf);
2984 struct l2cap_info_rsp rsp;
2985 rsp.type = cpu_to_le16(type);
2986 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
2987 l2cap_send_cmd(conn, cmd->ident,
2988 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an incoming Information Response (to our request).
 *
 * After validating the ident and discovery state, cancel the info
 * timer.  On failure, mark discovery done and start pending channels.
 * On FEAT_MASK success, store the remote feature mask and — if the
 * remote supports fixed channels — chain a FIXED_CHAN request;
 * otherwise (and on FIXED_CHAN replies) finish discovery and start
 * pending channels.
 */
2994 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2996 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
2999 type = __le16_to_cpu(rsp->type);
3000 result = __le16_to_cpu(rsp->result);
3002 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3004 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3005 if (cmd->ident != conn->info_ident ||
3006 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3009 del_timer(&conn->info_timer);
3011 if (result != L2CAP_IR_SUCCESS) {
3012 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3013 conn->info_ident = 0;
3015 l2cap_conn_start(conn);
3020 if (type == L2CAP_IT_FEAT_MASK) {
3021 conn->feat_mask = get_unaligned_le32(rsp->data);
3023 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3024 struct l2cap_info_req req;
3025 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3027 conn->info_ident = l2cap_get_ident(conn);
3029 l2cap_send_cmd(conn, conn->info_ident,
3030 L2CAP_INFO_REQ, sizeof(req), &req);
3032 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3033 conn->info_ident = 0;
3035 l2cap_conn_start(conn);
3037 } else if (type == L2CAP_IT_FIXED_CHAN) {
3038 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3039 conn->info_ident = 0;
3041 l2cap_conn_start(conn);
/* Validate LE connection parameter update values.
 * Bounds follow the ranges used by the LE Connection Parameter Update
 * procedure: interval min/max in [6, 3200], supervision-timeout
 * multiplier in [10, 3200], latency capped both at 499 and at the
 * value derivable from timeout and max interval. */
3047 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3052 if (min > max || min < 6 || max > 3200)
3055 if (to_multiplier < 10 || to_multiplier > 3200)
/* Timeout must exceed the effective connection interval. */
3058 if (max >= to_multiplier * 8)
3061 max_latency = (to_multiplier * 8 / max) - 1;
3062 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request from the slave.
 * Only valid when we are master; validates the proposed parameters,
 * sends accept/reject, and on accept pushes the update to the
 * controller via hci_le_conn_update(). */
3068 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3069 struct l2cap_cmd_hdr *cmd, u8 *data)
3071 struct hci_conn *hcon = conn->hcon;
3072 struct l2cap_conn_param_update_req *req;
3073 struct l2cap_conn_param_update_rsp rsp;
3074 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may act on this request. */
3077 if (!(hcon->link_mode & HCI_LM_MASTER))
/* Reject malformed requests before touching the payload. */
3080 cmd_len = __le16_to_cpu(cmd->len);
3081 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3084 req = (struct l2cap_conn_param_update_req *) data;
3085 min = __le16_to_cpu(req->min);
3086 max = __le16_to_cpu(req->max);
3087 latency = __le16_to_cpu(req->latency);
3088 to_multiplier = __le16_to_cpu(req->to_multiplier);
3090 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3091 min, max, latency, to_multiplier);
3093 memset(&rsp, 0, sizeof(rsp));
3095 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3097 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3099 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3101 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters were acceptable: apply them at the HCI layer. */
3105 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler.
 * Echo requests are answered inline; unknown opcodes are logged and
 * (on elided lines) presumably rejected by the caller's err path. */
3110 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3111 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3115 switch (cmd->code) {
3116 case L2CAP_COMMAND_REJ:
3117 l2cap_command_rej(conn, cmd, data);
3120 case L2CAP_CONN_REQ:
3121 err = l2cap_connect_req(conn, cmd, data);
3124 case L2CAP_CONN_RSP:
3125 err = l2cap_connect_rsp(conn, cmd, data);
3128 case L2CAP_CONF_REQ:
3129 err = l2cap_config_req(conn, cmd, cmd_len, data);
3132 case L2CAP_CONF_RSP:
3133 err = l2cap_config_rsp(conn, cmd, data);
3136 case L2CAP_DISCONN_REQ:
3137 err = l2cap_disconnect_req(conn, cmd, data);
3140 case L2CAP_DISCONN_RSP:
3141 err = l2cap_disconnect_rsp(conn, cmd, data);
/* Echo: reflect the payload back with the same ident. */
3144 case L2CAP_ECHO_REQ:
3145 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3148 case L2CAP_ECHO_RSP:
3151 case L2CAP_INFO_REQ:
3152 err = l2cap_information_req(conn, cmd, data);
3155 case L2CAP_INFO_RSP:
3156 err = l2cap_information_rsp(conn, cmd, data);
3160 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command.  Only the connection parameter
 * update request needs real handling here; rejects and update
 * responses fall through, and unknown opcodes are logged. */
3168 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3169 struct l2cap_cmd_hdr *cmd, u8 *data)
3171 switch (cmd->code) {
3172 case L2CAP_COMMAND_REJ:
3175 case L2CAP_CONN_PARAM_UPDATE_REQ:
3176 return l2cap_conn_param_update_req(conn, cmd, data);
3178 case L2CAP_CONN_PARAM_UPDATE_RSP:
3182 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signaling channel payload: iterate over the contained
 * command headers, route each to the LE or BR/EDR dispatcher based on
 * the link type, and answer failures with a Command Reject. */
3187 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3188 struct sk_buff *skb)
3190 u8 *data = skb->data;
3192 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signaling traffic. */
3195 l2cap_raw_recv(conn, skb);
3197 while (len >= L2CAP_CMD_HDR_SIZE) {
3199 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3200 data += L2CAP_CMD_HDR_SIZE;
3201 len -= L2CAP_CMD_HDR_SIZE;
3203 cmd_len = le16_to_cpu(cmd.len);
3205 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command claiming more payload than remains, or with the reserved
 * ident 0, is corrupt — stop parsing this frame. */
3207 if (cmd_len > len || !cmd.ident) {
3208 BT_DBG("corrupted command");
3212 if (conn->hcon->type == LE_LINK)
3213 err = l2cap_le_sig_cmd(conn, &cmd, data);
3215 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3218 struct l2cap_cmd_rej_unk rej;
3220 BT_ERR("Wrong link type (%d)", err);
3222 /* FIXME: Map err to a valid reason */
3223 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3224 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS of a received ERTM/streaming frame.
 * Trims the 2 FCS octets off the skb, then reads them from just past
 * the (now shortened) data — the bytes are still present in the
 * buffer after skb_trim().  The CRC covers the L2CAP header (which
 * sits hdr_size bytes before skb->data after the earlier pulls) plus
 * the remaining payload. */
3234 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3236 u16 our_fcs, rcv_fcs;
/* Extended control field frames carry a larger header. */
3239 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3240 hdr_size = L2CAP_EXT_HDR_SIZE;
3242 hdr_size = L2CAP_ENH_HDR_SIZE;
3244 if (chan->fcs == L2CAP_FCS_CRC16) {
3245 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3246 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3247 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3249 if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) from the peer: send RNR if we are locally busy,
 * otherwise retransmit/send pending I-frames, and fall back to an RR
 * if nothing at all went out (the peer still needs its F-bit reply). */
3255 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3259 chan->frames_sent = 0;
3261 control |= __set_reqseq(chan, chan->buffer_seq);
3263 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3264 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3265 l2cap_send_sframe(chan, control);
3266 set_bit(CONN_RNR_SENT, &chan->conn_state);
3269 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3270 l2cap_retransmit_frames(chan);
3272 l2cap_ertm_send(chan);
/* Nothing was (re)transmitted and we are not busy: send a plain RR
 * so the poll still gets acknowledged. */
3274 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3275 chan->frames_sent == 0) {
3276 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3277 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq offset relative to buffer_seq.  Duplicate
 * tx_seq entries are detected (elided lines presumably return an
 * error for them — NOTE(review): confirm against full source). */
3281 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3283 struct sk_buff *next_skb;
3284 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequence/SAR metadata in the skb control block. */
3286 bt_cb(skb)->tx_seq = tx_seq;
3287 bt_cb(skb)->sar = sar;
3289 next_skb = skb_peek(&chan->srej_q);
/* Empty queue: just append. */
3291 __skb_queue_tail(&chan->srej_q, skb);
3295 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3298 if (bt_cb(next_skb)->tx_seq == tx_seq)
3301 next_tx_seq_offset = __seq_offset(chan,
3302 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
/* Found the first queued frame with a larger offset: insert before. */
3304 if (next_tx_seq_offset > tx_seq_offset) {
3305 __skb_queue_before(&chan->srej_q, next_skb, skb);
3309 if (skb_queue_is_last(&chan->srej_q, next_skb))
3312 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Largest offset seen so far: append at the tail. */
3314 __skb_queue_tail(&chan->srej_q, skb);
/* Append new_frag to skb's frag_list (used during SDU reassembly),
 * tracking the tail in *last_frag for O(1) further appends, and
 * keeping skb's aggregate len/data_len/truesize accounting correct. */
3319 static void append_skb_frag(struct sk_buff *skb,
3320 struct sk_buff *new_frag, struct sk_buff **last_frag)
3322 /* skb->len reflects data in skb as well as all fragments
3323 * skb->data_len reflects only data in fragments
/* First fragment starts the list; later ones chain off *last_frag. */
3325 if (!skb_has_frag_list(skb))
3326 skb_shinfo(skb)->frag_list = new_frag;
3328 new_frag->next = NULL;
3330 (*last_frag)->next = new_frag;
3331 *last_frag = new_frag;
3333 skb->len += new_frag->len;
3334 skb->data_len += new_frag->len;
3335 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the SAR bits in the
 * control field.  Unsegmented frames are delivered straight to the
 * channel's recv op; START frames record the expected SDU length and
 * begin a fragment chain; CONTINUE/END frames are appended via
 * append_skb_frag(), with END delivering the completed SDU.  Length
 * violations (SDU larger than imtu, or END length mismatch) drop the
 * partial SDU (elided lines presumably return -EMSGSIZE — see the
 * caller's check in l2cap_data_channel). */
3338 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3342 switch (__get_ctrl_sar(chan, control)) {
3343 case L2CAP_SAR_UNSEGMENTED:
3347 err = chan->ops->recv(chan->data, skb);
3350 case L2CAP_SAR_START:
/* START carries the total SDU length in its first two octets. */
3354 chan->sdu_len = get_unaligned_le16(skb->data);
3355 skb_pull(skb, L2CAP_SDULEN_SIZE);
3357 if (chan->sdu_len > chan->imtu) {
3362 if (skb->len >= chan->sdu_len)
3366 chan->sdu_last_frag = skb;
3372 case L2CAP_SAR_CONTINUE:
3376 append_skb_frag(chan->sdu, skb,
3377 &chan->sdu_last_frag);
/* A CONTINUE frame must not complete (or overrun) the SDU. */
3380 if (chan->sdu->len >= chan->sdu_len)
3390 append_skb_frag(chan->sdu, skb,
3391 &chan->sdu_last_frag);
/* END frame: total length must now match exactly. */
3394 if (chan->sdu->len != chan->sdu_len)
3397 err = chan->ops->recv(chan->data, chan->sdu);
3400 /* Reassembly complete */
3402 chan->sdu_last_frag = NULL;
/* Error path: discard the partial SDU and reset reassembly state. */
3410 kfree_skb(chan->sdu);
3412 chan->sdu_last_frag = NULL;
/* Enter the ERTM local-busy state: mark the channel busy, tell the
 * peer with an RNR S-frame carrying our current reqseq, remember that
 * the RNR went out, and stop the ack timer (no acks while busy). */
3419 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3423 BT_DBG("chan %p, Enter local busy", chan);
3425 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3427 control = __set_reqseq(chan, chan->buffer_seq);
3428 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3429 l2cap_send_sframe(chan, control);
3431 set_bit(CONN_RNR_SENT, &chan->conn_state);
3433 __clear_ack_timer(chan);
/* Leave the ERTM local-busy state.  If we previously sent an RNR,
 * poll the peer with an RR (P=1), arm the monitor timer and wait for
 * the F-bit reply; then clear the busy/RNR flags. */
3436 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
/* Nothing to undo if no RNR was ever sent. */
3440 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3443 control = __set_reqseq(chan, chan->buffer_seq);
3444 control |= __set_ctrl_poll(chan);
3445 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3446 l2cap_send_sframe(chan, control);
3447 chan->retry_count = 1;
/* Switch from retransmission timing to the monitor timer while we
 * wait for the final (F=1) response to our poll. */
3449 __clear_retrans_timer(chan);
3450 __set_monitor_timer(chan);
3452 set_bit(CONN_WAIT_F, &chan->conn_state);
3455 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3456 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3458 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer to toggle local-busy flow control
 * on an ERTM channel (e.g. when the receive buffer fills/drains).
 * Only meaningful in ERTM mode. */
3461 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3463 if (chan->mode == L2CAP_MODE_ERTM) {
3465 l2cap_ertm_enter_local_busy(chan);
3467 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ queue of consecutive in-order frames starting at
 * tx_seq: dequeue, reassemble and deliver each one, advancing both
 * buffer_seq_srej and the expected tx_seq, until a gap is hit or we
 * go locally busy.  A reassembly error tears the channel down. */
3471 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3473 struct sk_buff *skb;
3476 while ((skb = skb_peek(&chan->srej_q)) &&
3477 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Head of queue is not the next expected frame: stop at the gap. */
3480 if (bt_cb(skb)->tx_seq != tx_seq)
3483 skb = skb_dequeue(&chan->srej_q);
3484 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3485 err = l2cap_reassemble_sdu(chan, skb, control);
3488 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3492 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3493 tx_seq = __next_seq(chan, tx_seq);
/* Walk the pending-SREJ list up to (and including) the entry for
 * tx_seq, re-sending an SREJ S-frame for each and re-queuing the
 * entries at the tail so the list stays in request order. */
3497 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3499 struct srej_list *l, *tmp;
3502 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Reached the requested sequence number: done (elided lines
 * presumably free/stop here — NOTE(review): confirm). */
3503 if (l->tx_seq == tx_seq) {
3508 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3509 control |= __set_reqseq(chan, l->tx_seq);
3510 l2cap_send_sframe(chan, control);
3512 list_add_tail(&l->list, &chan->srej_l);
/* Send an SREJ S-frame for every missing sequence number between
 * expected_tx_seq and the received tx_seq, recording each request in
 * the srej_l list, then advance expected_tx_seq past tx_seq. */
3516 static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3518 struct srej_list *new;
3521 while (tx_seq != chan->expected_tx_seq) {
3522 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3523 control |= __set_reqseq(chan, chan->expected_tx_seq);
3524 l2cap_send_sframe(chan, control);
/* NOTE(review): kzalloc(GFP_ATOMIC) result is dereferenced on the
 * next line without a NULL check — potential NULL deref under memory
 * pressure.  Later upstream kernels made this function return an
 * error on allocation failure; worth backporting. */
3526 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3527 new->tx_seq = chan->expected_tx_seq;
3529 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3531 list_add_tail(&new->list, &chan->srej_l);
/* Account for the frame that actually arrived (tx_seq itself). */
3534 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM receive path for I-frames.  Handles:
 *  - F-bit completion of an outstanding poll (WAIT_F),
 *  - acking via the piggybacked reqseq,
 *  - window validation of tx_seq,
 *  - in-order delivery and SREJ-based recovery of out-of-order frames,
 *  - periodic ack generation every num_to_ack frames. */
3537 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3539 u16 tx_seq = __get_txseq(chan, rx_control);
3540 u16 req_seq = __get_reqseq(chan, rx_control);
3541 u8 sar = __get_ctrl_sar(chan, rx_control);
3542 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every tx_win/6 frames. */
3543 int num_to_ack = (chan->tx_win/6) + 1;
3546 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3547 tx_seq, rx_control);
/* F=1 while waiting for a final: poll answered, stop the monitor
 * timer and resume normal retransmission timing. */
3549 if (__is_ctrl_final(chan, rx_control) &&
3550 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3551 __clear_monitor_timer(chan);
3552 if (chan->unacked_frames > 0)
3553 __set_retrans_timer(chan);
3554 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* The piggybacked reqseq acknowledges our outstanding I-frames. */
3557 chan->expected_ack_seq = req_seq;
3558 l2cap_drop_acked_frames(chan);
3560 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3562 /* invalid tx_seq */
3563 if (tx_seq_offset >= chan->tx_win) {
3564 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* While locally busy we do not accept new I-frames. */
3568 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3571 if (tx_seq == chan->expected_tx_seq)
3574 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3575 struct srej_list *first;
/* In SREJ recovery: check whether this frame fills the oldest gap. */
3577 first = list_first_entry(&chan->srej_l,
3578 struct srej_list, list);
3579 if (tx_seq == first->tx_seq) {
3580 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3581 l2cap_check_srej_gap(chan, tx_seq);
3583 list_del(&first->list);
/* All gaps filled: leave SREJ_SENT state and ack everything. */
3586 if (list_empty(&chan->srej_l)) {
3587 chan->buffer_seq = chan->buffer_seq_srej;
3588 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3589 l2cap_send_ack(chan);
3590 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3593 struct srej_list *l;
3595 /* duplicated tx_seq */
3596 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
/* Already requested this seq: resend the SREJs up to it. */
3599 list_for_each_entry(l, &chan->srej_l, list) {
3600 if (l->tx_seq == tx_seq) {
3601 l2cap_resend_srejframe(chan, tx_seq);
3605 l2cap_send_srejframe(chan, tx_seq);
/* Not yet in SREJ recovery: decide whether this is a duplicate or a
 * genuinely new gap, and enter SREJ_SENT for the latter. */
3608 expected_tx_seq_offset = __seq_offset(chan,
3609 chan->expected_tx_seq, chan->buffer_seq);
3611 /* duplicated tx_seq */
3612 if (tx_seq_offset < expected_tx_seq_offset)
3615 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3617 BT_DBG("chan %p, Enter SREJ", chan);
3619 INIT_LIST_HEAD(&chan->srej_l);
3620 chan->buffer_seq_srej = chan->buffer_seq;
3622 __skb_queue_head_init(&chan->srej_q);
3623 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3625 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3627 l2cap_send_srejframe(chan, tx_seq);
3629 __clear_ack_timer(chan);
/* Expected in-order frame path. */
3634 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* During SREJ recovery even in-order frames go through the queue. */
3636 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3637 bt_cb(skb)->tx_seq = tx_seq;
3638 bt_cb(skb)->sar = sar;
3639 __skb_queue_tail(&chan->srej_q, skb);
3643 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3644 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3647 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3651 if (__is_ctrl_final(chan, rx_control)) {
3652 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3653 l2cap_retransmit_frames(chan);
3656 __set_ack_timer(chan);
/* Batch acknowledgments: ack once every num_to_ack frames. */
3658 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3659 if (chan->num_acked == num_to_ack - 1)
3660 l2cap_send_ack(chan);
/* Handle a received RR (Receiver Ready) S-frame: acknowledge frames
 * up to reqseq, then react to the P/F bits — a poll gets an F-bit
 * reply (or SREJ tail during recovery), a final may trigger
 * retransmission, and otherwise transmission simply resumes. */
3669 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3671 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3672 __get_reqseq(chan, rx_control), rx_control);
3674 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3675 l2cap_drop_acked_frames(chan);
3677 if (__is_ctrl_poll(chan, rx_control)) {
/* Peer is polling: our next frame must carry F=1. */
3678 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3679 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3680 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3681 (chan->unacked_frames > 0))
3682 __set_retrans_timer(chan);
3684 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3685 l2cap_send_srejtail(chan);
3687 l2cap_send_i_or_rr_or_rnr(chan);
3690 } else if (__is_ctrl_final(chan, rx_control)) {
3691 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F=1 answering our REJ: retransmit unless already done. */
3693 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3694 l2cap_retransmit_frames(chan);
3697 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3698 (chan->unacked_frames > 0))
3699 __set_retrans_timer(chan);
3701 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3702 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3703 l2cap_send_ack(chan);
3705 l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: the peer rejects everything from
 * reqseq onward.  Ack up to reqseq, then retransmit from there —
 * an F=1 REJ only retransmits if REJ_ACT wasn't already pending. */
3709 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
3711 u16 tx_seq = __get_reqseq(chan, rx_control);
3713 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3715 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3717 chan->expected_ack_seq = tx_seq;
3718 l2cap_drop_acked_frames(chan);
3720 if (__is_ctrl_final(chan, rx_control)) {
3721 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3722 l2cap_retransmit_frames(chan);
3724 l2cap_retransmit_frames(chan);
/* If we're awaiting a final, remember the REJ was already acted on. */
3726 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3727 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a received SREJ S-frame: selectively retransmit the single
 * frame at reqseq.  P=1 additionally acks up to reqseq and resumes
 * sending with F=1 set; F=1 clears a matching saved SREJ_ACT. */
3730 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
3732 u16 tx_seq = __get_reqseq(chan, rx_control);
3734 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3736 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3738 if (__is_ctrl_poll(chan, rx_control)) {
3739 chan->expected_ack_seq = tx_seq;
3740 l2cap_drop_acked_frames(chan);
3742 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3743 l2cap_retransmit_one_frame(chan, tx_seq);
3745 l2cap_ertm_send(chan);
/* Remember which SREJ was answered while a poll is outstanding. */
3747 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3748 chan->srej_save_reqseq = tx_seq;
3749 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3751 } else if (__is_ctrl_final(chan, rx_control)) {
/* F=1 matching the saved SREJ: already handled, just clear state. */
3752 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3753 chan->srej_save_reqseq == tx_seq)
3754 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3756 l2cap_retransmit_one_frame(chan, tx_seq);
3758 l2cap_retransmit_one_frame(chan, tx_seq);
3759 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3760 chan->srej_save_reqseq = tx_seq;
3761 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a received RNR (Receiver Not Ready) S-frame: mark the peer
 * busy, ack up to reqseq, and answer a poll appropriately — with an
 * RR/RNR+F outside SREJ recovery, or the SREJ tail / a plain RR when
 * selective reject is in progress. */
3766 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
3768 u16 tx_seq = __get_reqseq(chan, rx_control);
3770 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3772 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3773 chan->expected_ack_seq = tx_seq;
3774 l2cap_drop_acked_frames(chan);
3776 if (__is_ctrl_poll(chan, rx_control))
3777 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3779 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* Peer can't receive: stop retransmitting until it recovers. */
3780 __clear_retrans_timer(chan);
3781 if (__is_ctrl_poll(chan, rx_control))
3782 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3786 if (__is_ctrl_poll(chan, rx_control)) {
3787 l2cap_send_srejtail(chan);
3789 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
3790 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received S-frame (RR/REJ/SREJ/RNR) to its handler.
 * An F=1 frame while waiting for a final first completes the poll:
 * stop the monitor timer and re-arm retransmission if needed. */
3794 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3796 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
3798 if (__is_ctrl_final(chan, rx_control) &&
3799 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3800 __clear_monitor_timer(chan);
3801 if (chan->unacked_frames > 0)
3802 __set_retrans_timer(chan);
3803 clear_bit(CONN_WAIT_F, &chan->conn_state);
3806 switch (__get_ctrl_super(chan, rx_control)) {
3807 case L2CAP_SUPER_RR:
3808 l2cap_data_channel_rrframe(chan, rx_control);
3811 case L2CAP_SUPER_REJ:
3812 l2cap_data_channel_rejframe(chan, rx_control);
3815 case L2CAP_SUPER_SREJ:
3816 l2cap_data_channel_srejframe(chan, rx_control);
3819 case L2CAP_SUPER_RNR:
3820 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for one received ERTM PDU on a socket.  Pulls the
 * control field, verifies the FCS, sanity-checks the payload length
 * and the piggybacked reqseq against our send window, then routes the
 * frame to the I-frame or S-frame handler.  Any protocol violation
 * tears the channel down with ECONNRESET. */
3828 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3830 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3833 int len, next_tx_seq_offset, req_seq_offset;
3835 control = __get_control(chan, skb->data);
3836 skb_pull(skb, __ctrl_size(chan));
3840 * We can just drop the corrupted I-frame here.
3841 * Receiver will miss it and start proper recovery
3842 * procedures and ask retransmission.
3844 if (l2cap_check_fcs(chan, skb))
/* Effective payload: subtract SDU-length header (SAR start only)
 * and FCS from the remaining length. */
3847 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
3848 len -= L2CAP_SDULEN_SIZE;
3850 if (chan->fcs == L2CAP_FCS_CRC16)
3851 len -= L2CAP_FCS_SIZE;
3853 if (len > chan->mps) {
3854 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3858 req_seq = __get_reqseq(chan, control);
3860 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
3862 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
3863 chan->expected_ack_seq);
3865 /* check for invalid req-seq */
/* reqseq may not acknowledge frames we never sent. */
3866 if (req_seq_offset > next_tx_seq_offset) {
3867 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3871 if (!__is_sframe(chan, control)) {
/* I-frame with zero payload is invalid (elided check presumably
 * tests len < 0 / == 0 — NOTE(review): confirm). */
3873 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3877 l2cap_data_channel_iframe(chan, control, skb);
/* S-frames must carry no payload. */
3881 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3885 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver one data frame to the channel identified by cid, handling
 * each channel mode: BASIC (direct delivery), ERTM (full state
 * machine, deferred to the socket backlog if the sock is owned by a
 * user context), and STREAMING (sequence check, no retransmission). */
3895 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3897 struct l2cap_chan *chan;
3898 struct sock *sk = NULL;
3903 chan = l2cap_get_chan_by_scid(conn, cid);
3905 BT_DBG("unknown cid 0x%4.4x", cid);
/* NOTE(review): sk is initialized NULL and used below
 * (sock_owned_by_user/sk_add_backlog); it is presumably assigned
 * from chan->sk on an elided line — confirm against full source. */
3911 BT_DBG("chan %p, len %d", chan, skb->len);
3913 if (chan->state != BT_CONNECTED)
3916 switch (chan->mode) {
3917 case L2CAP_MODE_BASIC:
3918 /* If socket recv buffers overflows we drop data here
3919 * which is *bad* because L2CAP has to be reliable.
3920 * But we don't have any other choice. L2CAP doesn't
3921 * provide flow control mechanism. */
3923 if (chan->imtu < skb->len)
3926 if (!chan->ops->recv(chan->data, skb))
3930 case L2CAP_MODE_ERTM:
/* Socket locked by user context: queue to the backlog so the
 * owner processes it on release. */
3931 if (!sock_owned_by_user(sk)) {
3932 l2cap_ertm_data_rcv(sk, skb);
3934 if (sk_add_backlog(sk, skb))
3940 case L2CAP_MODE_STREAMING:
3941 control = __get_control(chan, skb->data);
3942 skb_pull(skb, __ctrl_size(chan));
3945 if (l2cap_check_fcs(chan, skb))
3948 if (__is_sar_start(chan, control))
3949 len -= L2CAP_SDULEN_SIZE;
3951 if (chan->fcs == L2CAP_FCS_CRC16)
3952 len -= L2CAP_FCS_SIZE;
/* Streaming mode carries no S-frames and no oversized payloads. */
3954 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
3957 tx_seq = __get_txseq(chan, control);
3959 if (chan->expected_tx_seq != tx_seq) {
3960 /* Frame(s) missing - must discard partial SDU */
3961 kfree_skb(chan->sdu);
3963 chan->sdu_last_frag = NULL;
3966 /* TODO: Notify userland of missing data */
3969 chan->expected_tx_seq = __next_seq(chan, tx_seq);
3971 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
3972 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3977 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless-channel (CID 0x0002) frame to the global
 * listener bound to the given PSM, subject to state and imtu checks. */
3991 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3993 struct sock *sk = NULL;
3994 struct l2cap_chan *chan;
3996 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4004 BT_DBG("sk %p, len %d", sk, skb->len);
4006 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4009 if (chan->imtu < skb->len)
4012 if (!chan->ops->recv(chan->data, skb))
/* Deliver an LE ATT fixed-channel frame to the global channel bound
 * to that CID for our source address; same state/imtu gating as the
 * connectionless path. */
4024 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4026 struct sock *sk = NULL;
4027 struct l2cap_chan *chan;
4029 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4037 BT_DBG("sk %p, len %d", sk, skb->len);
4039 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4042 if (chan->imtu < skb->len)
4045 if (!chan->ops->recv(chan->data, skb))
/* Route one complete L2CAP frame by destination CID: signaling
 * (BR/EDR and LE), connectionless, ATT, SMP, or a connection-oriented
 * data channel.  The basic header is stripped and the advertised
 * length must match the skb payload. */
4057 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4059 struct l2cap_hdr *lh = (void *) skb->data;
4063 skb_pull(skb, L2CAP_HDR_SIZE);
4064 cid = __le16_to_cpu(lh->cid);
4065 len = __le16_to_cpu(lh->len);
/* Drop frames whose header length disagrees with the payload. */
4067 if (len != skb->len) {
4072 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4075 case L2CAP_CID_LE_SIGNALING:
4076 case L2CAP_CID_SIGNALING:
4077 l2cap_sig_channel(conn, skb);
4080 case L2CAP_CID_CONN_LESS:
/* Connectionless frames lead with the target PSM. */
4081 psm = get_unaligned_le16(skb->data);
4083 l2cap_conless_channel(conn, psm, skb);
4086 case L2CAP_CID_LE_DATA:
4087 l2cap_att_channel(conn, cid, skb);
/* SMP fixed channel: a failure here kills the whole connection. */
4091 if (smp_sig_channel(conn, skb))
4092 l2cap_conn_del(conn->hcon, EACCES);
4096 l2cap_data_channel(conn, cid, skb);
4101 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being indicated.
 * Scan listening channels and build the accept/master link-mode
 * flags; a socket bound to our exact bdaddr (lm1) takes precedence
 * over wildcard BDADDR_ANY listeners (lm2). */
4103 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4105 int exact = 0, lm1 = 0, lm2 = 0;
4106 struct l2cap_chan *c;
/* Only BR/EDR ACL links are auto-accepted here. */
4108 if (type != ACL_LINK)
4111 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4113 /* Find listening sockets and check their link_mode */
4114 read_lock(&chan_list_lock);
4115 list_for_each_entry(c, &chan_list, global_l) {
4116 struct sock *sk = c->sk;
4118 if (c->state != BT_LISTEN)
4121 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4122 lm1 |= HCI_LM_ACCEPT;
4123 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4124 lm1 |= HCI_LM_MASTER;
4126 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4127 lm2 |= HCI_LM_ACCEPT;
4128 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4129 lm2 |= HCI_LM_MASTER;
4132 read_unlock(&chan_list_lock);
4134 return exact ? lm1 : lm2;
/* HCI callback: link establishment finished.  On success attach an
 * L2CAP connection to the hcon and mark it ready; on failure tear
 * down with the HCI status mapped to an errno. */
4137 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4139 struct l2cap_conn *conn;
4141 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4143 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4147 conn = l2cap_conn_add(hcon, status);
4149 l2cap_conn_ready(conn);
4151 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the reason L2CAP wants for an impending
 * disconnect of this link (stored on the connection). */
4156 static int l2cap_disconn_ind(struct hci_conn *hcon)
4158 struct l2cap_conn *conn = hcon->l2cap_data;
4160 BT_DBG("hcon %p", hcon);
4162 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4165 return conn->disc_reason;
/* HCI callback: the link has been disconnected — tear down the
 * whole L2CAP connection with the mapped errno. */
4168 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4170 BT_DBG("hcon %p reason %d", hcon, reason);
4172 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4175 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 * Encryption dropped: MEDIUM-security channels get a short grace
 * timer; HIGH-security channels are closed immediately.  Encryption
 * restored: cancel the grace timer for MEDIUM channels. */
4180 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4182 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4185 if (encrypt == 0x00) {
4186 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4187 __clear_chan_timer(chan);
4188 __set_chan_timer(chan, HZ * 5);
4189 } else if (chan->sec_level == BT_SECURITY_HIGH)
4190 l2cap_chan_close(chan, ECONNREFUSED);
4192 if (chan->sec_level == BT_SECURITY_MEDIUM)
4193 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption procedure completed with
 * 'status', link now encrypted iff 'encrypt'.  For LE, finish SMP key
 * distribution.  Then walk every channel on the connection and
 * advance its state machine: established channels re-check
 * encryption, BT_CONNECT channels send their deferred Connect
 * Request, and BT_CONNECT2 channels answer the pending Connect
 * Response (or get torn down on security failure). */
4197 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4199 struct l2cap_conn *conn = hcon->l2cap_data;
4200 struct l2cap_chan *chan;
4205 BT_DBG("conn %p", conn);
4207 if (hcon->type == LE_LINK) {
4208 smp_distribute_keys(conn, 0);
4209 del_timer(&conn->security_timer);
4212 read_lock(&conn->chan_lock);
4214 list_for_each_entry(chan, &conn->chan_l, list) {
4215 struct sock *sk = chan->sk;
4219 BT_DBG("chan->scid %d", chan->scid);
/* ATT channel: adopt the link security level once encrypted. */
4221 if (chan->scid == L2CAP_CID_LE_DATA) {
4222 if (!status && encrypt) {
4223 chan->sec_level = hcon->sec_level;
4224 l2cap_chan_ready(sk);
/* Channels not waiting on this security procedure are skipped. */
4231 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4236 if (!status && (chan->state == BT_CONNECTED ||
4237 chan->state == BT_CONFIG)) {
4238 l2cap_check_encryption(chan, encrypt);
/* Security done for an outgoing channel: send Connect Request now. */
4243 if (chan->state == BT_CONNECT) {
4245 struct l2cap_conn_req req;
4246 req.scid = cpu_to_le16(chan->scid);
4247 req.psm = chan->psm;
4249 chan->ident = l2cap_get_ident(conn);
4250 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4252 l2cap_send_cmd(conn, chan->ident,
4253 L2CAP_CONN_REQ, sizeof(req), &req);
/* Security failed: short timer leading to teardown. */
4255 __clear_chan_timer(chan);
4256 __set_chan_timer(chan, HZ / 10);
4258 } else if (chan->state == BT_CONNECT2) {
4259 struct l2cap_conn_rsp rsp;
/* Incoming channel awaiting security: either defer to userspace
 * (authorization pending), accept into BT_CONFIG, or block. */
4263 if (bt_sk(sk)->defer_setup) {
4264 struct sock *parent = bt_sk(sk)->parent;
4265 res = L2CAP_CR_PEND;
4266 stat = L2CAP_CS_AUTHOR_PEND;
4268 parent->sk_data_ready(parent, 0);
4270 l2cap_state_change(chan, BT_CONFIG);
4271 res = L2CAP_CR_SUCCESS;
4272 stat = L2CAP_CS_NO_INFO;
4275 l2cap_state_change(chan, BT_DISCONN);
4276 __set_chan_timer(chan, HZ / 10);
4277 res = L2CAP_CR_SEC_BLOCK;
4278 stat = L2CAP_CS_NO_INFO;
4281 rsp.scid = cpu_to_le16(chan->dcid);
4282 rsp.dcid = cpu_to_le16(chan->scid);
4283 rsp.result = cpu_to_le16(res);
4284 rsp.status = cpu_to_le16(stat);
4285 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4292 read_unlock(&conn->chan_lock);
/* HCI callback: one ACL fragment arrived.  Reassembles L2CAP frames
 * that span multiple ACL packets: a start fragment (no ACL_CONT flag)
 * either completes a frame immediately or allocates conn->rx_skb
 * sized for the full frame; continuation fragments are appended until
 * rx_len reaches zero, then the complete frame is dispatched.  Any
 * inconsistency marks the connection unreliable (ECOMM). */
4297 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4299 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the L2CAP connection on first data. */
4302 conn = l2cap_conn_add(hcon, 0);
4307 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4309 if (!(flags & ACL_CONT)) {
4310 struct l2cap_hdr *hdr;
4311 struct l2cap_chan *chan;
/* A new start while reassembly is pending: drop the stale frame. */
4316 BT_ERR("Unexpected start frame (len %d)", skb->len);
4317 kfree_skb(conn->rx_skb);
4318 conn->rx_skb = NULL;
4320 l2cap_conn_unreliable(conn, ECOMM);
4323 /* Start fragment always begin with Basic L2CAP header */
4324 if (skb->len < L2CAP_HDR_SIZE) {
4325 BT_ERR("Frame is too short (len %d)", skb->len);
4326 l2cap_conn_unreliable(conn, ECOMM);
4330 hdr = (struct l2cap_hdr *) skb->data;
4331 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4332 cid = __le16_to_cpu(hdr->cid);
4334 if (len == skb->len) {
4335 /* Complete frame received */
4336 l2cap_recv_frame(conn, skb);
4340 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4342 if (skb->len > len) {
4343 BT_ERR("Frame is too long (len %d, expected len %d)",
4345 l2cap_conn_unreliable(conn, ECOMM);
/* Pre-check the destination channel's MTU before committing to
 * reassembly, so oversized frames are rejected early. */
4349 chan = l2cap_get_chan_by_scid(conn, cid);
4351 if (chan && chan->sk) {
4352 struct sock *sk = chan->sk;
4354 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4355 BT_ERR("Frame exceeding recv MTU (len %d, "
4359 l2cap_conn_unreliable(conn, ECOMM);
4365 /* Allocate skb for the complete frame (with header) */
4366 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4370 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4372 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4374 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4376 if (!conn->rx_len) {
4377 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4378 l2cap_conn_unreliable(conn, ECOMM);
4382 if (skb->len > conn->rx_len) {
4383 BT_ERR("Fragment is too long (len %d, expected %d)",
4384 skb->len, conn->rx_len);
4385 kfree_skb(conn->rx_skb);
4386 conn->rx_skb = NULL;
4388 l2cap_conn_unreliable(conn, ECOMM);
4392 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4394 conn->rx_len -= skb->len;
4396 if (!conn->rx_len) {
4397 /* Complete frame received */
4398 l2cap_recv_frame(conn, conn->rx_skb);
4399 conn->rx_skb = NULL;
/* debugfs: dump one line per global L2CAP channel — addresses, state,
 * PSM, CIDs, MTUs, security level and mode. */
4408 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4410 struct l2cap_chan *c;
4412 read_lock_bh(&chan_list_lock);
4414 list_for_each_entry(c, &chan_list, global_l) {
4415 struct sock *sk = c->sk;
4417 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4418 batostr(&bt_sk(sk)->src),
4419 batostr(&bt_sk(sk)->dst),
4420 c->state, __le16_to_cpu(c->psm),
4421 c->scid, c->dcid, c->imtu, c->omtu,
4422 c->sec_level, c->mode);
4425 read_unlock_bh(&chan_list_lock);
/* debugfs open: standard single_open wrapper around the show op. */
4430 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4432 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file based). */
4435 static const struct file_operations l2cap_debugfs_fops = {
4436 .open = l2cap_debugfs_open,
4438 .llseek = seq_lseek,
4439 .release = single_release,
/* Dentry of the debugfs file, kept for removal in l2cap_exit(). */
4442 static struct dentry *l2cap_debugfs;
/* HCI protocol registration: callbacks the HCI core invokes for
 * connection lifecycle, security results and incoming ACL data. */
4444 static struct hci_proto l2cap_hci_proto = {
4446 .id = HCI_PROTO_L2CAP,
4447 .connect_ind = l2cap_connect_ind,
4448 .connect_cfm = l2cap_connect_cfm,
4449 .disconn_ind = l2cap_disconn_ind,
4450 .disconn_cfm = l2cap_disconn_cfm,
4451 .security_cfm = l2cap_security_cfm,
4452 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket layer, then the HCI protocol
 * (unwinding the sockets on failure), and create the debugfs file
 * (failure there is only logged, not fatal). */
4455 int __init l2cap_init(void)
4459 err = l2cap_init_sockets();
4463 err = hci_register_proto(&l2cap_hci_proto);
4465 BT_ERR("L2CAP protocol registration failed");
/* Roll back the socket registration before bailing out. */
4466 bt_sock_unregister(BTPROTO_L2CAP);
4471 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4472 bt_debugfs, NULL, &l2cap_debugfs_fops);
4474 BT_ERR("Failed to create L2CAP debug file");
4480 l2cap_cleanup_sockets();
/* Module exit: tear down in reverse registration order — debugfs,
 * HCI protocol, then sockets. */
4484 void l2cap_exit(void)
4486 debugfs_remove(l2cap_debugfs);
4488 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4489 BT_ERR("L2CAP protocol unregistration failed");
4491 l2cap_cleanup_sockets();
/* Runtime-tunable module parameters (visible under /sys/module). */
4494 module_param(disable_ertm, bool, 0644);
4495 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4497 module_param(enable_hs, bool, 0644);
4498 MODULE_PARM_DESC(enable_hs, "Enable High Speed");