2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* Locally supported L2CAP feature mask; fixed channels are always advertised. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Bitmap of supported fixed channels (bit 1 = L2CAP signalling). */
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
/* Global list of all L2CAP channels, guarded by chan_list_lock. */
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for the signalling helpers defined later in this file. */
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
78 /* ---- L2CAP channels ---- */
/* Take a reference on the channel. */
80 static inline void chan_hold(struct l2cap_chan *c)
82 atomic_inc(&c->refcnt);
/* Drop a channel reference; the last put frees the channel
 * (the free call itself is not visible in this extract). */
85 static inline void chan_put(struct l2cap_chan *c)
87 if (atomic_dec_and_test(&c->refcnt))
/* Look up a channel on this connection by destination CID.
 * Caller must hold conn->chan_lock. */
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on this connection by source CID.
 * Caller must hold conn->chan_lock. */
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
105 struct l2cap_chan *c;
107 list_for_each_entry(c, &conn->chan_l, list) {
114 /* Find channel with given SCID.
115 * Returns locked socket */
/* Locking wrapper around __l2cap_get_chan_by_scid(). */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
118 struct l2cap_chan *c;
120 read_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
124 read_unlock(&conn->chan_lock);
/* Look up a channel by the signalling command identifier it last used.
 * Caller must hold conn->chan_lock. */
128 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
139 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
141 struct l2cap_chan *c;
143 read_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_ident(conn, ident);
147 read_unlock(&conn->chan_lock);
/* Find a channel in the global list bound to the given PSM and source
 * address.  Caller must hold chan_list_lock. */
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
153 struct l2cap_chan *c;
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM.  A non-zero psm fails if the (psm, src) pair is
 * already in use; a zero psm auto-allocates an odd dynamic PSM from the
 * 0x1001..0x10ff range. */
165 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
169 write_lock_bh(&chan_list_lock);
171 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
/* PSMs must be odd (least-significant bit of the LSB set), hence step 2. */
184 for (p = 0x1001; p < 0x1100; p += 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
186 chan->psm = cpu_to_le16(p);
187 chan->sport = cpu_to_le16(p);
194 write_unlock_bh(&chan_list_lock);
/* Record a fixed source CID for the channel, under the global list lock. */
198 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
200 write_lock_bh(&chan_list_lock);
204 write_unlock_bh(&chan_list_lock);
/* Allocate the first unused dynamic CID on this connection.
 * Caller must hold conn->chan_lock. */
209 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
211 u16 cid = L2CAP_CID_DYN_START;
213 for (; cid < L2CAP_CID_DYN_END; cid++) {
214 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm a channel timer; timeout is in milliseconds.  mod_timer()
 * returns 0 when the timer was not already pending — presumably a channel
 * reference is taken on that path (line missing in this extract). */
221 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
223 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
225 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
/* Cancel a pending channel timer; the branch body (dropping the reference
 * held by the armed timer) is not visible in this extract. */
229 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
231 BT_DBG("chan %p state %d", chan, chan->state);
233 if (timer_pending(timer) && del_timer(timer))
/* Move the channel to a new state and notify the owner (socket layer)
 * through its state_change callback. */
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
240 chan->ops->state_change(chan->data, state);
/* Channel timer expiry callback (runs in timer/softirq context).
 * Closes the channel with an error reason derived from its state; if the
 * socket is currently locked by user context, re-arms and retries shortly. */
243 static void l2cap_chan_timeout(unsigned long arg)
245 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
246 struct sock *sk = chan->sk;
249 BT_DBG("chan %p state %d", chan, chan->state);
253 if (sock_owned_by_user(sk)) {
254 /* sk is owned by user. Try again later */
255 __set_chan_timer(chan, HZ / 5);
/* Pick the errno reported to the socket: established/being-configured
 * channels and secured outgoing connects report ECONNREFUSED. */
261 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
262 reason = ECONNREFUSED;
263 else if (chan->state == BT_CONNECT &&
264 chan->sec_level != BT_SECURITY_SDP)
265 reason = ECONNREFUSED;
269 l2cap_chan_close(chan, reason);
273 chan->ops->close(chan->data);
/* Allocate and initialise a new channel bound to socket sk, link it into
 * the global channel list, and return it with one reference held. */
277 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
279 struct l2cap_chan *chan;
281 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
287 write_lock_bh(&chan_list_lock);
288 list_add(&chan->global_l, &chan_list);
289 write_unlock_bh(&chan_list_lock);
291 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
293 chan->state = BT_OPEN;
295 atomic_set(&chan->refcnt, 1);
/* Unlink the channel from the global list and drop the caller's reference
 * (the final put is not visible in this extract). */
300 void l2cap_chan_destroy(struct l2cap_chan *chan)
302 write_lock_bh(&chan_list_lock);
303 list_del(&chan->global_l);
304 write_unlock_bh(&chan_list_lock);
/* Attach a channel to a connection: assign CIDs and default MTU according
 * to the channel type, seed the default extended-flowspec parameters, and
 * add it to the connection's channel list.
 * Caller must hold conn->chan_lock. */
309 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
311 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
312 chan->psm, chan->dcid);
/* 0x13: default "remote user terminated" disconnect reason. */
314 conn->disc_reason = 0x13;
318 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
319 if (conn->hcon->type == LE_LINK) {
/* LE links use the fixed LE data channel, no dynamic CID. */
321 chan->omtu = L2CAP_LE_DEFAULT_MTU;
322 chan->scid = L2CAP_CID_LE_DATA;
323 chan->dcid = L2CAP_CID_LE_DATA;
325 /* Alloc CID for connection-oriented socket */
326 chan->scid = l2cap_alloc_cid(conn);
327 chan->omtu = L2CAP_DEFAULT_MTU;
329 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
330 /* Connectionless socket */
331 chan->scid = L2CAP_CID_CONN_LESS;
332 chan->dcid = L2CAP_CID_CONN_LESS;
333 chan->omtu = L2CAP_DEFAULT_MTU;
335 /* Raw socket can send/recv signalling messages only */
336 chan->scid = L2CAP_CID_SIGNALING;
337 chan->dcid = L2CAP_CID_SIGNALING;
338 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort flow specification for this side. */
341 chan->local_id = L2CAP_BESTEFFORT_ID;
342 chan->local_stype = L2CAP_SERV_BESTEFFORT;
343 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
344 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
345 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
346 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
350 list_add(&chan->list, &conn->chan_l);
/* Detach a channel from its connection and tear down its state.
354 * Must be called on the locked socket. */
355 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
357 struct sock *sk = chan->sk;
358 struct l2cap_conn *conn = chan->conn;
359 struct sock *parent = bt_sk(sk)->parent;
361 __clear_chan_timer(chan);
363 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
366 /* Delete from channel list */
367 write_lock_bh(&conn->chan_lock);
368 list_del(&chan->list);
369 write_unlock_bh(&conn->chan_lock);
/* Release the ACL reference this channel held. */
373 hci_conn_put(conn->hcon);
376 l2cap_state_change(chan, BT_CLOSED);
377 sock_set_flag(sk, SOCK_ZAPPED);
/* Pending accept: unlink from the parent's accept queue and wake it;
 * otherwise just wake the socket itself. */
383 bt_accept_unlink(sk);
384 parent->sk_data_ready(parent, 0);
386 sk->sk_state_change(sk);
/* Nothing further to purge unless configuration completed both ways. */
388 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
389 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
392 skb_queue_purge(&chan->tx_q);
394 if (chan->mode == L2CAP_MODE_ERTM) {
395 struct srej_list *l, *tmp;
/* Stop all ERTM timers and drop queued out-of-sequence frames. */
397 __clear_retrans_timer(chan);
398 __clear_monitor_timer(chan);
399 __clear_ack_timer(chan);
401 skb_queue_purge(&chan->srej_q);
403 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every connection still sitting on a listening socket's accept
 * queue (i.e. not yet accepted by userspace). */
410 static void l2cap_chan_cleanup_listen(struct sock *parent)
414 BT_DBG("parent %p", parent);
416 /* Close not yet accepted channels */
417 while ((sk = bt_accept_dequeue(parent, NULL))) {
418 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
419 __clear_chan_timer(chan);
421 l2cap_chan_close(chan, ECONNRESET);
423 chan->ops->close(chan->data);
/* Close a channel, taking the action appropriate for its current state:
 * listening sockets flush their accept queue, connected ACL channels send
 * a disconnect request, half-open incoming channels (BT_CONNECT2) send a
 * connect response with a failure result, everything else is just deleted. */
427 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
429 struct l2cap_conn *conn = chan->conn;
430 struct sock *sk = chan->sk;
432 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
434 switch (chan->state) {
436 l2cap_chan_cleanup_listen(sk);
438 l2cap_state_change(chan, BT_CLOSED);
439 sock_set_flag(sk, SOCK_ZAPPED);
/* Connected/configuring ACL channel: disconnect via signalling and wait
 * for the response under the channel timer. */
444 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
445 conn->hcon->type == ACL_LINK) {
446 __clear_chan_timer(chan);
447 __set_chan_timer(chan, sk->sk_sndtimeo);
448 l2cap_send_disconn_req(conn, chan, reason);
450 l2cap_chan_del(chan, reason);
/* Incoming connection not yet accepted: reject it explicitly. */
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) {
456 struct l2cap_conn_rsp rsp;
/* Deferred setup means security was the blocker, else bad PSM. */
459 if (bt_sk(sk)->defer_setup)
460 result = L2CAP_CR_SEC_BLOCK;
462 result = L2CAP_CR_BAD_PSM;
463 l2cap_state_change(chan, BT_DISCONN);
465 rsp.scid = cpu_to_le16(chan->dcid);
466 rsp.dcid = cpu_to_le16(chan->scid);
467 rsp.result = cpu_to_le16(result);
468 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
469 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
473 l2cap_chan_del(chan, reason);
478 l2cap_chan_del(chan, reason);
482 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel type, PSM and requested security level to the HCI
 * authentication requirement used when securing the ACL link. */
487 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
489 if (chan->chan_type == L2CAP_CHAN_RAW) {
490 switch (chan->sec_level) {
491 case BT_SECURITY_HIGH:
492 return HCI_AT_DEDICATED_BONDING_MITM;
493 case BT_SECURITY_MEDIUM:
494 return HCI_AT_DEDICATED_BONDING;
496 return HCI_AT_NO_BONDING;
/* PSM 0x0001 is SDP: never requires more than SDP-level security. */
498 } else if (chan->psm == cpu_to_le16(0x0001)) {
499 if (chan->sec_level == BT_SECURITY_LOW)
500 chan->sec_level = BT_SECURITY_SDP;
502 if (chan->sec_level == BT_SECURITY_HIGH)
503 return HCI_AT_NO_BONDING_MITM;
505 return HCI_AT_NO_BONDING;
507 switch (chan->sec_level) {
508 case BT_SECURITY_HIGH:
509 return HCI_AT_GENERAL_BONDING_MITM;
510 case BT_SECURITY_MEDIUM:
511 return HCI_AT_GENERAL_BONDING;
513 return HCI_AT_NO_BONDING;
518 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying ACL connection; returns the hci_conn_security() verdict. */
519 static inline int l2cap_check_security(struct l2cap_chan *chan)
521 struct l2cap_conn *conn = chan->conn;
524 auth_type = l2cap_get_auth_type(chan);
526 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for this connection,
 * wrapping within the kernel-reserved 1..128 range. */
529 static u8 l2cap_get_ident(struct l2cap_conn *conn)
533 /* Get next available identificator.
534 * 1 - 128 are used by kernel.
535 * 129 - 199 are reserved.
536 * 200 - 254 are used by utilities like l2ping, etc.
539 spin_lock_bh(&conn->lock);
541 if (++conn->tx_ident > 128)
546 spin_unlock_bh(&conn->lock);
/* Build a signalling command PDU and push it out over the HCI channel.
 * Signalling is sent non-flushable (when supported) at maximum priority. */
551 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
553 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
556 BT_DBG("code 0x%2.2x", code);
561 if (lmp_no_flush_capable(conn->hcon->hdev))
562 flags = ACL_START_NO_FLUSH;
566 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
567 skb->priority = HCI_PRIO_MAX;
569 hci_send_acl(conn->hchan, skb, flags);
/* Transmit a fully built L2CAP frame on the channel's HCI link, choosing
 * the non-flushable ACL flag unless the channel was marked flushable. */
572 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
574 struct hci_conn *hcon = chan->conn->hcon;
577 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
580 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
581 lmp_no_flush_capable(hcon->hdev))
582 flags = ACL_START_NO_FLUSH;
586 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
587 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM supervisory frame (S-frame) carrying the given
 * control field, appending an FCS when CRC16 is negotiated. */
590 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
593 struct l2cap_hdr *lh;
594 struct l2cap_conn *conn = chan->conn;
597 if (chan->state != BT_CONNECTED)
/* Header size depends on whether extended control fields are in use. */
600 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
601 hlen = L2CAP_EXT_HDR_SIZE;
603 hlen = L2CAP_ENH_HDR_SIZE;
605 if (chan->fcs == L2CAP_FCS_CRC16)
606 hlen += L2CAP_FCS_SIZE;
608 BT_DBG("chan %p, control 0x%8.8x", chan, control);
610 count = min_t(unsigned int, conn->mtu, hlen);
612 control |= __set_sframe(chan);
/* Piggy-back pending F-bit / P-bit requests onto this frame. */
614 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
615 control |= __set_ctrl_final(chan);
617 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
618 control |= __set_ctrl_poll(chan);
620 skb = bt_skb_alloc(count, GFP_ATOMIC);
624 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
625 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
626 lh->cid = cpu_to_le16(chan->dcid);
628 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
630 if (chan->fcs == L2CAP_FCS_CRC16) {
/* FCS covers the L2CAP header and control field, not itself. */
631 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
632 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
635 skb->priority = HCI_PRIO_MAX;
636 l2cap_do_send(chan, skb);
/* Send an RNR S-frame when the local receiver is busy, otherwise an RR,
 * always acknowledging up to buffer_seq. */
639 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
641 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
642 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
643 set_bit(CONN_RNR_SENT, &chan->conn_state);
645 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
647 control |= __set_reqseq(chan, chan->buffer_seq);
649 l2cap_send_sframe(chan, control);
/* True when no connect request is outstanding for this channel. */
652 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
654 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment.  If the peer's feature mask is already
 * known, send a connect request (once security passes); otherwise issue an
 * information request first and start the info timer. */
657 static void l2cap_do_start(struct l2cap_chan *chan)
659 struct l2cap_conn *conn = chan->conn;
661 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight — wait for it to finish. */
662 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
665 if (l2cap_check_security(chan) &&
666 __l2cap_no_conn_pending(chan)) {
667 struct l2cap_conn_req req;
668 req.scid = cpu_to_le16(chan->scid);
671 chan->ident = l2cap_get_ident(conn);
672 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
674 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
678 struct l2cap_info_req req;
679 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
681 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
682 conn->info_ident = l2cap_get_ident(conn);
684 mod_timer(&conn->info_timer, jiffies +
685 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
687 l2cap_send_cmd(conn, conn->info_ident,
688 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether the requested ERTM/streaming mode is supported by both
 * the local stack and the remote feature mask. */
692 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
694 u32 local_feat_mask = l2cap_feat_mask;
696 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
699 case L2CAP_MODE_ERTM:
700 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
701 case L2CAP_MODE_STREAMING:
702 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send an L2CAP disconnect request for the channel, stopping any ERTM
 * timers first, and move the channel into BT_DISCONN. */
708 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
711 struct l2cap_disconn_req req;
718 if (chan->mode == L2CAP_MODE_ERTM) {
719 __clear_retrans_timer(chan);
720 __clear_monitor_timer(chan);
721 __clear_ack_timer(chan);
724 req.dcid = cpu_to_le16(chan->dcid);
725 req.scid = cpu_to_le16(chan->scid);
726 l2cap_send_cmd(conn, l2cap_get_ident(conn),
727 L2CAP_DISCONN_REQ, sizeof(req), &req);
729 l2cap_state_change(chan, BT_DISCONN);
733 /* ---- L2CAP connections ---- */
/* Progress every connection-oriented channel on this link once the peer's
 * feature mask is known: channels in BT_CONNECT send connect requests
 * (or are torn down if the negotiated mode is unsupported); channels in
 * BT_CONNECT2 answer the pending incoming connect request and, on
 * success, start configuration. */
734 static void l2cap_conn_start(struct l2cap_conn *conn)
736 struct l2cap_chan *chan, *tmp;
738 BT_DBG("conn %p", conn);
740 read_lock(&conn->chan_lock);
742 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
743 struct sock *sk = chan->sk;
747 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
752 if (chan->state == BT_CONNECT) {
753 struct l2cap_conn_req req;
755 if (!l2cap_check_security(chan) ||
756 !__l2cap_no_conn_pending(chan)) {
761 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
762 && test_bit(CONF_STATE2_DEVICE,
763 &chan->conf_state)) {
764 /* l2cap_chan_close() calls list_del(chan)
765 * so release the lock */
766 read_unlock(&conn->chan_lock);
767 l2cap_chan_close(chan, ECONNRESET);
768 read_lock(&conn->chan_lock);
773 req.scid = cpu_to_le16(chan->scid);
776 chan->ident = l2cap_get_ident(conn);
777 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
779 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
782 } else if (chan->state == BT_CONNECT2) {
783 struct l2cap_conn_rsp rsp;
785 rsp.scid = cpu_to_le16(chan->dcid);
786 rsp.dcid = cpu_to_le16(chan->scid);
788 if (l2cap_check_security(chan)) {
/* Deferred setup: keep the peer pending until userspace accepts. */
789 if (bt_sk(sk)->defer_setup) {
790 struct sock *parent = bt_sk(sk)->parent;
791 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
792 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
794 parent->sk_data_ready(parent, 0);
797 l2cap_state_change(chan, BT_CONFIG);
798 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
799 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
802 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
803 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
806 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Start configuration only after a successful response, and only once. */
809 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
810 rsp.result != L2CAP_CR_SUCCESS) {
815 set_bit(CONF_REQ_SENT, &chan->conf_state);
816 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
817 l2cap_build_conf_req(chan, buf), buf);
818 chan->num_conf_req++;
824 read_unlock(&conn->chan_lock);
827 /* Find socket with cid and source bdaddr.
828 * Returns closest match, locked.
/* An exact source-address match wins immediately; a BDADDR_ANY binding is
 * remembered as the fallback (returned after the loop, not visible here). */
830 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
832 struct l2cap_chan *c, *c1 = NULL;
834 read_lock(&chan_list_lock);
836 list_for_each_entry(c, &chan_list, global_l) {
837 struct sock *sk = c->sk;
839 if (state && c->state != state)
842 if (c->scid == cid) {
844 if (!bacmp(&bt_sk(sk)->src, src)) {
845 read_unlock(&chan_list_lock);
850 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
855 read_unlock(&chan_list_lock);
/* Handle an incoming LE connection: find a listener on the LE data CID,
 * spawn a child channel/socket, enqueue it on the listener's accept queue
 * and mark it connected. */
860 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
862 struct sock *parent, *sk;
863 struct l2cap_chan *chan, *pchan;
867 /* Check if we have socket listening on cid */
868 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
875 bh_lock_sock(parent);
877 /* Check for backlog size */
878 if (sk_acceptq_is_full(parent)) {
879 BT_DBG("backlog full %d", parent->sk_ack_backlog);
883 chan = pchan->ops->new_connection(pchan->data);
889 write_lock_bh(&conn->chan_lock);
891 hci_conn_hold(conn->hcon);
893 bacpy(&bt_sk(sk)->src, conn->src);
894 bacpy(&bt_sk(sk)->dst, conn->dst);
896 bt_accept_enqueue(parent, sk);
898 __l2cap_chan_add(conn, chan);
900 __set_chan_timer(chan, sk->sk_sndtimeo);
902 l2cap_state_change(chan, BT_CONNECTED);
903 parent->sk_data_ready(parent, 0);
905 write_unlock_bh(&conn->chan_lock);
908 bh_unlock_sock(parent);
/* Mark a channel as fully connected: reset config state, stop the channel
 * timer, and wake the socket (and its listening parent, if any). */
911 static void l2cap_chan_ready(struct sock *sk)
913 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
914 struct sock *parent = bt_sk(sk)->parent;
916 BT_DBG("sk %p, parent %p", sk, parent);
918 chan->conf_state = 0;
919 __clear_chan_timer(chan);
921 l2cap_state_change(chan, BT_CONNECTED);
922 sk->sk_state_change(sk);
925 parent->sk_data_ready(parent, 0);
/* ACL/LE link came up: accept incoming LE connections, trigger SMP on
 * outgoing LE links, then walk all channels — LE channels become ready
 * once secured, non-connection-oriented channels connect immediately, and
 * BT_CONNECT channels start the L2CAP connect sequence. */
928 static void l2cap_conn_ready(struct l2cap_conn *conn)
930 struct l2cap_chan *chan;
932 BT_DBG("conn %p", conn);
934 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
935 l2cap_le_conn_ready(conn);
937 if (conn->hcon->out && conn->hcon->type == LE_LINK)
938 smp_conn_security(conn, conn->hcon->pending_sec_level);
940 read_lock(&conn->chan_lock);
942 list_for_each_entry(chan, &conn->chan_l, list) {
943 struct sock *sk = chan->sk;
947 if (conn->hcon->type == LE_LINK) {
948 if (smp_conn_security(conn, chan->sec_level))
949 l2cap_chan_ready(sk);
951 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
952 __clear_chan_timer(chan);
953 l2cap_state_change(chan, BT_CONNECTED);
954 sk->sk_state_change(sk);
956 } else if (chan->state == BT_CONNECT)
957 l2cap_do_start(chan);
962 read_unlock(&conn->chan_lock);
965 /* Notify sockets that we cannot guarantee reliability anymore */
/* Report err on every channel that demanded reliable delivery
 * (FLAG_FORCE_RELIABLE); the sk_err assignment is elided in this extract. */
966 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
968 struct l2cap_chan *chan;
970 BT_DBG("conn %p", conn);
972 read_lock(&conn->chan_lock);
974 list_for_each_entry(chan, &conn->chan_l, list) {
975 struct sock *sk = chan->sk;
977 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
981 read_unlock(&conn->chan_lock);
/* Information-request timer expired: give up on feature discovery, mark
 * the exchange done, and proceed with whatever channels are waiting. */
984 static void l2cap_info_timeout(unsigned long arg)
986 struct l2cap_conn *conn = (void *) arg;
988 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
989 conn->info_ident = 0;
991 l2cap_conn_start(conn);
/* Tear down an L2CAP connection when its HCI link dies: delete every
 * channel with the given error, release the HCI channel, stop pending
 * timers, and detach from the hci_conn. */
994 static void l2cap_conn_del(struct hci_conn *hcon, int err)
996 struct l2cap_conn *conn = hcon->l2cap_data;
997 struct l2cap_chan *chan, *l;
1003 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* Drop any partially reassembled inbound frame. */
1005 kfree_skb(conn->rx_skb);
1008 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1011 l2cap_chan_del(chan, err);
1013 chan->ops->close(chan->data);
1016 hci_chan_del(conn->hchan);
1018 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1019 del_timer_sync(&conn->info_timer);
/* LE links with SMP pairing in flight also need SMP state destroyed. */
1021 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1022 del_timer(&conn->security_timer);
1023 smp_chan_destroy(conn);
1026 hcon->l2cap_data = NULL;
/* SMP security procedure timed out: drop the whole connection. */
1030 static void security_timeout(unsigned long arg)
1032 struct l2cap_conn *conn = (void *) arg;
1034 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for an HCI connection:
 * allocate an HCI channel, pick the MTU from the link type, and set up
 * either the SMP security timer (LE) or the info timer (ACL). */
1037 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1039 struct l2cap_conn *conn = hcon->l2cap_data;
1040 struct hci_chan *hchan;
1045 hchan = hci_chan_create(hcon);
/* Allocation failed: undo the hci_chan before bailing out. */
1049 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1051 hci_chan_del(hchan);
1055 hcon->l2cap_data = conn;
1057 conn->hchan = hchan;
1059 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1061 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1062 conn->mtu = hcon->hdev->le_mtu;
1064 conn->mtu = hcon->hdev->acl_mtu;
1066 conn->src = &hcon->hdev->bdaddr;
1067 conn->dst = &hcon->dst;
1069 conn->feat_mask = 0;
1071 spin_lock_init(&conn->lock);
1072 rwlock_init(&conn->chan_lock);
1074 INIT_LIST_HEAD(&conn->chan_l);
1076 if (hcon->type == LE_LINK)
1077 setup_timer(&conn->security_timer, security_timeout,
1078 (unsigned long) conn);
1080 setup_timer(&conn->info_timer, l2cap_info_timeout,
1081 (unsigned long) conn);
/* 0x13: default "remote user terminated" disconnect reason. */
1083 conn->disc_reason = 0x13;
/* Locking wrapper around __l2cap_chan_add(). */
1088 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1090 write_lock_bh(&conn->chan_lock);
1091 __l2cap_chan_add(conn, chan);
1092 write_unlock_bh(&conn->chan_lock);
1095 /* ---- Socket interface ---- */
1097 /* Find socket with psm and source bdaddr.
1098 * Returns closest match.
/* Exact source-address match wins; a BDADDR_ANY binding is kept as a
 * fallback (returned after the loop, not visible in this extract). */
1100 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1102 struct l2cap_chan *c, *c1 = NULL;
1104 read_lock(&chan_list_lock);
1106 list_for_each_entry(c, &chan_list, global_l) {
1107 struct sock *sk = c->sk;
1109 if (state && c->state != state)
1112 if (c->psm == psm) {
1114 if (!bacmp(&bt_sk(sk)->src, src)) {
1115 read_unlock(&chan_list_lock);
1120 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1125 read_unlock(&chan_list_lock);
/* Initiate an outgoing L2CAP connection for the channel: route to an HCI
 * device, create the ACL or LE link, attach the channel, and either start
 * the connect sequence immediately (link already up) or arm the channel
 * timer to wait for the link. */
1130 int l2cap_chan_connect(struct l2cap_chan *chan)
1132 struct sock *sk = chan->sk;
1133 bdaddr_t *src = &bt_sk(sk)->src;
1134 bdaddr_t *dst = &bt_sk(sk)->dst;
1135 struct l2cap_conn *conn;
1136 struct hci_conn *hcon;
1137 struct hci_dev *hdev;
1141 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1144 hdev = hci_get_route(dst, src);
1146 return -EHOSTUNREACH;
1148 hci_dev_lock_bh(hdev);
1150 auth_type = l2cap_get_auth_type(chan);
/* The LE data CID selects an LE link; everything else rides on ACL. */
1152 if (chan->dcid == L2CAP_CID_LE_DATA)
1153 hcon = hci_connect(hdev, LE_LINK, dst,
1154 chan->sec_level, auth_type);
1156 hcon = hci_connect(hdev, ACL_LINK, dst,
1157 chan->sec_level, auth_type);
1160 err = PTR_ERR(hcon);
1164 conn = l2cap_conn_add(hcon, 0);
1171 /* Update source addr of the socket */
1172 bacpy(src, conn->src);
1174 l2cap_chan_add(conn, chan);
1176 l2cap_state_change(chan, BT_CONNECT);
1177 __set_chan_timer(chan, sk->sk_sndtimeo);
1179 if (hcon->state == BT_CONNECTED) {
1180 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1181 __clear_chan_timer(chan);
1182 if (l2cap_check_security(chan))
1183 l2cap_state_change(chan, BT_CONNECTED);
1185 l2cap_do_start(chan);
1191 hci_dev_unlock_bh(hdev);
/* Block (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged, the connection drops, a signal arrives, or the socket
 * reports an error. */
1196 int __l2cap_wait_ack(struct sock *sk)
1198 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1199 DECLARE_WAITQUEUE(wait, current);
1203 add_wait_queue(sk_sleep(sk), &wait);
1204 set_current_state(TASK_INTERRUPTIBLE);
1205 while (chan->unacked_frames > 0 && chan->conn) {
1209 if (signal_pending(current)) {
1210 err = sock_intr_errno(timeo);
1215 timeo = schedule_timeout(timeo);
1217 set_current_state(TASK_INTERRUPTIBLE);
1219 err = sock_error(sk);
1223 set_current_state(TASK_RUNNING);
1224 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer expiry: poll the peer again with an RR/RNR (P-bit),
 * or disconnect once remote_max_tx retries have been exhausted. */
1228 static void l2cap_monitor_timeout(unsigned long arg)
1230 struct l2cap_chan *chan = (void *) arg;
1231 struct sock *sk = chan->sk;
1233 BT_DBG("chan %p", chan);
1236 if (chan->retry_count >= chan->remote_max_tx) {
1237 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1242 chan->retry_count++;
1243 __set_monitor_timer(chan);
1245 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer expiry: enter the WAIT_F state and poll the
 * peer with an RR/RNR carrying the P-bit; the monitor timer takes over. */
1249 static void l2cap_retrans_timeout(unsigned long arg)
1251 struct l2cap_chan *chan = (void *) arg;
1252 struct sock *sk = chan->sk;
1254 BT_DBG("chan %p", chan);
1257 chan->retry_count = 1;
1258 __set_monitor_timer(chan);
1260 set_bit(CONN_WAIT_F, &chan->conn_state);
1262 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Drop frames from the head of the ERTM tx queue that the peer has
 * acknowledged, stopping at the expected_ack_seq boundary; clears the
 * retransmission timer when nothing remains unacked. */
1266 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1268 struct sk_buff *skb;
1270 while ((skb = skb_peek(&chan->tx_q)) &&
1271 chan->unacked_frames) {
1272 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1275 skb = skb_dequeue(&chan->tx_q);
1278 chan->unacked_frames--;
1281 if (!chan->unacked_frames)
1282 __clear_retrans_timer(chan);
/* Streaming mode transmit: drain the tx queue, stamping each frame with
 * the next tx_seq in its control field and recomputing the FCS in place
 * when CRC16 is enabled.  No retransmission state is kept. */
1285 static void l2cap_streaming_send(struct l2cap_chan *chan)
1287 struct sk_buff *skb;
1291 while ((skb = skb_dequeue(&chan->tx_q))) {
1292 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1293 control |= __set_txseq(chan, chan->next_tx_seq);
1294 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1296 if (chan->fcs == L2CAP_FCS_CRC16) {
1297 fcs = crc16(0, (u8 *)skb->data,
1298 skb->len - L2CAP_FCS_SIZE);
1299 put_unaligned_le16(fcs,
1300 skb->data + skb->len - L2CAP_FCS_SIZE);
1303 l2cap_do_send(chan, skb);
1305 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single I-frame with the given tx_seq (SREJ recovery):
 * find it in the tx queue, clone it, rebuild its control field (fresh
 * reqseq, optional F-bit) and FCS, then send the clone. */
1309 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1311 struct sk_buff *skb, *tx_skb;
1315 skb = skb_peek(&chan->tx_q);
1320 if (bt_cb(skb)->tx_seq == tx_seq)
1323 if (skb_queue_is_last(&chan->tx_q, skb))
1326 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
/* Give up and disconnect once the peer's max-transmit limit is hit. */
1328 if (chan->remote_max_tx &&
1329 bt_cb(skb)->retries == chan->remote_max_tx) {
1330 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1334 tx_skb = skb_clone(skb, GFP_ATOMIC);
1335 bt_cb(skb)->retries++;
/* Keep only the SAR bits from the stored control field; everything else
 * is regenerated for this retransmission. */
1337 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1338 control &= __get_sar_mask(chan);
1340 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1341 control |= __set_ctrl_final(chan);
1343 control |= __set_reqseq(chan, chan->buffer_seq);
1344 control |= __set_txseq(chan, tx_seq);
1346 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1348 if (chan->fcs == L2CAP_FCS_CRC16) {
1349 fcs = crc16(0, (u8 *)tx_skb->data,
1350 tx_skb->len - L2CAP_FCS_SIZE);
1351 put_unaligned_le16(fcs,
1352 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1355 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send queued I-frames from tx_send_head while the
 * transmit window has room, cloning each skb so the original stays queued
 * for potential retransmission.  Returns the number of frames sent (the
 * counting/return lines are elided in this extract). */
1358 static int l2cap_ertm_send(struct l2cap_chan *chan)
1360 struct sk_buff *skb, *tx_skb;
1365 if (chan->state != BT_CONNECTED)
1368 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1370 if (chan->remote_max_tx &&
1371 bt_cb(skb)->retries == chan->remote_max_tx) {
1372 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1376 tx_skb = skb_clone(skb, GFP_ATOMIC);
1378 bt_cb(skb)->retries++;
/* Preserve only the SAR bits; rebuild the rest of the control field. */
1380 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1381 control &= __get_sar_mask(chan);
1383 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1384 control |= __set_ctrl_final(chan);
1386 control |= __set_reqseq(chan, chan->buffer_seq);
1387 control |= __set_txseq(chan, chan->next_tx_seq);
1389 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1391 if (chan->fcs == L2CAP_FCS_CRC16) {
/* NOTE(review): FCS is computed/stored via skb->data while the clone is
 * what gets sent — skb_clone() shares the data buffer, so the write is
 * visible through tx_skb as well. */
1392 fcs = crc16(0, (u8 *)skb->data,
1393 tx_skb->len - L2CAP_FCS_SIZE);
1394 put_unaligned_le16(fcs, skb->data +
1395 tx_skb->len - L2CAP_FCS_SIZE);
1398 l2cap_do_send(chan, tx_skb);
1400 __set_retrans_timer(chan);
1402 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1404 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* First transmission of this frame: it is now awaiting acknowledgement. */
1406 if (bt_cb(skb)->retries == 1)
1407 chan->unacked_frames++;
1409 chan->frames_sent++;
1411 if (skb_queue_is_last(&chan->tx_q, skb))
1412 chan->tx_send_head = NULL;
1414 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the ERTM send pointer to the start of the unacked window
 * (REJ recovery) and retransmit everything from there. */
1422 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1426 if (!skb_queue_empty(&chan->tx_q))
1427 chan->tx_send_head = chan->tx_q.next;
1429 chan->next_tx_seq = chan->expected_ack_seq;
1430 ret = l2cap_ertm_send(chan);
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * piggy-back the ack on pending data if possible, else send a plain RR. */
1434 static void l2cap_send_ack(struct l2cap_chan *chan)
1438 control |= __set_reqseq(chan, chan->buffer_seq);
1440 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1441 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1442 set_bit(CONN_RNR_SENT, &chan->conn_state);
1443 l2cap_send_sframe(chan, control);
/* Data went out carrying the ack — no explicit S-frame needed. */
1447 if (l2cap_ertm_send(chan) > 0)
1450 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1451 l2cap_send_sframe(chan, control);
/* Send an SREJ S-frame with the F-bit set, requesting the sequence number
 * at the tail of the pending SREJ list. */
1454 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1456 struct srej_list *tail;
1459 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1460 control |= __set_ctrl_final(chan);
1462 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1463 control |= __set_reqseq(chan, tail->tx_seq);
1465 l2cap_send_sframe(chan, control);
/* Copy up to len bytes of user iovec data into skb: the first count bytes
 * go into the head skb, the remainder into a chain of fragment skbs sized
 * by the connection MTU.  Returns 0 on success or a negative errno. */
1468 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1470 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1471 struct sk_buff **frag;
1474 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1480 /* Continuation fragments (no L2CAP header) */
1481 frag = &skb_shinfo(skb)->frag_list;
1483 count = min_t(unsigned int, conn->mtu, len);
1485 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1488 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
/* Fragments inherit the head skb's queueing priority. */
1491 (*frag)->priority = skb->priority;
1496 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header plus PSM, followed
 * by the user payload copied from msg.  Returns the skb or ERR_PTR. */
1502 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1503 struct msghdr *msg, size_t len,
1506 struct sock *sk = chan->sk;
1507 struct l2cap_conn *conn = chan->conn;
1508 struct sk_buff *skb;
1509 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1510 struct l2cap_hdr *lh;
1512 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
/* Head skb carries at most one MTU; the rest goes into fragments. */
1514 count = min_t(unsigned int, (conn->mtu - hlen), len);
1515 skb = bt_skb_send_alloc(sk, count + hlen,
1516 msg->msg_flags & MSG_DONTWAIT, &err);
1518 return ERR_PTR(err);
1520 skb->priority = priority;
1522 /* Create L2CAP header */
1523 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1524 lh->cid = cpu_to_le16(chan->dcid);
1525 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1526 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1528 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1529 if (unlikely(err < 0)) {
1531 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload copied from msg.  Returns the skb or ERR_PTR. */
1536 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1537 struct msghdr *msg, size_t len,
1540 struct sock *sk = chan->sk;
1541 struct l2cap_conn *conn = chan->conn;
1542 struct sk_buff *skb;
1543 int err, count, hlen = L2CAP_HDR_SIZE;
1544 struct l2cap_hdr *lh;
1546 BT_DBG("sk %p len %d", sk, (int)len);
/* Head skb carries at most one MTU; the rest goes into fragments. */
1548 count = min_t(unsigned int, (conn->mtu - hlen), len);
1549 skb = bt_skb_send_alloc(sk, count + hlen,
1550 msg->msg_flags & MSG_DONTWAIT, &err);
1552 return ERR_PTR(err);
1554 skb->priority = priority;
1556 /* Create L2CAP header */
1557 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1558 lh->cid = cpu_to_le16(chan->dcid);
1559 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1561 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1562 if (unlikely(err < 0)) {
1564 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, enhanced or
 * extended control field, optional SDU length (for START-of-SAR frames),
 * payload, and an FCS placeholder when CRC16 is in use.  Returns the skb
 * or an ERR_PTR.
 * NOTE(review): the connection-state test guarding the early -ENOTCONN
 * return, the SAR test before writing sdulen, the else branch of the
 * header-size selection, the error cleanup and the final return are not
 * visible in this chunk.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
struct msghdr *msg, size_t len,
u32 control, u16 sdulen)
struct sock *sk = chan->sk;
struct l2cap_conn *conn = chan->conn;
struct sk_buff *skb;
int err, count, hlen;
struct l2cap_hdr *lh;
BT_DBG("sk %p len %d", sk, (int)len);
return ERR_PTR(-ENOTCONN);
/* Extended control field (4 bytes) vs enhanced (2 bytes) */
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
hlen = L2CAP_EXT_HDR_SIZE;
hlen = L2CAP_ENH_HDR_SIZE;
/* Room for the SDU length field on SAR START frames */
hlen += L2CAP_SDULEN_SIZE;
if (chan->fcs == L2CAP_FCS_CRC16)
hlen += L2CAP_FCS_SIZE;
count = min_t(unsigned int, (conn->mtu - hlen), len);
skb = bt_skb_send_alloc(sk, count + hlen,
msg->msg_flags & MSG_DONTWAIT, &err);
return ERR_PTR(err);
/* Create L2CAP header */
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->cid = cpu_to_le16(chan->dcid);
lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Control field size depends on FLAG_EXT_CTRL */
__put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
if (unlikely(err < 0)) {
return ERR_PTR(err);
/* FCS placeholder; the real CRC is filled in at transmit time */
if (chan->fcs == L2CAP_FCS_CRC16)
put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame, zero or
 * more CONTINUE frames and a final END frame, queue them on a local list
 * and splice the list onto the channel's tx queue.  Returns 0 or a
 * negative errno from PDU creation.
 * NOTE(review): the 'control'/'size'/'buflen' declarations, the
 * IS_ERR() tests, the segmentation loop header and the len/size updates
 * inside the loop are not visible in this chunk.
 */
static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
struct sk_buff *skb;
struct sk_buff_head sar_queue;
skb_queue_head_init(&sar_queue);
/* First PDU: SAR=START, carries the total SDU length */
control = __set_ctrl_sar(chan, L2CAP_SAR_START);
skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
return PTR_ERR(skb);
__skb_queue_tail(&sar_queue, skb);
len -= chan->remote_mps;
size += chan->remote_mps;
/* Middle PDUs are CONTINUE, the last one is END */
if (len > chan->remote_mps) {
control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
buflen = chan->remote_mps;
control = __set_ctrl_sar(chan, L2CAP_SAR_END);
skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure, drop everything segmented so far */
skb_queue_purge(&sar_queue);
return PTR_ERR(skb);
__skb_queue_tail(&sar_queue, skb);
skb_queue_splice_tail(&sar_queue, &chan->tx_q);
/* If nothing was pending, transmission starts at the new head */
if (chan->tx_send_head == NULL)
chan->tx_send_head = sar_queue.next;
/* Top-level channel transmit entry point: dispatch an outgoing SDU
 * according to channel type and mode (connectionless, basic, ERTM or
 * streaming).  Returns bytes sent or a negative errno.
 * NOTE(review): the 'control'/'err' declarations, the IS_ERR() checks,
 * several returns (len, err, -EMSGSIZE, -EBADFD) and closing braces are
 * not visible in this chunk.
 */
int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
struct sk_buff *skb;
/* Connectionless channel */
if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
skb = l2cap_create_connless_pdu(chan, msg, len, priority);
return PTR_ERR(skb);
l2cap_do_send(chan, skb);
switch (chan->mode) {
case L2CAP_MODE_BASIC:
/* Check outgoing MTU */
if (len > chan->omtu)
/* Create a basic PDU */
skb = l2cap_create_basic_pdu(chan, msg, len, priority);
return PTR_ERR(skb);
l2cap_do_send(chan, skb);
case L2CAP_MODE_ERTM:
case L2CAP_MODE_STREAMING:
/* Entire SDU fits into one PDU */
if (len <= chan->remote_mps) {
control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
skb = l2cap_create_iframe_pdu(chan, msg, len, control,
return PTR_ERR(skb);
__skb_queue_tail(&chan->tx_q, skb);
if (chan->tx_send_head == NULL)
chan->tx_send_head = skb;
/* Segment SDU into multiples PDUs */
err = l2cap_sar_segment_sdu(chan, msg, len);
/* Streaming mode pushes frames immediately, no retransmission */
if (chan->mode == L2CAP_MODE_STREAMING) {
l2cap_streaming_send(chan);
/* While waiting for an F-bit from a busy remote, don't transmit */
if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
test_bit(CONN_WAIT_F, &chan->conn_state)) {
err = l2cap_ertm_send(chan);
BT_DBG("bad state %1.1x", chan->mode);
/* Copy frame to all raw sockets on that connection */
/* Walks the connection's channel list under chan_lock and delivers a
 * clone of 'skb' to every RAW-type channel's recv() callback.
 * NOTE(review): the "skip the originating socket" test, the !nskb
 * continue, and the kfree_skb on recv failure are not visible here.
 */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
struct sk_buff *nskb;
struct l2cap_chan *chan;
BT_DBG("conn %p", conn);
read_lock(&conn->chan_lock);
list_for_each_entry(chan, &conn->chan_l, list) {
struct sock *sk = chan->sk;
if (chan->chan_type != L2CAP_CHAN_RAW)
/* Don't send frame to the socket it came from */
/* Clone per listener; atomic context, may fail silently */
nskb = skb_clone(skb, GFP_ATOMIC);
if (chan->ops->recv(chan->data, nskb))
read_unlock(&conn->chan_lock);
/* ---- L2CAP signalling commands ---- */
/* Allocate and build a signalling-channel command skb: L2CAP header +
 * command header + 'dlen' bytes of payload, fragmented across frag_list
 * entries when larger than the HCI MTU.  Returns the skb or NULL.
 * NOTE(review): the 'count'/'len' declarations, NULL-alloc checks,
 * cmd->code/ident assignments, the fragment loop header and the error
 * cleanup (kfree_skb) are not visible in this chunk.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
u8 code, u8 ident, u16 dlen, void *data)
struct sk_buff *skb, **frag;
struct l2cap_cmd_hdr *cmd;
struct l2cap_hdr *lh;
BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
conn, code, ident, dlen);
len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
count = min_t(unsigned int, conn->mtu, len);
skb = bt_skb_alloc(count, GFP_ATOMIC);
lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the LE signalling CID, BR/EDR the classic one */
if (conn->hcon->type == LE_LINK)
lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
cmd->len = cpu_to_le16(dlen);
/* Whatever payload fits after the two headers goes in the head skb */
count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
memcpy(skb_put(skb, count), data, count);
/* Continuation fragments (no L2CAP header) */
frag = &skb_shinfo(skb)->frag_list;
count = min_t(unsigned int, conn->mtu, len);
*frag = bt_skb_alloc(count, GFP_ATOMIC);
memcpy(skb_put(*frag, count), data, count);
frag = &(*frag)->next;
/* Decode one configuration option at *ptr: report its type and length,
 * read its value (1/2/4 bytes, or a pointer for variable-size options)
 * and advance *ptr past it.  Returns the number of bytes consumed.
 * NOTE(review): the 'len' declaration, the *type/*olen assignments, the
 * switch-on-opt->len skeleton, '*ptr += len' and 'return len' are not
 * visible in this chunk.
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
struct l2cap_conf_opt *opt = *ptr;
len = L2CAP_CONF_OPT_SIZE + opt->len;
/* 1-byte option value */
*val = *((u8 *) opt->val);
/* 2-byte option value, unaligned-safe */
*val = get_unaligned_le16(opt->val);
/* 4-byte option value, unaligned-safe */
*val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw bytes */
*val = (unsigned long) opt->val;
BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Encode one configuration option at *ptr (type, len, then a 1/2/4-byte
 * value or a memcpy of 'len' bytes for larger values) and advance *ptr
 * past the option.
 * NOTE(review): the opt->type/opt->len assignments and the switch-on-len
 * skeleton are not visible in this chunk.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
struct l2cap_conf_opt *opt = *ptr;
BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
/* 1-byte value */
*((u8 *) opt->val) = val;
/* 2-byte value, unaligned-safe */
put_unaligned_le16(val, opt->val);
/* 4-byte value, unaligned-safe */
put_unaligned_le32(val, opt->val);
/* Larger values: 'val' is actually a pointer to the data */
memcpy(opt->val, (void *) val, len);
*ptr += L2CAP_CONF_OPT_SIZE + len;
/* Append an Extended Flow Specification (EFS) option to a config
 * request/response being built at *ptr, filled according to the
 * channel mode (ERTM uses the channel's local parameters; streaming
 * uses best-effort with defaults).
 * NOTE(review): the streaming branch's id/acc_lat/flush_to assignments
 * and the default case of the switch are not visible in this chunk.
 */
static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
struct l2cap_conf_efs efs;
switch(chan->mode) {
case L2CAP_MODE_ERTM:
efs.id = chan->local_id;
efs.stype = chan->local_stype;
efs.msdu = cpu_to_le16(chan->local_msdu);
efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
case L2CAP_MODE_STREAMING:
efs.stype = L2CAP_SERV_BESTEFFORT;
efs.msdu = cpu_to_le16(chan->local_msdu);
efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
(unsigned long) &efs);
1934 static void l2cap_ack_timeout(unsigned long arg)
1936 struct l2cap_chan *chan = (void *) arg;
1938 bh_lock_sock(chan->sk);
1939 l2cap_send_ack(chan);
1940 bh_unlock_sock(chan->sk);
/* Initialise per-channel ERTM state: reset sequence/window counters,
 * arm the retransmission, monitor and ack timers, set up the SREJ
 * queue/list and route socket backlog packets through the ERTM
 * receive path.
 */
static inline void l2cap_ertm_init(struct l2cap_chan *chan)
struct sock *sk = chan->sk;
chan->expected_ack_seq = 0;
chan->unacked_frames = 0;
chan->buffer_seq = 0;
chan->num_acked = 0;
chan->frames_sent = 0;
/* Timers carry the channel pointer as their callback argument */
setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
(unsigned long) chan);
setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
(unsigned long) chan);
setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
/* Queue of out-of-order frames held for SREJ recovery */
skb_queue_head_init(&chan->srej_q);
INIT_LIST_HEAD(&chan->srej_l);
/* Frames deferred to the socket backlog go through the ERTM receiver */
sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
1967 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1970 case L2CAP_MODE_STREAMING:
1971 case L2CAP_MODE_ERTM:
1972 if (l2cap_mode_supported(mode, remote_feat_mask))
1976 return L2CAP_MODE_BASIC;
1980 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1982 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
1985 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
1987 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
1990 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1992 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1993 __l2cap_ews_supported(chan)) {
1994 /* use extended control field */
1995 set_bit(FLAG_EXT_CTRL, &chan->flags);
1996 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
1998 chan->tx_win = min_t(u16, chan->tx_win,
1999 L2CAP_DEFAULT_TX_WINDOW);
2000 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request into 'data': MTU option when
 * non-default, then mode-specific options (RFC, EFS, FCS, EWS) for
 * basic/ERTM/streaming.  On the very first request, ERTM/streaming may
 * be downgraded via l2cap_select_mode().  Returns the request length.
 * NOTE(review): the 'size' declaration, several 'break's/'done:' labels,
 * EWS option value, and the final 'return ptr - data' are not visible
 * in this chunk.
 */
static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
struct l2cap_conf_req *req = data;
struct l2cap_conf_rfc rfc = { .mode = chan->mode };
void *ptr = req->data;
BT_DBG("chan %p", chan);
/* Mode (re)negotiation only on the first config exchange */
if (chan->num_conf_req || chan->num_conf_rsp)
switch (chan->mode) {
case L2CAP_MODE_STREAMING:
case L2CAP_MODE_ERTM:
/* A state-2 device keeps its mode regardless of remote features */
if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
if (__l2cap_efs_supported(chan))
set_bit(FLAG_EFS_ENABLE, &chan->flags);
chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
if (chan->imtu != L2CAP_DEFAULT_MTU)
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
switch (chan->mode) {
case L2CAP_MODE_BASIC:
/* Basic mode needs no RFC option unless remote knows ERTM/streaming */
if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
!(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
rfc.mode = L2CAP_MODE_BASIC;
rfc.max_transmit = 0;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
rfc.max_pdu_size = 0;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
(unsigned long) &rfc);
case L2CAP_MODE_ERTM:
rfc.mode = L2CAP_MODE_ERTM;
rfc.max_transmit = chan->max_tx;
/* Timeouts are set by the responder; request with zeros */
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
/* Cap PDU size so a whole PDU fits in one HCI buffer */
size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
L2CAP_EXT_HDR_SIZE -
rfc.max_pdu_size = cpu_to_le16(size);
l2cap_txwin_setup(chan);
rfc.txwin_size = min_t(u16, chan->tx_win,
L2CAP_DEFAULT_TX_WINDOW);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
(unsigned long) &rfc);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
l2cap_add_opt_efs(&ptr, chan);
if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Offer to disable FCS if we don't need it or remote already did */
if (chan->fcs == L2CAP_FCS_NONE ||
test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
chan->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
if (test_bit(FLAG_EXT_CTRL, &chan->flags))
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
case L2CAP_MODE_STREAMING:
rfc.mode = L2CAP_MODE_STREAMING;
rfc.max_transmit = 0;
rfc.retrans_timeout = 0;
rfc.monitor_timeout = 0;
size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
L2CAP_EXT_HDR_SIZE -
rfc.max_pdu_size = cpu_to_le16(size);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
(unsigned long) &rfc);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
l2cap_add_opt_efs(&ptr, chan);
if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
if (chan->fcs == L2CAP_FCS_NONE ||
test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
chan->fcs = L2CAP_FCS_NONE;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
req->dcid = cpu_to_le16(chan->dcid);
req->flags = cpu_to_le16(0);
/* Parse the remote's accumulated Configure Request (chan->conf_req) and
 * build the Configure Response into 'data': walk each option, record
 * MTU/flush-to/RFC/FCS/EFS/EWS, validate against our mode, then emit
 * accepted/adjusted options.  Returns the response length or
 * -ECONNREFUSED when negotiation cannot proceed.
 * NOTE(review): declarations of 'val'/'size', several break/goto lines,
 * the 'remote_efs' bookkeeping around EFS, the 'done:' label and the
 * final 'return ptr - data' are not visible in this chunk.
 */
static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
struct l2cap_conf_rsp *rsp = data;
void *ptr = rsp->data;
void *req = chan->conf_req;
int len = chan->conf_len;
int type, hint, olen;
struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
struct l2cap_conf_efs efs;
u16 mtu = L2CAP_DEFAULT_MTU;
u16 result = L2CAP_CONF_SUCCESS;
BT_DBG("chan %p", chan);
/* First pass: decode every option the remote sent */
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; others must be understood */
hint = type & L2CAP_CONF_HINT;
type &= L2CAP_CONF_MASK;
case L2CAP_CONF_MTU:
case L2CAP_CONF_FLUSH_TO:
chan->flush_to = val;
case L2CAP_CONF_QOS:
case L2CAP_CONF_RFC:
if (olen == sizeof(rfc))
memcpy(&rfc, (void *) val, olen);
case L2CAP_CONF_FCS:
if (val == L2CAP_FCS_NONE)
set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
case L2CAP_CONF_EFS:
if (olen == sizeof(efs))
memcpy(&efs, (void *) val, olen);
case L2CAP_CONF_EWS:
/* EWS requires high-speed support; otherwise refuse outright */
return -ECONNREFUSED;
set_bit(FLAG_EXT_CTRL, &chan->flags);
set_bit(CONF_EWS_RECV, &chan->conf_state);
chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as UNKNOWN */
result = L2CAP_CONF_UNKNOWN;
*((u8 *) ptr++) = type;
if (chan->num_conf_rsp || chan->num_conf_req > 1)
switch (chan->mode) {
case L2CAP_MODE_STREAMING:
case L2CAP_MODE_ERTM:
if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
chan->mode = l2cap_select_mode(rfc.mode,
chan->conn->feat_mask);
if (__l2cap_efs_supported(chan))
set_bit(FLAG_EFS_ENABLE, &chan->flags);
return -ECONNREFUSED;
if (chan->mode != rfc.mode)
return -ECONNREFUSED;
/* Modes disagree after negotiation window: unaccept once, then refuse */
if (chan->mode != rfc.mode) {
result = L2CAP_CONF_UNACCEPT;
rfc.mode = chan->mode;
if (chan->num_conf_rsp == 1)
return -ECONNREFUSED;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
if (result == L2CAP_CONF_SUCCESS) {
/* Configure output options and let the other side know
 * which ones we don't like. */
if (mtu < L2CAP_DEFAULT_MIN_MTU)
result = L2CAP_CONF_UNACCEPT;
set_bit(CONF_MTU_DONE, &chan->conf_state);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service types must match unless one side is NOTRAFIC */
if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
efs.stype != L2CAP_SERV_NOTRAFIC &&
efs.stype != chan->local_stype) {
result = L2CAP_CONF_UNACCEPT;
if (chan->num_conf_req >= 1)
return -ECONNREFUSED;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
(unsigned long) &efs);
/* Send PENDING Conf Rsp */
result = L2CAP_CONF_PENDING;
set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
case L2CAP_MODE_BASIC:
chan->fcs = L2CAP_FCS_NONE;
set_bit(CONF_MODE_DONE, &chan->conf_state);
case L2CAP_MODE_ERTM:
if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
chan->remote_tx_win = rfc.txwin_size;
rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
chan->remote_max_tx = rfc.max_transmit;
/* Clamp remote PDU size so it fits our receive buffers */
size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
L2CAP_EXT_HDR_SIZE -
rfc.max_pdu_size = cpu_to_le16(size);
chan->remote_mps = size;
/* NOTE(review): rfc.retrans_timeout/monitor_timeout are __le16
 * wire fields; le16_to_cpu() on a host-order constant only gives
 * the right bytes on little-endian — cpu_to_le16() looks intended.
 * Verify against struct l2cap_conf_rfc in l2cap.h. */
rfc.retrans_timeout =
le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
rfc.monitor_timeout =
le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
set_bit(CONF_MODE_DONE, &chan->conf_state);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
chan->remote_id = efs.id;
chan->remote_stype = efs.stype;
chan->remote_msdu = le16_to_cpu(efs.msdu);
chan->remote_flush_to =
le32_to_cpu(efs.flush_to);
chan->remote_acc_lat =
le32_to_cpu(efs.acc_lat);
chan->remote_sdu_itime =
le32_to_cpu(efs.sdu_itime);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
sizeof(efs), (unsigned long) &efs);
case L2CAP_MODE_STREAMING:
size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
L2CAP_EXT_HDR_SIZE -
rfc.max_pdu_size = cpu_to_le16(size);
chan->remote_mps = size;
set_bit(CONF_MODE_DONE, &chan->conf_state);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
/* Unsupported mode requested: reject with our own mode in the RFC */
result = L2CAP_CONF_UNACCEPT;
memset(&rfc, 0, sizeof(rfc));
rfc.mode = chan->mode;
if (result == L2CAP_CONF_SUCCESS)
set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
rsp->scid = cpu_to_le16(chan->dcid);
rsp->result = cpu_to_le16(result);
rsp->flags = cpu_to_le16(0x0000);
/* Parse the remote's Configure Response and build our follow-up
 * Configure Request into 'data', adjusting local parameters (MTU,
 * flush-to, RFC, EWS) to what the remote will accept.  '*result' is
 * updated (e.g. to UNACCEPT on a too-small MTU).  Returns the new
 * request length or -ECONNREFUSED on an unacceptable mode.
 * NOTE(review): the 'type'/'olen'/'val' declarations, the switch
 * header, break statements and the final 'return ptr - data' are not
 * visible in this chunk.
 */
static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
struct l2cap_conf_req *req = data;
void *ptr = req->data;
struct l2cap_conf_rfc rfc;
BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
case L2CAP_CONF_MTU:
/* Remote proposed an MTU below the spec minimum: push back */
if (val < L2CAP_DEFAULT_MIN_MTU) {
*result = L2CAP_CONF_UNACCEPT;
chan->imtu = L2CAP_DEFAULT_MIN_MTU;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
case L2CAP_CONF_FLUSH_TO:
chan->flush_to = val;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
case L2CAP_CONF_RFC:
if (olen == sizeof(rfc))
memcpy(&rfc, (void *)val, olen);
/* A state-2 device may not be talked out of its mode */
if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
rfc.mode != chan->mode)
return -ECONNREFUSED;
l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
sizeof(rfc), (unsigned long) &rfc);
case L2CAP_CONF_EWS:
chan->tx_win = min_t(u16, val,
L2CAP_DEFAULT_EXT_WINDOW);
l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
/* Basic mode cannot be renegotiated away by the remote */
if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
return -ECONNREFUSED;
chan->mode = rfc.mode;
if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
case L2CAP_MODE_ERTM:
chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
chan->mps = le16_to_cpu(rfc.max_pdu_size);
case L2CAP_MODE_STREAMING:
chan->mps = le16_to_cpu(rfc.max_pdu_size);
req->dcid = cpu_to_le16(chan->dcid);
req->flags = cpu_to_le16(0x0000);
2420 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2422 struct l2cap_conf_rsp *rsp = data;
2423 void *ptr = rsp->data;
2425 BT_DBG("chan %p", chan);
2427 rsp->scid = cpu_to_le16(chan->dcid);
2428 rsp->result = cpu_to_le16(result);
2429 rsp->flags = cpu_to_le16(flags);
/* Complete a deferred connect: send the success Connect Response that
 * was held back (e.g. for defer_setup), then fire the first Configure
 * Request if one hasn't been sent yet.
 * NOTE(review): the 'u8 buf[128]' declaration used for the config
 * request is not visible in this chunk.
 */
void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
struct l2cap_conn_rsp rsp;
struct l2cap_conn *conn = chan->conn;
rsp.scid = cpu_to_le16(chan->dcid);
rsp.dcid = cpu_to_le16(chan->scid);
rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Reply with the ident saved from the original Connect Request */
l2cap_send_cmd(conn, chan->ident,
L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only the first caller sends the Configure Request */
if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(chan, buf), buf);
chan->num_conf_req++;
/* Extract the RFC option from a successful Configure Response and apply
 * its ERTM/streaming parameters (timeouts, max PDU size) to the channel.
 * No-op for basic-mode channels.
 * NOTE(review): the 'type'/'olen'/'val'/'rfc' initialisation, the
 * switch headers, 'done:' label and break statements are not visible
 * in this chunk.
 */
static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
struct l2cap_conf_rfc rfc;
BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
/* Only ERTM/streaming carry RFC parameters worth applying */
if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
while (len >= L2CAP_CONF_OPT_SIZE) {
len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
case L2CAP_CONF_RFC:
if (olen == sizeof(rfc))
memcpy(&rfc, (void *)val, olen);
case L2CAP_MODE_ERTM:
chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
chan->mps = le16_to_cpu(rfc.max_pdu_size);
case L2CAP_MODE_STREAMING:
chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject on the signalling channel.  If it
 * answers our outstanding Information Request (remote doesn't understand
 * it), mark the feature-mask exchange as done and kick off pending
 * connections anyway.
 * NOTE(review): the early 'return 0' after the reason check and the
 * final return are not visible in this chunk.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
/* Only "command not understood" rejections are interesting here */
if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
cmd->ident == conn->info_ident) {
del_timer(&conn->info_timer);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
conn->info_ident = 0;
l2cap_conn_start(conn);
/* Handle an incoming Connect Request: find a listening channel for the
 * PSM, check link security and backlog, create the child channel, add
 * it to the connection, reply with a Connect Response, and kick off the
 * feature-mask exchange and/or Configure Request as needed.
 * NOTE(review): several goto labels (response/sendresp), the pchan NULL
 * check, 'parent = pchan->sk', dcid assignment from chan->scid, and the
 * final return are not visible in this chunk.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
struct l2cap_conn_rsp rsp;
struct l2cap_chan *chan = NULL, *pchan;
struct sock *parent, *sk = NULL;
int result, status = L2CAP_CS_NO_INFO;
u16 dcid = 0, scid = __le16_to_cpu(req->scid);
__le16 psm = req->psm;
BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
/* Check if we have socket listening on psm */
pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
result = L2CAP_CR_BAD_PSM;
bh_lock_sock(parent);
/* Check if the ACL is secure enough (if not SDP) */
if (psm != cpu_to_le16(0x0001) &&
!hci_conn_check_link_mode(conn->hcon)) {
/* 0x05 = authentication failure disconnect reason */
conn->disc_reason = 0x05;
result = L2CAP_CR_SEC_BLOCK;
result = L2CAP_CR_NO_MEM;
/* Check for backlog size */
if (sk_acceptq_is_full(parent)) {
BT_DBG("backlog full %d", parent->sk_ack_backlog);
chan = pchan->ops->new_connection(pchan->data);
write_lock_bh(&conn->chan_lock);
/* Check if we already have channel with that dcid */
if (__l2cap_get_chan_by_dcid(conn, scid)) {
write_unlock_bh(&conn->chan_lock);
sock_set_flag(sk, SOCK_ZAPPED);
chan->ops->close(chan->data);
hci_conn_hold(conn->hcon);
bacpy(&bt_sk(sk)->src, conn->src);
bacpy(&bt_sk(sk)->dst, conn->dst);
bt_accept_enqueue(parent, sk);
__l2cap_chan_add(conn, chan);
__set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the ident so a deferred response can reuse it */
chan->ident = cmd->ident;
if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
if (l2cap_check_security(chan)) {
if (bt_sk(sk)->defer_setup) {
l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHOR_PEND;
parent->sk_data_ready(parent, 0);
l2cap_state_change(chan, BT_CONFIG);
result = L2CAP_CR_SUCCESS;
status = L2CAP_CS_NO_INFO;
l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
status = L2CAP_CS_AUTHEN_PEND;
l2cap_state_change(chan, BT_CONNECT2);
result = L2CAP_CR_PEND;
status = L2CAP_CS_NO_INFO;
write_unlock_bh(&conn->chan_lock);
bh_unlock_sock(parent);
rsp.scid = cpu_to_le16(scid);
rsp.dcid = cpu_to_le16(dcid);
rsp.result = cpu_to_le16(result);
rsp.status = cpu_to_le16(status);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Feature mask unknown yet: ask for it before configuring */
if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
struct l2cap_info_req info;
info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
conn->info_ident = l2cap_get_ident(conn);
mod_timer(&conn->info_timer, jiffies +
msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
l2cap_send_cmd(conn, conn->info_ident,
L2CAP_INFO_REQ, sizeof(info), &info);
if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
result == L2CAP_CR_SUCCESS) {
set_bit(CONF_REQ_SENT, &chan->conf_state);
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(chan, buf), buf);
chan->num_conf_req++;
/* Handle an incoming Connect Response: locate the channel by scid (or
 * ident for a pending response), then either move to BT_CONFIG and send
 * the first Configure Request (SUCCESS), mark pending (PEND), or tear
 * the channel down (any failure).
 * NOTE(review): the 'sk' and 'req' buffer declarations, locking around
 * the channel lookup, the PEND/default case labels and the final return
 * are not visible in this chunk.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
u16 scid, dcid, result, status;
struct l2cap_chan *chan;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
result = __le16_to_cpu(rsp->result);
status = __le16_to_cpu(rsp->status);
BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
chan = l2cap_get_chan_by_scid(conn, scid);
/* Fall back to the command ident for responses without our scid */
chan = l2cap_get_chan_by_ident(conn, cmd->ident);
case L2CAP_CR_SUCCESS:
l2cap_state_change(chan, BT_CONFIG);
clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(chan, req), req);
chan->num_conf_req++;
set_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* don't delete l2cap channel if sk is owned by user */
if (sock_owned_by_user(sk)) {
l2cap_state_change(chan, BT_DISCONN);
__clear_chan_timer(chan);
/* Short timer so teardown retries once the user releases the sock */
__set_chan_timer(chan, HZ / 5);
l2cap_chan_del(chan, ECONNREFUSED);
2706 static inline void set_default_fcs(struct l2cap_chan *chan)
2708 /* FCS is enabled only in ERTM or streaming mode, if one or both
2711 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2712 chan->fcs = L2CAP_FCS_NONE;
2713 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2714 chan->fcs = L2CAP_FCS_CRC16;
/* Handle an incoming Configure Request: validate channel state, buffer
 * (possibly multi-fragment) option data, and once complete parse it,
 * send the Configure Response, and — when both directions are done —
 * bring the channel to BT_CONNECTED (initialising ERTM if needed).
 * NOTE(review): the 'dcid'/'flags'/'len' declarations, 'sk', the 'rsp'
 * and 'buf' buffers, several goto labels (unlock/done), the conf_len
 * reset, and the final return are not visible in this chunk.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
struct l2cap_chan *chan;
dcid = __le16_to_cpu(req->dcid);
flags = __le16_to_cpu(req->flags);
BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal while connecting/configuring */
if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
struct l2cap_cmd_rej_cid rej;
rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
rej.scid = cpu_to_le16(chan->scid);
rej.dcid = cpu_to_le16(chan->dcid);
l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
/* Reject if config buffer is too small. */
len = cmd_len - sizeof(*req);
if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(chan, rsp,
L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate options across continuation fragments */
memcpy(chan->conf_req + chan->conf_len, req->data, len);
chan->conf_len += len;
if (flags & 0x0001) {
/* Incomplete config. Send empty response. */
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(chan, rsp,
L2CAP_CONF_SUCCESS, 0x0001), rsp);
/* Complete config. */
len = l2cap_parse_conf_req(chan, rsp);
l2cap_send_disconn_req(conn, chan, ECONNRESET);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
chan->num_conf_rsp++;
/* Reset config buffer. */
if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both sides configured: channel becomes operational */
if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
set_default_fcs(chan);
l2cap_state_change(chan, BT_CONNECTED);
chan->next_tx_seq = 0;
chan->expected_tx_seq = 0;
skb_queue_head_init(&chan->tx_q);
if (chan->mode == L2CAP_MODE_ERTM)
l2cap_ertm_init(chan);
l2cap_chan_ready(sk);
if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
l2cap_build_conf_req(chan, buf), buf);
chan->num_conf_req++;
/* Got Conf Rsp PENDING from remote side and asume we sent
   Conf Rsp PENDING in the code above */
if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
/* check compatibility */
clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(chan, rsp,
L2CAP_CONF_SUCCESS, 0x0000), rsp);
/* Handle an incoming Configure Response: on SUCCESS apply the RFC
 * parameters; on PENDING answer a locally-pending config; on UNACCEPT
 * re-negotiate up to L2CAP_CONF_MAX_CONF_RSP times; otherwise tear the
 * channel down.  Marks CONF_INPUT_DONE and completes the connection
 * when both directions have finished.
 * NOTE(review): 'sk', the 'buf'/'req' buffers, goto labels (done), the
 * switch header on 'result' and the final return are not visible here.
 * NOTE(review): 'cmd->len' is a __le16 wire field used directly in host
 * arithmetic; le16_to_cpu(cmd->len) looks intended — verify against
 * struct l2cap_cmd_hdr.
 */
static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
u16 scid, flags, result;
struct l2cap_chan *chan;
int len = cmd->len - sizeof(*rsp);
scid = __le16_to_cpu(rsp->scid);
flags = __le16_to_cpu(rsp->flags);
result = __le16_to_cpu(rsp->result);
BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
scid, flags, result);
chan = l2cap_get_chan_by_scid(conn, scid);
case L2CAP_CONF_SUCCESS:
l2cap_conf_rfc_get(chan, rsp->data, len);
clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
case L2CAP_CONF_PENDING:
set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
len = l2cap_parse_conf_rsp(chan, rsp->data, len,
l2cap_send_disconn_req(conn, chan, ECONNRESET);
/* check compatibility */
clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
l2cap_build_conf_rsp(chan, buf,
L2CAP_CONF_SUCCESS, 0x0000), buf);
case L2CAP_CONF_UNACCEPT:
/* Retry negotiation until the response-count cap is hit */
if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
l2cap_send_disconn_req(conn, chan, ECONNRESET);
/* throw out any old stored conf requests */
result = L2CAP_CONF_SUCCESS;
len = l2cap_parse_conf_rsp(chan, rsp->data, len,
l2cap_send_disconn_req(conn, chan, ECONNRESET);
l2cap_send_cmd(conn, l2cap_get_ident(conn),
L2CAP_CONF_REQ, len, req);
chan->num_conf_req++;
if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable: flag error and disconnect */
sk->sk_err = ECONNRESET;
__set_chan_timer(chan, HZ * 5);
l2cap_send_disconn_req(conn, chan, ECONNRESET);
set_bit(CONF_INPUT_DONE, &chan->conf_state);
if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
set_default_fcs(chan);
l2cap_state_change(chan, BT_CONNECTED);
chan->next_tx_seq = 0;
chan->expected_tx_seq = 0;
skb_queue_head_init(&chan->tx_q);
if (chan->mode == L2CAP_MODE_ERTM)
l2cap_ertm_init(chan);
l2cap_chan_ready(sk);
/* Handle an incoming Disconnect Request: ack it with a Disconnect
 * Response, shut the socket down, and delete the channel — unless the
 * socket is currently owned by user space, in which case teardown is
 * deferred via a short channel timer.
 * NOTE(review): 'sk', the scid/dcid declarations, the NULL-channel
 * check, bh_lock/unlock calls and the final return are not visible in
 * this chunk.
 */
static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
struct l2cap_disconn_rsp rsp;
struct l2cap_chan *chan;
scid = __le16_to_cpu(req->scid);
dcid = __le16_to_cpu(req->dcid);
BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* Their dcid is our scid, hence the lookup by dcid */
chan = l2cap_get_chan_by_scid(conn, dcid);
rsp.dcid = cpu_to_le16(chan->scid);
rsp.scid = cpu_to_le16(chan->dcid);
l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
sk->sk_shutdown = SHUTDOWN_MASK;
/* don't delete l2cap channel if sk is owned by user */
if (sock_owned_by_user(sk)) {
l2cap_state_change(chan, BT_DISCONN);
__clear_chan_timer(chan);
__set_chan_timer(chan, HZ / 5);
l2cap_chan_del(chan, ECONNRESET);
chan->ops->close(chan->data);
/* Handle an incoming Disconnect Response: find the channel by scid and
 * delete it, deferring via a short timer if the socket is owned by
 * user space.
 * NOTE(review): 'sk', the scid/dcid declarations, the NULL-channel
 * check, bh_lock/unlock calls and the final return are not visible in
 * this chunk.
 */
static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
struct l2cap_chan *chan;
scid = __le16_to_cpu(rsp->scid);
dcid = __le16_to_cpu(rsp->dcid);
BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
chan = l2cap_get_chan_by_scid(conn, scid);
/* don't delete l2cap channel if sk is owned by user */
if (sock_owned_by_user(sk)) {
l2cap_state_change(chan,BT_DISCONN);
__clear_chan_timer(chan);
__set_chan_timer(chan, HZ / 5);
/* err=0: this is a clean, locally-initiated disconnect completing */
l2cap_chan_del(chan, 0);
chan->ops->close(chan->data);
/* Handle an incoming Information Request: answer FEAT_MASK with our
 * feature mask (extended by ERTM/streaming and, when high speed is on,
 * extended flow/window), FIXED_CHAN with the fixed-channel bitmap, and
 * anything else with NOTSUPP.
 * NOTE(review): the 'type' declaration, the 'buf' array sizes, the
 * enable_hs test guarding the EXT_FLOW/EXT_WINDOW bits and the final
 * return are not visible in this chunk.
 */
static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
struct l2cap_info_req *req = (struct l2cap_info_req *) data;
type = __le16_to_cpu(req->type);
BT_DBG("type 0x%4.4x", type);
if (type == L2CAP_IT_FEAT_MASK) {
u32 feat_mask = l2cap_feat_mask;
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
feat_mask |= L2CAP_FEAT_EXT_FLOW
| L2CAP_FEAT_EXT_WINDOW;
put_unaligned_le32(feat_mask, rsp->data);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
} else if (type == L2CAP_IT_FIXED_CHAN) {
struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: reply not-supported */
struct l2cap_info_rsp rsp;
rsp.type = cpu_to_le16(type);
rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
l2cap_send_cmd(conn, cmd->ident,
L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Process an L2CAP Information Response during connection setup.
 * A FEAT_MASK answer may trigger a follow-up FIXED_CHAN query; once the
 * exchange is complete (or failed) mark info discovery done and kick off
 * any channels that were waiting on it via l2cap_conn_start().
 * NOTE(review): `type`/`result` declarations and some returns are in
 * lines elided from this extract. */
3054 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3056 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3059 type = __le16_to_cpu(rsp->type);
3060 result = __le16_to_cpu(rsp->result);
3062 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3064 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3065 if (cmd->ident != conn->info_ident ||
3066 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3069 del_timer(&conn->info_timer);
3071 if (result != L2CAP_IR_SUCCESS) {
/* Peer rejected the query: give up on discovery but still start channels. */
3072 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3073 conn->info_ident = 0;
3075 l2cap_conn_start(conn);
3080 if (type == L2CAP_IT_FEAT_MASK) {
3081 conn->feat_mask = get_unaligned_le32(rsp->data);
3083 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Peer supports fixed channels: chain a second info request. */
3084 struct l2cap_info_req req;
3085 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3087 conn->info_ident = l2cap_get_ident(conn);
3089 l2cap_send_cmd(conn, conn->info_ident,
3090 L2CAP_INFO_REQ, sizeof(req), &req);
3092 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3093 conn->info_ident = 0;
3095 l2cap_conn_start(conn);
3097 } else if (type == L2CAP_IT_FIXED_CHAN) {
3098 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3099 conn->info_ident = 0;
3101 l2cap_conn_start(conn);
/* Validate LE connection-parameter-update values against the spec's legal
 * ranges (interval 6..3200, timeout 10..3200, latency < 500 and small
 * enough that the link cannot time out between latency-skipped events).
 * NOTE(review): the `u16 to_multiplier` tail of the signature, the
 * `max_latency` declaration and the return statements sit in lines elided
 * from this extract. */
3107 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3112 if (min > max || min < 6 || max > 3200)
3115 if (to_multiplier < 10 || to_multiplier > 3200)
/* Supervision timeout (units of 10ms) must exceed max interval (1.25ms
 * units); the *8 converts between the two unit systems. */
3118 if (max >= to_multiplier * 8)
3121 max_latency = (to_multiplier * 8 / max) - 1;
3122 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request from a slave: only the
 * master may act on it; validate the proposed values, answer with
 * accept/reject, and apply accepted parameters to the HCI connection.
 * NOTE(review): `err` declaration and several returns are in lines elided
 * from this extract. */
3128 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3129 struct l2cap_cmd_hdr *cmd, u8 *data)
3131 struct hci_conn *hcon = conn->hcon;
3132 struct l2cap_conn_param_update_req *req;
3133 struct l2cap_conn_param_update_rsp rsp;
3134 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the connection master is allowed to process this request. */
3137 if (!(hcon->link_mode & HCI_LM_MASTER))
/* Reject malformed commands whose payload length doesn't match. */
3140 cmd_len = __le16_to_cpu(cmd->len);
3141 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3144 req = (struct l2cap_conn_param_update_req *) data;
3145 min = __le16_to_cpu(req->min);
3146 max = __le16_to_cpu(req->max);
3147 latency = __le16_to_cpu(req->latency);
3148 to_multiplier = __le16_to_cpu(req->to_multiplier);
3150 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3151 min, max, latency, to_multiplier);
3153 memset(&rsp, 0, sizeof(rsp));
3155 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3157 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3159 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3161 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Apply the new parameters only when validation succeeded (guard elided). */
3165 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler. Echo requests are
 * answered inline; unknown opcodes are logged (and rejected — the reject
 * path is in lines elided from this extract, as are the `break`s). */
3170 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3171 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3175 switch (cmd->code) {
3176 case L2CAP_COMMAND_REJ:
3177 l2cap_command_rej(conn, cmd, data);
3180 case L2CAP_CONN_REQ:
3181 err = l2cap_connect_req(conn, cmd, data);
3184 case L2CAP_CONN_RSP:
3185 err = l2cap_connect_rsp(conn, cmd, data);
3188 case L2CAP_CONF_REQ:
3189 err = l2cap_config_req(conn, cmd, cmd_len, data);
3192 case L2CAP_CONF_RSP:
3193 err = l2cap_config_rsp(conn, cmd, data);
3196 case L2CAP_DISCONN_REQ:
3197 err = l2cap_disconnect_req(conn, cmd, data);
3200 case L2CAP_DISCONN_RSP:
3201 err = l2cap_disconnect_rsp(conn, cmd, data);
3204 case L2CAP_ECHO_REQ:
/* Echo: reflect the request payload straight back to the peer. */
3205 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3208 case L2CAP_ECHO_RSP:
3211 case L2CAP_INFO_REQ:
3212 err = l2cap_information_req(conn, cmd, data);
3215 case L2CAP_INFO_RSP:
3216 err = l2cap_information_rsp(conn, cmd, data);
3220 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command. Only the connection-parameter-update
 * request is actively handled; COMMAND_REJ and UPDATE_RSP are accepted
 * silently and unknown opcodes are logged (return statements are in lines
 * elided from this extract). */
3228 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3229 struct l2cap_cmd_hdr *cmd, u8 *data)
3231 switch (cmd->code) {
3232 case L2CAP_COMMAND_REJ:
3235 case L2CAP_CONN_PARAM_UPDATE_REQ:
3236 return l2cap_conn_param_update_req(conn, cmd, data);
3238 case L2CAP_CONN_PARAM_UPDATE_RSP:
3242 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signaling channel payload: an skb may carry several commands
 * back-to-back, each prefixed by an l2cap_cmd_hdr. Every command is copied
 * out, validated, and routed to the LE or BR/EDR dispatcher; a handler
 * error provokes a Command Reject back to the peer.
 * NOTE(review): `len`/`err`/`cmd_len` declarations, the loop-advance over
 * cmd_len, and kfree_skb are in lines elided from this extract. */
3247 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3248 struct sk_buff *skb)
3250 u8 *data = skb->data;
3252 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signaling traffic first. */
3255 l2cap_raw_recv(conn, skb);
3257 while (len >= L2CAP_CMD_HDR_SIZE) {
/* memcpy the header out — `data` may be unaligned within the skb. */
3259 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3260 data += L2CAP_CMD_HDR_SIZE;
3261 len -= L2CAP_CMD_HDR_SIZE;
3263 cmd_len = le16_to_cpu(cmd.len);
3265 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A claimed length beyond the buffer, or ident 0, means corruption. */
3267 if (cmd_len > len || !cmd.ident) {
3268 BT_DBG("corrupted command");
3272 if (conn->hcon->type == LE_LINK)
3273 err = l2cap_le_sig_cmd(conn, &cmd, data);
3275 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3278 struct l2cap_cmd_rej_unk rej;
3280 BT_ERR("Wrong link type (%d)", err);
3282 /* FIXME: Map err to a valid reason */
3283 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3284 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailing an ERTM/streaming frame. The CRC covers
 * the L2CAP header (which precedes skb->data by hdr_size) plus the
 * payload. Returns nonzero on mismatch (return lines elided here). */
3294 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3296 u16 our_fcs, rcv_fcs;
3299 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3300 hdr_size = L2CAP_EXT_HDR_SIZE;
3302 hdr_size = L2CAP_ENH_HDR_SIZE;
3304 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the 2-byte FCS off len first; the bytes are still in the buffer,
 * so reading at data + (new) len picks up the received FCS. */
3305 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3306 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3307 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3309 if (our_fcs != rcv_fcs)
/* ERTM poll response: after a poll, answer with RNR if we're locally busy,
 * otherwise (re)send pending I-frames, and fall back to an RR if nothing
 * was transmitted — the peer must get *some* frame carrying our ReqSeq.
 * NOTE(review): the `control` declaration/init (F-bit set) is in lines
 * elided from this extract. */
3315 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3319 chan->frames_sent = 0;
3321 control |= __set_reqseq(chan, chan->buffer_seq);
3323 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3324 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3325 l2cap_send_sframe(chan, control);
3326 set_bit(CONN_RNR_SENT, &chan->conn_state);
3329 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3330 l2cap_retransmit_frames(chan);
3332 l2cap_ertm_send(chan);
/* Nothing went out and we're not busy: send an explicit RR ack. */
3334 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3335 chan->frames_sent == 0) {
3336 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3337 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ hold queue, keeping the
 * queue sorted by tx_seq distance from buffer_seq and rejecting
 * duplicates (the duplicate-return lines are elided from this extract). */
3341 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3343 struct sk_buff *next_skb;
3344 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequencing metadata in the skb control block for reassembly. */
3346 bt_cb(skb)->tx_seq = tx_seq;
3347 bt_cb(skb)->sar = sar;
3349 next_skb = skb_peek(&chan->srej_q);
/* Empty queue: trivially append (guard is in elided lines). */
3351 __skb_queue_tail(&chan->srej_q, skb);
/* Compare modular sequence distances so wraparound sorts correctly. */
3355 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3358 if (bt_cb(next_skb)->tx_seq == tx_seq)
3361 next_tx_seq_offset = __seq_offset(chan,
3362 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3364 if (next_tx_seq_offset > tx_seq_offset) {
3365 __skb_queue_before(&chan->srej_q, next_skb, skb);
3369 if (skb_queue_is_last(&chan->srej_q, next_skb))
3372 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Larger than everything queued: goes at the tail. */
3374 __skb_queue_tail(&chan->srej_q, skb);
/* Append one received fragment to an SDU under reassembly using the skb
 * frag_list, tracking the list tail in *last_frag for O(1) appends. */
3379 static void append_skb_frag(struct sk_buff *skb,
3380 struct sk_buff *new_frag, struct sk_buff **last_frag)
3382 /* skb->len reflects data in skb as well as all fragments
3383 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones chain off the tail
 * (the `else` linking through (*last_frag)->next is partially elided). */
3385 if (!skb_has_frag_list(skb))
3386 skb_shinfo(skb)->frag_list = new_frag;
3388 new_frag->next = NULL;
3390 (*last_frag)->next = new_frag;
3391 *last_frag = new_frag;
/* Keep the head skb's accounting consistent with the added fragment. */
3393 skb->len += new_frag->len;
3394 skb->data_len += new_frag->len;
3395 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the SAR bits in `control`:
 * unsegmented frames go straight up; START frames open a new SDU; CONTINUE
 * appends; END appends, checks the total length, and delivers. On any
 * failure the partial SDU is freed and state reset (the error labels and
 * several guards are in lines elided from this extract). */
3398 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3402 switch (__get_ctrl_sar(chan, control)) {
3403 case L2CAP_SAR_UNSEGMENTED:
3407 err = chan->ops->recv(chan->data, skb);
3410 case L2CAP_SAR_START:
/* START frame carries the total SDU length before the payload. */
3414 chan->sdu_len = get_unaligned_le16(skb->data);
3415 skb_pull(skb, L2CAP_SDULEN_SIZE);
3417 if (chan->sdu_len > chan->imtu) {
/* A START whose payload already covers sdu_len is malformed. */
3422 if (skb->len >= chan->sdu_len)
3426 chan->sdu_last_frag = skb;
3432 case L2CAP_SAR_CONTINUE:
3436 append_skb_frag(chan->sdu, skb,
3437 &chan->sdu_last_frag);
/* Overrunning the announced SDU length is an error. */
3440 if (chan->sdu->len >= chan->sdu_len)
/* (END case, label elided) final fragment: append then verify length. */
3450 append_skb_frag(chan->sdu, skb,
3451 &chan->sdu_last_frag);
3454 if (chan->sdu->len != chan->sdu_len)
3457 err = chan->ops->recv(chan->data, chan->sdu);
3460 /* Reassembly complete */
3462 chan->sdu_last_frag = NULL;
/* Error path: drop the partial SDU and clear reassembly state. */
3470 kfree_skb(chan->sdu)
3472 chan->sdu_last_frag = NULL;
/* Enter ERTM local-busy: tell the peer to stop sending by emitting an RNR
 * S-frame with the current ReqSeq, and stop the ack timer — acks are
 * pointless while we refuse new I-frames. */
3479 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3483 BT_DBG("chan %p, Enter local busy", chan);
3485 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3487 control = __set_reqseq(chan, chan->buffer_seq);
3488 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3489 l2cap_send_sframe(chan, control);
/* Remember we owe the peer a "not busy anymore" notification. */
3491 set_bit(CONN_RNR_SENT, &chan->conn_state);
3493 __clear_ack_timer(chan);
/* Leave ERTM local-busy. If we previously sent an RNR, poll the peer with
 * an RR(P=1) and start the monitor timer to await the F-bit response;
 * either way clear the busy/RNR state bits.
 * NOTE(review): the early-return when no RNR was sent, and the `control`
 * declaration, are in lines elided from this extract. */
3496 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3500 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3503 control = __set_reqseq(chan, chan->buffer_seq);
3504 control |= __set_ctrl_poll(chan);
3505 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3506 l2cap_send_sframe(chan, control);
3507 chan->retry_count = 1;
/* Swap retransmission timer for the monitor timer while polling. */
3509 __clear_retrans_timer(chan);
3510 __set_monitor_timer(chan);
3512 set_bit(CONN_WAIT_F, &chan->conn_state);
3515 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3516 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3518 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer to toggle ERTM local-busy (e.g. when
 * the receive buffer fills/drains). No-op for non-ERTM modes.
 * NOTE(review): the `if (busy) ... else ...` selector between the two
 * calls is in lines elided from this extract. */
3521 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3523 if (chan->mode == L2CAP_MODE_ERTM) {
3525 l2cap_ertm_enter_local_busy(chan);
3527 l2cap_ertm_exit_local_busy(chan);
/* After a selectively-rejected frame arrives, drain the SREJ hold queue:
 * deliver consecutive in-order frames upward until the next gap (or until
 * we go locally busy). Disconnect the channel on reassembly failure. */
3531 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3533 struct sk_buff *skb;
3536 while ((skb = skb_peek(&chan->srej_q)) &&
3537 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Queue is sorted; a head that doesn't match tx_seq means a gap remains. */
3540 if (bt_cb(skb)->tx_seq != tx_seq)
3543 skb = skb_dequeue(&chan->srej_q);
3544 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3545 err = l2cap_reassemble_sdu(chan, skb, control);
3548 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Advance both the SREJ-mode buffer sequence and the expected tx_seq. */
3552 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3553 tx_seq = __next_seq(chan, tx_seq);
/* Walk the pending-SREJ list up to (and including) tx_seq: entries before
 * it are re-SREJed and moved to the list tail; the entry matching tx_seq
 * itself is released (removal/free lines are elided from this extract). */
3557 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3559 struct srej_list *l, *tmp;
3562 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3563 if (l->tx_seq == tx_seq) {
3568 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3569 control |= __set_reqseq(chan, l->tx_seq);
3570 l2cap_send_sframe(chan, control);
/* Re-queue at the tail so ordering reflects retransmission order. */
3572 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ S-frame per missing sequence number between
 * expected_tx_seq and the received tx_seq, recording each request in
 * srej_l so the retransmissions can be matched later.
 * NOTE(review): kzalloc's return value is used unchecked on the visible
 * line 3587 — the allocation-failure handling (if any) is in lines elided
 * from this extract; confirm against the full file. */
3576 static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3578 struct srej_list *new;
3581 while (tx_seq != chan->expected_tx_seq) {
3582 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3583 control |= __set_reqseq(chan, chan->expected_tx_seq);
3584 l2cap_send_sframe(chan, control);
3586 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3587 new->tx_seq = chan->expected_tx_seq;
3589 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3591 list_add_tail(&new->list, &chan->srej_l);
/* Finally step past the frame that triggered the SREJ run. */
3594 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive path: validate tx_seq against the window,
 * handle in-order delivery, duplicate detection, SREJ-mode buffering of
 * out-of-order frames, and periodic acking. Returns 0 normally (return
 * and goto-label lines are elided from this extract, as are several
 * braces — read alongside the full file). */
3597 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3599 u16 tx_seq = __get_txseq(chan, rx_control);
3600 u16 req_seq = __get_reqseq(chan, rx_control);
3601 u8 sar = __get_ctrl_sar(chan, rx_control);
3602 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every tx_win/6 frames rather than per-frame. */
3603 int num_to_ack = (chan->tx_win/6) + 1;
3606 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3607 tx_seq, rx_control);
/* F-bit answering our poll: stop the monitor timer, resume retransmit
 * timing if frames are still unacked. */
3609 if (__is_ctrl_final(chan, rx_control) &&
3610 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3611 __clear_monitor_timer(chan);
3612 if (chan->unacked_frames > 0)
3613 __set_retrans_timer(chan);
3614 clear_bit(CONN_WAIT_F, &chan->conn_state);
3617 chan->expected_ack_seq = req_seq;
3618 l2cap_drop_acked_frames(chan);
3620 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3622 /* invalid tx_seq */
3623 if (tx_seq_offset >= chan->tx_win) {
3624 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* Locally busy: drop the frame (goto target elided). */
3628 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3631 if (tx_seq == chan->expected_tx_seq)
/* Out-of-order frame while SREJs are outstanding. */
3634 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3635 struct srej_list *first;
3637 first = list_first_entry(&chan->srej_l,
3638 struct srej_list, list);
3639 if (tx_seq == first->tx_seq) {
/* This fills the oldest gap: queue it and drain what's now in order. */
3640 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3641 l2cap_check_srej_gap(chan, tx_seq);
3643 list_del(&first->list);
3646 if (list_empty(&chan->srej_l)) {
/* All gaps filled: leave SREJ mode and ack everything. */
3647 chan->buffer_seq = chan->buffer_seq_srej;
3648 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3649 l2cap_send_ack(chan);
3650 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3653 struct srej_list *l;
3655 /* duplicated tx_seq */
3656 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3659 list_for_each_entry(l, &chan->srej_l, list) {
3660 if (l->tx_seq == tx_seq) {
3661 l2cap_resend_srejframe(chan, tx_seq);
3665 l2cap_send_srejframe(chan, tx_seq);
/* Out-of-order frame with no SREJ outstanding yet: enter SREJ mode. */
3668 expected_tx_seq_offset = __seq_offset(chan,
3669 chan->expected_tx_seq, chan->buffer_seq);
3671 /* duplicated tx_seq */
3672 if (tx_seq_offset < expected_tx_seq_offset)
3675 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3677 BT_DBG("chan %p, Enter SREJ", chan);
3679 INIT_LIST_HEAD(&chan->srej_l);
3680 chan->buffer_seq_srej = chan->buffer_seq;
3682 __skb_queue_head_init(&chan->srej_q);
3683 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3685 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3687 l2cap_send_srejframe(chan, tx_seq);
3689 __clear_ack_timer(chan);
/* (expected label elided) in-order frame: advance and deliver. */
3694 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3696 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* In SREJ mode even in-order frames are held until gaps close. */
3697 bt_cb(skb)->tx_seq = tx_seq;
3698 bt_cb(skb)->sar = sar;
3699 __skb_queue_tail(&chan->srej_q, skb);
3703 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3704 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3707 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3711 if (__is_ctrl_final(chan, rx_control)) {
3712 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3713 l2cap_retransmit_frames(chan);
3716 __set_ack_timer(chan);
/* Count received frames and ack when the num_to_ack threshold hits. */
3718 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3719 if (chan->num_acked == num_to_ack - 1)
3720 l2cap_send_ack(chan);
/* Handle a received RR (Receive Ready) S-frame: acknowledge frames up to
 * ReqSeq, then react to the P/F bits — a poll demands an immediate
 * response, an F-bit may complete a REJ recovery, and a plain RR resumes
 * transmission. (Several braces/else lines are elided from this extract.) */
3729 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3731 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3732 __get_reqseq(chan, rx_control), rx_control);
3734 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3735 l2cap_drop_acked_frames(chan);
3737 if (__is_ctrl_poll(chan, rx_control)) {
/* Peer polled us: our next frame must carry the F bit. */
3738 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3739 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3740 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3741 (chan->unacked_frames > 0))
3742 __set_retrans_timer(chan);
3744 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3745 l2cap_send_srejtail(chan);
3747 l2cap_send_i_or_rr_or_rnr(chan);
3750 } else if (__is_ctrl_final(chan, rx_control)) {
3751 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F-bit without an active REJ means the poll answer: retransmit. */
3753 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3754 l2cap_retransmit_frames(chan);
3757 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3758 (chan->unacked_frames > 0))
3759 __set_retrans_timer(chan);
3761 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3762 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3763 l2cap_send_ack(chan);
3765 l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: the peer asks for retransmission of all
 * frames from ReqSeq. Ack what precedes ReqSeq, then retransmit — unless
 * the F bit already closed a pending recovery. */
3769 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
3771 u16 tx_seq = __get_reqseq(chan, rx_control);
3773 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3775 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3777 chan->expected_ack_seq = tx_seq;
3778 l2cap_drop_acked_frames(chan);
3780 if (__is_ctrl_final(chan, rx_control)) {
3781 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3782 l2cap_retransmit_frames(chan);
/* (else arm, brace elided) plain REJ: retransmit and, if we are waiting
 * for an F-bit, remember a REJ recovery is in flight. */
3784 l2cap_retransmit_frames(chan);
3786 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3787 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a received SREJ S-frame: retransmit exactly the requested frame.
 * The P/F bits distinguish a poll (also ack + send F-bit), a final answer
 * (may close SREJ recovery), and a plain selective reject. */
3790 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
3792 u16 tx_seq = __get_reqseq(chan, rx_control);
3794 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3796 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3798 if (__is_ctrl_poll(chan, rx_control)) {
3799 chan->expected_ack_seq = tx_seq;
3800 l2cap_drop_acked_frames(chan);
3802 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3803 l2cap_retransmit_one_frame(chan, tx_seq);
3805 l2cap_ertm_send(chan);
3807 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
/* Remember which ReqSeq the poll carried so the later F-bit matches. */
3808 chan->srej_save_reqseq = tx_seq;
3809 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3811 } else if (__is_ctrl_final(chan, rx_control)) {
3812 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3813 chan->srej_save_reqseq == tx_seq)
3814 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3816 l2cap_retransmit_one_frame(chan, tx_seq);
/* (else arm elided) plain SREJ: retransmit, and note recovery if polling. */
3818 l2cap_retransmit_one_frame(chan, tx_seq);
3819 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3820 chan->srej_save_reqseq = tx_seq;
3821 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a received RNR (Receive Not Ready): mark the peer busy, ack up
 * to ReqSeq, stop retransmitting, and answer any poll appropriately
 * (several braces/else lines are elided from this extract). */
3826 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
3828 u16 tx_seq = __get_reqseq(chan, rx_control);
3830 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3832 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3833 chan->expected_ack_seq = tx_seq;
3834 l2cap_drop_acked_frames(chan);
3836 if (__is_ctrl_poll(chan, rx_control))
3837 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3839 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* Peer can't receive: suspend the retransmission timer. */
3840 __clear_retrans_timer(chan);
3841 if (__is_ctrl_poll(chan, rx_control))
3842 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
/* In SREJ mode a poll is answered with the outstanding SREJ tail,
 * otherwise with a plain RR. */
3846 if (__is_ctrl_poll(chan, rx_control)) {
3847 l2cap_send_srejtail(chan);
3849 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
3850 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received ERTM S-frame by its supervisory function (RR, REJ,
 * SREJ, RNR). Common F-bit handling (closing a WAIT_F poll cycle) happens
 * first. The skb is consumed (kfree and return lines are elided). */
3854 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3856 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
3858 if (__is_ctrl_final(chan, rx_control) &&
3859 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3860 __clear_monitor_timer(chan);
3861 if (chan->unacked_frames > 0)
3862 __set_retrans_timer(chan);
3863 clear_bit(CONN_WAIT_F, &chan->conn_state);
3866 switch (__get_ctrl_super(chan, rx_control)) {
3867 case L2CAP_SUPER_RR:
3868 l2cap_data_channel_rrframe(chan, rx_control);
3871 case L2CAP_SUPER_REJ:
3872 l2cap_data_channel_rejframe(chan, rx_control);
3875 case L2CAP_SUPER_SREJ:
3876 l2cap_data_channel_srejframe(chan, rx_control);
3879 case L2CAP_SUPER_RNR:
3880 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for an ERTM frame on a channel (also run from the socket
 * backlog): strip the control field, verify FCS and payload length,
 * validate ReqSeq against the unacked window, then route to the I-frame
 * or S-frame handler. (goto labels, kfree_skb and returns are in lines
 * elided from this extract.) */
3888 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3890 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3893 int len, next_tx_seq_offset, req_seq_offset;
3895 control = __get_control(chan, skb->data);
3896 skb_pull(skb, __ctrl_size(chan));
3900 * We can just drop the corrupted I-frame here.
3901 * Receiver will miss it and start proper recovery
3902 * procedures and ask retransmission.
3904 if (l2cap_check_fcs(chan, skb))
/* Compute the pure payload length: subtract SDU-length field on SAR
 * start frames and the FCS if present. */
3907 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
3908 len -= L2CAP_SDULEN_SIZE;
3910 if (chan->fcs == L2CAP_FCS_CRC16)
3911 len -= L2CAP_FCS_SIZE;
3913 if (len > chan->mps) {
3914 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3918 req_seq = __get_reqseq(chan, control);
3920 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
3922 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
3923 chan->expected_ack_seq);
3925 /* check for invalid req-seq */
3926 if (req_seq_offset > next_tx_seq_offset) {
3927 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3931 if (!__is_sframe(chan, control)) {
/* I-frames must carry payload; empty ones are protocol errors. */
3933 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3937 l2cap_data_channel_iframe(chan, control, skb);
/* S-frames must NOT carry payload beyond the control field. */
3941 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3945 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a connection-oriented data frame to the channel identified by
 * `cid`, per channel mode: basic (direct delivery), ERTM (full state
 * machine, deferred to the socket backlog when the sk is user-locked),
 * or streaming (sequence-checked, lossy). Drops and returns are in lines
 * elided from this extract; sk is bound to chan->sk in elided code. */
3955 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3957 struct l2cap_chan *chan;
3958 struct sock *sk = NULL;
3963 chan = l2cap_get_chan_by_scid(conn, cid);
3965 BT_DBG("unknown cid 0x%4.4x", cid);
3971 BT_DBG("chan %p, len %d", chan, skb->len);
3973 if (chan->state != BT_CONNECTED)
3976 switch (chan->mode) {
3977 case L2CAP_MODE_BASIC:
3978 /* If socket recv buffers overflows we drop data here
3979 * which is *bad* because L2CAP has to be reliable.
3980 * But we don't have any other choice. L2CAP doesn't
3981 * provide flow control mechanism. */
3983 if (chan->imtu < skb->len)
3986 if (!chan->ops->recv(chan->data, skb))
3990 case L2CAP_MODE_ERTM:
/* If userspace holds the socket lock, queue to the backlog so the
 * ERTM state machine runs serialized at lock release. */
3991 if (!sock_owned_by_user(sk)) {
3992 l2cap_ertm_data_rcv(sk, skb);
3994 if (sk_add_backlog(sk, skb))
4000 case L2CAP_MODE_STREAMING:
4001 control = __get_control(chan, skb->data);
4002 skb_pull(skb, __ctrl_size(chan));
4005 if (l2cap_check_fcs(chan, skb))
4008 if (__is_sar_start(chan, control))
4009 len -= L2CAP_SDULEN_SIZE;
4011 if (chan->fcs == L2CAP_FCS_CRC16)
4012 len -= L2CAP_FCS_SIZE;
/* Streaming mode never carries S-frames; oversize/negative is bogus. */
4014 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4017 tx_seq = __get_txseq(chan, control);
4019 if (chan->expected_tx_seq != tx_seq) {
4020 /* Frame(s) missing - must discard partial SDU */
4021 kfree_skb(chan->sdu);
4023 chan->sdu_last_frag = NULL;
4026 /* TODO: Notify userland of missing data */
4029 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4031 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4032 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4037 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (CID 0x0002) frame to the socket listening on
 * the given PSM, subject to state and receive-MTU checks. Drop paths and
 * returns are in lines elided from this extract. */
4051 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4053 struct sock *sk = NULL;
4054 struct l2cap_chan *chan;
4056 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4064 BT_DBG("sk %p, len %d", sk, skb->len);
4066 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4069 if (chan->imtu < skb->len)
4072 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT fixed-channel (LE data CID) frame to the socket bound to
 * that CID, with the same state/MTU checks as the connectionless path.
 * Drop paths and returns are in lines elided from this extract. */
4084 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4086 struct sock *sk = NULL;
4087 struct l2cap_chan *chan;
4089 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4097 BT_DBG("sk %p, len %d", sk, skb->len);
4099 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4102 if (chan->imtu < skb->len)
4105 if (!chan->ops->recv(chan->data, skb))
/* Route one complete L2CAP frame by destination CID: signaling channels,
 * connectionless PSM traffic, the LE ATT channel, SMP, or a regular
 * connection-oriented data channel. (Length-mismatch drop and `break`s
 * are in lines elided from this extract.) */
4117 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4119 struct l2cap_hdr *lh = (void *) skb->data;
4123 skb_pull(skb, L2CAP_HDR_SIZE);
4124 cid = __le16_to_cpu(lh->cid);
4125 len = __le16_to_cpu(lh->len);
/* Header length must match the remaining payload exactly. */
4127 if (len != skb->len) {
4132 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4135 case L2CAP_CID_LE_SIGNALING:
4136 case L2CAP_CID_SIGNALING:
4137 l2cap_sig_channel(conn, skb);
4140 case L2CAP_CID_CONN_LESS:
/* Connectionless frames begin with a 2-byte PSM before the payload. */
4141 psm = get_unaligned_le16(skb->data);
4143 l2cap_conless_channel(conn, psm, skb);
4146 case L2CAP_CID_LE_DATA:
4147 l2cap_att_channel(conn, cid, skb);
/* (SMP CID case elided) a failed SMP exchange kills the connection. */
4151 if (smp_sig_channel(conn, skb))
4152 l2cap_conn_del(conn->hcon, EACCES);
/* Default: connection-oriented dynamic CID. */
4156 l2cap_data_channel(conn, cid, skb);
4161 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from bdaddr?
 * Scan listening channels; an exact local-address match (lm1) takes
 * precedence over BDADDR_ANY wildcard listeners (lm2). Returns HCI_LM_*
 * flags. (The `exact++` bump for address matches is in an elided line.) */
4163 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4165 int exact = 0, lm1 = 0, lm2 = 0;
4166 struct l2cap_chan *c;
4168 if (type != ACL_LINK)
4171 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4173 /* Find listening sockets and check their link_mode */
4174 read_lock(&chan_list_lock);
4175 list_for_each_entry(c, &chan_list, global_l) {
4176 struct sock *sk = c->sk;
4178 if (c->state != BT_LISTEN)
4181 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4182 lm1 |= HCI_LM_ACCEPT;
4183 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4184 lm1 |= HCI_LM_MASTER;
4186 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4187 lm2 |= HCI_LM_ACCEPT;
4188 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4189 lm2 |= HCI_LM_MASTER;
4192 read_unlock(&chan_list_lock);
4194 return exact ? lm1 : lm2;
/* HCI callback: an ACL/LE link finished connecting. On success create the
 * L2CAP connection object and run channel setup; on failure tear down any
 * partial state with the mapped errno. */
4197 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4199 struct l2cap_conn *conn;
4201 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4203 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4207 conn = l2cap_conn_add(hcon, status);
4209 l2cap_conn_ready(conn);
4211 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: report the HCI reason code to use when this link is torn
 * down (defaults handled in an elided line when conn is absent). */
4216 static int l2cap_disconn_ind(struct hci_conn *hcon)
4218 struct l2cap_conn *conn = hcon->l2cap_data;
4220 BT_DBG("hcon %p", hcon);
4222 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4225 return conn->disc_reason;
/* HCI callback: the link is gone — destroy the L2CAP connection and all
 * its channels with the errno mapped from the HCI reason. */
4228 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4230 BT_DBG("hcon %p reason %d", hcon, reason);
4232 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4235 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on an established channel: losing
 * encryption gives MEDIUM-security channels a 5s grace timer and closes
 * HIGH-security ones outright; regaining it cancels the grace timer. */
4240 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4242 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4245 if (encrypt == 0x00) {
4246 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4247 __clear_chan_timer(chan);
4248 __set_chan_timer(chan, HZ * 5);
4249 } else if (chan->sec_level == BT_SECURITY_HIGH)
4250 l2cap_chan_close(chan, ECONNREFUSED);
/* (else arm elided) encryption restored. */
4252 if (chan->sec_level == BT_SECURITY_MEDIUM)
4253 __clear_chan_timer(chan);
/* HCI callback: an authentication/encryption procedure completed. For LE,
 * hand off to SMP key distribution. For each channel on the connection:
 * re-check encryption on established channels, and for channels parked in
 * BT_CONNECT/BT_CONNECT2 waiting on security, either proceed with the
 * connect request/response or fail them. (bh_lock_sock/unlock pairs,
 * `continue`s and several braces are in lines elided from this extract.) */
4257 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4259 struct l2cap_conn *conn = hcon->l2cap_data;
4260 struct l2cap_chan *chan;
4265 BT_DBG("conn %p", conn);
4267 if (hcon->type == LE_LINK) {
4268 smp_distribute_keys(conn, 0);
4269 del_timer(&conn->security_timer);
4272 read_lock(&conn->chan_lock);
4274 list_for_each_entry(chan, &conn->chan_l, list) {
4275 struct sock *sk = chan->sk;
4279 BT_DBG("chan->scid %d", chan->scid);
4281 if (chan->scid == L2CAP_CID_LE_DATA) {
4282 if (!status && encrypt) {
/* LE ATT channel: security raised — promote and mark ready. */
4283 chan->sec_level = hcon->sec_level;
4284 l2cap_chan_ready(sk);
4291 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4296 if (!status && (chan->state == BT_CONNECTED ||
4297 chan->state == BT_CONFIG)) {
4298 l2cap_check_encryption(chan, encrypt);
4303 if (chan->state == BT_CONNECT) {
/* Security now satisfied: issue the deferred Connect Request. */
4305 struct l2cap_conn_req req;
4306 req.scid = cpu_to_le16(chan->scid);
4307 req.psm = chan->psm;
4309 chan->ident = l2cap_get_ident(conn);
4310 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4312 l2cap_send_cmd(conn, chan->ident,
4313 L2CAP_CONN_REQ, sizeof(req), &req);
/* (else: security failed) short timer to tear the channel down. */
4315 __clear_chan_timer(chan);
4316 __set_chan_timer(chan, HZ / 10);
4318 } else if (chan->state == BT_CONNECT2) {
4319 struct l2cap_conn_rsp rsp;
4323 if (bt_sk(sk)->defer_setup) {
/* Userspace wants to authorize: keep pending, wake the listener. */
4324 struct sock *parent = bt_sk(sk)->parent;
4325 res = L2CAP_CR_PEND;
4326 stat = L2CAP_CS_AUTHOR_PEND;
4328 parent->sk_data_ready(parent, 0);
4330 l2cap_state_change(chan, BT_CONFIG);
4331 res = L2CAP_CR_SUCCESS;
4332 stat = L2CAP_CS_NO_INFO;
/* (else: security failed) refuse the pending incoming connection. */
4335 l2cap_state_change(chan, BT_DISCONN);
4336 __set_chan_timer(chan, HZ / 10);
4337 res = L2CAP_CR_SEC_BLOCK;
4338 stat = L2CAP_CS_NO_INFO;
4341 rsp.scid = cpu_to_le16(chan->dcid);
4342 rsp.dcid = cpu_to_le16(chan->scid);
4343 rsp.result = cpu_to_le16(res);
4344 rsp.status = cpu_to_le16(stat);
4345 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4352 read_unlock(&conn->chan_lock);
/* HCI callback: one ACL fragment arrived. Start fragments (no ACL_CONT
 * flag) carry the L2CAP header; a fragment equal to the announced frame
 * length is processed immediately, otherwise an rx_skb is allocated and
 * continuation fragments are appended until rx_len reaches zero.
 * Malformed sequences mark the connection unreliable. (Allocation-failure
 * handling, several `goto drop`s and kfree_skb are in elided lines.) */
4357 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4359 struct l2cap_conn *conn = hcon->l2cap_data;
4362 conn = l2cap_conn_add(hcon, 0);
4367 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4369 if (!(flags & ACL_CONT)) {
4370 struct l2cap_hdr *hdr;
4371 struct l2cap_chan *chan;
/* A new start while a previous frame was still being reassembled
 * means we lost fragments: discard the stale rx_skb. */
4376 BT_ERR("Unexpected start frame (len %d)", skb->len);
4377 kfree_skb(conn->rx_skb);
4378 conn->rx_skb = NULL;
4380 l2cap_conn_unreliable(conn, ECOMM);
4383 /* Start fragment always begin with Basic L2CAP header */
4384 if (skb->len < L2CAP_HDR_SIZE) {
4385 BT_ERR("Frame is too short (len %d)", skb->len);
4386 l2cap_conn_unreliable(conn, ECOMM);
4390 hdr = (struct l2cap_hdr *) skb->data;
4391 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4392 cid = __le16_to_cpu(hdr->cid);
4394 if (len == skb->len) {
4395 /* Complete frame received */
4396 l2cap_recv_frame(conn, skb);
4400 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4402 if (skb->len > len) {
4403 BT_ERR("Frame is too long (len %d, expected len %d)",
4405 l2cap_conn_unreliable(conn, ECOMM);
/* Early MTU check against the target channel, before buffering. */
4409 chan = l2cap_get_chan_by_scid(conn, cid);
4411 if (chan && chan->sk) {
4412 struct sock *sk = chan->sk;
4414 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4415 BT_ERR("Frame exceeding recv MTU (len %d, "
4419 l2cap_conn_unreliable(conn, ECOMM);
4425 /* Allocate skb for the complete frame (with header) */
4426 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4430 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4432 conn->rx_len = len - skb->len;
/* (else: ACL_CONT) continuation fragment path. */
4434 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4436 if (!conn->rx_len) {
4437 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4438 l2cap_conn_unreliable(conn, ECOMM);
4442 if (skb->len > conn->rx_len) {
4443 BT_ERR("Fragment is too long (len %d, expected %d)",
4444 skb->len, conn->rx_len);
4445 kfree_skb(conn->rx_skb);
4446 conn->rx_skb = NULL;
4448 l2cap_conn_unreliable(conn, ECOMM);
4452 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4454 conn->rx_len -= skb->len;
4456 if (!conn->rx_len) {
4457 /* Complete frame received */
4458 l2cap_recv_frame(conn, conn->rx_skb);
4459 conn->rx_skb = NULL;
/* debugfs dump: one line per global L2CAP channel — addresses, state,
 * PSM, CIDs, MTUs, security level and mode. */
4468 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4470 struct l2cap_chan *c;
4472 read_lock_bh(&chan_list_lock);
4474 list_for_each_entry(c, &chan_list, global_l) {
4475 struct sock *sk = c->sk;
4477 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4478 batostr(&bt_sk(sk)->src),
4479 batostr(&bt_sk(sk)->dst),
4480 c->state, __le16_to_cpu(c->psm),
4481 c->scid, c->dcid, c->imtu, c->omtu,
4482 c->sec_level, c->mode);
4485 read_unlock_bh(&chan_list_lock);
/* debugfs open: standard single_open shim around l2cap_debugfs_show. */
4490 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4492 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based; the
 * .read = seq_read line is elided from this extract). */
4495 static const struct file_operations l2cap_debugfs_fops = {
4496 .open = l2cap_debugfs_open,
4498 .llseek = seq_lseek,
4499 .release = single_release,
/* Dentry handle kept for removal in l2cap_exit(). */
4502 static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core: connection
 * accept/confirm, disconnect, security and ACL-data callbacks. */
4504 static struct hci_proto l2cap_hci_proto = {
4506 .id = HCI_PROTO_L2CAP,
4507 .connect_ind = l2cap_connect_ind,
4508 .connect_cfm = l2cap_connect_cfm,
4509 .disconn_ind = l2cap_disconn_ind,
4510 .disconn_cfm = l2cap_disconn_cfm,
4511 .security_cfm = l2cap_security_cfm,
4512 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket family, hook into HCI, and create the
 * optional debugfs entry (non-fatal if it fails). Error unwinding goes
 * through l2cap_cleanup_sockets (label lines elided from this extract). */
4515 int __init l2cap_init(void)
4519 err = l2cap_init_sockets();
4523 err = hci_register_proto(&l2cap_hci_proto);
4525 BT_ERR("L2CAP protocol registration failed");
/* Undo the socket registration done above before bailing out. */
4526 bt_sock_unregister(BTPROTO_L2CAP);
4531 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4532 bt_debugfs, NULL, &l2cap_debugfs_fops);
4534 BT_ERR("Failed to create L2CAP debug file");
4540 l2cap_cleanup_sockets();
/* Module exit: remove debugfs entry, detach from HCI, tear down sockets
 * — the reverse of l2cap_init(). */
4544 void l2cap_exit(void)
4546 debugfs_remove(l2cap_debugfs);
4548 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4549 BT_ERR("L2CAP protocol unregistration failed");
4551 l2cap_cleanup_sockets();
/* Runtime-tunable module parameters (variables declared earlier in the
 * file, outside this extract). */
4554 module_param(disable_ertm, bool, 0644);
4555 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4557 module_param(enable_hs, bool, 0644);
4558 MODULE_PARM_DESC(enable_hs, "Enable High Speed");