2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* Module-wide L2CAP state: the locally advertised feature mask, the
 * fixed-channel bitmap, and the global list of registered channels
 * guarded by chan_list_lock. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers defined later in the file.
 * NOTE(review): this extract elides some lines (e.g. the tail of the
 * l2cap_send_cmd prototype). */
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
78 /* ---- L2CAP channels ---- */
/* Take a reference on a channel. */
80 static inline void chan_hold(struct l2cap_chan *c)
82 atomic_inc(&c->refcnt);
/* Drop a reference; the free path on the last put is elided in this
 * extract — presumably kfree(c). TODO confirm against the full file. */
85 static inline void chan_put(struct l2cap_chan *c)
87 if (atomic_dec_and_test(&c->refcnt))
/* Look up a channel on @conn by destination CID. Caller must hold
 * conn->chan_lock. The match/return lines are elided in this extract. */
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {
/* Same lookup keyed on the source CID instead. Caller must hold
 * conn->chan_lock. */
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
105 struct l2cap_chan *c;
107 list_for_each_entry(c, &conn->chan_l, list) {
114 /* Find channel with given SCID.
115 * Returns locked socket */
/* Locking wrapper around __l2cap_get_chan_by_scid(); takes chan_lock for
 * reading around the lookup. The lock-the-socket step mentioned above is
 * on a line elided in this extract. */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
118 struct l2cap_chan *c;
120 read_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
124 read_unlock(&conn->chan_lock);
/* Look up a channel on @conn by pending signalling identifier.
 * Caller must hold conn->chan_lock. */
128 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident)
/* Locking wrapper: same lookup under a read-held chan_lock. */
139 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
141 struct l2cap_chan *c;
143 read_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_ident(conn, ident);
147 read_unlock(&conn->chan_lock);
/* Scan the global channel list for a channel bound to (@psm, @src).
 * Note it compares against c->sport, i.e. the local/source PSM.
 * Caller must hold chan_list_lock. */
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
153 struct l2cap_chan *c;
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind @chan to @psm on @src, registering it in the global list.
 * If @psm is zero, auto-allocate an odd dynamic PSM in 0x1001..0x10ff
 * (valid PSMs have an odd least-significant octet). If the requested
 * PSM is already taken, fail — the error value is on an elided line
 * (presumably -EADDRINUSE; confirm against the full file). */
165 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
169 write_lock_bh(&chan_list_lock);
171 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
184 for (p = 0x1001; p < 0x1100; p += 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
186 chan->psm = cpu_to_le16(p);
187 chan->sport = cpu_to_le16(p);
194 write_unlock_bh(&chan_list_lock);
/* Assign a fixed SCID to @chan under the global list lock; the actual
 * assignment line is elided in this extract. */
198 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
200 write_lock_bh(&chan_list_lock);
204 write_unlock_bh(&chan_list_lock);
/* Allocate the first free dynamic source CID on @conn by linear scan of
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). Caller must hold chan_lock;
 * the return statements are elided in this extract. */
209 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
211 u16 cid = L2CAP_CID_DYN_START;
213 for (; cid < L2CAP_CID_DYN_END; cid++) {
214 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm @timer for @timeout ms; if it was not already pending,
 * mod_timer() returns 0 and the elided line presumably takes a channel
 * reference to balance the timer callback's put — confirm in full file. */
221 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
223 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
225 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
/* Cancel @timer if pending; on successful deletion the elided line
 * presumably drops the reference taken when it was armed. */
229 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
231 BT_DBG("chan %p state %d", chan, chan->state);
233 if (timer_pending(timer) && del_timer(timer))
/* Transition @chan to @state and notify the owner via the state_change
 * ops callback (the chan->state assignment itself is on an elided line). */
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
240 chan->ops->state_change(chan->data, state);
/* Channel guard-timer callback (runs in timer/softirq context).
 * If the socket is owned by user space, back off and retry in HZ/5.
 * Otherwise pick an error reason from the channel state, close the
 * channel, and invoke the owner's close callback. The default-reason
 * branch (likely ETIMEDOUT) and locking lines are elided here. */
243 static void l2cap_chan_timeout(unsigned long arg)
245 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
246 struct sock *sk = chan->sk;
249 BT_DBG("chan %p state %d", chan, chan->state);
253 if (sock_owned_by_user(sk)) {
254 /* sk is owned by user. Try again later */
255 __set_chan_timer(chan, HZ / 5);
261 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
262 reason = ECONNREFUSED;
263 else if (chan->state == BT_CONNECT &&
264 chan->sec_level != BT_SECURITY_SDP)
265 reason = ECONNREFUSED;
269 l2cap_chan_close(chan, reason);
273 chan->ops->close(chan->data);
/* Allocate and initialise a new channel bound to @sk: registers it on
 * the global list, arms its guard timer callback, starts in BT_OPEN
 * with refcount 1. Returns the channel (NULL-check line elided). */
277 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
279 struct l2cap_chan *chan;
281 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
287 write_lock_bh(&chan_list_lock);
288 list_add(&chan->global_l, &chan_list);
289 write_unlock_bh(&chan_list_lock);
291 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
293 chan->state = BT_OPEN;
295 atomic_set(&chan->refcnt, 1);
/* Unregister @chan from the global list; the final chan_put/free is on
 * an elided line. */
300 void l2cap_chan_destroy(struct l2cap_chan *chan)
302 write_lock_bh(&chan_list_lock);
303 list_del(&chan->global_l);
304 write_unlock_bh(&chan_list_lock);
/* Attach @chan to @conn and pick its CIDs by channel type:
 *  - connection-oriented on LE: fixed LE data CID and LE default MTU;
 *  - connection-oriented on ACL: dynamically allocated SCID;
 *  - connectionless: fixed connectionless CID;
 *  - raw (else branch, header elided): signalling CID only.
 * Also seeds default best-effort QoS/flow-spec parameters and links the
 * channel into conn->chan_l. Caller must hold conn->chan_lock.
 * disc_reason 0x13 is "remote user terminated connection". */
309 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
311 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
312 chan->psm, chan->dcid);
314 conn->disc_reason = 0x13;
318 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
319 if (conn->hcon->type == LE_LINK) {
321 chan->omtu = L2CAP_LE_DEFAULT_MTU;
322 chan->scid = L2CAP_CID_LE_DATA;
323 chan->dcid = L2CAP_CID_LE_DATA;
325 /* Alloc CID for connection-oriented socket */
326 chan->scid = l2cap_alloc_cid(conn);
327 chan->omtu = L2CAP_DEFAULT_MTU;
329 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
330 /* Connectionless socket */
331 chan->scid = L2CAP_CID_CONN_LESS;
332 chan->dcid = L2CAP_CID_CONN_LESS;
333 chan->omtu = L2CAP_DEFAULT_MTU;
335 /* Raw socket can send/recv signalling messages only */
336 chan->scid = L2CAP_CID_SIGNALING;
337 chan->dcid = L2CAP_CID_SIGNALING;
338 chan->omtu = L2CAP_DEFAULT_MTU;
341 chan->local_id = L2CAP_BESTEFFORT_ID;
342 chan->local_stype = L2CAP_SERV_BESTEFFORT;
343 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
344 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
345 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
346 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
350 list_add(&chan->list, &conn->chan_l);
354 * Must be called on the locked socket. */
/* Detach @chan from its connection and tear it down: stop the guard
 * timer, unlink from conn->chan_l, drop the hcon reference, mark the
 * channel closed and the socket zapped, wake any accepting parent, and
 * purge ERTM state (tx/srej queues, retrans/monitor/ack timers, SREJ
 * list). Several conditionals and frees are on elided lines. */
355 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
357 struct sock *sk = chan->sk;
358 struct l2cap_conn *conn = chan->conn;
359 struct sock *parent = bt_sk(sk)->parent;
361 __clear_chan_timer(chan);
363 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
366 /* Delete from channel list */
367 write_lock_bh(&conn->chan_lock);
368 list_del(&chan->list);
369 write_unlock_bh(&conn->chan_lock);
373 hci_conn_put(conn->hcon);
376 l2cap_state_change(chan, BT_CLOSED);
377 sock_set_flag(sk, SOCK_ZAPPED);
383 bt_accept_unlink(sk);
384 parent->sk_data_ready(parent, 0);
386 sk->sk_state_change(sk);
388 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
389 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
392 skb_queue_purge(&chan->tx_q);
394 if (chan->mode == L2CAP_MODE_ERTM) {
395 struct srej_list *l, *tmp;
397 __clear_retrans_timer(chan);
398 __clear_monitor_timer(chan);
399 __clear_ack_timer(chan);
401 skb_queue_purge(&chan->srej_q);
403 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every not-yet-accepted child channel queued on a listening
 * parent socket: dequeue each, cancel its timer, close it with
 * ECONNRESET and run the owner's close callback. */
410 static void l2cap_chan_cleanup_listen(struct sock *parent)
414 BT_DBG("parent %p", parent);
416 /* Close not yet accepted channels */
417 while ((sk = bt_accept_dequeue(parent, NULL))) {
418 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
419 __clear_chan_timer(chan);
421 l2cap_chan_close(chan, ECONNRESET);
423 chan->ops->close(chan->data);
/* Close @chan according to its current state (switch labels such as
 * BT_LISTEN/BT_CONNECTED/BT_CONNECT2 are on elided lines):
 *  - listening: clean up pending children and zap the socket;
 *  - connected/config on ACL: arm the guard timer and send a
 *    Disconnect Request; otherwise delete immediately;
 *  - connect2 on ACL (request received, not yet accepted): answer the
 *    pending Connect Request with a reject (security block if
 *    defer_setup, else bad PSM) before deleting;
 *  - other states: delete or just zap the socket. */
427 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
429 struct l2cap_conn *conn = chan->conn;
430 struct sock *sk = chan->sk;
432 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
434 switch (chan->state) {
436 l2cap_chan_cleanup_listen(sk);
438 l2cap_state_change(chan, BT_CLOSED);
439 sock_set_flag(sk, SOCK_ZAPPED);
444 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
445 conn->hcon->type == ACL_LINK) {
446 __clear_chan_timer(chan);
447 __set_chan_timer(chan, sk->sk_sndtimeo);
448 l2cap_send_disconn_req(conn, chan, reason);
450 l2cap_chan_del(chan, reason);
454 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
455 conn->hcon->type == ACL_LINK) {
456 struct l2cap_conn_rsp rsp;
459 if (bt_sk(sk)->defer_setup)
460 result = L2CAP_CR_SEC_BLOCK;
462 result = L2CAP_CR_BAD_PSM;
463 l2cap_state_change(chan, BT_DISCONN);
/* Response carries our dcid/scid swapped into the peer's view. */
465 rsp.scid = cpu_to_le16(chan->dcid);
466 rsp.dcid = cpu_to_le16(chan->scid);
467 rsp.result = cpu_to_le16(result);
468 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
469 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
473 l2cap_chan_del(chan, reason);
478 l2cap_chan_del(chan, reason);
482 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel's security level to an HCI authentication type:
 *  - raw channels request dedicated bonding (MITM at HIGH);
 *  - PSM 0x0001 (SDP) never bonds, and LOW is promoted to the special
 *    SDP security level;
 *  - everything else requests general bonding (default branch for
 *    MEDIUM-below is on elided lines, returning no bonding). */
487 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
489 if (chan->chan_type == L2CAP_CHAN_RAW) {
490 switch (chan->sec_level) {
491 case BT_SECURITY_HIGH:
492 return HCI_AT_DEDICATED_BONDING_MITM;
493 case BT_SECURITY_MEDIUM:
494 return HCI_AT_DEDICATED_BONDING;
496 return HCI_AT_NO_BONDING;
498 } else if (chan->psm == cpu_to_le16(0x0001)) {
499 if (chan->sec_level == BT_SECURITY_LOW)
500 chan->sec_level = BT_SECURITY_SDP;
502 if (chan->sec_level == BT_SECURITY_HIGH)
503 return HCI_AT_NO_BONDING_MITM;
505 return HCI_AT_NO_BONDING;
507 switch (chan->sec_level) {
508 case BT_SECURITY_HIGH:
509 return HCI_AT_GENERAL_BONDING_MITM;
510 case BT_SECURITY_MEDIUM:
511 return HCI_AT_GENERAL_BONDING;
513 return HCI_AT_NO_BONDING;
518 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level with the
 * auth type derived above; returns hci_conn_security()'s result. */
519 static inline int l2cap_check_security(struct l2cap_chan *chan)
521 struct l2cap_conn *conn = chan->conn;
524 auth_type = l2cap_get_auth_type(chan);
526 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling identifier for @conn under conn->lock;
 * wraps within 1..128 (the wrap-to-1 assignment is on an elided line). */
529 static u8 l2cap_get_ident(struct l2cap_conn *conn)
533 /* Get next available identificator.
534 * 1 - 128 are used by kernel.
535 * 129 - 199 are reserved.
536 * 200 - 254 are used by utilities like l2ping, etc.
539 spin_lock_bh(&conn->lock);
541 if (++conn->tx_ident > 128)
546 spin_unlock_bh(&conn->lock);
/* Build and transmit an L2CAP signalling command on @conn's HCI
 * channel. Marks the ACL as non-flushable when the controller supports
 * it, forces the link active, and sends at maximum HCI priority.
 * The NULL-check after build and the flags default are elided. */
551 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
553 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
556 BT_DBG("code 0x%2.2x", code);
561 if (lmp_no_flush_capable(conn->hcon->hdev))
562 flags = ACL_START_NO_FLUSH;
566 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
567 skb->priority = HCI_PRIO_MAX;
569 hci_send_acl(conn->hchan, skb, flags);
/* Hand a fully built L2CAP PDU to the HCI layer. A PDU is sent
 * non-flushable unless the channel's FLAG_FLUSHABLE is set; force_active
 * follows the channel's FLAG_FORCE_ACTIVE. */
572 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
574 struct hci_conn *hcon = chan->conn->hcon;
577 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
580 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
581 lmp_no_flush_capable(hcon->hdev))
582 flags = ACL_START_NO_FLUSH;
586 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
587 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM supervisory frame carrying @control.
 * Header size depends on extended vs enhanced control fields, plus FCS
 * when CRC16 is negotiated. Consumes any pending F-bit/P-bit requests
 * from conn_state. Only sends while BT_CONNECTED. The alloc-failure
 * check and a couple of braces are on elided lines. */
590 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
593 struct l2cap_hdr *lh;
594 struct l2cap_conn *conn = chan->conn;
597 if (chan->state != BT_CONNECTED)
600 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
601 hlen = L2CAP_EXT_HDR_SIZE;
603 hlen = L2CAP_ENH_HDR_SIZE;
605 if (chan->fcs == L2CAP_FCS_CRC16)
606 hlen += L2CAP_FCS_SIZE;
608 BT_DBG("chan %p, control 0x%8.8x", chan, control);
/* S-frames carry no payload, so the skb never exceeds hlen. */
610 count = min_t(unsigned int, conn->mtu, hlen);
612 control |= __set_sframe(chan);
614 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
615 control |= __set_ctrl_final(chan);
617 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
618 control |= __set_ctrl_poll(chan);
620 skb = bt_skb_alloc(count, GFP_ATOMIC);
624 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
625 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
626 lh->cid = cpu_to_le16(chan->dcid);
628 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
630 if (chan->fcs == L2CAP_FCS_CRC16) {
/* FCS covers the basic header + control field, not the FCS itself. */
631 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
632 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
635 skb->priority = HCI_PRIO_MAX;
636 l2cap_do_send(chan, skb);
/* Send RNR when the local receiver is busy (remembering we did, via
 * CONN_RNR_SENT), otherwise RR; always acknowledges up to buffer_seq. */
639 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
641 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
642 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
643 set_bit(CONN_RNR_SENT, &chan->conn_state);
645 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
647 control |= __set_reqseq(chan, chan->buffer_seq);
649 l2cap_send_sframe(chan, control);
/* True when no Connect Request is outstanding for this channel. */
652 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
654 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment. If the remote feature mask is already
 * known (info request sent and done), send a Connect Request once
 * security passes and none is pending. Otherwise first issue an
 * Information Request for the feature mask and arm the info timer.
 * req.psm assignment and some braces are on elided lines. */
657 static void l2cap_do_start(struct l2cap_chan *chan)
659 struct l2cap_conn *conn = chan->conn;
661 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
662 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
665 if (l2cap_check_security(chan) &&
666 __l2cap_no_conn_pending(chan)) {
667 struct l2cap_conn_req req;
668 req.scid = cpu_to_le16(chan->scid);
671 chan->ident = l2cap_get_ident(conn);
672 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
674 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
678 struct l2cap_info_req req;
679 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
681 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
682 conn->info_ident = l2cap_get_ident(conn);
684 mod_timer(&conn->info_timer, jiffies +
685 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
687 l2cap_send_cmd(conn, conn->info_ident,
688 L2CAP_INFO_REQ, sizeof(req), &req);
/* True when @mode is usable given both the remote @feat_mask and our
 * local feature mask. ERTM/streaming bits are ORed into the local mask
 * under a condition that sits on an elided line (likely the disable_ertm
 * module parameter — confirm against full file). */
692 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
694 u32 local_feat_mask = l2cap_feat_mask;
696 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
699 case L2CAP_MODE_ERTM:
700 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
701 case L2CAP_MODE_STREAMING:
702 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request for @chan, cancelling all ERTM timers
 * first, then move the channel to BT_DISCONN. The use of @err (likely
 * stored as sk->sk_err) is on elided lines. */
708 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
711 struct l2cap_disconn_req req;
718 if (chan->mode == L2CAP_MODE_ERTM) {
719 __clear_retrans_timer(chan);
720 __clear_monitor_timer(chan);
721 __clear_ack_timer(chan);
724 req.dcid = cpu_to_le16(chan->dcid);
725 req.scid = cpu_to_le16(chan->scid);
726 l2cap_send_cmd(conn, l2cap_get_ident(conn),
727 L2CAP_DISCONN_REQ, sizeof(req), &req);
729 l2cap_state_change(chan, BT_DISCONN);
733 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward:
 *  - BT_CONNECT: send a Connect Request once security and no-pending
 *    checks pass; if the configured mode is unsupported on a
 *    state-2 device, close the channel instead (dropping/retaking
 *    chan_lock around the close, since close unlinks the channel);
 *  - BT_CONNECT2: answer the peer's pending Connect Request — pending/
 *    authorization-pending when deferring or security is still running,
 *    success otherwise — then, on success, start configuration by
 *    sending the first Config Request.
 * Per-channel socket locking lines are elided in this extract. */
734 static void l2cap_conn_start(struct l2cap_conn *conn)
736 struct l2cap_chan *chan, *tmp;
738 BT_DBG("conn %p", conn);
740 read_lock(&conn->chan_lock);
742 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
743 struct sock *sk = chan->sk;
747 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
752 if (chan->state == BT_CONNECT) {
753 struct l2cap_conn_req req;
755 if (!l2cap_check_security(chan) ||
756 !__l2cap_no_conn_pending(chan)) {
761 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
762 && test_bit(CONF_STATE2_DEVICE,
763 &chan->conf_state)) {
764 /* l2cap_chan_close() calls list_del(chan)
765 * so release the lock */
766 read_unlock(&conn->chan_lock);
767 l2cap_chan_close(chan, ECONNRESET);
768 read_lock(&conn->chan_lock);
773 req.scid = cpu_to_le16(chan->scid);
776 chan->ident = l2cap_get_ident(conn);
777 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
779 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
782 } else if (chan->state == BT_CONNECT2) {
783 struct l2cap_conn_rsp rsp;
785 rsp.scid = cpu_to_le16(chan->dcid);
786 rsp.dcid = cpu_to_le16(chan->scid);
788 if (l2cap_check_security(chan)) {
789 if (bt_sk(sk)->defer_setup) {
790 struct sock *parent = bt_sk(sk)->parent;
791 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
792 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
794 parent->sk_data_ready(parent, 0);
797 l2cap_state_change(chan, BT_CONFIG);
798 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
799 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
802 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
803 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
806 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
809 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
810 rsp.result != L2CAP_CR_SUCCESS) {
815 set_bit(CONF_REQ_SENT, &chan->conf_state);
816 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
817 l2cap_build_conf_req(chan, buf), buf);
818 chan->num_conf_req++;
824 read_unlock(&conn->chan_lock);
827 /* Find socket with cid and source bdaddr.
828 * Returns closest match, locked.
/* Global lookup by SCID: an exact source-address match wins
 * immediately; otherwise a BDADDR_ANY binding is remembered as the
 * closest match (saved in c1, returned on an elided line). */
830 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
832 struct l2cap_chan *c, *c1 = NULL;
834 read_lock(&chan_list_lock);
836 list_for_each_entry(c, &chan_list, global_l) {
837 struct sock *sk = c->sk;
839 if (state && c->state != state)
842 if (c->scid == cid) {
844 if (!bacmp(&bt_sk(sk)->src, src)) {
845 read_unlock(&chan_list_lock);
850 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
855 read_unlock(&chan_list_lock);
/* An LE link came up with us as slave: if a socket is listening on the
 * LE data CID, spawn a child channel for it (unless the accept backlog
 * is full), attach it to @conn, copy the link addresses, enqueue it on
 * the parent's accept queue, and mark it connected. Child-socket
 * clone/setup lines are elided in this extract. */
860 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
862 struct sock *parent, *sk;
863 struct l2cap_chan *chan, *pchan;
867 /* Check if we have socket listening on cid */
868 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
875 bh_lock_sock(parent);
877 /* Check for backlog size */
878 if (sk_acceptq_is_full(parent)) {
879 BT_DBG("backlog full %d", parent->sk_ack_backlog);
883 chan = pchan->ops->new_connection(pchan->data);
889 write_lock_bh(&conn->chan_lock);
891 hci_conn_hold(conn->hcon);
893 bacpy(&bt_sk(sk)->src, conn->src);
894 bacpy(&bt_sk(sk)->dst, conn->dst);
896 bt_accept_enqueue(parent, sk);
898 __l2cap_chan_add(conn, chan);
900 __set_chan_timer(chan, sk->sk_sndtimeo);
902 l2cap_state_change(chan, BT_CONNECTED);
903 parent->sk_data_ready(parent, 0);
905 write_unlock_bh(&conn->chan_lock);
908 bh_unlock_sock(parent);
/* Mark @sk's channel fully established: reset configuration state,
 * stop the guard timer, go BT_CONNECTED and wake the socket; if there
 * is an accepting parent, notify it too (its NULL-check is elided). */
911 static void l2cap_chan_ready(struct sock *sk)
913 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
914 struct sock *parent = bt_sk(sk)->parent;
916 BT_DBG("sk %p, parent %p", sk, parent);
918 chan->conf_state = 0;
919 __clear_chan_timer(chan);
921 l2cap_state_change(chan, BT_CONNECTED);
922 sk->sk_state_change(sk);
925 parent->sk_data_ready(parent, 0);
/* The underlying HCI link is up. For incoming LE links, spawn listening
 * children; for outgoing LE links, start SMP security. Then walk all
 * channels: LE channels become ready once SMP passes, non-connection-
 * oriented channels are marked connected immediately, and ACL channels
 * waiting in BT_CONNECT proceed with l2cap_do_start(). Per-channel
 * socket lock lines are elided. */
928 static void l2cap_conn_ready(struct l2cap_conn *conn)
930 struct l2cap_chan *chan;
932 BT_DBG("conn %p", conn);
934 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
935 l2cap_le_conn_ready(conn);
937 if (conn->hcon->out && conn->hcon->type == LE_LINK)
938 smp_conn_security(conn, conn->hcon->pending_sec_level);
940 read_lock(&conn->chan_lock);
942 list_for_each_entry(chan, &conn->chan_l, list) {
943 struct sock *sk = chan->sk;
947 if (conn->hcon->type == LE_LINK) {
948 if (smp_conn_security(conn, chan->sec_level))
949 l2cap_chan_ready(sk);
951 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
952 __clear_chan_timer(chan);
953 l2cap_state_change(chan, BT_CONNECTED);
954 sk->sk_state_change(sk);
956 } else if (chan->state == BT_CONNECT)
957 l2cap_do_start(chan);
962 read_unlock(&conn->chan_lock);
965 /* Notify sockets that we cannot guaranty reliability anymore */
/* Raise @err on every channel that demanded reliable delivery
 * (FLAG_FORCE_RELIABLE); the sk_err assignment is on an elided line. */
966 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
968 struct l2cap_chan *chan;
970 BT_DBG("conn %p", conn);
972 read_lock(&conn->chan_lock);
974 list_for_each_entry(chan, &conn->chan_l, list) {
975 struct sock *sk = chan->sk;
977 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
981 read_unlock(&conn->chan_lock);
/* Information-request timer expired: give up waiting for the remote
 * feature mask, mark the exchange done, and let pending channels
 * proceed with connection setup anyway. */
984 static void l2cap_info_timeout(unsigned long arg)
986 struct l2cap_conn *conn = (void *) arg;
988 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
989 conn->info_ident = 0;
991 l2cap_conn_start(conn);
/* Tear down the L2CAP state of @hcon: free any partial reassembly skb,
 * delete every channel with @err, drop the HCI channel, cancel the
 * info timer if armed, and clean up SMP (timer + context) if pending.
 * Finally detach and free conn (kfree on an elided line). Per-channel
 * socket locking lines are elided. */
994 static void l2cap_conn_del(struct hci_conn *hcon, int err)
996 struct l2cap_conn *conn = hcon->l2cap_data;
997 struct l2cap_chan *chan, *l;
1003 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1005 kfree_skb(conn->rx_skb);
1008 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1011 l2cap_chan_del(chan, err);
1013 chan->ops->close(chan->data);
1016 hci_chan_del(conn->hchan);
1018 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1019 del_timer_sync(&conn->info_timer);
1021 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1022 del_timer(&conn->security_timer);
1023 smp_chan_destroy(conn);
1026 hcon->l2cap_data = NULL;
/* SMP security procedure timed out: drop the whole connection. */
1030 static void security_timeout(unsigned long arg)
1032 struct l2cap_conn *conn = (void *) arg;
1034 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) L2CAP connection object for @hcon.
 * Allocates an HCI channel first and backs out of it if the conn
 * allocation fails. MTU comes from the controller's LE or ACL MTU
 * depending on link type; LE links get a security timer, ACL links an
 * info timer. disc_reason defaults to 0x13 (remote user terminated).
 * The early-return-if-already-present check is on an elided line. */
1037 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1039 struct l2cap_conn *conn = hcon->l2cap_data;
1040 struct hci_chan *hchan;
1045 hchan = hci_chan_create(hcon);
1049 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1051 hci_chan_del(hchan);
1055 hcon->l2cap_data = conn;
1057 conn->hchan = hchan;
1059 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1061 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1062 conn->mtu = hcon->hdev->le_mtu;
1064 conn->mtu = hcon->hdev->acl_mtu;
1066 conn->src = &hcon->hdev->bdaddr;
1067 conn->dst = &hcon->dst;
1069 conn->feat_mask = 0;
1071 spin_lock_init(&conn->lock);
1072 rwlock_init(&conn->chan_lock);
1074 INIT_LIST_HEAD(&conn->chan_l);
1076 if (hcon->type == LE_LINK)
1077 setup_timer(&conn->security_timer, security_timeout,
1078 (unsigned long) conn);
1080 setup_timer(&conn->info_timer, l2cap_info_timeout,
1081 (unsigned long) conn);
1083 conn->disc_reason = 0x13;
/* Locked wrapper: attach @chan to @conn under a write-held chan_lock. */
1088 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1090 write_lock_bh(&conn->chan_lock);
1091 __l2cap_chan_add(conn, chan);
1092 write_unlock_bh(&conn->chan_lock);
1095 /* ---- Socket interface ---- */
1097 /* Find socket with psm and source bdaddr.
1098 * Returns closest match.
/* Global lookup by PSM, mirroring l2cap_global_chan_by_scid(): exact
 * source-address match returns immediately; a BDADDR_ANY binding is
 * kept as fallback in c1 (returned on an elided line). */
1100 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1102 struct l2cap_chan *c, *c1 = NULL;
1104 read_lock(&chan_list_lock);
1106 list_for_each_entry(c, &chan_list, global_l) {
1107 struct sock *sk = c->sk;
1109 if (state && c->state != state)
1112 if (c->psm == psm) {
1114 if (!bacmp(&bt_sk(sk)->src, src)) {
1115 read_unlock(&chan_list_lock);
1120 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1125 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan: route to a local adapter,
 * create the HCI connection (LE when dcid is the LE data CID, ACL
 * otherwise), attach the channel, and either finish immediately (the
 * link is already up) or wait in BT_CONNECT under the guard timer.
 * Error unwinding (hcon put, conn failure) sits on elided lines.
 * Returns 0 or a negative errno. */
1130 int l2cap_chan_connect(struct l2cap_chan *chan)
1132 struct sock *sk = chan->sk;
1133 bdaddr_t *src = &bt_sk(sk)->src;
1134 bdaddr_t *dst = &bt_sk(sk)->dst;
1135 struct l2cap_conn *conn;
1136 struct hci_conn *hcon;
1137 struct hci_dev *hdev;
1141 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1144 hdev = hci_get_route(dst, src);
1146 return -EHOSTUNREACH;
1148 hci_dev_lock_bh(hdev);
1150 auth_type = l2cap_get_auth_type(chan);
1152 if (chan->dcid == L2CAP_CID_LE_DATA)
1153 hcon = hci_connect(hdev, LE_LINK, dst,
1154 chan->sec_level, auth_type);
1156 hcon = hci_connect(hdev, ACL_LINK, dst,
1157 chan->sec_level, auth_type);
1160 err = PTR_ERR(hcon);
1164 conn = l2cap_conn_add(hcon, 0);
1171 /* Update source addr of the socket */
1172 bacpy(src, conn->src);
1174 l2cap_chan_add(conn, chan);
1176 l2cap_state_change(chan, BT_CONNECT);
1177 __set_chan_timer(chan, sk->sk_sndtimeo);
1179 if (hcon->state == BT_CONNECTED) {
1180 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1181 __clear_chan_timer(chan);
1182 if (l2cap_check_security(chan))
1183 l2cap_state_change(chan, BT_CONNECTED);
1185 l2cap_do_start(chan);
1191 hci_dev_unlock_bh(hdev);
/* Block (interruptibly) until every outstanding ERTM I-frame has been
 * acknowledged or the channel goes away. Standard wait-queue loop:
 * bail on pending signal (sock_intr_errno) or socket error; the timeo
 * initialisation and release/re-lock of the socket around
 * schedule_timeout() are on elided lines. */
1196 int __l2cap_wait_ack(struct sock *sk)
1198 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1199 DECLARE_WAITQUEUE(wait, current);
1203 add_wait_queue(sk_sleep(sk), &wait);
1204 set_current_state(TASK_INTERRUPTIBLE);
1205 while (chan->unacked_frames > 0 && chan->conn) {
1209 if (signal_pending(current)) {
1210 err = sock_intr_errno(timeo);
1215 timeo = schedule_timeout(timeo);
1217 set_current_state(TASK_INTERRUPTIBLE);
1219 err = sock_error(sk);
1223 set_current_state(TASK_RUNNING);
1224 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the peer kept us waiting in WAIT_F. If the
 * retry budget (remote_max_tx) is exhausted, disconnect; otherwise
 * re-arm the monitor timer and poll again with RR/RNR + P-bit. */
1228 static void l2cap_monitor_timeout(unsigned long arg)
1230 struct l2cap_chan *chan = (void *) arg;
1231 struct sock *sk = chan->sk;
1233 BT_DBG("chan %p", chan);
1236 if (chan->retry_count >= chan->remote_max_tx) {
1237 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1242 chan->retry_count++;
1243 __set_monitor_timer(chan);
1245 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: an I-frame went unacknowledged. Start the
 * monitor/poll cycle: retry_count=1, arm the monitor timer, enter the
 * WAIT_F state and poll the peer with P-bit set. */
1249 static void l2cap_retrans_timeout(unsigned long arg)
1251 struct l2cap_chan *chan = (void *) arg;
1252 struct sock *sk = chan->sk;
1254 BT_DBG("chan %p", chan);
1257 chan->retry_count = 1;
1258 __set_monitor_timer(chan);
1260 set_bit(CONN_WAIT_F, &chan->conn_state);
1262 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Free transmitted I-frames from the head of tx_q up to (but not
 * including) expected_ack_seq; the kfree_skb of each dequeued frame is
 * on an elided line. Clears the retransmission timer once nothing
 * remains unacknowledged. */
1266 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1268 struct sk_buff *skb;
1270 while ((skb = skb_peek(&chan->tx_q)) &&
1271 chan->unacked_frames) {
1272 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1275 skb = skb_dequeue(&chan->tx_q);
1278 chan->unacked_frames--;
1281 if (!chan->unacked_frames)
1282 __clear_retrans_timer(chan);
/* Streaming-mode transmit: drain tx_q, stamping each frame's control
 * field with the next TX sequence number, recomputing the trailing
 * CRC16 FCS in place when enabled, and handing the frame to HCI.
 * Streaming mode keeps no copy for retransmission. */
1285 static void l2cap_streaming_send(struct l2cap_chan *chan)
1287 struct sk_buff *skb;
1291 while ((skb = skb_dequeue(&chan->tx_q))) {
1292 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1293 control |= __set_txseq(chan, chan->next_tx_seq);
1294 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1296 if (chan->fcs == L2CAP_FCS_CRC16) {
1297 fcs = crc16(0, (u8 *)skb->data,
1298 skb->len - L2CAP_FCS_SIZE);
1299 put_unaligned_le16(fcs,
1300 skb->data + skb->len - L2CAP_FCS_SIZE);
1303 l2cap_do_send(chan, skb);
1305 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1309 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1311 struct sk_buff *skb, *tx_skb;
1315 skb = skb_peek(&chan->tx_q);
1320 if (bt_cb(skb)->tx_seq == tx_seq)
1323 if (skb_queue_is_last(&chan->tx_q, skb))
1326 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1328 if (chan->remote_max_tx &&
1329 bt_cb(skb)->retries == chan->remote_max_tx) {
1330 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1334 tx_skb = skb_clone(skb, GFP_ATOMIC);
1335 bt_cb(skb)->retries++;
1337 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1338 control &= __get_sar_mask(chan);
1340 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1341 control |= __set_ctrl_final(chan);
1343 control |= __set_reqseq(chan, chan->buffer_seq);
1344 control |= __set_txseq(chan, tx_seq);
1346 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1348 if (chan->fcs == L2CAP_FCS_CRC16) {
1349 fcs = crc16(0, (u8 *)tx_skb->data,
1350 tx_skb->len - L2CAP_FCS_SIZE);
1351 put_unaligned_le16(fcs,
1352 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1355 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send frames from tx_send_head while the TX window
 * has room. Each frame is cloned (the original stays queued for
 * possible retransmission), its control field rebuilt (SAR preserved,
 * current reqseq, next_tx_seq, optional F-bit), FCS recomputed, and
 * sent; the retransmission timer is (re)armed per frame. Bookkeeping:
 * tx_seq recorded on the original, unacked_frames counted once per
 * frame (first transmission only), frames_sent tallied, and
 * tx_send_head advanced. Note the FCS lines use skb->data with
 * tx_skb->len — equivalent here because a clone shares its data buffer.
 * Returns a count whose accumulation line is elided (likely
 * nsent/frames sent). */
1358 static int l2cap_ertm_send(struct l2cap_chan *chan)
1360 struct sk_buff *skb, *tx_skb;
1365 if (chan->state != BT_CONNECTED)
1368 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1370 if (chan->remote_max_tx &&
1371 bt_cb(skb)->retries == chan->remote_max_tx) {
1372 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1376 tx_skb = skb_clone(skb, GFP_ATOMIC);
1378 bt_cb(skb)->retries++;
1380 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1381 control &= __get_sar_mask(chan);
1383 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1384 control |= __set_ctrl_final(chan);
1386 control |= __set_reqseq(chan, chan->buffer_seq);
1387 control |= __set_txseq(chan, chan->next_tx_seq);
1389 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1391 if (chan->fcs == L2CAP_FCS_CRC16) {
1392 fcs = crc16(0, (u8 *)skb->data,
1393 tx_skb->len - L2CAP_FCS_SIZE);
1394 put_unaligned_le16(fcs, skb->data +
1395 tx_skb->len - L2CAP_FCS_SIZE);
1398 l2cap_do_send(chan, tx_skb);
1400 __set_retrans_timer(chan);
1402 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1404 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1406 if (bt_cb(skb)->retries == 1)
1407 chan->unacked_frames++;
1409 chan->frames_sent++;
1411 if (skb_queue_is_last(&chan->tx_q, skb))
1412 chan->tx_send_head = NULL;
1414 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Go-back-N retransmission: rewind tx_send_head to the queue head and
 * next_tx_seq to the last acknowledged sequence, then resend via
 * l2cap_ertm_send(). Returns its result. */
1422 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1426 if (!skb_queue_empty(&chan->tx_q))
1427 chan->tx_send_head = chan->tx_q.next;
1429 chan->next_tx_seq = chan->expected_ack_seq;
1430 ret = l2cap_ertm_send(chan);
/* Acknowledge received I-frames: send RNR if locally busy; otherwise
 * try to piggyback the ack on pending I-frames (l2cap_ertm_send > 0
 * means something was sent) and fall back to an explicit RR. */
1434 static void l2cap_send_ack(struct l2cap_chan *chan)
1438 control |= __set_reqseq(chan, chan->buffer_seq);
1440 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1441 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1442 set_bit(CONN_RNR_SENT, &chan->conn_state);
1443 l2cap_send_sframe(chan, control);
1447 if (l2cap_ertm_send(chan) > 0)
1450 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1451 l2cap_send_sframe(chan, control);
/* Send an SREJ S-frame with the F-bit set, requesting the sequence
 * number of the last (tail) entry on the SREJ list. */
1454 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1456 struct srej_list *tail;
1459 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1460 control |= __set_ctrl_final(chan);
1462 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1463 control |= __set_reqseq(chan, tail->tx_seq);
1465 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb: the first @count bytes
 * go into skb's linear area, the remainder into a chain of fragment
 * skbs each capped at conn->mtu. Fragments inherit the head skb's
 * priority. Error paths, the len/count decrements and the skb length
 * accounting are on elided lines. Returns 0 or a negative errno
 * (-EFAULT on copy failure, per the memcpy_fromiovec checks). */
1468 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1470 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1471 struct sk_buff **frag;
1474 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1480 /* Continuation fragments (no L2CAP header) */
1481 frag = &skb_shinfo(skb)->frag_list;
1483 count = min_t(unsigned int, conn->mtu, len);
1485 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1488 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1491 (*frag)->priority = skb->priority;
1496 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header followed by
 * the 2-byte PSM, then the user payload copied from @msg. The skb is
 * sized to min(conn->mtu - hlen, len); any overflow becomes fragment
 * skbs inside l2cap_skbuff_fromiovec(). Frees the skb and returns
 * ERR_PTR on copy failure (kfree_skb on an elided line). */
1502 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1503 struct msghdr *msg, size_t len,
1506 struct sock *sk = chan->sk;
1507 struct l2cap_conn *conn = chan->conn;
1508 struct sk_buff *skb;
1509 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1510 struct l2cap_hdr *lh;
1512 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
1514 count = min_t(unsigned int, (conn->mtu - hlen), len);
1515 skb = bt_skb_send_alloc(sk, count + hlen,
1516 msg->msg_flags & MSG_DONTWAIT, &err);
1518 return ERR_PTR(err);
1520 skb->priority = priority;
1522 /* Create L2CAP header */
1523 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1524 lh->cid = cpu_to_le16(chan->dcid);
1525 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1526 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1528 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1529 if (unlikely(err < 0)) {
1531 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header plus payload,
 * no PSM, control field, or FCS. Same allocation/fragmentation scheme
 * as the connectionless variant; frees the skb and returns ERR_PTR on
 * copy failure (kfree_skb on an elided line). */
1536 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1537 struct msghdr *msg, size_t len,
1540 struct sock *sk = chan->sk;
1541 struct l2cap_conn *conn = chan->conn;
1542 struct sk_buff *skb;
1543 int err, count, hlen = L2CAP_HDR_SIZE;
1544 struct l2cap_hdr *lh;
1546 BT_DBG("sk %p len %d", sk, (int)len);
1548 count = min_t(unsigned int, (conn->mtu - hlen), len);
1549 skb = bt_skb_send_alloc(sk, count + hlen,
1550 msg->msg_flags & MSG_DONTWAIT, &err);
1552 return ERR_PTR(err);
1554 skb->priority = priority;
1556 /* Create L2CAP header */
1557 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1558 lh->cid = cpu_to_le16(chan->dcid);
1559 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1561 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1562 if (unlikely(err < 0)) {
1564 return ERR_PTR(err);
/*
 * Build an ERTM/Streaming I-frame PDU: L2CAP header, enhanced or
 * extended control field, optional SDU length (first segment of a
 * segmented SDU), payload, and an FCS placeholder when CRC16 is
 * enabled.  Returns the skb or an ERR_PTR on failure.
 */
1569 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1570 struct msghdr *msg, size_t len,
1571 u32 control, u16 sdulen)
1573 struct sock *sk = chan->sk;
1574 struct l2cap_conn *conn = chan->conn;
1575 struct sk_buff *skb;
1576 int err, count, hlen;
1577 struct l2cap_hdr *lh;
1579 BT_DBG("sk %p len %d", sk, (int)len);
1582 return ERR_PTR(-ENOTCONN);
/* Extended control field is 4 bytes, enhanced is 2 */
1584 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1585 hlen = L2CAP_EXT_HDR_SIZE;
1587 hlen = L2CAP_ENH_HDR_SIZE;
1590 hlen += L2CAP_SDULEN_SIZE;
1592 if (chan->fcs == L2CAP_FCS_CRC16)
1593 hlen += L2CAP_FCS_SIZE;
1595 count = min_t(unsigned int, (conn->mtu - hlen), len);
1596 skb = bt_skb_send_alloc(sk, count + hlen,
1597 msg->msg_flags & MSG_DONTWAIT, &err);
1599 return ERR_PTR(err);
1601 /* Create L2CAP header */
1602 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1603 lh->cid = cpu_to_le16(chan->dcid);
1604 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* Control field width depends on FLAG_EXT_CTRL via __ctrl_size() */
1606 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1609 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1611 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1612 if (unlikely(err < 0)) {
1614 return ERR_PTR(err);
/* FCS is written as 0 here; the real CRC is filled in on transmit */
1617 if (chan->fcs == L2CAP_FCS_CRC16)
1618 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1620 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the remote MPS into a START / CONTINUE* /
 * END sequence of I-frames.  Frames are built on a local queue first so
 * that a mid-sequence allocation failure can purge the partial SDU
 * without touching chan->tx_q; on success the whole run is spliced onto
 * the tx queue atomically.
 */
1624 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1626 struct sk_buff *skb;
1627 struct sk_buff_head sar_queue;
1631 skb_queue_head_init(&sar_queue);
/* First segment: SAR=START, carries the total SDU length */
1632 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1633 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1635 return PTR_ERR(skb);
1637 __skb_queue_tail(&sar_queue, skb);
1638 len -= chan->remote_mps;
1639 size += chan->remote_mps;
1644 if (len > chan->remote_mps) {
1645 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1646 buflen = chan->remote_mps;
1648 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
/* Middle/last segments carry no SDU length field (sdulen = 0) */
1652 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1654 skb_queue_purge(&sar_queue);
1655 return PTR_ERR(skb);
1658 __skb_queue_tail(&sar_queue, skb);
1662 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1663 if (chan->tx_send_head == NULL)
1664 chan->tx_send_head = sar_queue.next;
/*
 * Entry point for sending user data on a channel.  Dispatches on
 * channel type/mode: connectionless channels and Basic mode send a
 * single PDU immediately; ERTM and Streaming queue I-frames (segmenting
 * when the SDU exceeds the remote MPS) and then kick the appropriate
 * transmit engine.  Returns bytes sent or a negative errno.
 */
1669 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1672 struct sk_buff *skb;
1676 /* Connectionless channel */
1677 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1678 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1680 return PTR_ERR(skb);
1682 l2cap_do_send(chan, skb);
1686 switch (chan->mode) {
1687 case L2CAP_MODE_BASIC:
1688 /* Check outgoing MTU */
1689 if (len > chan->omtu)
1692 /* Create a basic PDU */
1693 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1695 return PTR_ERR(skb);
1697 l2cap_do_send(chan, skb);
1701 case L2CAP_MODE_ERTM:
1702 case L2CAP_MODE_STREAMING:
1703 /* Entire SDU fits into one PDU */
1704 if (len <= chan->remote_mps) {
1705 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1706 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1709 return PTR_ERR(skb);
1711 __skb_queue_tail(&chan->tx_q, skb);
1713 if (chan->tx_send_head == NULL)
1714 chan->tx_send_head = skb;
1717 /* Segment SDU into multiples PDUs */
1718 err = l2cap_sar_segment_sdu(chan, msg, len);
1723 if (chan->mode == L2CAP_MODE_STREAMING) {
1724 l2cap_streaming_send(chan);
/* Defer transmission while remote is busy and we await an F-bit */
1729 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1730 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1735 err = l2cap_ertm_send(chan);
1742 BT_DBG("bad state %1.1x", chan->mode);
1749 /* Copy frame to all raw sockets on that connection */
1750 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1752 struct sk_buff *nskb;
1753 struct l2cap_chan *chan;
1755 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock and hand a
 * clone of the frame to every RAW channel's recv callback. */
1757 read_lock(&conn->chan_lock);
1758 list_for_each_entry(chan, &conn->chan_l, list) {
1759 struct sock *sk = chan->sk;
1760 if (chan->chan_type != L2CAP_CHAN_RAW)
1763 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: may run in softirq context; clone failure is skipped */
1766 nskb = skb_clone(skb, GFP_ATOMIC);
1770 if (chan->ops->recv(chan->data, nskb))
1773 read_unlock(&conn->chan_lock);
1776 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling command skb: L2CAP header (CID = signalling
 * channel, LE or BR/EDR variant), command header (code/ident/len) and
 * @dlen bytes of @data.  Payload beyond conn->mtu is placed in
 * continuation fragments on the frag_list.
 */
1777 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1778 u8 code, u8 ident, u16 dlen, void *data)
1780 struct sk_buff *skb, **frag;
1781 struct l2cap_cmd_hdr *cmd;
1782 struct l2cap_hdr *lh;
1785 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1786 conn, code, ident, dlen);
1788 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1789 count = min_t(unsigned int, conn->mtu, len);
1791 skb = bt_skb_alloc(count, GFP_ATOMIC);
1795 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1796 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the dedicated LE signalling CID */
1798 if (conn->hcon->type == LE_LINK)
1799 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1801 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1803 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1806 cmd->len = cpu_to_le16(dlen);
1809 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1810 memcpy(skb_put(skb, count), data, count);
1816 /* Continuation fragments (no L2CAP header) */
1817 frag = &skb_shinfo(skb)->frag_list;
1819 count = min_t(unsigned int, conn->mtu, len);
1821 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1825 memcpy(skb_put(*frag, count), data, count);
1830 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr, returning its consumed
 * length and storing type/olen/val for the caller.  1/2/4-byte values
 * are read (unaligned, little-endian); larger options return a pointer
 * to the raw bytes in *val.
 */
1840 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1842 struct l2cap_conf_opt *opt = *ptr;
1845 len = L2CAP_CONF_OPT_SIZE + opt->len;
1853 *val = *((u8 *) opt->val);
1857 *val = get_unaligned_le16(opt->val);
1861 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer, not a value */
1865 *val = (unsigned long) opt->val;
1869 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append a configuration option (type/len/value) at *ptr and advance
 * the cursor.  1/2/4-byte values are stored little-endian; for larger
 * options @val is interpreted as a pointer to the raw bytes.
 */
1873 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1875 struct l2cap_conf_opt *opt = *ptr;
1877 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1884 *((u8 *) opt->val) = val;
1888 put_unaligned_le16(val, opt->val);
1892 put_unaligned_le32(val, opt->val);
/* Variable-length option: val carries a pointer to the data */
1896 memcpy(opt->val, (void *) val, len);
1900 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters.  ERTM uses the channel's service
 * type; Streaming always advertises best-effort.
 */
1903 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1905 struct l2cap_conf_efs efs;
1907 switch(chan->mode) {
1908 case L2CAP_MODE_ERTM:
1909 efs.id = chan->local_id;
1910 efs.stype = chan->local_stype;
1911 efs.msdu = cpu_to_le16(chan->local_msdu);
1912 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
/* ERTM uses spec default access latency and flush timeout */
1913 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1914 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1917 case L2CAP_MODE_STREAMING:
1919 efs.stype = L2CAP_SERV_BESTEFFORT;
1920 efs.msdu = cpu_to_le16(chan->local_msdu);
1921 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1930 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1931 (unsigned long) &efs);
/*
 * ERTM ack timer callback: send a pending acknowledgement for the
 * channel.  Runs in timer (softirq) context, hence the bh socket lock.
 */
1934 static void l2cap_ack_timeout(unsigned long arg)
1936 struct l2cap_chan *chan = (void *) arg;
1938 bh_lock_sock(chan->sk);
1939 l2cap_send_ack(chan);
1940 bh_unlock_sock(chan->sk);
/*
 * Initialise ERTM per-channel state once configuration completes:
 * reset sequence counters, arm the retransmission/monitor/ack timers,
 * set up the SREJ queues and route socket backlog through the ERTM
 * data receive path.
 */
1943 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1945 struct sock *sk = chan->sk;
1947 chan->expected_ack_seq = 0;
1948 chan->unacked_frames = 0;
1949 chan->buffer_seq = 0;
1950 chan->num_acked = 0;
1951 chan->frames_sent = 0;
1953 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1954 (unsigned long) chan);
1955 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1956 (unsigned long) chan);
1957 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1959 skb_queue_head_init(&chan->srej_q);
1961 INIT_LIST_HEAD(&chan->srej_l);
/* Frames that arrive while the socket is user-locked are replayed
 * through the ERTM receive handler */
1964 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/*
 * Choose the channel mode: keep ERTM/Streaming only if the remote's
 * feature mask supports it, otherwise fall back to Basic mode.
 */
1967 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1970 case L2CAP_MODE_STREAMING:
1971 case L2CAP_MODE_ERTM:
1972 if (l2cap_mode_supported(mode, remote_feat_mask))
1976 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with High Speed enabled and the
 * remote advertising L2CAP_FEAT_EXT_WINDOW. */
1980 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1982 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Spec is usable only with High Speed enabled and the
 * remote advertising L2CAP_FEAT_EXT_FLOW. */
1985 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
1987 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/*
 * Pick the TX window: if the requested window exceeds the default and
 * extended window size is supported, switch to the extended control
 * field; otherwise clamp the window to the classic default.
 */
1990 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1992 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1993 __l2cap_ews_supported(chan)) {
1994 /* use extended control field */
1995 set_bit(FLAG_EXT_CTRL, &chan->flags);
1996 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
1998 chan->tx_win = min_t(u16, chan->tx_win,
1999 L2CAP_DEFAULT_TX_WINDOW);
2000 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/*
 * Build an outgoing Configuration Request into @data for @chan.
 * Emits MTU (when non-default) and a mode-specific RFC option; ERTM
 * and Streaming additionally negotiate EFS, FCS and extended window
 * options.  Returns the number of bytes written (via ptr - data,
 * outside this excerpt).
 */
2004 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2006 struct l2cap_conf_req *req = data;
2007 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2008 void *ptr = req->data;
2011 BT_DBG("chan %p", chan);
/* Only (re)select the mode on the very first request/response */
2013 if (chan->num_conf_req || chan->num_conf_rsp)
2016 switch (chan->mode) {
2017 case L2CAP_MODE_STREAMING:
2018 case L2CAP_MODE_ERTM:
2019 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2022 if (__l2cap_efs_supported(chan))
2023 set_bit(FLAG_EFS_ENABLE, &chan->flags)
2027 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2032 if (chan->imtu != L2CAP_DEFAULT_MTU)
2033 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2035 switch (chan->mode) {
2036 case L2CAP_MODE_BASIC:
/* Skip the RFC option entirely if the remote knows neither ERTM
 * nor Streaming; Basic is then implicit */
2037 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2038 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2041 rfc.mode = L2CAP_MODE_BASIC;
2043 rfc.max_transmit = 0;
2044 rfc.retrans_timeout = 0;
2045 rfc.monitor_timeout = 0;
2046 rfc.max_pdu_size = 0;
2048 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2049 (unsigned long) &rfc);
2052 case L2CAP_MODE_ERTM:
2053 rfc.mode = L2CAP_MODE_ERTM;
2054 rfc.max_transmit = chan->max_tx;
/* Timeouts are filled in by the responder side */
2055 rfc.retrans_timeout = 0;
2056 rfc.monitor_timeout = 0;
2058 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2059 L2CAP_EXT_HDR_SIZE -
2062 rfc.max_pdu_size = cpu_to_le16(size);
2064 l2cap_txwin_setup(chan);
2066 rfc.txwin_size = min_t(u16, chan->tx_win,
2067 L2CAP_DEFAULT_TX_WINDOW);
2069 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2070 (unsigned long) &rfc);
2072 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2073 l2cap_add_opt_efs(&ptr, chan);
2075 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2078 if (chan->fcs == L2CAP_FCS_NONE ||
2079 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2080 chan->fcs = L2CAP_FCS_NONE;
2081 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
/* EWS option advertises the (larger) extended window */
2084 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2085 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2089 case L2CAP_MODE_STREAMING:
2090 rfc.mode = L2CAP_MODE_STREAMING;
2092 rfc.max_transmit = 0;
2093 rfc.retrans_timeout = 0;
2094 rfc.monitor_timeout = 0;
2096 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2097 L2CAP_EXT_HDR_SIZE -
2100 rfc.max_pdu_size = cpu_to_le16(size);
2102 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2103 (unsigned long) &rfc);
2105 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2106 l2cap_add_opt_efs(&ptr, chan);
2108 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2111 if (chan->fcs == L2CAP_FCS_NONE ||
2112 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2113 chan->fcs = L2CAP_FCS_NONE;
2114 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2119 req->dcid = cpu_to_le16(chan->dcid);
2120 req->flags = cpu_to_le16(0);
/*
 * Parse the accumulated remote Configuration Request (chan->conf_req /
 * chan->conf_len) and build the response into @data.  Unknown
 * non-hint options are echoed back with result UNKNOWN; mode, MTU,
 * RFC, FCS, EFS and EWS options are validated and the channel's
 * remote-side parameters are committed when the result is SUCCESS.
 * Returns the response length or -ECONNREFUSED on an unresolvable
 * mode/EFS conflict.
 */
2127 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2128 struct l2cap_conf_rsp *rsp = data;
2129 void *ptr = rsp->data;
2130 void *req = chan->conf_req;
2131 int len = chan->conf_len;
2132 int type, hint, olen;
2133 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2134 struct l2cap_conf_efs efs;
2136 u16 mtu = L2CAP_DEFAULT_MTU;
2137 u16 result = L2CAP_CONF_SUCCESS;
2140 BT_DBG("chan %p", chan);
/* First pass: walk every option in the request */
2142 while (len >= L2CAP_CONF_OPT_SIZE) {
2143 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; others must be understood */
2145 hint = type & L2CAP_CONF_HINT;
2146 type &= L2CAP_CONF_MASK;
2149 case L2CAP_CONF_MTU:
2153 case L2CAP_CONF_FLUSH_TO:
2154 chan->flush_to = val;
2157 case L2CAP_CONF_QOS:
2160 case L2CAP_CONF_RFC:
2161 if (olen == sizeof(rfc))
2162 memcpy(&rfc, (void *) val, olen);
2165 case L2CAP_CONF_FCS:
2166 if (val == L2CAP_FCS_NONE)
2167 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2170 case L2CAP_CONF_EFS:
2172 if (olen == sizeof(efs))
2173 memcpy(&efs, (void *) val, olen);
2176 case L2CAP_CONF_EWS:
2178 return -ECONNREFUSED;
2180 set_bit(FLAG_EXT_CTRL, &chan->flags);
2181 set_bit(CONF_EWS_RECV, &chan->conf_state);
2182 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2183 chan->remote_tx_win = val;
/* Unknown non-hint option: list its type in the UNKNOWN response */
2190 result = L2CAP_CONF_UNKNOWN;
2191 *((u8 *) ptr++) = type;
2196 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2199 switch (chan->mode) {
2200 case L2CAP_MODE_STREAMING:
2201 case L2CAP_MODE_ERTM:
2202 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2203 chan->mode = l2cap_select_mode(rfc.mode,
2204 chan->conn->feat_mask);
2209 if (__l2cap_efs_supported(chan))
2210 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2212 return -ECONNREFUSED;
2215 if (chan->mode != rfc.mode)
2216 return -ECONNREFUSED;
/* Mode mismatch: reject with our mode; refuse outright if this is
 * already our second response */
2222 if (chan->mode != rfc.mode) {
2223 result = L2CAP_CONF_UNACCEPT;
2224 rfc.mode = chan->mode;
2226 if (chan->num_conf_rsp == 1)
2227 return -ECONNREFUSED;
2229 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2230 sizeof(rfc), (unsigned long) &rfc);
2233 if (result == L2CAP_CONF_SUCCESS) {
2234 /* Configure output options and let the other side know
2235 * which ones we don't like. */
2237 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2238 result = L2CAP_CONF_UNACCEPT;
2241 set_bit(CONF_MTU_DONE, &chan->conf_state);
2243 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* Reject an EFS whose service type conflicts with ours (unless
 * either side is NOTRAFIC) */
2246 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2247 efs.stype != L2CAP_SERV_NOTRAFIC &&
2248 efs.stype != chan->local_stype) {
2250 result = L2CAP_CONF_UNACCEPT;
2252 if (chan->num_conf_req >= 1)
2253 return -ECONNREFUSED;
2255 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2257 (unsigned long) &efs);
2259 /* Send PENDING Conf Rsp */
2260 result = L2CAP_CONF_PENDING;
2261 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2266 case L2CAP_MODE_BASIC:
2267 chan->fcs = L2CAP_FCS_NONE;
2268 set_bit(CONF_MODE_DONE, &chan->conf_state);
2271 case L2CAP_MODE_ERTM:
2272 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2273 chan->remote_tx_win = rfc.txwin_size;
2275 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2277 chan->remote_max_tx = rfc.max_transmit;
2279 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2281 L2CAP_EXT_HDR_SIZE -
2284 rfc.max_pdu_size = cpu_to_le16(size);
2285 chan->remote_mps = size;
/* NOTE(review): rfc.retrans_timeout/monitor_timeout are __le16 wire
 * fields being assigned from host constants — cpu_to_le16() looks
 * intended here rather than le16_to_cpu(); byte-identical on any one
 * host but wrong for sparse/endianness annotations. Confirm against
 * upstream. */
2287 rfc.retrans_timeout =
2288 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2289 rfc.monitor_timeout =
2290 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2292 set_bit(CONF_MODE_DONE, &chan->conf_state);
2294 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2295 sizeof(rfc), (unsigned long) &rfc);
2297 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2298 chan->remote_id = efs.id;
2299 chan->remote_stype = efs.stype;
2300 chan->remote_msdu = le16_to_cpu(efs.msdu);
2301 chan->remote_flush_to =
2302 le32_to_cpu(efs.flush_to);
2303 chan->remote_acc_lat =
2304 le32_to_cpu(efs.acc_lat);
2305 chan->remote_sdu_itime =
2306 le32_to_cpu(efs.sdu_itime);
2307 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2308 sizeof(efs), (unsigned long) &efs);
2312 case L2CAP_MODE_STREAMING:
2313 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2315 L2CAP_EXT_HDR_SIZE -
2318 rfc.max_pdu_size = cpu_to_le16(size);
2319 chan->remote_mps = size;
2321 set_bit(CONF_MODE_DONE, &chan->conf_state);
2323 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2324 sizeof(rfc), (unsigned long) &rfc);
2329 result = L2CAP_CONF_UNACCEPT;
2331 memset(&rfc, 0, sizeof(rfc));
2332 rfc.mode = chan->mode;
2335 if (result == L2CAP_CONF_SUCCESS)
2336 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2338 rsp->scid = cpu_to_le16(chan->dcid);
2339 rsp->result = cpu_to_le16(result);
2340 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse a remote Configuration Response @rsp and build a follow-up
 * Configuration Request into @data, adjusting local parameters (MTU,
 * flush timeout, RFC, EWS) to what the remote proposed.  *result may
 * be downgraded to UNACCEPT on a too-small MTU.  Returns the request
 * length or -ECONNREFUSED on a forbidden mode change.
 */
2345 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2347 struct l2cap_conf_req *req = data;
2348 void *ptr = req->data;
2351 struct l2cap_conf_rfc rfc;
2353 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2355 while (len >= L2CAP_CONF_OPT_SIZE) {
2356 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2359 case L2CAP_CONF_MTU:
/* Below the spec minimum: refuse but counter-offer the minimum */
2360 if (val < L2CAP_DEFAULT_MIN_MTU) {
2361 *result = L2CAP_CONF_UNACCEPT;
2362 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2365 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2368 case L2CAP_CONF_FLUSH_TO:
2369 chan->flush_to = val;
2370 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2374 case L2CAP_CONF_RFC:
2375 if (olen == sizeof(rfc))
2376 memcpy(&rfc, (void *)val, olen);
/* STATE2 devices never change mode mid-negotiation */
2378 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2379 rfc.mode != chan->mode)
2380 return -ECONNREFUSED;
2384 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2385 sizeof(rfc), (unsigned long) &rfc);
2388 case L2CAP_CONF_EWS:
2389 chan->tx_win = min_t(u16, val,
2390 L2CAP_DEFAULT_EXT_WINDOW);
2391 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2397 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2398 return -ECONNREFUSED;
2400 chan->mode = rfc.mode;
/* Commit negotiated ERTM/Streaming timers and PDU size */
2402 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2404 case L2CAP_MODE_ERTM:
2405 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2406 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2407 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2409 case L2CAP_MODE_STREAMING:
2410 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2414 req->dcid = cpu_to_le16(chan->dcid);
2415 req->flags = cpu_to_le16(0x0000);
/*
 * Build a minimal Configuration Response (no options) with the given
 * @result and @flags into @data.  Returns the response length
 * (computed outside this excerpt).
 */
2420 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2422 struct l2cap_conf_rsp *rsp = data;
2423 void *ptr = rsp->data;
2425 BT_DBG("chan %p", chan);
2427 rsp->scid = cpu_to_le16(chan->dcid);
2428 rsp->result = cpu_to_le16(result);
2429 rsp->flags = cpu_to_le16(flags);
/*
 * Complete a deferred connection: send the success Connect Response
 * for the pending ident and, if we haven't already, kick off the
 * configuration phase with our first Configuration Request.
 */
2434 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2436 struct l2cap_conn_rsp rsp;
2437 struct l2cap_conn *conn = chan->conn;
2440 rsp.scid = cpu_to_le16(chan->dcid);
2441 rsp.dcid = cpu_to_le16(chan->scid);
2442 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2443 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2444 l2cap_send_cmd(conn, chan->ident,
2445 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only the first caller past this test sends the config request */
2447 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2450 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2451 l2cap_build_conf_req(chan, buf), buf);
2452 chan->num_conf_req++;
/*
 * Extract the RFC option from a successful Configuration Response and
 * commit the negotiated ERTM timeouts / MPS to the channel.  No-op for
 * Basic-mode channels.
 */
2455 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2459 struct l2cap_conf_rfc rfc;
2461 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2463 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2466 while (len >= L2CAP_CONF_OPT_SIZE) {
2467 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2470 case L2CAP_CONF_RFC:
2471 if (olen == sizeof(rfc))
2472 memcpy(&rfc, (void *)val, olen);
2479 case L2CAP_MODE_ERTM:
2480 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2481 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2482 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2484 case L2CAP_MODE_STREAMING:
2485 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle a Command Reject.  If it rejects our pending Information
 * Request (matched by ident), treat the feature exchange as done and
 * proceed with starting channels.
 */
2489 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2491 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2493 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2496 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2497 cmd->ident == conn->info_ident) {
2498 del_timer(&conn->info_timer);
2500 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2501 conn->info_ident = 0;
2503 l2cap_conn_start(conn);
/*
 * Handle an incoming Connection Request: find a listening channel for
 * the PSM, enforce link security (except for SDP), check the accept
 * backlog, create the child channel, and reply with success, pending
 * (authentication/authorization outstanding) or an error result.
 * Also triggers the feature-mask Information exchange when needed and
 * may immediately send the first Configuration Request.
 */
2509 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2511 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2512 struct l2cap_conn_rsp rsp;
2513 struct l2cap_chan *chan = NULL, *pchan;
2514 struct sock *parent, *sk = NULL;
2515 int result, status = L2CAP_CS_NO_INFO;
2517 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2518 __le16 psm = req->psm;
2520 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2522 /* Check if we have socket listening on psm */
2523 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2525 result = L2CAP_CR_BAD_PSM;
2531 bh_lock_sock(parent);
2533 /* Check if the ACL is secure enough (if not SDP) */
/* 0x0001 is the SDP PSM; 0x05 below is a disconnect reason code */
2534 if (psm != cpu_to_le16(0x0001) &&
2535 !hci_conn_check_link_mode(conn->hcon)) {
2536 conn->disc_reason = 0x05;
2537 result = L2CAP_CR_SEC_BLOCK;
2541 result = L2CAP_CR_NO_MEM;
2543 /* Check for backlog size */
2544 if (sk_acceptq_is_full(parent)) {
2545 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2549 chan = pchan->ops->new_connection(pchan->data);
2555 write_lock_bh(&conn->chan_lock);
2557 /* Check if we already have channel with that dcid */
2558 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2559 write_unlock_bh(&conn->chan_lock);
2560 sock_set_flag(sk, SOCK_ZAPPED);
2561 chan->ops->close(chan->data);
2565 hci_conn_hold(conn->hcon);
2567 bacpy(&bt_sk(sk)->src, conn->src);
2568 bacpy(&bt_sk(sk)->dst, conn->dst);
2572 bt_accept_enqueue(parent, sk);
2574 __l2cap_chan_add(conn, chan);
2578 __set_chan_timer(chan, sk->sk_sndtimeo);
2580 chan->ident = cmd->ident;
/* Only answer definitively once the feature exchange is complete */
2582 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2583 if (l2cap_check_security(chan)) {
2584 if (bt_sk(sk)->defer_setup) {
2585 l2cap_state_change(chan, BT_CONNECT2);
2586 result = L2CAP_CR_PEND;
2587 status = L2CAP_CS_AUTHOR_PEND;
2588 parent->sk_data_ready(parent, 0);
2590 l2cap_state_change(chan, BT_CONFIG);
2591 result = L2CAP_CR_SUCCESS;
2592 status = L2CAP_CS_NO_INFO;
2595 l2cap_state_change(chan, BT_CONNECT2);
2596 result = L2CAP_CR_PEND;
2597 status = L2CAP_CS_AUTHEN_PEND;
2600 l2cap_state_change(chan, BT_CONNECT2);
2601 result = L2CAP_CR_PEND;
2602 status = L2CAP_CS_NO_INFO;
2605 write_unlock_bh(&conn->chan_lock);
2608 bh_unlock_sock(parent);
2611 rsp.scid = cpu_to_le16(scid);
2612 rsp.dcid = cpu_to_le16(dcid);
2613 rsp.result = cpu_to_le16(result);
2614 rsp.status = cpu_to_le16(status);
2615 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info: start the feature-mask Information exchange */
2617 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2618 struct l2cap_info_req info;
2619 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2621 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2622 conn->info_ident = l2cap_get_ident(conn);
2624 mod_timer(&conn->info_timer, jiffies +
2625 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2627 l2cap_send_cmd(conn, conn->info_ident,
2628 L2CAP_INFO_REQ, sizeof(info), &info);
2631 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2632 result == L2CAP_CR_SUCCESS) {
2634 set_bit(CONF_REQ_SENT, &chan->conf_state);
2635 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2636 l2cap_build_conf_req(chan, buf), buf);
2637 chan->num_conf_req++;
/*
 * Handle a Connection Response.  On success, move to BT_CONFIG and
 * send our Configuration Request; on pending, mark the channel as
 * awaiting the final response; otherwise tear the channel down
 * (deferred via a short timer if the socket is user-locked).
 */
2643 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2645 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2646 u16 scid, dcid, result, status;
2647 struct l2cap_chan *chan;
2651 scid = __le16_to_cpu(rsp->scid);
2652 dcid = __le16_to_cpu(rsp->dcid);
2653 result = __le16_to_cpu(rsp->result);
2654 status = __le16_to_cpu(rsp->status);
2656 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* Look the channel up by scid, falling back to the command ident */
2659 chan = l2cap_get_chan_by_scid(conn, scid);
2663 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2671 case L2CAP_CR_SUCCESS:
2672 l2cap_state_change(chan, BT_CONFIG);
2675 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2677 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2680 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2681 l2cap_build_conf_req(chan, req), req);
2682 chan->num_conf_req++;
2686 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2690 /* don't delete l2cap channel if sk is owned by user */
2691 if (sock_owned_by_user(sk)) {
2692 l2cap_state_change(chan, BT_DISCONN);
2693 __clear_chan_timer(chan);
/* HZ/5: retry the teardown shortly once the user releases the sock */
2694 __set_chan_timer(chan, HZ / 5);
2698 l2cap_chan_del(chan, ECONNREFUSED);
/*
 * Apply the default FCS policy once configuration finishes: FCS only
 * exists in ERTM/Streaming, and defaults to CRC16 unless the remote
 * explicitly asked for none.
 */
2706 static inline void set_default_fcs(struct l2cap_chan *chan)
2708 /* FCS is enabled only in ERTM or streaming mode, if one or both
2711 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2712 chan->fcs = L2CAP_FCS_NONE;
2713 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2714 chan->fcs = L2CAP_FCS_CRC16;
/*
 * Handle a Configuration Request.  Rejects invalid CIDs/states,
 * accumulates multi-fragment requests in chan->conf_req, and once the
 * final fragment arrives parses the request, sends the response, and
 * when both directions are configured completes channel setup (ERTM
 * init + BT_CONNECTED).
 */
2717 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2719 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2722 struct l2cap_chan *chan;
2726 dcid = __le16_to_cpu(req->dcid);
2727 flags = __le16_to_cpu(req->flags);
2729 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2731 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject */
2737 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2738 struct l2cap_cmd_rej_cid rej;
2740 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2741 rej.scid = cpu_to_le16(chan->scid);
2742 rej.dcid = cpu_to_le16(chan->dcid);
2744 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2749 /* Reject if config buffer is too small. */
2750 len = cmd_len - sizeof(*req);
2751 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2752 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2753 l2cap_build_conf_rsp(chan, rsp,
2754 L2CAP_CONF_REJECT, flags), rsp);
2759 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2760 chan->conf_len += len;
/* Continuation flag (bit 0) set: more fragments follow */
2762 if (flags & 0x0001) {
2763 /* Incomplete config. Send empty response. */
2764 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2765 l2cap_build_conf_rsp(chan, rsp,
2766 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2770 /* Complete config. */
2771 len = l2cap_parse_conf_req(chan, rsp);
2773 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2777 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2778 chan->num_conf_rsp++;
2780 /* Reset config buffer. */
2783 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: channel is ready for data */
2786 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2787 set_default_fcs(chan);
2789 l2cap_state_change(chan, BT_CONNECTED);
2791 chan->next_tx_seq = 0;
2792 chan->expected_tx_seq = 0;
2793 skb_queue_head_init(&chan->tx_q);
2794 if (chan->mode == L2CAP_MODE_ERTM)
2795 l2cap_ertm_init(chan);
2797 l2cap_chan_ready(sk);
2801 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2803 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2804 l2cap_build_conf_req(chan, buf), buf);
2805 chan->num_conf_req++;
2808 /* Got Conf Rsp PENDING from remote side and assume we sent
2809 Conf Rsp PENDING in the code above */
2810 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2811 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2813 /* check compatibility */
2815 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2816 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2818 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2819 l2cap_build_conf_rsp(chan, rsp,
2820 L2CAP_CONF_SUCCESS, 0x0000), rsp);
/*
 * Handle a Configuration Response.  SUCCESS commits the negotiated RFC
 * parameters; PENDING may trigger our own pending response; UNACCEPT
 * re-negotiates (bounded by L2CAP_CONF_MAX_CONF_RSP); anything else
 * disconnects.  When both directions are done the channel goes to
 * BT_CONNECTED (with ERTM init if applicable).
 */
2828 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2830 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2831 u16 scid, flags, result;
2832 struct l2cap_chan *chan;
/* NOTE(review): cmd->len is a __le16 wire field used in raw
 * arithmetic here — le16_to_cpu(cmd->len) looks required for
 * big-endian hosts; compare with l2cap_config_req which receives a
 * converted cmd_len. Confirm against upstream. */
2834 int len = cmd->len - sizeof(*rsp);
2836 scid = __le16_to_cpu(rsp->scid);
2837 flags = __le16_to_cpu(rsp->flags);
2838 result = __le16_to_cpu(rsp->result);
2840 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2841 scid, flags, result);
2843 chan = l2cap_get_chan_by_scid(conn, scid);
2850 case L2CAP_CONF_SUCCESS:
2851 l2cap_conf_rfc_get(chan, rsp->data, len);
2852 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2855 case L2CAP_CONF_PENDING:
2856 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2858 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2861 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2864 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2868 /* check compatibility */
2870 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2871 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2873 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2874 l2cap_build_conf_rsp(chan, buf,
2875 L2CAP_CONF_SUCCESS, 0x0000), buf);
2879 case L2CAP_CONF_UNACCEPT:
2880 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard against a response too large for our request buffer */
2883 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2884 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2888 /* throw out any old stored conf requests */
2889 result = L2CAP_CONF_SUCCESS;
2890 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2893 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2897 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2898 L2CAP_CONF_REQ, len, req);
2899 chan->num_conf_req++;
2900 if (result != L2CAP_CONF_SUCCESS)
2906 sk->sk_err = ECONNRESET;
2907 __set_chan_timer(chan, HZ * 5);
2908 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2915 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2917 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2918 set_default_fcs(chan);
2920 l2cap_state_change(chan, BT_CONNECTED);
2921 chan->next_tx_seq = 0;
2922 chan->expected_tx_seq = 0;
2923 skb_queue_head_init(&chan->tx_q);
2924 if (chan->mode == L2CAP_MODE_ERTM)
2925 l2cap_ertm_init(chan);
2927 l2cap_chan_ready(sk);
/*
 * Handle a Disconnection Request: echo a Disconnection Response,
 * shut the socket down and remove the channel (deferred via a short
 * timer if the socket is user-locked).
 */
2935 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2937 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2938 struct l2cap_disconn_rsp rsp;
2940 struct l2cap_chan *chan;
2943 scid = __le16_to_cpu(req->scid);
2944 dcid = __le16_to_cpu(req->dcid);
2946 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The remote's dcid is our scid, hence the lookup by dcid */
2948 chan = l2cap_get_chan_by_scid(conn, dcid);
2954 rsp.dcid = cpu_to_le16(chan->scid);
2955 rsp.scid = cpu_to_le16(chan->dcid);
2956 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2958 sk->sk_shutdown = SHUTDOWN_MASK;
2960 /* don't delete l2cap channel if sk is owned by user */
2961 if (sock_owned_by_user(sk)) {
2962 l2cap_state_change(chan, BT_DISCONN);
2963 __clear_chan_timer(chan);
2964 __set_chan_timer(chan, HZ / 5);
2969 l2cap_chan_del(chan, ECONNRESET);
2972 chan->ops->close(chan->data);
/*
 * Handle a Disconnection Response to our earlier request: remove the
 * channel (deferred via a short timer if the socket is user-locked).
 */
2976 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2978 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
2980 struct l2cap_chan *chan;
2983 scid = __le16_to_cpu(rsp->scid);
2984 dcid = __le16_to_cpu(rsp->dcid);
2986 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
2988 chan = l2cap_get_chan_by_scid(conn, scid);
2994 /* don't delete l2cap channel if sk is owned by user */
2995 if (sock_owned_by_user(sk)) {
2996 l2cap_state_change(chan,BT_DISCONN);
2997 __clear_chan_timer(chan);
2998 __set_chan_timer(chan, HZ / 5);
/* Normal teardown: error code 0, channel closed cleanly */
3003 l2cap_chan_del(chan, 0);
3006 chan->ops->close(chan->data);
/*
 * Handle an Information Request: answer FEAT_MASK with our feature
 * mask (extended by ERTM/Streaming and, when applicable, the high
 * speed extended flow/window bits), FIXED_CHAN with the fixed-channel
 * table, and anything else with NOTSUPP.
 */
3010 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3012 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3015 type = __le16_to_cpu(req->type);
3017 BT_DBG("type 0x%4.4x", type);
3019 if (type == L2CAP_IT_FEAT_MASK) {
3021 u32 feat_mask = l2cap_feat_mask;
3022 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3023 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3024 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3026 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Extended flow spec / window only advertised with HS enabled */
3029 feat_mask |= L2CAP_FEAT_EXT_FLOW
3030 | L2CAP_FEAT_EXT_WINDOW;
3032 put_unaligned_le32(feat_mask, rsp->data);
3033 l2cap_send_cmd(conn, cmd->ident,
3034 L2CAP_INFO_RSP, sizeof(buf), buf);
3035 } else if (type == L2CAP_IT_FIXED_CHAN) {
3037 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3038 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3039 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3040 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3041 l2cap_send_cmd(conn, cmd->ident,
3042 L2CAP_INFO_RSP, sizeof(buf), buf);
3044 struct l2cap_info_rsp rsp;
3045 rsp.type = cpu_to_le16(type);
3046 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3047 l2cap_send_cmd(conn, cmd->ident,
3048 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle the peer's Information Response. After a feature-mask answer, chain
 * a fixed-channel query if the peer advertises fixed channels; otherwise (or
 * after the fixed-channel answer, or on failure) mark info exchange done and
 * kick pending connections via l2cap_conn_start().
 */
3054 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3056 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3059 type = __le16_to_cpu(rsp->type);
3060 result = __le16_to_cpu(rsp->result);
3062 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3064 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3065 if (cmd->ident != conn->info_ident ||
3066 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3069 del_timer(&conn->info_timer);
3071 if (result != L2CAP_IR_SUCCESS) {
/* Peer refused the query: give up on info exchange but proceed anyway. */
3072 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3073 conn->info_ident = 0;
3075 l2cap_conn_start(conn);
3080 if (type == L2CAP_IT_FEAT_MASK) {
3081 conn->feat_mask = get_unaligned_le32(rsp->data);
3083 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Follow up with a fixed-channel query using a fresh ident. */
3084 struct l2cap_info_req req;
3085 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3087 conn->info_ident = l2cap_get_ident(conn);
3089 l2cap_send_cmd(conn, conn->info_ident,
3090 L2CAP_INFO_REQ, sizeof(req), &req);
3092 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3093 conn->info_ident = 0;
3095 l2cap_conn_start(conn);
3097 } else if (type == L2CAP_IT_FIXED_CHAN) {
3098 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3099 conn->info_ident = 0;
3101 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request. Currently a stub: validates the
 * command length, logs the request, and always rejects with
 * L2CAP_CR_NO_MEM (see the in-code "Placeholder" comment).
 */
3107 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3108 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3111 struct l2cap_create_chan_req *req = data;
3112 struct l2cap_create_chan_rsp rsp;
/* Malformed request if the payload length doesn't match the struct. */
3115 if (cmd_len != sizeof(*req))
3121 psm = le16_to_cpu(req->psm);
3122 scid = le16_to_cpu(req->scid);
3124 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3126 /* Placeholder: Always reject */
3128 rsp.scid = cpu_to_le16(scid);
3129 rsp.result = L2CAP_CR_NO_MEM;
3130 rsp.status = L2CAP_CS_NO_INFO;
3132 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Handle an AMP Create Channel Response by delegating to the ordinary
 * Connect Response handler (the wire formats are treated the same here).
 */
3138 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3139 struct l2cap_cmd_hdr *cmd, void *data)
3141 BT_DBG("conn %p", conn);
3143 return l2cap_connect_rsp(conn, cmd, data);
/* Validate LE connection parameters (interval min/max, slave latency,
 * supervision timeout multiplier) against the ranges visible below.
 * NOTE(review): the rejection return statements are elided in this sampled
 * view; each bare `if` below guards one of them.
 */
3146 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
/* Interval must be ordered and within [6, 3200] units. */
3151 if (min > max || min < 6 || max > 3200)
3154 if (to_multiplier < 10 || to_multiplier > 3200)
/* Supervision timeout must exceed the max connection interval. */
3157 if (max >= to_multiplier * 8)
3160 max_latency = (to_multiplier * 8 / max) - 1;
3161 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request: only the master may
 * process it; validate length and parameter ranges, send accept/reject, and
 * on accept push the new parameters to the controller via
 * hci_le_conn_update().
 */
3167 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3168 struct l2cap_cmd_hdr *cmd, u8 *data)
3170 struct hci_conn *hcon = conn->hcon;
3171 struct l2cap_conn_param_update_req *req;
3172 struct l2cap_conn_param_update_rsp rsp;
3173 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the link master is allowed to apply parameter updates. */
3176 if (!(hcon->link_mode & HCI_LM_MASTER))
3179 cmd_len = __le16_to_cpu(cmd->len);
3180 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3183 req = (struct l2cap_conn_param_update_req *) data;
3184 min = __le16_to_cpu(req->min);
3185 max = __le16_to_cpu(req->max);
3186 latency = __le16_to_cpu(req->latency);
3187 to_multiplier = __le16_to_cpu(req->to_multiplier);
3189 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3190 min, max, latency, to_multiplier);
3192 memset(&rsp, 0, sizeof(rsp));
3194 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3196 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3198 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3200 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Parameters were valid: apply them on the HCI connection. */
3204 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler by opcode.
 * Echo requests are answered inline; unknown opcodes are logged with BT_ERR.
 */
3209 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3210 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3214 switch (cmd->code) {
3215 case L2CAP_COMMAND_REJ:
3216 l2cap_command_rej(conn, cmd, data);
3219 case L2CAP_CONN_REQ:
3220 err = l2cap_connect_req(conn, cmd, data);
3223 case L2CAP_CONN_RSP:
3224 err = l2cap_connect_rsp(conn, cmd, data);
3227 case L2CAP_CONF_REQ:
3228 err = l2cap_config_req(conn, cmd, cmd_len, data);
3231 case L2CAP_CONF_RSP:
3232 err = l2cap_config_rsp(conn, cmd, data);
3235 case L2CAP_DISCONN_REQ:
3236 err = l2cap_disconnect_req(conn, cmd, data);
3239 case L2CAP_DISCONN_RSP:
3240 err = l2cap_disconnect_rsp(conn, cmd, data);
3243 case L2CAP_ECHO_REQ:
/* Echo: bounce the payload straight back with the same ident. */
3244 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3247 case L2CAP_ECHO_RSP:
3250 case L2CAP_INFO_REQ:
3251 err = l2cap_information_req(conn, cmd, data);
3254 case L2CAP_INFO_RSP:
3255 err = l2cap_information_rsp(conn, cmd, data);
3258 case L2CAP_CREATE_CHAN_REQ:
3259 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3262 case L2CAP_CREATE_CHAN_RSP:
3263 err = l2cap_create_channel_rsp(conn, cmd, data);
3267 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command: only the parameter-update request is
 * actively handled; command-reject and update-response are accepted silently,
 * anything else is logged as unknown.
 */
3275 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3276 struct l2cap_cmd_hdr *cmd, u8 *data)
3278 switch (cmd->code) {
3279 case L2CAP_COMMAND_REJ:
3282 case L2CAP_CONN_PARAM_UPDATE_REQ:
3283 return l2cap_conn_param_update_req(conn, cmd, data);
3285 case L2CAP_CONN_PARAM_UPDATE_RSP:
3289 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signaling channel payload: iterate over the packed command
 * headers in the skb, dispatch each to the LE or BR/EDR handler depending on
 * link type, and answer handler failures with a Command Reject.
 */
3294 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3295 struct sk_buff *skb)
3297 u8 *data = skb->data;
3299 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signaling traffic first. */
3302 l2cap_raw_recv(conn, skb);
3304 while (len >= L2CAP_CMD_HDR_SIZE) {
3306 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3307 data += L2CAP_CMD_HDR_SIZE;
3308 len -= L2CAP_CMD_HDR_SIZE;
3310 cmd_len = le16_to_cpu(cmd.len);
3312 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Bounds check: a command may not claim more bytes than remain, and
 * ident 0 is reserved/invalid. */
3314 if (cmd_len > len || !cmd.ident) {
3315 BT_DBG("corrupted command");
3319 if (conn->hcon->type == LE_LINK)
3320 err = l2cap_le_sig_cmd(conn, &cmd, data);
3322 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3325 struct l2cap_cmd_rej_unk rej;
3327 BT_ERR("Wrong link type (%d)", err);
3329 /* FIXME: Map err to a valid reason */
3330 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3331 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the trailing CRC16 FCS of an ERTM/streaming frame when
 * the channel uses L2CAP_FCS_CRC16. The CRC covers the L2CAP header (which
 * sits just before skb->data after the pull) plus the remaining payload.
 * Returns 0 on match; the mismatch return path is elided in this view.
 */
3341 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3343 u16 our_fcs, rcv_fcs;
/* Extended control fields mean a larger header was consumed earlier. */
3346 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3347 hdr_size = L2CAP_EXT_HDR_SIZE;
3349 hdr_size = L2CAP_ENH_HDR_SIZE;
3351 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off, then read it from just past the new tail. */
3352 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3353 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3354 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3356 if (our_fcs != rcv_fcs)
/* Poll response helper: when locally busy send RNR, otherwise retransmit /
 * send pending I-frames, and fall back to an RR S-frame if nothing at all
 * was transmitted so the peer still gets an F-bit answer.
 */
3362 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3366 chan->frames_sent = 0;
3368 control |= __set_reqseq(chan, chan->buffer_seq);
3370 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3371 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3372 l2cap_send_sframe(chan, control);
3373 set_bit(CONN_RNR_SENT, &chan->conn_state);
3376 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3377 l2cap_retransmit_frames(chan);
3379 l2cap_ertm_send(chan);
/* Nothing went out and we're not busy: answer with a plain RR. */
3381 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3382 chan->frames_sent == 0) {
3383 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3384 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the queue
 * ordered by tx_seq offset relative to buffer_seq. Duplicates (same tx_seq
 * as an existing entry) are detected via the first comparison in the loop.
 */
3388 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3390 struct sk_buff *next_skb;
3391 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequence number and SAR bits in the skb control block for later
 * reassembly. */
3393 bt_cb(skb)->tx_seq = tx_seq;
3394 bt_cb(skb)->sar = sar;
3396 next_skb = skb_peek(&chan->srej_q);
3398 __skb_queue_tail(&chan->srej_q, skb);
3402 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3405 if (bt_cb(next_skb)->tx_seq == tx_seq)
3408 next_tx_seq_offset = __seq_offset(chan,
3409 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
/* Found the first entry that sorts after us: insert before it. */
3411 if (next_tx_seq_offset > tx_seq_offset) {
3412 __skb_queue_before(&chan->srej_q, next_skb, skb);
3416 if (skb_queue_is_last(&chan->srej_q, next_skb))
3419 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
3421 __skb_queue_tail(&chan->srej_q, skb);
/* Append new_frag to skb's frag_list (creating the list if empty), track the
 * list tail via *last_frag for O(1) appends, and update the parent skb's
 * len/data_len/truesize accounting.
 */
3426 static void append_skb_frag(struct sk_buff *skb,
3427 struct sk_buff *new_frag, struct sk_buff **last_frag)
3429 /* skb->len reflects data in skb as well as all fragments
3430 * skb->data_len reflects only data in fragments
3432 if (!skb_has_frag_list(skb))
3433 skb_shinfo(skb)->frag_list = new_frag;
3435 new_frag->next = NULL;
3437 (*last_frag)->next = new_frag;
3438 *last_frag = new_frag;
3440 skb->len += new_frag->len;
3441 skb->data_len += new_frag->len;
3442 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-tagged I-frames. Unsegmented frames go straight
 * to chan->ops->recv(); START frames record the announced SDU length and
 * begin accumulation; CONTINUE/END frames are appended via append_skb_frag(),
 * with the completed SDU delivered (and state reset) on END. The error /
 * cleanup path frees any partial SDU.
 */
3445 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3449 switch (__get_ctrl_sar(chan, control)) {
3450 case L2CAP_SAR_UNSEGMENTED:
3454 err = chan->ops->recv(chan->data, skb);
3457 case L2CAP_SAR_START:
/* First fragment carries the total SDU length prefix. */
3461 chan->sdu_len = get_unaligned_le16(skb->data);
3462 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Announced SDU larger than our MTU: reject (error path elided). */
3464 if (chan->sdu_len > chan->imtu) {
3469 if (skb->len >= chan->sdu_len)
3473 chan->sdu_last_frag = skb;
3479 case L2CAP_SAR_CONTINUE:
3483 append_skb_frag(chan->sdu, skb,
3484 &chan->sdu_last_frag);
/* A CONTINUE fragment must not already reach the full SDU length. */
3487 if (chan->sdu->len >= chan->sdu_len)
3497 append_skb_frag(chan->sdu, skb,
3498 &chan->sdu_last_frag);
/* END fragment: total length must now match exactly. */
3501 if (chan->sdu->len != chan->sdu_len)
3504 err = chan->ops->recv(chan->data, chan->sdu);
3507 /* Reassembly complete */
3509 chan->sdu_last_frag = NULL;
/* Failure path: drop the partial SDU and reset reassembly state. */
3517 kfree_skb(chan->sdu);
3519 chan->sdu_last_frag = NULL;
/* Enter ERTM local-busy: flag the state, tell the peer with an RNR S-frame
 * acknowledging buffer_seq, remember we sent RNR, and stop the ack timer.
 */
3526 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3530 BT_DBG("chan %p, Enter local busy", chan);
3532 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3534 control = __set_reqseq(chan, chan->buffer_seq);
3535 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3536 l2cap_send_sframe(chan, control);
3538 set_bit(CONN_RNR_SENT, &chan->conn_state);
3540 __clear_ack_timer(chan);
/* Leave ERTM local-busy: if we previously sent RNR, send an RR with the
 * P(oll) bit set, start the monitor timer, and wait for the peer's F-bit
 * response (CONN_WAIT_F); then clear the busy/RNR flags.
 */
3543 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3547 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3550 control = __set_reqseq(chan, chan->buffer_seq);
3551 control |= __set_ctrl_poll(chan);
3552 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3553 l2cap_send_sframe(chan, control);
3554 chan->retry_count = 1;
/* Poll sent: switch from retransmission timer to monitor timer. */
3556 __clear_retrans_timer(chan);
3557 __set_monitor_timer(chan);
3559 set_bit(CONN_WAIT_F, &chan->conn_state);
3562 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3563 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3565 BT_DBG("chan %p, Exit local busy", chan);
/* Public toggle for local-busy on an ERTM channel: enter or exit busy state
 * depending on @busy. Non-ERTM modes are unaffected.
 */
3568 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3570 if (chan->mode == L2CAP_MODE_ERTM) {
3572 l2cap_ertm_enter_local_busy(chan);
3574 l2cap_ertm_exit_local_busy(chan);
/* Drain in-order frames from the SREJ queue starting at tx_seq: dequeue each
 * consecutive frame, reassemble it, and advance buffer_seq_srej. Stops at
 * the first gap or if local-busy is set; a reassembly error triggers a
 * disconnect request.
 */
3578 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3580 struct sk_buff *skb;
3583 while ((skb = skb_peek(&chan->srej_q)) &&
3584 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Queue head isn't the next expected frame: gap still open, stop. */
3587 if (bt_cb(skb)->tx_seq != tx_seq)
3590 skb = skb_dequeue(&chan->srej_q);
3591 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3592 err = l2cap_reassemble_sdu(chan, skb, control);
3595 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3599 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3600 tx_seq = __next_seq(chan, tx_seq);
/* Re-send SREJ S-frames for every entry in the srej list up to (and
 * removing) the entry matching tx_seq; each resent entry is moved to the
 * tail of the list.
 */
3604 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3606 struct srej_list *l, *tmp;
3609 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Reached the frame that just arrived: its SREJ entry is satisfied. */
3610 if (l->tx_seq == tx_seq) {
3615 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3616 control |= __set_reqseq(chan, l->tx_seq);
3617 l2cap_send_sframe(chan, control);
3619 list_add_tail(&l->list, &chan->srej_l);
/* Send an SREJ S-frame for every sequence number between expected_tx_seq and
 * the received tx_seq, recording each requested seq in chan->srej_l, then
 * advance expected_tx_seq past the received frame.
 * NOTE(review): the kzalloc() result is used without a NULL check on the
 * lines visible here — GFP_ATOMIC can fail; worth confirming upstream fix.
 */
3623 static void l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3625 struct srej_list *new;
3628 while (tx_seq != chan->expected_tx_seq) {
3629 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3630 control |= __set_reqseq(chan, chan->expected_tx_seq);
3631 l2cap_send_sframe(chan, control);
3633 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3634 new->tx_seq = chan->expected_tx_seq;
3636 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3638 list_add_tail(&new->list, &chan->srej_l);
/* Skip over the frame that did arrive. */
3641 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* ERTM I-frame receive path. Handles F-bit poll completion, validates tx_seq
 * against the tx window, then either: queues/reorders out-of-sequence frames
 * via the SREJ machinery, or accepts an in-sequence frame, reassembles it,
 * and acknowledges every num_to_ack (tx_win/6 + 1) frames.
 */
3644 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3646 u16 tx_seq = __get_txseq(chan, rx_control);
3647 u16 req_seq = __get_reqseq(chan, rx_control);
3648 u8 sar = __get_ctrl_sar(chan, rx_control);
3649 int tx_seq_offset, expected_tx_seq_offset;
3650 int num_to_ack = (chan->tx_win/6) + 1;
3653 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3654 tx_seq, rx_control);
/* F-bit answers our earlier poll: stop the monitor timer. */
3656 if (__is_ctrl_final(chan, rx_control) &&
3657 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3658 __clear_monitor_timer(chan);
3659 if (chan->unacked_frames > 0)
3660 __set_retrans_timer(chan);
3661 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* The piggybacked req_seq acknowledges our outbound frames. */
3664 chan->expected_ack_seq = req_seq;
3665 l2cap_drop_acked_frames(chan);
3667 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3669 /* invalid tx_seq */
3670 if (tx_seq_offset >= chan->tx_win) {
3671 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3675 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3678 if (tx_seq == chan->expected_tx_seq)
/* --- Out-of-sequence frame while SREJ recovery is in progress --- */
3681 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3682 struct srej_list *first;
3684 first = list_first_entry(&chan->srej_l,
3685 struct srej_list, list);
3686 if (tx_seq == first->tx_seq) {
/* This frame fills the oldest outstanding SREJ gap. */
3687 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3688 l2cap_check_srej_gap(chan, tx_seq);
3690 list_del(&first->list);
3693 if (list_empty(&chan->srej_l)) {
/* All gaps filled: leave SREJ_SENT state and ack. */
3694 chan->buffer_seq = chan->buffer_seq_srej;
3695 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3696 l2cap_send_ack(chan);
3697 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3700 struct srej_list *l;
3702 /* duplicated tx_seq */
3703 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3706 list_for_each_entry(l, &chan->srej_l, list) {
3707 if (l->tx_seq == tx_seq) {
3708 l2cap_resend_srejframe(chan, tx_seq);
3712 l2cap_send_srejframe(chan, tx_seq);
/* --- First out-of-sequence frame: enter SREJ recovery --- */
3715 expected_tx_seq_offset = __seq_offset(chan,
3716 chan->expected_tx_seq, chan->buffer_seq);
3718 /* duplicated tx_seq */
3719 if (tx_seq_offset < expected_tx_seq_offset)
3722 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3724 BT_DBG("chan %p, Enter SREJ", chan);
3726 INIT_LIST_HEAD(&chan->srej_l);
3727 chan->buffer_seq_srej = chan->buffer_seq;
3729 __skb_queue_head_init(&chan->srej_q);
3730 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3732 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3734 l2cap_send_srejframe(chan, tx_seq);
3736 __clear_ack_timer(chan);
/* --- In-sequence frame: accept and deliver --- */
3741 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3743 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3744 bt_cb(skb)->tx_seq = tx_seq;
3745 bt_cb(skb)->sar = sar;
3746 __skb_queue_tail(&chan->srej_q, skb);
3750 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3751 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3754 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3758 if (__is_ctrl_final(chan, rx_control)) {
3759 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3760 l2cap_retransmit_frames(chan);
3763 __set_ack_timer(chan);
/* Batch acknowledgements: ack once per num_to_ack frames. */
3765 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3766 if (chan->num_acked == num_to_ack - 1)
3767 l2cap_send_ack(chan);
/* Handle a received RR (Receive Ready) S-frame: ack our frames up to
 * req_seq, answer a P-bit poll (with srej-tail or I/RR/RNR as appropriate),
 * handle the F-bit poll response, or plainly clear remote-busy and resume
 * sending.
 */
3776 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3778 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3779 __get_reqseq(chan, rx_control), rx_control);
3781 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3782 l2cap_drop_acked_frames(chan);
3784 if (__is_ctrl_poll(chan, rx_control)) {
/* Peer polls us: we must answer with the F bit set. */
3785 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3786 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3787 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3788 (chan->unacked_frames > 0))
3789 __set_retrans_timer(chan);
3791 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3792 l2cap_send_srejtail(chan);
3794 l2cap_send_i_or_rr_or_rnr(chan);
3797 } else if (__is_ctrl_final(chan, rx_control)) {
/* F-bit reply to our poll: retransmit unless a REJ already did. */
3798 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3800 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3801 l2cap_retransmit_frames(chan);
3804 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3805 (chan->unacked_frames > 0))
3806 __set_retrans_timer(chan);
3808 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3809 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3810 l2cap_send_ack(chan);
3812 l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: ack frames up to req_seq and retransmit
 * from there; with the F bit, only retransmit if a REJ wasn't already acted
 * on, and while waiting for an F-bit reply record that REJ was handled.
 */
3816 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
3818 u16 tx_seq = __get_reqseq(chan, rx_control);
3820 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3822 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3824 chan->expected_ack_seq = tx_seq;
3825 l2cap_drop_acked_frames(chan);
3827 if (__is_ctrl_final(chan, rx_control)) {
3828 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3829 l2cap_retransmit_frames(chan);
3831 l2cap_retransmit_frames(chan);
/* Still awaiting an F-bit: remember the REJ was already serviced. */
3833 if (test_bit(CONN_WAIT_F, &chan->conn_state))
3834 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a received SREJ S-frame: retransmit the single requested frame.
 * With the P bit, also ack up to tx_seq and resume sending with F-bit set;
 * with the F bit, clear a matching saved SREJ_ACT; otherwise retransmit and,
 * if awaiting an F-bit, save the req_seq and flag SREJ_ACT.
 */
3837 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
3839 u16 tx_seq = __get_reqseq(chan, rx_control);
3841 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3843 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3845 if (__is_ctrl_poll(chan, rx_control)) {
3846 chan->expected_ack_seq = tx_seq;
3847 l2cap_drop_acked_frames(chan);
3849 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3850 l2cap_retransmit_one_frame(chan, tx_seq);
3852 l2cap_ertm_send(chan);
3854 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3855 chan->srej_save_reqseq = tx_seq;
3856 set_bit(CONN_SREJ_ACT, &chan->conn_state);
3858 } else if (__is_ctrl_final(chan, rx_control)) {
/* F-bit answer matching the saved SREJ: recovery round complete. */
3859 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
3860 chan->srej_save_reqseq == tx_seq)
3861 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
3863 l2cap_retransmit_one_frame(chan, tx_seq);
3865 l2cap_retransmit_one_frame(chan, tx_seq);
3866 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
3867 chan->srej_save_reqseq = tx_seq;
3868 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a received RNR (Receive Not Ready) S-frame: mark the remote busy,
 * ack up to req_seq, and answer a P-bit poll — either with RR/RNR+F when no
 * SREJ is outstanding, or by resending the SREJ tail / an RR otherwise.
 */
3873 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
3875 u16 tx_seq = __get_reqseq(chan, rx_control);
3877 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
3879 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3880 chan->expected_ack_seq = tx_seq;
3881 l2cap_drop_acked_frames(chan);
3883 if (__is_ctrl_poll(chan, rx_control))
3884 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3886 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* Remote is busy: no point running the retransmission timer. */
3887 __clear_retrans_timer(chan);
3888 if (__is_ctrl_poll(chan, rx_control))
3889 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
3893 if (__is_ctrl_poll(chan, rx_control)) {
3894 l2cap_send_srejtail(chan);
3896 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
3897 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received S-frame (RR/REJ/SREJ/RNR) to its handler. First, an
 * F-bit while waiting for a poll answer stops the monitor timer and re-arms
 * the retransmission timer if frames are still unacked.
 */
3901 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3903 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
3905 if (__is_ctrl_final(chan, rx_control) &&
3906 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3907 __clear_monitor_timer(chan);
3908 if (chan->unacked_frames > 0)
3909 __set_retrans_timer(chan);
3910 clear_bit(CONN_WAIT_F, &chan->conn_state);
3913 switch (__get_ctrl_super(chan, rx_control)) {
3914 case L2CAP_SUPER_RR:
3915 l2cap_data_channel_rrframe(chan, rx_control);
3918 case L2CAP_SUPER_REJ:
3919 l2cap_data_channel_rejframe(chan, rx_control);
3922 case L2CAP_SUPER_SREJ:
3923 l2cap_data_channel_srejframe(chan, rx_control);
3926 case L2CAP_SUPER_RNR:
3927 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for one received ERTM frame on a socket: strip the control
 * field, verify FCS, enforce the MPS and req_seq window, then route to the
 * I-frame or S-frame handler. Violations trigger a disconnect request.
 */
3935 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
3937 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
3940 int len, next_tx_seq_offset, req_seq_offset;
3942 control = __get_control(chan, skb->data);
3943 skb_pull(skb, __ctrl_size(chan));
3947 * We can just drop the corrupted I-frame here.
3948 * Receiver will miss it and start proper recovery
3949 * procedures and ask retransmission.
3951 if (l2cap_check_fcs(chan, skb))
/* SAR start frames carry a 2-byte SDU length not counted against MPS. */
3954 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
3955 len -= L2CAP_SDULEN_SIZE;
3957 if (chan->fcs == L2CAP_FCS_CRC16)
3958 len -= L2CAP_FCS_SIZE;
3960 if (len > chan->mps) {
3961 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3965 req_seq = __get_reqseq(chan, control);
3967 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
3969 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
3970 chan->expected_ack_seq);
3972 /* check for invalid req-seq */
3973 if (req_seq_offset > next_tx_seq_offset) {
3974 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3978 if (!__is_sframe(chan, control)) {
/* I-frame: a zero-length data field here is a protocol violation. */
3980 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3984 l2cap_data_channel_iframe(chan, control, skb);
3988 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3992 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver one data frame to the channel identified by cid, per channel mode:
 * basic mode hands the skb straight to the socket (with MTU check), ERTM
 * goes through l2cap_ertm_data_rcv (or the socket backlog when user-locked),
 * and streaming mode does its own in-line control/FCS/sequence handling with
 * partial-SDU discard on gaps.
 */
4002 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4004 struct l2cap_chan *chan;
4005 struct sock *sk = NULL;
4010 chan = l2cap_get_chan_by_scid(conn, cid);
4012 BT_DBG("unknown cid 0x%4.4x", cid);
4018 BT_DBG("chan %p, len %d", chan, skb->len);
4020 if (chan->state != BT_CONNECTED)
4023 switch (chan->mode) {
4024 case L2CAP_MODE_BASIC:
4025 /* If socket recv buffers overflows we drop data here
4026 * which is *bad* because L2CAP has to be reliable.
4027 * But we don't have any other choice. L2CAP doesn't
4028 * provide flow control mechanism. */
4030 if (chan->imtu < skb->len)
4033 if (!chan->ops->recv(chan->data, skb))
4037 case L2CAP_MODE_ERTM:
/* Socket locked by userspace: defer to the backlog instead. */
4038 if (!sock_owned_by_user(sk)) {
4039 l2cap_ertm_data_rcv(sk, skb);
4041 if (sk_add_backlog(sk, skb))
4047 case L2CAP_MODE_STREAMING:
4048 control = __get_control(chan, skb->data);
4049 skb_pull(skb, __ctrl_size(chan));
4052 if (l2cap_check_fcs(chan, skb))
4055 if (__is_sar_start(chan, control))
4056 len -= L2CAP_SDULEN_SIZE;
4058 if (chan->fcs == L2CAP_FCS_CRC16)
4059 len -= L2CAP_FCS_SIZE;
/* Streaming mode carries no S-frames; oversize or S-frame = drop. */
4061 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4064 tx_seq = __get_txseq(chan, control);
4066 if (chan->expected_tx_seq != tx_seq) {
4067 /* Frame(s) missing - must discard partial SDU */
4068 kfree_skb(chan->sdu);
4070 chan->sdu_last_frag = NULL;
4073 /* TODO: Notify userland of missing data */
4076 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4078 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4079 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4084 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (G-frame) packet: find a global channel bound to
 * the PSM on our source address, check state and MTU, and pass the skb to
 * the channel's recv op.
 */
4098 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4100 struct sock *sk = NULL;
4101 struct l2cap_chan *chan;
4103 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4111 BT_DBG("sk %p, len %d", sk, skb->len);
4113 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4116 if (chan->imtu < skb->len)
4119 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT (LE fixed-channel) packet: find the global channel bound to
 * the cid on our source address, check state and MTU, and pass the skb to
 * the channel's recv op. Mirrors l2cap_conless_channel but keyed by scid.
 */
4131 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4133 struct sock *sk = NULL;
4134 struct l2cap_chan *chan;
4136 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4144 BT_DBG("sk %p, len %d", sk, skb->len);
4146 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4149 if (chan->imtu < skb->len)
4152 if (!chan->ops->recv(chan->data, skb))
/* Demultiplex one complete L2CAP frame by destination CID: signaling (BR/EDR
 * or LE), connectionless, ATT/LE-data, SMP, or an ordinary data channel.
 * Validates that the header's length matches the skb payload first.
 */
4164 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4166 struct l2cap_hdr *lh = (void *) skb->data;
4170 skb_pull(skb, L2CAP_HDR_SIZE);
4171 cid = __le16_to_cpu(lh->cid);
4172 len = __le16_to_cpu(lh->len);
/* Header length must agree with what actually arrived. */
4174 if (len != skb->len) {
4179 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4182 case L2CAP_CID_LE_SIGNALING:
4183 case L2CAP_CID_SIGNALING:
4184 l2cap_sig_channel(conn, skb);
4187 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a PSM prefix in the payload. */
4188 psm = get_unaligned_le16(skb->data);
4190 l2cap_conless_channel(conn, psm, skb);
4193 case L2CAP_CID_LE_DATA:
4194 l2cap_att_channel(conn, cid, skb);
/* SMP fixed channel: a handler failure kills the whole connection. */
4198 if (smp_sig_channel(conn, skb))
4199 l2cap_conn_del(conn->hcon, EACCES);
4203 l2cap_data_channel(conn, cid, skb);
4208 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI connect-indication callback: scan listening channels and compute the
 * accepted link-mode mask — lm1 for channels bound exactly to this adapter's
 * address (exact match preferred), lm2 for wildcard (BDADDR_ANY) listeners.
 * Only ACL links are considered.
 */
4210 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4212 int exact = 0, lm1 = 0, lm2 = 0;
4213 struct l2cap_chan *c;
4215 if (type != ACL_LINK)
4218 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4220 /* Find listening sockets and check their link_mode */
4221 read_lock(&chan_list_lock);
4222 list_for_each_entry(c, &chan_list, global_l) {
4223 struct sock *sk = c->sk;
4225 if (c->state != BT_LISTEN)
4228 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4229 lm1 |= HCI_LM_ACCEPT;
4230 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4231 lm1 |= HCI_LM_MASTER;
4233 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4234 lm2 |= HCI_LM_ACCEPT;
4235 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4236 lm2 |= HCI_LM_MASTER;
4239 read_unlock(&chan_list_lock);
4241 return exact ? lm1 : lm2;
/* HCI connect-confirmation callback: on success attach an l2cap_conn to the
 * hcon and mark it ready; on failure tear it down with the mapped errno.
 * Ignores anything that is not an ACL or LE link.
 */
4244 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4246 struct l2cap_conn *conn;
4248 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4250 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4254 conn = l2cap_conn_add(hcon, status);
4256 l2cap_conn_ready(conn);
4258 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI disconnect-indication callback: report the stored disconnect reason
 * for this connection (ACL/LE links with an attached l2cap_conn only).
 */
4263 static int l2cap_disconn_ind(struct hci_conn *hcon)
4265 struct l2cap_conn *conn = hcon->l2cap_data;
4267 BT_DBG("hcon %p", hcon);
4269 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4272 return conn->disc_reason;
/* HCI disconnect-confirmation callback: tear down the l2cap_conn with the
 * HCI reason mapped to an errno. ACL/LE links only.
 */
4275 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4277 BT_DBG("hcon %p reason %d", hcon, reason);
4279 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4282 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel: when
 * encryption drops, give MEDIUM-security channels a 5s grace timer and close
 * HIGH-security channels outright; when it comes up, cancel the grace timer
 * for MEDIUM-security channels.
 */
4287 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4289 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4292 if (encrypt == 0x00) {
4293 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4294 __clear_chan_timer(chan);
4295 __set_chan_timer(chan, HZ * 5);
4296 } else if (chan->sec_level == BT_SECURITY_HIGH)
4297 l2cap_chan_close(chan, ECONNREFUSED);
4299 if (chan->sec_level == BT_SECURITY_MEDIUM)
4300 __clear_chan_timer(chan);
/* HCI security (authentication/encryption) result callback. For LE links,
 * distribute SMP keys and stop the security timer. Then walk every channel
 * on the connection: mark LE data channels ready on success, re-check
 * encryption on established channels, and for channels mid-connect either
 * (re)send the Connection Request or answer a held CONNECT2 with
 * success/pending/security-block depending on status and defer_setup.
 */
4304 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4306 struct l2cap_conn *conn = hcon->l2cap_data;
4307 struct l2cap_chan *chan;
4312 BT_DBG("conn %p", conn);
4314 if (hcon->type == LE_LINK) {
4315 smp_distribute_keys(conn, 0);
4316 del_timer(&conn->security_timer);
4319 read_lock(&conn->chan_lock);
4321 list_for_each_entry(chan, &conn->chan_l, list) {
4322 struct sock *sk = chan->sk;
4326 BT_DBG("chan->scid %d", chan->scid);
4328 if (chan->scid == L2CAP_CID_LE_DATA) {
4329 if (!status && encrypt) {
4330 chan->sec_level = hcon->sec_level;
4331 l2cap_chan_ready(sk);
/* Skip channels that aren't waiting on this security result. */
4338 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4343 if (!status && (chan->state == BT_CONNECTED ||
4344 chan->state == BT_CONFIG)) {
4345 l2cap_check_encryption(chan, encrypt);
4350 if (chan->state == BT_CONNECT) {
/* Security done while we were about to connect: send the
 * Connection Request now. */
4352 struct l2cap_conn_req req;
4353 req.scid = cpu_to_le16(chan->scid);
4354 req.psm = chan->psm;
4356 chan->ident = l2cap_get_ident(conn);
4357 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4359 l2cap_send_cmd(conn, chan->ident,
4360 L2CAP_CONN_REQ, sizeof(req), &req);
/* Security failed: short timer to tear the channel down. */
4362 __clear_chan_timer(chan);
4363 __set_chan_timer(chan, HZ / 10);
4365 } else if (chan->state == BT_CONNECT2) {
4366 struct l2cap_conn_rsp rsp;
4370 if (bt_sk(sk)->defer_setup) {
/* Userspace must still authorize: answer "pending". */
4371 struct sock *parent = bt_sk(sk)->parent;
4372 res = L2CAP_CR_PEND;
4373 stat = L2CAP_CS_AUTHOR_PEND;
4375 parent->sk_data_ready(parent, 0);
4377 l2cap_state_change(chan, BT_CONFIG);
4378 res = L2CAP_CR_SUCCESS;
4379 stat = L2CAP_CS_NO_INFO;
4382 l2cap_state_change(chan, BT_DISCONN);
4383 __set_chan_timer(chan, HZ / 10);
4384 res = L2CAP_CR_SEC_BLOCK;
4385 stat = L2CAP_CS_NO_INFO;
4388 rsp.scid = cpu_to_le16(chan->dcid);
4389 rsp.dcid = cpu_to_le16(chan->scid);
4390 rsp.result = cpu_to_le16(res);
4391 rsp.status = cpu_to_le16(stat);
4392 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4399 read_unlock(&conn->chan_lock);
/* HCI ACL data callback: reassemble L2CAP frames from ACL fragments.
 * A start fragment (no ACL_CONT) either contains a complete frame (dispatch
 * immediately) or begins reassembly into conn->rx_skb; continuation
 * fragments are appended until conn->rx_len reaches zero, then the frame is
 * dispatched. Any inconsistency (unexpected start/cont, short header,
 * over-long frame or fragment, MTU overflow) marks the conn unreliable.
 */
4404 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4406 struct l2cap_conn *conn = hcon->l2cap_data;
4409 conn = l2cap_conn_add(hcon, 0);
4414 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4416 if (!(flags & ACL_CONT)) {
4417 struct l2cap_hdr *hdr;
4418 struct l2cap_chan *chan;
/* A new start while reassembly is pending: drop the stale buffer. */
4423 BT_ERR("Unexpected start frame (len %d)", skb->len);
4424 kfree_skb(conn->rx_skb);
4425 conn->rx_skb = NULL;
4427 l2cap_conn_unreliable(conn, ECOMM);
4430 /* Start fragment always begin with Basic L2CAP header */
4431 if (skb->len < L2CAP_HDR_SIZE) {
4432 BT_ERR("Frame is too short (len %d)", skb->len);
4433 l2cap_conn_unreliable(conn, ECOMM);
4437 hdr = (struct l2cap_hdr *) skb->data;
4438 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4439 cid = __le16_to_cpu(hdr->cid);
4441 if (len == skb->len) {
4442 /* Complete frame received */
4443 l2cap_recv_frame(conn, skb);
4447 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4449 if (skb->len > len) {
4450 BT_ERR("Frame is too long (len %d, expected len %d)",
4452 l2cap_conn_unreliable(conn, ECOMM);
/* Pre-check the destination channel's MTU before buffering. */
4456 chan = l2cap_get_chan_by_scid(conn, cid);
4458 if (chan && chan->sk) {
4459 struct sock *sk = chan->sk;
4461 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4462 BT_ERR("Frame exceeding recv MTU (len %d, "
4466 l2cap_conn_unreliable(conn, ECOMM);
4472 /* Allocate skb for the complete frame (with header) */
4473 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4477 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4479 conn->rx_len = len - skb->len;
/* --- continuation fragment path --- */
4481 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4483 if (!conn->rx_len) {
4484 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4485 l2cap_conn_unreliable(conn, ECOMM);
4489 if (skb->len > conn->rx_len) {
4490 BT_ERR("Fragment is too long (len %d, expected %d)",
4491 skb->len, conn->rx_len);
4492 kfree_skb(conn->rx_skb);
4493 conn->rx_skb = NULL;
4495 l2cap_conn_unreliable(conn, ECOMM);
4499 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4501 conn->rx_len -= skb->len;
4503 if (!conn->rx_len) {
4504 /* Complete frame received */
4505 l2cap_recv_frame(conn, conn->rx_skb);
4506 conn->rx_skb = NULL;
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: print one
 * line per global channel (addresses, state, psm, cids, mtus, security,
 * mode) under the chan_list read lock.
 */
4515 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4517 struct l2cap_chan *c;
4519 read_lock_bh(&chan_list_lock);
4521 list_for_each_entry(c, &chan_list, global_l) {
4522 struct sock *sk = c->sk;
4524 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4525 batostr(&bt_sk(sk)->src),
4526 batostr(&bt_sk(sk)->dst),
4527 c->state, __le16_to_cpu(c->psm),
4528 c->scid, c->dcid, c->imtu, c->omtu,
4529 c->sec_level, c->mode);
4532 read_unlock_bh(&chan_list_lock);
/* debugfs open: wire the seq_file single-show helper to l2cap_debugfs_show. */
4537 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4539 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (single_open/seq_file based),
 * and the dentry handle used to remove it on module exit.
 */
4542 static const struct file_operations l2cap_debugfs_fops = {
4543 .open = l2cap_debugfs_open,
4545 .llseek = seq_lseek,
4546 .release = single_release,
4549 static struct dentry *l2cap_debugfs;
/* L2CAP's registration with the HCI core: the callback table invoked by the
 * lower layer for connect/disconnect/security events and inbound ACL data.
 */
4551 static struct hci_proto l2cap_hci_proto = {
4553 .id = HCI_PROTO_L2CAP,
4554 .connect_ind = l2cap_connect_ind,
4555 .connect_cfm = l2cap_connect_cfm,
4556 .disconn_ind = l2cap_disconn_ind,
4557 .disconn_cfm = l2cap_disconn_cfm,
4558 .security_cfm = l2cap_security_cfm,
4559 .recv_acldata = l2cap_recv_acldata
/* Module init: register the socket layer, then the HCI protocol hooks
 * (unwinding the sockets on failure), and finally create the optional
 * debugfs file — a debugfs failure is only logged, not fatal.
 */
4562 int __init l2cap_init(void)
4566 err = l2cap_init_sockets();
4570 err = hci_register_proto(&l2cap_hci_proto);
4572 BT_ERR("L2CAP protocol registration failed");
/* Undo the socket registration done above before bailing out. */
4573 bt_sock_unregister(BTPROTO_L2CAP);
4578 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4579 bt_debugfs, NULL, &l2cap_debugfs_fops);
4581 BT_ERR("Failed to create L2CAP debug file");
4587 l2cap_cleanup_sockets();
/* Module exit: remove the debugfs file, unregister from HCI (logging any
 * failure), and tear down the socket layer.
 */
4591 void l2cap_exit(void)
4593 debugfs_remove(l2cap_debugfs);
4595 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4596 BT_ERR("L2CAP protocol unregistration failed");
4598 l2cap_cleanup_sockets();
/* Runtime-tunable module parameters (mode 0644: root-writable via sysfs). */
4601 module_param(disable_ertm, bool, 0644);
4602 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4604 module_param(enable_hs, bool, 0644);
4605 MODULE_PARM_DESC(enable_hs, "Enable High Speed");