2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
6 Copyright (C) 2011 ProFUSION Embedded Systems
8 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
10 This program is free software; you can redistribute it and/or modify
11 it under the terms of the GNU General Public License version 2 as
12 published by the Free Software Foundation;
14 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
15 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
16 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
17 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
18 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
19 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
20 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
21 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
23 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
24 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
25 SOFTWARE IS DISCLAIMED.
28 /* Bluetooth L2CAP core. */
30 #include <linux/module.h>
32 #include <linux/types.h>
33 #include <linux/capability.h>
34 #include <linux/errno.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/poll.h>
39 #include <linux/fcntl.h>
40 #include <linux/init.h>
41 #include <linux/interrupt.h>
42 #include <linux/socket.h>
43 #include <linux/skbuff.h>
44 #include <linux/list.h>
45 #include <linux/device.h>
46 #include <linux/debugfs.h>
47 #include <linux/seq_file.h>
48 #include <linux/uaccess.h>
49 #include <linux/crc16.h>
52 #include <asm/system.h>
53 #include <asm/unaligned.h>
55 #include <net/bluetooth/bluetooth.h>
56 #include <net/bluetooth/hci_core.h>
57 #include <net/bluetooth/l2cap.h>
58 #include <net/bluetooth/smp.h>
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 /* ---- L2CAP channels ---- */
78 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
82 list_for_each_entry(c, &conn->chan_l, list) {
89 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
93 list_for_each_entry(c, &conn->chan_l, list) {
100 /* Find channel with given SCID.
101 * Returns locked socket */
102 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
104 struct l2cap_chan *c;
106 mutex_lock(&conn->chan_lock);
107 c = __l2cap_get_chan_by_scid(conn, cid);
108 mutex_unlock(&conn->chan_lock);
113 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
115 struct l2cap_chan *c;
117 list_for_each_entry(c, &conn->chan_l, list) {
118 if (c->ident == ident)
124 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
126 struct l2cap_chan *c;
128 mutex_lock(&conn->chan_lock);
129 c = __l2cap_get_chan_by_ident(conn, ident);
130 mutex_unlock(&conn->chan_lock);
135 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
137 struct l2cap_chan *c;
139 list_for_each_entry(c, &chan_list, global_l) {
140 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
146 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
150 write_lock(&chan_list_lock);
152 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
165 for (p = 0x1001; p < 0x1100; p += 2)
166 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
167 chan->psm = cpu_to_le16(p);
168 chan->sport = cpu_to_le16(p);
175 write_unlock(&chan_list_lock);
179 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
181 write_lock(&chan_list_lock);
185 write_unlock(&chan_list_lock);
190 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
192 u16 cid = L2CAP_CID_DYN_START;
194 for (; cid < L2CAP_CID_DYN_END; cid++) {
195 if (!__l2cap_get_chan_by_scid(conn, cid))
202 static void l2cap_state_change(struct l2cap_chan *chan, int state)
204 BT_DBG("chan %p %s -> %s", chan, state_to_string(chan->state),
205 state_to_string(state));
208 chan->ops->state_change(chan->data, state);
211 static void l2cap_chan_timeout(struct work_struct *work)
213 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
215 struct l2cap_conn *conn = chan->conn;
216 struct sock *sk = chan->sk;
219 BT_DBG("chan %p state %s", chan, state_to_string(chan->state));
221 mutex_lock(&conn->chan_lock);
224 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
225 reason = ECONNREFUSED;
226 else if (chan->state == BT_CONNECT &&
227 chan->sec_level != BT_SECURITY_SDP)
228 reason = ECONNREFUSED;
232 l2cap_chan_close(chan, reason);
236 chan->ops->close(chan->data);
237 mutex_unlock(&conn->chan_lock);
239 l2cap_chan_put(chan);
242 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
244 struct l2cap_chan *chan;
246 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
250 mutex_init(&chan->lock);
254 write_lock(&chan_list_lock);
255 list_add(&chan->global_l, &chan_list);
256 write_unlock(&chan_list_lock);
258 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
260 chan->state = BT_OPEN;
262 atomic_set(&chan->refcnt, 1);
264 BT_DBG("sk %p chan %p", sk, chan);
269 void l2cap_chan_destroy(struct l2cap_chan *chan)
271 write_lock(&chan_list_lock);
272 list_del(&chan->global_l);
273 write_unlock(&chan_list_lock);
275 l2cap_chan_put(chan);
278 static void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
280 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
281 chan->psm, chan->dcid);
283 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
287 switch (chan->chan_type) {
288 case L2CAP_CHAN_CONN_ORIENTED:
289 if (conn->hcon->type == LE_LINK) {
291 chan->omtu = L2CAP_LE_DEFAULT_MTU;
292 chan->scid = L2CAP_CID_LE_DATA;
293 chan->dcid = L2CAP_CID_LE_DATA;
295 /* Alloc CID for connection-oriented socket */
296 chan->scid = l2cap_alloc_cid(conn);
297 chan->omtu = L2CAP_DEFAULT_MTU;
301 case L2CAP_CHAN_CONN_LESS:
302 /* Connectionless socket */
303 chan->scid = L2CAP_CID_CONN_LESS;
304 chan->dcid = L2CAP_CID_CONN_LESS;
305 chan->omtu = L2CAP_DEFAULT_MTU;
309 /* Raw socket can send/recv signalling messages only */
310 chan->scid = L2CAP_CID_SIGNALING;
311 chan->dcid = L2CAP_CID_SIGNALING;
312 chan->omtu = L2CAP_DEFAULT_MTU;
315 chan->local_id = L2CAP_BESTEFFORT_ID;
316 chan->local_stype = L2CAP_SERV_BESTEFFORT;
317 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
318 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
319 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
320 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
322 l2cap_chan_hold(chan);
324 mutex_lock(&conn->chan_lock);
325 list_add(&chan->list, &conn->chan_l);
326 mutex_unlock(&conn->chan_lock);
330 * Must be called on the locked socket. */
331 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
333 struct sock *sk = chan->sk;
334 struct l2cap_conn *conn = chan->conn;
335 struct sock *parent = bt_sk(sk)->parent;
337 __clear_chan_timer(chan);
339 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
342 /* Delete from channel list */
343 list_del(&chan->list);
345 l2cap_chan_put(chan);
348 hci_conn_put(conn->hcon);
351 l2cap_state_change(chan, BT_CLOSED);
352 sock_set_flag(sk, SOCK_ZAPPED);
358 bt_accept_unlink(sk);
359 parent->sk_data_ready(parent, 0);
361 sk->sk_state_change(sk);
363 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
364 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
367 skb_queue_purge(&chan->tx_q);
369 if (chan->mode == L2CAP_MODE_ERTM) {
370 struct srej_list *l, *tmp;
372 __clear_retrans_timer(chan);
373 __clear_monitor_timer(chan);
374 __clear_ack_timer(chan);
376 skb_queue_purge(&chan->srej_q);
378 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
385 static void l2cap_chan_cleanup_listen(struct sock *parent)
389 BT_DBG("parent %p", parent);
391 /* Close not yet accepted channels */
392 while ((sk = bt_accept_dequeue(parent, NULL))) {
393 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
395 __clear_chan_timer(chan);
397 l2cap_chan_close(chan, ECONNRESET);
400 chan->ops->close(chan->data);
404 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
406 struct l2cap_conn *conn = chan->conn;
407 struct sock *sk = chan->sk;
409 BT_DBG("chan %p state %s sk %p", chan,
410 state_to_string(chan->state), sk);
412 switch (chan->state) {
414 l2cap_chan_cleanup_listen(sk);
416 l2cap_state_change(chan, BT_CLOSED);
417 sock_set_flag(sk, SOCK_ZAPPED);
422 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
423 conn->hcon->type == ACL_LINK) {
424 __clear_chan_timer(chan);
425 __set_chan_timer(chan, sk->sk_sndtimeo);
426 l2cap_send_disconn_req(conn, chan, reason);
428 l2cap_chan_del(chan, reason);
432 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
433 conn->hcon->type == ACL_LINK) {
434 struct l2cap_conn_rsp rsp;
437 if (bt_sk(sk)->defer_setup)
438 result = L2CAP_CR_SEC_BLOCK;
440 result = L2CAP_CR_BAD_PSM;
441 l2cap_state_change(chan, BT_DISCONN);
443 rsp.scid = cpu_to_le16(chan->dcid);
444 rsp.dcid = cpu_to_le16(chan->scid);
445 rsp.result = cpu_to_le16(result);
446 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
447 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
451 l2cap_chan_del(chan, reason);
456 l2cap_chan_del(chan, reason);
460 sock_set_flag(sk, SOCK_ZAPPED);
465 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
467 if (chan->chan_type == L2CAP_CHAN_RAW) {
468 switch (chan->sec_level) {
469 case BT_SECURITY_HIGH:
470 return HCI_AT_DEDICATED_BONDING_MITM;
471 case BT_SECURITY_MEDIUM:
472 return HCI_AT_DEDICATED_BONDING;
474 return HCI_AT_NO_BONDING;
476 } else if (chan->psm == cpu_to_le16(0x0001)) {
477 if (chan->sec_level == BT_SECURITY_LOW)
478 chan->sec_level = BT_SECURITY_SDP;
480 if (chan->sec_level == BT_SECURITY_HIGH)
481 return HCI_AT_NO_BONDING_MITM;
483 return HCI_AT_NO_BONDING;
485 switch (chan->sec_level) {
486 case BT_SECURITY_HIGH:
487 return HCI_AT_GENERAL_BONDING_MITM;
488 case BT_SECURITY_MEDIUM:
489 return HCI_AT_GENERAL_BONDING;
491 return HCI_AT_NO_BONDING;
496 /* Service level security */
497 int l2cap_chan_check_security(struct l2cap_chan *chan)
499 struct l2cap_conn *conn = chan->conn;
502 auth_type = l2cap_get_auth_type(chan);
504 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
507 static u8 l2cap_get_ident(struct l2cap_conn *conn)
511 /* Get next available identificator.
512 * 1 - 128 are used by kernel.
513 * 129 - 199 are reserved.
514 * 200 - 254 are used by utilities like l2ping, etc.
517 spin_lock(&conn->lock);
519 if (++conn->tx_ident > 128)
524 spin_unlock(&conn->lock);
529 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
531 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
534 BT_DBG("code 0x%2.2x", code);
539 if (lmp_no_flush_capable(conn->hcon->hdev))
540 flags = ACL_START_NO_FLUSH;
544 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
545 skb->priority = HCI_PRIO_MAX;
547 hci_send_acl(conn->hchan, skb, flags);
550 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
552 struct hci_conn *hcon = chan->conn->hcon;
555 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
558 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
559 lmp_no_flush_capable(hcon->hdev))
560 flags = ACL_START_NO_FLUSH;
564 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
565 hci_send_acl(chan->conn->hchan, skb, flags);
568 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
571 struct l2cap_hdr *lh;
572 struct l2cap_conn *conn = chan->conn;
575 if (chan->state != BT_CONNECTED)
578 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
579 hlen = L2CAP_EXT_HDR_SIZE;
581 hlen = L2CAP_ENH_HDR_SIZE;
583 if (chan->fcs == L2CAP_FCS_CRC16)
584 hlen += L2CAP_FCS_SIZE;
586 BT_DBG("chan %p, control 0x%8.8x", chan, control);
588 count = min_t(unsigned int, conn->mtu, hlen);
590 control |= __set_sframe(chan);
592 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
593 control |= __set_ctrl_final(chan);
595 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
596 control |= __set_ctrl_poll(chan);
598 skb = bt_skb_alloc(count, GFP_ATOMIC);
602 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
603 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
604 lh->cid = cpu_to_le16(chan->dcid);
606 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
608 if (chan->fcs == L2CAP_FCS_CRC16) {
609 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
610 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
613 skb->priority = HCI_PRIO_MAX;
614 l2cap_do_send(chan, skb);
617 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
619 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
620 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
621 set_bit(CONN_RNR_SENT, &chan->conn_state);
623 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
625 control |= __set_reqseq(chan, chan->buffer_seq);
627 l2cap_send_sframe(chan, control);
630 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
632 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
635 static void l2cap_do_start(struct l2cap_chan *chan)
637 struct l2cap_conn *conn = chan->conn;
639 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
640 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
643 if (l2cap_chan_check_security(chan) &&
644 __l2cap_no_conn_pending(chan)) {
645 struct l2cap_conn_req req;
646 req.scid = cpu_to_le16(chan->scid);
649 chan->ident = l2cap_get_ident(conn);
650 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
652 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
656 struct l2cap_info_req req;
657 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
659 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
660 conn->info_ident = l2cap_get_ident(conn);
662 schedule_delayed_work(&conn->info_timer,
663 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
665 l2cap_send_cmd(conn, conn->info_ident,
666 L2CAP_INFO_REQ, sizeof(req), &req);
670 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
672 u32 local_feat_mask = l2cap_feat_mask;
674 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
677 case L2CAP_MODE_ERTM:
678 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
679 case L2CAP_MODE_STREAMING:
680 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
686 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
689 struct l2cap_disconn_req req;
696 if (chan->mode == L2CAP_MODE_ERTM) {
697 __clear_retrans_timer(chan);
698 __clear_monitor_timer(chan);
699 __clear_ack_timer(chan);
702 req.dcid = cpu_to_le16(chan->dcid);
703 req.scid = cpu_to_le16(chan->scid);
704 l2cap_send_cmd(conn, l2cap_get_ident(conn),
705 L2CAP_DISCONN_REQ, sizeof(req), &req);
707 l2cap_state_change(chan, BT_DISCONN);
711 /* ---- L2CAP connections ---- */
712 static void l2cap_conn_start(struct l2cap_conn *conn)
714 struct l2cap_chan *chan, *tmp;
716 BT_DBG("conn %p", conn);
718 mutex_lock(&conn->chan_lock);
720 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
721 struct sock *sk = chan->sk;
725 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
730 if (chan->state == BT_CONNECT) {
731 struct l2cap_conn_req req;
733 if (!l2cap_chan_check_security(chan) ||
734 !__l2cap_no_conn_pending(chan)) {
739 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
740 && test_bit(CONF_STATE2_DEVICE,
741 &chan->conf_state)) {
742 /* l2cap_chan_close() calls list_del(chan)
743 * so release the lock */
744 l2cap_chan_close(chan, ECONNRESET);
749 req.scid = cpu_to_le16(chan->scid);
752 chan->ident = l2cap_get_ident(conn);
753 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
755 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
758 } else if (chan->state == BT_CONNECT2) {
759 struct l2cap_conn_rsp rsp;
761 rsp.scid = cpu_to_le16(chan->dcid);
762 rsp.dcid = cpu_to_le16(chan->scid);
764 if (l2cap_chan_check_security(chan)) {
765 if (bt_sk(sk)->defer_setup) {
766 struct sock *parent = bt_sk(sk)->parent;
767 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
768 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
770 parent->sk_data_ready(parent, 0);
773 l2cap_state_change(chan, BT_CONFIG);
774 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
775 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
778 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
779 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
782 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
785 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
786 rsp.result != L2CAP_CR_SUCCESS) {
791 set_bit(CONF_REQ_SENT, &chan->conf_state);
792 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
793 l2cap_build_conf_req(chan, buf), buf);
794 chan->num_conf_req++;
800 mutex_unlock(&conn->chan_lock);
803 /* Find socket with cid and source bdaddr.
804 * Returns closest match, locked.
806 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
808 struct l2cap_chan *c, *c1 = NULL;
810 read_lock(&chan_list_lock);
812 list_for_each_entry(c, &chan_list, global_l) {
813 struct sock *sk = c->sk;
815 if (state && c->state != state)
818 if (c->scid == cid) {
820 if (!bacmp(&bt_sk(sk)->src, src)) {
821 read_unlock(&chan_list_lock);
826 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
831 read_unlock(&chan_list_lock);
836 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
838 struct sock *parent, *sk;
839 struct l2cap_chan *chan, *pchan;
843 /* Check if we have socket listening on cid */
844 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
853 /* Check for backlog size */
854 if (sk_acceptq_is_full(parent)) {
855 BT_DBG("backlog full %d", parent->sk_ack_backlog);
859 chan = pchan->ops->new_connection(pchan->data);
865 hci_conn_hold(conn->hcon);
867 bacpy(&bt_sk(sk)->src, conn->src);
868 bacpy(&bt_sk(sk)->dst, conn->dst);
870 bt_accept_enqueue(parent, sk);
872 l2cap_chan_add(conn, chan);
874 __set_chan_timer(chan, sk->sk_sndtimeo);
876 l2cap_state_change(chan, BT_CONNECTED);
877 parent->sk_data_ready(parent, 0);
880 release_sock(parent);
883 static void l2cap_chan_ready(struct l2cap_chan *chan)
885 struct sock *sk = chan->sk;
886 struct sock *parent = bt_sk(sk)->parent;
888 BT_DBG("sk %p, parent %p", sk, parent);
890 chan->conf_state = 0;
891 __clear_chan_timer(chan);
893 l2cap_state_change(chan, BT_CONNECTED);
894 sk->sk_state_change(sk);
897 parent->sk_data_ready(parent, 0);
900 static void l2cap_conn_ready(struct l2cap_conn *conn)
902 struct l2cap_chan *chan;
904 BT_DBG("conn %p", conn);
906 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
907 l2cap_le_conn_ready(conn);
909 if (conn->hcon->out && conn->hcon->type == LE_LINK)
910 smp_conn_security(conn, conn->hcon->pending_sec_level);
912 mutex_lock(&conn->chan_lock);
914 list_for_each_entry(chan, &conn->chan_l, list) {
915 struct sock *sk = chan->sk;
919 if (conn->hcon->type == LE_LINK) {
920 if (smp_conn_security(conn, chan->sec_level))
921 l2cap_chan_ready(chan);
923 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
924 __clear_chan_timer(chan);
925 l2cap_state_change(chan, BT_CONNECTED);
926 sk->sk_state_change(sk);
928 } else if (chan->state == BT_CONNECT)
929 l2cap_do_start(chan);
934 mutex_unlock(&conn->chan_lock);
937 /* Notify sockets that we cannot guaranty reliability anymore */
938 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
940 struct l2cap_chan *chan;
942 BT_DBG("conn %p", conn);
944 mutex_lock(&conn->chan_lock);
946 list_for_each_entry(chan, &conn->chan_l, list) {
947 struct sock *sk = chan->sk;
949 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
953 mutex_unlock(&conn->chan_lock);
956 static void l2cap_info_timeout(struct work_struct *work)
958 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
961 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
962 conn->info_ident = 0;
964 l2cap_conn_start(conn);
967 static void l2cap_conn_del(struct hci_conn *hcon, int err)
969 struct l2cap_conn *conn = hcon->l2cap_data;
970 struct l2cap_chan *chan, *l;
976 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
978 kfree_skb(conn->rx_skb);
980 mutex_lock(&conn->chan_lock);
983 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
986 l2cap_chan_del(chan, err);
988 chan->ops->close(chan->data);
991 mutex_unlock(&conn->chan_lock);
993 hci_chan_del(conn->hchan);
995 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
996 cancel_delayed_work_sync(&conn->info_timer);
998 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->flags)) {
999 cancel_delayed_work_sync(&conn->security_timer);
1000 smp_chan_destroy(conn);
1003 hcon->l2cap_data = NULL;
1007 static void security_timeout(struct work_struct *work)
1009 struct l2cap_conn *conn = container_of(work, struct l2cap_conn,
1010 security_timer.work);
1012 l2cap_conn_del(conn->hcon, ETIMEDOUT);
1015 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1017 struct l2cap_conn *conn = hcon->l2cap_data;
1018 struct hci_chan *hchan;
1023 hchan = hci_chan_create(hcon);
1027 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1029 hci_chan_del(hchan);
1033 hcon->l2cap_data = conn;
1035 conn->hchan = hchan;
1037 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1039 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1040 conn->mtu = hcon->hdev->le_mtu;
1042 conn->mtu = hcon->hdev->acl_mtu;
1044 conn->src = &hcon->hdev->bdaddr;
1045 conn->dst = &hcon->dst;
1047 conn->feat_mask = 0;
1049 spin_lock_init(&conn->lock);
1050 mutex_init(&conn->chan_lock);
1052 INIT_LIST_HEAD(&conn->chan_l);
1054 if (hcon->type == LE_LINK)
1055 INIT_DELAYED_WORK(&conn->security_timer, security_timeout);
1057 INIT_DELAYED_WORK(&conn->info_timer, l2cap_info_timeout);
1059 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
1064 /* ---- Socket interface ---- */
1066 /* Find socket with psm and source bdaddr.
1067 * Returns closest match.
1069 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1071 struct l2cap_chan *c, *c1 = NULL;
1073 read_lock(&chan_list_lock);
1075 list_for_each_entry(c, &chan_list, global_l) {
1076 struct sock *sk = c->sk;
1078 if (state && c->state != state)
1081 if (c->psm == psm) {
1083 if (!bacmp(&bt_sk(sk)->src, src)) {
1084 read_unlock(&chan_list_lock);
1089 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1094 read_unlock(&chan_list_lock);
1099 int l2cap_chan_connect(struct l2cap_chan *chan, __le16 psm, u16 cid, bdaddr_t *dst)
1101 struct sock *sk = chan->sk;
1102 bdaddr_t *src = &bt_sk(sk)->src;
1103 struct l2cap_conn *conn;
1104 struct hci_conn *hcon;
1105 struct hci_dev *hdev;
1109 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1112 hdev = hci_get_route(dst, src);
1114 return -EHOSTUNREACH;
1120 /* PSM must be odd and lsb of upper byte must be 0 */
1121 if ((__le16_to_cpu(psm) & 0x0101) != 0x0001 && !cid &&
1122 chan->chan_type != L2CAP_CHAN_RAW) {
1127 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED && !(psm || cid)) {
1132 switch (chan->mode) {
1133 case L2CAP_MODE_BASIC:
1135 case L2CAP_MODE_ERTM:
1136 case L2CAP_MODE_STREAMING:
1145 switch (sk->sk_state) {
1149 /* Already connecting */
1154 /* Already connected */
1168 /* Set destination address and psm */
1169 bacpy(&bt_sk(sk)->dst, dst);
1173 auth_type = l2cap_get_auth_type(chan);
1175 if (chan->dcid == L2CAP_CID_LE_DATA)
1176 hcon = hci_connect(hdev, LE_LINK, dst,
1177 chan->sec_level, auth_type);
1179 hcon = hci_connect(hdev, ACL_LINK, dst,
1180 chan->sec_level, auth_type);
1183 err = PTR_ERR(hcon);
1187 conn = l2cap_conn_add(hcon, 0);
1194 /* Update source addr of the socket */
1195 bacpy(src, conn->src);
1197 l2cap_chan_add(conn, chan);
1199 l2cap_state_change(chan, BT_CONNECT);
1200 __set_chan_timer(chan, sk->sk_sndtimeo);
1202 if (hcon->state == BT_CONNECTED) {
1203 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1204 __clear_chan_timer(chan);
1205 if (l2cap_chan_check_security(chan))
1206 l2cap_state_change(chan, BT_CONNECTED);
1208 l2cap_do_start(chan);
1214 hci_dev_unlock(hdev);
1219 int __l2cap_wait_ack(struct sock *sk)
1221 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1222 DECLARE_WAITQUEUE(wait, current);
1226 add_wait_queue(sk_sleep(sk), &wait);
1227 set_current_state(TASK_INTERRUPTIBLE);
1228 while (chan->unacked_frames > 0 && chan->conn) {
1232 if (signal_pending(current)) {
1233 err = sock_intr_errno(timeo);
1238 timeo = schedule_timeout(timeo);
1240 set_current_state(TASK_INTERRUPTIBLE);
1242 err = sock_error(sk);
1246 set_current_state(TASK_RUNNING);
1247 remove_wait_queue(sk_sleep(sk), &wait);
1251 static void l2cap_monitor_timeout(struct work_struct *work)
1253 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1254 monitor_timer.work);
1255 struct sock *sk = chan->sk;
1257 BT_DBG("chan %p", chan);
1260 if (chan->retry_count >= chan->remote_max_tx) {
1261 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1266 chan->retry_count++;
1267 __set_monitor_timer(chan);
1269 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1273 static void l2cap_retrans_timeout(struct work_struct *work)
1275 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1276 retrans_timer.work);
1277 struct sock *sk = chan->sk;
1279 BT_DBG("chan %p", chan);
1282 chan->retry_count = 1;
1283 __set_monitor_timer(chan);
1285 set_bit(CONN_WAIT_F, &chan->conn_state);
1287 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
1291 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1293 struct sk_buff *skb;
1295 while ((skb = skb_peek(&chan->tx_q)) &&
1296 chan->unacked_frames) {
1297 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1300 skb = skb_dequeue(&chan->tx_q);
1303 chan->unacked_frames--;
1306 if (!chan->unacked_frames)
1307 __clear_retrans_timer(chan);
1310 static void l2cap_streaming_send(struct l2cap_chan *chan)
1312 struct sk_buff *skb;
1316 while ((skb = skb_dequeue(&chan->tx_q))) {
1317 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1318 control |= __set_txseq(chan, chan->next_tx_seq);
1319 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1321 if (chan->fcs == L2CAP_FCS_CRC16) {
1322 fcs = crc16(0, (u8 *)skb->data,
1323 skb->len - L2CAP_FCS_SIZE);
1324 put_unaligned_le16(fcs,
1325 skb->data + skb->len - L2CAP_FCS_SIZE);
1328 l2cap_do_send(chan, skb);
1330 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1334 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1336 struct sk_buff *skb, *tx_skb;
1340 skb = skb_peek(&chan->tx_q);
1344 while (bt_cb(skb)->tx_seq != tx_seq) {
1345 if (skb_queue_is_last(&chan->tx_q, skb))
1348 skb = skb_queue_next(&chan->tx_q, skb);
1351 if (chan->remote_max_tx &&
1352 bt_cb(skb)->retries == chan->remote_max_tx) {
1353 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1357 tx_skb = skb_clone(skb, GFP_ATOMIC);
1358 bt_cb(skb)->retries++;
1360 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1361 control &= __get_sar_mask(chan);
1363 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1364 control |= __set_ctrl_final(chan);
1366 control |= __set_reqseq(chan, chan->buffer_seq);
1367 control |= __set_txseq(chan, tx_seq);
1369 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1371 if (chan->fcs == L2CAP_FCS_CRC16) {
1372 fcs = crc16(0, (u8 *)tx_skb->data,
1373 tx_skb->len - L2CAP_FCS_SIZE);
1374 put_unaligned_le16(fcs,
1375 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1378 l2cap_do_send(chan, tx_skb);
1381 static int l2cap_ertm_send(struct l2cap_chan *chan)
1383 struct sk_buff *skb, *tx_skb;
1388 if (chan->state != BT_CONNECTED)
1391 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1393 if (chan->remote_max_tx &&
1394 bt_cb(skb)->retries == chan->remote_max_tx) {
1395 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1399 tx_skb = skb_clone(skb, GFP_ATOMIC);
1401 bt_cb(skb)->retries++;
1403 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1404 control &= __get_sar_mask(chan);
1406 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1407 control |= __set_ctrl_final(chan);
1409 control |= __set_reqseq(chan, chan->buffer_seq);
1410 control |= __set_txseq(chan, chan->next_tx_seq);
1412 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1414 if (chan->fcs == L2CAP_FCS_CRC16) {
1415 fcs = crc16(0, (u8 *)skb->data,
1416 tx_skb->len - L2CAP_FCS_SIZE);
1417 put_unaligned_le16(fcs, skb->data +
1418 tx_skb->len - L2CAP_FCS_SIZE);
1421 l2cap_do_send(chan, tx_skb);
1423 __set_retrans_timer(chan);
1425 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1427 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1429 if (bt_cb(skb)->retries == 1) {
1430 chan->unacked_frames++;
1433 __clear_ack_timer(chan);
1436 chan->frames_sent++;
1438 if (skb_queue_is_last(&chan->tx_q, skb))
1439 chan->tx_send_head = NULL;
1441 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
1447 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1451 if (!skb_queue_empty(&chan->tx_q))
1452 chan->tx_send_head = chan->tx_q.next;
1454 chan->next_tx_seq = chan->expected_ack_seq;
1455 ret = l2cap_ertm_send(chan);
1459 static void __l2cap_send_ack(struct l2cap_chan *chan)
1463 control |= __set_reqseq(chan, chan->buffer_seq);
1465 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1466 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1467 set_bit(CONN_RNR_SENT, &chan->conn_state);
1468 l2cap_send_sframe(chan, control);
1472 if (l2cap_ertm_send(chan) > 0)
1475 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1476 l2cap_send_sframe(chan, control);
1479 static void l2cap_send_ack(struct l2cap_chan *chan)
1481 __clear_ack_timer(chan);
1482 __l2cap_send_ack(chan);
1485 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1487 struct srej_list *tail;
1490 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1491 control |= __set_ctrl_final(chan);
1493 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1494 control |= __set_reqseq(chan, tail->tx_seq);
1496 l2cap_send_sframe(chan, control);
1499 static inline int l2cap_skbuff_fromiovec(struct l2cap_chan *chan, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1501 struct l2cap_conn *conn = chan->conn;
1502 struct sk_buff **frag;
1505 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1511 /* Continuation fragments (no L2CAP header) */
1512 frag = &skb_shinfo(skb)->frag_list;
1514 count = min_t(unsigned int, conn->mtu, len);
1516 *frag = chan->ops->alloc_skb(chan, count,
1517 msg->msg_flags & MSG_DONTWAIT, &err);
1521 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1524 (*frag)->priority = skb->priority;
1529 frag = &(*frag)->next;
1535 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1536 struct msghdr *msg, size_t len,
1539 struct l2cap_conn *conn = chan->conn;
1540 struct sk_buff *skb;
1541 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1542 struct l2cap_hdr *lh;
1544 BT_DBG("chan %p len %d priority %u", chan, (int)len, priority);
1546 count = min_t(unsigned int, (conn->mtu - hlen), len);
1548 skb = chan->ops->alloc_skb(chan, count + hlen,
1549 msg->msg_flags & MSG_DONTWAIT, &err);
1552 return ERR_PTR(err);
1554 skb->priority = priority;
1556 /* Create L2CAP header */
1557 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1558 lh->cid = cpu_to_le16(chan->dcid);
1559 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1560 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1562 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1563 if (unlikely(err < 0)) {
1565 return ERR_PTR(err);
1570 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1571 struct msghdr *msg, size_t len,
1574 struct l2cap_conn *conn = chan->conn;
1575 struct sk_buff *skb;
1576 int err, count, hlen = L2CAP_HDR_SIZE;
1577 struct l2cap_hdr *lh;
1579 BT_DBG("chan %p len %d", chan, (int)len);
1581 count = min_t(unsigned int, (conn->mtu - hlen), len);
1583 skb = chan->ops->alloc_skb(chan, count + hlen,
1584 msg->msg_flags & MSG_DONTWAIT, &err);
1587 return ERR_PTR(err);
1589 skb->priority = priority;
1591 /* Create L2CAP header */
1592 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1593 lh->cid = cpu_to_le16(chan->dcid);
1594 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1596 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1597 if (unlikely(err < 0)) {
1599 return ERR_PTR(err);
1604 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1605 struct msghdr *msg, size_t len,
1606 u32 control, u16 sdulen)
1608 struct l2cap_conn *conn = chan->conn;
1609 struct sk_buff *skb;
1610 int err, count, hlen;
1611 struct l2cap_hdr *lh;
1613 BT_DBG("chan %p len %d", chan, (int)len);
1616 return ERR_PTR(-ENOTCONN);
1618 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1619 hlen = L2CAP_EXT_HDR_SIZE;
1621 hlen = L2CAP_ENH_HDR_SIZE;
1624 hlen += L2CAP_SDULEN_SIZE;
1626 if (chan->fcs == L2CAP_FCS_CRC16)
1627 hlen += L2CAP_FCS_SIZE;
1629 count = min_t(unsigned int, (conn->mtu - hlen), len);
1631 skb = chan->ops->alloc_skb(chan, count + hlen,
1632 msg->msg_flags & MSG_DONTWAIT, &err);
1635 return ERR_PTR(err);
1637 /* Create L2CAP header */
1638 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1639 lh->cid = cpu_to_le16(chan->dcid);
1640 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1642 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1645 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1647 err = l2cap_skbuff_fromiovec(chan, msg, len, count, skb);
1648 if (unlikely(err < 0)) {
1650 return ERR_PTR(err);
1653 if (chan->fcs == L2CAP_FCS_CRC16)
1654 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1656 bt_cb(skb)->retries = 0;
1660 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1662 struct sk_buff *skb;
1663 struct sk_buff_head sar_queue;
1667 skb_queue_head_init(&sar_queue);
1668 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1669 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1671 return PTR_ERR(skb);
1673 __skb_queue_tail(&sar_queue, skb);
1674 len -= chan->remote_mps;
1675 size += chan->remote_mps;
1680 if (len > chan->remote_mps) {
1681 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1682 buflen = chan->remote_mps;
1684 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1688 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
1690 skb_queue_purge(&sar_queue);
1691 return PTR_ERR(skb);
1694 __skb_queue_tail(&sar_queue, skb);
1698 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1699 if (chan->tx_send_head == NULL)
1700 chan->tx_send_head = sar_queue.next;
1705 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1708 struct sk_buff *skb;
1712 /* Connectionless channel */
1713 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1714 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1716 return PTR_ERR(skb);
1718 l2cap_do_send(chan, skb);
1722 switch (chan->mode) {
1723 case L2CAP_MODE_BASIC:
1724 /* Check outgoing MTU */
1725 if (len > chan->omtu)
1728 /* Create a basic PDU */
1729 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1731 return PTR_ERR(skb);
1733 l2cap_do_send(chan, skb);
1737 case L2CAP_MODE_ERTM:
1738 case L2CAP_MODE_STREAMING:
1739 /* Entire SDU fits into one PDU */
1740 if (len <= chan->remote_mps) {
1741 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1742 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1745 return PTR_ERR(skb);
1747 __skb_queue_tail(&chan->tx_q, skb);
1749 if (chan->tx_send_head == NULL)
1750 chan->tx_send_head = skb;
1753 /* Segment SDU into multiples PDUs */
1754 err = l2cap_sar_segment_sdu(chan, msg, len);
1759 if (chan->mode == L2CAP_MODE_STREAMING) {
1760 l2cap_streaming_send(chan);
1765 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1766 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1771 err = l2cap_ertm_send(chan);
1778 BT_DBG("bad state %1.1x", chan->mode);
1785 /* Copy frame to all raw sockets on that connection */
1786 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1788 struct sk_buff *nskb;
1789 struct l2cap_chan *chan;
1791 BT_DBG("conn %p", conn);
1793 mutex_lock(&conn->chan_lock);
1795 list_for_each_entry(chan, &conn->chan_l, list) {
1796 struct sock *sk = chan->sk;
1797 if (chan->chan_type != L2CAP_CHAN_RAW)
1800 /* Don't send frame to the socket it came from */
1803 nskb = skb_clone(skb, GFP_ATOMIC);
1807 if (chan->ops->recv(chan->data, nskb))
1811 mutex_unlock(&conn->chan_lock);
1814 /* ---- L2CAP signalling commands ---- */
1815 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1816 u8 code, u8 ident, u16 dlen, void *data)
1818 struct sk_buff *skb, **frag;
1819 struct l2cap_cmd_hdr *cmd;
1820 struct l2cap_hdr *lh;
1823 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1824 conn, code, ident, dlen);
1826 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1827 count = min_t(unsigned int, conn->mtu, len);
1829 skb = bt_skb_alloc(count, GFP_ATOMIC);
1833 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1834 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1836 if (conn->hcon->type == LE_LINK)
1837 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1839 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1841 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1844 cmd->len = cpu_to_le16(dlen);
1847 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1848 memcpy(skb_put(skb, count), data, count);
1854 /* Continuation fragments (no L2CAP header) */
1855 frag = &skb_shinfo(skb)->frag_list;
1857 count = min_t(unsigned int, conn->mtu, len);
1859 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1863 memcpy(skb_put(*frag, count), data, count);
1868 frag = &(*frag)->next;
1878 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1880 struct l2cap_conf_opt *opt = *ptr;
1883 len = L2CAP_CONF_OPT_SIZE + opt->len;
1891 *val = *((u8 *) opt->val);
1895 *val = get_unaligned_le16(opt->val);
1899 *val = get_unaligned_le32(opt->val);
1903 *val = (unsigned long) opt->val;
1907 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
1911 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1913 struct l2cap_conf_opt *opt = *ptr;
1915 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1922 *((u8 *) opt->val) = val;
1926 put_unaligned_le16(val, opt->val);
1930 put_unaligned_le32(val, opt->val);
1934 memcpy(opt->val, (void *) val, len);
1938 *ptr += L2CAP_CONF_OPT_SIZE + len;
1941 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1943 struct l2cap_conf_efs efs;
1945 switch (chan->mode) {
1946 case L2CAP_MODE_ERTM:
1947 efs.id = chan->local_id;
1948 efs.stype = chan->local_stype;
1949 efs.msdu = cpu_to_le16(chan->local_msdu);
1950 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1951 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1952 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1955 case L2CAP_MODE_STREAMING:
1957 efs.stype = L2CAP_SERV_BESTEFFORT;
1958 efs.msdu = cpu_to_le16(chan->local_msdu);
1959 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1968 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1969 (unsigned long) &efs);
1972 static void l2cap_ack_timeout(struct work_struct *work)
1974 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1977 BT_DBG("chan %p", chan);
1979 lock_sock(chan->sk);
1980 __l2cap_send_ack(chan);
1981 release_sock(chan->sk);
1983 l2cap_chan_put(chan);
1986 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1988 chan->expected_ack_seq = 0;
1989 chan->unacked_frames = 0;
1990 chan->buffer_seq = 0;
1991 chan->num_acked = 0;
1992 chan->frames_sent = 0;
1994 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
1995 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
1996 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
1998 skb_queue_head_init(&chan->srej_q);
2000 INIT_LIST_HEAD(&chan->srej_l);
2003 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2006 case L2CAP_MODE_STREAMING:
2007 case L2CAP_MODE_ERTM:
2008 if (l2cap_mode_supported(mode, remote_feat_mask))
2012 return L2CAP_MODE_BASIC;
2016 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
2018 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
2021 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2023 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
2026 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2028 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2029 __l2cap_ews_supported(chan)) {
2030 /* use extended control field */
2031 set_bit(FLAG_EXT_CTRL, &chan->flags);
2032 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2034 chan->tx_win = min_t(u16, chan->tx_win,
2035 L2CAP_DEFAULT_TX_WINDOW);
2036 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
2040 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2042 struct l2cap_conf_req *req = data;
2043 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2044 void *ptr = req->data;
2047 BT_DBG("chan %p", chan);
2049 if (chan->num_conf_req || chan->num_conf_rsp)
2052 switch (chan->mode) {
2053 case L2CAP_MODE_STREAMING:
2054 case L2CAP_MODE_ERTM:
2055 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2058 if (__l2cap_efs_supported(chan))
2059 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2063 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2068 if (chan->imtu != L2CAP_DEFAULT_MTU)
2069 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2071 switch (chan->mode) {
2072 case L2CAP_MODE_BASIC:
2073 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2074 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2077 rfc.mode = L2CAP_MODE_BASIC;
2079 rfc.max_transmit = 0;
2080 rfc.retrans_timeout = 0;
2081 rfc.monitor_timeout = 0;
2082 rfc.max_pdu_size = 0;
2084 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2085 (unsigned long) &rfc);
2088 case L2CAP_MODE_ERTM:
2089 rfc.mode = L2CAP_MODE_ERTM;
2090 rfc.max_transmit = chan->max_tx;
2091 rfc.retrans_timeout = 0;
2092 rfc.monitor_timeout = 0;
2094 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2095 L2CAP_EXT_HDR_SIZE -
2098 rfc.max_pdu_size = cpu_to_le16(size);
2100 l2cap_txwin_setup(chan);
2102 rfc.txwin_size = min_t(u16, chan->tx_win,
2103 L2CAP_DEFAULT_TX_WINDOW);
2105 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2106 (unsigned long) &rfc);
2108 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2109 l2cap_add_opt_efs(&ptr, chan);
2111 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2114 if (chan->fcs == L2CAP_FCS_NONE ||
2115 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2116 chan->fcs = L2CAP_FCS_NONE;
2117 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2120 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2121 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2125 case L2CAP_MODE_STREAMING:
2126 rfc.mode = L2CAP_MODE_STREAMING;
2128 rfc.max_transmit = 0;
2129 rfc.retrans_timeout = 0;
2130 rfc.monitor_timeout = 0;
2132 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2133 L2CAP_EXT_HDR_SIZE -
2136 rfc.max_pdu_size = cpu_to_le16(size);
2138 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2139 (unsigned long) &rfc);
2141 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2142 l2cap_add_opt_efs(&ptr, chan);
2144 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2147 if (chan->fcs == L2CAP_FCS_NONE ||
2148 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2149 chan->fcs = L2CAP_FCS_NONE;
2150 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2155 req->dcid = cpu_to_le16(chan->dcid);
2156 req->flags = cpu_to_le16(0);
2161 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2163 struct l2cap_conf_rsp *rsp = data;
2164 void *ptr = rsp->data;
2165 void *req = chan->conf_req;
2166 int len = chan->conf_len;
2167 int type, hint, olen;
2169 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2170 struct l2cap_conf_efs efs;
2172 u16 mtu = L2CAP_DEFAULT_MTU;
2173 u16 result = L2CAP_CONF_SUCCESS;
2176 BT_DBG("chan %p", chan);
2178 while (len >= L2CAP_CONF_OPT_SIZE) {
2179 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2181 hint = type & L2CAP_CONF_HINT;
2182 type &= L2CAP_CONF_MASK;
2185 case L2CAP_CONF_MTU:
2189 case L2CAP_CONF_FLUSH_TO:
2190 chan->flush_to = val;
2193 case L2CAP_CONF_QOS:
2196 case L2CAP_CONF_RFC:
2197 if (olen == sizeof(rfc))
2198 memcpy(&rfc, (void *) val, olen);
2201 case L2CAP_CONF_FCS:
2202 if (val == L2CAP_FCS_NONE)
2203 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2206 case L2CAP_CONF_EFS:
2208 if (olen == sizeof(efs))
2209 memcpy(&efs, (void *) val, olen);
2212 case L2CAP_CONF_EWS:
2214 return -ECONNREFUSED;
2216 set_bit(FLAG_EXT_CTRL, &chan->flags);
2217 set_bit(CONF_EWS_RECV, &chan->conf_state);
2218 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2219 chan->remote_tx_win = val;
2226 result = L2CAP_CONF_UNKNOWN;
2227 *((u8 *) ptr++) = type;
2232 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2235 switch (chan->mode) {
2236 case L2CAP_MODE_STREAMING:
2237 case L2CAP_MODE_ERTM:
2238 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2239 chan->mode = l2cap_select_mode(rfc.mode,
2240 chan->conn->feat_mask);
2245 if (__l2cap_efs_supported(chan))
2246 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2248 return -ECONNREFUSED;
2251 if (chan->mode != rfc.mode)
2252 return -ECONNREFUSED;
2258 if (chan->mode != rfc.mode) {
2259 result = L2CAP_CONF_UNACCEPT;
2260 rfc.mode = chan->mode;
2262 if (chan->num_conf_rsp == 1)
2263 return -ECONNREFUSED;
2265 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2266 sizeof(rfc), (unsigned long) &rfc);
2269 if (result == L2CAP_CONF_SUCCESS) {
2270 /* Configure output options and let the other side know
2271 * which ones we don't like. */
2273 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2274 result = L2CAP_CONF_UNACCEPT;
2277 set_bit(CONF_MTU_DONE, &chan->conf_state);
2279 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2282 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2283 efs.stype != L2CAP_SERV_NOTRAFIC &&
2284 efs.stype != chan->local_stype) {
2286 result = L2CAP_CONF_UNACCEPT;
2288 if (chan->num_conf_req >= 1)
2289 return -ECONNREFUSED;
2291 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2293 (unsigned long) &efs);
2295 /* Send PENDING Conf Rsp */
2296 result = L2CAP_CONF_PENDING;
2297 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2302 case L2CAP_MODE_BASIC:
2303 chan->fcs = L2CAP_FCS_NONE;
2304 set_bit(CONF_MODE_DONE, &chan->conf_state);
2307 case L2CAP_MODE_ERTM:
2308 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2309 chan->remote_tx_win = rfc.txwin_size;
2311 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2313 chan->remote_max_tx = rfc.max_transmit;
2315 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2317 L2CAP_EXT_HDR_SIZE -
2320 rfc.max_pdu_size = cpu_to_le16(size);
2321 chan->remote_mps = size;
2323 rfc.retrans_timeout =
2324 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2325 rfc.monitor_timeout =
2326 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2328 set_bit(CONF_MODE_DONE, &chan->conf_state);
2330 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2331 sizeof(rfc), (unsigned long) &rfc);
2333 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2334 chan->remote_id = efs.id;
2335 chan->remote_stype = efs.stype;
2336 chan->remote_msdu = le16_to_cpu(efs.msdu);
2337 chan->remote_flush_to =
2338 le32_to_cpu(efs.flush_to);
2339 chan->remote_acc_lat =
2340 le32_to_cpu(efs.acc_lat);
2341 chan->remote_sdu_itime =
2342 le32_to_cpu(efs.sdu_itime);
2343 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2344 sizeof(efs), (unsigned long) &efs);
2348 case L2CAP_MODE_STREAMING:
2349 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2351 L2CAP_EXT_HDR_SIZE -
2354 rfc.max_pdu_size = cpu_to_le16(size);
2355 chan->remote_mps = size;
2357 set_bit(CONF_MODE_DONE, &chan->conf_state);
2359 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2360 sizeof(rfc), (unsigned long) &rfc);
2365 result = L2CAP_CONF_UNACCEPT;
2367 memset(&rfc, 0, sizeof(rfc));
2368 rfc.mode = chan->mode;
2371 if (result == L2CAP_CONF_SUCCESS)
2372 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2374 rsp->scid = cpu_to_le16(chan->dcid);
2375 rsp->result = cpu_to_le16(result);
2376 rsp->flags = cpu_to_le16(0x0000);
2381 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2383 struct l2cap_conf_req *req = data;
2384 void *ptr = req->data;
2387 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2388 struct l2cap_conf_efs efs;
2390 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2392 while (len >= L2CAP_CONF_OPT_SIZE) {
2393 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2396 case L2CAP_CONF_MTU:
2397 if (val < L2CAP_DEFAULT_MIN_MTU) {
2398 *result = L2CAP_CONF_UNACCEPT;
2399 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2402 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2405 case L2CAP_CONF_FLUSH_TO:
2406 chan->flush_to = val;
2407 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2411 case L2CAP_CONF_RFC:
2412 if (olen == sizeof(rfc))
2413 memcpy(&rfc, (void *)val, olen);
2415 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2416 rfc.mode != chan->mode)
2417 return -ECONNREFUSED;
2421 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2422 sizeof(rfc), (unsigned long) &rfc);
2425 case L2CAP_CONF_EWS:
2426 chan->tx_win = min_t(u16, val,
2427 L2CAP_DEFAULT_EXT_WINDOW);
2428 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2432 case L2CAP_CONF_EFS:
2433 if (olen == sizeof(efs))
2434 memcpy(&efs, (void *)val, olen);
2436 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2437 efs.stype != L2CAP_SERV_NOTRAFIC &&
2438 efs.stype != chan->local_stype)
2439 return -ECONNREFUSED;
2441 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2442 sizeof(efs), (unsigned long) &efs);
2447 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2448 return -ECONNREFUSED;
2450 chan->mode = rfc.mode;
2452 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2454 case L2CAP_MODE_ERTM:
2455 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2456 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2457 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2459 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2460 chan->local_msdu = le16_to_cpu(efs.msdu);
2461 chan->local_sdu_itime =
2462 le32_to_cpu(efs.sdu_itime);
2463 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2464 chan->local_flush_to =
2465 le32_to_cpu(efs.flush_to);
2469 case L2CAP_MODE_STREAMING:
2470 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2474 req->dcid = cpu_to_le16(chan->dcid);
2475 req->flags = cpu_to_le16(0x0000);
2480 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2482 struct l2cap_conf_rsp *rsp = data;
2483 void *ptr = rsp->data;
2485 BT_DBG("chan %p", chan);
2487 rsp->scid = cpu_to_le16(chan->dcid);
2488 rsp->result = cpu_to_le16(result);
2489 rsp->flags = cpu_to_le16(flags);
2494 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2496 struct l2cap_conn_rsp rsp;
2497 struct l2cap_conn *conn = chan->conn;
2500 rsp.scid = cpu_to_le16(chan->dcid);
2501 rsp.dcid = cpu_to_le16(chan->scid);
2502 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2503 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2504 l2cap_send_cmd(conn, chan->ident,
2505 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2507 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2510 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2511 l2cap_build_conf_req(chan, buf), buf);
2512 chan->num_conf_req++;
2515 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2519 struct l2cap_conf_rfc rfc;
2521 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2523 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2526 while (len >= L2CAP_CONF_OPT_SIZE) {
2527 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2530 case L2CAP_CONF_RFC:
2531 if (olen == sizeof(rfc))
2532 memcpy(&rfc, (void *)val, olen);
2537 /* Use sane default values in case a misbehaving remote device
2538 * did not send an RFC option.
2540 rfc.mode = chan->mode;
2541 rfc.retrans_timeout = cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2542 rfc.monitor_timeout = cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2543 rfc.max_pdu_size = cpu_to_le16(chan->imtu);
2545 BT_ERR("Expected RFC option was not found, using defaults");
2549 case L2CAP_MODE_ERTM:
2550 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2551 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2552 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2554 case L2CAP_MODE_STREAMING:
2555 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2559 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2561 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2563 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2566 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2567 cmd->ident == conn->info_ident) {
2568 cancel_delayed_work(&conn->info_timer);
2570 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2571 conn->info_ident = 0;
2573 l2cap_conn_start(conn);
2579 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2581 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2582 struct l2cap_conn_rsp rsp;
2583 struct l2cap_chan *chan = NULL, *pchan;
2584 struct sock *parent, *sk = NULL;
2585 int result, status = L2CAP_CS_NO_INFO;
2587 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2588 __le16 psm = req->psm;
2590 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2592 /* Check if we have socket listening on psm */
2593 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2595 result = L2CAP_CR_BAD_PSM;
2601 mutex_lock(&conn->chan_lock);
2604 /* Check if the ACL is secure enough (if not SDP) */
2605 if (psm != cpu_to_le16(0x0001) &&
2606 !hci_conn_check_link_mode(conn->hcon)) {
2607 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2608 result = L2CAP_CR_SEC_BLOCK;
2612 result = L2CAP_CR_NO_MEM;
2614 /* Check for backlog size */
2615 if (sk_acceptq_is_full(parent)) {
2616 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2620 chan = pchan->ops->new_connection(pchan->data);
2626 /* Check if we already have channel with that dcid */
2627 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2628 sock_set_flag(sk, SOCK_ZAPPED);
2629 chan->ops->close(chan->data);
2633 hci_conn_hold(conn->hcon);
2635 bacpy(&bt_sk(sk)->src, conn->src);
2636 bacpy(&bt_sk(sk)->dst, conn->dst);
2640 bt_accept_enqueue(parent, sk);
2642 l2cap_chan_add(conn, chan);
2646 __set_chan_timer(chan, sk->sk_sndtimeo);
2648 chan->ident = cmd->ident;
2650 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2651 if (l2cap_chan_check_security(chan)) {
2652 if (bt_sk(sk)->defer_setup) {
2653 l2cap_state_change(chan, BT_CONNECT2);
2654 result = L2CAP_CR_PEND;
2655 status = L2CAP_CS_AUTHOR_PEND;
2656 parent->sk_data_ready(parent, 0);
2658 l2cap_state_change(chan, BT_CONFIG);
2659 result = L2CAP_CR_SUCCESS;
2660 status = L2CAP_CS_NO_INFO;
2663 l2cap_state_change(chan, BT_CONNECT2);
2664 result = L2CAP_CR_PEND;
2665 status = L2CAP_CS_AUTHEN_PEND;
2668 l2cap_state_change(chan, BT_CONNECT2);
2669 result = L2CAP_CR_PEND;
2670 status = L2CAP_CS_NO_INFO;
2674 release_sock(parent);
2675 mutex_unlock(&conn->chan_lock);
2678 rsp.scid = cpu_to_le16(scid);
2679 rsp.dcid = cpu_to_le16(dcid);
2680 rsp.result = cpu_to_le16(result);
2681 rsp.status = cpu_to_le16(status);
2682 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2684 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2685 struct l2cap_info_req info;
2686 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2688 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2689 conn->info_ident = l2cap_get_ident(conn);
2691 schedule_delayed_work(&conn->info_timer,
2692 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2694 l2cap_send_cmd(conn, conn->info_ident,
2695 L2CAP_INFO_REQ, sizeof(info), &info);
2698 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2699 result == L2CAP_CR_SUCCESS) {
2701 set_bit(CONF_REQ_SENT, &chan->conf_state);
2702 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2703 l2cap_build_conf_req(chan, buf), buf);
2704 chan->num_conf_req++;
2710 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2712 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2713 u16 scid, dcid, result, status;
2714 struct l2cap_chan *chan;
2719 scid = __le16_to_cpu(rsp->scid);
2720 dcid = __le16_to_cpu(rsp->dcid);
2721 result = __le16_to_cpu(rsp->result);
2722 status = __le16_to_cpu(rsp->status);
2724 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x",
2725 dcid, scid, result, status);
2727 mutex_lock(&conn->chan_lock);
2730 chan = __l2cap_get_chan_by_scid(conn, scid);
2736 chan = __l2cap_get_chan_by_ident(conn, cmd->ident);
2749 case L2CAP_CR_SUCCESS:
2750 l2cap_state_change(chan, BT_CONFIG);
2753 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2755 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2758 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2759 l2cap_build_conf_req(chan, req), req);
2760 chan->num_conf_req++;
2764 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2768 l2cap_chan_del(chan, ECONNREFUSED);
2775 mutex_unlock(&conn->chan_lock);
2780 static inline void set_default_fcs(struct l2cap_chan *chan)
2782 /* FCS is enabled only in ERTM or streaming mode, if one or both
2785 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2786 chan->fcs = L2CAP_FCS_NONE;
2787 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2788 chan->fcs = L2CAP_FCS_CRC16;
2791 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2793 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2796 struct l2cap_chan *chan;
2800 dcid = __le16_to_cpu(req->dcid);
2801 flags = __le16_to_cpu(req->flags);
2803 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2805 chan = l2cap_get_chan_by_scid(conn, dcid);
2812 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2813 struct l2cap_cmd_rej_cid rej;
2815 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2816 rej.scid = cpu_to_le16(chan->scid);
2817 rej.dcid = cpu_to_le16(chan->dcid);
2819 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2824 /* Reject if config buffer is too small. */
2825 len = cmd_len - sizeof(*req);
2826 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2827 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2828 l2cap_build_conf_rsp(chan, rsp,
2829 L2CAP_CONF_REJECT, flags), rsp);
2834 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2835 chan->conf_len += len;
2837 if (flags & 0x0001) {
2838 /* Incomplete config. Send empty response. */
2839 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2840 l2cap_build_conf_rsp(chan, rsp,
2841 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2845 /* Complete config. */
2846 len = l2cap_parse_conf_req(chan, rsp);
2848 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2852 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2853 chan->num_conf_rsp++;
2855 /* Reset config buffer. */
2858 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2861 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2862 set_default_fcs(chan);
2864 l2cap_state_change(chan, BT_CONNECTED);
2866 chan->next_tx_seq = 0;
2867 chan->expected_tx_seq = 0;
2868 skb_queue_head_init(&chan->tx_q);
2869 if (chan->mode == L2CAP_MODE_ERTM)
2870 l2cap_ertm_init(chan);
2872 l2cap_chan_ready(chan);
2876 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2878 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2879 l2cap_build_conf_req(chan, buf), buf);
2880 chan->num_conf_req++;
2883 /* Got Conf Rsp PENDING from remote side and asume we sent
2884 Conf Rsp PENDING in the code above */
2885 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2886 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2888 /* check compatibility */
2890 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2891 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2893 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2894 l2cap_build_conf_rsp(chan, rsp,
2895 L2CAP_CONF_SUCCESS, 0x0000), rsp);
2903 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2905 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2906 u16 scid, flags, result;
2907 struct l2cap_chan *chan;
2909 int len = cmd->len - sizeof(*rsp);
2911 scid = __le16_to_cpu(rsp->scid);
2912 flags = __le16_to_cpu(rsp->flags);
2913 result = __le16_to_cpu(rsp->result);
2915 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2916 scid, flags, result);
2918 chan = l2cap_get_chan_by_scid(conn, scid);
2926 case L2CAP_CONF_SUCCESS:
2927 l2cap_conf_rfc_get(chan, rsp->data, len);
2928 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2931 case L2CAP_CONF_PENDING:
2932 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2934 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2937 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2940 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2944 /* check compatibility */
2946 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2947 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2949 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2950 l2cap_build_conf_rsp(chan, buf,
2951 L2CAP_CONF_SUCCESS, 0x0000), buf);
2955 case L2CAP_CONF_UNACCEPT:
2956 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2959 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2960 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2964 /* throw out any old stored conf requests */
2965 result = L2CAP_CONF_SUCCESS;
2966 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2969 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2973 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2974 L2CAP_CONF_REQ, len, req);
2975 chan->num_conf_req++;
2976 if (result != L2CAP_CONF_SUCCESS)
2982 sk->sk_err = ECONNRESET;
2983 __set_chan_timer(chan,
2984 msecs_to_jiffies(L2CAP_DISC_REJ_TIMEOUT));
2985 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2992 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2994 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2995 set_default_fcs(chan);
2997 l2cap_state_change(chan, BT_CONNECTED);
2998 chan->next_tx_seq = 0;
2999 chan->expected_tx_seq = 0;
3000 skb_queue_head_init(&chan->tx_q);
3001 if (chan->mode == L2CAP_MODE_ERTM)
3002 l2cap_ertm_init(chan);
3004 l2cap_chan_ready(chan);
3012 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3014 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3015 struct l2cap_disconn_rsp rsp;
3017 struct l2cap_chan *chan;
3020 scid = __le16_to_cpu(req->scid);
3021 dcid = __le16_to_cpu(req->dcid);
3023 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3025 mutex_lock(&conn->chan_lock);
3027 chan = __l2cap_get_chan_by_scid(conn, dcid);
3029 mutex_unlock(&conn->chan_lock);
3036 rsp.dcid = cpu_to_le16(chan->scid);
3037 rsp.scid = cpu_to_le16(chan->dcid);
3038 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3040 sk->sk_shutdown = SHUTDOWN_MASK;
3042 l2cap_chan_del(chan, ECONNRESET);
3045 chan->ops->close(chan->data);
3047 mutex_unlock(&conn->chan_lock);
3052 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3054 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3056 struct l2cap_chan *chan;
3059 scid = __le16_to_cpu(rsp->scid);
3060 dcid = __le16_to_cpu(rsp->dcid);
3062 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3064 mutex_lock(&conn->chan_lock);
3066 chan = __l2cap_get_chan_by_scid(conn, scid);
3068 mutex_unlock(&conn->chan_lock);
3075 l2cap_chan_del(chan, 0);
3078 chan->ops->close(chan->data);
3080 mutex_unlock(&conn->chan_lock);
3085 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3087 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3090 type = __le16_to_cpu(req->type);
3092 BT_DBG("type 0x%4.4x", type);
3094 if (type == L2CAP_IT_FEAT_MASK) {
3096 u32 feat_mask = l2cap_feat_mask;
3097 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3098 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3099 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3101 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3104 feat_mask |= L2CAP_FEAT_EXT_FLOW
3105 | L2CAP_FEAT_EXT_WINDOW;
3107 put_unaligned_le32(feat_mask, rsp->data);
3108 l2cap_send_cmd(conn, cmd->ident,
3109 L2CAP_INFO_RSP, sizeof(buf), buf);
3110 } else if (type == L2CAP_IT_FIXED_CHAN) {
3112 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3115 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3117 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3119 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3120 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3121 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3122 l2cap_send_cmd(conn, cmd->ident,
3123 L2CAP_INFO_RSP, sizeof(buf), buf);
3125 struct l2cap_info_rsp rsp;
3126 rsp.type = cpu_to_le16(type);
3127 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3128 l2cap_send_cmd(conn, cmd->ident,
3129 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3135 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3137 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3140 type = __le16_to_cpu(rsp->type);
3141 result = __le16_to_cpu(rsp->result);
3143 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3145 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3146 if (cmd->ident != conn->info_ident ||
3147 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3150 cancel_delayed_work(&conn->info_timer);
3152 if (result != L2CAP_IR_SUCCESS) {
3153 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3154 conn->info_ident = 0;
3156 l2cap_conn_start(conn);
3161 if (type == L2CAP_IT_FEAT_MASK) {
3162 conn->feat_mask = get_unaligned_le32(rsp->data);
3164 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3165 struct l2cap_info_req req;
3166 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3168 conn->info_ident = l2cap_get_ident(conn);
3170 l2cap_send_cmd(conn, conn->info_ident,
3171 L2CAP_INFO_REQ, sizeof(req), &req);
3173 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3174 conn->info_ident = 0;
3176 l2cap_conn_start(conn);
3178 } else if (type == L2CAP_IT_FIXED_CHAN) {
3179 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3180 conn->info_ident = 0;
3182 l2cap_conn_start(conn);
3188 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3189 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3192 struct l2cap_create_chan_req *req = data;
3193 struct l2cap_create_chan_rsp rsp;
3196 if (cmd_len != sizeof(*req))
3202 psm = le16_to_cpu(req->psm);
3203 scid = le16_to_cpu(req->scid);
3205 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3207 /* Placeholder: Always reject */
3209 rsp.scid = cpu_to_le16(scid);
3210 rsp.result = L2CAP_CR_NO_MEM;
3211 rsp.status = L2CAP_CS_NO_INFO;
3213 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
3219 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3220 struct l2cap_cmd_hdr *cmd, void *data)
3222 BT_DBG("conn %p", conn);
3224 return l2cap_connect_rsp(conn, cmd, data);
3227 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3228 u16 icid, u16 result)
3230 struct l2cap_move_chan_rsp rsp;
3232 BT_DBG("icid %d, result %d", icid, result);
3234 rsp.icid = cpu_to_le16(icid);
3235 rsp.result = cpu_to_le16(result);
3237 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3240 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3241 struct l2cap_chan *chan, u16 icid, u16 result)
3243 struct l2cap_move_chan_cfm cfm;
3246 BT_DBG("icid %d, result %d", icid, result);
3248 ident = l2cap_get_ident(conn);
3250 chan->ident = ident;
3252 cfm.icid = cpu_to_le16(icid);
3253 cfm.result = cpu_to_le16(result);
3255 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3258 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3261 struct l2cap_move_chan_cfm_rsp rsp;
3263 BT_DBG("icid %d", icid);
3265 rsp.icid = cpu_to_le16(icid);
3266 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
3269 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3270 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3272 struct l2cap_move_chan_req *req = data;
3274 u16 result = L2CAP_MR_NOT_ALLOWED;
3276 if (cmd_len != sizeof(*req))
3279 icid = le16_to_cpu(req->icid);
3281 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3286 /* Placeholder: Always refuse */
3287 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
3292 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3293 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3295 struct l2cap_move_chan_rsp *rsp = data;
3298 if (cmd_len != sizeof(*rsp))
3301 icid = le16_to_cpu(rsp->icid);
3302 result = le16_to_cpu(rsp->result);
3304 BT_DBG("icid %d, result %d", icid, result);
3306 /* Placeholder: Always unconfirmed */
3307 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
3312 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3313 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3315 struct l2cap_move_chan_cfm *cfm = data;
3318 if (cmd_len != sizeof(*cfm))
3321 icid = le16_to_cpu(cfm->icid);
3322 result = le16_to_cpu(cfm->result);
3324 BT_DBG("icid %d, result %d", icid, result);
3326 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
3331 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3332 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3334 struct l2cap_move_chan_cfm_rsp *rsp = data;
3337 if (cmd_len != sizeof(*rsp))
3340 icid = le16_to_cpu(rsp->icid);
3342 BT_DBG("icid %d", icid);
3347 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3352 if (min > max || min < 6 || max > 3200)
3355 if (to_multiplier < 10 || to_multiplier > 3200)
3358 if (max >= to_multiplier * 8)
3361 max_latency = (to_multiplier * 8 / max) - 1;
3362 if (latency > 499 || latency > max_latency)
3368 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3369 struct l2cap_cmd_hdr *cmd, u8 *data)
3371 struct hci_conn *hcon = conn->hcon;
3372 struct l2cap_conn_param_update_req *req;
3373 struct l2cap_conn_param_update_rsp rsp;
3374 u16 min, max, latency, to_multiplier, cmd_len;
3377 if (!(hcon->link_mode & HCI_LM_MASTER))
3380 cmd_len = __le16_to_cpu(cmd->len);
3381 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3384 req = (struct l2cap_conn_param_update_req *) data;
3385 min = __le16_to_cpu(req->min);
3386 max = __le16_to_cpu(req->max);
3387 latency = __le16_to_cpu(req->latency);
3388 to_multiplier = __le16_to_cpu(req->to_multiplier);
3390 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3391 min, max, latency, to_multiplier);
3393 memset(&rsp, 0, sizeof(rsp));
3395 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3397 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3399 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3401 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
3405 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
3410 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3411 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3415 switch (cmd->code) {
3416 case L2CAP_COMMAND_REJ:
3417 l2cap_command_rej(conn, cmd, data);
3420 case L2CAP_CONN_REQ:
3421 err = l2cap_connect_req(conn, cmd, data);
3424 case L2CAP_CONN_RSP:
3425 err = l2cap_connect_rsp(conn, cmd, data);
3428 case L2CAP_CONF_REQ:
3429 err = l2cap_config_req(conn, cmd, cmd_len, data);
3432 case L2CAP_CONF_RSP:
3433 err = l2cap_config_rsp(conn, cmd, data);
3436 case L2CAP_DISCONN_REQ:
3437 err = l2cap_disconnect_req(conn, cmd, data);
3440 case L2CAP_DISCONN_RSP:
3441 err = l2cap_disconnect_rsp(conn, cmd, data);
3444 case L2CAP_ECHO_REQ:
3445 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3448 case L2CAP_ECHO_RSP:
3451 case L2CAP_INFO_REQ:
3452 err = l2cap_information_req(conn, cmd, data);
3455 case L2CAP_INFO_RSP:
3456 err = l2cap_information_rsp(conn, cmd, data);
3459 case L2CAP_CREATE_CHAN_REQ:
3460 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3463 case L2CAP_CREATE_CHAN_RSP:
3464 err = l2cap_create_channel_rsp(conn, cmd, data);
3467 case L2CAP_MOVE_CHAN_REQ:
3468 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3471 case L2CAP_MOVE_CHAN_RSP:
3472 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3475 case L2CAP_MOVE_CHAN_CFM:
3476 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3479 case L2CAP_MOVE_CHAN_CFM_RSP:
3480 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3484 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
3492 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3493 struct l2cap_cmd_hdr *cmd, u8 *data)
3495 switch (cmd->code) {
3496 case L2CAP_COMMAND_REJ:
3499 case L2CAP_CONN_PARAM_UPDATE_REQ:
3500 return l2cap_conn_param_update_req(conn, cmd, data);
3502 case L2CAP_CONN_PARAM_UPDATE_RSP:
3506 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
3511 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3512 struct sk_buff *skb)
3514 u8 *data = skb->data;
3516 struct l2cap_cmd_hdr cmd;
3519 l2cap_raw_recv(conn, skb);
3521 while (len >= L2CAP_CMD_HDR_SIZE) {
3523 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3524 data += L2CAP_CMD_HDR_SIZE;
3525 len -= L2CAP_CMD_HDR_SIZE;
3527 cmd_len = le16_to_cpu(cmd.len);
3529 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3531 if (cmd_len > len || !cmd.ident) {
3532 BT_DBG("corrupted command");
3536 if (conn->hcon->type == LE_LINK)
3537 err = l2cap_le_sig_cmd(conn, &cmd, data);
3539 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3542 struct l2cap_cmd_rej_unk rej;
3544 BT_ERR("Wrong link type (%d)", err);
3546 /* FIXME: Map err to a valid reason */
3547 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3548 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3558 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3560 u16 our_fcs, rcv_fcs;
3563 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3564 hdr_size = L2CAP_EXT_HDR_SIZE;
3566 hdr_size = L2CAP_ENH_HDR_SIZE;
3568 if (chan->fcs == L2CAP_FCS_CRC16) {
3569 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3570 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3571 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3573 if (our_fcs != rcv_fcs)
3579 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3583 chan->frames_sent = 0;
3585 control |= __set_reqseq(chan, chan->buffer_seq);
3587 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3588 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3589 l2cap_send_sframe(chan, control);
3590 set_bit(CONN_RNR_SENT, &chan->conn_state);
3593 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3594 l2cap_retransmit_frames(chan);
3596 l2cap_ertm_send(chan);
3598 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3599 chan->frames_sent == 0) {
3600 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3601 l2cap_send_sframe(chan, control);
3605 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3607 struct sk_buff *next_skb;
3608 int tx_seq_offset, next_tx_seq_offset;
3610 bt_cb(skb)->tx_seq = tx_seq;
3611 bt_cb(skb)->sar = sar;
3613 next_skb = skb_peek(&chan->srej_q);
3615 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3618 if (bt_cb(next_skb)->tx_seq == tx_seq)
3621 next_tx_seq_offset = __seq_offset(chan,
3622 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3624 if (next_tx_seq_offset > tx_seq_offset) {
3625 __skb_queue_before(&chan->srej_q, next_skb, skb);
3629 if (skb_queue_is_last(&chan->srej_q, next_skb))
3632 next_skb = skb_queue_next(&chan->srej_q, next_skb);
3635 __skb_queue_tail(&chan->srej_q, skb);
3640 static void append_skb_frag(struct sk_buff *skb,
3641 struct sk_buff *new_frag, struct sk_buff **last_frag)
3643 /* skb->len reflects data in skb as well as all fragments
3644 * skb->data_len reflects only data in fragments
3646 if (!skb_has_frag_list(skb))
3647 skb_shinfo(skb)->frag_list = new_frag;
3649 new_frag->next = NULL;
3651 (*last_frag)->next = new_frag;
3652 *last_frag = new_frag;
3654 skb->len += new_frag->len;
3655 skb->data_len += new_frag->len;
3656 skb->truesize += new_frag->truesize;
3659 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3663 switch (__get_ctrl_sar(chan, control)) {
3664 case L2CAP_SAR_UNSEGMENTED:
3668 err = chan->ops->recv(chan->data, skb);
3671 case L2CAP_SAR_START:
3675 chan->sdu_len = get_unaligned_le16(skb->data);
3676 skb_pull(skb, L2CAP_SDULEN_SIZE);
3678 if (chan->sdu_len > chan->imtu) {
3683 if (skb->len >= chan->sdu_len)
3687 chan->sdu_last_frag = skb;
3693 case L2CAP_SAR_CONTINUE:
3697 append_skb_frag(chan->sdu, skb,
3698 &chan->sdu_last_frag);
3701 if (chan->sdu->len >= chan->sdu_len)
3711 append_skb_frag(chan->sdu, skb,
3712 &chan->sdu_last_frag);
3715 if (chan->sdu->len != chan->sdu_len)
3718 err = chan->ops->recv(chan->data, chan->sdu);
3721 /* Reassembly complete */
3723 chan->sdu_last_frag = NULL;
3731 kfree_skb(chan->sdu);
3733 chan->sdu_last_frag = NULL;
3740 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3742 BT_DBG("chan %p, Enter local busy", chan);
3744 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3746 __set_ack_timer(chan);
3749 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3753 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3756 control = __set_reqseq(chan, chan->buffer_seq);
3757 control |= __set_ctrl_poll(chan);
3758 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3759 l2cap_send_sframe(chan, control);
3760 chan->retry_count = 1;
3762 __clear_retrans_timer(chan);
3763 __set_monitor_timer(chan);
3765 set_bit(CONN_WAIT_F, &chan->conn_state);
3768 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3769 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3771 BT_DBG("chan %p, Exit local busy", chan);
3774 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3776 if (chan->mode == L2CAP_MODE_ERTM) {
3778 l2cap_ertm_enter_local_busy(chan);
3780 l2cap_ertm_exit_local_busy(chan);
3784 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3786 struct sk_buff *skb;
3789 while ((skb = skb_peek(&chan->srej_q)) &&
3790 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3793 if (bt_cb(skb)->tx_seq != tx_seq)
3796 skb = skb_dequeue(&chan->srej_q);
3797 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3798 err = l2cap_reassemble_sdu(chan, skb, control);
3801 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3805 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3806 tx_seq = __next_seq(chan, tx_seq);
3810 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3812 struct srej_list *l, *tmp;
3815 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3816 if (l->tx_seq == tx_seq) {
3821 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3822 control |= __set_reqseq(chan, l->tx_seq);
3823 l2cap_send_sframe(chan, control);
3825 list_add_tail(&l->list, &chan->srej_l);
3829 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3831 struct srej_list *new;
3834 while (tx_seq != chan->expected_tx_seq) {
3835 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3836 control |= __set_reqseq(chan, chan->expected_tx_seq);
3837 l2cap_send_sframe(chan, control);
3839 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3843 new->tx_seq = chan->expected_tx_seq;
3845 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3847 list_add_tail(&new->list, &chan->srej_l);
3850 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3855 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3857 u16 tx_seq = __get_txseq(chan, rx_control);
3858 u16 req_seq = __get_reqseq(chan, rx_control);
3859 u8 sar = __get_ctrl_sar(chan, rx_control);
3860 int tx_seq_offset, expected_tx_seq_offset;
3861 int num_to_ack = (chan->tx_win/6) + 1;
3864 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3865 tx_seq, rx_control);
3867 if (__is_ctrl_final(chan, rx_control) &&
3868 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3869 __clear_monitor_timer(chan);
3870 if (chan->unacked_frames > 0)
3871 __set_retrans_timer(chan);
3872 clear_bit(CONN_WAIT_F, &chan->conn_state);
3875 chan->expected_ack_seq = req_seq;
3876 l2cap_drop_acked_frames(chan);
3878 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3880 /* invalid tx_seq */
3881 if (tx_seq_offset >= chan->tx_win) {
3882 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3886 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3887 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3888 l2cap_send_ack(chan);
3892 if (tx_seq == chan->expected_tx_seq)
3895 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3896 struct srej_list *first;
3898 first = list_first_entry(&chan->srej_l,
3899 struct srej_list, list);
3900 if (tx_seq == first->tx_seq) {
3901 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3902 l2cap_check_srej_gap(chan, tx_seq);
3904 list_del(&first->list);
3907 if (list_empty(&chan->srej_l)) {
3908 chan->buffer_seq = chan->buffer_seq_srej;
3909 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3910 l2cap_send_ack(chan);
3911 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3914 struct srej_list *l;
3916 /* duplicated tx_seq */
3917 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3920 list_for_each_entry(l, &chan->srej_l, list) {
3921 if (l->tx_seq == tx_seq) {
3922 l2cap_resend_srejframe(chan, tx_seq);
3927 err = l2cap_send_srejframe(chan, tx_seq);
3929 l2cap_send_disconn_req(chan->conn, chan, -err);
3934 expected_tx_seq_offset = __seq_offset(chan,
3935 chan->expected_tx_seq, chan->buffer_seq);
3937 /* duplicated tx_seq */
3938 if (tx_seq_offset < expected_tx_seq_offset)
3941 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3943 BT_DBG("chan %p, Enter SREJ", chan);
3945 INIT_LIST_HEAD(&chan->srej_l);
3946 chan->buffer_seq_srej = chan->buffer_seq;
3948 __skb_queue_head_init(&chan->srej_q);
3949 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3951 /* Set P-bit only if there are some I-frames to ack. */
3952 if (__clear_ack_timer(chan))
3953 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3955 err = l2cap_send_srejframe(chan, tx_seq);
3957 l2cap_send_disconn_req(chan->conn, chan, -err);
3964 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3966 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3967 bt_cb(skb)->tx_seq = tx_seq;
3968 bt_cb(skb)->sar = sar;
3969 __skb_queue_tail(&chan->srej_q, skb);
3973 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3974 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3977 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3981 if (__is_ctrl_final(chan, rx_control)) {
3982 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3983 l2cap_retransmit_frames(chan);
3987 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3988 if (chan->num_acked == num_to_ack - 1)
3989 l2cap_send_ack(chan);
3991 __set_ack_timer(chan);
4000 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
4002 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
4003 __get_reqseq(chan, rx_control), rx_control);
4005 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
4006 l2cap_drop_acked_frames(chan);
4008 if (__is_ctrl_poll(chan, rx_control)) {
4009 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4010 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4011 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4012 (chan->unacked_frames > 0))
4013 __set_retrans_timer(chan);
4015 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4016 l2cap_send_srejtail(chan);
4018 l2cap_send_i_or_rr_or_rnr(chan);
4021 } else if (__is_ctrl_final(chan, rx_control)) {
4022 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4024 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4025 l2cap_retransmit_frames(chan);
4028 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
4029 (chan->unacked_frames > 0))
4030 __set_retrans_timer(chan);
4032 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4033 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4034 l2cap_send_ack(chan);
4036 l2cap_ertm_send(chan);
4040 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4042 u16 tx_seq = __get_reqseq(chan, rx_control);
4044 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4046 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4048 chan->expected_ack_seq = tx_seq;
4049 l2cap_drop_acked_frames(chan);
4051 if (__is_ctrl_final(chan, rx_control)) {
4052 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4053 l2cap_retransmit_frames(chan);
4055 l2cap_retransmit_frames(chan);
4057 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4058 set_bit(CONN_REJ_ACT, &chan->conn_state);
4061 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4063 u16 tx_seq = __get_reqseq(chan, rx_control);
4065 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4067 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4069 if (__is_ctrl_poll(chan, rx_control)) {
4070 chan->expected_ack_seq = tx_seq;
4071 l2cap_drop_acked_frames(chan);
4073 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4074 l2cap_retransmit_one_frame(chan, tx_seq);
4076 l2cap_ertm_send(chan);
4078 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4079 chan->srej_save_reqseq = tx_seq;
4080 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4082 } else if (__is_ctrl_final(chan, rx_control)) {
4083 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4084 chan->srej_save_reqseq == tx_seq)
4085 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4087 l2cap_retransmit_one_frame(chan, tx_seq);
4089 l2cap_retransmit_one_frame(chan, tx_seq);
4090 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4091 chan->srej_save_reqseq = tx_seq;
4092 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4097 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4099 u16 tx_seq = __get_reqseq(chan, rx_control);
4101 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4103 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4104 chan->expected_ack_seq = tx_seq;
4105 l2cap_drop_acked_frames(chan);
4107 if (__is_ctrl_poll(chan, rx_control))
4108 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4110 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4111 __clear_retrans_timer(chan);
4112 if (__is_ctrl_poll(chan, rx_control))
4113 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4117 if (__is_ctrl_poll(chan, rx_control)) {
4118 l2cap_send_srejtail(chan);
4120 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4121 l2cap_send_sframe(chan, rx_control);
4125 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4127 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4129 if (__is_ctrl_final(chan, rx_control) &&
4130 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4131 __clear_monitor_timer(chan);
4132 if (chan->unacked_frames > 0)
4133 __set_retrans_timer(chan);
4134 clear_bit(CONN_WAIT_F, &chan->conn_state);
4137 switch (__get_ctrl_super(chan, rx_control)) {
4138 case L2CAP_SUPER_RR:
4139 l2cap_data_channel_rrframe(chan, rx_control);
4142 case L2CAP_SUPER_REJ:
4143 l2cap_data_channel_rejframe(chan, rx_control);
4146 case L2CAP_SUPER_SREJ:
4147 l2cap_data_channel_srejframe(chan, rx_control);
4150 case L2CAP_SUPER_RNR:
4151 l2cap_data_channel_rnrframe(chan, rx_control);
4159 static int l2cap_ertm_data_rcv(struct l2cap_chan *chan, struct sk_buff *skb)
4163 int len, next_tx_seq_offset, req_seq_offset;
4165 control = __get_control(chan, skb->data);
4166 skb_pull(skb, __ctrl_size(chan));
4170 * We can just drop the corrupted I-frame here.
4171 * Receiver will miss it and start proper recovery
4172 * procedures and ask retransmission.
4174 if (l2cap_check_fcs(chan, skb))
4177 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4178 len -= L2CAP_SDULEN_SIZE;
4180 if (chan->fcs == L2CAP_FCS_CRC16)
4181 len -= L2CAP_FCS_SIZE;
4183 if (len > chan->mps) {
4184 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4188 req_seq = __get_reqseq(chan, control);
4190 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4192 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4193 chan->expected_ack_seq);
4195 /* check for invalid req-seq */
4196 if (req_seq_offset > next_tx_seq_offset) {
4197 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4201 if (!__is_sframe(chan, control)) {
4203 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4207 l2cap_data_channel_iframe(chan, control, skb);
4211 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4215 l2cap_data_channel_sframe(chan, control, skb);
4225 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4227 struct l2cap_chan *chan;
4228 struct sock *sk = NULL;
4233 chan = l2cap_get_chan_by_scid(conn, cid);
4235 BT_DBG("unknown cid 0x%4.4x", cid);
4242 BT_DBG("chan %p, len %d", chan, skb->len);
4244 if (chan->state != BT_CONNECTED)
4247 switch (chan->mode) {
4248 case L2CAP_MODE_BASIC:
4249 /* If socket recv buffers overflows we drop data here
4250 * which is *bad* because L2CAP has to be reliable.
4251 * But we don't have any other choice. L2CAP doesn't
4252 * provide flow control mechanism. */
4254 if (chan->imtu < skb->len)
4257 if (!chan->ops->recv(chan->data, skb))
4261 case L2CAP_MODE_ERTM:
4262 l2cap_ertm_data_rcv(chan, skb);
4266 case L2CAP_MODE_STREAMING:
4267 control = __get_control(chan, skb->data);
4268 skb_pull(skb, __ctrl_size(chan));
4271 if (l2cap_check_fcs(chan, skb))
4274 if (__is_sar_start(chan, control))
4275 len -= L2CAP_SDULEN_SIZE;
4277 if (chan->fcs == L2CAP_FCS_CRC16)
4278 len -= L2CAP_FCS_SIZE;
4280 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4283 tx_seq = __get_txseq(chan, control);
4285 if (chan->expected_tx_seq != tx_seq) {
4286 /* Frame(s) missing - must discard partial SDU */
4287 kfree_skb(chan->sdu);
4289 chan->sdu_last_frag = NULL;
4292 /* TODO: Notify userland of missing data */
4295 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4297 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4298 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4303 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
4317 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4319 struct sock *sk = NULL;
4320 struct l2cap_chan *chan;
4322 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4330 BT_DBG("sk %p, len %d", sk, skb->len);
4332 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4335 if (chan->imtu < skb->len)
4338 if (!chan->ops->recv(chan->data, skb))
4350 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4352 struct sock *sk = NULL;
4353 struct l2cap_chan *chan;
4355 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4363 BT_DBG("sk %p, len %d", sk, skb->len);
4365 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4368 if (chan->imtu < skb->len)
4371 if (!chan->ops->recv(chan->data, skb))
4383 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4385 struct l2cap_hdr *lh = (void *) skb->data;
4389 skb_pull(skb, L2CAP_HDR_SIZE);
4390 cid = __le16_to_cpu(lh->cid);
4391 len = __le16_to_cpu(lh->len);
4393 if (len != skb->len) {
4398 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4401 case L2CAP_CID_LE_SIGNALING:
4402 case L2CAP_CID_SIGNALING:
4403 l2cap_sig_channel(conn, skb);
4406 case L2CAP_CID_CONN_LESS:
4407 psm = get_unaligned_le16(skb->data);
4409 l2cap_conless_channel(conn, psm, skb);
4412 case L2CAP_CID_LE_DATA:
4413 l2cap_att_channel(conn, cid, skb);
4417 if (smp_sig_channel(conn, skb))
4418 l2cap_conn_del(conn->hcon, EACCES);
4422 l2cap_data_channel(conn, cid, skb);
4427 /* ---- L2CAP interface with lower layer (HCI) ---- */
4429 int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr)
4431 int exact = 0, lm1 = 0, lm2 = 0;
4432 struct l2cap_chan *c;
4434 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4436 /* Find listening sockets and check their link_mode */
4437 read_lock(&chan_list_lock);
4438 list_for_each_entry(c, &chan_list, global_l) {
4439 struct sock *sk = c->sk;
4441 if (c->state != BT_LISTEN)
4444 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4445 lm1 |= HCI_LM_ACCEPT;
4446 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4447 lm1 |= HCI_LM_MASTER;
4449 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4450 lm2 |= HCI_LM_ACCEPT;
4451 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4452 lm2 |= HCI_LM_MASTER;
4455 read_unlock(&chan_list_lock);
4457 return exact ? lm1 : lm2;
4460 int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4462 struct l2cap_conn *conn;
4464 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4467 conn = l2cap_conn_add(hcon, status);
4469 l2cap_conn_ready(conn);
4471 l2cap_conn_del(hcon, bt_to_errno(status));
4476 int l2cap_disconn_ind(struct hci_conn *hcon)
4478 struct l2cap_conn *conn = hcon->l2cap_data;
4480 BT_DBG("hcon %p", hcon);
4483 return HCI_ERROR_REMOTE_USER_TERM;
4484 return conn->disc_reason;
4487 int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4489 BT_DBG("hcon %p reason %d", hcon, reason);
4491 l2cap_conn_del(hcon, bt_to_errno(reason));
4495 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4497 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4500 if (encrypt == 0x00) {
4501 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4502 __clear_chan_timer(chan);
4503 __set_chan_timer(chan,
4504 msecs_to_jiffies(L2CAP_ENC_TIMEOUT));
4505 } else if (chan->sec_level == BT_SECURITY_HIGH)
4506 l2cap_chan_close(chan, ECONNREFUSED);
4508 if (chan->sec_level == BT_SECURITY_MEDIUM)
4509 __clear_chan_timer(chan);
4513 int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4515 struct l2cap_conn *conn = hcon->l2cap_data;
4516 struct l2cap_chan *chan;
4521 BT_DBG("conn %p", conn);
4523 if (hcon->type == LE_LINK) {
4524 smp_distribute_keys(conn, 0);
4525 cancel_delayed_work(&conn->security_timer);
4528 mutex_lock(&conn->chan_lock);
4530 list_for_each_entry(chan, &conn->chan_l, list) {
4531 struct sock *sk = chan->sk;
4535 BT_DBG("chan->scid %d", chan->scid);
4537 if (chan->scid == L2CAP_CID_LE_DATA) {
4538 if (!status && encrypt) {
4539 chan->sec_level = hcon->sec_level;
4540 l2cap_chan_ready(chan);
4547 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4552 if (!status && (chan->state == BT_CONNECTED ||
4553 chan->state == BT_CONFIG)) {
4554 l2cap_check_encryption(chan, encrypt);
4559 if (chan->state == BT_CONNECT) {
4561 struct l2cap_conn_req req;
4562 req.scid = cpu_to_le16(chan->scid);
4563 req.psm = chan->psm;
4565 chan->ident = l2cap_get_ident(conn);
4566 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4568 l2cap_send_cmd(conn, chan->ident,
4569 L2CAP_CONN_REQ, sizeof(req), &req);
4571 __clear_chan_timer(chan);
4572 __set_chan_timer(chan,
4573 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4575 } else if (chan->state == BT_CONNECT2) {
4576 struct l2cap_conn_rsp rsp;
4580 if (bt_sk(sk)->defer_setup) {
4581 struct sock *parent = bt_sk(sk)->parent;
4582 res = L2CAP_CR_PEND;
4583 stat = L2CAP_CS_AUTHOR_PEND;
4585 parent->sk_data_ready(parent, 0);
4587 l2cap_state_change(chan, BT_CONFIG);
4588 res = L2CAP_CR_SUCCESS;
4589 stat = L2CAP_CS_NO_INFO;
4592 l2cap_state_change(chan, BT_DISCONN);
4593 __set_chan_timer(chan,
4594 msecs_to_jiffies(L2CAP_DISC_TIMEOUT));
4595 res = L2CAP_CR_SEC_BLOCK;
4596 stat = L2CAP_CS_NO_INFO;
4599 rsp.scid = cpu_to_le16(chan->dcid);
4600 rsp.dcid = cpu_to_le16(chan->scid);
4601 rsp.result = cpu_to_le16(res);
4602 rsp.status = cpu_to_le16(stat);
4603 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4610 mutex_unlock(&conn->chan_lock);
4615 int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4617 struct l2cap_conn *conn = hcon->l2cap_data;
4620 conn = l2cap_conn_add(hcon, 0);
4625 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4627 if (!(flags & ACL_CONT)) {
4628 struct l2cap_hdr *hdr;
4629 struct l2cap_chan *chan;
4634 BT_ERR("Unexpected start frame (len %d)", skb->len);
4635 kfree_skb(conn->rx_skb);
4636 conn->rx_skb = NULL;
4638 l2cap_conn_unreliable(conn, ECOMM);
4641 /* Start fragment always begin with Basic L2CAP header */
4642 if (skb->len < L2CAP_HDR_SIZE) {
4643 BT_ERR("Frame is too short (len %d)", skb->len);
4644 l2cap_conn_unreliable(conn, ECOMM);
4648 hdr = (struct l2cap_hdr *) skb->data;
4649 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4650 cid = __le16_to_cpu(hdr->cid);
4652 if (len == skb->len) {
4653 /* Complete frame received */
4654 l2cap_recv_frame(conn, skb);
4658 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4660 if (skb->len > len) {
4661 BT_ERR("Frame is too long (len %d, expected len %d)",
4663 l2cap_conn_unreliable(conn, ECOMM);
4667 chan = l2cap_get_chan_by_scid(conn, cid);
4669 if (chan && chan->sk) {
4670 struct sock *sk = chan->sk;
4673 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4674 BT_ERR("Frame exceeding recv MTU (len %d, "
4678 l2cap_conn_unreliable(conn, ECOMM);
4684 /* Allocate skb for the complete frame (with header) */
4685 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4689 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4691 conn->rx_len = len - skb->len;
4693 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4695 if (!conn->rx_len) {
4696 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4697 l2cap_conn_unreliable(conn, ECOMM);
4701 if (skb->len > conn->rx_len) {
4702 BT_ERR("Fragment is too long (len %d, expected %d)",
4703 skb->len, conn->rx_len);
4704 kfree_skb(conn->rx_skb);
4705 conn->rx_skb = NULL;
4707 l2cap_conn_unreliable(conn, ECOMM);
4711 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4713 conn->rx_len -= skb->len;
4715 if (!conn->rx_len) {
4716 /* Complete frame received */
4717 l2cap_recv_frame(conn, conn->rx_skb);
4718 conn->rx_skb = NULL;
4727 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4729 struct l2cap_chan *c;
4731 read_lock(&chan_list_lock);
4733 list_for_each_entry(c, &chan_list, global_l) {
4734 struct sock *sk = c->sk;
4736 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4737 batostr(&bt_sk(sk)->src),
4738 batostr(&bt_sk(sk)->dst),
4739 c->state, __le16_to_cpu(c->psm),
4740 c->scid, c->dcid, c->imtu, c->omtu,
4741 c->sec_level, c->mode);
4744 read_unlock(&chan_list_lock);
4749 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4751 return single_open(file, l2cap_debugfs_show, inode->i_private);
4754 static const struct file_operations l2cap_debugfs_fops = {
4755 .open = l2cap_debugfs_open,
4757 .llseek = seq_lseek,
4758 .release = single_release,
4761 static struct dentry *l2cap_debugfs;
4763 int __init l2cap_init(void)
4767 err = l2cap_init_sockets();
4772 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4773 bt_debugfs, NULL, &l2cap_debugfs_fops);
4775 BT_ERR("Failed to create L2CAP debug file");
4781 void l2cap_exit(void)
4783 debugfs_remove(l2cap_debugfs);
4784 l2cap_cleanup_sockets();
4787 module_param(disable_ertm, bool, 0644);
4788 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");