2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* NOTE(review): this extraction is missing lines throughout (braces and
 * some statements elided); code is kept verbatim, comments only added. */
/* Module-wide state: advertised L2CAP feature mask, fixed-channel map,
 * and the global list of registered channels with its rwlock. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers defined later in the file. */
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
78 /* ---- L2CAP channels ---- */
/* Take a reference on a channel. */
80 static inline void chan_hold(struct l2cap_chan *c)
82 atomic_inc(&c->refcnt)
/* Drop a reference; the free path taken on the final put is elided in
 * this extraction — NOTE(review): body lines missing here. */
85 static inline void chan_put(struct l2cap_chan *c)
87 if (atomic_dec_and_test(&c->refcnt))
/* Find a channel on this connection by destination CID.
 * Caller is presumed to hold conn->chan_lock (the locked wrappers below
 * take it). */
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {
/* Find a channel on this connection by source CID (unlocked variant). */
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
105 struct l2cap_chan *c;
107 list_for_each_entry(c, &conn->chan_l, list) {
114 /* Find channel with given SCID.
115 * Returns locked socket */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
118 struct l2cap_chan *c;
/* Read-lock the per-connection channel list around the raw lookup. */
120 read_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
124 read_unlock(&conn->chan_lock);
/* Find the channel waiting on a given signalling command identifier
 * (unlocked variant). */
128 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
139 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
141 struct l2cap_chan *c;
143 read_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_ident(conn, ident);
147 read_unlock(&conn->chan_lock);
/* Find a registered channel bound to the given PSM (sport) and source
 * address.  Caller must hold chan_list_lock. */
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
153 struct l2cap_chan *c;
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM.  With psm == 0 a free odd PSM in the dynamic
 * range 0x1001..0x10ff is auto-allocated (step 2 keeps PSMs odd, as the
 * spec requires); a non-zero psm fails if already taken for this source
 * address.  Error-return lines are elided in this extraction. */
165 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
169 write_lock_bh(&chan_list_lock);
171 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
184 for (p = 0x1001; p < 0x1100; p += 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
186 chan->psm = cpu_to_le16(p);
187 chan->sport = cpu_to_le16(p);
194 write_unlock_bh(&chan_list_lock);
/* Assign a fixed SCID to the channel under chan_list_lock (assignment
 * line elided in this extraction). */
198 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
200 write_lock_bh(&chan_list_lock);
204 write_unlock_bh(&chan_list_lock);
/* Allocate the first free dynamic source CID on this connection by
 * linear scan of [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
209 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
211 u16 cid = L2CAP_CID_DYN_START;
213 for (; cid < L2CAP_CID_DYN_END; cid++) {
214 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm a channel timer for `timeout` milliseconds; mod_timer()
 * returning 0 means the timer was not previously pending (the action
 * taken on that, presumably a chan_hold, is elided here). */
221 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
223 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
225 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
/* Cancel a pending channel timer (the matching ref-drop is elided). */
229 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
231 BT_DBG("chan %p state %d", chan, chan->state);
233 if (timer_pending(timer) && del_timer(timer))
/* Move the channel to a new state and notify the owner via the
 * state_change ops callback. */
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
240 chan->ops->state_change(chan->data, state);
/* Channel timer expiry (runs in timer/softirq context).
 * If the socket is locked by a user, re-arm for the disconnect timeout
 * and retry later; otherwise pick an errno based on the channel state
 * and close the channel, then notify the owner. */
243 static void l2cap_chan_timeout(unsigned long arg)
245 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
246 struct sock *sk = chan->sk;
249 BT_DBG("chan %p state %d", chan, chan->state);
253 if (sock_owned_by_user(sk)) {
254 /* sk is owned by user. Try again later */
255 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Timed out while connected/configuring, or while connecting with real
 * (non-SDP) security: report ECONNREFUSED.  The fallback reason for the
 * remaining states is elided in this extraction. */
261 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
262 reason = ECONNREFUSED;
263 else if (chan->state == BT_CONNECT &&
264 chan->sec_level != BT_SECURITY_SDP)
265 reason = ECONNREFUSED;
269 l2cap_chan_close(chan, reason);
273 chan->ops->close(chan->data);
/* Allocate and initialise a new channel bound to socket `sk`: link it on
 * the global list, arm its timer callback, start in BT_OPEN with one
 * reference.  (NULL-check after kzalloc is elided in this extraction.) */
277 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
279 struct l2cap_chan *chan;
281 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
287 write_lock_bh(&chan_list_lock);
288 list_add(&chan->global_l, &chan_list);
289 write_unlock_bh(&chan_list_lock);
291 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
293 chan->state = BT_OPEN;
295 atomic_set(&chan->refcnt, 1);
297 BT_DBG("sk %p chan %p", sk, chan);
/* Unlink the channel from the global list (the final ref-drop is elided
 * in this extraction). */
302 void l2cap_chan_destroy(struct l2cap_chan *chan)
304 write_lock_bh(&chan_list_lock);
305 list_del(&chan->global_l);
306 write_unlock_bh(&chan_list_lock);
/* Attach a channel to a connection.  Caller holds conn->chan_lock.
 * Assigns CIDs/MTU by channel type: LE data channels get the fixed LE
 * CID, connection-oriented ACL channels get a dynamic SCID,
 * connectionless and raw channels use their respective fixed CIDs.
 * Also seeds default extended-flowspec parameters, then links the
 * channel on the connection's list. */
311 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
313 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
314 chan->psm, chan->dcid);
/* Any disconnect from now on defaults to "remote user terminated". */
316 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
320 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
321 if (conn->hcon->type == LE_LINK) {
323 chan->omtu = L2CAP_LE_DEFAULT_MTU;
324 chan->scid = L2CAP_CID_LE_DATA;
325 chan->dcid = L2CAP_CID_LE_DATA;
327 /* Alloc CID for connection-oriented socket */
328 chan->scid = l2cap_alloc_cid(conn);
329 chan->omtu = L2CAP_DEFAULT_MTU;
331 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
332 /* Connectionless socket */
333 chan->scid = L2CAP_CID_CONN_LESS;
334 chan->dcid = L2CAP_CID_CONN_LESS;
335 chan->omtu = L2CAP_DEFAULT_MTU;
337 /* Raw socket can send/recv signalling messages only */
338 chan->scid = L2CAP_CID_SIGNALING;
339 chan->dcid = L2CAP_CID_SIGNALING;
340 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort service parameters for the local side. */
343 chan->local_id = L2CAP_BESTEFFORT_ID;
344 chan->local_stype = L2CAP_SERV_BESTEFFORT;
345 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
346 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
347 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
348 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
352 list_add(&chan->list, &conn->chan_l);
/* Detach a channel from its connection and tear down per-channel state.
356 * Must be called on the locked socket. */
357 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
359 struct sock *sk = chan->sk;
360 struct l2cap_conn *conn = chan->conn;
361 struct sock *parent = bt_sk(sk)->parent;
363 __clear_chan_timer(chan);
365 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
368 /* Delete from channel list */
369 write_lock_bh(&conn->chan_lock);
370 list_del(&chan->list);
371 write_unlock_bh(&conn->chan_lock);
/* Drop the hci_conn reference taken when the channel was attached. */
375 hci_conn_put(conn->hcon);
378 l2cap_state_change(chan, BT_CLOSED);
379 sock_set_flag(sk, SOCK_ZAPPED);
/* Not-yet-accepted child: unlink from the parent's accept queue and
 * wake the listener; otherwise just signal the state change. */
385 bt_accept_unlink(sk);
386 parent->sk_data_ready(parent, 0);
388 sk->sk_state_change(sk);
/* If configuration never completed there is nothing more to free
 * (early-return body elided in this extraction). */
390 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
391 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
394 skb_queue_purge(&chan->tx_q);
/* ERTM mode: stop all retransmission machinery and free the SREJ
 * bookkeeping. */
396 if (chan->mode == L2CAP_MODE_ERTM) {
397 struct srej_list *l, *tmp;
399 __clear_retrans_timer(chan);
400 __clear_monitor_timer(chan);
401 __clear_ack_timer(chan);
403 skb_queue_purge(&chan->srej_q);
405 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every channel still sitting on a listening socket's accept
 * queue (connection reset), notifying each owner. */
412 static void l2cap_chan_cleanup_listen(struct sock *parent)
416 BT_DBG("parent %p", parent);
418 /* Close not yet accepted channels */
419 while ((sk = bt_accept_dequeue(parent, NULL))) {
420 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
421 __clear_chan_timer(chan);
423 l2cap_chan_close(chan, ECONNRESET);
425 chan->ops->close(chan->data);
/* Close a channel according to its current state:
 *  - listening: tear down the pending accept queue, mark closed/zapped;
 *  - connected/configuring on ACL: send a Disconnect request and arm
 *    the channel timer, otherwise delete the channel directly;
 *  - connect-pending on ACL: answer the peer's Connect request with a
 *    reject (security-block if setup was deferred, bad-PSM otherwise)
 *    before deleting;
 *  - any other state: just mark the socket zapped.
 * (switch/case labels are elided in this extraction.) */
429 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
431 struct l2cap_conn *conn = chan->conn;
432 struct sock *sk = chan->sk;
434 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
436 switch (chan->state) {
438 l2cap_chan_cleanup_listen(sk);
440 l2cap_state_change(chan, BT_CLOSED);
441 sock_set_flag(sk, SOCK_ZAPPED);
446 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 conn->hcon->type == ACL_LINK) {
448 __clear_chan_timer(chan);
449 __set_chan_timer(chan, sk->sk_sndtimeo);
450 l2cap_send_disconn_req(conn, chan, reason);
452 l2cap_chan_del(chan, reason);
456 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
457 conn->hcon->type == ACL_LINK) {
458 struct l2cap_conn_rsp rsp;
461 if (bt_sk(sk)->defer_setup)
462 result = L2CAP_CR_SEC_BLOCK;
464 result = L2CAP_CR_BAD_PSM;
465 l2cap_state_change(chan, BT_DISCONN);
467 rsp.scid = cpu_to_le16(chan->dcid);
468 rsp.dcid = cpu_to_le16(chan->scid);
469 rsp.result = cpu_to_le16(result);
470 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
471 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
475 l2cap_chan_del(chan, reason);
480 l2cap_chan_del(chan, reason);
484 sock_set_flag(sk, SOCK_ZAPPED);
/* Map the channel type / security level to an HCI authentication
 * requirement: raw channels request dedicated bonding, PSM 0x0001 (SDP)
 * never bonds (and low security is promoted to the SDP level), all
 * other channels request general bonding.  MITM protection is added at
 * BT_SECURITY_HIGH.  (default: labels elided in this extraction.) */
489 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
491 if (chan->chan_type == L2CAP_CHAN_RAW) {
492 switch (chan->sec_level) {
493 case BT_SECURITY_HIGH:
494 return HCI_AT_DEDICATED_BONDING_MITM;
495 case BT_SECURITY_MEDIUM:
496 return HCI_AT_DEDICATED_BONDING;
498 return HCI_AT_NO_BONDING;
500 } else if (chan->psm == cpu_to_le16(0x0001)) {
501 if (chan->sec_level == BT_SECURITY_LOW)
502 chan->sec_level = BT_SECURITY_SDP;
504 if (chan->sec_level == BT_SECURITY_HIGH)
505 return HCI_AT_NO_BONDING_MITM;
507 return HCI_AT_NO_BONDING;
509 switch (chan->sec_level) {
510 case BT_SECURITY_HIGH:
511 return HCI_AT_GENERAL_BONDING_MITM;
512 case BT_SECURITY_MEDIUM:
513 return HCI_AT_GENERAL_BONDING;
515 return HCI_AT_NO_BONDING;
520 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying link, using the auth type derived above. */
521 int l2cap_chan_check_security(struct l2cap_chan *chan)
523 struct l2cap_conn *conn = chan->conn;
526 auth_type = l2cap_get_auth_type(chan);
528 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling command identifier for this connection,
 * wrapping within the kernel-owned range (see comment below). */
531 static u8 l2cap_get_ident(struct l2cap_conn *conn)
535 /* Get next available identificator.
536 * 1 - 128 are used by kernel.
537 * 537 - 199 are reserved.
538 * 200 - 254 are used by utilities like l2ping, etc.
541 spin_lock_bh(&conn->lock);
543 if (++conn->tx_ident > 128)
548 spin_unlock_bh(&conn->lock);
/* Build and transmit an L2CAP signalling command on the connection's
 * HCI channel; non-flushable ACL start is used when the controller
 * supports it, and the packet forces the link active at max priority. */
553 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
555 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
558 BT_DBG("code 0x%2.2x", code);
563 if (lmp_no_flush_capable(conn->hcon->hdev))
564 flags = ACL_START_NO_FLUSH;
568 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
569 skb->priority = HCI_PRIO_MAX;
571 hci_send_acl(conn->hchan, skb, flags);
/* Transmit a data skb for a channel: pick the ACL start flag from the
 * channel's flushable flag and controller capability, propagate the
 * force-active policy, and hand off to hci_send_acl(). */
574 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
576 struct hci_conn *hcon = chan->conn->hcon;
579 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
582 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
583 lmp_no_flush_capable(hcon->hdev))
584 flags = ACL_START_NO_FLUSH;
588 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
589 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM S-frame carrying `control`.
 * Header size depends on whether extended control fields are in use;
 * an FCS trailer is appended when CRC16 is negotiated.  Pending F-bit /
 * P-bit requests on the channel are folded into the control field. */
592 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
595 struct l2cap_hdr *lh;
596 struct l2cap_conn *conn = chan->conn;
599 if (chan->state != BT_CONNECTED)
602 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
603 hlen = L2CAP_EXT_HDR_SIZE;
605 hlen = L2CAP_ENH_HDR_SIZE;
607 if (chan->fcs == L2CAP_FCS_CRC16)
608 hlen += L2CAP_FCS_SIZE;
610 BT_DBG("chan %p, control 0x%8.8x", chan, control);
612 count = min_t(unsigned int, conn->mtu, hlen);
614 control |= __set_sframe(chan);
616 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
617 control |= __set_ctrl_final(chan);
619 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
620 control |= __set_ctrl_poll(chan);
622 skb = bt_skb_alloc(count, GFP_ATOMIC);
626 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
627 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
628 lh->cid = cpu_to_le16(chan->dcid);
630 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
/* CRC16 covers the header and control field written so far. */
632 if (chan->fcs == L2CAP_FCS_CRC16) {
633 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
634 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
637 skb->priority = HCI_PRIO_MAX;
638 l2cap_do_send(chan, skb);
/* Send an RR, or an RNR when the local side is busy (recording that an
 * RNR went out), always acknowledging up to buffer_seq. */
641 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
643 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
644 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
645 set_bit(CONN_RNR_SENT, &chan->conn_state);
647 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
649 control |= __set_reqseq(chan, chan->buffer_seq);
651 l2cap_send_sframe(chan, control);
/* True when no Connect request from this channel is outstanding. */
654 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
656 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment: once the remote feature mask is known
 * (and security passes), send a Connect request; otherwise first issue
 * an Information request for the feature mask and arm the info timer. */
659 static void l2cap_do_start(struct l2cap_chan *chan)
661 struct l2cap_conn *conn = chan->conn;
663 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
664 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
667 if (l2cap_chan_check_security(chan) &&
668 __l2cap_no_conn_pending(chan)) {
669 struct l2cap_conn_req req;
670 req.scid = cpu_to_le16(chan->scid);
673 chan->ident = l2cap_get_ident(conn);
674 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
676 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
680 struct l2cap_info_req req;
681 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
683 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
684 conn->info_ident = l2cap_get_ident(conn);
686 mod_timer(&conn->info_timer, jiffies +
687 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
689 l2cap_send_cmd(conn, conn->info_ident,
690 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether `mode` is usable given both the remote feature mask and
 * our local one (ERTM/streaming are always enabled locally here).
 * Non-ERTM/streaming modes fall through to a default elided in this
 * extraction. */
694 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
696 u32 local_feat_mask = l2cap_feat_mask;
698 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
701 case L2CAP_MODE_ERTM:
702 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
703 case L2CAP_MODE_STREAMING:
704 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect request for the channel and move it to BT_DISCONN.
 * In ERTM mode all retransmission timers are stopped first so nothing
 * fires while the disconnect is in flight. */
710 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
713 struct l2cap_disconn_req req;
720 if (chan->mode == L2CAP_MODE_ERTM) {
721 __clear_retrans_timer(chan);
722 __clear_monitor_timer(chan);
723 __clear_ack_timer(chan);
726 req.dcid = cpu_to_le16(chan->dcid);
727 req.scid = cpu_to_le16(chan->scid);
728 l2cap_send_cmd(conn, l2cap_get_ident(conn),
729 L2CAP_DISCONN_REQ, sizeof(req), &req);
731 l2cap_state_change(chan, BT_DISCONN);
735 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and push its state machine
 * forward once the feature-mask exchange has completed:
 *  - BT_CONNECT channels (re)send a Connect request, or are closed if
 *    their mode is unsupported and state2-device config is set;
 *  - BT_CONNECT2 channels answer the peer's pending Connect request
 *    (success, authorization-pending, or authentication-pending) and,
 *    on success, emit the first Configure request. */
736 static void l2cap_conn_start(struct l2cap_conn *conn)
738 struct l2cap_chan *chan, *tmp;
740 BT_DBG("conn %p", conn);
742 read_lock(&conn->chan_lock);
744 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
745 struct sock *sk = chan->sk;
749 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
754 if (chan->state == BT_CONNECT) {
755 struct l2cap_conn_req req;
757 if (!l2cap_chan_check_security(chan) ||
758 !__l2cap_no_conn_pending(chan)) {
763 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
764 && test_bit(CONF_STATE2_DEVICE,
765 &chan->conf_state)) {
766 /* l2cap_chan_close() calls list_del(chan)
767 * so release the lock */
768 read_unlock(&conn->chan_lock);
769 l2cap_chan_close(chan, ECONNRESET);
770 read_lock(&conn->chan_lock);
775 req.scid = cpu_to_le16(chan->scid);
778 chan->ident = l2cap_get_ident(conn);
779 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
781 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
784 } else if (chan->state == BT_CONNECT2) {
785 struct l2cap_conn_rsp rsp;
787 rsp.scid = cpu_to_le16(chan->dcid);
788 rsp.dcid = cpu_to_le16(chan->scid);
790 if (l2cap_chan_check_security(chan)) {
791 if (bt_sk(sk)->defer_setup) {
792 struct sock *parent = bt_sk(sk)->parent;
793 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
794 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
796 parent->sk_data_ready(parent, 0);
799 l2cap_state_change(chan, BT_CONFIG);
800 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
801 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
804 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
805 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
808 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Skip the Configure request if one was already sent or the response
 * was not a success. */
811 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
812 rsp.result != L2CAP_CR_SUCCESS) {
817 set_bit(CONF_REQ_SENT, &chan->conf_state);
818 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
819 l2cap_build_conf_req(chan, buf), buf);
820 chan->num_conf_req++;
826 read_unlock(&conn->chan_lock);
829 /* Find socket with cid and source bdaddr.
830 * Returns closest match, locked.
/* Exact source-address match returns immediately; a BDADDR_ANY listener
 * is remembered as the fallback (c1). */
832 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
834 struct l2cap_chan *c, *c1 = NULL;
836 read_lock(&chan_list_lock);
838 list_for_each_entry(c, &chan_list, global_l) {
839 struct sock *sk = c->sk;
841 if (state && c->state != state)
844 if (c->scid == cid) {
846 if (!bacmp(&bt_sk(sk)->src, src)) {
847 read_unlock(&chan_list_lock);
852 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
857 read_unlock(&chan_list_lock);
/* Incoming LE connection: if a socket is listening on the LE data CID,
 * spawn a child channel, enqueue it on the listener's accept queue,
 * attach it to the connection and mark it connected. */
862 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
864 struct sock *parent, *sk;
865 struct l2cap_chan *chan, *pchan;
869 /* Check if we have socket listening on cid */
870 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
877 bh_lock_sock(parent);
879 /* Check for backlog size */
880 if (sk_acceptq_is_full(parent)) {
881 BT_DBG("backlog full %d", parent->sk_ack_backlog);
885 chan = pchan->ops->new_connection(pchan->data);
891 write_lock_bh(&conn->chan_lock);
/* Hold the ACL while the child channel exists (released in chan_del). */
893 hci_conn_hold(conn->hcon);
895 bacpy(&bt_sk(sk)->src, conn->src);
896 bacpy(&bt_sk(sk)->dst, conn->dst);
898 bt_accept_enqueue(parent, sk);
900 __l2cap_chan_add(conn, chan);
902 __set_chan_timer(chan, sk->sk_sndtimeo);
904 l2cap_state_change(chan, BT_CONNECTED);
905 parent->sk_data_ready(parent, 0);
907 write_unlock_bh(&conn->chan_lock);
910 bh_unlock_sock(parent);
/* Mark a channel fully established: reset configuration state, stop the
 * channel timer, go to BT_CONNECTED, and wake the socket (and the
 * listening parent, if any). */
913 static void l2cap_chan_ready(struct sock *sk)
915 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
916 struct sock *parent = bt_sk(sk)->parent;
918 BT_DBG("sk %p, parent %p", sk, parent);
920 chan->conf_state = 0;
921 __clear_chan_timer(chan);
923 l2cap_state_change(chan, BT_CONNECTED);
924 sk->sk_state_change(sk);
927 parent->sk_data_ready(parent, 0);
/* The underlying HCI link is up.  For LE: accept the incoming side or
 * start SMP security on the outgoing side.  Then, for each channel:
 * LE channels become ready once SMP security passes; non-connection-
 * oriented channels are marked connected immediately; connecting ACL
 * channels proceed with l2cap_do_start(). */
930 static void l2cap_conn_ready(struct l2cap_conn *conn)
932 struct l2cap_chan *chan;
934 BT_DBG("conn %p", conn);
936 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
937 l2cap_le_conn_ready(conn);
939 if (conn->hcon->out && conn->hcon->type == LE_LINK)
940 smp_conn_security(conn, conn->hcon->pending_sec_level);
942 read_lock(&conn->chan_lock);
944 list_for_each_entry(chan, &conn->chan_l, list) {
945 struct sock *sk = chan->sk;
949 if (conn->hcon->type == LE_LINK) {
950 if (smp_conn_security(conn, chan->sec_level))
951 l2cap_chan_ready(sk);
953 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
954 __clear_chan_timer(chan);
955 l2cap_state_change(chan, BT_CONNECTED);
956 sk->sk_state_change(sk);
958 } else if (chan->state == BT_CONNECT)
959 l2cap_do_start(chan);
964 read_unlock(&conn->chan_lock);
967 /* Notify sockets that we cannot guaranty reliability anymore */
/* Report `err` on every channel that demanded a reliable link (the
 * error assignment line is elided in this extraction). */
968 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
970 struct l2cap_chan *chan;
972 BT_DBG("conn %p", conn);
974 read_lock(&conn->chan_lock);
976 list_for_each_entry(chan, &conn->chan_l, list) {
977 struct sock *sk = chan->sk;
979 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
983 read_unlock(&conn->chan_lock);
/* Information-request timer expired: give up on the feature-mask
 * exchange and let pending channels proceed anyway. */
986 static void l2cap_info_timeout(unsigned long arg)
988 struct l2cap_conn *conn = (void *) arg;
990 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
991 conn->info_ident = 0;
993 l2cap_conn_start(conn);
/* Tear down an L2CAP connection when its HCI link dies: delete every
 * channel (notifying owners), free the partially-reassembled rx skb,
 * drop the HCI channel, cancel the info/security timers, and detach
 * from the hci_conn. */
996 static void l2cap_conn_del(struct hci_conn *hcon, int err)
998 struct l2cap_conn *conn = hcon->l2cap_data;
999 struct l2cap_chan *chan, *l;
1005 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1007 kfree_skb(conn->rx_skb);
1010 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1013 l2cap_chan_del(chan, err);
1015 chan->ops->close(chan->data);
1018 hci_chan_del(conn->hchan);
1020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1021 del_timer_sync(&conn->info_timer);
1023 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1024 del_timer(&conn->security_timer);
1025 smp_chan_destroy(conn);
1028 hcon->l2cap_data = NULL;
/* SMP security timer expired: drop the whole connection. */
1032 static void security_timeout(unsigned long arg)
1034 struct l2cap_conn *conn = (void *) arg;
1036 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for an HCI connection:
 * allocate an HCI channel, pick the MTU from the controller (LE MTU for
 * LE links, ACL MTU otherwise), initialise locks and the channel list,
 * and arm the security timer (LE) or info timer (BR/EDR). */
1039 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1041 struct l2cap_conn *conn = hcon->l2cap_data;
1042 struct hci_chan *hchan;
1047 hchan = hci_chan_create(hcon);
1051 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
/* Allocation failed: release the HCI channel again. */
1053 hci_chan_del(hchan);
1057 hcon->l2cap_data = conn;
1059 conn->hchan = hchan;
1061 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1063 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1064 conn->mtu = hcon->hdev->le_mtu;
1066 conn->mtu = hcon->hdev->acl_mtu;
1068 conn->src = &hcon->hdev->bdaddr;
1069 conn->dst = &hcon->dst;
1071 conn->feat_mask = 0;
1073 spin_lock_init(&conn->lock);
1074 rwlock_init(&conn->chan_lock);
1076 INIT_LIST_HEAD(&conn->chan_l);
1078 if (hcon->type == LE_LINK)
1079 setup_timer(&conn->security_timer, security_timeout,
1080 (unsigned long) conn);
1082 setup_timer(&conn->info_timer, l2cap_info_timeout,
1083 (unsigned long) conn);
1085 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Locked wrapper around __l2cap_chan_add(). */
1090 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1092 write_lock_bh(&conn->chan_lock);
1093 __l2cap_chan_add(conn, chan);
1094 write_unlock_bh(&conn->chan_lock);
1097 /* ---- Socket interface ---- */
1099 /* Find socket with psm and source bdaddr.
1100 * Returns closest match.
/* Exact source-address match wins; a BDADDR_ANY listener is remembered
 * as the fallback (c1) — mirrors l2cap_global_chan_by_scid(). */
1102 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1104 struct l2cap_chan *c, *c1 = NULL;
1106 read_lock(&chan_list_lock);
1108 list_for_each_entry(c, &chan_list, global_l) {
1109 struct sock *sk = c->sk;
1111 if (state && c->state != state)
1114 if (c->psm == psm) {
1116 if (!bacmp(&bt_sk(sk)->src, src)) {
1117 read_unlock(&chan_list_lock);
1122 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1127 read_unlock(&chan_list_lock);
/* Initiate an outgoing channel: route to a local adapter, create the
 * HCI connection (LE link when targeting the LE data CID, ACL
 * otherwise), attach the channel, and either finish immediately if the
 * link is already up or wait for l2cap_conn_ready().  Returns 0 or a
 * negative errno. */
1132 int l2cap_chan_connect(struct l2cap_chan *chan)
1134 struct sock *sk = chan->sk;
1135 bdaddr_t *src = &bt_sk(sk)->src;
1136 bdaddr_t *dst = &bt_sk(sk)->dst;
1137 struct l2cap_conn *conn;
1138 struct hci_conn *hcon;
1139 struct hci_dev *hdev;
1143 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1146 hdev = hci_get_route(dst, src);
1148 return -EHOSTUNREACH;
1150 hci_dev_lock_bh(hdev);
1152 auth_type = l2cap_get_auth_type(chan);
1154 if (chan->dcid == L2CAP_CID_LE_DATA)
1155 hcon = hci_connect(hdev, LE_LINK, dst,
1156 chan->sec_level, auth_type);
1158 hcon = hci_connect(hdev, ACL_LINK, dst,
1159 chan->sec_level, auth_type);
1162 err = PTR_ERR(hcon);
1166 conn = l2cap_conn_add(hcon, 0);
1173 /* Update source addr of the socket */
1174 bacpy(src, conn->src);
1176 l2cap_chan_add(conn, chan);
1178 l2cap_state_change(chan, BT_CONNECT);
1179 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Link already up: non-connection-oriented channels are done once
 * security passes; others continue with the connect/config exchange. */
1181 if (hcon->state == BT_CONNECTED) {
1182 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1183 __clear_chan_timer(chan);
1184 if (l2cap_chan_check_security(chan))
1185 l2cap_state_change(chan, BT_CONNECTED);
1187 l2cap_do_start(chan);
1193 hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until every transmitted ERTM frame has been
 * acknowledged or the connection goes away; returns 0, a socket error,
 * or the signal errno.  Timeout setup lines are elided in this
 * extraction. */
1198 int __l2cap_wait_ack(struct sock *sk)
1200 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1201 DECLARE_WAITQUEUE(wait, current);
1205 add_wait_queue(sk_sleep(sk), &wait);
1206 set_current_state(TASK_INTERRUPTIBLE);
1207 while (chan->unacked_frames > 0 && chan->conn) {
1211 if (signal_pending(current)) {
1212 err = sock_intr_errno(timeo);
1217 timeo = schedule_timeout(timeo);
1219 set_current_state(TASK_INTERRUPTIBLE);
1221 err = sock_error(sk);
1225 set_current_state(TASK_RUNNING);
1226 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the peer stopped responding to our poll.  Give up
 * and disconnect after remote_max_tx retries, otherwise poll again
 * (RR/RNR with P-bit) and re-arm. */
1230 static void l2cap_monitor_timeout(unsigned long arg)
1232 struct l2cap_chan *chan = (void *) arg;
1233 struct sock *sk = chan->sk;
1235 BT_DBG("chan %p", chan);
1238 if (chan->retry_count >= chan->remote_max_tx) {
1239 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1244 chan->retry_count++;
1245 __set_monitor_timer(chan);
1247 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: an I-frame went unacknowledged.  Enter the
 * WAIT_F state, start the monitor timer, and poll the peer. */
1251 static void l2cap_retrans_timeout(unsigned long arg)
1253 struct l2cap_chan *chan = (void *) arg;
1254 struct sock *sk = chan->sk;
1256 BT_DBG("chan %p", chan);
1259 chan->retry_count = 1;
1260 __set_monitor_timer(chan);
1262 set_bit(CONN_WAIT_F, &chan->conn_state);
1264 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Free frames at the head of tx_q that the peer has acknowledged,
 * stopping at expected_ack_seq; clear the retransmission timer once
 * nothing is outstanding.  (The kfree_skb of the dequeued frame is
 * elided in this extraction.) */
1268 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1270 struct sk_buff *skb;
1272 while ((skb = skb_peek(&chan->tx_q)) &&
1273 chan->unacked_frames) {
1274 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1277 skb = skb_dequeue(&chan->tx_q);
1280 chan->unacked_frames--;
1283 if (!chan->unacked_frames)
1284 __clear_retrans_timer(chan);
/* Streaming mode transmit: drain tx_q, stamping each frame's control
 * field with the next tx sequence number and (if enabled) recomputing
 * the CRC16 trailer, then send.  Frames are dequeued outright — no
 * retransmission copies are kept in this mode. */
1287 static void l2cap_streaming_send(struct l2cap_chan *chan)
1289 struct sk_buff *skb;
1293 while ((skb = skb_dequeue(&chan->tx_q))) {
1294 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1295 control |= __set_txseq(chan, chan->next_tx_seq);
1296 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1298 if (chan->fcs == L2CAP_FCS_CRC16) {
1299 fcs = crc16(0, (u8 *)skb->data,
1300 skb->len - L2CAP_FCS_SIZE);
1301 put_unaligned_le16(fcs,
1302 skb->data + skb->len - L2CAP_FCS_SIZE);
1305 l2cap_do_send(chan, skb);
1307 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single queued I-frame carrying `tx_seq`: locate it in
 * tx_q, disconnect if it already hit remote_max_tx retries, otherwise
 * clone it, rebuild its control field (keeping the SAR bits, folding in
 * a pending F-bit and the current reqseq) and recompute the FCS on the
 * clone before sending. */
1311 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1313 struct sk_buff *skb, *tx_skb;
1317 skb = skb_peek(&chan->tx_q);
1321 while (bt_cb(skb)->tx_seq != tx_seq) {
1322 if (skb_queue_is_last(&chan->tx_q, skb))
1325 skb = skb_queue_next(&chan->tx_q, skb);
1328 if (chan->remote_max_tx &&
1329 bt_cb(skb)->retries == chan->remote_max_tx) {
1330 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1334 tx_skb = skb_clone(skb, GFP_ATOMIC);
1335 bt_cb(skb)->retries++;
1337 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1338 control &= __get_sar_mask(chan);
1340 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1341 control |= __set_ctrl_final(chan);
1343 control |= __set_reqseq(chan, chan->buffer_seq);
1344 control |= __set_txseq(chan, tx_seq);
1346 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS is computed over — and stored into — the clone, which is the
 * buffer actually transmitted. */
1348 if (chan->fcs == L2CAP_FCS_CRC16) {
1349 fcs = crc16(0, (u8 *)tx_skb->data,
1350 tx_skb->len - L2CAP_FCS_SIZE);
1351 put_unaligned_le16(fcs,
1352 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1355 l2cap_do_send(chan, tx_skb);
1358 static int l2cap_ertm_send(struct l2cap_chan *chan)
1360 struct sk_buff *skb, *tx_skb;
1365 if (chan->state != BT_CONNECTED)
1368 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1370 if (chan->remote_max_tx &&
1371 bt_cb(skb)->retries == chan->remote_max_tx) {
1372 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1376 tx_skb = skb_clone(skb, GFP_ATOMIC);
1378 bt_cb(skb)->retries++;
1380 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1381 control &= __get_sar_mask(chan);
1383 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1384 control |= __set_ctrl_final(chan);
1386 control |= __set_reqseq(chan, chan->buffer_seq);
1387 control |= __set_txseq(chan, chan->next_tx_seq);
1389 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1391 if (chan->fcs == L2CAP_FCS_CRC16) {
1392 fcs = crc16(0, (u8 *)skb->data,
1393 tx_skb->len - L2CAP_FCS_SIZE);
1394 put_unaligned_le16(fcs, skb->data +
1395 tx_skb->len - L2CAP_FCS_SIZE);
1398 l2cap_do_send(chan, tx_skb);
1400 __set_retrans_timer(chan);
1402 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1404 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1406 if (bt_cb(skb)->retries == 1)
1407 chan->unacked_frames++;
1409 chan->frames_sent++;
1411 if (skb_queue_is_last(&chan->tx_q, skb))
1412 chan->tx_send_head = NULL;
1414 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the start of tx_q and the tx sequence to
 * the last acknowledged one, then retransmit via l2cap_ertm_send(). */
1422 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1426 if (!skb_queue_empty(&chan->tx_q))
1427 chan->tx_send_head = chan->tx_q.next;
1429 chan->next_tx_seq = chan->expected_ack_seq;
1430 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy; otherwise
 * try to piggy-back the ack on pending I-frames (l2cap_ertm_send > 0)
 * and fall back to an explicit RR. */
1434 static void l2cap_send_ack(struct l2cap_chan *chan)
1438 control |= __set_reqseq(chan, chan->buffer_seq);
1440 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1441 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1442 set_bit(CONN_RNR_SENT, &chan->conn_state);
1443 l2cap_send_sframe(chan, control);
1447 if (l2cap_ertm_send(chan) > 0)
1450 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1451 l2cap_send_sframe(chan, control);
/* Send an SREJ S-frame with the F-bit set, requesting the sequence
 * number recorded at the tail of the SREJ list. */
1454 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1456 struct srej_list *tail;
1459 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1460 control |= __set_ctrl_final(chan);
1462 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1463 control |= __set_reqseq(chan, tail->tx_seq);
1465 l2cap_send_sframe(chan, control);
/* Copy `len` bytes of user iovec data into `skb`: the first `count`
 * bytes go into the skb itself, the remainder into a chain of fragment
 * skbs (each at most conn->mtu), linked via frag_list and inheriting
 * the head skb's priority.  Error paths are elided in this extraction. */
1468 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1470 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1471 struct sk_buff **frag;
1474 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1480 /* Continuation fragments (no L2CAP header) */
1481 frag = &skb_shinfo(skb)->frag_list;
1483 count = min_t(unsigned int, conn->mtu, len);
1485 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1488 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1491 (*frag)->priority = skb->priority;
1496 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: basic L2CAP header + 2-byte PSM,
 * then the user payload copied in via l2cap_skbuff_fromiovec().
 * Returns the skb or ERR_PTR(-err) on allocation/copy failure.
 * NOTE(review): excerpt — the 'priority' parameter line and error-path
 * kfree_skb are on source lines not shown here. */
1502 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1503 struct msghdr *msg, size_t len,
1506 struct sock *sk = chan->sk;
1507 struct l2cap_conn *conn = chan->conn;
1508 struct sk_buff *skb;
/* header = basic L2CAP header plus the 2-byte PSM field */
1509 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1510 struct l2cap_hdr *lh;
1512 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
/* linear part is capped by the HCI connection MTU */
1514 count = min_t(unsigned int, (conn->mtu - hlen), len);
1515 skb = bt_skb_send_alloc(sk, count + hlen,
1516 msg->msg_flags & MSG_DONTWAIT, &err);
1518 return ERR_PTR(err);
1520 skb->priority = priority;
1522 /* Create L2CAP header */
1523 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1524 lh->cid = cpu_to_le16(chan->dcid);
/* length covers payload plus PSM (hlen minus the base header) */
1525 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1526 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1528 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1529 if (unlikely(err < 0)) {
1531 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload.  Same allocation/copy pattern as the connectionless
 * variant but without the PSM field.  Returns skb or ERR_PTR(-err).
 * NOTE(review): excerpt — 'priority' parameter line and error-path
 * cleanup are on source lines not shown here. */
1536 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1537 struct msghdr *msg, size_t len,
1540 struct sock *sk = chan->sk;
1541 struct l2cap_conn *conn = chan->conn;
1542 struct sk_buff *skb;
1543 int err, count, hlen = L2CAP_HDR_SIZE;
1544 struct l2cap_hdr *lh;
1546 BT_DBG("sk %p len %d", sk, (int)len);
1548 count = min_t(unsigned int, (conn->mtu - hlen), len);
1549 skb = bt_skb_send_alloc(sk, count + hlen,
1550 msg->msg_flags & MSG_DONTWAIT, &err);
1552 return ERR_PTR(err);
1554 skb->priority = priority;
1556 /* Create L2CAP header */
1557 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1558 lh->cid = cpu_to_le16(chan->dcid);
1559 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1561 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1562 if (unlikely(err < 0)) {
1564 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU.  Header size depends on whether
 * extended control fields are in use (FLAG_EXT_CTRL), plus an optional
 * SDU-length field (for the first segment of a segmented SDU) and an
 * optional FCS trailer (reserved here, filled in at transmit time).
 * Returns skb or ERR_PTR(-err); -ENOTCONN if the channel is not connected.
 * NOTE(review): excerpt — the connection-state test guarding the
 * -ENOTCONN return and the sdulen condition are on lines not shown. */
1569 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1570 struct msghdr *msg, size_t len,
1571 u32 control, u16 sdulen)
1573 struct sock *sk = chan->sk;
1574 struct l2cap_conn *conn = chan->conn;
1575 struct sk_buff *skb;
1576 int err, count, hlen;
1577 struct l2cap_hdr *lh;
1579 BT_DBG("sk %p len %d", sk, (int)len);
1582 return ERR_PTR(-ENOTCONN);
/* extended (4-byte) vs enhanced (2-byte) control field */
1584 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1585 hlen = L2CAP_EXT_HDR_SIZE;
1587 hlen = L2CAP_ENH_HDR_SIZE;
1590 hlen += L2CAP_SDULEN_SIZE;
1592 if (chan->fcs == L2CAP_FCS_CRC16)
1593 hlen += L2CAP_FCS_SIZE;
1595 count = min_t(unsigned int, (conn->mtu - hlen), len);
1596 skb = bt_skb_send_alloc(sk, count + hlen,
1597 msg->msg_flags & MSG_DONTWAIT, &err);
1599 return ERR_PTR(err);
1601 /* Create L2CAP header */
1602 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1603 lh->cid = cpu_to_le16(chan->dcid);
1604 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
/* control field width chosen by __ctrl_size() per FLAG_EXT_CTRL */
1606 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1609 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1611 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1612 if (unlikely(err < 0)) {
1614 return ERR_PTR(err);
/* FCS placeholder; real CRC16 is computed when the frame is sent */
1617 if (chan->fcs == L2CAP_FCS_CRC16)
1618 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1620 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than remote_mps into a START frame (carrying the
 * total SDU length), CONTINUE frames, and a final END frame, queuing the
 * whole sequence on a local list first so a mid-segmentation failure can
 * purge it atomically, then splicing onto chan->tx_q.
 * NOTE(review): excerpt — the loop around the CONTINUE/END frames, the
 * 'control'/'size'/'buflen' declarations and IS_ERR checks are on source
 * lines not shown here. */
1624 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1626 struct sk_buff *skb;
1627 struct sk_buff_head sar_queue;
1631 skb_queue_head_init(&sar_queue);
1632 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
/* START frame carries the full SDU length as its sdulen argument */
1633 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1635 return PTR_ERR(skb);
1637 __skb_queue_tail(&sar_queue, skb);
1638 len -= chan->remote_mps;
1639 size += chan->remote_mps;
1644 if (len > chan->remote_mps) {
1645 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1646 buflen = chan->remote_mps;
1648 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
/* continuation/end frames carry no sdulen (0) */
1652 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* on failure drop every segment built so far */
1654 skb_queue_purge(&sar_queue);
1655 return PTR_ERR(skb);
1658 __skb_queue_tail(&sar_queue, skb);
1662 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1663 if (chan->tx_send_head == NULL)
1664 chan->tx_send_head = sar_queue.next;
/* Entry point for sending user data on a channel.  Dispatches on channel
 * type/mode: connectionless channels and basic mode send a single PDU
 * immediately; ERTM/streaming either queue one unsegmented I-frame (SDU
 * fits in remote_mps) or segment via l2cap_sar_segment_sdu(), then kick
 * the streaming or ERTM transmit path.
 * NOTE(review): excerpt — the 'priority' parameter line, 'err'/'control'
 * declarations, -EMSGSIZE path for oversized basic-mode SDUs, and the
 * final default/return lines are not shown here. */
1669 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1672 struct sk_buff *skb;
1676 /* Connectionless channel */
1677 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1678 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1680 return PTR_ERR(skb);
1682 l2cap_do_send(chan, skb);
1686 switch (chan->mode) {
1687 case L2CAP_MODE_BASIC:
1688 /* Check outgoing MTU */
1689 if (len > chan->omtu)
1692 /* Create a basic PDU */
1693 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1695 return PTR_ERR(skb);
1697 l2cap_do_send(chan, skb);
1701 case L2CAP_MODE_ERTM:
1702 case L2CAP_MODE_STREAMING:
1703 /* Entire SDU fits into one PDU */
1704 if (len <= chan->remote_mps) {
1705 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1706 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1709 return PTR_ERR(skb);
1711 __skb_queue_tail(&chan->tx_q, skb);
1713 if (chan->tx_send_head == NULL)
1714 chan->tx_send_head = skb;
1717 /* Segment SDU into multiples PDUs */
1718 err = l2cap_sar_segment_sdu(chan, msg, len);
1723 if (chan->mode == L2CAP_MODE_STREAMING) {
1724 l2cap_streaming_send(chan);
/* while remote is busy or waiting for F-bit, leave data queued */
1729 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1730 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1735 err = l2cap_ertm_send(chan);
1742 BT_DBG("bad state %1.1x", chan->mode);
1749 /* Copy frame to all raw sockets on that connection */
/* Clone an incoming frame to every raw (L2CAP_CHAN_RAW) channel on this
 * connection, under the connection channel-list read lock, skipping the
 * socket the frame originated from.
 * NOTE(review): excerpt — the originating-socket comparison and the
 * continue/free paths are on source lines not shown here. */
1752 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1754 struct sk_buff *nskb;
1755 struct l2cap_chan *chan;
1757 BT_DBG("conn %p", conn);
1759 read_lock(&conn->chan_lock);
1760 list_for_each_entry(chan, &conn->chan_l, list) {
1761 struct sock *sk = chan->sk;
1762 if (chan->chan_type != L2CAP_CHAN_RAW)
1765 /* Don't send frame to the socket it came from */
1768 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv callback != 0 means the clone was not consumed */
1772 if (chan->ops->recv(chan->data, nskb))
1775 read_unlock(&conn->chan_lock);
1776 /* ---- L2CAP signalling commands ---- */
/* Allocate and build a signalling command skb: L2CAP header (CID chosen
 * by link type: LE vs BR/EDR signalling channel), command header
 * (code/ident/len), then 'dlen' bytes of payload — split across frag_list
 * continuation fragments when it exceeds conn->mtu.
 * NOTE(review): excerpt — 'len'/'count' declarations, NULL-allocation
 * checks, the cmd->code/ident assignments, data-pointer advancement and
 * the fragment loop bounds are on source lines not shown here. */
1777 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1778 u8 code, u8 ident, u16 dlen, void *data)
1780 struct sk_buff *skb, **frag;
1781 struct l2cap_cmd_hdr *cmd;
1782 struct l2cap_hdr *lh;
1785 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1786 conn, code, ident, dlen);
1788 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1789 count = min_t(unsigned int, conn->mtu, len);
1791 skb = bt_skb_alloc(count, GFP_ATOMIC);
1795 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1796 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1798 if (conn->hcon->type == LE_LINK)
1799 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1801 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1803 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1806 cmd->len = cpu_to_le16(dlen);
/* remaining room in the first skb after the two headers */
1809 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1810 memcpy(skb_put(skb, count), data, count);
1816 /* Continuation fragments (no L2CAP header) */
1817 frag = &skb_shinfo(skb)->frag_list;
1819 count = min_t(unsigned int, conn->mtu, len);
1821 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1825 memcpy(skb_put(*frag, count), data, count);
1830 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: returns option type and
 * length via *type/*olen, and the value via *val — widened from 1/2/4
 * bytes, or passed as a pointer (cast to unsigned long) for larger
 * options.  Returns the total bytes consumed so callers can advance.
 * NOTE(review): excerpt — the switch on opt->len, *ptr advancement and
 * the 'len' declaration are on source lines not shown here. */
1840 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1842 struct l2cap_conf_opt *opt = *ptr;
1845 len = L2CAP_CONF_OPT_SIZE + opt->len;
1853 *val = *((u8 *) opt->val);
1857 *val = get_unaligned_le16(opt->val);
1861 *val = get_unaligned_le32(opt->val);
/* larger options: hand back a pointer to the raw bytes */
1865 *val = (unsigned long) opt->val;
1869 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr, encoding the
 * value as 1/2/4 little-endian bytes or as a raw memcpy for larger
 * options ('val' then holds a pointer), and advance *ptr past it.
 * NOTE(review): excerpt — the opt->type/opt->len assignments and the
 * switch statement framing are on source lines not shown here. */
1873 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1875 struct l2cap_conf_opt *opt = *ptr;
1877 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1884 *((u8 *) opt->val) = val;
1888 put_unaligned_le16(val, opt->val);
1892 put_unaligned_le32(val, opt->val);
/* len > 4: 'val' is actually a pointer to the option payload */
1896 memcpy(opt->val, (void *) val, len);
1900 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Build an Extended Flow Specification (EFS) option from the channel's
 * local QoS parameters and append it via l2cap_add_conf_opt().  ERTM
 * uses the channel's local service type/id; streaming forces best-effort.
 * NOTE(review): excerpt — the streaming-case id/acc_lat/flush_to
 * assignments and the default branch are on source lines not shown. */
1903 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1905 struct l2cap_conf_efs efs;
1907 switch(chan->mode) {
1908 case L2CAP_MODE_ERTM:
1909 efs.id = chan->local_id;
1910 efs.stype = chan->local_stype;
1911 efs.msdu = cpu_to_le16(chan->local_msdu);
1912 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1913 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1914 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1917 case L2CAP_MODE_STREAMING:
1919 efs.stype = L2CAP_SERV_BESTEFFORT;
1920 efs.msdu = cpu_to_le16(chan->local_msdu);
1921 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1930 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1931 (unsigned long) &efs);
/* ERTM ack-timer callback: send a pending acknowledgement for the
 * channel, holding the socket's bottom-half lock around the send. */
1934 static void l2cap_ack_timeout(unsigned long arg)
1936 struct l2cap_chan *chan = (void *) arg;
1938 bh_lock_sock(chan->sk);
1939 l2cap_send_ack(chan);
1940 bh_unlock_sock(chan->sk);
/* Initialise ERTM per-channel state once configuration completes:
 * zero the sequence/ack counters, arm the retransmission, monitor and
 * ack timers, set up the SREJ queue/list, and route socket backlog
 * processing through the ERTM receive path. */
1943 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1945 struct sock *sk = chan->sk;
1947 chan->expected_ack_seq = 0;
1948 chan->unacked_frames = 0;
1949 chan->buffer_seq = 0;
1950 chan->num_acked = 0;
1951 chan->frames_sent = 0;
1953 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1954 (unsigned long) chan);
1955 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1956 (unsigned long) chan);
1957 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1959 skb_queue_head_init(&chan->srej_q);
1961 INIT_LIST_HEAD(&chan->srej_l);
/* frames deferred to the socket backlog go through the ERTM handler */
1964 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick the channel mode to request: keep ERTM/streaming only when the
 * remote's feature mask supports it, otherwise fall back to basic mode.
 * NOTE(review): excerpt — the switch statement opening and the
 * "return mode" success path are on source lines not shown here. */
1967 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1970 case L2CAP_MODE_STREAMING:
1971 case L2CAP_MODE_ERTM:
1972 if (l2cap_mode_supported(mode, remote_feat_mask))
1976 return L2CAP_MODE_BASIC;
/* Extended Window Size usable: high-speed support enabled (module param)
 * AND the remote advertised L2CAP_FEAT_EXT_WINDOW. */
1980 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1982 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification usable: high-speed support enabled AND the
 * remote advertised L2CAP_FEAT_EXT_FLOW. */
1985 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
1987 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Choose the tx window encoding: if the requested window exceeds the
 * default and extended windows are supported, switch the channel to
 * extended control fields; otherwise clamp tx_win to the default. */
1990 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1992 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1993 __l2cap_ews_supported(chan)) {
1994 /* use extended control field */
1995 set_bit(FLAG_EXT_CTRL, &chan->flags);
1996 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
1998 chan->tx_win = min_t(u16, chan->tx_win,
1999 L2CAP_DEFAULT_TX_WINDOW);
2000 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configure Request into 'data' (a l2cap_conf_req):
 * on the first request, possibly downgrade the channel mode to what the
 * remote supports and enable EFS; then append MTU, RFC (mode-specific
 * retransmission/flow-control parameters), EFS, FCS and EWS options as
 * applicable.  Returns the total request length (ptr - data).
 * NOTE(review): excerpt — the 'size' declaration, some break/goto
 * framing and the final return expression are on lines not shown. */
2004 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2006 struct l2cap_conf_req *req = data;
2007 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2008 void *ptr = req->data;
2011 BT_DBG("chan %p", chan);
/* mode (re)selection only before any config exchange happened */
2013 if (chan->num_conf_req || chan->num_conf_rsp)
2016 switch (chan->mode) {
2017 case L2CAP_MODE_STREAMING:
2018 case L2CAP_MODE_ERTM:
2019 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2022 if (__l2cap_efs_supported(chan))
2023 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2027 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* only send an MTU option when deviating from the default */
2032 if (chan->imtu != L2CAP_DEFAULT_MTU)
2033 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2035 switch (chan->mode) {
2036 case L2CAP_MODE_BASIC:
/* basic mode needs no RFC option unless remote knows ERTM/streaming */
2037 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2038 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2041 rfc.mode = L2CAP_MODE_BASIC;
2043 rfc.max_transmit = 0;
2044 rfc.retrans_timeout = 0;
2045 rfc.monitor_timeout = 0;
2046 rfc.max_pdu_size = 0;
2048 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2049 (unsigned long) &rfc);
2052 case L2CAP_MODE_ERTM:
2053 rfc.mode = L2CAP_MODE_ERTM;
2054 rfc.max_transmit = chan->max_tx;
/* timeouts are set by the peer's response, not requested here */
2055 rfc.retrans_timeout = 0;
2056 rfc.monitor_timeout = 0;
2058 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2059 L2CAP_EXT_HDR_SIZE -
2062 rfc.max_pdu_size = cpu_to_le16(size);
2064 l2cap_txwin_setup(chan);
2066 rfc.txwin_size = min_t(u16, chan->tx_win,
2067 L2CAP_DEFAULT_TX_WINDOW);
2069 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2070 (unsigned long) &rfc);
2072 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2073 l2cap_add_opt_efs(&ptr, chan);
2075 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2078 if (chan->fcs == L2CAP_FCS_NONE ||
2079 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2080 chan->fcs = L2CAP_FCS_NONE;
2081 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
/* extended control implies advertising the full window via EWS */
2084 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2085 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2089 case L2CAP_MODE_STREAMING:
2090 rfc.mode = L2CAP_MODE_STREAMING;
2092 rfc.max_transmit = 0;
2093 rfc.retrans_timeout = 0;
2094 rfc.monitor_timeout = 0;
2096 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2097 L2CAP_EXT_HDR_SIZE -
2100 rfc.max_pdu_size = cpu_to_le16(size);
2102 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2103 (unsigned long) &rfc);
2105 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2106 l2cap_add_opt_efs(&ptr, chan);
2108 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2111 if (chan->fcs == L2CAP_FCS_NONE ||
2112 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2113 chan->fcs = L2CAP_FCS_NONE;
2114 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2119 req->dcid = cpu_to_le16(chan->dcid);
2120 req->flags = cpu_to_le16(0);
/* Parse the peer's buffered Configure Request (chan->conf_req, length
 * chan->conf_len) and build our Configure Response into 'data'.
 * Phase 1: walk the options, recording MTU/flush-to/RFC/FCS/EFS/EWS and
 * marking unknown non-hint options L2CAP_CONF_UNKNOWN.
 * Phase 2: reconcile the requested mode with ours (possibly refusing
 * with -ECONNREFUSED), then fill in accepted/adjusted output options
 * (MTU, RFC with clamped MPS and default timeouts, EFS) and the result
 * code.  Returns the response length.
 * NOTE(review): excerpt — several declarations (val, size, remote_efs),
 * the unacceptable-mode 'done' labels and branch framing, and the final
 * return expression are on source lines not shown here. */
2127 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2128 struct l2cap_conf_rsp *rsp = data;
2129 void *ptr = rsp->data;
2130 void *req = chan->conf_req;
2131 int len = chan->conf_len;
2132 int type, hint, olen;
2133 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2134 struct l2cap_conf_efs efs;
2136 u16 mtu = L2CAP_DEFAULT_MTU;
2137 u16 result = L2CAP_CONF_SUCCESS;
2140 BT_DBG("chan %p", chan);
/* -------- phase 1: option walk -------- */
2142 while (len >= L2CAP_CONF_OPT_SIZE) {
2143 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* hint options may be silently ignored; others must be understood */
2145 hint = type & L2CAP_CONF_HINT;
2146 type &= L2CAP_CONF_MASK;
2149 case L2CAP_CONF_MTU:
2153 case L2CAP_CONF_FLUSH_TO:
2154 chan->flush_to = val;
2157 case L2CAP_CONF_QOS:
2160 case L2CAP_CONF_RFC:
2161 if (olen == sizeof(rfc))
2162 memcpy(&rfc, (void *) val, olen);
2165 case L2CAP_CONF_FCS:
2166 if (val == L2CAP_FCS_NONE)
2167 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2170 case L2CAP_CONF_EFS:
2172 if (olen == sizeof(efs))
2173 memcpy(&efs, (void *) val, olen);
2176 case L2CAP_CONF_EWS:
/* EWS without high-speed support is refused outright */
2178 return -ECONNREFUSED;
2180 set_bit(FLAG_EXT_CTRL, &chan->flags);
2181 set_bit(CONF_EWS_RECV, &chan->conf_state);
2182 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2183 chan->remote_tx_win = val;
/* unknown non-hint option: echo its type back with CONF_UNKNOWN */
2190 result = L2CAP_CONF_UNKNOWN;
2191 *((u8 *) ptr++) = type;
/* -------- phase 2: mode reconciliation -------- */
2196 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2199 switch (chan->mode) {
2200 case L2CAP_MODE_STREAMING:
2201 case L2CAP_MODE_ERTM:
2202 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2203 chan->mode = l2cap_select_mode(rfc.mode,
2204 chan->conn->feat_mask);
2209 if (__l2cap_efs_supported(chan))
2210 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2212 return -ECONNREFUSED;
2215 if (chan->mode != rfc.mode)
2216 return -ECONNREFUSED;
/* first mismatch: counter-propose our mode; second: refuse */
2222 if (chan->mode != rfc.mode) {
2223 result = L2CAP_CONF_UNACCEPT;
2224 rfc.mode = chan->mode;
2226 if (chan->num_conf_rsp == 1)
2227 return -ECONNREFUSED;
2229 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2230 sizeof(rfc), (unsigned long) &rfc);
2233 if (result == L2CAP_CONF_SUCCESS) {
2234 /* Configure output options and let the other side know
2235 * which ones we don't like. */
2237 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2238 result = L2CAP_CONF_UNACCEPT;
2241 set_bit(CONF_MTU_DONE, &chan->conf_state);
2243 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service-type mismatch (unless either side is no-traffic) */
2246 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2247 efs.stype != L2CAP_SERV_NOTRAFIC &&
2248 efs.stype != chan->local_stype) {
2250 result = L2CAP_CONF_UNACCEPT;
2252 if (chan->num_conf_req >= 1)
2253 return -ECONNREFUSED;
2255 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2257 (unsigned long) &efs);
2259 /* Send PENDING Conf Rsp */
2260 result = L2CAP_CONF_PENDING;
2261 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2266 case L2CAP_MODE_BASIC:
2267 chan->fcs = L2CAP_FCS_NONE;
2268 set_bit(CONF_MODE_DONE, &chan->conf_state);
2271 case L2CAP_MODE_ERTM:
2272 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2273 chan->remote_tx_win = rfc.txwin_size;
2275 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2277 chan->remote_max_tx = rfc.max_transmit;
/* clamp the peer's MPS to what fits in our HCI MTU */
2279 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2281 L2CAP_EXT_HDR_SIZE -
2284 rfc.max_pdu_size = cpu_to_le16(size);
2285 chan->remote_mps = size;
2287 rfc.retrans_timeout =
2288 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2289 rfc.monitor_timeout =
2290 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2292 set_bit(CONF_MODE_DONE, &chan->conf_state);
2294 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2295 sizeof(rfc), (unsigned long) &rfc);
2297 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2298 chan->remote_id = efs.id;
2299 chan->remote_stype = efs.stype;
2300 chan->remote_msdu = le16_to_cpu(efs.msdu);
2301 chan->remote_flush_to =
2302 le32_to_cpu(efs.flush_to);
2303 chan->remote_acc_lat =
2304 le32_to_cpu(efs.acc_lat);
2305 chan->remote_sdu_itime =
2306 le32_to_cpu(efs.sdu_itime);
2307 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2308 sizeof(efs), (unsigned long) &efs);
2312 case L2CAP_MODE_STREAMING:
2313 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2315 L2CAP_EXT_HDR_SIZE -
2318 rfc.max_pdu_size = cpu_to_le16(size);
2319 chan->remote_mps = size;
2321 set_bit(CONF_MODE_DONE, &chan->conf_state);
2323 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2324 sizeof(rfc), (unsigned long) &rfc);
2329 result = L2CAP_CONF_UNACCEPT;
2331 memset(&rfc, 0, sizeof(rfc));
2332 rfc.mode = chan->mode;
2335 if (result == L2CAP_CONF_SUCCESS)
2336 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2338 rsp->scid = cpu_to_le16(chan->dcid);
2339 rsp->result = cpu_to_le16(result);
2340 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response ('rsp', 'len' bytes of options)
 * and build our follow-up Configure Request into 'data', echoing back
 * the negotiated MTU/flush-to/RFC/EWS/EFS options.  Updates *result
 * (e.g. to UNACCEPT on a too-small MTU) and, on success/pending,
 * commits the negotiated ERTM/streaming timing and MPS parameters to
 * the channel.  Returns the new request length or -ECONNREFUSED.
 * NOTE(review): excerpt — 'type'/'olen'/'val' declarations, some branch
 * framing and the final return are on source lines not shown here. */
2345 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2347 struct l2cap_conf_req *req = data;
2348 void *ptr = req->data;
2351 struct l2cap_conf_rfc rfc;
2352 struct l2cap_conf_efs efs;
2354 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2356 while (len >= L2CAP_CONF_OPT_SIZE) {
2357 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2360 case L2CAP_CONF_MTU:
/* peer proposed an MTU below the legal minimum: counter it */
2361 if (val < L2CAP_DEFAULT_MIN_MTU) {
2362 *result = L2CAP_CONF_UNACCEPT;
2363 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2366 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2369 case L2CAP_CONF_FLUSH_TO:
2370 chan->flush_to = val;
2371 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2375 case L2CAP_CONF_RFC:
2376 if (olen == sizeof(rfc))
2377 memcpy(&rfc, (void *)val, olen);
/* state-2 devices may not be switched to another mode */
2379 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2380 rfc.mode != chan->mode)
2381 return -ECONNREFUSED;
2385 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2386 sizeof(rfc), (unsigned long) &rfc);
2389 case L2CAP_CONF_EWS:
2390 chan->tx_win = min_t(u16, val,
2391 L2CAP_DEFAULT_EXT_WINDOW);
2392 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2396 case L2CAP_CONF_EFS:
2397 if (olen == sizeof(efs))
2398 memcpy(&efs, (void *)val, olen);
2400 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2401 efs.stype != L2CAP_SERV_NOTRAFIC &&
2402 efs.stype != chan->local_stype)
2403 return -ECONNREFUSED;
2405 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2406 sizeof(efs), (unsigned long) &efs);
/* the peer may not move a basic-mode channel to another mode */
2411 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2412 return -ECONNREFUSED;
2414 chan->mode = rfc.mode;
2416 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2418 case L2CAP_MODE_ERTM:
2419 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2420 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2421 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2423 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2424 chan->local_msdu = le16_to_cpu(efs.msdu);
2425 chan->local_sdu_itime =
2426 le32_to_cpu(efs.sdu_itime);
2427 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2428 chan->local_flush_to =
2429 le32_to_cpu(efs.flush_to);
2433 case L2CAP_MODE_STREAMING:
2434 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2438 req->dcid = cpu_to_le16(chan->dcid);
2439 req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal Configure Response header (scid/result/flags) with
 * no options; returns the response length (ptr - data on a line not
 * shown in this excerpt). */
2444 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2446 struct l2cap_conf_rsp *rsp = data;
2447 void *ptr = rsp->data;
2449 BT_DBG("chan %p", chan);
2451 rsp->scid = cpu_to_le16(chan->dcid);
2452 rsp->result = cpu_to_le16(result);
2453 rsp->flags = cpu_to_le16(flags);
/* Complete a connect that was deferred (e.g. pending authorization):
 * send the success Connect Response, then — if we haven't already —
 * kick off configuration by sending our first Configure Request.
 * NOTE(review): excerpt — the 'buf' declaration and the early return
 * after test_and_set_bit are on source lines not shown here. */
2458 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2460 struct l2cap_conn_rsp rsp;
2461 struct l2cap_conn *conn = chan->conn;
2464 rsp.scid = cpu_to_le16(chan->dcid);
2465 rsp.dcid = cpu_to_le16(chan->scid);
2466 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2467 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2468 l2cap_send_cmd(conn, chan->ident,
2469 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* only the first caller past this point sends the config request */
2471 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2474 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2475 l2cap_build_conf_req(chan, buf), buf);
2476 chan->num_conf_req++;
/* Extract the RFC option from a successful Configure Response and commit
 * the peer-granted ERTM timeouts / MPS (or just MPS for streaming) to
 * the channel.  No-op for basic-mode channels.
 * NOTE(review): excerpt — 'type'/'olen'/'val'/'rfc' initialisation, the
 * switch framing and the early-return 'done' path are on lines not
 * shown here. */
2479 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2483 struct l2cap_conf_rfc rfc;
2485 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2487 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2490 while (len >= L2CAP_CONF_OPT_SIZE) {
2491 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2494 case L2CAP_CONF_RFC:
2495 if (olen == sizeof(rfc))
2496 memcpy(&rfc, (void *)val, olen);
2503 case L2CAP_MODE_ERTM:
2504 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2505 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2506 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2508 case L2CAP_MODE_STREAMING:
2509 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject: if it rejects our outstanding Information
 * Request (matched by ident), cancel the info timer, mark feature-mask
 * discovery done and proceed with pending connections anyway. */
2513 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2515 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2517 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2520 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2521 cmd->ident == conn->info_ident) {
2522 del_timer(&conn->info_timer);
2524 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2525 conn->info_ident = 0;
2527 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: look up a listening channel for
 * the PSM, enforce link security (except for SDP, PSM 0x0001), check the
 * accept backlog, create the child channel, reject duplicate remote
 * CIDs, enqueue the child on the parent's accept queue and answer with
 * SUCCESS / PEND (authentication or authorization pending) / an error.
 * If the remote's feature mask is still unknown, fire an Information
 * Request; otherwise possibly start configuration immediately.
 * NOTE(review): excerpt — goto labels ('response', 'sendresp'), the
 * parent assignment, dcid assignment from chan->scid, the 'buf'
 * declaration and several closing braces are on lines not shown. */
2533 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2535 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2536 struct l2cap_conn_rsp rsp;
2537 struct l2cap_chan *chan = NULL, *pchan;
2538 struct sock *parent, *sk = NULL;
2539 int result, status = L2CAP_CS_NO_INFO;
2541 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2542 __le16 psm = req->psm;
2544 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2546 /* Check if we have socket listening on psm */
2547 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2549 result = L2CAP_CR_BAD_PSM;
2555 bh_lock_sock(parent);
2557 /* Check if the ACL is secure enough (if not SDP) */
2558 if (psm != cpu_to_le16(0x0001) &&
2559 !hci_conn_check_link_mode(conn->hcon)) {
2560 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2561 result = L2CAP_CR_SEC_BLOCK;
2565 result = L2CAP_CR_NO_MEM;
2567 /* Check for backlog size */
2568 if (sk_acceptq_is_full(parent)) {
2569 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2573 chan = pchan->ops->new_connection(pchan->data);
2579 write_lock_bh(&conn->chan_lock);
2581 /* Check if we already have channel with that dcid */
2582 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2583 write_unlock_bh(&conn->chan_lock);
2584 sock_set_flag(sk, SOCK_ZAPPED);
2585 chan->ops->close(chan->data);
2589 hci_conn_hold(conn->hcon);
2591 bacpy(&bt_sk(sk)->src, conn->src);
2592 bacpy(&bt_sk(sk)->dst, conn->dst);
2596 bt_accept_enqueue(parent, sk);
2598 __l2cap_chan_add(conn, chan);
2602 __set_chan_timer(chan, sk->sk_sndtimeo);
2604 chan->ident = cmd->ident;
2606 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2607 if (l2cap_chan_check_security(chan)) {
/* userspace wants to authorize: report PEND + AUTHOR_PEND */
2608 if (bt_sk(sk)->defer_setup) {
2609 l2cap_state_change(chan, BT_CONNECT2);
2610 result = L2CAP_CR_PEND;
2611 status = L2CAP_CS_AUTHOR_PEND;
2612 parent->sk_data_ready(parent, 0);
2614 l2cap_state_change(chan, BT_CONFIG);
2615 result = L2CAP_CR_SUCCESS;
2616 status = L2CAP_CS_NO_INFO;
2619 l2cap_state_change(chan, BT_CONNECT2);
2620 result = L2CAP_CR_PEND;
2621 status = L2CAP_CS_AUTHEN_PEND;
/* feature mask not known yet: answer PEND until info exchange ends */
2624 l2cap_state_change(chan, BT_CONNECT2);
2625 result = L2CAP_CR_PEND;
2626 status = L2CAP_CS_NO_INFO;
2629 write_unlock_bh(&conn->chan_lock);
2632 bh_unlock_sock(parent);
2635 rsp.scid = cpu_to_le16(scid);
2636 rsp.dcid = cpu_to_le16(dcid);
2637 rsp.result = cpu_to_le16(result);
2638 rsp.status = cpu_to_le16(status);
2639 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2641 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2642 struct l2cap_info_req info;
2643 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2645 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2646 conn->info_ident = l2cap_get_ident(conn);
2648 mod_timer(&conn->info_timer, jiffies +
2649 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2651 l2cap_send_cmd(conn, conn->info_ident,
2652 L2CAP_INFO_REQ, sizeof(info), &info);
2655 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2656 result == L2CAP_CR_SUCCESS) {
2658 set_bit(CONF_REQ_SENT, &chan->conf_state);
2659 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2660 l2cap_build_conf_req(chan, buf), buf);
2661 chan->num_conf_req++;
/* Handle a Connection Response: find the channel by scid (or by the
 * command ident while still unbound); on SUCCESS move to BT_CONFIG and
 * send our Configure Request, on PEND just mark connect-pending, and on
 * refusal tear the channel down (deferred via timer if the socket is
 * locked by userspace).
 * NOTE(review): excerpt — 'sk'/'req' declarations, dcid assignment on
 * success, the default/refusal case label and bh_unlock paths are on
 * source lines not shown here. */
2667 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2669 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2670 u16 scid, dcid, result, status;
2671 struct l2cap_chan *chan;
2675 scid = __le16_to_cpu(rsp->scid);
2676 dcid = __le16_to_cpu(rsp->dcid);
2677 result = __le16_to_cpu(rsp->result);
2678 status = __le16_to_cpu(rsp->status);
2680 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2683 chan = l2cap_get_chan_by_scid(conn, scid);
/* scid 0 in the response: match by the request's command ident */
2687 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2695 case L2CAP_CR_SUCCESS:
2696 l2cap_state_change(chan, BT_CONFIG);
2699 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2701 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2704 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2705 l2cap_build_conf_req(chan, req), req);
2706 chan->num_conf_req++;
2710 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2714 /* don't delete l2cap channel if sk is owned by user */
2715 if (sock_owned_by_user(sk)) {
2716 l2cap_state_change(chan, BT_DISCONN);
2717 __clear_chan_timer(chan);
2718 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2722 l2cap_chan_del(chan, ECONNREFUSED);
/* Apply the FCS default once configuration settles: FCS is meaningful
 * only in ERTM/streaming mode, and defaults to CRC16 there unless the
 * peer explicitly negotiated it off (CONF_NO_FCS_RECV). */
2730 static inline void set_default_fcs(struct l2cap_chan *chan)
2732 /* FCS is enabled only in ERTM or streaming mode, if one or both
2735 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2736 chan->fcs = L2CAP_FCS_NONE;
2737 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2738 chan->fcs = L2CAP_FCS_CRC16;
/* Handle a Configure Request: find the channel by our scid (= their
 * dcid); reject with INVALID_CID if not configurable, reject oversized
 * accumulated option data, buffer partial requests (continuation flag
 * 0x0001), and on the final fragment parse everything, send the
 * response, and — when both directions are done — finish channel setup
 * (FCS default, ERTM init, mark connected).  Also sends our own
 * Configure Request if we hadn't yet, and completes the
 * PENDING-response rendezvous when both sides signalled pending.
 * NOTE(review): excerpt — 'rsp'/'buf' declarations, 'sk', 'len', the
 * unlock/return framing and conf_len reset are on lines not shown. */
2741 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2743 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2746 struct l2cap_chan *chan;
2750 dcid = __le16_to_cpu(req->dcid);
2751 flags = __le16_to_cpu(req->flags);
2753 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2755 chan = l2cap_get_chan_by_scid(conn, dcid);
2761 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2762 struct l2cap_cmd_rej_cid rej;
2764 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2765 rej.scid = cpu_to_le16(chan->scid);
2766 rej.dcid = cpu_to_le16(chan->dcid);
2768 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2773 /* Reject if config buffer is too small. */
2774 len = cmd_len - sizeof(*req);
2775 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2776 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2777 l2cap_build_conf_rsp(chan, rsp,
2778 L2CAP_CONF_REJECT, flags), rsp);
/* accumulate this fragment into the channel's config buffer */
2783 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2784 chan->conf_len += len;
2786 if (flags & 0x0001) {
2787 /* Incomplete config. Send empty response. */
2788 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2789 l2cap_build_conf_rsp(chan, rsp,
2790 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2794 /* Complete config. */
2795 len = l2cap_parse_conf_req(chan, rsp);
2797 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2801 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2802 chan->num_conf_rsp++;
2804 /* Reset config buffer. */
2807 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* both directions configured: the channel is now usable */
2810 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2811 set_default_fcs(chan);
2813 l2cap_state_change(chan, BT_CONNECTED);
2815 chan->next_tx_seq = 0;
2816 chan->expected_tx_seq = 0;
2817 skb_queue_head_init(&chan->tx_q);
2818 if (chan->mode == L2CAP_MODE_ERTM)
2819 l2cap_ertm_init(chan);
2821 l2cap_chan_ready(sk);
2825 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2827 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2828 l2cap_build_conf_req(chan, buf), buf);
2829 chan->num_conf_req++;
2832 /* Got Conf Rsp PENDING from remote side and asume we sent
2833 Conf Rsp PENDING in the code above */
2834 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2835 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2837 /* check compatibility */
2839 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2840 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2842 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2843 l2cap_build_conf_rsp(chan, rsp,
2844 L2CAP_CONF_SUCCESS, 0x0000), rsp);
/* Handle a Configure Response.  SUCCESS: absorb the granted RFC values.
 * PENDING: if we are also pending locally, re-parse and answer with our
 * final response.  UNACCEPT: renegotiate (up to L2CAP_CONF_MAX_CONF_RSP
 * attempts, bounded by our request buffer) with a fresh Configure
 * Request.  Anything else: disconnect.  When both directions are done,
 * finish channel setup exactly as in l2cap_config_req().
 * NOTE(review): excerpt — 'sk'/'buf'/'req' declarations, the default
 * case label, continuation-flag check and unlock/return framing are on
 * source lines not shown here. */
2852 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2854 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2855 u16 scid, flags, result;
2856 struct l2cap_chan *chan;
2858 int len = cmd->len - sizeof(*rsp);
2860 scid = __le16_to_cpu(rsp->scid);
2861 flags = __le16_to_cpu(rsp->flags);
2862 result = __le16_to_cpu(rsp->result);
2864 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2865 scid, flags, result);
2867 chan = l2cap_get_chan_by_scid(conn, scid);
2874 case L2CAP_CONF_SUCCESS:
2875 l2cap_conf_rfc_get(chan, rsp->data, len);
2876 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2879 case L2CAP_CONF_PENDING:
2880 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2882 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2885 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2888 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2892 /* check compatibility */
2894 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2895 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2897 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2898 l2cap_build_conf_rsp(chan, buf,
2899 L2CAP_CONF_SUCCESS, 0x0000), buf);
2903 case L2CAP_CONF_UNACCEPT:
2904 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* counter-proposal must fit in our request buffer */
2907 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2908 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2912 /* throw out any old stored conf requests */
2913 result = L2CAP_CONF_SUCCESS;
2914 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2917 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2921 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2922 L2CAP_CONF_REQ, len, req);
2923 chan->num_conf_req++;
2924 if (result != L2CAP_CONF_SUCCESS)
/* unrecoverable rejection: report error and disconnect */
2930 sk->sk_err = ECONNRESET;
2931 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2932 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2939 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2941 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2942 set_default_fcs(chan);
2944 l2cap_state_change(chan, BT_CONNECTED);
2945 chan->next_tx_seq = 0;
2946 chan->expected_tx_seq = 0;
2947 skb_queue_head_init(&chan->tx_q);
2948 if (chan->mode == L2CAP_MODE_ERTM)
2949 l2cap_ertm_init(chan);
2951 l2cap_chan_ready(sk);
/* Handle a Disconnection Request: find the channel by our scid (their
 * dcid), acknowledge with a Disconnection Response, shut the socket
 * down, and delete the channel — deferred via timer if userspace holds
 * the socket lock.
 * NOTE(review): excerpt — 'sk' and scid/dcid declarations, the
 * not-found return and unlock framing are on lines not shown here. */
2959 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2961 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2962 struct l2cap_disconn_rsp rsp;
2964 struct l2cap_chan *chan;
2967 scid = __le16_to_cpu(req->scid);
2968 dcid = __le16_to_cpu(req->dcid);
2970 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2972 chan = l2cap_get_chan_by_scid(conn, dcid);
2978 rsp.dcid = cpu_to_le16(chan->scid);
2979 rsp.scid = cpu_to_le16(chan->dcid);
2980 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2982 sk->sk_shutdown = SHUTDOWN_MASK;
2984 /* don't delete l2cap channel if sk is owned by user */
2985 if (sock_owned_by_user(sk)) {
2986 l2cap_state_change(chan, BT_DISCONN);
2987 __clear_chan_timer(chan);
2988 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2993 l2cap_chan_del(chan, ECONNRESET);
2996 chan->ops->close(chan->data);
/* Handle a Disconnection Response to our earlier request: find the
 * channel by scid and delete it (error 0 — this is a clean close),
 * deferred via timer if userspace holds the socket lock.
 * NOTE(review): excerpt — 'sk' and scid/dcid declarations, the
 * not-found return and unlock framing are on lines not shown here. */
3000 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3002 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3004 struct l2cap_chan *chan;
3007 scid = __le16_to_cpu(rsp->scid);
3008 dcid = __le16_to_cpu(rsp->dcid);
3010 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3012 chan = l2cap_get_chan_by_scid(conn, scid);
3018 /* don't delete l2cap channel if sk is owned by user */
3019 if (sock_owned_by_user(sk)) {
3020 l2cap_state_change(chan,BT_DISCONN);
3021 __clear_chan_timer(chan);
3022 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
3027 l2cap_chan_del(chan, 0);
3030 chan->ops->close(chan->data);
/* Answer an Information Request: feature mask, fixed-channel map, or
 * "not supported" for anything else.  Sampled listing — buf
 * declarations and feature-gating conditionals are elided. */
3034 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3036 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3039 type = __le16_to_cpu(req->type);
3041 BT_DBG("type 0x%4.4x", type);
3043 if (type == L2CAP_IT_FEAT_MASK) {
3045 u32 feat_mask = l2cap_feat_mask;
3046 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3047 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3048 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming (and, when enabled, extended flow spec /
 * extended window) on top of the base l2cap_feat_mask. */
3050 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3053 feat_mask |= L2CAP_FEAT_EXT_FLOW
3054 | L2CAP_FEAT_EXT_WINDOW;
3056 put_unaligned_le32(feat_mask, rsp->data);
3057 l2cap_send_cmd(conn, cmd->ident,
3058 L2CAP_INFO_RSP, sizeof(buf), buf);
3059 } else if (type == L2CAP_IT_FIXED_CHAN) {
3061 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* NOTE(review): l2cap_fixed_chan is a file-scope array mutated here
 * per-request; verify that concurrent connections cannot race on it. */
3064 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3066 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3068 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3069 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3070 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3071 l2cap_send_cmd(conn, cmd->ident,
3072 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: reply NOTSUPP (else-branch; brace elided). */
3074 struct l2cap_info_rsp rsp;
3075 rsp.type = cpu_to_le16(type);
3076 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3077 l2cap_send_cmd(conn, cmd->ident,
3078 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Process the peer's Information Response.  After the feature mask is
 * learned, optionally chase the fixed-channel map, then kick off any
 * pending channel connects via l2cap_conn_start(). */
3084 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3086 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3089 type = __le16_to_cpu(rsp->type);
3090 result = __le16_to_cpu(rsp->result);
3092 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3094 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
/* Drop stale/duplicate responses: ident must match our outstanding
 * request and the exchange must not already be complete. */
3095 if (cmd->ident != conn->info_ident ||
3096 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3099 del_timer(&conn->info_timer);
3101 if (result != L2CAP_IR_SUCCESS) {
/* Peer refused: mark done anyway so connects are not stalled. */
3102 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3103 conn->info_ident = 0;
3105 l2cap_conn_start(conn);
3110 if (type == L2CAP_IT_FEAT_MASK) {
3111 conn->feat_mask = get_unaligned_le32(rsp->data);
3113 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Peer supports fixed channels: issue a follow-up request. */
3114 struct l2cap_info_req req;
3115 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3117 conn->info_ident = l2cap_get_ident(conn);
3119 l2cap_send_cmd(conn, conn->info_ident,
3120 L2CAP_INFO_REQ, sizeof(req), &req);
3122 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3123 conn->info_ident = 0;
3125 l2cap_conn_start(conn);
3127 } else if (type == L2CAP_IT_FIXED_CHAN) {
3128 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3129 conn->info_ident = 0;
3131 l2cap_conn_start(conn);
/* AMP Create Channel Request handler.  Placeholder implementation:
 * validates the length, then always rejects with "no resources". */
3137 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3138 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3141 struct l2cap_create_chan_req *req = data;
3142 struct l2cap_create_chan_rsp rsp;
/* Malformed command length -> protocol error (return elided). */
3145 if (cmd_len != sizeof(*req))
3151 psm = le16_to_cpu(req->psm);
3152 scid = le16_to_cpu(req->scid);
3154 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3156 /* Placeholder: Always reject */
3158 rsp.scid = cpu_to_le16(scid);
/* NOTE(review): result/status sent without cpu_to_le16 here, unlike
 * the surrounding handlers — harmless only on little-endian; verify. */
3159 rsp.result = L2CAP_CR_NO_MEM;
3160 rsp.status = L2CAP_CS_NO_INFO;
3162 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response shares the wire format of a Connect
 * Response, so delegate to the ordinary connect-rsp handler. */
3168 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3169 struct l2cap_cmd_hdr *cmd, void *data)
3171 BT_DBG("conn %p", conn);
3173 return l2cap_connect_rsp(conn, cmd, data);
/* Emit a Move Channel Response for @icid with the given result,
 * reusing the requester's @ident. */
3176 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3177 u16 icid, u16 result)
3179 struct l2cap_move_chan_rsp rsp;
3181 BT_DBG("icid %d, result %d", icid, result);
3183 rsp.icid = cpu_to_le16(icid);
3184 rsp.result = cpu_to_le16(result);
3186 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3189 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3190 struct l2cap_chan *chan, u16 icid, u16 result)
3192 struct l2cap_move_chan_cfm cfm;
3195 BT_DBG("icid %d, result %d", icid, result);
3197 ident = l2cap_get_ident(conn);
3199 chan->ident = ident;
3201 cfm.icid = cpu_to_le16(icid);
3202 cfm.result = cpu_to_le16(result);
3204 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Emit a Move Channel Confirmation Response for @icid, echoing the
 * confirmation's @ident. */
3207 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3210 struct l2cap_move_chan_cfm_rsp rsp;
3212 BT_DBG("icid %d", icid);
3214 rsp.icid = cpu_to_le16(icid);
3215 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* AMP Move Channel Request handler.  Placeholder: length-checks the
 * command, then always refuses the move. */
3218 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3219 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3221 struct l2cap_move_chan_req *req = data;
3223 u16 result = L2CAP_MR_NOT_ALLOWED;
/* Malformed length -> protocol error (return elided). */
3225 if (cmd_len != sizeof(*req))
3228 icid = le16_to_cpu(req->icid);
3230 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3235 /* Placeholder: Always refuse */
3236 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* AMP Move Channel Response handler.  Placeholder: always answers
 * with an "unconfirmed" Move Channel Confirmation. */
3241 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3242 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3244 struct l2cap_move_chan_rsp *rsp = data;
3247 if (cmd_len != sizeof(*rsp))
3250 icid = le16_to_cpu(rsp->icid);
3251 result = le16_to_cpu(rsp->result);
3253 BT_DBG("icid %d, result %d", icid, result);
3255 /* Placeholder: Always unconfirmed */
/* NOTE(review): chan is passed as NULL here, but
 * l2cap_send_move_chan_cfm() writes chan->ident — verify the callee
 * tolerates a NULL channel or this is a NULL dereference. */
3256 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* AMP Move Channel Confirmation handler: validate length and ack with
 * a Confirmation Response (no state change in this placeholder). */
3261 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3262 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3264 struct l2cap_move_chan_cfm *cfm = data;
3267 if (cmd_len != sizeof(*cfm))
3270 icid = le16_to_cpu(cfm->icid);
3271 result = le16_to_cpu(cfm->result);
3273 BT_DBG("icid %d, result %d", icid, result);
3275 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* AMP Move Channel Confirmation Response handler: validate length and
 * log; nothing else to do in this placeholder. */
3280 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3281 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3283 struct l2cap_move_chan_cfm_rsp *rsp = data;
3286 if (cmd_len != sizeof(*rsp))
3289 icid = le16_to_cpu(rsp->icid);
3291 BT_DBG("icid %d", icid);
/* Validate LE connection parameters (units: interval in 1.25 ms,
 * timeout in 10 ms).  Each failing check returns an error (returns
 * elided in this sampled listing). */
3296 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
/* Connection interval must satisfy min <= max within [6, 3200]. */
3301 if (min > max || min < 6 || max > 3200)
/* Supervision timeout within [10, 3200] (100 ms .. 32 s). */
3304 if (to_multiplier < 10 || to_multiplier > 3200)
/* Timeout must exceed the maximum interval (both scaled to 1.25 ms:
 * to_multiplier*10ms vs max*1.25ms -> factor 8). */
3307 if (max >= to_multiplier * 8)
/* Slave latency capped at 499 and must leave at least one
 * connection event inside the supervision timeout. */
3310 max_latency = (to_multiplier * 8 / max) - 1;
3311 if (latency > 499 || latency > max_latency)
/* LE Connection Parameter Update Request handler (master side only):
 * validate parameters, respond accept/reject, and if accepted push
 * the update to the controller. */
3317 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3318 struct l2cap_cmd_hdr *cmd, u8 *data)
3320 struct hci_conn *hcon = conn->hcon;
3321 struct l2cap_conn_param_update_req *req;
3322 struct l2cap_conn_param_update_rsp rsp;
3323 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the link master may apply the update (reject path elided). */
3326 if (!(hcon->link_mode & HCI_LM_MASTER))
3329 cmd_len = __le16_to_cpu(cmd->len);
3330 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3333 req = (struct l2cap_conn_param_update_req *) data;
3334 min = __le16_to_cpu(req->min);
3335 max = __le16_to_cpu(req->max);
3336 latency = __le16_to_cpu(req->latency);
3337 to_multiplier = __le16_to_cpu(req->to_multiplier);
3339 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3340 min, max, latency, to_multiplier);
3342 memset(&rsp, 0, sizeof(rsp));
3344 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3346 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3348 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3350 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Only reached when err == 0 (guard elided): apply at HCI level. */
3354 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler.  Echo
 * Requests are answered inline; unknown opcodes are logged (and the
 * caller sends a Command Reject — error path elided here). */
3359 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3360 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3364 switch (cmd->code) {
3365 case L2CAP_COMMAND_REJ:
3366 l2cap_command_rej(conn, cmd, data);
3369 case L2CAP_CONN_REQ:
3370 err = l2cap_connect_req(conn, cmd, data);
3373 case L2CAP_CONN_RSP:
3374 err = l2cap_connect_rsp(conn, cmd, data);
3377 case L2CAP_CONF_REQ:
3378 err = l2cap_config_req(conn, cmd, cmd_len, data);
3381 case L2CAP_CONF_RSP:
3382 err = l2cap_config_rsp(conn, cmd, data);
3385 case L2CAP_DISCONN_REQ:
3386 err = l2cap_disconnect_req(conn, cmd, data);
3389 case L2CAP_DISCONN_RSP:
3390 err = l2cap_disconnect_rsp(conn, cmd, data);
3393 case L2CAP_ECHO_REQ:
/* Echo: reflect the payload straight back with the same ident. */
3394 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3397 case L2CAP_ECHO_RSP:
3400 case L2CAP_INFO_REQ:
3401 err = l2cap_information_req(conn, cmd, data);
3404 case L2CAP_INFO_RSP:
3405 err = l2cap_information_rsp(conn, cmd, data);
3408 case L2CAP_CREATE_CHAN_REQ:
3409 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3412 case L2CAP_CREATE_CHAN_RSP:
3413 err = l2cap_create_channel_rsp(conn, cmd, data);
3416 case L2CAP_MOVE_CHAN_REQ:
3417 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3420 case L2CAP_MOVE_CHAN_RSP:
3421 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3424 case L2CAP_MOVE_CHAN_CFM:
3425 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3428 case L2CAP_MOVE_CHAN_CFM_RSP:
3429 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3433 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command.  Only the Connection Parameter
 * Update exchange is meaningful here; everything else is either
 * ignored or logged as unknown. */
3441 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3442 struct l2cap_cmd_hdr *cmd, u8 *data)
3444 switch (cmd->code) {
3445 case L2CAP_COMMAND_REJ:
3448 case L2CAP_CONN_PARAM_UPDATE_REQ:
3449 return l2cap_conn_param_update_req(conn, cmd, data);
3451 case L2CAP_CONN_PARAM_UPDATE_RSP:
3455 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Walk the signaling channel payload command-by-command, dispatch
 * each to the BR/EDR or LE handler, and send a Command Reject when a
 * handler fails. */
3460 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3461 struct sk_buff *skb)
3463 u8 *data = skb->data;
3465 struct l2cap_cmd_hdr cmd;
/* Raw sockets get a copy of all signaling traffic first. */
3468 l2cap_raw_recv(conn, skb);
3470 while (len >= L2CAP_CMD_HDR_SIZE) {
3472 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3473 data += L2CAP_CMD_HDR_SIZE;
3474 len -= L2CAP_CMD_HDR_SIZE;
3476 cmd_len = le16_to_cpu(cmd.len);
3478 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A claimed length beyond the buffer, or ident 0 (reserved), means
 * the packet is corrupt: stop parsing this skb. */
3480 if (cmd_len > len || !cmd.ident) {
3481 BT_DBG("corrupted command");
3485 if (conn->hcon->type == LE_LINK)
3486 err = l2cap_le_sig_cmd(conn, &cmd, data);
3488 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3491 struct l2cap_cmd_rej_unk rej;
3493 BT_ERR("Wrong link type (%d)", err);
3495 /* FIXME: Map err to a valid reason */
3496 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3497 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the CRC16 FCS trailer on an ERTM/streaming
 * frame.  Returns nonzero on mismatch (paths elided). */
3507 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3509 u16 our_fcs, rcv_fcs;
/* Header size depends on whether extended control fields are used. */
3512 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3513 hdr_size = L2CAP_EXT_HDR_SIZE;
3515 hdr_size = L2CAP_ENH_HDR_SIZE;
3517 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim FCS off the tail first; the bytes remain readable at
 * data + len, which is where they are fetched from next. */
3518 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3519 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
/* CRC covers the L2CAP header that was already pulled, hence the
 * data - hdr_size start offset. */
3520 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3522 if (our_fcs != rcv_fcs)
/* Respond to a poll: send an RNR if we are locally busy, otherwise
 * flush pending I-frames; if nothing was transmitted at all, fall
 * back to an RR so the poll is still acknowledged. */
3528 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3532 chan->frames_sent = 0;
3534 control |= __set_reqseq(chan, chan->buffer_seq);
3536 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3537 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3538 l2cap_send_sframe(chan, control);
3539 set_bit(CONN_RNR_SENT, &chan->conn_state);
3542 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3543 l2cap_retransmit_frames(chan);
3545 l2cap_ertm_send(chan);
/* Nothing went out (neither RNR nor I-frames): ack with a plain RR. */
3547 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3548 chan->frames_sent == 0) {
3549 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3550 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ queue, kept sorted by
 * tx_seq offset from buffer_seq; duplicates are rejected (return
 * elided).  Loop braces between the shown lines are elided. */
3554 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3556 struct sk_buff *next_skb;
3557 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequencing metadata in the skb control block for later
 * reassembly. */
3559 bt_cb(skb)->tx_seq = tx_seq;
3560 bt_cb(skb)->sar = sar;
3562 next_skb = skb_peek(&chan->srej_q);
3564 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
/* Duplicate tx_seq already queued -> caller treats as error. */
3567 if (bt_cb(next_skb)->tx_seq == tx_seq)
3570 next_tx_seq_offset = __seq_offset(chan,
3571 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3573 if (next_tx_seq_offset > tx_seq_offset) {
3574 __skb_queue_before(&chan->srej_q, next_skb, skb);
3578 if (skb_queue_is_last(&chan->srej_q, next_skb))
3581 next_skb = skb_queue_next(&chan->srej_q, next_skb);
/* Larger than everything queued: append at the tail. */
3584 __skb_queue_tail(&chan->srej_q, skb);
/* Chain @new_frag onto @skb's frag_list (tracking the tail in
 * *last_frag) and keep the aggregate length accounting consistent. */
3589 static void append_skb_frag(struct sk_buff *skb,
3590 struct sk_buff *new_frag, struct sk_buff **last_frag)
3592 /* skb->len reflects data in skb as well as all fragments
3593 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones link after the
 * remembered tail (else-branch brace elided). */
3595 if (!skb_has_frag_list(skb))
3596 skb_shinfo(skb)->frag_list = new_frag;
3598 new_frag->next = NULL;
3600 (*last_frag)->next = new_frag;
3601 *last_frag = new_frag;
3603 skb->len += new_frag->len;
3604 skb->data_len += new_frag->len;
3605 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-segmented I-frames and hand the complete
 * SDU to chan->ops->recv().  Error paths free the partial SDU and
 * reset the reassembly state (several branches elided). */
3608 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3612 switch (__get_ctrl_sar(chan, control)) {
3613 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame: deliver directly. */
3617 err = chan->ops->recv(chan->data, skb);
3620 case L2CAP_SAR_START:
/* First segment carries the total SDU length prefix. */
3624 chan->sdu_len = get_unaligned_le16(skb->data);
3625 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Advertised SDU larger than our MTU: abort reassembly. */
3627 if (chan->sdu_len > chan->imtu) {
/* A start segment must not already contain the full SDU. */
3632 if (skb->len >= chan->sdu_len)
3636 chan->sdu_last_frag = skb;
3642 case L2CAP_SAR_CONTINUE:
3646 append_skb_frag(chan->sdu, skb,
3647 &chan->sdu_last_frag);
/* Continuation must not reach the announced length yet. */
3650 if (chan->sdu->len >= chan->sdu_len)
/* L2CAP_SAR_END path (case label elided): append final fragment. */
3660 append_skb_frag(chan->sdu, skb,
3661 &chan->sdu_last_frag);
/* Total must match the length announced in the start segment. */
3664 if (chan->sdu->len != chan->sdu_len)
3667 err = chan->ops->recv(chan->data, chan->sdu);
3670 /* Reassembly complete */
3672 chan->sdu_last_frag = NULL;
/* Error cleanup (label elided): drop partial SDU and reset state. */
3680 kfree_skb(chan->sdu);
3682 chan->sdu_last_frag = NULL;
/* Enter ERTM local-busy: tell the peer to stop sending (RNR) and
 * pause our acknowledgement timer. */
3689 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3693 BT_DBG("chan %p, Enter local busy", chan);
3695 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3697 control = __set_reqseq(chan, chan->buffer_seq);
3698 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3699 l2cap_send_sframe(chan, control);
3701 set_bit(CONN_RNR_SENT, &chan->conn_state);
3703 __clear_ack_timer(chan);
/* Leave ERTM local-busy: if we previously sent an RNR, poll the peer
 * with RR(P=1) and wait for the F-bit; then clear the busy flags. */
3706 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
/* Only the RNR-sent path needs the poll handshake (guard shown). */
3710 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3713 control = __set_reqseq(chan, chan->buffer_seq);
3714 control |= __set_ctrl_poll(chan);
3715 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3716 l2cap_send_sframe(chan, control);
3717 chan->retry_count = 1;
/* Switch from retransmission timer to monitor timer while waiting
 * for the peer's F-bit response. */
3719 __clear_retrans_timer(chan);
3720 __set_monitor_timer(chan);
3722 set_bit(CONN_WAIT_F, &chan->conn_state);
3725 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3726 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3728 BT_DBG("chan %p, Exit local busy", chan);
/* Public toggle for local-busy state; only meaningful in ERTM mode
 * (busy-branch selection elided between the shown lines). */
3731 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3733 if (chan->mode == L2CAP_MODE_ERTM) {
3735 l2cap_ertm_enter_local_busy(chan);
3737 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ queue of now-in-order frames starting at @tx_seq,
 * reassembling each; stop at the first gap or when local-busy. */
3741 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3743 struct sk_buff *skb;
3746 while ((skb = skb_peek(&chan->srej_q)) &&
3747 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Queue is sorted; a mismatch means the gap is not filled yet. */
3750 if (bt_cb(skb)->tx_seq != tx_seq)
3753 skb = skb_dequeue(&chan->srej_q);
3754 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3755 err = l2cap_reassemble_sdu(chan, skb, control);
/* Reassembly failure is fatal for the channel. */
3758 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3762 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3763 tx_seq = __next_seq(chan, tx_seq);
/* Re-send the SREJ frame for @tx_seq and rotate its entry to the tail
 * of the srej list (entries before it are dropped — loop body
 * partially elided). */
3767 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3769 struct srej_list *l, *tmp;
3772 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3773 if (l->tx_seq == tx_seq) {
3778 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3779 control |= __set_reqseq(chan, l->tx_seq);
3780 l2cap_send_sframe(chan, control);
3782 list_add_tail(&l->list, &chan->srej_l);
/* Send an SREJ for every missing sequence number between
 * expected_tx_seq and @tx_seq, recording each in the srej list.
 * Returns -ENOMEM on allocation failure (return elided). */
3786 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3788 struct srej_list *new;
3791 while (tx_seq != chan->expected_tx_seq) {
3792 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3793 control |= __set_reqseq(chan, chan->expected_tx_seq);
3794 l2cap_send_sframe(chan, control);
/* GFP_ATOMIC: called from the receive path, cannot sleep. */
3796 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3800 new->tx_seq = chan->expected_tx_seq;
3802 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3804 list_add_tail(&new->list, &chan->srej_l);
/* Finally skip past @tx_seq itself, which we did receive. */
3807 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive state machine: acknowledge, detect
 * out-of-sequence frames, run SREJ recovery, reassemble in-order
 * data, and pace acknowledgements.  Many braces/goto labels between
 * the shown lines are elided in this sampled listing. */
3812 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3814 u16 tx_seq = __get_txseq(chan, rx_control);
3815 u16 req_seq = __get_reqseq(chan, rx_control);
3816 u8 sar = __get_ctrl_sar(chan, rx_control);
3817 int tx_seq_offset, expected_tx_seq_offset;
/* Send an ack roughly every tx_win/6 frames. */
3818 int num_to_ack = (chan->tx_win/6) + 1;
3821 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3822 tx_seq, rx_control);
/* F-bit answers our earlier poll: leave the WAIT_F state. */
3824 if (__is_ctrl_final(chan, rx_control) &&
3825 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3826 __clear_monitor_timer(chan);
3827 if (chan->unacked_frames > 0)
3828 __set_retrans_timer(chan);
3829 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* req_seq acknowledges our transmitted frames. */
3832 chan->expected_ack_seq = req_seq;
3833 l2cap_drop_acked_frames(chan);
3835 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3837 /* invalid tx_seq */
3838 if (tx_seq_offset >= chan->tx_win) {
3839 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3843 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3846 if (tx_seq == chan->expected_tx_seq)
3849 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3850 struct srej_list *first;
3852 first = list_first_entry(&chan->srej_l,
3853 struct srej_list, list);
/* The frame we SREJ'd first has arrived: queue it and try to
 * drain the gap. */
3854 if (tx_seq == first->tx_seq) {
3855 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3856 l2cap_check_srej_gap(chan, tx_seq);
3858 list_del(&first->list);
/* All SREJs satisfied: leave SREJ_SENT recovery. */
3861 if (list_empty(&chan->srej_l)) {
3862 chan->buffer_seq = chan->buffer_seq_srej;
3863 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3864 l2cap_send_ack(chan);
3865 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3868 struct srej_list *l;
3870 /* duplicated tx_seq */
3871 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
/* Frame matches an outstanding SREJ entry: just re-send that SREJ. */
3874 list_for_each_entry(l, &chan->srej_l, list) {
3875 if (l->tx_seq == tx_seq) {
3876 l2cap_resend_srejframe(chan, tx_seq);
/* A new hole appeared: extend SREJ coverage up to tx_seq. */
3881 err = l2cap_send_srejframe(chan, tx_seq);
3883 l2cap_send_disconn_req(chan->conn, chan, -err);
/* Not yet in SREJ recovery (else-branch; brace elided). */
3888 expected_tx_seq_offset = __seq_offset(chan,
3889 chan->expected_tx_seq, chan->buffer_seq);
3891 /* duplicated tx_seq */
3892 if (tx_seq_offset < expected_tx_seq_offset)
3895 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3897 BT_DBG("chan %p, Enter SREJ", chan);
3899 INIT_LIST_HEAD(&chan->srej_l);
3900 chan->buffer_seq_srej = chan->buffer_seq;
3902 __skb_queue_head_init(&chan->srej_q);
3903 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3905 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3907 err = l2cap_send_srejframe(chan, tx_seq);
3909 l2cap_send_disconn_req(chan->conn, chan, -err);
3913 __clear_ack_timer(chan);
/* expected path (label elided): frame is exactly the next one. */
3918 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* While in SREJ recovery even in-order frames are parked in srej_q. */
3920 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3921 bt_cb(skb)->tx_seq = tx_seq;
3922 bt_cb(skb)->sar = sar;
3923 __skb_queue_tail(&chan->srej_q, skb);
3927 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3928 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3931 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3935 if (__is_ctrl_final(chan, rx_control)) {
3936 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3937 l2cap_retransmit_frames(chan);
/* Batch acks: send one every num_to_ack frames, otherwise arm the
 * ack timer. */
3941 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3942 if (chan->num_acked == num_to_ack - 1)
3943 l2cap_send_ack(chan);
3945 __set_ack_timer(chan);
/* Handle a Receiver Ready S-frame: apply its acknowledgement, then
 * react to the P/F bits (poll -> respond; final -> resume/retransmit). */
3954 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3956 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3957 __get_reqseq(chan, rx_control), rx_control);
3959 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3960 l2cap_drop_acked_frames(chan);
3962 if (__is_ctrl_poll(chan, rx_control)) {
3963 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3964 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3965 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3966 (chan->unacked_frames > 0))
3967 __set_retrans_timer(chan);
3969 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* In SREJ recovery, answer the poll with our tail SREJ. */
3970 l2cap_send_srejtail(chan);
3972 l2cap_send_i_or_rr_or_rnr(chan);
3975 } else if (__is_ctrl_final(chan, rx_control)) {
3976 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F-bit ends a REJ exchange unless one is already accounted for. */
3978 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3979 l2cap_retransmit_frames(chan);
3982 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3983 (chan->unacked_frames > 0))
3984 __set_retrans_timer(chan);
3986 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3987 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3988 l2cap_send_ack(chan);
3990 l2cap_ertm_send(chan);
/* Handle a Reject S-frame: the peer wants retransmission starting at
 * req_seq.  Retransmit (once per REJ exchange when F-bit logic is in
 * play) and note an active REJ while waiting for our poll answer. */
3994 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
3996 u16 tx_seq = __get_reqseq(chan, rx_control);
3998 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4000 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4002 chan->expected_ack_seq = tx_seq;
4003 l2cap_drop_acked_frames(chan);
4005 if (__is_ctrl_final(chan, rx_control)) {
4006 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4007 l2cap_retransmit_frames(chan);
/* No F-bit (else-branch; brace elided): retransmit immediately. */
4009 l2cap_retransmit_frames(chan);
4011 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4012 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a Selective Reject S-frame: retransmit the single frame the
 * peer asks for; P-bit additionally acks and demands an F-bit reply,
 * F-bit closes a pending SREJ exchange. */
4015 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4017 u16 tx_seq = __get_reqseq(chan, rx_control);
4019 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4021 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4023 if (__is_ctrl_poll(chan, rx_control)) {
4024 chan->expected_ack_seq = tx_seq;
4025 l2cap_drop_acked_frames(chan);
4027 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4028 l2cap_retransmit_one_frame(chan, tx_seq);
4030 l2cap_ertm_send(chan);
/* Remember which seq the poll asked for so the later F-bit can be
 * matched against it. */
4032 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4033 chan->srej_save_reqseq = tx_seq;
4034 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4036 } else if (__is_ctrl_final(chan, rx_control)) {
4037 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4038 chan->srej_save_reqseq == tx_seq)
4039 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4041 l2cap_retransmit_one_frame(chan, tx_seq);
/* Plain SREJ (else-branch; brace elided): just retransmit. */
4043 l2cap_retransmit_one_frame(chan, tx_seq);
4044 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4045 chan->srej_save_reqseq = tx_seq;
4046 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a Receiver Not Ready S-frame: mark the peer busy, stop
 * retransmitting, and answer a poll appropriately. */
4051 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4053 u16 tx_seq = __get_reqseq(chan, rx_control);
4055 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4057 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4058 chan->expected_ack_seq = tx_seq;
4059 l2cap_drop_acked_frames(chan);
4061 if (__is_ctrl_poll(chan, rx_control))
4062 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4064 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4065 __clear_retrans_timer(chan);
4066 if (__is_ctrl_poll(chan, rx_control))
4067 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
/* SREJ recovery in progress (path partially elided): answer poll
 * with the tail SREJ, otherwise with a plain RR. */
4071 if (__is_ctrl_poll(chan, rx_control)) {
4072 l2cap_send_srejtail(chan);
4074 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4075 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received S-frame (RR/REJ/SREJ/RNR) to its handler after
 * the common F-bit / WAIT_F bookkeeping. */
4079 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4081 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4083 if (__is_ctrl_final(chan, rx_control) &&
4084 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4085 __clear_monitor_timer(chan);
4086 if (chan->unacked_frames > 0)
4087 __set_retrans_timer(chan);
4088 clear_bit(CONN_WAIT_F, &chan->conn_state);
4091 switch (__get_ctrl_super(chan, rx_control)) {
4092 case L2CAP_SUPER_RR:
4093 l2cap_data_channel_rrframe(chan, rx_control);
4096 case L2CAP_SUPER_REJ:
4097 l2cap_data_channel_rejframe(chan, rx_control);
4100 case L2CAP_SUPER_SREJ:
4101 l2cap_data_channel_srejframe(chan, rx_control);
4104 case L2CAP_SUPER_RNR:
4105 l2cap_data_channel_rnrframe(chan, rx_control);
/* Validate one received ERTM PDU (FCS, payload size, req_seq window)
 * and route it to the I-frame or S-frame handler; any validation
 * failure disconnects the channel. */
4113 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4115 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4118 int len, next_tx_seq_offset, req_seq_offset;
4120 control = __get_control(chan, skb->data);
4121 skb_pull(skb, __ctrl_size(chan));
4125 * We can just drop the corrupted I-frame here.
4126 * Receiver will miss it and start proper recovery
4127 * procedures and ask retransmission.
4129 if (l2cap_check_fcs(chan, skb))
/* Effective payload length: subtract SDU-length prefix (on SAR
 * start frames) and the FCS trailer. */
4132 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4133 len -= L2CAP_SDULEN_SIZE;
4135 if (chan->fcs == L2CAP_FCS_CRC16)
4136 len -= L2CAP_FCS_SIZE;
4138 if (len > chan->mps) {
4139 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4143 req_seq = __get_reqseq(chan, control);
4145 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4147 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4148 chan->expected_ack_seq);
4150 /* check for invalid req-seq */
4151 if (req_seq_offset > next_tx_seq_offset) {
4152 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4156 if (!__is_sframe(chan, control)) {
/* I-frame with zero payload is a protocol violation (len check
 * elided between the shown lines). */
4158 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4162 l2cap_data_channel_iframe(chan, control, skb);
/* S-frame must carry no payload (else-branch; check elided). */
4166 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4170 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver an incoming data PDU on connection-oriented CID @cid
 * according to the channel mode (basic / ERTM / streaming). */
4180 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4182 struct l2cap_chan *chan;
4183 struct sock *sk = NULL;
4188 chan = l2cap_get_chan_by_scid(conn, cid);
4190 BT_DBG("unknown cid 0x%4.4x", cid);
4196 BT_DBG("chan %p, len %d", chan, skb->len);
4198 if (chan->state != BT_CONNECTED)
4201 switch (chan->mode) {
4202 case L2CAP_MODE_BASIC:
4203 /* If socket recv buffers overflows we drop data here
4204 * which is *bad* because L2CAP has to be reliable.
4205 * But we don't have any other choice. L2CAP doesn't
4206 * provide flow control mechanism. */
4208 if (chan->imtu < skb->len)
4211 if (!chan->ops->recv(chan->data, skb))
4215 case L2CAP_MODE_ERTM:
/* ERTM frames are processed under the socket lock; if userspace
 * holds it, park the skb on the backlog instead. */
4216 if (!sock_owned_by_user(sk)) {
4217 l2cap_ertm_data_rcv(sk, skb);
4219 if (sk_add_backlog(sk, skb))
4225 case L2CAP_MODE_STREAMING:
4226 control = __get_control(chan, skb->data);
4227 skb_pull(skb, __ctrl_size(chan));
4230 if (l2cap_check_fcs(chan, skb))
4233 if (__is_sar_start(chan, control))
4234 len -= L2CAP_SDULEN_SIZE;
4236 if (chan->fcs == L2CAP_FCS_CRC16)
4237 len -= L2CAP_FCS_SIZE;
/* Streaming mode carries no S-frames; oversize or S-frame -> drop. */
4239 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4242 tx_seq = __get_txseq(chan, control);
4244 if (chan->expected_tx_seq != tx_seq) {
4245 /* Frame(s) missing - must discard partial SDU */
4246 kfree_skb(chan->sdu);
4248 chan->sdu_last_frag = NULL;
4251 /* TODO: Notify userland of missing data */
4254 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4256 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4257 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4262 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless-channel (CID 0x0002) PDU to the global
 * channel listening on @psm, if one exists and the frame fits. */
4276 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4278 struct sock *sk = NULL;
4279 struct l2cap_chan *chan;
4281 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4289 BT_DBG("sk %p, len %d", sk, skb->len);
4291 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4294 if (chan->imtu < skb->len)
4297 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT fixed-channel (LE data CID) PDU to the global
 * channel bound on @cid; mirrors l2cap_conless_channel(). */
4309 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4311 struct sock *sk = NULL;
4312 struct l2cap_chan *chan;
4314 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4322 BT_DBG("sk %p, len %d", sk, skb->len);
4324 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4327 if (chan->imtu < skb->len)
4330 if (!chan->ops->recv(chan->data, skb))
/* Top-level demultiplexer for a complete L2CAP frame: parse the basic
 * header and route by CID (signaling / connectionless / ATT / SMP /
 * connection-oriented data). */
4342 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4344 struct l2cap_hdr *lh = (void *) skb->data;
4348 skb_pull(skb, L2CAP_HDR_SIZE);
4349 cid = __le16_to_cpu(lh->cid);
4350 len = __le16_to_cpu(lh->len);
/* Header length must agree with what actually arrived. */
4352 if (len != skb->len) {
4357 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4360 case L2CAP_CID_LE_SIGNALING:
4361 case L2CAP_CID_SIGNALING:
4362 l2cap_sig_channel(conn, skb);
4365 case L2CAP_CID_CONN_LESS:
4366 psm = get_unaligned_le16(skb->data);
4368 l2cap_conless_channel(conn, psm, skb);
4371 case L2CAP_CID_LE_DATA:
4372 l2cap_att_channel(conn, cid, skb);
/* SMP CID (case label elided): a rejected SMP PDU kills the link. */
4376 if (smp_sig_channel(conn, skb))
4377 l2cap_conn_del(conn->hcon, EACCES);
/* Default: dynamically-allocated data channel. */
4381 l2cap_data_channel(conn, cid, skb);
4386 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * @bdaddr?  Scan listening channels; an exact local-address match
 * (lm1) wins over a wildcard BDADDR_ANY match (lm2). */
4388 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4390 int exact = 0, lm1 = 0, lm2 = 0;
4391 struct l2cap_chan *c;
4393 if (type != ACL_LINK)
4396 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4398 /* Find listening sockets and check their link_mode */
4399 read_lock(&chan_list_lock);
4400 list_for_each_entry(c, &chan_list, global_l) {
4401 struct sock *sk = c->sk;
4403 if (c->state != BT_LISTEN)
4406 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4407 lm1 |= HCI_LM_ACCEPT;
4408 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4409 lm1 |= HCI_LM_MASTER;
/* exact is set on this path (line elided in this listing). */
4411 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4412 lm2 |= HCI_LM_ACCEPT;
4413 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4414 lm2 |= HCI_LM_MASTER;
4417 read_unlock(&chan_list_lock);
4419 return exact ? lm1 : lm2;
/* HCI callback: link establishment finished.  On success attach an
 * L2CAP connection and start it; on failure tear everything down. */
4422 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4424 struct l2cap_conn *conn;
4426 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4428 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4432 conn = l2cap_conn_add(hcon, status);
4434 l2cap_conn_ready(conn);
4436 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: supply the HCI reason code to use when disconnecting
 * this link; defaults to remote-user-terminated if no L2CAP state. */
4441 static int l2cap_disconn_ind(struct hci_conn *hcon)
4443 struct l2cap_conn *conn = hcon->l2cap_data;
4445 BT_DBG("hcon %p", hcon);
4447 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4448 return HCI_ERROR_REMOTE_USER_TERM;
4450 return conn->disc_reason;
/* HCI callback: the link is gone — destroy the L2CAP connection,
 * translating the HCI reason into an errno. */
4453 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4455 BT_DBG("hcon %p reason %d", hcon, reason);
4457 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4460 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel:
 * losing encryption gives MEDIUM security a grace timer and closes
 * HIGH security outright; gaining it cancels the grace timer. */
4465 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4467 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4470 if (encrypt == 0x00) {
4471 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4472 __clear_chan_timer(chan);
4473 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4474 } else if (chan->sec_level == BT_SECURITY_HIGH)
4475 l2cap_chan_close(chan, ECONNREFUSED);
4477 if (chan->sec_level == BT_SECURITY_MEDIUM)
4478 __clear_chan_timer(chan);
/* HCI callback: authentication/encryption completed with @status.
 * Walk every channel on the connection and advance its state machine
 * accordingly (send pending Connect Requests, answer deferred
 * Connect Responses, apply encryption policy). */
4482 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4484 struct l2cap_conn *conn = hcon->l2cap_data;
4485 struct l2cap_chan *chan;
4490 BT_DBG("conn %p", conn);
/* LE links: hand off to SMP key distribution instead. */
4492 if (hcon->type == LE_LINK) {
4493 smp_distribute_keys(conn, 0);
4494 del_timer(&conn->security_timer);
4497 read_lock(&conn->chan_lock);
4499 list_for_each_entry(chan, &conn->chan_l, list) {
4500 struct sock *sk = chan->sk;
4504 BT_DBG("chan->scid %d", chan->scid);
4506 if (chan->scid == L2CAP_CID_LE_DATA) {
4507 if (!status && encrypt) {
4508 chan->sec_level = hcon->sec_level;
4509 l2cap_chan_ready(sk);
/* Channels not waiting on this security exchange are skipped
 * (continue elided); already-up channels just get the
 * encryption policy applied. */
4516 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4521 if (!status && (chan->state == BT_CONNECTED ||
4522 chan->state == BT_CONFIG)) {
4523 l2cap_check_encryption(chan, encrypt);
4528 if (chan->state == BT_CONNECT) {
/* Security now satisfied: send the deferred Connect Request. */
4530 struct l2cap_conn_req req;
4531 req.scid = cpu_to_le16(chan->scid);
4532 req.psm = chan->psm;
4534 chan->ident = l2cap_get_ident(conn);
4535 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4537 l2cap_send_cmd(conn, chan->ident,
4538 L2CAP_CONN_REQ, sizeof(req), &req);
/* Security failed (else path): schedule disconnect. */
4540 __clear_chan_timer(chan);
4541 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4543 } else if (chan->state == BT_CONNECT2) {
4544 struct l2cap_conn_rsp rsp;
4548 if (bt_sk(sk)->defer_setup) {
/* Userspace wants to authorize: report PEND and wake the
 * listening parent. */
4549 struct sock *parent = bt_sk(sk)->parent;
4550 res = L2CAP_CR_PEND;
4551 stat = L2CAP_CS_AUTHOR_PEND;
4553 parent->sk_data_ready(parent, 0);
4555 l2cap_state_change(chan, BT_CONFIG);
4556 res = L2CAP_CR_SUCCESS;
4557 stat = L2CAP_CS_NO_INFO;
/* Security failed for an incoming connect: block it. */
4560 l2cap_state_change(chan, BT_DISCONN);
4561 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4562 res = L2CAP_CR_SEC_BLOCK;
4563 stat = L2CAP_CS_NO_INFO;
4566 rsp.scid = cpu_to_le16(chan->dcid);
4567 rsp.dcid = cpu_to_le16(chan->scid);
4568 rsp.result = cpu_to_le16(res);
4569 rsp.status = cpu_to_le16(stat);
4570 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4577 read_unlock(&conn->chan_lock);
/*
 * HCI ACL data entry point: reassemble L2CAP frames from ACL fragments.
 *
 * A start fragment (no ACL_CONT flag) carries the Basic L2CAP header,
 * from which the total frame length is read. If the fragment already
 * holds the whole frame it is delivered directly; otherwise an skb of
 * the full length is allocated in conn->rx_skb and conn->rx_len tracks
 * the bytes still expected. Continuation fragments (ACL_CONT) are
 * appended until rx_len hits zero, at which point the reassembled frame
 * is delivered. Any length inconsistency marks the connection
 * unreliable (ECOMM) and drops the in-progress frame.
 *
 * NOTE(review): this extract is missing several original lines (the
 * `len`/`cid` declarations, the `goto drop` error exits and the `drop:`
 * label with the final kfree_skb/return, the `else` joining start and
 * continuation handling, and closing braces) — verify against the full
 * source before editing.
 */
4582 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4584 struct l2cap_conn *conn = hcon->l2cap_data;
/* First data on a link may arrive before the conn exists: create it. */
4587 conn = l2cap_conn_add(hcon, 0);
4592 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4594 if (!(flags & ACL_CONT)) {
4595 struct l2cap_hdr *hdr;
4596 struct l2cap_chan *chan;
/* A start fragment while reassembly is in progress: discard the
 * partial frame and flag the connection unreliable. */
4601 BT_ERR("Unexpected start frame (len %d)", skb->len);
4602 kfree_skb(conn->rx_skb);
4603 conn->rx_skb = NULL;
4605 l2cap_conn_unreliable(conn, ECOMM);
4608 /* Start fragment always begin with Basic L2CAP header */
4609 if (skb->len < L2CAP_HDR_SIZE) {
4610 BT_ERR("Frame is too short (len %d)", skb->len);
4611 l2cap_conn_unreliable(conn, ECOMM);
/* Total frame length = payload length from the header + header size. */
4615 hdr = (struct l2cap_hdr *) skb->data;
4616 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4617 cid = __le16_to_cpu(hdr->cid);
4619 if (len == skb->len) {
4620 /* Complete frame received */
4621 l2cap_recv_frame(conn, skb);
4625 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4627 if (skb->len > len) {
4628 BT_ERR("Frame is too long (len %d, expected len %d)",
4630 l2cap_conn_unreliable(conn, ECOMM);
/* Early MTU check against the destination channel, so we can refuse
 * oversized frames before allocating the reassembly buffer. */
4634 chan = l2cap_get_chan_by_scid(conn, cid);
4636 if (chan && chan->sk) {
4637 struct sock *sk = chan->sk;
4639 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4640 BT_ERR("Frame exceeding recv MTU (len %d, "
4644 l2cap_conn_unreliable(conn, ECOMM);
4650 /* Allocate skb for the complete frame (with header) */
4651 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4655 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4657 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4659 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4661 if (!conn->rx_len) {
4662 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4663 l2cap_conn_unreliable(conn, ECOMM);
4667 if (skb->len > conn->rx_len) {
4668 BT_ERR("Fragment is too long (len %d, expected %d)",
4669 skb->len, conn->rx_len);
4670 kfree_skb(conn->rx_skb);
4671 conn->rx_skb = NULL;
4673 l2cap_conn_unreliable(conn, ECOMM);
/* Append this fragment and update the outstanding byte count. */
4677 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4679 conn->rx_len -= skb->len;
4681 if (!conn->rx_len) {
4682 /* Complete frame received */
4683 l2cap_recv_frame(conn, conn->rx_skb);
4684 conn->rx_skb = NULL;
4693 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4695 struct l2cap_chan *c;
4697 read_lock_bh(&chan_list_lock);
4699 list_for_each_entry(c, &chan_list, global_l) {
4700 struct sock *sk = c->sk;
4702 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4703 batostr(&bt_sk(sk)->src),
4704 batostr(&bt_sk(sk)->dst),
4705 c->state, __le16_to_cpu(c->psm),
4706 c->scid, c->dcid, c->imtu, c->omtu,
4707 c->sec_level, c->mode);
4710 read_unlock_bh(&chan_list_lock);
4715 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4717 return single_open(file, l2cap_debugfs_show, inode->i_private);
4720 static const struct file_operations l2cap_debugfs_fops = {
4721 .open = l2cap_debugfs_open,
4723 .llseek = seq_lseek,
4724 .release = single_release,
/* Dentry of the "l2cap" debugfs file, created in l2cap_init() and
 * removed in l2cap_exit(). */
4727 static struct dentry *l2cap_debugfs;
/*
 * Protocol descriptor registering L2CAP's callbacks with the HCI core
 * (connect/disconnect indications and confirmations, security changes,
 * and inbound ACL data).
 * NOTE(review): this extract shows no `.name` initializer in the table;
 * confirm against the full source whether one was dropped.
 */
4729 static struct hci_proto l2cap_hci_proto = {
4731 .id = HCI_PROTO_L2CAP,
4732 .connect_ind = l2cap_connect_ind,
4733 .connect_cfm = l2cap_connect_cfm,
4734 .disconn_ind = l2cap_disconn_ind,
4735 .disconn_cfm = l2cap_disconn_cfm,
4736 .security_cfm = l2cap_security_cfm,
4737 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: bring up the L2CAP socket layer, register the protocol
 * with the HCI core, and create the debugfs channel dump. On protocol
 * registration failure the socket registration is rolled back.
 * NOTE(review): this extract is missing the error-path lines (`int err;`,
 * the `if (err < 0)` checks, the success/failure returns and the label
 * that `l2cap_cleanup_sockets()` sits under) — verify against the full
 * source before editing.
 */
4740 int __init l2cap_init(void)
4744 err = l2cap_init_sockets();
4748 err = hci_register_proto(&l2cap_hci_proto);
/* Registration failed: undo the socket-layer registration. */
4750 BT_ERR("L2CAP protocol registration failed");
4751 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs file is best-effort: failure is only logged, not fatal. */
4756 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4757 bt_debugfs, NULL, &l2cap_debugfs_fops);
4759 BT_ERR("Failed to create L2CAP debug file");
4765 l2cap_cleanup_sockets();
4769 void l2cap_exit(void)
4771 debugfs_remove(l2cap_debugfs);
4773 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4774 BT_ERR("L2CAP protocol unregistration failed");
4776 l2cap_cleanup_sockets();
/* Runtime-tunable module parameters (mode 0644: root-writable via sysfs). */
4779 module_param(disable_ertm, bool, 0644);
4780 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4782 module_param(enable_hs, bool, 0644);
4783 MODULE_PARM_DESC(enable_hs, "Enable High Speed");