2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* NOTE(review): this extraction is missing lines (braces, blank lines and
 * some statements) and each line carries its original file line number.
 * Code below is kept byte-identical to the source. */
/* Module-wide L2CAP state: advertised feature mask, fixed-channel bitmap,
 * and the global channel list guarded by chan_list_lock. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
65 static LIST_HEAD(chan_list);
66 static DEFINE_RWLOCK(chan_list_lock);
/* Forward declarations for signalling helpers defined later in the file. */
68 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
69 u8 code, u8 ident, u16 dlen, void *data);
70 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
72 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
73 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
74 struct l2cap_chan *chan, int err);
76 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
78 /* ---- L2CAP channels ---- */
/* Take a reference on a channel. */
80 static inline void chan_hold(struct l2cap_chan *c)
82 atomic_inc(&c->refcnt);
/* Drop a reference; teardown on the last put is in elided lines. */
85 static inline void chan_put(struct l2cap_chan *c)
87 if (atomic_dec_and_test(&c->refcnt))
/* Look up a channel on @conn by destination CID.
 * Presumably requires conn->chan_lock held — the comparison line is
 * elided here; verify against the upstream file. */
91 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
95 list_for_each_entry(c, &conn->chan_l, list) {
/* As above, but match on the source CID. */
103 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
105 struct l2cap_chan *c;
107 list_for_each_entry(c, &conn->chan_l, list) {
114 /* Find channel with given SCID.
115 * Returns locked socket */
/* Locked wrapper: takes conn->chan_lock for reading around the lookup. */
116 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
118 struct l2cap_chan *c;
120 read_lock(&conn->chan_lock);
121 c = __l2cap_get_chan_by_scid(conn, cid);
124 read_unlock(&conn->chan_lock);
/* Look up a channel by the signalling-command identifier it last used. */
128 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
130 struct l2cap_chan *c;
132 list_for_each_entry(c, &conn->chan_l, list) {
133 if (c->ident == ident)
/* Locked wrapper around the ident lookup. */
139 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
141 struct l2cap_chan *c;
143 read_lock(&conn->chan_lock);
144 c = __l2cap_get_chan_by_ident(conn, ident);
147 read_unlock(&conn->chan_lock);
/* Find a globally-registered channel bound to @psm on source address
 * @src. Caller must hold chan_list_lock. */
151 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
153 struct l2cap_chan *c;
155 list_for_each_entry(c, &chan_list, global_l) {
156 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM. A zero @psm triggers a scan of the dynamic
 * range 0x1001..0x10ff (odd values only) for a free slot. */
165 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
169 write_lock_bh(&chan_list_lock);
/* Refuse an explicit PSM that is already bound on this address. */
171 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
184 for (p = 0x1001; p < 0x1100; p += 2)
185 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
186 chan->psm = cpu_to_le16(p);
187 chan->sport = cpu_to_le16(p);
194 write_unlock_bh(&chan_list_lock);
/* Record a fixed source CID on the channel under chan_list_lock. */
198 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
200 write_lock_bh(&chan_list_lock);
204 write_unlock_bh(&chan_list_lock);
/* Return the first unused dynamic CID on this connection. */
209 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
211 u16 cid = L2CAP_CID_DYN_START;
213 for (; cid < L2CAP_CID_DYN_END; cid++) {
214 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm a channel timer @timeout ms from now. mod_timer() returns 0
 * when the timer was not already pending; the action taken on that
 * result is in an elided line. */
221 static void l2cap_set_timer(struct l2cap_chan *chan, struct timer_list *timer, long timeout)
223 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
225 if (!mod_timer(timer, jiffies + msecs_to_jiffies(timeout)))
/* Cancel a channel timer if it is still pending. */
229 static void l2cap_clear_timer(struct l2cap_chan *chan, struct timer_list *timer)
231 BT_DBG("chan %p state %d", chan, chan->state);
233 if (timer_pending(timer) && del_timer(timer))
/* Transition the channel state through the owner's state_change op. */
237 static void l2cap_state_change(struct l2cap_chan *chan, int state)
240 chan->ops->state_change(chan->data, state);
/* Channel timer expiry handler (timer context): close the channel with
 * an errno chosen from its current state, then invoke ops->close. */
243 static void l2cap_chan_timeout(unsigned long arg)
245 struct l2cap_chan *chan = (struct l2cap_chan *) arg;
246 struct sock *sk = chan->sk;
249 BT_DBG("chan %p state %d", chan, chan->state);
/* Socket locked by a process context user: retry after the
 * disconnect timeout instead of racing with it. */
253 if (sock_owned_by_user(sk)) {
254 /* sk is owned by user. Try again later */
255 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
261 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
262 reason = ECONNREFUSED;
263 else if (chan->state == BT_CONNECT &&
264 chan->sec_level != BT_SECURITY_SDP)
265 reason = ECONNREFUSED;
269 l2cap_chan_close(chan, reason);
273 chan->ops->close(chan->data);
/* Allocate and initialise a channel for socket @sk: zeroed allocation,
 * linked onto the global chan_list, chan_timer armed with
 * l2cap_chan_timeout, state BT_OPEN, refcount 1. */
277 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
279 struct l2cap_chan *chan;
281 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
287 write_lock_bh(&chan_list_lock);
288 list_add(&chan->global_l, &chan_list);
289 write_unlock_bh(&chan_list_lock);
291 setup_timer(&chan->chan_timer, l2cap_chan_timeout, (unsigned long) chan);
293 chan->state = BT_OPEN;
295 atomic_set(&chan->refcnt, 1);
297 BT_DBG("sk %p chan %p", sk, chan);
/* Unlink the channel from the global channel list. */
302 void l2cap_chan_destroy(struct l2cap_chan *chan)
304 write_lock_bh(&chan_list_lock);
305 list_del(&chan->global_l);
306 write_unlock_bh(&chan_list_lock);
/* Attach @chan to @conn and pick CIDs/MTU by channel type:
 * - connection-oriented over LE: fixed LE data CID and LE MTU;
 * - connection-oriented over ACL: dynamically allocated SCID;
 * - connectionless: fixed connectionless CID;
 * - raw: signalling CID only.
 * Lock requirements are not visible here; presumably the caller holds
 * conn->chan_lock (l2cap_chan_add does) — verify upstream. */
311 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
313 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
314 chan->psm, chan->dcid);
316 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
320 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
321 if (conn->hcon->type == LE_LINK) {
323 chan->omtu = L2CAP_LE_DEFAULT_MTU;
324 chan->scid = L2CAP_CID_LE_DATA;
325 chan->dcid = L2CAP_CID_LE_DATA;
327 /* Alloc CID for connection-oriented socket */
328 chan->scid = l2cap_alloc_cid(conn);
329 chan->omtu = L2CAP_DEFAULT_MTU;
331 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
332 /* Connectionless socket */
333 chan->scid = L2CAP_CID_CONN_LESS;
334 chan->dcid = L2CAP_CID_CONN_LESS;
335 chan->omtu = L2CAP_DEFAULT_MTU;
337 /* Raw socket can send/recv signalling messages only */
338 chan->scid = L2CAP_CID_SIGNALING;
339 chan->dcid = L2CAP_CID_SIGNALING;
340 chan->omtu = L2CAP_DEFAULT_MTU;
/* Default best-effort flow-spec parameters for the local side. */
343 chan->local_id = L2CAP_BESTEFFORT_ID;
344 chan->local_stype = L2CAP_SERV_BESTEFFORT;
345 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
346 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
347 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
348 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
352 list_add(&chan->list, &conn->chan_l);
356 * Must be called on the locked socket. */
/* Detach @chan from its connection: stop the channel timer, remove it
 * from conn->chan_l, drop the hcon reference, zap the socket and wake
 * anyone waiting on it; for ERTM channels also cancel the retransmit /
 * monitor / ack timers and purge the pending queues. */
357 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
359 struct sock *sk = chan->sk;
360 struct l2cap_conn *conn = chan->conn;
361 struct sock *parent = bt_sk(sk)->parent;
363 __clear_chan_timer(chan);
365 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
368 /* Delete from channel list */
369 write_lock_bh(&conn->chan_lock);
370 list_del(&chan->list);
371 write_unlock_bh(&conn->chan_lock);
375 hci_conn_put(conn->hcon);
378 l2cap_state_change(chan, BT_CLOSED);
379 sock_set_flag(sk, SOCK_ZAPPED);
/* Not-yet-accepted child: unlink from the accept queue and poke the
 * listening parent. */
385 bt_accept_unlink(sk);
386 parent->sk_data_ready(parent, 0);
388 sk->sk_state_change(sk);
390 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
391 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
394 skb_queue_purge(&chan->tx_q);
396 if (chan->mode == L2CAP_MODE_ERTM) {
397 struct srej_list *l, *tmp;
399 __clear_retrans_timer(chan);
400 __clear_monitor_timer(chan);
401 __clear_ack_timer(chan);
403 skb_queue_purge(&chan->srej_q);
405 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close every child channel still sitting on a listening socket's
 * accept queue. */
412 static void l2cap_chan_cleanup_listen(struct sock *parent)
416 BT_DBG("parent %p", parent);
418 /* Close not yet accepted channels */
419 while ((sk = bt_accept_dequeue(parent, NULL))) {
420 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
421 __clear_chan_timer(chan);
423 l2cap_chan_close(chan, ECONNRESET);
425 chan->ops->close(chan->data);
/* Close a channel according to its current state:
 * listening sockets clean up pending children; connected/config
 * channels on ACL send a Disconnect Request and wait; CONNECT2
 * channels answer the pending Connect Request with a reject; anything
 * else is deleted directly. */
429 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
431 struct l2cap_conn *conn = chan->conn;
432 struct sock *sk = chan->sk;
434 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
436 switch (chan->state) {
438 l2cap_chan_cleanup_listen(sk);
440 l2cap_state_change(chan, BT_CLOSED);
441 sock_set_flag(sk, SOCK_ZAPPED);
446 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
447 conn->hcon->type == ACL_LINK) {
448 __clear_chan_timer(chan);
449 __set_chan_timer(chan, sk->sk_sndtimeo);
450 l2cap_send_disconn_req(conn, chan, reason);
452 l2cap_chan_del(chan, reason);
456 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
457 conn->hcon->type == ACL_LINK) {
458 struct l2cap_conn_rsp rsp;
/* Pick the rejection code: security block when the connection was
 * deferred for authorisation, bad PSM otherwise. */
461 if (bt_sk(sk)->defer_setup)
462 result = L2CAP_CR_SEC_BLOCK;
464 result = L2CAP_CR_BAD_PSM;
465 l2cap_state_change(chan, BT_DISCONN);
467 rsp.scid = cpu_to_le16(chan->dcid);
468 rsp.dcid = cpu_to_le16(chan->scid);
469 rsp.result = cpu_to_le16(result);
470 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
471 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
475 l2cap_chan_del(chan, reason);
480 l2cap_chan_del(chan, reason);
484 sock_set_flag(sk, SOCK_ZAPPED);
/* Map a channel's type/PSM/security level to the HCI authentication
 * requirement used when (re)establishing the link:
 * raw channels want dedicated bonding, PSM 0x0001 (SDP) needs no
 * bonding, everything else maps to general bonding. */
489 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
491 if (chan->chan_type == L2CAP_CHAN_RAW) {
492 switch (chan->sec_level) {
493 case BT_SECURITY_HIGH:
494 return HCI_AT_DEDICATED_BONDING_MITM;
495 case BT_SECURITY_MEDIUM:
496 return HCI_AT_DEDICATED_BONDING;
498 return HCI_AT_NO_BONDING;
500 } else if (chan->psm == cpu_to_le16(0x0001)) {
/* SDP never requires more than SDP-level security. */
501 if (chan->sec_level == BT_SECURITY_LOW)
502 chan->sec_level = BT_SECURITY_SDP;
504 if (chan->sec_level == BT_SECURITY_HIGH)
505 return HCI_AT_NO_BONDING_MITM;
507 return HCI_AT_NO_BONDING;
509 switch (chan->sec_level) {
510 case BT_SECURITY_HIGH:
511 return HCI_AT_GENERAL_BONDING_MITM;
512 case BT_SECURITY_MEDIUM:
513 return HCI_AT_GENERAL_BONDING;
515 return HCI_AT_NO_BONDING;
520 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level on the
 * underlying ACL/LE link. */
521 int l2cap_chan_check_security(struct l2cap_chan *chan)
523 struct l2cap_conn *conn = chan->conn;
526 auth_type = l2cap_get_auth_type(chan);
528 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved 1..128 range. */
531 static u8 l2cap_get_ident(struct l2cap_conn *conn)
535 /* Get next available identificator.
536 * 1 - 128 are used by kernel.
537 * 129 - 199 are reserved.
538 * 200 - 254 are used by utilities like l2ping, etc.
541 spin_lock_bh(&conn->lock);
543 if (++conn->tx_ident > 128)
548 spin_unlock_bh(&conn->lock);
/* Build and transmit a signalling command on the connection's HCI
 * channel; high priority, force-active, non-flushable when supported. */
553 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
555 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
558 BT_DBG("code 0x%2.2x", code);
563 if (lmp_no_flush_capable(conn->hcon->hdev))
564 flags = ACL_START_NO_FLUSH;
568 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
569 skb->priority = HCI_PRIO_MAX;
571 hci_send_acl(conn->hchan, skb, flags);
/* Transmit a data skb for @chan, honouring its flushable and
 * force-active flags. */
574 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
576 struct hci_conn *hcon = chan->conn->hcon;
579 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
582 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
583 lmp_no_flush_capable(hcon->hdev))
584 flags = ACL_START_NO_FLUSH;
588 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
589 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM S-frame carrying @control. Header size depends
 * on extended-control mode; an FCS is appended when CRC16 is in use.
 * Pending F-bit / P-bit requests are folded into the control field. */
592 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
595 struct l2cap_hdr *lh;
596 struct l2cap_conn *conn = chan->conn;
599 if (chan->state != BT_CONNECTED)
602 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
603 hlen = L2CAP_EXT_HDR_SIZE;
605 hlen = L2CAP_ENH_HDR_SIZE;
607 if (chan->fcs == L2CAP_FCS_CRC16)
608 hlen += L2CAP_FCS_SIZE;
610 BT_DBG("chan %p, control 0x%8.8x", chan, control);
612 count = min_t(unsigned int, conn->mtu, hlen);
614 control |= __set_sframe(chan);
616 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
617 control |= __set_ctrl_final(chan);
619 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
620 control |= __set_ctrl_poll(chan);
622 skb = bt_skb_alloc(count, GFP_ATOMIC);
626 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
627 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
628 lh->cid = cpu_to_le16(chan->dcid);
630 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
/* FCS covers everything before the trailing 2 FCS bytes. */
632 if (chan->fcs == L2CAP_FCS_CRC16) {
633 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
634 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
637 skb->priority = HCI_PRIO_MAX;
638 l2cap_do_send(chan, skb);
/* Send RNR when locally busy (remembering we did), RR otherwise,
 * always acknowledging up to buffer_seq. */
641 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
643 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
644 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
645 set_bit(CONN_RNR_SENT, &chan->conn_state);
647 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
649 control |= __set_reqseq(chan, chan->buffer_seq);
651 l2cap_send_sframe(chan, control);
/* True when no Connect Request from us is outstanding. */
654 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
656 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment: if the remote feature mask is known,
 * send a Connect Request (once security passes and none is pending);
 * otherwise first issue an Information Request for the feature mask and
 * arm the info timer. */
659 static void l2cap_do_start(struct l2cap_chan *chan)
661 struct l2cap_conn *conn = chan->conn;
663 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
664 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
667 if (l2cap_chan_check_security(chan) &&
668 __l2cap_no_conn_pending(chan)) {
669 struct l2cap_conn_req req;
670 req.scid = cpu_to_le16(chan->scid);
673 chan->ident = l2cap_get_ident(conn);
674 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
676 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
680 struct l2cap_info_req req;
681 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
683 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
684 conn->info_ident = l2cap_get_ident(conn);
686 mod_timer(&conn->info_timer, jiffies +
687 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
689 l2cap_send_cmd(conn, conn->info_ident,
690 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check a requested mode against both the local and the remote
 * (negotiated) feature masks. */
694 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
696 u32 local_feat_mask = l2cap_feat_mask;
698 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
701 case L2CAP_MODE_ERTM:
702 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
703 case L2CAP_MODE_STREAMING:
704 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Send a Disconnect Request for @chan (stopping ERTM timers first) and
 * move the channel to BT_DISCONN. */
710 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
713 struct l2cap_disconn_req req;
720 if (chan->mode == L2CAP_MODE_ERTM) {
721 __clear_retrans_timer(chan);
722 __clear_monitor_timer(chan);
723 __clear_ack_timer(chan);
726 req.dcid = cpu_to_le16(chan->dcid);
727 req.scid = cpu_to_le16(chan->scid);
728 l2cap_send_cmd(conn, l2cap_get_ident(conn),
729 L2CAP_DISCONN_REQ, sizeof(req), &req);
731 l2cap_state_change(chan, BT_DISCONN);
735 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on @conn forward:
 * BT_CONNECT channels get a Connect Request (or are torn down when
 * their mode is unsupported), BT_CONNECT2 channels get the pending
 * Connect Response plus, on success, the first Configure Request. */
736 static void l2cap_conn_start(struct l2cap_conn *conn)
738 struct l2cap_chan *chan, *tmp;
740 BT_DBG("conn %p", conn);
742 read_lock(&conn->chan_lock);
744 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
745 struct sock *sk = chan->sk;
749 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
754 if (chan->state == BT_CONNECT) {
755 struct l2cap_conn_req req;
757 if (!l2cap_chan_check_security(chan) ||
758 !__l2cap_no_conn_pending(chan)) {
763 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
764 && test_bit(CONF_STATE2_DEVICE,
765 &chan->conf_state)) {
766 /* l2cap_chan_close() calls list_del(chan)
767 * so release the lock */
768 read_unlock(&conn->chan_lock);
769 l2cap_chan_close(chan, ECONNRESET);
770 read_lock(&conn->chan_lock);
775 req.scid = cpu_to_le16(chan->scid);
778 chan->ident = l2cap_get_ident(conn);
779 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
781 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
784 } else if (chan->state == BT_CONNECT2) {
785 struct l2cap_conn_rsp rsp;
787 rsp.scid = cpu_to_le16(chan->dcid);
788 rsp.dcid = cpu_to_le16(chan->scid);
790 if (l2cap_chan_check_security(chan)) {
/* Deferred setup: report PENDING/authorisation-pending
 * and wake the listening parent to decide. */
791 if (bt_sk(sk)->defer_setup) {
792 struct sock *parent = bt_sk(sk)->parent;
793 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
794 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
796 parent->sk_data_ready(parent, 0);
799 l2cap_state_change(chan, BT_CONFIG);
800 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
801 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
804 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
805 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
808 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
811 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
812 rsp.result != L2CAP_CR_SUCCESS) {
817 set_bit(CONF_REQ_SENT, &chan->conf_state);
818 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
819 l2cap_build_conf_req(chan, buf), buf);
820 chan->num_conf_req++;
826 read_unlock(&conn->chan_lock);
829 /* Find socket with cid and source bdaddr.
830 * Returns closest match, locked.
/* Scan the global channel list for a channel in @state bound to @cid:
 * an exact source-address match wins immediately; a BDADDR_ANY binding
 * is remembered as the closest fallback. */
832 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
834 struct l2cap_chan *c, *c1 = NULL;
836 read_lock(&chan_list_lock);
838 list_for_each_entry(c, &chan_list, global_l) {
839 struct sock *sk = c->sk;
841 if (state && c->state != state)
844 if (c->scid == cid) {
846 if (!bacmp(&bt_sk(sk)->src, src)) {
847 read_unlock(&chan_list_lock);
852 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
857 read_unlock(&chan_list_lock);
/* Incoming LE connection: find the listener on the LE data CID, spawn a
 * child channel, attach it to the connection and mark it connected. */
862 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
864 struct sock *parent, *sk;
865 struct l2cap_chan *chan, *pchan;
869 /* Check if we have socket listening on cid */
870 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
877 bh_lock_sock(parent);
879 /* Check for backlog size */
880 if (sk_acceptq_is_full(parent)) {
881 BT_DBG("backlog full %d", parent->sk_ack_backlog);
885 chan = pchan->ops->new_connection(pchan->data);
891 write_lock_bh(&conn->chan_lock);
893 hci_conn_hold(conn->hcon);
895 bacpy(&bt_sk(sk)->src, conn->src);
896 bacpy(&bt_sk(sk)->dst, conn->dst);
898 bt_accept_enqueue(parent, sk);
900 __l2cap_chan_add(conn, chan);
902 __set_chan_timer(chan, sk->sk_sndtimeo);
904 l2cap_state_change(chan, BT_CONNECTED);
905 parent->sk_data_ready(parent, 0);
907 write_unlock_bh(&conn->chan_lock);
910 bh_unlock_sock(parent);
/* Mark a socket's channel fully connected: reset config state, stop the
 * channel timer and wake the socket (and listening parent, if any). */
913 static void l2cap_chan_ready(struct sock *sk)
915 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
916 struct sock *parent = bt_sk(sk)->parent;
918 BT_DBG("sk %p, parent %p", sk, parent);
920 chan->conf_state = 0;
921 __clear_chan_timer(chan);
923 l2cap_state_change(chan, BT_CONNECTED);
924 sk->sk_state_change(sk);
927 parent->sk_data_ready(parent, 0);
/* HCI link is up: handle LE incoming/outgoing specifics, then walk all
 * channels — LE channels become ready once SMP security passes,
 * non-connection-oriented channels are connected immediately, and
 * BT_CONNECT channels are (re)started. */
930 static void l2cap_conn_ready(struct l2cap_conn *conn)
932 struct l2cap_chan *chan;
934 BT_DBG("conn %p", conn);
936 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
937 l2cap_le_conn_ready(conn);
939 if (conn->hcon->out && conn->hcon->type == LE_LINK)
940 smp_conn_security(conn, conn->hcon->pending_sec_level);
942 read_lock(&conn->chan_lock);
944 list_for_each_entry(chan, &conn->chan_l, list) {
945 struct sock *sk = chan->sk;
949 if (conn->hcon->type == LE_LINK) {
950 if (smp_conn_security(conn, chan->sec_level))
951 l2cap_chan_ready(sk);
953 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
954 __clear_chan_timer(chan);
955 l2cap_state_change(chan, BT_CONNECTED);
956 sk->sk_state_change(sk);
958 } else if (chan->state == BT_CONNECT)
959 l2cap_do_start(chan);
964 read_unlock(&conn->chan_lock);
967 /* Notify sockets that we cannot guaranty reliability anymore */
/* Flag an error on every channel that requested forced reliability. */
968 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
970 struct l2cap_chan *chan;
972 BT_DBG("conn %p", conn);
974 read_lock(&conn->chan_lock);
976 list_for_each_entry(chan, &conn->chan_l, list) {
977 struct sock *sk = chan->sk;
979 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
983 read_unlock(&conn->chan_lock);
/* Information Request timed out: pretend the exchange finished so
 * channel setup can proceed without the remote feature mask. */
986 static void l2cap_info_timeout(unsigned long arg)
988 struct l2cap_conn *conn = (void *) arg;
990 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
991 conn->info_ident = 0;
993 l2cap_conn_start(conn);
/* Tear down the L2CAP state attached to an HCI connection: free the
 * partially reassembled rx skb, delete every channel with @err, drop
 * the HCI channel, cancel the info/security timers and unlink the
 * l2cap_data pointer. */
996 static void l2cap_conn_del(struct hci_conn *hcon, int err)
998 struct l2cap_conn *conn = hcon->l2cap_data;
999 struct l2cap_chan *chan, *l;
1005 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1007 kfree_skb(conn->rx_skb);
1010 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1013 l2cap_chan_del(chan, err);
1015 chan->ops->close(chan->data);
1018 hci_chan_del(conn->hchan);
1020 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1021 del_timer_sync(&conn->info_timer);
1023 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1024 del_timer(&conn->security_timer);
1025 smp_chan_destroy(conn);
1028 hcon->l2cap_data = NULL;
/* SMP security procedure timed out: drop the whole connection. */
1032 static void security_timeout(unsigned long arg)
1034 struct l2cap_conn *conn = (void *) arg;
1036 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for an HCI connection:
 * allocates an hci_chan, picks the MTU from the link type, initialises
 * locks, the channel list and the LE-security or info timer. */
1039 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1041 struct l2cap_conn *conn = hcon->l2cap_data;
1042 struct hci_chan *hchan;
1047 hchan = hci_chan_create(hcon);
1051 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1053 hci_chan_del(hchan);
1057 hcon->l2cap_data = conn;
1059 conn->hchan = hchan;
1061 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1063 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1064 conn->mtu = hcon->hdev->le_mtu;
1066 conn->mtu = hcon->hdev->acl_mtu;
1068 conn->src = &hcon->hdev->bdaddr;
1069 conn->dst = &hcon->dst;
1071 conn->feat_mask = 0;
1073 spin_lock_init(&conn->lock);
1074 rwlock_init(&conn->chan_lock);
1076 INIT_LIST_HEAD(&conn->chan_l);
1078 if (hcon->type == LE_LINK)
1079 setup_timer(&conn->security_timer, security_timeout,
1080 (unsigned long) conn);
1082 setup_timer(&conn->info_timer, l2cap_info_timeout,
1083 (unsigned long) conn);
1085 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Locked wrapper: attach a channel under conn->chan_lock. */
1090 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1092 write_lock_bh(&conn->chan_lock);
1093 __l2cap_chan_add(conn, chan);
1094 write_unlock_bh(&conn->chan_lock);
1097 /* ---- Socket interface ---- */
1099 /* Find socket with psm and source bdaddr.
1100 * Returns closest match.
/* Like l2cap_global_chan_by_scid() but keyed on PSM: exact source
 * address wins, BDADDR_ANY is the remembered fallback. */
1102 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1104 struct l2cap_chan *c, *c1 = NULL;
1106 read_lock(&chan_list_lock);
1108 list_for_each_entry(c, &chan_list, global_l) {
1109 struct sock *sk = c->sk;
1111 if (state && c->state != state)
1114 if (c->psm == psm) {
1116 if (!bacmp(&bt_sk(sk)->src, src)) {
1117 read_unlock(&chan_list_lock);
1122 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1127 read_unlock(&chan_list_lock);
/* Initiate an outgoing connection for @chan: route to an HCI device,
 * establish the ACL or LE link with the required auth type, attach the
 * channel and either start channel setup (link already up) or wait for
 * the connection-complete event under the channel timer. */
1132 int l2cap_chan_connect(struct l2cap_chan *chan)
1134 struct sock *sk = chan->sk;
1135 bdaddr_t *src = &bt_sk(sk)->src;
1136 bdaddr_t *dst = &bt_sk(sk)->dst;
1137 struct l2cap_conn *conn;
1138 struct hci_conn *hcon;
1139 struct hci_dev *hdev;
1143 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1146 hdev = hci_get_route(dst, src);
1148 return -EHOSTUNREACH;
1150 hci_dev_lock_bh(hdev);
1152 auth_type = l2cap_get_auth_type(chan);
1154 if (chan->dcid == L2CAP_CID_LE_DATA)
1155 hcon = hci_connect(hdev, LE_LINK, dst,
1156 chan->sec_level, auth_type);
1158 hcon = hci_connect(hdev, ACL_LINK, dst,
1159 chan->sec_level, auth_type);
1162 err = PTR_ERR(hcon);
1166 conn = l2cap_conn_add(hcon, 0);
1173 /* Update source addr of the socket */
1174 bacpy(src, conn->src);
1176 l2cap_chan_add(conn, chan);
1178 l2cap_state_change(chan, BT_CONNECT);
1179 __set_chan_timer(chan, sk->sk_sndtimeo);
1181 if (hcon->state == BT_CONNECTED) {
1182 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1183 __clear_chan_timer(chan);
1184 if (l2cap_chan_check_security(chan))
1185 l2cap_state_change(chan, BT_CONNECTED);
1187 l2cap_do_start(chan);
1193 hci_dev_unlock_bh(hdev);
/* Sleep (interruptibly) until every transmitted ERTM frame has been
 * acknowledged or the connection goes away; returns a signal/socket
 * error when interrupted. */
1198 int __l2cap_wait_ack(struct sock *sk)
1200 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1201 DECLARE_WAITQUEUE(wait, current);
1205 add_wait_queue(sk_sleep(sk), &wait);
1206 set_current_state(TASK_INTERRUPTIBLE);
1207 while (chan->unacked_frames > 0 && chan->conn) {
1211 if (signal_pending(current)) {
1212 err = sock_intr_errno(timeo);
1217 timeo = schedule_timeout(timeo);
1219 set_current_state(TASK_INTERRUPTIBLE);
1221 err = sock_error(sk);
1225 set_current_state(TASK_RUNNING);
1226 remove_wait_queue(sk_sleep(sk), &wait);
/* Monitor timer fired: give up after remote_max_tx retries, otherwise
 * poll the peer again with RR/RNR + P-bit. */
1230 static void l2cap_monitor_timeout(unsigned long arg)
1232 struct l2cap_chan *chan = (void *) arg;
1233 struct sock *sk = chan->sk;
1235 BT_DBG("chan %p", chan);
1238 if (chan->retry_count >= chan->remote_max_tx) {
1239 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1244 chan->retry_count++;
1245 __set_monitor_timer(chan);
1247 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Retransmission timer fired: start the monitor sequence (wait for the
 * F-bit) and poll the peer. */
1251 static void l2cap_retrans_timeout(unsigned long arg)
1253 struct l2cap_chan *chan = (void *) arg;
1254 struct sock *sk = chan->sk;
1256 BT_DBG("chan %p", chan);
1259 chan->retry_count = 1;
1260 __set_monitor_timer(chan);
1262 set_bit(CONN_WAIT_F, &chan->conn_state);
1264 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Drop frames from the head of tx_q up to expected_ack_seq; stop the
 * retransmission timer when nothing unacked remains. */
1268 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1270 struct sk_buff *skb;
1272 while ((skb = skb_peek(&chan->tx_q)) &&
1273 chan->unacked_frames) {
1274 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1277 skb = skb_dequeue(&chan->tx_q);
1280 chan->unacked_frames--;
1283 if (!chan->unacked_frames)
1284 __clear_retrans_timer(chan);
/* Streaming mode: drain tx_q, stamping each I-frame with the next
 * TxSeq and (optionally) a CRC16 FCS before sending. No retransmission
 * state is kept. */
1287 static void l2cap_streaming_send(struct l2cap_chan *chan)
1289 struct sk_buff *skb;
1293 while ((skb = skb_dequeue(&chan->tx_q))) {
1294 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1295 control |= __set_txseq(chan, chan->next_tx_seq);
1296 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1298 if (chan->fcs == L2CAP_FCS_CRC16) {
1299 fcs = crc16(0, (u8 *)skb->data,
1300 skb->len - L2CAP_FCS_SIZE);
1301 put_unaligned_le16(fcs,
1302 skb->data + skb->len - L2CAP_FCS_SIZE);
1305 l2cap_do_send(chan, skb);
1307 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* ERTM: retransmit the single queued I-frame whose TxSeq is @tx_seq,
 * cloning it so the original stays on tx_q; disconnect if the frame
 * has already been sent remote_max_tx times. */
1311 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1313 struct sk_buff *skb, *tx_skb;
1317 skb = skb_peek(&chan->tx_q);
1322 if (bt_cb(skb)->tx_seq == tx_seq)
1325 if (skb_queue_is_last(&chan->tx_q, skb))
1328 } while ((skb = skb_queue_next(&chan->tx_q, skb)));
1330 if (chan->remote_max_tx &&
1331 bt_cb(skb)->retries == chan->remote_max_tx) {
1332 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1336 tx_skb = skb_clone(skb, GFP_ATOMIC);
1337 bt_cb(skb)->retries++;
/* Rebuild the control field: keep SAR bits, refresh F-bit,
 * ReqSeq and TxSeq. */
1339 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1340 control &= __get_sar_mask(chan);
1342 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1343 control |= __set_ctrl_final(chan);
1345 control |= __set_reqseq(chan, chan->buffer_seq);
1346 control |= __set_txseq(chan, tx_seq);
1348 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1350 if (chan->fcs == L2CAP_FCS_CRC16) {
1351 fcs = crc16(0, (u8 *)tx_skb->data,
1352 tx_skb->len - L2CAP_FCS_SIZE);
1353 put_unaligned_le16(fcs,
1354 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1357 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send clones of queued I-frames from tx_send_head
 * while the transmit window has room, updating control fields, FCS,
 * retry/unacked counters and the retransmission timer.
 * NOTE(review): the FCS here is computed over skb->data but written
 * with tx_skb->len offsets — looks asymmetric with the
 * retransmit-one-frame path above; verify against the upstream file. */
1360 static int l2cap_ertm_send(struct l2cap_chan *chan)
1362 struct sk_buff *skb, *tx_skb;
1367 if (chan->state != BT_CONNECTED)
1370 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1372 if (chan->remote_max_tx &&
1373 bt_cb(skb)->retries == chan->remote_max_tx) {
1374 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1378 tx_skb = skb_clone(skb, GFP_ATOMIC);
1380 bt_cb(skb)->retries++;
1382 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1383 control &= __get_sar_mask(chan);
1385 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1386 control |= __set_ctrl_final(chan);
1388 control |= __set_reqseq(chan, chan->buffer_seq);
1389 control |= __set_txseq(chan, chan->next_tx_seq);
1391 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1393 if (chan->fcs == L2CAP_FCS_CRC16) {
1394 fcs = crc16(0, (u8 *)skb->data,
1395 tx_skb->len - L2CAP_FCS_SIZE);
1396 put_unaligned_le16(fcs, skb->data +
1397 tx_skb->len - L2CAP_FCS_SIZE);
1400 l2cap_do_send(chan, tx_skb);
1402 __set_retrans_timer(chan);
1404 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1406 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
1408 if (bt_cb(skb)->retries == 1)
1409 chan->unacked_frames++;
1411 chan->frames_sent++;
1413 if (skb_queue_is_last(&chan->tx_q, skb))
1414 chan->tx_send_head = NULL;
1416 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the oldest unacked frame and resend. */
1424 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1428 if (!skb_queue_empty(&chan->tx_q))
1429 chan->tx_send_head = chan->tx_q.next;
1431 chan->next_tx_seq = chan->expected_ack_seq;
1432 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: RNR when locally busy, otherwise try to
 * piggy-back the ack on pending I-frames and fall back to an RR. */
1436 static void l2cap_send_ack(struct l2cap_chan *chan)
1440 control |= __set_reqseq(chan, chan->buffer_seq);
1442 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1443 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1444 set_bit(CONN_RNR_SENT, &chan->conn_state);
1445 l2cap_send_sframe(chan, control);
1449 if (l2cap_ertm_send(chan) > 0)
1452 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1453 l2cap_send_sframe(chan, control);
/* Send an SREJ S-frame with the F-bit for the tail of the SREJ list. */
1456 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1458 struct srej_list *tail;
1461 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1462 control |= __set_ctrl_final(chan);
1464 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1465 control |= __set_reqseq(chan, tail->tx_seq);
1467 l2cap_send_sframe(chan, control);
1470 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1472 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1473 struct sk_buff **frag;
1476 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1482 /* Continuation fragments (no L2CAP header) */
1483 frag = &skb_shinfo(skb)->frag_list;
1485 count = min_t(unsigned int, conn->mtu, len);
1487 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1490 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1493 (*frag)->priority = skb->priority;
1498 frag = &(*frag)->next;
/* Build a connectionless-channel PDU: L2CAP basic header plus a 2-byte
 * PSM field, followed by the user payload copied from msg. Returns the
 * skb or an ERR_PTR on allocation/copy failure. */
1504 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1505 struct msghdr *msg, size_t len,
1508 struct sock *sk = chan->sk;
1509 struct l2cap_conn *conn = chan->conn;
1510 struct sk_buff *skb;
/* Header length = basic L2CAP header + PSM field. */
1511 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1512 struct l2cap_hdr *lh;
1514 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
/* First fragment is capped by the HCI/ACL mtu of the connection. */
1516 count = min_t(unsigned int, (conn->mtu - hlen), len);
1517 skb = bt_skb_send_alloc(sk, count + hlen,
1518 msg->msg_flags & MSG_DONTWAIT, &err);
1520 return ERR_PTR(err);
1522 skb->priority = priority;
1524 /* Create L2CAP header */
1525 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1526 lh->cid = cpu_to_le16(chan->dcid);
1527 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1528 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1530 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1531 if (unlikely(err < 0)) {
1533 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by the
 * user payload from msg. Returns the skb or an ERR_PTR on failure. */
1538 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1539 struct msghdr *msg, size_t len,
1542 struct sock *sk = chan->sk;
1543 struct l2cap_conn *conn = chan->conn;
1544 struct sk_buff *skb;
1545 int err, count, hlen = L2CAP_HDR_SIZE;
1546 struct l2cap_hdr *lh;
1548 BT_DBG("sk %p len %d", sk, (int)len);
1550 count = min_t(unsigned int, (conn->mtu - hlen), len);
1551 skb = bt_skb_send_alloc(sk, count + hlen,
1552 msg->msg_flags & MSG_DONTWAIT, &err);
1554 return ERR_PTR(err);
1556 skb->priority = priority;
1558 /* Create L2CAP header */
1559 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1560 lh->cid = cpu_to_le16(chan->dcid);
1561 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1563 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1564 if (unlikely(err < 0)) {
1566 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, enhanced or
 * extended control field (per FLAG_EXT_CTRL), optional SDU-length field
 * for segmented SDUs, payload, and a zeroed FCS placeholder when CRC16
 * is in use. Returns the skb or an ERR_PTR on failure. */
1571 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1572 struct msghdr *msg, size_t len,
1573 u32 control, u16 sdulen)
1575 struct sock *sk = chan->sk;
1576 struct l2cap_conn *conn = chan->conn;
1577 struct sk_buff *skb;
1578 int err, count, hlen;
1579 struct l2cap_hdr *lh;
1581 BT_DBG("sk %p len %d", sk, (int)len);
1584 return ERR_PTR(-ENOTCONN);
/* Extended control field (4 bytes) vs. enhanced (2 bytes). */
1586 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1587 hlen = L2CAP_EXT_HDR_SIZE;
1589 hlen = L2CAP_ENH_HDR_SIZE;
/* NOTE(review): the condition guarding the SDU-length addition is on a
 * line omitted from this excerpt (presumably 'if (sdulen)'). */
1592 hlen += L2CAP_SDULEN_SIZE;
1594 if (chan->fcs == L2CAP_FCS_CRC16)
1595 hlen += L2CAP_FCS_SIZE;
1597 count = min_t(unsigned int, (conn->mtu - hlen), len);
1598 skb = bt_skb_send_alloc(sk, count + hlen,
1599 msg->msg_flags & MSG_DONTWAIT, &err);
1601 return ERR_PTR(err);
1603 /* Create L2CAP header */
1604 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1605 lh->cid = cpu_to_le16(chan->dcid);
1606 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1608 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1611 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1613 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1614 if (unlikely(err < 0)) {
1616 return ERR_PTR(err);
/* FCS placeholder; the real checksum is filled in at transmit time. */
1619 if (chan->fcs == L2CAP_FCS_CRC16)
1620 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1622 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START I-frame
 * (carrying the total SDU length), zero or more CONTINUE frames, and a
 * final END frame; on success splice the whole run onto chan->tx_q. */
1626 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1628 struct sk_buff *skb;
1629 struct sk_buff_head sar_queue;
1633 skb_queue_head_init(&sar_queue);
/* START frame carries the full SDU length as sdulen. */
1634 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1635 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1637 return PTR_ERR(skb);
1639 __skb_queue_tail(&sar_queue, skb);
1640 len -= chan->remote_mps;
1641 size += chan->remote_mps;
1646 if (len > chan->remote_mps) {
1647 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1648 buflen = chan->remote_mps;
1650 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
/* CONTINUE/END frames carry no sdulen (0). */
1654 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure drop everything built so far. */
1656 skb_queue_purge(&sar_queue);
1657 return PTR_ERR(skb);
1660 __skb_queue_tail(&sar_queue, skb);
1664 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1665 if (chan->tx_send_head == NULL)
1666 chan->tx_send_head = sar_queue.next;
/* Top-level channel send path. Dispatches on channel type/mode:
 * connectionless channels send a single G-frame immediately; basic mode
 * checks the outgoing MTU and sends one B-frame; ERTM/streaming either
 * queues a single unsegmented I-frame or segments the SDU, then kicks
 * the appropriate transmit engine. */
1671 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1674 struct sk_buff *skb;
1678 /* Connectionless channel */
1679 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1680 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1682 return PTR_ERR(skb);
1684 l2cap_do_send(chan, skb);
1688 switch (chan->mode) {
1689 case L2CAP_MODE_BASIC:
1690 /* Check outgoing MTU */
1691 if (len > chan->omtu)
1694 /* Create a basic PDU */
1695 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1697 return PTR_ERR(skb);
1699 l2cap_do_send(chan, skb);
1703 case L2CAP_MODE_ERTM:
1704 case L2CAP_MODE_STREAMING:
1705 /* Entire SDU fits into one PDU */
1706 if (len <= chan->remote_mps) {
1707 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1708 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1711 return PTR_ERR(skb);
1713 __skb_queue_tail(&chan->tx_q, skb);
1715 if (chan->tx_send_head == NULL)
1716 chan->tx_send_head = skb;
1719 /* Segment SDU into multiples PDUs */
1720 err = l2cap_sar_segment_sdu(chan, msg, len);
1725 if (chan->mode == L2CAP_MODE_STREAMING) {
1726 l2cap_streaming_send(chan);
/* In ERTM, hold off transmission while the remote is busy or we are
 * waiting for an F-bit response. */
1731 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1732 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1737 err = l2cap_ertm_send(chan);
1744 BT_DBG("bad state %1.1x", chan->mode);
1751 /* Copy frame to all raw sockets on that connection */
1752 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1754 struct sk_buff *nskb;
1755 struct l2cap_chan *chan;
1757 BT_DBG("conn %p", conn);
/* Walk the connection's channel list under the read lock and hand a
 * clone of the frame to every RAW channel's recv callback. */
1759 read_lock(&conn->chan_lock);
1760 list_for_each_entry(chan, &conn->chan_l, list) {
1761 struct sock *sk = chan->sk;
1762 if (chan->chan_type != L2CAP_CHAN_RAW)
1765 /* Don't send frame to the socket it came from */
1768 nskb = skb_clone(skb, GFP_ATOMIC);
/* NOTE(review): the handling after a failed recv (likely kfree_skb) is
 * on a line omitted from this excerpt. */
1772 if (chan->ops->recv(chan->data, nskb))
1775 read_unlock(&conn->chan_lock);
1778 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (CID selected by
 * link type: LE vs. BR/EDR signalling), command header, then 'dlen'
 * bytes of payload, fragmented across conn->mtu-sized skbs if needed. */
1779 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1780 u8 code, u8 ident, u16 dlen, void *data)
1782 struct sk_buff *skb, **frag;
1783 struct l2cap_cmd_hdr *cmd;
1784 struct l2cap_hdr *lh;
1787 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1788 conn, code, ident, dlen);
1790 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1791 count = min_t(unsigned int, conn->mtu, len);
1793 skb = bt_skb_alloc(count, GFP_ATOMIC);
1797 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1798 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
1800 if (conn->hcon->type == LE_LINK)
1801 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1803 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1805 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1808 cmd->len = cpu_to_le16(dlen);
1811 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1812 memcpy(skb_put(skb, count), data, count);
1818 /* Continuation fragments (no L2CAP header) */
1819 frag = &skb_shinfo(skb)->frag_list;
1821 count = min_t(unsigned int, conn->mtu, len);
1823 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1827 memcpy(skb_put(*frag, count), data, count);
1832 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: return its type and length
 * through out-params and its value through *val, widened from the 1-,
 * 2- or 4-byte wire encoding (or passed as a pointer for other sizes). */
1842 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1844 struct l2cap_conf_opt *opt = *ptr;
1847 len = L2CAP_CONF_OPT_SIZE + opt->len;
1855 *val = *((u8 *) opt->val);
1859 *val = get_unaligned_le16(opt->val);
1863 *val = get_unaligned_le32(opt->val);
/* Variable-length options: hand back a pointer to the raw bytes. */
1867 *val = (unsigned long) opt->val;
1871 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/value) at *ptr, encoding
 * the value per its length (1/2/4 bytes, or a memcpy of 'len' bytes for
 * anything else), then advance *ptr past the option. */
1875 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1877 struct l2cap_conf_opt *opt = *ptr;
1879 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1886 *((u8 *) opt->val) = val;
1890 put_unaligned_le16(val, opt->val);
1894 put_unaligned_le32(val, opt->val);
/* For other lengths 'val' is really a pointer to the option payload. */
1898 memcpy(opt->val, (void *) val, len);
1902 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Fill an Extended Flow Specification from the channel's local QoS
 * parameters (ERTM uses the channel's service type, streaming forces
 * best-effort) and append it as an EFS config option at *ptr. */
1905 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1907 struct l2cap_conf_efs efs;
1909 switch(chan->mode) {
1910 case L2CAP_MODE_ERTM:
1911 efs.id = chan->local_id;
1912 efs.stype = chan->local_stype;
1913 efs.msdu = cpu_to_le16(chan->local_msdu);
1914 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1915 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1916 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1919 case L2CAP_MODE_STREAMING:
1921 efs.stype = L2CAP_SERV_BESTEFFORT;
1922 efs.msdu = cpu_to_le16(chan->local_msdu);
1923 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1932 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1933 (unsigned long) &efs);
/* Ack-timer callback: send a pending acknowledgement for the channel,
 * serialised against the socket with the BH socket lock. */
1936 static void l2cap_ack_timeout(unsigned long arg)
1938 struct l2cap_chan *chan = (void *) arg;
1940 bh_lock_sock(chan->sk);
1941 l2cap_send_ack(chan);
1942 bh_unlock_sock(chan->sk);
/* Initialise ERTM per-channel state: zero the sequence counters, set up
 * the retransmission, monitor and ack timers, the SREJ queue/list, and
 * route the socket backlog through the ERTM receive path. */
1945 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1947 struct sock *sk = chan->sk;
1949 chan->expected_ack_seq = 0;
1950 chan->unacked_frames = 0;
1951 chan->buffer_seq = 0;
1952 chan->num_acked = 0;
1953 chan->frames_sent = 0;
1955 setup_timer(&chan->retrans_timer, l2cap_retrans_timeout,
1956 (unsigned long) chan);
1957 setup_timer(&chan->monitor_timer, l2cap_monitor_timeout,
1958 (unsigned long) chan);
1959 setup_timer(&chan->ack_timer, l2cap_ack_timeout, (unsigned long) chan);
1961 skb_queue_head_init(&chan->srej_q);
1963 INIT_LIST_HEAD(&chan->srej_l);
1966 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick the channel mode to use: keep ERTM/streaming only if the remote
 * feature mask supports it, otherwise fall back to basic mode. */
1969 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1972 case L2CAP_MODE_STREAMING:
1973 case L2CAP_MODE_ERTM:
1974 if (l2cap_mode_supported(mode, remote_feat_mask))
1978 return L2CAP_MODE_BASIC;
/* Extended Window Size is usable only with high-speed support enabled
 * and the remote advertising the extended-window feature bit. */
1982 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1984 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Specification is usable only with high-speed support
 * enabled and the remote advertising the extended-flow feature bit. */
1987 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
1989 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/* Choose the TX window encoding: if the requested window exceeds the
 * default and extended windows are supported, switch the channel to the
 * extended control field; otherwise clamp to the default window. */
1992 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
1994 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
1995 __l2cap_ews_supported(chan)) {
1996 /* use extended control field */
1997 set_bit(FLAG_EXT_CTRL, &chan->flags);
1998 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2000 chan->tx_win = min_t(u16, chan->tx_win,
2001 L2CAP_DEFAULT_TX_WINDOW);
2002 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/* Build an outgoing Configuration Request into 'data': MTU option if
 * non-default, then mode-specific options (RFC, EFS, FCS, EWS) for
 * basic/ERTM/streaming. Returns the total request length.
 * NOTE(review): several flow-control lines are omitted from this
 * excerpt; read against the full source before modifying. */
2006 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2008 struct l2cap_conf_req *req = data;
2009 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2010 void *ptr = req->data;
2013 BT_DBG("chan %p", chan);
2015 if (chan->num_conf_req || chan->num_conf_rsp)
2018 switch (chan->mode) {
2019 case L2CAP_MODE_STREAMING:
2020 case L2CAP_MODE_ERTM:
2021 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2024 if (__l2cap_efs_supported(chan))
2025 set_bit(FLAG_EFS_ENABLE, &chan->flags)
2029 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
2034 if (chan->imtu != L2CAP_DEFAULT_MTU)
2035 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2037 switch (chan->mode) {
2038 case L2CAP_MODE_BASIC:
/* Only advertise RFC basic if the peer knows ERTM/streaming at all. */
2039 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2040 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2043 rfc.mode = L2CAP_MODE_BASIC;
2045 rfc.max_transmit = 0;
2046 rfc.retrans_timeout = 0;
2047 rfc.monitor_timeout = 0;
2048 rfc.max_pdu_size = 0;
2050 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2051 (unsigned long) &rfc);
2054 case L2CAP_MODE_ERTM:
2055 rfc.mode = L2CAP_MODE_ERTM;
2056 rfc.max_transmit = chan->max_tx;
2057 rfc.retrans_timeout = 0;
2058 rfc.monitor_timeout = 0;
/* Cap the PDU size so a full I-frame fits in the connection MTU. */
2060 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2061 L2CAP_EXT_HDR_SIZE -
2064 rfc.max_pdu_size = cpu_to_le16(size);
2066 l2cap_txwin_setup(chan);
2068 rfc.txwin_size = min_t(u16, chan->tx_win,
2069 L2CAP_DEFAULT_TX_WINDOW);
2071 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2072 (unsigned long) &rfc);
2074 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2075 l2cap_add_opt_efs(&ptr, chan);
2077 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2080 if (chan->fcs == L2CAP_FCS_NONE ||
2081 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2082 chan->fcs = L2CAP_FCS_NONE;
2083 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2086 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2087 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2091 case L2CAP_MODE_STREAMING:
2092 rfc.mode = L2CAP_MODE_STREAMING;
2094 rfc.max_transmit = 0;
2095 rfc.retrans_timeout = 0;
2096 rfc.monitor_timeout = 0;
2098 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2099 L2CAP_EXT_HDR_SIZE -
2102 rfc.max_pdu_size = cpu_to_le16(size);
2104 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2105 (unsigned long) &rfc);
2107 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2108 l2cap_add_opt_efs(&ptr, chan);
2110 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2113 if (chan->fcs == L2CAP_FCS_NONE ||
2114 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2115 chan->fcs = L2CAP_FCS_NONE;
2116 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2121 req->dcid = cpu_to_le16(chan->dcid);
2122 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configuration Request (chan->conf_req)
 * and build our Configuration Response into 'data'. First pass decodes
 * each option (MTU, flush timeout, QoS, RFC, FCS, EFS, EWS), then the
 * requested mode is reconciled with ours and per-mode response options
 * are emitted. Returns the response length or -ECONNREFUSED. */
2127 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2129 struct l2cap_conf_rsp *rsp = data;
2130 void *ptr = rsp->data;
2131 void *req = chan->conf_req;
2132 int len = chan->conf_len;
2133 int type, hint, olen;
2135 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2136 struct l2cap_conf_efs efs;
2138 u16 mtu = L2CAP_DEFAULT_MTU;
2139 u16 result = L2CAP_CONF_SUCCESS;
2142 BT_DBG("chan %p", chan);
2144 while (len >= L2CAP_CONF_OPT_SIZE) {
2145 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; mandatory ones may not. */
2147 hint = type & L2CAP_CONF_HINT;
2148 type &= L2CAP_CONF_MASK;
2151 case L2CAP_CONF_MTU:
2155 case L2CAP_CONF_FLUSH_TO:
2156 chan->flush_to = val;
2159 case L2CAP_CONF_QOS:
2162 case L2CAP_CONF_RFC:
2163 if (olen == sizeof(rfc))
2164 memcpy(&rfc, (void *) val, olen);
2167 case L2CAP_CONF_FCS:
2168 if (val == L2CAP_FCS_NONE)
2169 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2172 case L2CAP_CONF_EFS:
2174 if (olen == sizeof(efs))
2175 memcpy(&efs, (void *) val, olen);
2178 case L2CAP_CONF_EWS:
2180 return -ECONNREFUSED;
2182 set_bit(FLAG_EXT_CTRL, &chan->flags);
2183 set_bit(CONF_EWS_RECV, &chan->conf_state);
2184 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2185 chan->remote_tx_win = val;
/* Unknown non-hint option: echo its type back as unknown. */
2192 result = L2CAP_CONF_UNKNOWN;
2193 *((u8 *) ptr++) = type;
2198 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2201 switch (chan->mode) {
2202 case L2CAP_MODE_STREAMING:
2203 case L2CAP_MODE_ERTM:
2204 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2205 chan->mode = l2cap_select_mode(rfc.mode,
2206 chan->conn->feat_mask);
2211 if (__l2cap_efs_supported(chan))
2212 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2214 return -ECONNREFUSED;
2217 if (chan->mode != rfc.mode)
2218 return -ECONNREFUSED;
2224 if (chan->mode != rfc.mode) {
2225 result = L2CAP_CONF_UNACCEPT;
2226 rfc.mode = chan->mode;
/* Mode still disputed after one response round: give up. */
2228 if (chan->num_conf_rsp == 1)
2229 return -ECONNREFUSED;
2231 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2232 sizeof(rfc), (unsigned long) &rfc);
2235 if (result == L2CAP_CONF_SUCCESS) {
2236 /* Configure output options and let the other side know
2237 * which ones we don't like. */
2239 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2240 result = L2CAP_CONF_UNACCEPT;
2243 set_bit(CONF_MTU_DONE, &chan->conf_state);
2245 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
2248 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2249 efs.stype != L2CAP_SERV_NOTRAFIC &&
2250 efs.stype != chan->local_stype) {
2252 result = L2CAP_CONF_UNACCEPT;
2254 if (chan->num_conf_req >= 1)
2255 return -ECONNREFUSED;
2257 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2259 (unsigned long) &efs);
2261 /* Send PENDING Conf Rsp */
2262 result = L2CAP_CONF_PENDING;
2263 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2268 case L2CAP_MODE_BASIC:
2269 chan->fcs = L2CAP_FCS_NONE;
2270 set_bit(CONF_MODE_DONE, &chan->conf_state);
2273 case L2CAP_MODE_ERTM:
2274 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2275 chan->remote_tx_win = rfc.txwin_size;
2277 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2279 chan->remote_max_tx = rfc.max_transmit;
2281 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2283 L2CAP_EXT_HDR_SIZE -
2286 rfc.max_pdu_size = cpu_to_le16(size);
2287 chan->remote_mps = size;
2289 rfc.retrans_timeout =
2290 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2291 rfc.monitor_timeout =
2292 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2294 set_bit(CONF_MODE_DONE, &chan->conf_state);
2296 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2297 sizeof(rfc), (unsigned long) &rfc);
2299 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2300 chan->remote_id = efs.id;
2301 chan->remote_stype = efs.stype;
2302 chan->remote_msdu = le16_to_cpu(efs.msdu);
2303 chan->remote_flush_to =
2304 le32_to_cpu(efs.flush_to);
2305 chan->remote_acc_lat =
2306 le32_to_cpu(efs.acc_lat);
2307 chan->remote_sdu_itime =
2308 le32_to_cpu(efs.sdu_itime);
2309 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2310 sizeof(efs), (unsigned long) &efs);
2314 case L2CAP_MODE_STREAMING:
2315 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2317 L2CAP_EXT_HDR_SIZE -
2320 rfc.max_pdu_size = cpu_to_le16(size);
2321 chan->remote_mps = size;
2323 set_bit(CONF_MODE_DONE, &chan->conf_state);
2325 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2326 sizeof(rfc), (unsigned long) &rfc);
2331 result = L2CAP_CONF_UNACCEPT;
2333 memset(&rfc, 0, sizeof(rfc));
2334 rfc.mode = chan->mode;
2337 if (result == L2CAP_CONF_SUCCESS)
2338 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2340 rsp->scid = cpu_to_le16(chan->dcid);
2341 rsp->result = cpu_to_le16(result);
2342 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configuration Response and build the follow-up
 * Configuration Request into 'data', adopting accepted values (MTU,
 * flush timeout, RFC, EWS, EFS) into the channel. Returns the request
 * length, or -ECONNREFUSED on an unacceptable mode/service type. */
2347 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2349 struct l2cap_conf_req *req = data;
2350 void *ptr = req->data;
2353 struct l2cap_conf_rfc rfc;
2354 struct l2cap_conf_efs efs;
2356 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2358 while (len >= L2CAP_CONF_OPT_SIZE) {
2359 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2362 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: reject but re-offer ours. */
2363 if (val < L2CAP_DEFAULT_MIN_MTU) {
2364 *result = L2CAP_CONF_UNACCEPT;
2365 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2368 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2371 case L2CAP_CONF_FLUSH_TO:
2372 chan->flush_to = val;
2373 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2377 case L2CAP_CONF_RFC:
2378 if (olen == sizeof(rfc))
2379 memcpy(&rfc, (void *)val, olen);
2381 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2382 rfc.mode != chan->mode)
2383 return -ECONNREFUSED;
2387 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2388 sizeof(rfc), (unsigned long) &rfc);
2391 case L2CAP_CONF_EWS:
2392 chan->tx_win = min_t(u16, val,
2393 L2CAP_DEFAULT_EXT_WINDOW);
2394 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2398 case L2CAP_CONF_EFS:
2399 if (olen == sizeof(efs))
2400 memcpy(&efs, (void *)val, olen);
2402 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2403 efs.stype != L2CAP_SERV_NOTRAFIC &&
2404 efs.stype != chan->local_stype)
2405 return -ECONNREFUSED;
2407 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2408 sizeof(efs), (unsigned long) &efs);
/* A basic-mode channel may not be renegotiated to another mode. */
2413 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2414 return -ECONNREFUSED;
2416 chan->mode = rfc.mode;
2418 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2420 case L2CAP_MODE_ERTM:
2421 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2422 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2423 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2425 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2426 chan->local_msdu = le16_to_cpu(efs.msdu);
2427 chan->local_sdu_itime =
2428 le32_to_cpu(efs.sdu_itime);
2429 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2430 chan->local_flush_to =
2431 le32_to_cpu(efs.flush_to);
2435 case L2CAP_MODE_STREAMING:
2436 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2440 req->dcid = cpu_to_le16(chan->dcid);
2441 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal Configuration Response header (scid/result/flags) into
 * 'data'; the return value (on an omitted line) is the response size. */
2446 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2448 struct l2cap_conf_rsp *rsp = data;
2449 void *ptr = rsp->data;
2451 BT_DBG("chan %p", chan);
2453 rsp->scid = cpu_to_le16(chan->dcid);
2454 rsp->result = cpu_to_le16(result);
2455 rsp->flags = cpu_to_le16(flags);
/* Send the deferred Connect Response (success/no-info) for a channel
 * whose acceptance was postponed, then fire off the first Configuration
 * Request if one has not already been sent. */
2460 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2462 struct l2cap_conn_rsp rsp;
2463 struct l2cap_conn *conn = chan->conn;
2466 rsp.scid = cpu_to_le16(chan->dcid);
2467 rsp.dcid = cpu_to_le16(chan->scid);
2468 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2469 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
2470 l2cap_send_cmd(conn, chan->ident,
2471 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only the first caller past this test sends the config request. */
2473 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2476 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2477 l2cap_build_conf_req(chan, buf), buf);
2478 chan->num_conf_req++;
/* Extract the RFC option from a Configuration Response and adopt its
 * timeouts/MPS into the channel. No-op unless the channel is in ERTM or
 * streaming mode. */
2481 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2485 struct l2cap_conf_rfc rfc;
2487 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2489 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2492 while (len >= L2CAP_CONF_OPT_SIZE) {
2493 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2496 case L2CAP_CONF_RFC:
2497 if (olen == sizeof(rfc))
2498 memcpy(&rfc, (void *)val, olen);
2505 case L2CAP_MODE_ERTM:
2506 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2507 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2508 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2510 case L2CAP_MODE_STREAMING:
2511 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject. If it answers our outstanding information
 * request (matching ident), stop the info timer, mark feature discovery
 * done, and kick off any connections waiting on it. */
2515 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2517 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2519 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2522 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2523 cmd->ident == conn->info_ident) {
2524 del_timer(&conn->info_timer);
2526 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2527 conn->info_ident = 0;
2529 l2cap_conn_start(conn);
/* Handle an incoming Connect Request: locate a listening channel for
 * the PSM, enforce link security (except for SDP, PSM 0x0001), check
 * the accept backlog, create the child channel, and reply with
 * success/pending/reject. May also trigger a feature-mask information
 * exchange and the first Configuration Request. */
2535 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2537 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2538 struct l2cap_conn_rsp rsp;
2539 struct l2cap_chan *chan = NULL, *pchan;
2540 struct sock *parent, *sk = NULL;
2541 int result, status = L2CAP_CS_NO_INFO;
2543 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2544 __le16 psm = req->psm;
2546 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2548 /* Check if we have socket listening on psm */
2549 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2551 result = L2CAP_CR_BAD_PSM;
2557 bh_lock_sock(parent);
2559 /* Check if the ACL is secure enough (if not SDP) */
2560 if (psm != cpu_to_le16(0x0001) &&
2561 !hci_conn_check_link_mode(conn->hcon)) {
2562 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2563 result = L2CAP_CR_SEC_BLOCK;
2567 result = L2CAP_CR_NO_MEM;
2569 /* Check for backlog size */
2570 if (sk_acceptq_is_full(parent)) {
2571 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2575 chan = pchan->ops->new_connection(pchan->data);
2581 write_lock_bh(&conn->chan_lock);
2583 /* Check if we already have channel with that dcid */
2584 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2585 write_unlock_bh(&conn->chan_lock);
2586 sock_set_flag(sk, SOCK_ZAPPED);
2587 chan->ops->close(chan->data);
2591 hci_conn_hold(conn->hcon);
2593 bacpy(&bt_sk(sk)->src, conn->src);
2594 bacpy(&bt_sk(sk)->dst, conn->dst);
2598 bt_accept_enqueue(parent, sk);
2600 __l2cap_chan_add(conn, chan);
2604 __set_chan_timer(chan, sk->sk_sndtimeo);
2606 chan->ident = cmd->ident;
2608 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2609 if (l2cap_chan_check_security(chan)) {
2610 if (bt_sk(sk)->defer_setup) {
2611 l2cap_state_change(chan, BT_CONNECT2);
2612 result = L2CAP_CR_PEND;
2613 status = L2CAP_CS_AUTHOR_PEND;
2614 parent->sk_data_ready(parent, 0);
2616 l2cap_state_change(chan, BT_CONFIG);
2617 result = L2CAP_CR_SUCCESS;
2618 status = L2CAP_CS_NO_INFO;
2621 l2cap_state_change(chan, BT_CONNECT2);
2622 result = L2CAP_CR_PEND;
2623 status = L2CAP_CS_AUTHEN_PEND;
2626 l2cap_state_change(chan, BT_CONNECT2);
2627 result = L2CAP_CR_PEND;
2628 status = L2CAP_CS_NO_INFO;
2631 write_unlock_bh(&conn->chan_lock);
2634 bh_unlock_sock(parent);
2637 rsp.scid = cpu_to_le16(scid);
2638 rsp.dcid = cpu_to_le16(dcid);
2639 rsp.result = cpu_to_le16(result);
2640 rsp.status = cpu_to_le16(status);
2641 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2643 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2644 struct l2cap_info_req info;
2645 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2647 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2648 conn->info_ident = l2cap_get_ident(conn);
2650 mod_timer(&conn->info_timer, jiffies +
2651 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2653 l2cap_send_cmd(conn, conn->info_ident,
2654 L2CAP_INFO_REQ, sizeof(info), &info);
2657 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2658 result == L2CAP_CR_SUCCESS) {
2660 set_bit(CONF_REQ_SENT, &chan->conf_state);
2661 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2662 l2cap_build_conf_req(chan, buf), buf);
2663 chan->num_conf_req++;
/* Handle a Connect Response: find the channel by scid (or by ident if
 * no dcid was assigned yet) and advance it — success moves to BT_CONFIG
 * and sends the first Configuration Request; pending marks the channel
 * as connect-pending; anything else tears the channel down. */
2669 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2671 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2672 u16 scid, dcid, result, status;
2673 struct l2cap_chan *chan;
2677 scid = __le16_to_cpu(rsp->scid);
2678 dcid = __le16_to_cpu(rsp->dcid);
2679 result = __le16_to_cpu(rsp->result);
2680 status = __le16_to_cpu(rsp->status);
2682 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2685 chan = l2cap_get_chan_by_scid(conn, scid);
2689 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2697 case L2CAP_CR_SUCCESS:
2698 l2cap_state_change(chan, BT_CONFIG);
2701 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2703 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2706 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2707 l2cap_build_conf_req(chan, req), req);
2708 chan->num_conf_req++;
2712 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2716 /* don't delete l2cap channel if sk is owned by user */
2717 if (sock_owned_by_user(sk)) {
2718 l2cap_state_change(chan, BT_DISCONN);
2719 __clear_chan_timer(chan);
2720 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2724 l2cap_chan_del(chan, ECONNREFUSED);
/* Settle the channel's FCS setting after configuration: no FCS outside
 * ERTM/streaming; CRC16 in those modes unless the peer asked for none. */
2732 static inline void set_default_fcs(struct l2cap_chan *chan)
2734 /* FCS is enabled only in ERTM or streaming mode, if one or both
2737 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2738 chan->fcs = L2CAP_FCS_NONE;
2739 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2740 chan->fcs = L2CAP_FCS_CRC16;
/* Handle a Configuration Request: validate channel state, accumulate
 * (possibly multi-fragment) option data in chan->conf_req, and when the
 * request is complete parse it, reply, and — once both directions are
 * configured — bring the channel up (initialising ERTM if selected). */
2743 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2745 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2748 struct l2cap_chan *chan;
2752 dcid = __le16_to_cpu(req->dcid);
2753 flags = __le16_to_cpu(req->flags);
2755 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2757 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config is only legal in BT_CONFIG/BT_CONNECT2; otherwise reject
 * with an invalid-CID command reject. */
2763 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2764 struct l2cap_cmd_rej_cid rej;
2766 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2767 rej.scid = cpu_to_le16(chan->scid);
2768 rej.dcid = cpu_to_le16(chan->dcid);
2770 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2775 /* Reject if config buffer is too small. */
2776 len = cmd_len - sizeof(*req);
2777 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2778 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2779 l2cap_build_conf_rsp(chan, rsp,
2780 L2CAP_CONF_REJECT, flags), rsp);
2785 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2786 chan->conf_len += len;
/* Continuation flag set: more option fragments are coming. */
2788 if (flags & 0x0001) {
2789 /* Incomplete config. Send empty response. */
2790 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2791 l2cap_build_conf_rsp(chan, rsp,
2792 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2796 /* Complete config. */
2797 len = l2cap_parse_conf_req(chan, rsp);
2799 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2803 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2804 chan->num_conf_rsp++;
2806 /* Reset config buffer. */
2809 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
2812 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2813 set_default_fcs(chan);
2815 l2cap_state_change(chan, BT_CONNECTED);
2817 chan->next_tx_seq = 0;
2818 chan->expected_tx_seq = 0;
2819 skb_queue_head_init(&chan->tx_q);
2820 if (chan->mode == L2CAP_MODE_ERTM)
2821 l2cap_ertm_init(chan);
2823 l2cap_chan_ready(sk);
2827 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2829 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2830 l2cap_build_conf_req(chan, buf), buf);
2831 chan->num_conf_req++;
2834 /* Got Conf Rsp PENDING from remote side and asume we sent
2835 Conf Rsp PENDING in the code above */
2836 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2837 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2839 /* check compatibility */
2841 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2842 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2844 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2845 l2cap_build_conf_rsp(chan, rsp,
2846 L2CAP_CONF_SUCCESS, 0x0000), rsp);
/* Handle a Configuration Response: on success adopt the RFC values; on
 * pending, answer our own pending state; on unaccept, re-negotiate with
 * a fresh request (bounded by L2CAP_CONF_MAX_CONF_RSP); otherwise tear
 * the channel down. When both directions are done, bring the channel
 * up (initialising ERTM if selected). */
2854 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2856 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2857 u16 scid, flags, result;
2858 struct l2cap_chan *chan;
2860 int len = cmd->len - sizeof(*rsp);
2862 scid = __le16_to_cpu(rsp->scid);
2863 flags = __le16_to_cpu(rsp->flags);
2864 result = __le16_to_cpu(rsp->result);
2866 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2867 scid, flags, result);
2869 chan = l2cap_get_chan_by_scid(conn, scid);
2876 case L2CAP_CONF_SUCCESS:
2877 l2cap_conf_rfc_get(chan, rsp->data, len);
2878 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2881 case L2CAP_CONF_PENDING:
2882 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2884 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2887 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2890 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2894 /* check compatibility */
2896 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2897 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2899 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2900 l2cap_build_conf_rsp(chan, buf,
2901 L2CAP_CONF_SUCCESS, 0x0000), buf);
2905 case L2CAP_CONF_UNACCEPT:
2906 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Response options must fit in our request buffer. */
2909 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2910 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2914 /* throw out any old stored conf requests */
2915 result = L2CAP_CONF_SUCCESS;
2916 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2919 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2923 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2924 L2CAP_CONF_REQ, len, req);
2925 chan->num_conf_req++;
2926 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable: flag the socket and disconnect. */
2932 sk->sk_err = ECONNRESET;
2933 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2934 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2941 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2943 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2944 set_default_fcs(chan);
2946 l2cap_state_change(chan, BT_CONNECTED);
2947 chan->next_tx_seq = 0;
2948 chan->expected_tx_seq = 0;
2949 skb_queue_head_init(&chan->tx_q);
2950 if (chan->mode == L2CAP_MODE_ERTM)
2951 l2cap_ertm_init(chan);
2953 l2cap_chan_ready(sk);
/* Handle a Disconnect Request: acknowledge with a Disconnect Response,
 * shut the socket down, and delete the channel — unless the socket is
 * currently owned by userspace, in which case defer via a DISC timer. */
2961 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2963 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2964 struct l2cap_disconn_rsp rsp;
2966 struct l2cap_chan *chan;
2969 scid = __le16_to_cpu(req->scid);
2970 dcid = __le16_to_cpu(req->dcid);
2972 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
2974 chan = l2cap_get_chan_by_scid(conn, dcid);
2980 rsp.dcid = cpu_to_le16(chan->scid);
2981 rsp.scid = cpu_to_le16(chan->dcid);
2982 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2984 sk->sk_shutdown = SHUTDOWN_MASK;
2986 /* don't delete l2cap channel if sk is owned by user */
2987 if (sock_owned_by_user(sk)) {
2988 l2cap_state_change(chan, BT_DISCONN);
2989 __clear_chan_timer(chan);
2990 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2995 l2cap_chan_del(chan, ECONNRESET);
2998 chan->ops->close(chan->data);
/* Handle a Disconnect Response: the peer confirmed our disconnect, so
 * delete the channel — deferring via a DISC timer if the socket is
 * currently owned by userspace. */
3002 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3004 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3006 struct l2cap_chan *chan;
3009 scid = __le16_to_cpu(rsp->scid);
3010 dcid = __le16_to_cpu(rsp->dcid);
3012 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3014 chan = l2cap_get_chan_by_scid(conn, scid);
3020 /* don't delete l2cap channel if sk is owned by user */
3021 if (sock_owned_by_user(sk)) {
3022 l2cap_state_change(chan,BT_DISCONN);
3023 __clear_chan_timer(chan);
3024 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
3029 l2cap_chan_del(chan, 0);
3032 chan->ops->close(chan->data);
/* Answer an L2CAP Information Request.  Two request types are
 * supported: the extended feature mask and the fixed-channel map;
 * anything else is answered with L2CAP_IR_NOTSUPP.
 * NOTE(review): elided listing — conditional guards around the
 * feat_mask |= lines are not fully visible here.
 */
3036 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3038 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3041 type = __le16_to_cpu(req->type);
3043 BT_DBG("type 0x%4.4x", type);
3045 if (type == L2CAP_IT_FEAT_MASK) {
/* Build the feature-mask response in a local buffer. */
3047 u32 feat_mask = l2cap_feat_mask;
3048 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3049 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3050 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming (and, presumably behind a config/module
 * check not visible here, extended flow spec and window). */
3052 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3055 feat_mask |= L2CAP_FEAT_EXT_FLOW
3056 | L2CAP_FEAT_EXT_WINDOW;
/* Mask is serialized little-endian into the variable-length data. */
3058 put_unaligned_le32(feat_mask, rsp->data);
3059 l2cap_send_cmd(conn, cmd->ident,
3060 L2CAP_INFO_RSP, sizeof(buf), buf);
3061 } else if (type == L2CAP_IT_FIXED_CHAN) {
3063 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* A2MP support toggles the corresponding bit in the fixed-channel
 * bitmap before it is copied into the response. */
3066 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3068 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3070 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3071 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3072 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3073 l2cap_send_cmd(conn, cmd->ident,
3074 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: reply "not supported". */
3076 struct l2cap_info_rsp rsp;
3077 rsp.type = cpu_to_le16(type);
3078 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3079 l2cap_send_cmd(conn, cmd->ident,
3080 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Process the peer's Information Response.  Runs the two-step info
 * exchange: feature mask first, then (if the peer supports fixed
 * channels) the fixed-channel map; once done, mark the exchange
 * complete and kick pending connections via l2cap_conn_start().
 */
3086 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3088 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3091 type = __le16_to_cpu(rsp->type);
3092 result = __le16_to_cpu(rsp->result);
3094 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3096 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3097 if (cmd->ident != conn->info_ident ||
3098 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3101 del_timer(&conn->info_timer);
/* Peer refused: give up on the exchange but still start channels. */
3103 if (result != L2CAP_IR_SUCCESS) {
3104 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3105 conn->info_ident = 0;
3107 l2cap_conn_start(conn);
3112 if (type == L2CAP_IT_FEAT_MASK) {
3113 conn->feat_mask = get_unaligned_le32(rsp->data);
/* If the peer supports fixed channels, ask for its channel map next. */
3115 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3116 struct l2cap_info_req req;
3117 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3119 conn->info_ident = l2cap_get_ident(conn);
3121 l2cap_send_cmd(conn, conn->info_ident,
3122 L2CAP_INFO_REQ, sizeof(req), &req);
3124 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3125 conn->info_ident = 0;
3127 l2cap_conn_start(conn);
/* Fixed-channel map received: the exchange is complete. */
3129 } else if (type == L2CAP_IT_FIXED_CHAN) {
3130 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3131 conn->info_ident = 0;
3133 l2cap_conn_start(conn);
/* Handle an AMP Create Channel Request.  AMP channel creation is not
 * implemented yet: every request is rejected with L2CAP_CR_NO_MEM
 * (see the "Placeholder" comment below).
 */
3139 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3140 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3143 struct l2cap_create_chan_req *req = data;
3144 struct l2cap_create_chan_rsp rsp;
/* Malformed command: length must match the request structure. */
3147 if (cmd_len != sizeof(*req))
3153 psm = le16_to_cpu(req->psm);
3154 scid = le16_to_cpu(req->scid);
3156 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3158 /* Placeholder: Always reject */
3160 rsp.scid = cpu_to_le16(scid);
3161 rsp.result = L2CAP_CR_NO_MEM;
3162 rsp.status = L2CAP_CS_NO_INFO;
3164 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* Handle an AMP Create Channel Response by treating it exactly like a
 * normal Connection Response (same wire layout for the fields used).
 */
3170 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3171 struct l2cap_cmd_hdr *cmd, void *data)
3173 BT_DBG("conn %p", conn);
3175 return l2cap_connect_rsp(conn, cmd, data);
/* Send a Move Channel Response for the given initiator CID with the
 * given result code, reusing the request's ident.
 */
3178 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3179 u16 icid, u16 result)
3181 struct l2cap_move_chan_rsp rsp;
3183 BT_DBG("icid %d, result %d", icid, result);
3185 rsp.icid = cpu_to_le16(icid);
3186 rsp.result = cpu_to_le16(result);
3188 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
/* Send a Move Channel Confirmation with a freshly allocated ident.
 * The ident is stored in chan->ident so the eventual Confirmation
 * Response can be matched to this channel.
 * NOTE(review): callers may pass chan == NULL (see
 * l2cap_move_channel_rsp below); the elided lines presumably guard
 * the chan->ident store — confirm against the full source.
 */
3191 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3192 struct l2cap_chan *chan, u16 icid, u16 result)
3194 struct l2cap_move_chan_cfm cfm;
3197 BT_DBG("icid %d, result %d", icid, result);
3199 ident = l2cap_get_ident(conn);
3201 chan->ident = ident;
3203 cfm.icid = cpu_to_le16(icid);
3204 cfm.result = cpu_to_le16(result);
3206 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
/* Send a Move Channel Confirmation Response for the given initiator
 * CID, echoing the confirmation's ident.
 */
3209 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3212 struct l2cap_move_chan_cfm_rsp rsp;
3214 BT_DBG("icid %d", icid);
3216 rsp.icid = cpu_to_le16(icid);
3217 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  Channel moves (AMP) are not
 * implemented: every request is refused with L2CAP_MR_NOT_ALLOWED.
 */
3220 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3221 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3223 struct l2cap_move_chan_req *req = data;
3225 u16 result = L2CAP_MR_NOT_ALLOWED;
/* Malformed command: length must match the request structure. */
3227 if (cmd_len != sizeof(*req))
3230 icid = le16_to_cpu(req->icid);
3232 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3237 /* Placeholder: Always refuse */
3238 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  Until moves are implemented, the
 * move is always answered with an "unconfirmed" confirmation
 * (chan == NULL since no channel is actually moving).
 */
3243 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3244 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3246 struct l2cap_move_chan_rsp *rsp = data;
3249 if (cmd_len != sizeof(*rsp))
3252 icid = le16_to_cpu(rsp->icid);
3253 result = le16_to_cpu(rsp->result);
3255 BT_DBG("icid %d, result %d", icid, result);
3257 /* Placeholder: Always unconfirmed */
3258 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirmation: validate the length and simply
 * acknowledge it with a Confirmation Response.  The result code is
 * parsed for the debug log but otherwise unused (moves unsupported).
 */
3263 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3264 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3266 struct l2cap_move_chan_cfm *cfm = data;
3269 if (cmd_len != sizeof(*cfm))
3272 icid = le16_to_cpu(cfm->icid);
3273 result = le16_to_cpu(cfm->result);
3275 BT_DBG("icid %d, result %d", icid, result);
3277 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirmation Response: length check and debug
 * log only — nothing to do while channel moves are unimplemented.
 */
3282 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3283 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3285 struct l2cap_move_chan_cfm_rsp *rsp = data;
3288 if (cmd_len != sizeof(*rsp))
3291 icid = le16_to_cpu(rsp->icid);
3293 BT_DBG("icid %d", icid);
/* Validate LE connection parameters against the ranges allowed by the
 * Bluetooth spec: interval 6..3200 (min <= max), supervision timeout
 * 10..3200, timeout strictly greater than 8*max interval, and slave
 * latency <= 499 and below the derived maximum.
 */
3298 static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
3303 if (min > max || min < 6 || max > 3200)
3306 if (to_multiplier < 10 || to_multiplier > 3200)
/* Supervision timeout must exceed the maximum connection interval. */
3309 if (max >= to_multiplier * 8)
3312 max_latency = (to_multiplier * 8 / max) - 1;
3313 if (latency > 499 || latency > max_latency)
/* Handle an LE Connection Parameter Update Request.  Only the master
 * may accept one; the parameters are validated and the accept/reject
 * verdict is sent back, and on acceptance the controller is asked to
 * apply the new parameters via hci_le_conn_update().
 */
3319 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3320 struct l2cap_cmd_hdr *cmd, u8 *data)
3322 struct hci_conn *hcon = conn->hcon;
3323 struct l2cap_conn_param_update_req *req;
3324 struct l2cap_conn_param_update_rsp rsp;
3325 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the connection master is allowed to process this request. */
3328 if (!(hcon->link_mode & HCI_LM_MASTER))
3331 cmd_len = __le16_to_cpu(cmd->len);
3332 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3335 req = (struct l2cap_conn_param_update_req *) data;
3336 min = __le16_to_cpu(req->min);
3337 max = __le16_to_cpu(req->max);
3338 latency = __le16_to_cpu(req->latency);
3339 to_multiplier = __le16_to_cpu(req->to_multiplier);
3341 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3342 min, max, latency, to_multiplier);
3344 memset(&rsp, 0, sizeof(rsp));
3346 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3348 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3350 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3352 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Accepted: push the new parameters down to the LE controller. */
3356 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command to its handler.  Echo requests
 * are answered inline by reflecting the payload; unknown opcodes log
 * an error (the caller sends a Command Reject on failure).
 */
3361 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3362 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3366 switch (cmd->code) {
3367 case L2CAP_COMMAND_REJ:
3368 l2cap_command_rej(conn, cmd, data);
3371 case L2CAP_CONN_REQ:
3372 err = l2cap_connect_req(conn, cmd, data);
3375 case L2CAP_CONN_RSP:
3376 err = l2cap_connect_rsp(conn, cmd, data);
3379 case L2CAP_CONF_REQ:
3380 err = l2cap_config_req(conn, cmd, cmd_len, data);
3383 case L2CAP_CONF_RSP:
3384 err = l2cap_config_rsp(conn, cmd, data);
3387 case L2CAP_DISCONN_REQ:
3388 err = l2cap_disconnect_req(conn, cmd, data);
3391 case L2CAP_DISCONN_RSP:
3392 err = l2cap_disconnect_rsp(conn, cmd, data);
/* Echo: reflect the request payload straight back. */
3395 case L2CAP_ECHO_REQ:
3396 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3399 case L2CAP_ECHO_RSP:
3402 case L2CAP_INFO_REQ:
3403 err = l2cap_information_req(conn, cmd, data);
3406 case L2CAP_INFO_RSP:
3407 err = l2cap_information_rsp(conn, cmd, data);
3410 case L2CAP_CREATE_CHAN_REQ:
3411 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3414 case L2CAP_CREATE_CHAN_RSP:
3415 err = l2cap_create_channel_rsp(conn, cmd, data);
3418 case L2CAP_MOVE_CHAN_REQ:
3419 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3422 case L2CAP_MOVE_CHAN_RSP:
3423 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3426 case L2CAP_MOVE_CHAN_CFM:
3427 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3430 case L2CAP_MOVE_CHAN_CFM_RSP:
3431 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3435 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command.  Only the connection-parameter
 * update request needs handling; rejects and update responses are
 * ignored, and unknown opcodes log an error.
 */
3443 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3444 struct l2cap_cmd_hdr *cmd, u8 *data)
3446 switch (cmd->code) {
3447 case L2CAP_COMMAND_REJ:
3450 case L2CAP_CONN_PARAM_UPDATE_REQ:
3451 return l2cap_conn_param_update_req(conn, cmd, data);
3453 case L2CAP_CONN_PARAM_UPDATE_RSP:
3457 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse the signaling channel payload: iterate over the packed
 * command headers, dispatch each to the LE or BR/EDR handler
 * depending on link type, and send a Command Reject for commands the
 * handler failed on.  Raw sockets get a copy of the whole skb first.
 */
3462 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3463 struct sk_buff *skb)
3465 u8 *data = skb->data;
3467 struct l2cap_cmd_hdr cmd;
3470 l2cap_raw_recv(conn, skb);
/* Walk the buffer one command at a time; each command is a fixed
 * header followed by cmd.len bytes of payload. */
3472 while (len >= L2CAP_CMD_HDR_SIZE) {
3474 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3475 data += L2CAP_CMD_HDR_SIZE;
3476 len -= L2CAP_CMD_HDR_SIZE;
3478 cmd_len = le16_to_cpu(cmd.len);
3480 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Truncated payload or reserved ident 0: stop parsing entirely. */
3482 if (cmd_len > len || !cmd.ident) {
3483 BT_DBG("corrupted command");
3487 if (conn->hcon->type == LE_LINK)
3488 err = l2cap_le_sig_cmd(conn, &cmd, data);
3490 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3493 struct l2cap_cmd_rej_unk rej;
3495 BT_ERR("Wrong link type (%d)", err);
3497 /* FIXME: Map err to a valid reason */
3498 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3499 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the CRC16 FCS trailer of an ERTM/streaming
 * frame.  The CRC covers the L2CAP header (which has already been
 * pulled, hence skb->data - hdr_size) plus the remaining payload.
 * Returns nonzero on mismatch; no-op when FCS is disabled.
 */
3509 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3511 u16 our_fcs, rcv_fcs;
/* Extended vs enhanced control field changes the covered header size. */
3514 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3515 hdr_size = L2CAP_EXT_HDR_SIZE;
3517 hdr_size = L2CAP_ENH_HDR_SIZE;
3519 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Trim the 2-byte FCS off; it then sits just past the new tail and
 * can still be read from the linear buffer. */
3520 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3521 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3522 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3524 if (our_fcs != rcv_fcs)
/* After an ERTM poll, answer with whatever is appropriate: RNR if we
 * are locally busy, otherwise pending I-frames (retransmitted first
 * when the remote was busy), and finally an RR if nothing at all was
 * sent so the peer still gets its final-bit acknowledgement.
 */
3530 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3534 chan->frames_sent = 0;
3536 control |= __set_reqseq(chan, chan->buffer_seq);
3538 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3539 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3540 l2cap_send_sframe(chan, control);
3541 set_bit(CONN_RNR_SENT, &chan->conn_state);
3544 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3545 l2cap_retransmit_frames(chan);
3547 l2cap_ertm_send(chan);
/* Nothing went out and we're not busy: send a plain RR so the poll
 * still gets answered. */
3549 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3550 chan->frames_sent == 0) {
3551 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3552 l2cap_send_sframe(chan, control);
/* Insert an out-of-sequence I-frame into the SREJ reassembly queue,
 * keeping the queue ordered by tx_seq offset relative to buffer_seq.
 * Duplicates (same tx_seq already queued) are detected and the skb is
 * not inserted; otherwise the frame is placed before the first queued
 * frame with a larger offset, or appended at the tail.
 */
3556 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3558 struct sk_buff *next_skb;
3559 int tx_seq_offset, next_tx_seq_offset;
/* Stash sequence/SAR metadata in the skb control block for later
 * reassembly. */
3561 bt_cb(skb)->tx_seq = tx_seq;
3562 bt_cb(skb)->sar = sar;
3564 next_skb = skb_peek(&chan->srej_q);
/* Empty queue: trivial append. */
3566 __skb_queue_tail(&chan->srej_q, skb);
3570 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
/* Duplicate sequence number: bail out without queuing. */
3573 if (bt_cb(next_skb)->tx_seq == tx_seq)
3576 next_tx_seq_offset = __seq_offset(chan,
3577 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3579 if (next_tx_seq_offset > tx_seq_offset) {
3580 __skb_queue_before(&chan->srej_q, next_skb, skb);
3584 if (skb_queue_is_last(&chan->srej_q, next_skb))
3587 } while ((next_skb = skb_queue_next(&chan->srej_q, next_skb)));
/* Largest offset seen so far: append at the tail. */
3589 __skb_queue_tail(&chan->srej_q, skb);
/* Chain new_frag onto skb's frag_list (via *last_frag, which tracks
 * the current tail) and update skb's aggregate length/size counters.
 */
3594 static void append_skb_frag(struct sk_buff *skb,
3595 struct sk_buff *new_frag, struct sk_buff **last_frag)
3597 /* skb->len reflects data in skb as well as all fragments
3598 * skb->data_len reflects only data in fragments
/* First fragment starts the frag_list; later ones hang off the tail. */
3600 if (!skb_has_frag_list(skb))
3601 skb_shinfo(skb)->frag_list = new_frag;
3603 new_frag->next = NULL;
3605 (*last_frag)->next = new_frag;
3606 *last_frag = new_frag;
3608 skb->len += new_frag->len;
3609 skb->data_len += new_frag->len;
3610 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from SAR-tagged frames.  UNSEGMENTED frames are
 * delivered directly; START frames pull the 2-byte SDU length and
 * begin a partial SDU; CONTINUE/END frames are appended via
 * append_skb_frag(), with the complete SDU delivered on END.  On
 * error the partial SDU is discarded.
 * NOTE(review): elided listing — several guard branches (oversize,
 * unexpected-SAR) are not fully visible here.
 */
3613 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3617 switch (__get_ctrl_sar(chan, control)) {
3618 case L2CAP_SAR_UNSEGMENTED:
/* Whole SDU in one frame: hand it straight to the channel owner. */
3622 err = chan->ops->recv(chan->data, skb);
3625 case L2CAP_SAR_START:
/* First fragment carries the total SDU length up front. */
3629 chan->sdu_len = get_unaligned_le16(skb->data);
3630 skb_pull(skb, L2CAP_SDULEN_SIZE);
/* Announced SDU larger than our MTU: reject. */
3632 if (chan->sdu_len > chan->imtu) {
3637 if (skb->len >= chan->sdu_len)
3641 chan->sdu_last_frag = skb;
3647 case L2CAP_SAR_CONTINUE:
3651 append_skb_frag(chan->sdu, skb,
3652 &chan->sdu_last_frag);
/* Accumulated more than announced: protocol error. */
3655 if (chan->sdu->len >= chan->sdu_len)
3665 append_skb_frag(chan->sdu, skb,
3666 &chan->sdu_last_frag);
/* END fragment: total length must match exactly. */
3669 if (chan->sdu->len != chan->sdu_len)
3672 err = chan->ops->recv(chan->data, chan->sdu);
3675 /* Reassembly complete */
3677 chan->sdu_last_frag = NULL;
/* Error path: throw the partial SDU away. */
3685 kfree_skb(chan->sdu);
3687 chan->sdu_last_frag = NULL;
/* Enter ERTM local-busy state: mark the channel busy, tell the peer
 * with an RNR S-frame carrying the current buffer_seq, and stop the
 * ack timer (we won't be acknowledging while busy).
 */
3694 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3698 BT_DBG("chan %p, Enter local busy", chan);
3700 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3702 control = __set_reqseq(chan, chan->buffer_seq);
3703 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3704 l2cap_send_sframe(chan, control);
3706 set_bit(CONN_RNR_SENT, &chan->conn_state);
3708 __clear_ack_timer(chan);
/* Leave ERTM local-busy state.  If we previously advertised RNR, poll
 * the peer with an RR(P=1), arm the monitor timer and wait for the
 * final bit; then clear the busy/RNR flags.
 * NOTE(review): elided listing — the branch structure between the
 * poll and the flag clearing is not fully visible here.
 */
3711 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3715 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
/* Poll the remote so it resumes sending: RR with the P bit set. */
3718 control = __set_reqseq(chan, chan->buffer_seq);
3719 control |= __set_ctrl_poll(chan);
3720 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3721 l2cap_send_sframe(chan, control);
3722 chan->retry_count = 1;
3724 __clear_retrans_timer(chan);
3725 __set_monitor_timer(chan);
3727 set_bit(CONN_WAIT_F, &chan->conn_state);
3730 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3731 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3733 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook: toggle local-busy handling for a channel.  Only ERTM
 * channels need busy signalling; other modes are a no-op.
 */
3736 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3738 if (chan->mode == L2CAP_MODE_ERTM) {
3740 l2cap_ertm_enter_local_busy(chan);
3742 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ queue starting at tx_seq: deliver each in-order
 * queued frame to reassembly, advancing buffer_seq_srej, and stop at
 * the first gap or when local-busy is raised.  A reassembly error
 * tears the connection down with ECONNRESET.
 */
3746 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3748 struct sk_buff *skb;
3751 while ((skb = skb_peek(&chan->srej_q)) &&
3752 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Head of queue isn't the next expected frame: gap remains. */
3755 if (bt_cb(skb)->tx_seq != tx_seq)
3758 skb = skb_dequeue(&chan->srej_q);
3759 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3760 err = l2cap_reassemble_sdu(chan, skb, control);
3763 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3767 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3768 tx_seq = __next_seq(chan, tx_seq);
/* Walk the pending-SREJ list up to (and including) tx_seq, re-sending
 * an SREJ S-frame for each entry and moving it to the list tail.
 * NOTE(review): elided listing — the per-entry handling between the
 * match test and the send is not fully visible here.
 */
3772 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3774 struct srej_list *l, *tmp;
3777 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3778 if (l->tx_seq == tx_seq) {
3783 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3784 control |= __set_reqseq(chan, l->tx_seq);
3785 l2cap_send_sframe(chan, control);
3787 list_add_tail(&l->list, &chan->srej_l);
/* Send an SREJ for every missing sequence number between the expected
 * tx_seq and the one just received, recording each in the srej_l list
 * so the retransmissions can be matched later.  Allocation failure is
 * reported to the caller (who disconnects).
 */
3791 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3793 struct srej_list *new;
3796 while (tx_seq != chan->expected_tx_seq) {
3797 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3798 control |= __set_reqseq(chan, chan->expected_tx_seq);
3799 l2cap_send_sframe(chan, control);
/* GFP_ATOMIC: this runs in the receive path, no sleeping allowed. */
3801 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3805 new->tx_seq = chan->expected_tx_seq;
3807 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3809 list_add_tail(&new->list, &chan->srej_l);
/* Finally skip past the frame that triggered the SREJ run. */
3812 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* ERTM I-frame receive state machine.  Validates tx_seq against the
 * receive window, handles the SREJ_SENT recovery sub-state (queueing
 * out-of-order frames, answering gaps with SREJ), and in the normal
 * in-sequence case reassembles the SDU and schedules/sends acks.
 * NOTE(review): elided listing — several labels, gotos and guard
 * branches of the original are not visible here; comments describe
 * only the visible flow.
 */
3817 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3819 u16 tx_seq = __get_txseq(chan, rx_control);
3820 u16 req_seq = __get_reqseq(chan, rx_control);
3821 u8 sar = __get_ctrl_sar(chan, rx_control);
3822 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every tx_win/6 frames to bound ack traffic. */
3823 int num_to_ack = (chan->tx_win/6) + 1;
3826 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3827 tx_seq, rx_control);
/* F bit answering our poll: stop the monitor, resume retransmission
 * timing if frames are still unacked. */
3829 if (__is_ctrl_final(chan, rx_control) &&
3830 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3831 __clear_monitor_timer(chan);
3832 if (chan->unacked_frames > 0)
3833 __set_retrans_timer(chan);
3834 clear_bit(CONN_WAIT_F, &chan->conn_state);
3837 chan->expected_ack_seq = req_seq;
3838 l2cap_drop_acked_frames(chan);
3840 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3842 /* invalid tx_seq */
3843 if (tx_seq_offset >= chan->tx_win) {
3844 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3848 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3851 if (tx_seq == chan->expected_tx_seq)
/* --- SREJ recovery already in progress --- */
3854 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3855 struct srej_list *first;
3857 first = list_first_entry(&chan->srej_l,
3858 struct srej_list, list);
3859 if (tx_seq == first->tx_seq) {
/* This is the oldest missing frame: queue it and try to drain. */
3860 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3861 l2cap_check_srej_gap(chan, tx_seq);
3863 list_del(&first->list);
/* All SREJed frames recovered: leave the recovery sub-state. */
3866 if (list_empty(&chan->srej_l)) {
3867 chan->buffer_seq = chan->buffer_seq_srej;
3868 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3869 l2cap_send_ack(chan);
3870 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3873 struct srej_list *l;
3875 /* duplicated tx_seq */
3876 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3879 list_for_each_entry(l, &chan->srej_l, list) {
3880 if (l->tx_seq == tx_seq) {
3881 l2cap_resend_srejframe(chan, tx_seq);
3886 err = l2cap_send_srejframe(chan, tx_seq);
3888 l2cap_send_disconn_req(chan->conn, chan, -err);
/* --- First out-of-sequence frame: enter SREJ recovery --- */
3893 expected_tx_seq_offset = __seq_offset(chan,
3894 chan->expected_tx_seq, chan->buffer_seq);
3896 /* duplicated tx_seq */
3897 if (tx_seq_offset < expected_tx_seq_offset)
3900 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3902 BT_DBG("chan %p, Enter SREJ", chan);
3904 INIT_LIST_HEAD(&chan->srej_l);
3905 chan->buffer_seq_srej = chan->buffer_seq;
3907 __skb_queue_head_init(&chan->srej_q);
3908 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3910 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3912 err = l2cap_send_srejframe(chan, tx_seq);
3914 l2cap_send_disconn_req(chan->conn, chan, -err);
3918 __clear_ack_timer(chan);
/* --- Expected in-sequence frame --- */
3923 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3925 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3926 bt_cb(skb)->tx_seq = tx_seq;
3927 bt_cb(skb)->sar = sar;
3928 __skb_queue_tail(&chan->srej_q, skb);
3932 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3933 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3936 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3940 if (__is_ctrl_final(chan, rx_control)) {
3941 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3942 l2cap_retransmit_frames(chan);
/* Ack bookkeeping: immediate ack every num_to_ack frames, otherwise
 * arm the delayed-ack timer. */
3946 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3947 if (chan->num_acked == num_to_ack - 1)
3948 l2cap_send_ack(chan);
3950 __set_ack_timer(chan);
/* Handle a received RR (Receiver Ready) S-frame: credit acked frames,
 * then react to P/F bits — a poll is answered immediately, a final
 * bit ends remote-busy and may trigger retransmission, and a plain RR
 * simply resumes sending.
 */
3959 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3961 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3962 __get_reqseq(chan, rx_control), rx_control);
3964 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3965 l2cap_drop_acked_frames(chan);
/* Poll bit: the peer wants an immediate answer with the F bit set. */
3967 if (__is_ctrl_poll(chan, rx_control)) {
3968 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3969 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3970 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3971 (chan->unacked_frames > 0))
3972 __set_retrans_timer(chan);
3974 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3975 l2cap_send_srejtail(chan);
3977 l2cap_send_i_or_rr_or_rnr(chan);
/* Final bit: answer to our earlier poll. */
3980 } else if (__is_ctrl_final(chan, rx_control)) {
3981 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3983 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3984 l2cap_retransmit_frames(chan);
/* Plain RR: clear remote-busy and push queued I-frames. */
3987 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3988 (chan->unacked_frames > 0))
3989 __set_retrans_timer(chan);
3991 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3992 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
3993 l2cap_send_ack(chan);
3995 l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: everything up to req_seq is acked,
 * everything after must be retransmitted.  When we're waiting for a
 * final bit (CONN_WAIT_F), remember that the REJ was already acted on
 * so the F-bit path doesn't retransmit a second time.
 */
3999 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4001 u16 tx_seq = __get_reqseq(chan, rx_control);
4003 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4005 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4007 chan->expected_ack_seq = tx_seq;
4008 l2cap_drop_acked_frames(chan);
4010 if (__is_ctrl_final(chan, rx_control)) {
/* F bit set: only retransmit if the REJ wasn't handled already. */
4011 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4012 l2cap_retransmit_frames(chan);
4014 l2cap_retransmit_frames(chan);
4016 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4017 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a received SREJ S-frame: retransmit the single requested
 * frame.  The P/F-bit variants additionally ack frames (P) or guard
 * against double retransmission after our own poll (F), mirroring
 * the REJ logic with the srej_save_reqseq bookkeeping.
 */
4020 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4022 u16 tx_seq = __get_reqseq(chan, rx_control);
4024 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4026 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* SREJ with P bit: ack up to tx_seq, retransmit it, answer with F. */
4028 if (__is_ctrl_poll(chan, rx_control)) {
4029 chan->expected_ack_seq = tx_seq;
4030 l2cap_drop_acked_frames(chan);
4032 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4033 l2cap_retransmit_one_frame(chan, tx_seq);
4035 l2cap_ertm_send(chan);
4037 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4038 chan->srej_save_reqseq = tx_seq;
4039 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* SREJ with F bit: skip the retransmit if this same tx_seq was
 * already handled while waiting for the final bit. */
4041 } else if (__is_ctrl_final(chan, rx_control)) {
4042 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4043 chan->srej_save_reqseq == tx_seq)
4044 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4046 l2cap_retransmit_one_frame(chan, tx_seq);
/* Plain SREJ: just retransmit the requested frame. */
4048 l2cap_retransmit_one_frame(chan, tx_seq);
4049 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4050 chan->srej_save_reqseq = tx_seq;
4051 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a received RNR (Receiver Not Ready) S-frame: mark the remote
 * busy, credit acked frames, and answer a poll appropriately —
 * RR/RNR with F bit in the normal case, SREJ-tail or RR while in
 * SREJ recovery.
 */
4056 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4058 u16 tx_seq = __get_reqseq(chan, rx_control);
4060 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4062 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4063 chan->expected_ack_seq = tx_seq;
4064 l2cap_drop_acked_frames(chan);
4066 if (__is_ctrl_poll(chan, rx_control))
4067 set_bit(CONN_SEND_FBIT, &chan->conn_state);
/* Not in SREJ recovery: stop retransmitting and answer the poll. */
4069 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
4070 __clear_retrans_timer(chan);
4071 if (__is_ctrl_poll(chan, rx_control))
4072 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
/* In SREJ recovery: re-ask for the missing tail, or send a plain RR. */
4076 if (__is_ctrl_poll(chan, rx_control)) {
4077 l2cap_send_srejtail(chan);
4079 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4080 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler after
 * common final-bit processing (stop monitor timer, re-arm retransmit
 * timer if needed).
 */
4084 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4086 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4088 if (__is_ctrl_final(chan, rx_control) &&
4089 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4090 __clear_monitor_timer(chan);
4091 if (chan->unacked_frames > 0)
4092 __set_retrans_timer(chan);
4093 clear_bit(CONN_WAIT_F, &chan->conn_state);
4096 switch (__get_ctrl_super(chan, rx_control)) {
4097 case L2CAP_SUPER_RR:
4098 l2cap_data_channel_rrframe(chan, rx_control);
4101 case L2CAP_SUPER_REJ:
4102 l2cap_data_channel_rejframe(chan, rx_control);
4105 case L2CAP_SUPER_SREJ:
4106 l2cap_data_channel_srejframe(chan, rx_control);
4109 case L2CAP_SUPER_RNR:
4110 l2cap_data_channel_rnrframe(chan, rx_control);
/* ERTM frame entry point: strip the control field, verify FCS,
 * validate payload length and req_seq, then route the frame to the
 * I-frame or S-frame handler.  Any validation failure disconnects
 * with ECONNRESET.
 */
4118 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4120 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4123 int len, next_tx_seq_offset, req_seq_offset;
4125 control = __get_control(chan, skb->data);
4126 skb_pull(skb, __ctrl_size(chan));
4130 * We can just drop the corrupted I-frame here.
4131 * Receiver will miss it and start proper recovery
4132 * procedures and ask retransmission.
4134 if (l2cap_check_fcs(chan, skb))
/* Payload length excludes the SDU-length field of a SAR start frame
 * and the FCS trailer (when enabled). */
4137 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4138 len -= L2CAP_SDULEN_SIZE;
4140 if (chan->fcs == L2CAP_FCS_CRC16)
4141 len -= L2CAP_FCS_SIZE;
4143 if (len > chan->mps) {
4144 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4148 req_seq = __get_reqseq(chan, control);
4150 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4152 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4153 chan->expected_ack_seq);
4155 /* check for invalid req-seq */
4156 if (req_seq_offset > next_tx_seq_offset) {
4157 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
/* I-frame path: must carry payload. */
4161 if (!__is_sframe(chan, control)) {
4163 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4167 l2cap_data_channel_iframe(chan, control, skb);
/* S-frame path: must NOT carry payload. */
4171 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4175 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data packet to the channel identified by cid, dispatching
 * per channel mode: basic mode hands the skb to the owner directly,
 * ERTM goes through the state machine (or the socket backlog when the
 * socket is user-locked), streaming mode reassembles best-effort and
 * discards on sequence gaps.
 */
4185 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4187 struct l2cap_chan *chan;
4188 struct sock *sk = NULL;
4193 chan = l2cap_get_chan_by_scid(conn, cid)
4195 BT_DBG("unknown cid 0x%4.4x", cid);
4201 BT_DBG("chan %p, len %d", chan, skb->len);
4203 if (chan->state != BT_CONNECTED)
4206 switch (chan->mode) {
4207 case L2CAP_MODE_BASIC:
4208 /* If socket recv buffers overflows we drop data here
4209 * which is *bad* because L2CAP has to be reliable.
4210 * But we don't have any other choice. L2CAP doesn't
4211 * provide flow control mechanism. */
4213 if (chan->imtu < skb->len)
4216 if (!chan->ops->recv(chan->data, skb))
4220 case L2CAP_MODE_ERTM:
/* Socket locked by userspace: defer to the backlog instead of
 * running the ERTM state machine re-entrantly. */
4221 if (!sock_owned_by_user(sk)) {
4222 l2cap_ertm_data_rcv(sk, skb);
4224 if (sk_add_backlog(sk, skb))
4230 case L2CAP_MODE_STREAMING:
4231 control = __get_control(chan, skb->data);
4232 skb_pull(skb, __ctrl_size(chan));
4235 if (l2cap_check_fcs(chan, skb))
4238 if (__is_sar_start(chan, control))
4239 len -= L2CAP_SDULEN_SIZE;
4241 if (chan->fcs == L2CAP_FCS_CRC16)
4242 len -= L2CAP_FCS_SIZE;
/* Streaming mode carries no S-frames; oversize/negative is invalid. */
4244 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4247 tx_seq = __get_txseq(chan, control);
4249 if (chan->expected_tx_seq != tx_seq) {
4250 /* Frame(s) missing - must discard partial SDU */
4251 kfree_skb(chan->sdu);
4253 chan->sdu_last_frag = NULL;
4256 /* TODO: Notify userland of missing data */
4259 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4261 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4262 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4267 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (G-frame) packet to the global channel
 * registered for the given PSM, subject to state and MTU checks.
 */
4281 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4283 struct sock *sk = NULL;
4284 struct l2cap_chan *chan;
4286 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4294 BT_DBG("sk %p, len %d", sk, skb->len);
4296 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4299 if (chan->imtu < skb->len)
4302 if (!chan->ops->recv(chan->data, skb))
/* Deliver an LE ATT fixed-channel packet to the global channel
 * registered for that CID, subject to state and MTU checks.
 */
4314 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4316 struct sock *sk = NULL;
4317 struct l2cap_chan *chan;
4319 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4327 BT_DBG("sk %p, len %d", sk, skb->len);
4329 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4332 if (chan->imtu < skb->len)
4335 if (!chan->ops->recv(chan->data, skb))
/* Top-level demultiplexer for a complete L2CAP frame: strip the basic
 * header, sanity-check the length, and route by CID — signaling,
 * connectionless, LE ATT, SMP, or a regular data channel.
 */
4347 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4349 struct l2cap_hdr *lh = (void *) skb->data;
4353 skb_pull(skb, L2CAP_HDR_SIZE);
4354 cid = __le16_to_cpu(lh->cid);
4355 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload. */
4357 if (len != skb->len) {
4362 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4365 case L2CAP_CID_LE_SIGNALING:
4366 case L2CAP_CID_SIGNALING:
4367 l2cap_sig_channel(conn, skb);
4370 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM in the first two bytes. */
4371 psm = get_unaligned_le16(skb->data);
4373 l2cap_conless_channel(conn, psm, skb);
4376 case L2CAP_CID_LE_DATA:
4377 l2cap_att_channel(conn, cid, skb);
/* SMP failure is fatal for the whole connection. */
4381 if (smp_sig_channel(conn, skb))
4382 l2cap_conn_del(conn->hcon, EACCES);
4386 l2cap_data_channel(conn, cid, skb);
4391 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection from
 * bdaddr?  Scans listening channels; an exact local-address match
 * (lm1) takes precedence over wildcard listeners (lm2), each
 * contributing ACCEPT and optionally MASTER link-mode bits.
 */
4393 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4395 int exact = 0, lm1 = 0, lm2 = 0;
4396 struct l2cap_chan *c;
4398 if (type != ACL_LINK)
4401 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4403 /* Find listening sockets and check their link_mode */
4404 read_lock(&chan_list_lock);
4405 list_for_each_entry(c, &chan_list, global_l) {
4406 struct sock *sk = c->sk;
4408 if (c->state != BT_LISTEN)
/* Listener bound to this adapter's own address. */
4411 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4412 lm1 |= HCI_LM_ACCEPT;
4413 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4414 lm1 |= HCI_LM_MASTER;
/* Wildcard listener (bound to BDADDR_ANY). */
4416 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4417 lm2 |= HCI_LM_ACCEPT;
4418 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4419 lm2 |= HCI_LM_MASTER;
4422 read_unlock(&chan_list_lock);
4424 return exact ? lm1 : lm2;
/* HCI callback: an ACL/LE connection attempt completed.  On success
 * create the L2CAP connection object and mark it ready; on failure
 * tear down any existing state with the mapped errno.
 */
4427 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4429 struct l2cap_conn *conn;
4431 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4433 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4437 conn = l2cap_conn_add(hcon, status);
4439 l2cap_conn_ready(conn);
4441 l2cap_conn_del(hcon, bt_to_errno(status));
/* HCI callback: supply the disconnect reason recorded on the L2CAP
 * connection, falling back to "remote user terminated" when there is
 * no L2CAP state for this link.
 */
4446 static int l2cap_disconn_ind(struct hci_conn *hcon)
4448 struct l2cap_conn *conn = hcon->l2cap_data;
4450 BT_DBG("hcon %p", hcon);
4452 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4453 return HCI_ERROR_REMOTE_USER_TERM;
4455 return conn->disc_reason;
/* HCI callback: the link is gone — tear down the L2CAP connection
 * with the HCI reason mapped to an errno.
 */
4458 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4460 BT_DBG("hcon %p reason %d", hcon, reason);
4462 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4465 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 * Losing encryption gives MEDIUM-security channels a grace timer and
 * closes HIGH-security channels outright; gaining it cancels the
 * pending timer for MEDIUM-security channels.
 */
4470 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4472 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4475 if (encrypt == 0x00) {
4476 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4477 __clear_chan_timer(chan);
4478 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4479 } else if (chan->sec_level == BT_SECURITY_HIGH)
4480 l2cap_chan_close(chan, ECONNREFUSED);
4482 if (chan->sec_level == BT_SECURITY_MEDIUM)
4483 __clear_chan_timer(chan);
/* HCI callback: an authentication/encryption procedure finished with
 * the given status.  For LE links, distribute SMP keys and stop the
 * security timer.  Then walk every channel on the connection:
 * established channels get encryption-change handling, channels in
 * BT_CONNECT (that were waiting on security) send their Connection
 * Request, and incoming channels in BT_CONNECT2 are answered —
 * pending authorization, accepted into config, or blocked — based on
 * the status and defer_setup.
 * NOTE(review): elided listing — several guards (status checks,
 * timers) around the branches below are not visible here.
 */
4487 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4489 struct l2cap_conn *conn = hcon->l2cap_data;
4490 struct l2cap_chan *chan;
4495 BT_DBG("conn %p", conn);
4497 if (hcon->type == LE_LINK) {
4498 smp_distribute_keys(conn, 0);
4499 del_timer(&conn->security_timer);
4502 read_lock(&conn->chan_lock);
4504 list_for_each_entry(chan, &conn->chan_l, list) {
4505 struct sock *sk = chan->sk;
4509 BT_DBG("chan->scid %d", chan->scid);
/* LE data channel: successful encryption promotes the channel's
 * security level and makes it ready. */
4511 if (chan->scid == L2CAP_CID_LE_DATA) {
4512 if (!status && encrypt) {
4513 chan->sec_level = hcon->sec_level;
4514 l2cap_chan_ready(sk);
4521 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4526 if (!status && (chan->state == BT_CONNECTED ||
4527 chan->state == BT_CONFIG)) {
4528 l2cap_check_encryption(chan, encrypt);
/* Outgoing channel that was waiting for security: now send the
 * Connection Request (or arm the disconnect timer on failure). */
4533 if (chan->state == BT_CONNECT) {
4535 struct l2cap_conn_req req;
4536 req.scid = cpu_to_le16(chan->scid);
4537 req.psm = chan->psm;
4539 chan->ident = l2cap_get_ident(conn);
4540 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4542 l2cap_send_cmd(conn, chan->ident,
4543 L2CAP_CONN_REQ, sizeof(req), &req);
4545 __clear_chan_timer(chan);
4546 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
/* Incoming channel awaiting security: answer the pending
 * Connection Request now that the verdict is in. */
4548 } else if (chan->state == BT_CONNECT2) {
4549 struct l2cap_conn_rsp rsp;
4553 if (bt_sk(sk)->defer_setup) {
4554 struct sock *parent = bt_sk(sk)->parent;
4555 res = L2CAP_CR_PEND;
4556 stat = L2CAP_CS_AUTHOR_PEND;
4558 parent->sk_data_ready(parent, 0);
4560 l2cap_state_change(chan, BT_CONFIG);
4561 res = L2CAP_CR_SUCCESS;
4562 stat = L2CAP_CS_NO_INFO;
/* Security failed: refuse the connection. */
4565 l2cap_state_change(chan, BT_DISCONN);
4566 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4567 res = L2CAP_CR_SEC_BLOCK;
4568 stat = L2CAP_CS_NO_INFO;
4571 rsp.scid = cpu_to_le16(chan->dcid);
4572 rsp.dcid = cpu_to_le16(chan->scid);
4573 rsp.result = cpu_to_le16(res);
4574 rsp.status = cpu_to_le16(stat);
4575 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4582 read_unlock(&conn->chan_lock);
/* HCI callback: an ACL data packet (possibly an L2CAP fragment)
 * arrived on @hcon.
 * @flags: ACL packet-boundary flags; ACL_CONT marks a continuation
 *         fragment, otherwise this is a start fragment.
 *
 * Start fragments carrying a complete frame are dispatched directly to
 * l2cap_recv_frame(); otherwise a reassembly buffer (conn->rx_skb) is
 * allocated and continuation fragments are appended until the expected
 * length (conn->rx_len) is consumed.  Any framing violation marks the
 * connection unreliable (ECOMM). */
4587 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4589 struct l2cap_conn *conn = hcon->l2cap_data;
/* Lazily create the l2cap_conn if data arrives before connect_cfm. */
4592 conn = l2cap_conn_add(hcon, 0);
4597 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4599 if (!(flags & ACL_CONT)) {
4600 struct l2cap_hdr *hdr;
4601 struct l2cap_chan *chan;
/* A new start fragment while reassembly is in progress means the
 * previous frame was truncated: drop it and flag the connection. */
4606 BT_ERR("Unexpected start frame (len %d)", skb->len);
4607 kfree_skb(conn->rx_skb);
4608 conn->rx_skb = NULL;
4610 l2cap_conn_unreliable(conn, ECOMM);
4613 /* Start fragment always begin with Basic L2CAP header */
4614 if (skb->len < L2CAP_HDR_SIZE) {
4615 BT_ERR("Frame is too short (len %d)", skb->len);
4616 l2cap_conn_unreliable(conn, ECOMM);
4620 hdr = (struct l2cap_hdr *) skb->data;
/* Total frame length = payload length from header + header itself. */
4621 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4622 cid = __le16_to_cpu(hdr->cid);
4624 if (len == skb->len) {
4625 /* Complete frame received */
4626 l2cap_recv_frame(conn, skb);
4630 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4632 if (skb->len > len) {
4633 BT_ERR("Frame is too long (len %d, expected len %d)",
4635 l2cap_conn_unreliable(conn, ECOMM);
/* Reject frames that would exceed the destination channel's
 * receive MTU before committing a reassembly buffer. */
4639 chan = l2cap_get_chan_by_scid(conn, cid);
4641 if (chan && chan->sk) {
4642 struct sock *sk = chan->sk;
4644 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4645 BT_ERR("Frame exceeding recv MTU (len %d, "
4649 l2cap_conn_unreliable(conn, ECOMM);
4655 /* Allocate skb for the complete frame (with header) */
4656 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4660 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Remaining bytes expected from continuation fragments. */
4662 conn->rx_len = len - skb->len;
4664 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation fragment with no reassembly in progress. */
4666 if (!conn->rx_len) {
4667 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4668 l2cap_conn_unreliable(conn, ECOMM);
4672 if (skb->len > conn->rx_len) {
4673 BT_ERR("Fragment is too long (len %d, expected %d)",
4674 skb->len, conn->rx_len);
4675 kfree_skb(conn->rx_skb);
4676 conn->rx_skb = NULL;
4678 l2cap_conn_unreliable(conn, ECOMM);
/* Append this fragment and dispatch once the frame is whole. */
4682 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4684 conn->rx_len -= skb->len;
4686 if (!conn->rx_len) {
4687 /* Complete frame received */
4688 l2cap_recv_frame(conn, conn->rx_skb);
4689 conn->rx_skb = NULL;
/* seq_file "show" callback for the l2cap debugfs entry: dumps one line
 * per registered channel — source/dest bdaddr, state, PSM, CIDs, MTUs,
 * security level and mode — while holding chan_list_lock. */
4698 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4700 struct l2cap_chan *c;
4702 read_lock_bh(&chan_list_lock);
4704 list_for_each_entry(c, &chan_list, global_l) {
4705 struct sock *sk = c->sk;
4707 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4708 batostr(&bt_sk(sk)->src),
4709 batostr(&bt_sk(sk)->dst),
/* psm is stored little-endian on the channel; convert for display. */
4710 c->state, __le16_to_cpu(c->psm),
4711 c->scid, c->dcid, c->imtu, c->omtu,
4712 c->sec_level, c->mode);
4715 read_unlock_bh(&chan_list_lock);
/* debugfs open: standard single_open() wrapper around the show
 * callback above. */
4720 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4722 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file based). */
4725 static const struct file_operations l2cap_debugfs_fops = {
4726 .open = l2cap_debugfs_open,
4728 .llseek = seq_lseek,
4729 .release = single_release,
/* Dentry of the debugfs file, created in l2cap_init() and removed in
 * l2cap_exit(). */
4732 static struct dentry *l2cap_debugfs;
/* Registration record hooking L2CAP into the HCI core: connection
 * setup/teardown indications, security results and inbound ACL data
 * are all routed through the callbacks defined above. */
4734 static struct hci_proto l2cap_hci_proto = {
4736 .id = HCI_PROTO_L2CAP,
4737 .connect_ind = l2cap_connect_ind,
4738 .connect_cfm = l2cap_connect_cfm,
4739 .disconn_ind = l2cap_disconn_ind,
4740 .disconn_cfm = l2cap_disconn_cfm,
4741 .security_cfm = l2cap_security_cfm,
4742 .recv_acldata = l2cap_recv_acldata
/* Module init: register the L2CAP socket family, hook into the HCI
 * core, and create the optional debugfs entry.  A failed protocol
 * registration rolls back the socket registration; a missing debugfs
 * file is only logged, not fatal. */
4745 int __init l2cap_init(void)
4749 err = l2cap_init_sockets();
4753 err = hci_register_proto(&l2cap_hci_proto);
4755 BT_ERR("L2CAP protocol registration failed");
/* Roll back the socket registration done above. */
4756 bt_sock_unregister(BTPROTO_L2CAP);
4761 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4762 bt_debugfs, NULL, &l2cap_debugfs_fops);
4764 BT_ERR("Failed to create L2CAP debug file");
/* Error path: undo socket setup before propagating err. */
4770 l2cap_cleanup_sockets();
/* Module exit: remove the debugfs entry, detach from the HCI core and
 * tear down the socket family — the reverse of l2cap_init(). */
4774 void l2cap_exit(void)
4776 debugfs_remove(l2cap_debugfs);
4778 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4779 BT_ERR("L2CAP protocol unregistration failed");
4781 l2cap_cleanup_sockets();
/* Module parameters (writable at runtime via sysfs, mode 0644). */
4784 module_param(disable_ertm, bool, 0644);
4785 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4787 module_param(enable_hs, bool, 0644);
4788 MODULE_PARM_DESC(enable_hs, "Enable High Speed");