2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
57 #include <net/bluetooth/smp.h>
/* Module-scope state: advertised L2CAP feature mask, the fixed-channel
 * bitmap, and the global list of all channels (protected by the rwlock
 * below). Forward declarations follow for routines used before their
 * definitions. */
61 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
62 static u8 l2cap_fixed_chan[8] = { L2CAP_FC_L2CAP, };
64 static LIST_HEAD(chan_list);
65 static DEFINE_RWLOCK(chan_list_lock);
67 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
68 u8 code, u8 ident, u16 dlen, void *data);
69 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len,
71 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data);
72 static void l2cap_send_disconn_req(struct l2cap_conn *conn,
73 struct l2cap_chan *chan, int err);
75 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
77 /* ---- L2CAP channels ---- */
/* Take a reference on the channel; pairs with chan_put(). */
79 static inline void chan_hold(struct l2cap_chan *c)
81 atomic_inc(&c->refcnt);
/* Drop a reference; on the final put the channel is released
 * (the free path is elided in this extract). */
84 static inline void chan_put(struct l2cap_chan *c)
86 if (atomic_dec_and_test(&c->refcnt))
/* Look up a channel on this connection by destination CID.
 * Caller must hold conn->chan_lock; the match test is elided here. */
90 static struct l2cap_chan *__l2cap_get_chan_by_dcid(struct l2cap_conn *conn, u16 cid)
94 list_for_each_entry(c, &conn->chan_l, list) {
/* Look up a channel on this connection by source CID.
 * Caller must hold conn->chan_lock; the match test is elided here. */
101 static struct l2cap_chan *__l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
103 struct l2cap_chan *c;
105 list_for_each_entry(c, &conn->chan_l, list) {
112 /* Find channel with given SCID.
113 * Returns locked socket */
114 static struct l2cap_chan *l2cap_get_chan_by_scid(struct l2cap_conn *conn, u16 cid)
116 struct l2cap_chan *c;
/* Locked wrapper: takes chan_lock around the unlocked helper.
 * The socket-locking step mentioned above is elided in this extract. */
118 read_lock(&conn->chan_lock);
119 c = __l2cap_get_chan_by_scid(conn, cid);
122 read_unlock(&conn->chan_lock);
/* Find the channel waiting on a given signalling-command ident.
 * Caller must hold conn->chan_lock. */
126 static struct l2cap_chan *__l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
128 struct l2cap_chan *c;
130 list_for_each_entry(c, &conn->chan_l, list) {
131 if (c->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
137 static inline struct l2cap_chan *l2cap_get_chan_by_ident(struct l2cap_conn *conn, u8 ident)
139 struct l2cap_chan *c;
141 read_lock(&conn->chan_lock);
142 c = __l2cap_get_chan_by_ident(conn, ident);
145 read_unlock(&conn->chan_lock);
/* Global search: find a channel whose bound source port matches @psm
 * on source address @src. Caller must hold chan_list_lock. */
149 static struct l2cap_chan *__l2cap_global_chan_by_addr(__le16 psm, bdaddr_t *src)
151 struct l2cap_chan *c;
153 list_for_each_entry(c, &chan_list, global_l) {
154 if (c->sport == psm && !bacmp(&bt_sk(c->sk)->src, src))
/* Bind a channel to a PSM. A non-zero @psm that is already bound on
 * this source address is rejected; with psm == 0 an unused value in
 * the 0x1001..0x10ff range is auto-allocated. The += 2 stride keeps
 * the candidate PSMs odd, as L2CAP requires for valid PSMs. */
160 int l2cap_add_psm(struct l2cap_chan *chan, bdaddr_t *src, __le16 psm)
164 write_lock_bh(&chan_list_lock);
/* Explicit PSM requested: fail if someone already holds it
 * (error path elided in this extract). */
166 if (psm && __l2cap_global_chan_by_addr(psm, src)) {
179 for (p = 0x1001; p < 0x1100; p += 2)
180 if (!__l2cap_global_chan_by_addr(cpu_to_le16(p), src)) {
181 chan->psm = cpu_to_le16(p);
182 chan->sport = cpu_to_le16(p);
189 write_unlock_bh(&chan_list_lock);
/* Bind a fixed source CID to the channel; the assignment itself is
 * elided in this extract. Serialised by chan_list_lock. */
193 int l2cap_add_scid(struct l2cap_chan *chan, __u16 scid)
195 write_lock_bh(&chan_list_lock);
199 write_unlock_bh(&chan_list_lock);
/* Allocate the first unused dynamic CID on this connection by linear
 * scan of [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
204 static u16 l2cap_alloc_cid(struct l2cap_conn *conn)
206 u16 cid = L2CAP_CID_DYN_START;
208 for (; cid < L2CAP_CID_DYN_END; cid++) {
209 if (!__l2cap_get_chan_by_scid(conn, cid))
/* (Re)arm one of the channel's delayed works. Any pending instance is
 * cancelled synchronously first so at most one run is ever queued. */
216 static void l2cap_set_timer(struct l2cap_chan *chan, struct delayed_work *work, long timeout)
218 BT_DBG("chan %p state %d timeout %ld", chan, chan->state, timeout);
220 cancel_delayed_work_sync(work);
222 schedule_delayed_work(work, timeout);
/* Stop a channel timer, waiting for a running instance to finish. */
225 static void l2cap_clear_timer(struct delayed_work *work)
227 cancel_delayed_work_sync(work);
/* Map a BT_* channel state to a printable name for debug logging.
 * Most case labels are elided in this extract; unknown states fall
 * through to "invalid state". */
230 static char *state_to_string(int state)
234 return "BT_CONNECTED";
244 return "BT_CONNECT2";
253 return "invalid state";
/* Move the channel to @state and notify the owner through its
 * state_change callback. */
256 static void l2cap_state_change(struct l2cap_chan *chan, int state)
258 BT_DBG("%p %s -> %s", chan, state_to_string(chan->state),
259 state_to_string(state));
262 chan->ops->state_change(chan->data, state);
/* chan_timer expiry handler: tear the channel down. The errno passed
 * to the close path depends on how far setup got — an established or
 * configuring channel (or an authenticated connect attempt) reports
 * ECONNREFUSED; the default branch is elided in this extract. */
265 static void l2cap_chan_timeout(struct work_struct *work)
267 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
269 struct sock *sk = chan->sk;
272 BT_DBG("chan %p state %d", chan, chan->state);
276 if (chan->state == BT_CONNECTED || chan->state == BT_CONFIG)
277 reason = ECONNREFUSED;
278 else if (chan->state == BT_CONNECT &&
279 chan->sec_level != BT_SECURITY_SDP)
280 reason = ECONNREFUSED;
284 l2cap_chan_close(chan, reason);
/* Let the owner (socket layer) release its side as well. */
288 chan->ops->close(chan->data);
/* Allocate a channel bound to @sk, add it to the global channel list,
 * and initialise its timeout work, state and refcount. The NULL check
 * after kzalloc() is elided in this extract — presumably returns NULL
 * on allocation failure (TODO confirm against full source). */
292 struct l2cap_chan *l2cap_chan_create(struct sock *sk)
294 struct l2cap_chan *chan;
296 chan = kzalloc(sizeof(*chan), GFP_ATOMIC);
302 write_lock_bh(&chan_list_lock);
303 list_add(&chan->global_l, &chan_list);
304 write_unlock_bh(&chan_list_lock);
306 INIT_DELAYED_WORK(&chan->chan_timer, l2cap_chan_timeout);
308 chan->state = BT_OPEN;
/* Initial reference owned by the caller. */
310 atomic_set(&chan->refcnt, 1);
312 BT_DBG("sk %p chan %p", sk, chan);
/* Remove the channel from the global list; the final reference drop
 * is elided in this extract. */
317 void l2cap_chan_destroy(struct l2cap_chan *chan)
319 write_lock_bh(&chan_list_lock);
320 list_del(&chan->global_l);
321 write_unlock_bh(&chan_list_lock);
/* Attach @chan to @conn: pick source/destination CIDs and default MTU
 * according to the channel type, set default QoS parameters, and link
 * the channel on conn->chan_l. Caller must hold conn->chan_lock. */
326 static void __l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
328 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
329 chan->psm, chan->dcid);
/* Default disconnect reason until the remote tells us otherwise. */
331 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
335 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED) {
/* LE links use the fixed LE data CID in both directions. */
336 if (conn->hcon->type == LE_LINK) {
338 chan->omtu = L2CAP_LE_DEFAULT_MTU;
339 chan->scid = L2CAP_CID_LE_DATA;
340 chan->dcid = L2CAP_CID_LE_DATA;
342 /* Alloc CID for connection-oriented socket */
343 chan->scid = l2cap_alloc_cid(conn);
344 chan->omtu = L2CAP_DEFAULT_MTU;
346 } else if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
347 /* Connectionless socket */
348 chan->scid = L2CAP_CID_CONN_LESS;
349 chan->dcid = L2CAP_CID_CONN_LESS;
350 chan->omtu = L2CAP_DEFAULT_MTU;
352 /* Raw socket can send/recv signalling messages only */
353 chan->scid = L2CAP_CID_SIGNALING;
354 chan->dcid = L2CAP_CID_SIGNALING;
355 chan->omtu = L2CAP_DEFAULT_MTU;
/* Best-effort defaults for the extended flow specification. */
358 chan->local_id = L2CAP_BESTEFFORT_ID;
359 chan->local_stype = L2CAP_SERV_BESTEFFORT;
360 chan->local_msdu = L2CAP_DEFAULT_MAX_SDU_SIZE;
361 chan->local_sdu_itime = L2CAP_DEFAULT_SDU_ITIME;
362 chan->local_acc_lat = L2CAP_DEFAULT_ACC_LAT;
363 chan->local_flush_to = L2CAP_DEFAULT_FLUSH_TO;
367 list_add(&chan->list, &conn->chan_l);
371 * Must be called on the locked socket. */
/* Detach @chan from its connection and report @err to the socket:
 * unlink from conn->chan_l, mark the socket closed/zapped, wake the
 * owner (or the listening parent for unaccepted children), and purge
 * all pending transmit/ERTM state. */
372 static void l2cap_chan_del(struct l2cap_chan *chan, int err)
374 struct sock *sk = chan->sk;
375 struct l2cap_conn *conn = chan->conn;
376 struct sock *parent = bt_sk(sk)->parent;
378 __clear_chan_timer(chan);
380 BT_DBG("chan %p, conn %p, err %d", chan, conn, err);
383 /* Delete from channel list */
384 write_lock_bh(&conn->chan_lock);
385 list_del(&chan->list);
386 write_unlock_bh(&conn->chan_lock);
/* Drop the hci_conn reference held for this channel. */
390 hci_conn_put(conn->hcon);
393 l2cap_state_change(chan, BT_CLOSED);
394 sock_set_flag(sk, SOCK_ZAPPED);
/* Not-yet-accepted child: unlink it and wake the listener. */
400 bt_accept_unlink(sk);
401 parent->sk_data_ready(parent, 0);
403 sk->sk_state_change(sk);
/* Skip the teardown below if configuration fully completed
 * (the early-return between these lines is elided here). */
405 if (!(test_bit(CONF_OUTPUT_DONE, &chan->conf_state) &&
406 test_bit(CONF_INPUT_DONE, &chan->conf_state)))
409 skb_queue_purge(&chan->tx_q);
/* ERTM channels also carry retransmission state to tear down. */
411 if (chan->mode == L2CAP_MODE_ERTM) {
412 struct srej_list *l, *tmp;
414 __clear_retrans_timer(chan);
415 __clear_monitor_timer(chan);
416 __clear_ack_timer(chan);
418 skb_queue_purge(&chan->srej_q);
/* Free every queued selective-reject entry (kfree elided). */
420 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
/* Close and release every not-yet-accepted child channel queued on a
 * listening socket. */
427 static void l2cap_chan_cleanup_listen(struct sock *parent)
431 BT_DBG("parent %p", parent);
433 /* Close not yet accepted channels */
434 while ((sk = bt_accept_dequeue(parent, NULL))) {
435 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
436 __clear_chan_timer(chan);
438 l2cap_chan_close(chan, ECONNRESET);
440 chan->ops->close(chan->data);
/* Close a channel according to its current state: a listening socket
 * reaps pending children; an established/configuring ACL channel
 * sends an L2CAP Disconnect Request; a BT_CONNECT2 acceptor rejects
 * the pending Connect Request; anything else is deleted directly.
 * (Case labels and fall-throughs are elided in this extract.) */
444 void l2cap_chan_close(struct l2cap_chan *chan, int reason)
446 struct l2cap_conn *conn = chan->conn;
447 struct sock *sk = chan->sk;
449 BT_DBG("chan %p state %d socket %p", chan, chan->state, sk->sk_socket);
451 switch (chan->state) {
453 l2cap_chan_cleanup_listen(sk);
455 l2cap_state_change(chan, BT_CLOSED);
456 sock_set_flag(sk, SOCK_ZAPPED);
/* Connection-oriented on ACL: do a proper signalled disconnect,
 * re-arming the channel timer to bound the wait for the response. */
461 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
462 conn->hcon->type == ACL_LINK) {
463 __clear_chan_timer(chan);
464 __set_chan_timer(chan, sk->sk_sndtimeo);
465 l2cap_send_disconn_req(conn, chan, reason);
467 l2cap_chan_del(chan, reason);
/* BT_CONNECT2: a remote Connect Request is pending — answer it
 * with a rejection before deleting the channel. */
471 if (chan->chan_type == L2CAP_CHAN_CONN_ORIENTED &&
472 conn->hcon->type == ACL_LINK) {
473 struct l2cap_conn_rsp rsp;
476 if (bt_sk(sk)->defer_setup)
477 result = L2CAP_CR_SEC_BLOCK;
479 result = L2CAP_CR_BAD_PSM;
480 l2cap_state_change(chan, BT_DISCONN);
/* Note: scid/dcid deliberately swapped — the response carries
 * the CIDs from the peer's point of view. */
482 rsp.scid = cpu_to_le16(chan->dcid);
483 rsp.dcid = cpu_to_le16(chan->scid);
484 rsp.result = cpu_to_le16(result);
485 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
486 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
490 l2cap_chan_del(chan, reason);
495 l2cap_chan_del(chan, reason);
499 sock_set_flag(sk, SOCK_ZAPPED);
/* Derive the HCI authentication requirement from channel type and
 * security level: raw channels request dedicated bonding, PSM 0x0001
 * (SDP) never bonds, and everything else uses general bonding, with
 * MITM protection at BT_SECURITY_HIGH. */
504 static inline u8 l2cap_get_auth_type(struct l2cap_chan *chan)
506 if (chan->chan_type == L2CAP_CHAN_RAW) {
507 switch (chan->sec_level) {
508 case BT_SECURITY_HIGH:
509 return HCI_AT_DEDICATED_BONDING_MITM;
510 case BT_SECURITY_MEDIUM:
511 return HCI_AT_DEDICATED_BONDING;
513 return HCI_AT_NO_BONDING;
515 } else if (chan->psm == cpu_to_le16(0x0001)) {
/* SDP connections are downgraded to the dedicated SDP level. */
516 if (chan->sec_level == BT_SECURITY_LOW)
517 chan->sec_level = BT_SECURITY_SDP;
519 if (chan->sec_level == BT_SECURITY_HIGH)
520 return HCI_AT_NO_BONDING_MITM;
522 return HCI_AT_NO_BONDING;
524 switch (chan->sec_level) {
525 case BT_SECURITY_HIGH:
526 return HCI_AT_GENERAL_BONDING_MITM;
527 case BT_SECURITY_MEDIUM:
528 return HCI_AT_GENERAL_BONDING;
530 return HCI_AT_NO_BONDING;
535 /* Service level security */
/* Ask the HCI layer to enforce the channel's security level with the
 * authentication type derived above; returns its verdict. */
536 int l2cap_chan_check_security(struct l2cap_chan *chan)
538 struct l2cap_conn *conn = chan->conn;
541 auth_type = l2cap_get_auth_type(chan);
543 return hci_conn_security(conn->hcon, chan->sec_level, auth_type);
/* Allocate the next signalling-command identifier under conn->lock.
 * Wraps within the kernel-reserved 1..128 range (the wrap assignment
 * is elided in this extract). */
546 static u8 l2cap_get_ident(struct l2cap_conn *conn)
550 /* Get next available identificator.
551 * 1 - 128 are used by kernel.
552 * 129 - 199 are reserved.
553 * 200 - 254 are used by utilities like l2ping, etc.
556 spin_lock_bh(&conn->lock);
558 if (++conn->tx_ident > 128)
563 spin_unlock_bh(&conn->lock);
/* Build and transmit an L2CAP signalling command. Signalling traffic
 * is marked non-flushable when the controller supports it, forced
 * active, and sent at the highest HCI priority. */
568 static void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
570 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
573 BT_DBG("code 0x%2.2x", code);
578 if (lmp_no_flush_capable(conn->hcon->hdev))
579 flags = ACL_START_NO_FLUSH;
583 bt_cb(skb)->force_active = BT_POWER_FORCE_ACTIVE_ON;
584 skb->priority = HCI_PRIO_MAX;
586 hci_send_acl(conn->hchan, skb, flags);
/* Push one data frame to the HCI layer for this channel. The frame is
 * sent non-flushable unless the channel was explicitly flagged
 * flushable (or the controller cannot do non-flushable packets). */
589 static void l2cap_do_send(struct l2cap_chan *chan, struct sk_buff *skb)
591 struct hci_conn *hcon = chan->conn->hcon;
594 BT_DBG("chan %p, skb %p len %d priority %u", chan, skb, skb->len,
597 if (!test_bit(FLAG_FLUSHABLE, &chan->flags) &&
598 lmp_no_flush_capable(hcon->hdev))
599 flags = ACL_START_NO_FLUSH;
603 bt_cb(skb)->force_active = test_bit(FLAG_FORCE_ACTIVE, &chan->flags);
604 hci_send_acl(chan->conn->hchan, skb, flags);
/* Build and send an ERTM supervisory (S-) frame carrying @control.
 * Header size depends on whether extended control fields are in use;
 * a CRC16 FCS is appended when the channel negotiated it. Pending
 * Final/Poll bits in conn_state are folded into the control field. */
607 static inline void l2cap_send_sframe(struct l2cap_chan *chan, u32 control)
610 struct l2cap_hdr *lh;
611 struct l2cap_conn *conn = chan->conn;
/* S-frames only make sense on an established channel. */
614 if (chan->state != BT_CONNECTED)
617 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
618 hlen = L2CAP_EXT_HDR_SIZE;
620 hlen = L2CAP_ENH_HDR_SIZE;
622 if (chan->fcs == L2CAP_FCS_CRC16)
623 hlen += L2CAP_FCS_SIZE;
625 BT_DBG("chan %p, control 0x%8.8x", chan, control);
627 count = min_t(unsigned int, conn->mtu, hlen);
629 control |= __set_sframe(chan);
/* Consume any deferred Final / Poll bit exactly once. */
631 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
632 control |= __set_ctrl_final(chan);
634 if (test_and_clear_bit(CONN_SEND_PBIT, &chan->conn_state))
635 control |= __set_ctrl_poll(chan);
637 skb = bt_skb_alloc(count, GFP_ATOMIC)
641 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
642 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
643 lh->cid = cpu_to_le16(chan->dcid);
645 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
/* FCS covers everything up to (but not including) itself. */
647 if (chan->fcs == L2CAP_FCS_CRC16) {
648 u16 fcs = crc16(0, (u8 *)lh, count - L2CAP_FCS_SIZE);
649 put_unaligned_le16(fcs, skb_put(skb, L2CAP_FCS_SIZE));
652 skb->priority = HCI_PRIO_MAX;
653 l2cap_do_send(chan, skb);
/* Acknowledge with RNR when the local receiver is busy (recording that
 * an RNR went out), otherwise with RR, carrying the current reqseq. */
656 static inline void l2cap_send_rr_or_rnr(struct l2cap_chan *chan, u32 control)
658 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
659 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
660 set_bit(CONN_RNR_SENT, &chan->conn_state);
662 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
664 control |= __set_reqseq(chan, chan->buffer_seq);
666 l2cap_send_sframe(chan, control);
/* True when no L2CAP Connect Request is outstanding on this channel. */
669 static inline int __l2cap_no_conn_pending(struct l2cap_chan *chan)
671 return !test_bit(CONF_CONNECT_PEND, &chan->conf_state);
/* Kick off channel establishment. If the remote feature mask is
 * already known (or being fetched), send a Connect Request once
 * security clears; otherwise first issue an Information Request for
 * the feature mask and arm the info timer. */
674 static void l2cap_do_start(struct l2cap_chan *chan)
676 struct l2cap_conn *conn = chan->conn;
678 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight: wait for it to finish. */
679 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
682 if (l2cap_chan_check_security(chan) &&
683 __l2cap_no_conn_pending(chan)) {
684 struct l2cap_conn_req req;
685 req.scid = cpu_to_le16(chan->scid);
688 chan->ident = l2cap_get_ident(conn);
689 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
691 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
/* No feature mask yet: ask for it before connecting. */
695 struct l2cap_info_req req;
696 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
698 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
699 conn->info_ident = l2cap_get_ident(conn);
701 mod_timer(&conn->info_timer, jiffies +
702 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
704 l2cap_send_cmd(conn, conn->info_ident,
705 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether @mode is usable: it must be advertised by both the
 * remote feature mask and our local one. NOTE(review): the condition
 * guarding the ERTM/streaming enable below (if any) is elided in this
 * extract — confirm against the full source. Basic mode's default
 * branch is also elided. */
709 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
711 u32 local_feat_mask = l2cap_feat_mask;
713 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
716 case L2CAP_MODE_ERTM:
717 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
718 case L2CAP_MODE_STREAMING:
719 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Initiate a signalled disconnect: stop ERTM timers, send an L2CAP
 * Disconnect Request for the channel's CID pair and move the channel
 * to BT_DISCONN (the socket error-reporting step is elided here). */
725 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct l2cap_chan *chan, int err)
728 struct l2cap_disconn_req req;
735 if (chan->mode == L2CAP_MODE_ERTM) {
736 __clear_retrans_timer(chan);
737 __clear_monitor_timer(chan);
738 __clear_ack_timer(chan);
741 req.dcid = cpu_to_le16(chan->dcid);
742 req.scid = cpu_to_le16(chan->scid);
743 l2cap_send_cmd(conn, l2cap_get_ident(conn),
744 L2CAP_DISCONN_REQ, sizeof(req), &req);
746 l2cap_state_change(chan, BT_DISCONN);
750 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and push it forward:
 * BT_CONNECT channels get a Connect Request sent (or are closed if
 * their mode is unsupported by the peer); BT_CONNECT2 channels get a
 * Connect Response, followed by the first Configure Request on
 * success. (Several continue/goto lines are elided in this extract.) */
751 static void l2cap_conn_start(struct l2cap_conn *conn)
753 struct l2cap_chan *chan, *tmp;
755 BT_DBG("conn %p", conn);
757 read_lock(&conn->chan_lock);
759 list_for_each_entry_safe(chan, tmp, &conn->chan_l, list) {
760 struct sock *sk = chan->sk;
/* Only connection-oriented channels need the state machine. */
764 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
769 if (chan->state == BT_CONNECT) {
770 struct l2cap_conn_req req;
772 if (!l2cap_chan_check_security(chan) ||
773 !__l2cap_no_conn_pending(chan)) {
/* Peer does not support the requested mode and we cannot
 * fall back: abandon the channel. */
778 if (!l2cap_mode_supported(chan->mode, conn->feat_mask)
779 && test_bit(CONF_STATE2_DEVICE,
780 &chan->conf_state)) {
781 /* l2cap_chan_close() calls list_del(chan)
782 * so release the lock */
783 read_unlock(&conn->chan_lock);
784 l2cap_chan_close(chan, ECONNRESET);
785 read_lock(&conn->chan_lock);
790 req.scid = cpu_to_le16(chan->scid);
793 chan->ident = l2cap_get_ident(conn);
794 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
796 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_REQ,
799 } else if (chan->state == BT_CONNECT2) {
800 struct l2cap_conn_rsp rsp;
/* Response CIDs are from the peer's perspective, hence the swap. */
802 rsp.scid = cpu_to_le16(chan->dcid);
803 rsp.dcid = cpu_to_le16(chan->scid);
805 if (l2cap_chan_check_security(chan)) {
806 if (bt_sk(sk)->defer_setup) {
807 struct sock *parent = bt_sk(sk)->parent;
808 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
809 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
811 parent->sk_data_ready(parent, 0);
814 l2cap_state_change(chan, BT_CONFIG);
815 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
816 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
819 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
820 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
823 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
/* Only a fresh success proceeds to configuration. */
826 if (test_bit(CONF_REQ_SENT, &chan->conf_state) ||
827 rsp.result != L2CAP_CR_SUCCESS) {
832 set_bit(CONF_REQ_SENT, &chan->conf_state);
833 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
834 l2cap_build_conf_req(chan, buf), buf);
835 chan->num_conf_req++;
841 read_unlock(&conn->chan_lock);
844 /* Find socket with cid and source bdaddr.
845 * Returns closest match, locked.
/* Exact source-address match returns immediately; a BDADDR_ANY
 * binding is remembered as the closest fallback (the c1 assignment is
 * elided in this extract). */
847 static struct l2cap_chan *l2cap_global_chan_by_scid(int state, __le16 cid, bdaddr_t *src)
849 struct l2cap_chan *c, *c1 = NULL;
851 read_lock(&chan_list_lock);
853 list_for_each_entry(c, &chan_list, global_l) {
854 struct sock *sk = c->sk;
856 if (state && c->state != state)
859 if (c->scid == cid) {
861 if (!bacmp(&bt_sk(sk)->src, src)) {
862 read_unlock(&chan_list_lock);
867 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
872 read_unlock(&chan_list_lock);
/* Incoming LE link became ready: if a socket is listening on the LE
 * data CID, spawn a child channel, attach it to the connection, queue
 * it for accept() and mark it connected. */
877 static void l2cap_le_conn_ready(struct l2cap_conn *conn)
879 struct sock *parent, *sk;
880 struct l2cap_chan *chan, *pchan;
884 /* Check if we have socket listening on cid */
885 pchan = l2cap_global_chan_by_scid(BT_LISTEN, L2CAP_CID_LE_DATA,
892 bh_lock_sock(parent);
894 /* Check for backlog size */
895 if (sk_acceptq_is_full(parent)) {
896 BT_DBG("backlog full %d", parent->sk_ack_backlog);
/* Ask the owner to create the child channel/socket pair. */
900 chan = pchan->ops->new_connection(pchan->data);
906 write_lock_bh(&conn->chan_lock);
908 hci_conn_hold(conn->hcon);
910 bacpy(&bt_sk(sk)->src, conn->src);
911 bacpy(&bt_sk(sk)->dst, conn->dst);
913 bt_accept_enqueue(parent, sk);
915 __l2cap_chan_add(conn, chan);
917 __set_chan_timer(chan, sk->sk_sndtimeo);
919 l2cap_state_change(chan, BT_CONNECTED);
920 parent->sk_data_ready(parent, 0);
922 write_unlock_bh(&conn->chan_lock);
925 bh_unlock_sock(parent);
/* Mark a channel fully connected: reset configuration state, stop the
 * setup timer and wake the socket (and its listening parent, if any —
 * the parent NULL check is elided in this extract). */
928 static void l2cap_chan_ready(struct sock *sk)
930 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
931 struct sock *parent = bt_sk(sk)->parent;
933 BT_DBG("sk %p, parent %p", sk, parent);
935 chan->conf_state = 0;
936 __clear_chan_timer(chan);
938 l2cap_state_change(chan, BT_CONNECTED);
939 sk->sk_state_change(sk);
942 parent->sk_data_ready(parent, 0);
/* The underlying link came up. For LE: accept incoming connections
 * and/or start SMP security. Then for every channel: LE channels
 * become ready once SMP clears, non-connection-oriented channels are
 * connected immediately, and BT_CONNECT channels start establishment. */
945 static void l2cap_conn_ready(struct l2cap_conn *conn)
947 struct l2cap_chan *chan;
949 BT_DBG("conn %p", conn);
951 if (!conn->hcon->out && conn->hcon->type == LE_LINK)
952 l2cap_le_conn_ready(conn);
954 if (conn->hcon->out && conn->hcon->type == LE_LINK)
955 smp_conn_security(conn, conn->hcon->pending_sec_level);
957 read_lock(&conn->chan_lock);
959 list_for_each_entry(chan, &conn->chan_l, list) {
960 struct sock *sk = chan->sk;
964 if (conn->hcon->type == LE_LINK) {
965 if (smp_conn_security(conn, chan->sec_level))
966 l2cap_chan_ready(sk);
968 } else if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
969 __clear_chan_timer(chan);
970 l2cap_state_change(chan, BT_CONNECTED);
971 sk->sk_state_change(sk);
973 } else if (chan->state == BT_CONNECT)
974 l2cap_do_start(chan);
979 read_unlock(&conn->chan_lock);
982 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate @err to every channel that demanded reliable delivery
 * (the sk_err assignment is elided in this extract). */
983 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
985 struct l2cap_chan *chan;
987 BT_DBG("conn %p", conn);
989 read_lock(&conn->chan_lock);
991 list_for_each_entry(chan, &conn->chan_l, list) {
992 struct sock *sk = chan->sk;
994 if (test_bit(FLAG_FORCE_RELIABLE, &chan->flags))
998 read_unlock(&conn->chan_lock);
/* Information Request timed out: treat the feature exchange as done
 * (with no remote features learned) and start pending channels. */
1001 static void l2cap_info_timeout(unsigned long arg)
1003 struct l2cap_conn *conn = (void *) arg;
1005 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
1006 conn->info_ident = 0;
1008 l2cap_conn_start(conn);
/* Tear down the L2CAP state of an HCI connection: delete every
 * channel with @err, drop the partially reassembled rx skb, release
 * the HCI channel, stop pending timers, destroy any in-flight SMP
 * exchange and detach from the hci_conn (final kfree elided). */
1011 static void l2cap_conn_del(struct hci_conn *hcon, int err)
1013 struct l2cap_conn *conn = hcon->l2cap_data;
1014 struct l2cap_chan *chan, *l;
1020 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
1022 kfree_skb(conn->rx_skb);
1025 list_for_each_entry_safe(chan, l, &conn->chan_l, list) {
1028 l2cap_chan_del(chan, err);
1030 chan->ops->close(chan->data);
1033 hci_chan_del(conn->hchan);
1035 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
1036 del_timer_sync(&conn->info_timer);
1038 if (test_and_clear_bit(HCI_CONN_LE_SMP_PEND, &hcon->pend)) {
1039 del_timer(&conn->security_timer);
1040 smp_chan_destroy(conn);
1043 hcon->l2cap_data = NULL;
/* SMP security procedure timed out: kill the whole connection. */
1047 static void security_timeout(unsigned long arg)
1049 struct l2cap_conn *conn = (void *) arg;
1051 l2cap_conn_del(conn->hcon, ETIMEDOUT);
/* Create (or return the existing) l2cap_conn for an HCI connection:
 * allocate an HCI channel, pick the MTU for the link type, and
 * initialise locks, the channel list and the per-link timer (security
 * timer for LE, info timer for ACL). Early-return / error paths are
 * elided in this extract. */
1054 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
1056 struct l2cap_conn *conn = hcon->l2cap_data;
1057 struct hci_chan *hchan;
1062 hchan = hci_chan_create(hcon);
/* Allocation failed: release the HCI channel again (return elided). */
1066 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
1068 hci_chan_del(hchan);
1072 hcon->l2cap_data = conn;
1074 conn->hchan = hchan;
1076 BT_DBG("hcon %p conn %p hchan %p", hcon, conn, hchan);
1078 if (hcon->hdev->le_mtu && hcon->type == LE_LINK)
1079 conn->mtu = hcon->hdev->le_mtu;
1081 conn->mtu = hcon->hdev->acl_mtu;
1083 conn->src = &hcon->hdev->bdaddr;
1084 conn->dst = &hcon->dst;
1086 conn->feat_mask = 0;
1088 spin_lock_init(&conn->lock);
1089 rwlock_init(&conn->chan_lock);
1091 INIT_LIST_HEAD(&conn->chan_l);
1093 if (hcon->type == LE_LINK)
1094 setup_timer(&conn->security_timer, security_timeout,
1095 (unsigned long) conn);
1097 setup_timer(&conn->info_timer, l2cap_info_timeout,
1098 (unsigned long) conn);
1100 conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM;
/* Locked wrapper around __l2cap_chan_add(). */
1105 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct l2cap_chan *chan)
1107 write_lock_bh(&conn->chan_lock);
1108 __l2cap_chan_add(conn, chan);
1109 write_unlock_bh(&conn->chan_lock);
1112 /* ---- Socket interface ---- */
1114 /* Find socket with psm and source bdaddr.
1115 * Returns closest match.
/* Same pattern as l2cap_global_chan_by_scid(): an exact source match
 * wins, a BDADDR_ANY binding is kept as the fallback (the c1
 * assignment and final return are elided in this extract). */
1117 static struct l2cap_chan *l2cap_global_chan_by_psm(int state, __le16 psm, bdaddr_t *src)
1119 struct l2cap_chan *c, *c1 = NULL;
1121 read_lock(&chan_list_lock);
1123 list_for_each_entry(c, &chan_list, global_l) {
1124 struct sock *sk = c->sk;
1126 if (state && c->state != state)
1129 if (c->psm == psm) {
1131 if (!bacmp(&bt_sk(sk)->src, src)) {
1132 read_unlock(&chan_list_lock);
1137 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
1142 read_unlock(&chan_list_lock);
/* Outgoing connect: resolve the HCI route for the destination, create
 * an LE or ACL link depending on the channel's destination CID,
 * attach the channel to the resulting connection and start the L2CAP
 * handshake (or finish immediately if the link is already up). Error
 * paths and the final unlock label are elided in this extract. */
1147 int l2cap_chan_connect(struct l2cap_chan *chan)
1149 struct sock *sk = chan->sk;
1150 bdaddr_t *src = &bt_sk(sk)->src;
1151 bdaddr_t *dst = &bt_sk(sk)->dst;
1152 struct l2cap_conn *conn;
1153 struct hci_conn *hcon;
1154 struct hci_dev *hdev;
1158 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1161 hdev = hci_get_route(dst, src);
1163 return -EHOSTUNREACH;
1167 auth_type = l2cap_get_auth_type(chan);
/* The fixed LE data CID selects an LE link; anything else is ACL. */
1169 if (chan->dcid == L2CAP_CID_LE_DATA)
1170 hcon = hci_connect(hdev, LE_LINK, dst,
1171 chan->sec_level, auth_type);
1173 hcon = hci_connect(hdev, ACL_LINK, dst,
1174 chan->sec_level, auth_type);
1177 err = PTR_ERR(hcon);
1181 conn = l2cap_conn_add(hcon, 0);
1188 /* Update source addr of the socket */
1189 bacpy(src, conn->src);
1191 l2cap_chan_add(conn, chan);
1193 l2cap_state_change(chan, BT_CONNECT);
1194 __set_chan_timer(chan, sk->sk_sndtimeo);
1196 if (hcon->state == BT_CONNECTED) {
/* Raw/connectionless channels are done as soon as security clears;
 * connection-oriented ones proceed to the signalling handshake. */
1197 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED) {
1198 __clear_chan_timer(chan);
1199 if (l2cap_chan_check_security(chan))
1200 l2cap_state_change(chan, BT_CONNECTED);
1202 l2cap_do_start(chan);
1208 hci_dev_unlock(hdev);
/* Block (interruptibly) until all outstanding ERTM frames have been
 * acknowledged, the connection goes away, a signal arrives, the
 * timeout expires, or a socket error is raised. Timeout setup and the
 * loop's break conditions are elided in this extract. */
1213 int __l2cap_wait_ack(struct sock *sk)
1215 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
1216 DECLARE_WAITQUEUE(wait, current);
1220 add_wait_queue(sk_sleep(sk), &wait);
1221 set_current_state(TASK_INTERRUPTIBLE);
1222 while (chan->unacked_frames > 0 && chan->conn) {
1226 if (signal_pending(current)) {
1227 err = sock_intr_errno(timeo);
1232 timeo = schedule_timeout(timeo);
1234 set_current_state(TASK_INTERRUPTIBLE);
1236 err = sock_error(sk);
1240 set_current_state(TASK_RUNNING);
1241 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the peer has not responded to our poll. Give up
 * and disconnect once remote_max_tx retries are exhausted; otherwise
 * re-arm the monitor and poll again with RR/RNR + P-bit. */
1245 static void l2cap_monitor_timeout(struct work_struct *work)
1247 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1248 monitor_timer.work);
1249 struct sock *sk = chan->sk;
1251 BT_DBG("chan %p", chan);
1254 if (chan->retry_count >= chan->remote_max_tx) {
1255 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1260 chan->retry_count++;
1261 __set_monitor_timer(chan);
1263 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* ERTM retransmission timer: an I-frame went unacknowledged. Enter
 * the WAIT_F state, start the monitor timer and poll the peer. */
1267 static void l2cap_retrans_timeout(struct work_struct *work)
1269 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1270 retrans_timer.work);
1271 struct sock *sk = chan->sk;
1273 BT_DBG("chan %p", chan);
1276 chan->retry_count = 1;
1277 __set_monitor_timer(chan);
1279 set_bit(CONN_WAIT_F, &chan->conn_state);
1281 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_POLL);
/* Release transmitted frames that the peer has acknowledged: pop skbs
 * off tx_q until the sequence number reaches expected_ack_seq (the
 * break and kfree_skb are elided in this extract). When nothing is
 * outstanding the retransmission timer is stopped. */
1285 static void l2cap_drop_acked_frames(struct l2cap_chan *chan)
1287 struct sk_buff *skb;
1289 while ((skb = skb_peek(&chan->tx_q)) &&
1290 chan->unacked_frames) {
1291 if (bt_cb(skb)->tx_seq == chan->expected_ack_seq)
1294 skb = skb_dequeue(&chan->tx_q);
1297 chan->unacked_frames--;
1300 if (!chan->unacked_frames)
1301 __clear_retrans_timer(chan);
/* Streaming mode transmit: drain tx_q, stamping each frame's control
 * field with the next tx sequence number and (if negotiated) the CRC16
 * FCS over the whole frame. No copies are kept — streaming mode has
 * no retransmission. */
1304 static void l2cap_streaming_send(struct l2cap_chan *chan)
1306 struct sk_buff *skb;
1310 while ((skb = skb_dequeue(&chan->tx_q))) {
1311 control = __get_control(chan, skb->data + L2CAP_HDR_SIZE);
1312 control |= __set_txseq(chan, chan->next_tx_seq);
1313 __put_control(chan, control, skb->data + L2CAP_HDR_SIZE);
1315 if (chan->fcs == L2CAP_FCS_CRC16) {
1316 fcs = crc16(0, (u8 *)skb->data,
1317 skb->len - L2CAP_FCS_SIZE);
1318 put_unaligned_le16(fcs,
1319 skb->data + skb->len - L2CAP_FCS_SIZE);
1322 l2cap_do_send(chan, skb);
1324 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* Retransmit the single I-frame with sequence number @tx_seq: locate
 * it in tx_q, bail out if the retry budget is exhausted, then clone
 * it, rebuild the control field (fresh reqseq, possible F-bit) and
 * recompute the FCS before sending. */
1328 static void l2cap_retransmit_one_frame(struct l2cap_chan *chan, u16 tx_seq)
1330 struct sk_buff *skb, *tx_skb;
1334 skb = skb_peek(&chan->tx_q);
/* Linear scan of the transmit queue for the requested sequence. */
1338 while (bt_cb(skb)->tx_seq != tx_seq) {
1339 if (skb_queue_is_last(&chan->tx_q, skb))
1342 skb = skb_queue_next(&chan->tx_q, skb);
1345 if (chan->remote_max_tx &&
1346 bt_cb(skb)->retries == chan->remote_max_tx) {
1347 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1351 tx_skb = skb_clone(skb, GFP_ATOMIC);
1352 bt_cb(skb)->retries++;
/* Keep only the SAR bits; sequence/ack fields are rebuilt below. */
1354 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1355 control &= __get_sar_mask(chan);
1357 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1358 control |= __set_ctrl_final(chan);
1360 control |= __set_reqseq(chan, chan->buffer_seq);
1361 control |= __set_txseq(chan, tx_seq);
1363 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
1365 if (chan->fcs == L2CAP_FCS_CRC16) {
1366 fcs = crc16(0, (u8 *)tx_skb->data,
1367 tx_skb->len - L2CAP_FCS_SIZE);
1368 put_unaligned_le16(fcs,
1369 tx_skb->data + tx_skb->len - L2CAP_FCS_SIZE);
1372 l2cap_do_send(chan, tx_skb);
/* ERTM transmit path: send queued frames starting at tx_send_head
 * while the transmit window is open. Each frame is cloned (the
 * original stays queued for retransmission), stamped with fresh
 * control/FCS fields, and counted in unacked_frames on first send.
 * Returns the number of frames sent (return elided in this extract). */
1375 static int l2cap_ertm_send(struct l2cap_chan *chan)
1377 struct sk_buff *skb, *tx_skb;
1382 if (chan->state != BT_CONNECTED)
1385 while ((skb = chan->tx_send_head) && (!l2cap_tx_window_full(chan))) {
1387 if (chan->remote_max_tx &&
1388 bt_cb(skb)->retries == chan->remote_max_tx) {
1389 l2cap_send_disconn_req(chan->conn, chan, ECONNABORTED);
1393 tx_skb = skb_clone(skb, GFP_ATOMIC);
1395 bt_cb(skb)->retries++;
1397 control = __get_control(chan, tx_skb->data + L2CAP_HDR_SIZE);
1398 control &= __get_sar_mask(chan);
1400 if (test_and_clear_bit(CONN_SEND_FBIT, &chan->conn_state))
1401 control |= __set_ctrl_final(chan);
1403 control |= __set_reqseq(chan, chan->buffer_seq);
1404 control |= __set_txseq(chan, chan->next_tx_seq);
1406 __put_control(chan, control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS is written via skb->data; tx_skb is a clone sharing the
 * same data buffer, so the frame being sent sees the new FCS —
 * NOTE(review): confirm this aliasing is intended. */
1408 if (chan->fcs == L2CAP_FCS_CRC16) {
1409 fcs = crc16(0, (u8 *)skb->data,
1410 tx_skb->len - L2CAP_FCS_SIZE);
1411 put_unaligned_le16(fcs, skb->data +
1412 tx_skb->len - L2CAP_FCS_SIZE);
1415 l2cap_do_send(chan, tx_skb);
1417 __set_retrans_timer(chan);
1419 bt_cb(skb)->tx_seq = chan->next_tx_seq;
1421 chan->next_tx_seq = __next_seq(chan, chan->next_tx_seq);
/* First transmission of this frame: it is now awaiting an ack. */
1423 if (bt_cb(skb)->retries == 1)
1424 chan->unacked_frames++;
1426 chan->frames_sent++;
1428 if (skb_queue_is_last(&chan->tx_q, skb))
1429 chan->tx_send_head = NULL;
1431 chan->tx_send_head = skb_queue_next(&chan->tx_q, skb);
/* Rewind the send pointer to the head of tx_q and the tx sequence to
 * the last acknowledged one, then resend everything outstanding. */
1439 static int l2cap_retransmit_frames(struct l2cap_chan *chan)
1443 if (!skb_queue_empty(&chan->tx_q))
1444 chan->tx_send_head = chan->tx_q.next;
1446 chan->next_tx_seq = chan->expected_ack_seq;
1447 ret = l2cap_ertm_send(chan);
/* Acknowledge received frames: send RNR when locally busy; otherwise
 * try to piggyback the ack on pending I-frames via l2cap_ertm_send(),
 * falling back to an explicit RR S-frame when nothing was sent. */
1451 static void l2cap_send_ack(struct l2cap_chan *chan)
1455 control |= __set_reqseq(chan, chan->buffer_seq);
1457 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
1458 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
1459 set_bit(CONN_RNR_SENT, &chan->conn_state);
1460 l2cap_send_sframe(chan, control);
/* Data carried the ack — no explicit S-frame needed. */
1464 if (l2cap_ertm_send(chan) > 0)
1467 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
1468 l2cap_send_sframe(chan, control);
/* Send a selective-reject S-frame with the F-bit set, requesting the
 * sequence number recorded in the last entry of the SREJ list. */
1471 static void l2cap_send_srejtail(struct l2cap_chan *chan)
1473 struct srej_list *tail;
1476 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
1477 control |= __set_ctrl_final(chan);
1479 tail = list_entry((&chan->srej_l)->prev, struct srej_list, list);
1480 control |= __set_reqseq(chan, tail->tx_seq);
1482 l2cap_send_sframe(chan, control);
/* Copy @len bytes of user iovec data into @skb: @count bytes go into
 * the head, the remainder into a chain of frag_list skbs of at most
 * conn->mtu each. Error returns and the len/count bookkeeping between
 * iterations are elided in this extract. */
1485 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1487 struct l2cap_conn *conn = l2cap_pi(sk)->chan->conn;
1488 struct sk_buff **frag;
1491 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1497 /* Continuation fragments (no L2CAP header) */
1498 frag = &skb_shinfo(skb)->frag_list;
1500 count = min_t(unsigned int, conn->mtu, len);
1502 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1505 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1508 (*frag)->priority = skb->priority;
1513 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM
 * followed by the user payload copied from @msg. Returns the skb or
 * an ERR_PTR on allocation/copy failure. */
1519 static struct sk_buff *l2cap_create_connless_pdu(struct l2cap_chan *chan,
1520 struct msghdr *msg, size_t len,
1523 struct sock *sk = chan->sk;
1524 struct l2cap_conn *conn = chan->conn;
1525 struct sk_buff *skb;
1526 int err, count, hlen = L2CAP_HDR_SIZE + L2CAP_PSMLEN_SIZE;
1527 struct l2cap_hdr *lh;
1529 BT_DBG("sk %p len %d priority %u", sk, (int)len, priority);
/* Head skb holds the header plus as much payload as the MTU allows;
 * the rest goes into frag_list continuation skbs. */
1531 count = min_t(unsigned int, (conn->mtu - hlen), len);
1532 skb = bt_skb_send_alloc(sk, count + hlen,
1533 msg->msg_flags & MSG_DONTWAIT, &err);
1535 return ERR_PTR(err);
1537 skb->priority = priority;
1539 /* Create L2CAP header */
1540 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1541 lh->cid = cpu_to_le16(chan->dcid);
1542 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1543 put_unaligned_le16(chan->psm, skb_put(skb, 2));
1545 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
/* Copy failed: free the skb (elided) and propagate the error. */
1546 if (unlikely(err < 0)) {
1548 return ERR_PTR(err);
/*
 * Build a basic-mode PDU: plain L2CAP header (dcid, len) followed by
 * the user payload. Returns the skb or ERR_PTR(-err) on failure.
 */
1553 static struct sk_buff *l2cap_create_basic_pdu(struct l2cap_chan *chan,
1554 struct msghdr *msg, size_t len,
1557 struct sock *sk = chan->sk;
1558 struct l2cap_conn *conn = chan->conn;
1559 struct sk_buff *skb;
1560 int err, count, hlen = L2CAP_HDR_SIZE;
1561 struct l2cap_hdr *lh;
1563 BT_DBG("sk %p len %d", sk, (int)len);
/* Head skb limited to connection MTU; overflow chained as fragments. */
1565 count = min_t(unsigned int, (conn->mtu - hlen), len);
1566 skb = bt_skb_send_alloc(sk, count + hlen,
1567 msg->msg_flags & MSG_DONTWAIT, &err);
1569 return ERR_PTR(err);
1571 skb->priority = priority;
1573 /* Create L2CAP header */
1574 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1575 lh->cid = cpu_to_le16(chan->dcid);
1576 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1578 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1579 if (unlikely(err < 0)) {
1581 return ERR_PTR(err);
/*
 * Build an ERTM/streaming I-frame PDU: L2CAP header, enhanced or
 * extended control field, optional SDU-length field (for SAR start
 * frames), payload, and a zeroed FCS placeholder when CRC16 is enabled.
 * Returns the skb or ERR_PTR(-err).
 */
1586 static struct sk_buff *l2cap_create_iframe_pdu(struct l2cap_chan *chan,
1587 struct msghdr *msg, size_t len,
1588 u32 control, u16 sdulen)
1590 struct sock *sk = chan->sk;
1591 struct l2cap_conn *conn = chan->conn;
1592 struct sk_buff *skb;
1593 int err, count, hlen;
1594 struct l2cap_hdr *lh;
1596 BT_DBG("sk %p len %d", sk, (int)len);
1599 return ERR_PTR(-ENOTCONN);
/* Extended control field (4 bytes) vs enhanced (2 bytes). */
1601 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
1602 hlen = L2CAP_EXT_HDR_SIZE;
1604 hlen = L2CAP_ENH_HDR_SIZE;
/* SDU length is only present on SAR "start" frames (sdulen != 0). */
1607 hlen += L2CAP_SDULEN_SIZE;
1609 if (chan->fcs == L2CAP_FCS_CRC16)
1610 hlen += L2CAP_FCS_SIZE;
1612 count = min_t(unsigned int, (conn->mtu - hlen), len);
1613 skb = bt_skb_send_alloc(sk, count + hlen,
1614 msg->msg_flags & MSG_DONTWAIT, &err);
1616 return ERR_PTR(err);
1618 /* Create L2CAP header */
1619 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1620 lh->cid = cpu_to_le16(chan->dcid);
1621 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1623 __put_control(chan, control, skb_put(skb, __ctrl_size(chan)));
1626 put_unaligned_le16(sdulen, skb_put(skb, L2CAP_SDULEN_SIZE));
1628 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1629 if (unlikely(err < 0)) {
1631 return ERR_PTR(err);
/* FCS is written as 0 here; actual CRC is filled in at send time. */
1634 if (chan->fcs == L2CAP_FCS_CRC16)
1635 put_unaligned_le16(0, skb_put(skb, L2CAP_FCS_SIZE));
1637 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the remote MPS into a START frame (which
 * carries the total SDU length), zero or more CONTINUE frames, and an
 * END frame. Segments are built on a local queue and spliced onto the
 * channel's tx_q only once all succeed, so a mid-stream allocation
 * failure leaves tx_q untouched.
 */
1641 static int l2cap_sar_segment_sdu(struct l2cap_chan *chan, struct msghdr *msg, size_t len)
1643 struct sk_buff *skb;
1644 struct sk_buff_head sar_queue;
1648 skb_queue_head_init(&sar_queue);
/* First segment: SAR=START, payload = remote_mps, sdulen = total len. */
1649 control = __set_ctrl_sar(chan, L2CAP_SAR_START);
1650 skb = l2cap_create_iframe_pdu(chan, msg, chan->remote_mps, control, len);
1652 return PTR_ERR(skb);
1654 __skb_queue_tail(&sar_queue, skb);
1655 len -= chan->remote_mps;
1656 size += chan->remote_mps;
/* Middle segments are full remote_mps; the last one is SAR=END. */
1661 if (len > chan->remote_mps) {
1662 control = __set_ctrl_sar(chan, L2CAP_SAR_CONTINUE);
1663 buflen = chan->remote_mps;
1665 control = __set_ctrl_sar(chan, L2CAP_SAR_END);
1669 skb = l2cap_create_iframe_pdu(chan, msg, buflen, control, 0);
/* On failure drop everything queued so far — all-or-nothing. */
1671 skb_queue_purge(&sar_queue);
1672 return PTR_ERR(skb);
1675 __skb_queue_tail(&sar_queue, skb);
1679 skb_queue_splice_tail(&sar_queue, &chan->tx_q);
1680 if (chan->tx_send_head == NULL)
1681 chan->tx_send_head = sar_queue.next;
/*
 * Channel-level send entry point. Dispatches on channel type/mode:
 * connectionless channels send a single connless PDU; basic mode checks
 * the outgoing MTU and sends one PDU; ERTM/streaming modes queue one
 * unsegmented I-frame or SAR-segment the SDU, then kick the appropriate
 * transmit engine. Returns bytes sent or a negative errno.
 */
1686 int l2cap_chan_send(struct l2cap_chan *chan, struct msghdr *msg, size_t len,
1689 struct sk_buff *skb;
1693 /* Connectionless channel */
1694 if (chan->chan_type == L2CAP_CHAN_CONN_LESS) {
1695 skb = l2cap_create_connless_pdu(chan, msg, len, priority);
1697 return PTR_ERR(skb);
1699 l2cap_do_send(chan, skb);
1703 switch (chan->mode) {
1704 case L2CAP_MODE_BASIC:
1705 /* Check outgoing MTU */
1706 if (len > chan->omtu)
1709 /* Create a basic PDU */
1710 skb = l2cap_create_basic_pdu(chan, msg, len, priority);
1712 return PTR_ERR(skb);
1714 l2cap_do_send(chan, skb);
1718 case L2CAP_MODE_ERTM:
1719 case L2CAP_MODE_STREAMING:
1720 /* Entire SDU fits into one PDU */
1721 if (len <= chan->remote_mps) {
1722 control = __set_ctrl_sar(chan, L2CAP_SAR_UNSEGMENTED);
1723 skb = l2cap_create_iframe_pdu(chan, msg, len, control,
1726 return PTR_ERR(skb);
1728 __skb_queue_tail(&chan->tx_q, skb);
1730 if (chan->tx_send_head == NULL)
1731 chan->tx_send_head = skb;
1734 /* Segment SDU into multiples PDUs */
1735 err = l2cap_sar_segment_sdu(chan, msg, len);
/* Streaming mode transmits immediately, no retransmission state. */
1740 if (chan->mode == L2CAP_MODE_STREAMING) {
1741 l2cap_streaming_send(chan);
/* Defer transmit while the remote is busy and we await a final bit. */
1746 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
1747 test_bit(CONN_WAIT_F, &chan->conn_state)) {
1752 err = l2cap_ertm_send(chan);
1759 BT_DBG("bad state %1.1x", chan->mode);
1766 /* Copy frame to all raw sockets on that connection */
/*
 * Deliver a clone of 'skb' to every raw-type channel on this connection,
 * except the socket it originated from. Walks conn->chan_l under the
 * connection's read lock.
 */
1767 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
1769 struct sk_buff *nskb;
1770 struct l2cap_chan *chan;
1772 BT_DBG("conn %p", conn);
1774 read_lock(&conn->chan_lock);
1775 list_for_each_entry(chan, &conn->chan_l, list) {
1776 struct sock *sk = chan->sk;
/* Only raw channels receive copies of signalling traffic. */
1777 if (chan->chan_type != L2CAP_CHAN_RAW)
1780 /* Don't send frame to the socket it came from */
/* GFP_ATOMIC: may run in softirq context; a failed clone is skipped. */
1783 nskb = skb_clone(skb, GFP_ATOMIC);
/* recv callback owns nskb on success; on failure we must free it
 * (free path elided in this excerpt — confirm). */
1787 if (chan->ops->recv(chan->data, nskb))
1790 read_unlock(&conn->chan_lock);
1793 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling-command skb: L2CAP header addressed to the (LE or
 * BR/EDR) signalling CID, command header (code/ident/len), then 'dlen'
 * bytes of 'data'. Payload beyond the connection MTU is chained as
 * continuation fragments on frag_list.
 */
1794 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
1795 u8 code, u8 ident, u16 dlen, void *data)
1797 struct sk_buff *skb, **frag;
1798 struct l2cap_cmd_hdr *cmd;
1799 struct l2cap_hdr *lh;
1802 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
1803 conn, code, ident, dlen);
1805 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
1806 count = min_t(unsigned int, conn->mtu, len);
1808 skb = bt_skb_alloc(count, GFP_ATOMIC);
1812 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1813 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
/* LE links use the dedicated LE signalling channel. */
1815 if (conn->hcon->type == LE_LINK)
1816 lh->cid = cpu_to_le16(L2CAP_CID_LE_SIGNALING);
1818 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
1820 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
1823 cmd->len = cpu_to_le16(dlen);
/* Remaining room in the head skb after the two headers. */
1826 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
1827 memcpy(skb_put(skb, count), data, count);
1833 /* Continuation fragments (no L2CAP header) */
1834 frag = &skb_shinfo(skb)->frag_list;
1836 count = min_t(unsigned int, conn->mtu, len);
1838 *frag = bt_skb_alloc(count, GFP_ATOMIC);
1842 memcpy(skb_put(*frag, count), data, count);
1847 frag = &(*frag)->next;
/*
 * Parse one configuration option at *ptr: extract its type, length, and
 * value (1/2/4-byte values are read inline; larger options are returned
 * as a pointer cast into *val). Advances *ptr and returns the total
 * bytes consumed (header + option length).
 */
1857 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
1859 struct l2cap_conf_opt *opt = *ptr;
1862 len = L2CAP_CONF_OPT_SIZE + opt->len;
1870 *val = *((u8 *) opt->val);
1874 *val = get_unaligned_le16(opt->val);
1878 *val = get_unaligned_le32(opt->val);
/* Variable-length option: hand back a pointer, not a value. */
1882 *val = (unsigned long) opt->val;
1886 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option (type/len/value) at *ptr, writing
 * 1/2/4-byte values inline and memcpy'ing larger payloads from the
 * pointer passed via 'val'. Advances *ptr past the option.
 */
1890 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
1892 struct l2cap_conf_opt *opt = *ptr;
1894 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
1901 *((u8 *) opt->val) = val;
1905 put_unaligned_le16(val, opt->val);
1909 put_unaligned_le32(val, opt->val);
/* For other lengths, 'val' is actually a pointer to the payload. */
1913 memcpy(opt->val, (void *) val, len);
1917 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Append an Extended Flow Specification (EFS) option built from the
 * channel's local QoS parameters. ERTM uses the channel's service type
 * and default latency/flush values; streaming mode forces best-effort.
 */
1920 static void l2cap_add_opt_efs(void **ptr, struct l2cap_chan *chan)
1922 struct l2cap_conf_efs efs;
1924 switch (chan->mode) {
1925 case L2CAP_MODE_ERTM:
1926 efs.id = chan->local_id;
1927 efs.stype = chan->local_stype;
1928 efs.msdu = cpu_to_le16(chan->local_msdu);
1929 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1930 efs.acc_lat = cpu_to_le32(L2CAP_DEFAULT_ACC_LAT);
1931 efs.flush_to = cpu_to_le32(L2CAP_DEFAULT_FLUSH_TO);
1934 case L2CAP_MODE_STREAMING:
/* Streaming is always best-effort (efs.id assignment elided here). */
1936 efs.stype = L2CAP_SERV_BESTEFFORT;
1937 efs.msdu = cpu_to_le16(chan->local_msdu);
1938 efs.sdu_itime = cpu_to_le32(chan->local_sdu_itime);
1947 l2cap_add_conf_opt(ptr, L2CAP_CONF_EFS, sizeof(efs),
1948 (unsigned long) &efs);
/*
 * Delayed-work handler for the ERTM ack timer: sends a pending
 * acknowledgement with the channel's socket locked.
 */
1951 static void l2cap_ack_timeout(struct work_struct *work)
1953 struct l2cap_chan *chan = container_of(work, struct l2cap_chan,
1956 lock_sock(chan->sk);
1957 l2cap_send_ack(chan);
1958 release_sock(chan->sk);
/*
 * Reset ERTM sequence-number/window state, arm the three ERTM timers
 * (retransmission, monitor, ack) as delayed work, initialise the SREJ
 * receive queue/list, and install the ERTM backlog receive handler on
 * the socket.
 */
1961 static inline void l2cap_ertm_init(struct l2cap_chan *chan)
1963 struct sock *sk = chan->sk;
1965 chan->expected_ack_seq = 0;
1966 chan->unacked_frames = 0;
1967 chan->buffer_seq = 0;
1968 chan->num_acked = 0;
1969 chan->frames_sent = 0;
1971 INIT_DELAYED_WORK(&chan->retrans_timer, l2cap_retrans_timeout);
1972 INIT_DELAYED_WORK(&chan->monitor_timer, l2cap_monitor_timeout);
1973 INIT_DELAYED_WORK(&chan->ack_timer, l2cap_ack_timeout);
1975 skb_queue_head_init(&chan->srej_q);
1977 INIT_LIST_HEAD(&chan->srej_l);
/* Frames arriving while the socket is owned go through ERTM handling. */
1980 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/*
 * Choose the channel mode to offer: keep ERTM/streaming if the remote
 * feature mask supports it, otherwise fall back to basic mode.
 */
1983 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
1986 case L2CAP_MODE_STREAMING:
1987 case L2CAP_MODE_ERTM:
1988 if (l2cap_mode_supported(mode, remote_feat_mask))
1992 return L2CAP_MODE_BASIC;
/* Extended Window Size usable only with high-speed support enabled and
 * the remote advertising the extended-window feature bit. */
1996 static inline bool __l2cap_ews_supported(struct l2cap_chan *chan)
1998 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_WINDOW;
/* Extended Flow Spec usable only with high-speed support enabled and
 * the remote advertising the extended-flow feature bit. */
2001 static inline bool __l2cap_efs_supported(struct l2cap_chan *chan)
2003 return enable_hs && chan->conn->feat_mask & L2CAP_FEAT_EXT_FLOW;
/*
 * Pick the transmit window limits: if the requested window exceeds the
 * default and extended windows are supported, switch the channel to the
 * 32-bit extended control field; otherwise clamp tx_win to the default.
 */
2006 static inline void l2cap_txwin_setup(struct l2cap_chan *chan)
2008 if (chan->tx_win > L2CAP_DEFAULT_TX_WINDOW &&
2009 __l2cap_ews_supported(chan)) {
2010 /* use extended control field */
2011 set_bit(FLAG_EXT_CTRL, &chan->flags);
2012 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2014 chan->tx_win = min_t(u16, chan->tx_win,
2015 L2CAP_DEFAULT_TX_WINDOW);
2016 chan->tx_win_max = L2CAP_DEFAULT_TX_WINDOW;
/*
 * Build our outgoing Configure Request into 'data': optionally an MTU
 * option, then mode-specific RFC/EFS/FCS/EWS options for basic, ERTM or
 * streaming mode. On the first request (no conf exchanged yet) the
 * preferred mode may be downgraded via l2cap_select_mode() based on the
 * remote feature mask. Presumably returns the request length — the
 * return statement is elided in this excerpt.
 */
2020 static int l2cap_build_conf_req(struct l2cap_chan *chan, void *data)
2022 struct l2cap_conf_req *req = data;
2023 struct l2cap_conf_rfc rfc = { .mode = chan->mode };
2024 void *ptr = req->data;
2027 BT_DBG("chan %p", chan);
/* Mode (re)selection only before any config exchange has happened. */
2029 if (chan->num_conf_req || chan->num_conf_rsp)
2032 switch (chan->mode) {
2033 case L2CAP_MODE_STREAMING:
2034 case L2CAP_MODE_ERTM:
2035 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state))
2038 if (__l2cap_efs_supported(chan))
2039 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2043 chan->mode = l2cap_select_mode(rfc.mode, chan->conn->feat_mask);
/* Only advertise MTU when it differs from the spec default. */
2048 if (chan->imtu != L2CAP_DEFAULT_MTU)
2049 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2051 switch (chan->mode) {
2052 case L2CAP_MODE_BASIC:
/* No RFC option needed if remote supports neither ERTM nor streaming. */
2053 if (!(chan->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2054 !(chan->conn->feat_mask & L2CAP_FEAT_STREAMING))
2057 rfc.mode = L2CAP_MODE_BASIC;
2059 rfc.max_transmit = 0;
2060 rfc.retrans_timeout = 0;
2061 rfc.monitor_timeout = 0;
2062 rfc.max_pdu_size = 0;
2064 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2065 (unsigned long) &rfc);
2068 case L2CAP_MODE_ERTM:
2069 rfc.mode = L2CAP_MODE_ERTM;
2070 rfc.max_transmit = chan->max_tx;
/* Timeouts are filled in by the acceptor side; we send zero. */
2071 rfc.retrans_timeout = 0;
2072 rfc.monitor_timeout = 0;
/* Cap PDU size so a full frame (with headers/FCS) fits the link MTU. */
2074 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2075 L2CAP_EXT_HDR_SIZE -
2078 rfc.max_pdu_size = cpu_to_le16(size);
2080 l2cap_txwin_setup(chan);
2082 rfc.txwin_size = min_t(u16, chan->tx_win,
2083 L2CAP_DEFAULT_TX_WINDOW);
2085 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2086 (unsigned long) &rfc);
2088 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2089 l2cap_add_opt_efs(&ptr, chan);
2091 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
/* Explicitly negotiate FCS off when possible. */
2094 if (chan->fcs == L2CAP_FCS_NONE ||
2095 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2096 chan->fcs = L2CAP_FCS_NONE;
2097 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2100 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
2101 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2105 case L2CAP_MODE_STREAMING:
2106 rfc.mode = L2CAP_MODE_STREAMING;
2108 rfc.max_transmit = 0;
2109 rfc.retrans_timeout = 0;
2110 rfc.monitor_timeout = 0;
2112 size = min_t(u16, L2CAP_DEFAULT_MAX_PDU_SIZE, chan->conn->mtu -
2113 L2CAP_EXT_HDR_SIZE -
2116 rfc.max_pdu_size = cpu_to_le16(size);
2118 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2119 (unsigned long) &rfc);
2121 if (test_bit(FLAG_EFS_ENABLE, &chan->flags))
2122 l2cap_add_opt_efs(&ptr, chan);
2124 if (!(chan->conn->feat_mask & L2CAP_FEAT_FCS))
2127 if (chan->fcs == L2CAP_FCS_NONE ||
2128 test_bit(CONF_NO_FCS_RECV, &chan->conf_state)) {
2129 chan->fcs = L2CAP_FCS_NONE;
2130 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, chan->fcs);
2135 req->dcid = cpu_to_le16(chan->dcid);
2136 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated Configure Request (chan->conf_req /
 * conf_len) and build our Configure Response into 'data'. Walks each
 * option, records MTU/flush/RFC/FCS/EFS/EWS values, then negotiates the
 * mode and echoes adjusted options back. Returns -ECONNREFUSED when no
 * acceptable mode can be agreed; otherwise builds a response with
 * result SUCCESS/UNACCEPT/PENDING. The final return is elided in this
 * excerpt — presumably the response length.
 */
2141 static int l2cap_parse_conf_req(struct l2cap_chan *chan, void *data)
2143 struct l2cap_conf_rsp *rsp = data;
2144 void *ptr = rsp->data;
2145 void *req = chan->conf_req;
2146 int len = chan->conf_len;
2147 int type, hint, olen;
2149 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2150 struct l2cap_conf_efs efs;
2152 u16 mtu = L2CAP_DEFAULT_MTU;
2153 u16 result = L2CAP_CONF_SUCCESS;
2156 BT_DBG("chan %p", chan);
/* --- Pass 1: decode every option the peer sent. --- */
2158 while (len >= L2CAP_CONF_OPT_SIZE) {
2159 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint bit set => unknown options may be silently ignored. */
2161 hint = type & L2CAP_CONF_HINT;
2162 type &= L2CAP_CONF_MASK;
2165 case L2CAP_CONF_MTU:
2169 case L2CAP_CONF_FLUSH_TO:
2170 chan->flush_to = val;
2173 case L2CAP_CONF_QOS:
2176 case L2CAP_CONF_RFC:
2177 if (olen == sizeof(rfc))
2178 memcpy(&rfc, (void *) val, olen);
2181 case L2CAP_CONF_FCS:
2182 if (val == L2CAP_FCS_NONE)
2183 set_bit(CONF_NO_FCS_RECV, &chan->conf_state);
2186 case L2CAP_CONF_EFS:
2188 if (olen == sizeof(efs))
2189 memcpy(&efs, (void *) val, olen);
2192 case L2CAP_CONF_EWS:
/* EWS without high-speed support is refused outright. */
2194 return -ECONNREFUSED;
2196 set_bit(FLAG_EXT_CTRL, &chan->flags);
2197 set_bit(CONF_EWS_RECV, &chan->conf_state);
2198 chan->tx_win_max = L2CAP_DEFAULT_EXT_WINDOW;
2199 chan->remote_tx_win = val;
/* Unknown non-hint option: report it back as UNKNOWN. */
2206 result = L2CAP_CONF_UNKNOWN;
2207 *((u8 *) ptr++) = type;
/* --- Mode negotiation (only on first request/response round). --- */
2212 if (chan->num_conf_rsp || chan->num_conf_req > 1)
2215 switch (chan->mode) {
2216 case L2CAP_MODE_STREAMING:
2217 case L2CAP_MODE_ERTM:
2218 if (!test_bit(CONF_STATE2_DEVICE, &chan->conf_state)) {
2219 chan->mode = l2cap_select_mode(rfc.mode,
2220 chan->conn->feat_mask);
2225 if (__l2cap_efs_supported(chan))
2226 set_bit(FLAG_EFS_ENABLE, &chan->flags);
2228 return -ECONNREFUSED;
2231 if (chan->mode != rfc.mode)
2232 return -ECONNREFUSED;
/* Peer proposed a different mode: reject once, refuse the second time. */
2238 if (chan->mode != rfc.mode) {
2239 result = L2CAP_CONF_UNACCEPT;
2240 rfc.mode = chan->mode;
2242 if (chan->num_conf_rsp == 1)
2243 return -ECONNREFUSED;
2245 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2246 sizeof(rfc), (unsigned long) &rfc);
2249 if (result == L2CAP_CONF_SUCCESS) {
2250 /* Configure output options and let the other side know
2251 * which ones we don't like. */
2253 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2254 result = L2CAP_CONF_UNACCEPT;
2257 set_bit(CONF_MTU_DONE, &chan->conf_state);
2259 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->omtu);
/* EFS service-type mismatch (both sides carrying traffic) is fatal
 * after the first round. */
2262 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2263 efs.stype != L2CAP_SERV_NOTRAFIC &&
2264 efs.stype != chan->local_stype) {
2266 result = L2CAP_CONF_UNACCEPT;
2268 if (chan->num_conf_req >= 1)
2269 return -ECONNREFUSED;
2271 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2273 (unsigned long) &efs);
2275 /* Send PENDING Conf Rsp */
2276 result = L2CAP_CONF_PENDING;
2277 set_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2282 case L2CAP_MODE_BASIC:
2283 chan->fcs = L2CAP_FCS_NONE;
2284 set_bit(CONF_MODE_DONE, &chan->conf_state);
2287 case L2CAP_MODE_ERTM:
/* EWS option, when received, already set remote_tx_win above. */
2288 if (!test_bit(CONF_EWS_RECV, &chan->conf_state))
2289 chan->remote_tx_win = rfc.txwin_size;
2291 rfc.txwin_size = L2CAP_DEFAULT_TX_WINDOW;
2293 chan->remote_max_tx = rfc.max_transmit;
2295 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2297 L2CAP_EXT_HDR_SIZE -
2300 rfc.max_pdu_size = cpu_to_le16(size);
2301 chan->remote_mps = size;
/* As acceptor, we dictate the retransmission/monitor timeouts. */
2303 rfc.retrans_timeout =
2304 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2305 rfc.monitor_timeout =
2306 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2308 set_bit(CONF_MODE_DONE, &chan->conf_state);
2310 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2311 sizeof(rfc), (unsigned long) &rfc);
2313 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2314 chan->remote_id = efs.id;
2315 chan->remote_stype = efs.stype;
2316 chan->remote_msdu = le16_to_cpu(efs.msdu);
2317 chan->remote_flush_to =
2318 le32_to_cpu(efs.flush_to);
2319 chan->remote_acc_lat =
2320 le32_to_cpu(efs.acc_lat);
2321 chan->remote_sdu_itime =
2322 le32_to_cpu(efs.sdu_itime);
2323 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2324 sizeof(efs), (unsigned long) &efs);
2328 case L2CAP_MODE_STREAMING:
2329 size = min_t(u16, le16_to_cpu(rfc.max_pdu_size),
2331 L2CAP_EXT_HDR_SIZE -
2334 rfc.max_pdu_size = cpu_to_le16(size);
2335 chan->remote_mps = size;
2337 set_bit(CONF_MODE_DONE, &chan->conf_state);
2339 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2340 sizeof(rfc), (unsigned long) &rfc);
2345 result = L2CAP_CONF_UNACCEPT;
2347 memset(&rfc, 0, sizeof(rfc));
2348 rfc.mode = chan->mode;
2351 if (result == L2CAP_CONF_SUCCESS)
2352 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2354 rsp->scid = cpu_to_le16(chan->dcid);
2355 rsp->result = cpu_to_le16(result);
2356 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's Configure Response ('rsp', 'len' bytes) and build a
 * fresh Configure Request into 'data' reflecting any values the peer
 * adjusted (MTU, flush timeout, RFC, EWS, EFS). On success/pending,
 * commit negotiated ERTM/streaming parameters into the channel.
 * Returns -ECONNREFUSED on mode or EFS conflicts; the normal return
 * (presumably the new request length) is elided in this excerpt.
 */
2361 static int l2cap_parse_conf_rsp(struct l2cap_chan *chan, void *rsp, int len, void *data, u16 *result)
2363 struct l2cap_conf_req *req = data;
2364 void *ptr = req->data;
2367 struct l2cap_conf_rfc rfc;
2368 struct l2cap_conf_efs efs;
2370 BT_DBG("chan %p, rsp %p, len %d, req %p", chan, rsp, len, data);
2372 while (len >= L2CAP_CONF_OPT_SIZE) {
2373 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2376 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below spec minimum: mark unacceptable and
 * counter with the minimum. */
2377 if (val < L2CAP_DEFAULT_MIN_MTU) {
2378 *result = L2CAP_CONF_UNACCEPT;
2379 chan->imtu = L2CAP_DEFAULT_MIN_MTU;
2382 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, chan->imtu);
2385 case L2CAP_CONF_FLUSH_TO:
2386 chan->flush_to = val;
2387 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2391 case L2CAP_CONF_RFC:
2392 if (olen == sizeof(rfc))
2393 memcpy(&rfc, (void *)val, olen);
/* A mode-locked device refuses any mode change from the peer. */
2395 if (test_bit(CONF_STATE2_DEVICE, &chan->conf_state) &&
2396 rfc.mode != chan->mode)
2397 return -ECONNREFUSED;
2401 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2402 sizeof(rfc), (unsigned long) &rfc);
2405 case L2CAP_CONF_EWS:
2406 chan->tx_win = min_t(u16, val,
2407 L2CAP_DEFAULT_EXT_WINDOW);
2408 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EWS, 2,
2412 case L2CAP_CONF_EFS:
2413 if (olen == sizeof(efs))
2414 memcpy(&efs, (void *)val, olen);
2416 if (chan->local_stype != L2CAP_SERV_NOTRAFIC &&
2417 efs.stype != L2CAP_SERV_NOTRAFIC &&
2418 efs.stype != chan->local_stype)
2419 return -ECONNREFUSED;
2421 l2cap_add_conf_opt(&ptr, L2CAP_CONF_EFS,
2422 sizeof(efs), (unsigned long) &efs);
/* Basic mode cannot be renegotiated to something else. */
2427 if (chan->mode == L2CAP_MODE_BASIC && chan->mode != rfc.mode)
2428 return -ECONNREFUSED;
2430 chan->mode = rfc.mode;
2432 if (*result == L2CAP_CONF_SUCCESS || *result == L2CAP_CONF_PENDING) {
2434 case L2CAP_MODE_ERTM:
2435 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2436 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2437 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2439 if (test_bit(FLAG_EFS_ENABLE, &chan->flags)) {
2440 chan->local_msdu = le16_to_cpu(efs.msdu);
2441 chan->local_sdu_itime =
2442 le32_to_cpu(efs.sdu_itime);
2443 chan->local_acc_lat = le32_to_cpu(efs.acc_lat);
2444 chan->local_flush_to =
2445 le32_to_cpu(efs.flush_to);
2449 case L2CAP_MODE_STREAMING:
2450 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2454 req->dcid = cpu_to_le16(chan->dcid);
2455 req->flags = cpu_to_le16(0x0000);
/*
 * Fill in a minimal Configure Response header (scid, result, flags)
 * with no options. Presumably returns the response size — the return
 * statement is elided in this excerpt.
 */
2460 static int l2cap_build_conf_rsp(struct l2cap_chan *chan, void *data, u16 result, u16 flags)
2462 struct l2cap_conf_rsp *rsp = data;
2463 void *ptr = rsp->data;
2465 BT_DBG("chan %p", chan);
2467 rsp->scid = cpu_to_le16(chan->dcid);
2468 rsp->result = cpu_to_le16(result);
2469 rsp->flags = cpu_to_le16(flags);
/*
 * Send the deferred Connect Response (SUCCESS) for a channel that was
 * held pending, then — if we have not already sent a Configure Request
 * for it — build and send one and bump num_conf_req.
 */
2474 void __l2cap_connect_rsp_defer(struct l2cap_chan *chan)
2476 struct l2cap_conn_rsp rsp;
2477 struct l2cap_conn *conn = chan->conn;
2480 rsp.scid = cpu_to_le16(chan->dcid);
2481 rsp.dcid = cpu_to_le16(chan->scid);
2482 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
2483 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* chan->ident was saved from the incoming Connect Request. */
2484 l2cap_send_cmd(conn, chan->ident,
2485 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* test_and_set guarantees exactly one Configure Request is sent. */
2487 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2490 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2491 l2cap_build_conf_req(chan, buf), buf);
2492 chan->num_conf_req++;
/*
 * Extract the RFC option from a successful Configure Response and
 * commit the negotiated timeouts/MPS into the channel. No-op for
 * channels that are neither ERTM nor streaming mode.
 */
2495 static void l2cap_conf_rfc_get(struct l2cap_chan *chan, void *rsp, int len)
2499 struct l2cap_conf_rfc rfc;
2501 BT_DBG("chan %p, rsp %p, len %d", chan, rsp, len);
2503 if ((chan->mode != L2CAP_MODE_ERTM) && (chan->mode != L2CAP_MODE_STREAMING))
2506 while (len >= L2CAP_CONF_OPT_SIZE) {
2507 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2510 case L2CAP_CONF_RFC:
2511 if (olen == sizeof(rfc))
2512 memcpy(&rfc, (void *)val, olen);
2519 case L2CAP_MODE_ERTM:
2520 chan->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2521 chan->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2522 chan->mps = le16_to_cpu(rfc.max_pdu_size);
2524 case L2CAP_MODE_STREAMING:
2525 chan->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle a Command Reject. If it answers our outstanding Information
 * Request (matching ident), stop the info timer, mark the feature-mask
 * exchange done, and let pending channels proceed.
 */
2529 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2531 struct l2cap_cmd_rej_unk *rej = (struct l2cap_cmd_rej_unk *) data;
2533 if (rej->reason != L2CAP_REJ_NOT_UNDERSTOOD)
2536 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2537 cmd->ident == conn->info_ident) {
2538 del_timer(&conn->info_timer);
2540 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2541 conn->info_ident = 0;
/* Peer rejected the info request; start channels with what we have. */
2543 l2cap_conn_start(conn);
/*
 * Handle an incoming Connect Request: look up a listening channel for
 * the PSM, enforce link security (except for SDP, PSM 0x0001), check
 * the accept backlog, create the child channel, verify the remote scid
 * is not already in use, then answer with SUCCESS or PEND depending on
 * security/defer-setup state. Also kicks off the feature-mask
 * Information Request if this is the first channel on the connection,
 * and sends our Configure Request on immediate success.
 */
2549 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2551 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2552 struct l2cap_conn_rsp rsp;
2553 struct l2cap_chan *chan = NULL, *pchan;
2554 struct sock *parent, *sk = NULL;
2555 int result, status = L2CAP_CS_NO_INFO;
2557 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2558 __le16 psm = req->psm;
2560 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2562 /* Check if we have socket listening on psm */
2563 pchan = l2cap_global_chan_by_psm(BT_LISTEN, psm, conn->src);
2565 result = L2CAP_CR_BAD_PSM;
2571 bh_lock_sock(parent);
2573 /* Check if the ACL is secure enough (if not SDP) */
2574 if (psm != cpu_to_le16(0x0001) &&
2575 !hci_conn_check_link_mode(conn->hcon)) {
2576 conn->disc_reason = HCI_ERROR_AUTH_FAILURE;
2577 result = L2CAP_CR_SEC_BLOCK;
2581 result = L2CAP_CR_NO_MEM;
2583 /* Check for backlog size */
2584 if (sk_acceptq_is_full(parent)) {
2585 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2589 chan = pchan->ops->new_connection(pchan->data);
2595 write_lock_bh(&conn->chan_lock);
2597 /* Check if we already have channel with that dcid */
2598 if (__l2cap_get_chan_by_dcid(conn, scid)) {
2599 write_unlock_bh(&conn->chan_lock);
2600 sock_set_flag(sk, SOCK_ZAPPED);
2601 chan->ops->close(chan->data);
2605 hci_conn_hold(conn->hcon);
2607 bacpy(&bt_sk(sk)->src, conn->src);
2608 bacpy(&bt_sk(sk)->dst, conn->dst);
2612 bt_accept_enqueue(parent, sk);
2614 __l2cap_chan_add(conn, chan);
2618 __set_chan_timer(chan, sk->sk_sndtimeo);
/* Remember the request ident for the (possibly deferred) response. */
2620 chan->ident = cmd->ident;
2622 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2623 if (l2cap_chan_check_security(chan)) {
2624 if (bt_sk(sk)->defer_setup) {
/* Userspace must accept() first: answer PEND/AUTHORIZATION. */
2625 l2cap_state_change(chan, BT_CONNECT2);
2626 result = L2CAP_CR_PEND;
2627 status = L2CAP_CS_AUTHOR_PEND;
2628 parent->sk_data_ready(parent, 0);
2630 l2cap_state_change(chan, BT_CONFIG);
2631 result = L2CAP_CR_SUCCESS;
2632 status = L2CAP_CS_NO_INFO;
2635 l2cap_state_change(chan, BT_CONNECT2);
2636 result = L2CAP_CR_PEND;
2637 status = L2CAP_CS_AUTHEN_PEND;
/* Feature mask not yet known: hold the channel pending. */
2640 l2cap_state_change(chan, BT_CONNECT2);
2641 result = L2CAP_CR_PEND;
2642 status = L2CAP_CS_NO_INFO;
2645 write_unlock_bh(&conn->chan_lock);
2648 bh_unlock_sock(parent);
2651 rsp.scid = cpu_to_le16(scid);
2652 rsp.dcid = cpu_to_le16(dcid);
2653 rsp.result = cpu_to_le16(result);
2654 rsp.status = cpu_to_le16(status);
2655 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2657 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2658 struct l2cap_info_req info;
2659 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2661 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2662 conn->info_ident = l2cap_get_ident(conn);
2664 mod_timer(&conn->info_timer, jiffies +
2665 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2667 l2cap_send_cmd(conn, conn->info_ident,
2668 L2CAP_INFO_REQ, sizeof(info), &info);
2671 if (chan && !test_bit(CONF_REQ_SENT, &chan->conf_state) &&
2672 result == L2CAP_CR_SUCCESS) {
2674 set_bit(CONF_REQ_SENT, &chan->conf_state);
2675 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2676 l2cap_build_conf_req(chan, buf), buf);
2677 chan->num_conf_req++;
/*
 * Handle a Connect Response: find our channel by scid (or by ident if
 * no dcid was assigned yet), then on SUCCESS move to BT_CONFIG and send
 * our Configure Request; on PEND mark the connect pending; otherwise
 * tear the channel down (deferred via the disconnect timer when the
 * socket is user-locked).
 */
2683 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2685 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2686 u16 scid, dcid, result, status;
2687 struct l2cap_chan *chan;
2691 scid = __le16_to_cpu(rsp->scid);
2692 dcid = __le16_to_cpu(rsp->dcid);
2693 result = __le16_to_cpu(rsp->result);
2694 status = __le16_to_cpu(rsp->status);
2696 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2699 chan = l2cap_get_chan_by_scid(conn, scid);
/* scid of 0 (pending) — fall back to matching the command ident. */
2703 chan = l2cap_get_chan_by_ident(conn, cmd->ident);
2711 case L2CAP_CR_SUCCESS:
2712 l2cap_state_change(chan, BT_CONFIG);
2715 clear_bit(CONF_CONNECT_PEND, &chan->conf_state);
2717 if (test_and_set_bit(CONF_REQ_SENT, &chan->conf_state))
2720 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2721 l2cap_build_conf_req(chan, req), req);
2722 chan->num_conf_req++;
2726 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
2730 /* don't delete l2cap channel if sk is owned by user */
2731 if (sock_owned_by_user(sk)) {
2732 l2cap_state_change(chan, BT_DISCONN);
2733 __clear_chan_timer(chan);
/* Let the disconnect timer finish the teardown later. */
2734 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
2738 l2cap_chan_del(chan, ECONNREFUSED);
/*
 * Settle the channel's FCS setting after configuration: non-ERTM/
 * streaming channels never use FCS; otherwise CRC16 is enabled unless
 * the peer asked for no FCS during negotiation.
 */
2746 static inline void set_default_fcs(struct l2cap_chan *chan)
2748 /* FCS is enabled only in ERTM or streaming mode, if one or both
2751 if (chan->mode != L2CAP_MODE_ERTM && chan->mode != L2CAP_MODE_STREAMING)
2752 chan->fcs = L2CAP_FCS_NONE;
2753 else if (!test_bit(CONF_NO_FCS_RECV, &chan->conf_state))
2754 chan->fcs = L2CAP_FCS_CRC16;
/*
 * Handle a Configure Request: reject if the channel is in the wrong
 * state or the accumulated option buffer would overflow; buffer partial
 * requests (continuation flag set) with an empty response; on the final
 * fragment parse the whole request, send our response, and — once both
 * directions are configured — bring the channel up (init ERTM state if
 * needed). Also sends our own Configure Request if not yet sent, and
 * resolves the PENDING/PENDING rendezvous.
 */
2757 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2759 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2762 struct l2cap_chan *chan;
2766 dcid = __le16_to_cpu(req->dcid);
2767 flags = __le16_to_cpu(req->flags);
2769 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2771 chan = l2cap_get_chan_by_scid(conn, dcid);
/* Config only valid in BT_CONFIG/BT_CONNECT2; otherwise reject CID. */
2777 if (chan->state != BT_CONFIG && chan->state != BT_CONNECT2) {
2778 struct l2cap_cmd_rej_cid rej;
2780 rej.reason = cpu_to_le16(L2CAP_REJ_INVALID_CID);
2781 rej.scid = cpu_to_le16(chan->scid);
2782 rej.dcid = cpu_to_le16(chan->dcid);
2784 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
2789 /* Reject if config buffer is too small. */
2790 len = cmd_len - sizeof(*req);
2791 if (len < 0 || chan->conf_len + len > sizeof(chan->conf_req)) {
2792 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2793 l2cap_build_conf_rsp(chan, rsp,
2794 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate this fragment's options into the channel buffer. */
2799 memcpy(chan->conf_req + chan->conf_len, req->data, len);
2800 chan->conf_len += len;
2802 if (flags & 0x0001) {
2803 /* Incomplete config. Send empty response. */
2804 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2805 l2cap_build_conf_rsp(chan, rsp,
2806 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2810 /* Complete config. */
2811 len = l2cap_parse_conf_req(chan, rsp);
2813 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2817 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2818 chan->num_conf_rsp++;
2820 /* Reset config buffer. */
2823 if (!test_bit(CONF_OUTPUT_DONE, &chan->conf_state))
/* Both directions configured: channel becomes operational. */
2826 if (test_bit(CONF_INPUT_DONE, &chan->conf_state)) {
2827 set_default_fcs(chan);
2829 l2cap_state_change(chan, BT_CONNECTED);
2831 chan->next_tx_seq = 0;
2832 chan->expected_tx_seq = 0;
2833 skb_queue_head_init(&chan->tx_q);
2834 if (chan->mode == L2CAP_MODE_ERTM)
2835 l2cap_ertm_init(chan);
2837 l2cap_chan_ready(sk);
2841 if (!test_and_set_bit(CONF_REQ_SENT, &chan->conf_state)) {
2843 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2844 l2cap_build_conf_req(chan, buf), buf);
2845 chan->num_conf_req++;
2848 /* Got Conf Rsp PENDING from remote side and asume we sent
2849 Conf Rsp PENDING in the code above */
2850 if (test_bit(CONF_REM_CONF_PEND, &chan->conf_state) &&
2851 test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2853 /* check compatibility */
2855 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2856 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2858 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2859 l2cap_build_conf_rsp(chan, rsp,
2860 L2CAP_CONF_SUCCESS, 0x0000), rsp);
/*
 * Handle a Configure Response by result code: SUCCESS commits the RFC
 * values; PENDING (with our local pending set) re-parses and answers
 * SUCCESS; UNACCEPT re-negotiates with a new Configure Request up to
 * L2CAP_CONF_MAX_CONF_RSP attempts; anything else tears the channel
 * down. When both directions finish, the channel goes BT_CONNECTED.
 */
2868 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2870 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2871 u16 scid, flags, result;
2872 struct l2cap_chan *chan;
2874 int len = cmd->len - sizeof(*rsp);
2876 scid = __le16_to_cpu(rsp->scid);
2877 flags = __le16_to_cpu(rsp->flags);
2878 result = __le16_to_cpu(rsp->result);
2880 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2881 scid, flags, result);
2883 chan = l2cap_get_chan_by_scid(conn, scid);
2890 case L2CAP_CONF_SUCCESS:
2891 l2cap_conf_rfc_get(chan, rsp->data, len);
2892 clear_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2895 case L2CAP_CONF_PENDING:
2896 set_bit(CONF_REM_CONF_PEND, &chan->conf_state);
2898 if (test_bit(CONF_LOC_CONF_PEND, &chan->conf_state)) {
2901 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
/* Parse failure: give up and disconnect. */
2904 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2908 /* check compatibility */
2910 clear_bit(CONF_LOC_CONF_PEND, &chan->conf_state);
2911 set_bit(CONF_OUTPUT_DONE, &chan->conf_state);
2913 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2914 l2cap_build_conf_rsp(chan, buf,
2915 L2CAP_CONF_SUCCESS, 0x0000), buf);
2919 case L2CAP_CONF_UNACCEPT:
2920 if (chan->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Peer's option list too large for our request buffer. */
2923 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2924 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2928 /* throw out any old stored conf requests */
2929 result = L2CAP_CONF_SUCCESS;
2930 len = l2cap_parse_conf_rsp(chan, rsp->data, len,
2933 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2937 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2938 L2CAP_CONF_REQ, len, req);
2939 chan->num_conf_req++;
2940 if (result != L2CAP_CONF_SUCCESS)
/* Default/reject path: abort the channel. */
2946 sk->sk_err = ECONNRESET;
2947 __set_chan_timer(chan, L2CAP_DISC_REJ_TIMEOUT);
2948 l2cap_send_disconn_req(conn, chan, ECONNRESET);
2955 set_bit(CONF_INPUT_DONE, &chan->conf_state);
2957 if (test_bit(CONF_OUTPUT_DONE, &chan->conf_state)) {
2958 set_default_fcs(chan);
2960 l2cap_state_change(chan, BT_CONNECTED);
2961 chan->next_tx_seq = 0;
2962 chan->expected_tx_seq = 0;
2963 skb_queue_head_init(&chan->tx_q);
2964 if (chan->mode == L2CAP_MODE_ERTM)
2965 l2cap_ertm_init(chan);
2967 l2cap_chan_ready(sk);
/*
 * Handle a Disconnect Request: look up our channel by its dcid, echo a
 * Disconnect Response, shut the socket down, then delete the channel —
 * deferred via the disconnect timer if userspace currently owns the
 * socket lock.
 */
2975 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2977 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
2978 struct l2cap_disconn_rsp rsp;
2980 struct l2cap_chan *chan;
2983 scid = __le16_to_cpu(req->scid);
2984 dcid = __le16_to_cpu(req->dcid);
2986 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid — look up by our local CID. */
2988 chan = l2cap_get_chan_by_scid(conn, dcid);
2994 rsp.dcid = cpu_to_le16(chan->scid);
2995 rsp.scid = cpu_to_le16(chan->dcid);
2996 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
2998 sk->sk_shutdown = SHUTDOWN_MASK;
3000 /* don't delete l2cap channel if sk is owned by user */
3001 if (sock_owned_by_user(sk)) {
3002 l2cap_state_change(chan, BT_DISCONN);
3003 __clear_chan_timer(chan);
3004 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
3009 l2cap_chan_del(chan, ECONNRESET);
3012 chan->ops->close(chan->data);
/*
 * Handle a Disconnect Response to our own Disconnect Request: find the
 * channel by scid and delete it (error 0 — clean close), deferring via
 * the disconnect timer when userspace owns the socket lock.
 */
3016 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3018 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3020 struct l2cap_chan *chan;
3023 scid = __le16_to_cpu(rsp->scid);
3024 dcid = __le16_to_cpu(rsp->dcid);
3026 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3028 chan = l2cap_get_chan_by_scid(conn, scid);
3034 /* don't delete l2cap channel if sk is owned by user */
3035 if (sock_owned_by_user(sk)) {
3036 l2cap_state_change(chan, BT_DISCONN);
3037 __clear_chan_timer(chan);
3038 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
3043 l2cap_chan_del(chan, 0);
3046 chan->ops->close(chan->data);
/* Answer an L2CAP Information Request.  Three cases:
 *  - FEAT_MASK:  report our extended-feature bitmask (ERTM/streaming
 *    and, when enabled, extended flow/window bits OR-ed in),
 *  - FIXED_CHAN: report the fixed-channel map (A2MP bit toggled at
 *    runtime),
 *  - anything else: NOTSUPP.
 * All multi-byte response fields are converted to little-endian. */
3050 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3052 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3055 type = __le16_to_cpu(req->type);
3057 BT_DBG("type 0x%4.4x", type);
3059 if (type == L2CAP_IT_FEAT_MASK) {
3061 u32 feat_mask = l2cap_feat_mask;
3062 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3063 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3064 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3066 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3069 feat_mask |= L2CAP_FEAT_EXT_FLOW
3070 | L2CAP_FEAT_EXT_WINDOW;
/* rsp->data may be unaligned inside buf; use the unaligned helper. */
3072 put_unaligned_le32(feat_mask, rsp->data);
3073 l2cap_send_cmd(conn, cmd->ident,
3074 L2CAP_INFO_RSP, sizeof(buf), buf);
3075 } else if (type == L2CAP_IT_FIXED_CHAN) {
3077 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
/* NOTE(review): l2cap_fixed_chan is mutated here without visible
 * locking -- presumably protected by the caller's context; verify. */
3080 l2cap_fixed_chan[0] |= L2CAP_FC_A2MP;
3082 l2cap_fixed_chan[0] &= ~L2CAP_FC_A2MP;
3084 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3085 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3086 memcpy(rsp->data, l2cap_fixed_chan, sizeof(l2cap_fixed_chan));
3087 l2cap_send_cmd(conn, cmd->ident,
3088 L2CAP_INFO_RSP, sizeof(buf), buf);
3090 struct l2cap_info_rsp rsp;
3091 rsp.type = cpu_to_le16(type);
3092 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3093 l2cap_send_cmd(conn, cmd->ident,
3094 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Process an L2CAP Information Response from the peer.  This drives
 * the two-step discovery at connection setup: a FEAT_MASK answer may
 * trigger a follow-up FIXED_CHAN request; once discovery is finished
 * (or failed), mark it done and kick off pending channel setups via
 * l2cap_conn_start(). */
3100 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3102 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3105 type = __le16_to_cpu(rsp->type);
3106 result = __le16_to_cpu(rsp->result);
3108 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3110 /* L2CAP Info req/rsp are unbound to channels, add extra checks */
3111 if (cmd->ident != conn->info_ident ||
3112 conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE)
3115 del_timer(&conn->info_timer);
/* Peer refused: give up on discovery but still start channels. */
3117 if (result != L2CAP_IR_SUCCESS) {
3118 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3119 conn->info_ident = 0;
3121 l2cap_conn_start(conn);
3126 if (type == L2CAP_IT_FEAT_MASK) {
3127 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask for its fixed-channel map next. */
3129 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3130 struct l2cap_info_req req;
3131 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3133 conn->info_ident = l2cap_get_ident(conn);
3135 l2cap_send_cmd(conn, conn->info_ident,
3136 L2CAP_INFO_REQ, sizeof(req), &req);
3138 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3139 conn->info_ident = 0;
3141 l2cap_conn_start(conn);
3143 } else if (type == L2CAP_IT_FIXED_CHAN) {
3144 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3145 conn->info_ident = 0;
3147 l2cap_conn_start(conn);
3153 static inline int l2cap_create_channel_req(struct l2cap_conn *conn,
3154 struct l2cap_cmd_hdr *cmd, u16 cmd_len,
3157 struct l2cap_create_chan_req *req = data;
3158 struct l2cap_create_chan_rsp rsp;
3161 if (cmd_len != sizeof(*req))
3167 psm = le16_to_cpu(req->psm);
3168 scid = le16_to_cpu(req->scid);
3170 BT_DBG("psm %d, scid %d, amp_id %d", psm, scid, req->amp_id);
3172 /* Placeholder: Always reject */
3174 rsp.scid = cpu_to_le16(scid);
3175 rsp.result = L2CAP_CR_NO_MEM;
3176 rsp.status = L2CAP_CS_NO_INFO;
3178 l2cap_send_cmd(conn, cmd->ident, L2CAP_CREATE_CHAN_RSP,
/* AMP Create Channel Response carries the same payload layout as a
 * classic Connection Response, so reuse that handler wholesale. */
3184 static inline int l2cap_create_channel_rsp(struct l2cap_conn *conn,
3185 struct l2cap_cmd_hdr *cmd, void *data)
3187 BT_DBG("conn %p", conn);
3189 return l2cap_connect_rsp(conn, cmd, data);
3192 static void l2cap_send_move_chan_rsp(struct l2cap_conn *conn, u8 ident,
3193 u16 icid, u16 result)
3195 struct l2cap_move_chan_rsp rsp;
3197 BT_DBG("icid %d, result %d", icid, result);
3199 rsp.icid = cpu_to_le16(icid);
3200 rsp.result = cpu_to_le16(result);
3202 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_RSP, sizeof(rsp), &rsp);
3205 static void l2cap_send_move_chan_cfm(struct l2cap_conn *conn,
3206 struct l2cap_chan *chan, u16 icid, u16 result)
3208 struct l2cap_move_chan_cfm cfm;
3211 BT_DBG("icid %d, result %d", icid, result);
3213 ident = l2cap_get_ident(conn);
3215 chan->ident = ident;
3217 cfm.icid = cpu_to_le16(icid);
3218 cfm.result = cpu_to_le16(result);
3220 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM, sizeof(cfm), &cfm);
3223 static void l2cap_send_move_chan_cfm_rsp(struct l2cap_conn *conn, u8 ident,
3226 struct l2cap_move_chan_cfm_rsp rsp;
3228 BT_DBG("icid %d", icid);
3230 rsp.icid = cpu_to_le16(icid);
3231 l2cap_send_cmd(conn, ident, L2CAP_MOVE_CHAN_CFM_RSP, sizeof(rsp), &rsp);
/* Handle a Move Channel Request.  AMP channel moves are not
 * implemented, so after length validation every request is refused
 * with L2CAP_MR_NOT_ALLOWED. */
3234 static inline int l2cap_move_channel_req(struct l2cap_conn *conn,
3235 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3237 struct l2cap_move_chan_req *req = data;
3239 u16 result = L2CAP_MR_NOT_ALLOWED;
/* Malformed PDU lengths are rejected before touching the payload. */
3241 if (cmd_len != sizeof(*req))
3244 icid = le16_to_cpu(req->icid);
3246 BT_DBG("icid %d, dest_amp_id %d", icid, req->dest_amp_id);
3251 /* Placeholder: Always refuse */
3252 l2cap_send_move_chan_rsp(conn, cmd->ident, icid, result);
/* Handle a Move Channel Response.  Moves are never initiated here, so
 * any response is answered with an UNCONFIRMED confirmation. */
3257 static inline int l2cap_move_channel_rsp(struct l2cap_conn *conn,
3258 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3260 struct l2cap_move_chan_rsp *rsp = data;
3263 if (cmd_len != sizeof(*rsp))
3266 icid = le16_to_cpu(rsp->icid);
3267 result = le16_to_cpu(rsp->result);
3269 BT_DBG("icid %d, result %d", icid, result);
3271 /* Placeholder: Always unconfirmed */
/* NOTE(review): chan is NULL here; l2cap_send_move_chan_cfm writes
 * chan->ident -- presumably safe only while this path is dead code. */
3272 l2cap_send_move_chan_cfm(conn, NULL, icid, L2CAP_MC_UNCONFIRMED);
/* Handle a Move Channel Confirmation: simply acknowledge it.  No
 * channel state is updated since moves are not implemented. */
3277 static inline int l2cap_move_channel_confirm(struct l2cap_conn *conn,
3278 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3280 struct l2cap_move_chan_cfm *cfm = data;
3283 if (cmd_len != sizeof(*cfm))
3286 icid = le16_to_cpu(cfm->icid);
3287 result = le16_to_cpu(cfm->result);
3289 BT_DBG("icid %d, result %d", icid, result);
3291 l2cap_send_move_chan_cfm_rsp(conn, cmd->ident, icid);
/* Handle a Move Channel Confirmation Response: validate and log only;
 * nothing to update while channel moves are unimplemented. */
3296 static inline int l2cap_move_channel_confirm_rsp(struct l2cap_conn *conn,
3297 struct l2cap_cmd_hdr *cmd, u16 cmd_len, void *data)
3299 struct l2cap_move_chan_cfm_rsp *rsp = data;
3302 if (cmd_len != sizeof(*rsp))
3305 icid = le16_to_cpu(rsp->icid);
3307 BT_DBG("icid %d", icid);
/* Validate LE connection-parameter-update values against the ranges
 * the Core Specification allows:
 *   - interval (units of 1.25ms): 6..3200, min <= max,
 *   - supervision timeout (units of 10ms): 10..3200, and it must span
 *     more than one maximum connection interval,
 *   - slave latency: <= 499 and small enough that latency+1 intervals
 *     still fit inside the supervision timeout.
 * Returns 0 when acceptable, -EINVAL otherwise.
 */
static inline int l2cap_check_conn_param(u16 min, u16 max, u16 latency,
							u16 to_multiplier)
{
	u16 max_latency;

	if (min < 6 || max > 3200 || min > max)
		return -EINVAL;

	if (to_multiplier < 10 || to_multiplier > 3200)
		return -EINVAL;

	/* to_multiplier*10ms must exceed max*1.25ms, i.e. to*8 > max. */
	if (to_multiplier * 8 <= max)
		return -EINVAL;

	max_latency = (to_multiplier * 8 / max) - 1;
	if (latency > max_latency || latency > 499)
		return -EINVAL;

	return 0;
}
/* Handle an LE Connection Parameter Update Request from the slave.
 * Only valid when we are master; the parameters are range-checked and
 * accepted/rejected in the response, and on acceptance the controller
 * is asked to apply them via hci_le_conn_update(). */
3333 static inline int l2cap_conn_param_update_req(struct l2cap_conn *conn,
3334 struct l2cap_cmd_hdr *cmd, u8 *data)
3336 struct hci_conn *hcon = conn->hcon;
3337 struct l2cap_conn_param_update_req *req;
3338 struct l2cap_conn_param_update_rsp rsp;
3339 u16 min, max, latency, to_multiplier, cmd_len;
/* Only the master may change connection parameters. */
3342 if (!(hcon->link_mode & HCI_LM_MASTER))
3345 cmd_len = __le16_to_cpu(cmd->len);
3346 if (cmd_len != sizeof(struct l2cap_conn_param_update_req))
3349 req = (struct l2cap_conn_param_update_req *) data;
3350 min = __le16_to_cpu(req->min);
3351 max = __le16_to_cpu(req->max);
3352 latency = __le16_to_cpu(req->latency);
3353 to_multiplier = __le16_to_cpu(req->to_multiplier);
3355 BT_DBG("min 0x%4.4x max 0x%4.4x latency: 0x%4.4x Timeout: 0x%4.4x",
3356 min, max, latency, to_multiplier);
3358 memset(&rsp, 0, sizeof(rsp));
3360 err = l2cap_check_conn_param(min, max, latency, to_multiplier);
3362 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_REJECTED);
3364 rsp.result = cpu_to_le16(L2CAP_CONN_PARAM_ACCEPTED);
3366 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_PARAM_UPDATE_RSP,
/* Only reached on acceptance: push the new parameters to the LL. */
3370 hci_le_conn_update(hcon, min, max, latency, to_multiplier);
/* Dispatch one BR/EDR signaling command (CID 0x0001) to its handler.
 * Echo requests are answered inline by reflecting the payload; unknown
 * opcodes are logged (the caller sends a Command Reject based on the
 * returned error). */
3375 static inline int l2cap_bredr_sig_cmd(struct l2cap_conn *conn,
3376 struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3380 switch (cmd->code) {
3381 case L2CAP_COMMAND_REJ:
3382 l2cap_command_rej(conn, cmd, data);
3385 case L2CAP_CONN_REQ:
3386 err = l2cap_connect_req(conn, cmd, data);
3389 case L2CAP_CONN_RSP:
3390 err = l2cap_connect_rsp(conn, cmd, data);
3393 case L2CAP_CONF_REQ:
3394 err = l2cap_config_req(conn, cmd, cmd_len, data);
3397 case L2CAP_CONF_RSP:
3398 err = l2cap_config_rsp(conn, cmd, data);
3401 case L2CAP_DISCONN_REQ:
3402 err = l2cap_disconnect_req(conn, cmd, data);
3405 case L2CAP_DISCONN_RSP:
3406 err = l2cap_disconnect_rsp(conn, cmd, data);
3409 case L2CAP_ECHO_REQ:
/* Echo: bounce the request payload straight back to the peer. */
3410 l2cap_send_cmd(conn, cmd->ident, L2CAP_ECHO_RSP, cmd_len, data);
3413 case L2CAP_ECHO_RSP:
3416 case L2CAP_INFO_REQ:
3417 err = l2cap_information_req(conn, cmd, data);
3420 case L2CAP_INFO_RSP:
3421 err = l2cap_information_rsp(conn, cmd, data);
3424 case L2CAP_CREATE_CHAN_REQ:
3425 err = l2cap_create_channel_req(conn, cmd, cmd_len, data);
3428 case L2CAP_CREATE_CHAN_RSP:
3429 err = l2cap_create_channel_rsp(conn, cmd, data);
3432 case L2CAP_MOVE_CHAN_REQ:
3433 err = l2cap_move_channel_req(conn, cmd, cmd_len, data);
3436 case L2CAP_MOVE_CHAN_RSP:
3437 err = l2cap_move_channel_rsp(conn, cmd, cmd_len, data);
3440 case L2CAP_MOVE_CHAN_CFM:
3441 err = l2cap_move_channel_confirm(conn, cmd, cmd_len, data);
3444 case L2CAP_MOVE_CHAN_CFM_RSP:
3445 err = l2cap_move_channel_confirm_rsp(conn, cmd, cmd_len, data);
3449 BT_ERR("Unknown BR/EDR signaling command 0x%2.2x", cmd->code);
/* Dispatch one LE signaling command (CID 0x0005).  Only the
 * connection-parameter-update exchange is meaningful on LE links;
 * command-reject and update-response PDUs are silently consumed. */
3457 static inline int l2cap_le_sig_cmd(struct l2cap_conn *conn,
3458 struct l2cap_cmd_hdr *cmd, u8 *data)
3460 switch (cmd->code) {
3461 case L2CAP_COMMAND_REJ:
3464 case L2CAP_CONN_PARAM_UPDATE_REQ:
3465 return l2cap_conn_param_update_req(conn, cmd, data);
3467 case L2CAP_CONN_PARAM_UPDATE_RSP:
3471 BT_ERR("Unknown LE signaling command 0x%2.2x", cmd->code);
/* Parse a signaling-channel skb, which may contain several signaling
 * PDUs back to back.  Each PDU is copied into a local header, length-
 * checked against the remaining buffer, and routed to the LE or BR/EDR
 * dispatcher; failures are answered with a Command Reject. */
3476 static inline void l2cap_sig_channel(struct l2cap_conn *conn,
3477 struct sk_buff *skb)
3479 u8 *data = skb->data;
3481 struct l2cap_cmd_hdr cmd;
/* Give raw (sniffer) sockets a copy of the whole signaling packet. */
3484 l2cap_raw_recv(conn, skb);
3486 while (len >= L2CAP_CMD_HDR_SIZE) {
3488 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3489 data += L2CAP_CMD_HDR_SIZE;
3490 len -= L2CAP_CMD_HDR_SIZE;
3492 cmd_len = le16_to_cpu(cmd.len);
3494 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A PDU claiming more payload than remains, or ident 0 (reserved),
 * means the stream is corrupt -- stop parsing this skb. */
3496 if (cmd_len > len || !cmd.ident) {
3497 BT_DBG("corrupted command");
3501 if (conn->hcon->type == LE_LINK)
3502 err = l2cap_le_sig_cmd(conn, &cmd, data);
3504 err = l2cap_bredr_sig_cmd(conn, &cmd, cmd_len, data);
3507 struct l2cap_cmd_rej_unk rej;
3509 BT_ERR("Wrong link type (%d)", err);
3511 /* FIXME: Map err to a valid reason */
3512 rej.reason = cpu_to_le16(L2CAP_REJ_NOT_UNDERSTOOD);
3513 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the trailing CRC16 FCS of an ERTM/streaming
 * frame.  Returns 0 when the FCS matches or FCS is disabled on the
 * channel; -EBADMSG-style failure otherwise (exact value elided). */
3523 static int l2cap_check_fcs(struct l2cap_chan *chan, struct sk_buff *skb)
3525 u16 our_fcs, rcv_fcs;
/* The CRC covers the L2CAP header too, whose size depends on whether
 * extended control fields are in use. */
3528 if (test_bit(FLAG_EXT_CTRL, &chan->flags))
3529 hdr_size = L2CAP_EXT_HDR_SIZE;
3531 hdr_size = L2CAP_ENH_HDR_SIZE;
3533 if (chan->fcs == L2CAP_FCS_CRC16) {
/* Order matters: after skb_trim shrinks len by FCS_SIZE,
 * data + len points exactly at the (still present) FCS bytes. */
3534 skb_trim(skb, skb->len - L2CAP_FCS_SIZE);
3535 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3536 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3538 if (our_fcs != rcv_fcs)
/* After a poll from the peer, send whatever acknowledges our state:
 * RNR when locally busy, otherwise pending retransmissions/I-frames,
 * and finally a plain RR if nothing else carried the ack. */
3544 static inline void l2cap_send_i_or_rr_or_rnr(struct l2cap_chan *chan)
3548 chan->frames_sent = 0;
3550 control |= __set_reqseq(chan, chan->buffer_seq);
3552 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
3553 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3554 l2cap_send_sframe(chan, control);
3555 set_bit(CONN_RNR_SENT, &chan->conn_state);
3558 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state))
3559 l2cap_retransmit_frames(chan);
3561 l2cap_ertm_send(chan);
/* If neither retransmission nor new I-frames went out, the peer
 * still needs an explicit RR acknowledgment. */
3563 if (!test_bit(CONN_LOCAL_BUSY, &chan->conn_state) &&
3564 chan->frames_sent == 0) {
3565 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3566 l2cap_send_sframe(chan, control);
/* Insert an out-of-order I-frame into the SREJ hold queue, keeping the
 * queue sorted by tx_seq distance from buffer_seq (modular order).
 * Duplicates are detected via an equal tx_seq already queued. */
3570 static int l2cap_add_to_srej_queue(struct l2cap_chan *chan, struct sk_buff *skb, u16 tx_seq, u8 sar)
3572 struct sk_buff *next_skb;
3573 int tx_seq_offset, next_tx_seq_offset;
3575 bt_cb(skb)->tx_seq = tx_seq;
3576 bt_cb(skb)->sar = sar;
3578 next_skb = skb_peek(&chan->srej_q);
/* Offsets are distances modulo the sequence space, anchored at
 * buffer_seq, so wrap-around compares correctly. */
3580 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3583 if (bt_cb(next_skb)->tx_seq == tx_seq)
3586 next_tx_seq_offset = __seq_offset(chan,
3587 bt_cb(next_skb)->tx_seq, chan->buffer_seq);
3589 if (next_tx_seq_offset > tx_seq_offset) {
3590 __skb_queue_before(&chan->srej_q, next_skb, skb)
3594 if (skb_queue_is_last(&chan->srej_q, next_skb))
3597 next_skb = skb_queue_next(&chan->srej_q, next_skb);
/* Largest offset seen so far: append at the tail. */
3600 __skb_queue_tail(&chan->srej_q, skb);
/* Append @new_frag to @skb's frag_list in O(1) using *last_frag as a
 * cached tail pointer, and keep skb's aggregate accounting fields
 * (len/data_len/truesize) in sync. */
3605 static void append_skb_frag(struct sk_buff *skb,
3606 struct sk_buff *new_frag, struct sk_buff **last_frag)
3608 /* skb->len reflects data in skb as well as all fragments
3609 * skb->data_len reflects only data in fragments
/* First fragment: start the frag_list. */
3611 if (!skb_has_frag_list(skb))
3612 skb_shinfo(skb)->frag_list = new_frag;
3614 new_frag->next = NULL;
3616 (*last_frag)->next = new_frag;
3617 *last_frag = new_frag;
3619 skb->len += new_frag->len;
3620 skb->data_len += new_frag->len;
3621 skb->truesize += new_frag->truesize;
/* Reassemble an SDU from I-frames according to the frame's SAR bits:
 * unsegmented frames go straight to the channel's recv op; START
 * frames pull the 16-bit SDU length and begin accumulation; CONTINUE/
 * END frames are appended via frag_list.  Oversized or inconsistent
 * SDUs abort reassembly (accumulated data is freed on error). */
3624 static int l2cap_reassemble_sdu(struct l2cap_chan *chan, struct sk_buff *skb, u32 control)
3628 switch (__get_ctrl_sar(chan, control)) {
3629 case L2CAP_SAR_UNSEGMENTED:
3633 err = chan->ops->recv(chan->data, skb);
3636 case L2CAP_SAR_START:
/* START frames prefix the payload with the total SDU length. */
3640 chan->sdu_len = get_unaligned_le16(skb->data);
3641 skb_pull(skb, L2CAP_SDULEN_SIZE);
3643 if (chan->sdu_len > chan->imtu) {
3648 if (skb->len >= chan->sdu_len)
3652 chan->sdu_last_frag = skb;
3658 case L2CAP_SAR_CONTINUE:
3662 append_skb_frag(chan->sdu, skb,
3663 &chan->sdu_last_frag);
/* A CONTINUE frame must not already complete the SDU. */
3666 if (chan->sdu->len >= chan->sdu_len)
3676 append_skb_frag(chan->sdu, skb,
3677 &chan->sdu_last_frag);
/* END frame: total length must now match exactly. */
3680 if (chan->sdu->len != chan->sdu_len)
3683 err = chan->ops->recv(chan->data, chan->sdu);
3686 /* Reassembly complete */
3688 chan->sdu_last_frag = NULL;
/* Error path: drop whatever was accumulated so far. */
3696 kfree_skb(chan->sdu);
3698 chan->sdu_last_frag = NULL;
/* Enter ERTM local-busy state: tell the peer to stop sending by
 * emitting an RNR S-frame, remember that we sent it, and stop the ack
 * timer (nothing will be acked while busy). */
3705 static void l2cap_ertm_enter_local_busy(struct l2cap_chan *chan)
3709 BT_DBG("chan %p, Enter local busy", chan);
3711 set_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3713 control = __set_reqseq(chan, chan->buffer_seq);
3714 control |= __set_ctrl_super(chan, L2CAP_SUPER_RNR);
3715 l2cap_send_sframe(chan, control);
3717 set_bit(CONN_RNR_SENT, &chan->conn_state);
3719 __clear_ack_timer(chan);
/* Leave ERTM local-busy state.  If an RNR was previously sent, poll
 * the peer with RR(P=1) and start the monitor timer to await the
 * final response; then clear both busy-tracking bits. */
3722 static void l2cap_ertm_exit_local_busy(struct l2cap_chan *chan)
3726 if (!test_bit(CONN_RNR_SENT, &chan->conn_state))
3729 control = __set_reqseq(chan, chan->buffer_seq);
3730 control |= __set_ctrl_poll(chan);
3731 control |= __set_ctrl_super(chan, L2CAP_SUPER_RR);
3732 l2cap_send_sframe(chan, control);
3733 chan->retry_count = 1;
/* Poll sent: switch from retransmission to monitor timing. */
3735 __clear_retrans_timer(chan);
3736 __set_monitor_timer(chan);
3738 set_bit(CONN_WAIT_F, &chan->conn_state);
3741 clear_bit(CONN_LOCAL_BUSY, &chan->conn_state);
3742 clear_bit(CONN_RNR_SENT, &chan->conn_state);
3744 BT_DBG("chan %p, Exit local busy", chan);
/* Public hook for the socket layer to signal receive-buffer pressure.
 * Only meaningful in ERTM mode: toggles local-busy on/off. */
3747 void l2cap_chan_busy(struct l2cap_chan *chan, int busy)
3749 if (chan->mode == L2CAP_MODE_ERTM) {
3751 l2cap_ertm_enter_local_busy(chan);
3753 l2cap_ertm_exit_local_busy(chan);
/* Drain the SREJ hold queue: deliver consecutively-sequenced frames
 * starting at @tx_seq to the reassembler until a gap is found or we
 * become locally busy.  Advances buffer_seq_srej per delivered frame. */
3757 static void l2cap_check_srej_gap(struct l2cap_chan *chan, u16 tx_seq)
3759 struct sk_buff *skb;
3762 while ((skb = skb_peek(&chan->srej_q)) &&
3763 !test_bit(CONN_LOCAL_BUSY, &chan->conn_state)) {
/* Queue is sorted; a mismatch at the head means the gap persists. */
3766 if (bt_cb(skb)->tx_seq != tx_seq)
3769 skb = skb_dequeue(&chan->srej_q);
3770 control = __set_ctrl_sar(chan, bt_cb(skb)->sar);
3771 err = l2cap_reassemble_sdu(chan, skb, control);
3774 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3778 chan->buffer_seq_srej = __next_seq(chan, chan->buffer_seq_srej);
3779 tx_seq = __next_seq(chan, tx_seq);
/* Re-send SREJ S-frames for every sequence number still outstanding in
 * the srej list up to (and including re-queuing) @tx_seq.  Entries
 * before the match are re-requested and moved to the list tail. */
3783 static void l2cap_resend_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3785 struct srej_list *l, *tmp;
3788 list_for_each_entry_safe(l, tmp, &chan->srej_l, list) {
3789 if (l->tx_seq == tx_seq) {
3794 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3795 control |= __set_reqseq(chan, l->tx_seq);
3796 l2cap_send_sframe(chan, control);
/* Rotate the entry to the tail: it is the most recently re-asked. */
3798 list_add_tail(&l->list, &chan->srej_l);
/* Send one SREJ per missing sequence number between expected_tx_seq
 * and the just-received @tx_seq, recording each in the srej list so
 * the retransmissions can be matched later.  Returns a negative errno
 * on allocation failure (elided return path). */
3802 static int l2cap_send_srejframe(struct l2cap_chan *chan, u16 tx_seq)
3804 struct srej_list *new;
3807 while (tx_seq != chan->expected_tx_seq) {
3808 control = __set_ctrl_super(chan, L2CAP_SUPER_SREJ);
3809 control |= __set_reqseq(chan, chan->expected_tx_seq);
3810 l2cap_send_sframe(chan, control);
/* GFP_ATOMIC: this runs in the receive path, no sleeping. */
3812 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3816 new->tx_seq = chan->expected_tx_seq;
3818 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3820 list_add_tail(&new->list, &chan->srej_l);
/* Account for the frame that did arrive (tx_seq itself). */
3823 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
/* Core ERTM I-frame receive state machine.  Validates the sequence
 * number against the transmit window, handles the in-order fast path,
 * the SREJ_SENT recovery state (queueing, gap-filling, duplicate
 * detection) and entry into SREJ recovery when a gap first appears.
 * Acks are batched: roughly one RR per tx_win/6 frames, with the ack
 * timer covering the remainder. */
3828 static inline int l2cap_data_channel_iframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
3830 u16 tx_seq = __get_txseq(chan, rx_control);
3831 u16 req_seq = __get_reqseq(chan, rx_control);
3832 u8 sar = __get_ctrl_sar(chan, rx_control);
3833 int tx_seq_offset, expected_tx_seq_offset;
3834 int num_to_ack = (chan->tx_win/6) + 1;
3837 BT_DBG("chan %p len %d tx_seq %d rx_control 0x%8.8x", chan, skb->len,
3838 tx_seq, rx_control);
/* F-bit answering our earlier poll: leave the WAIT_F state. */
3840 if (__is_ctrl_final(chan, rx_control) &&
3841 test_bit(CONN_WAIT_F, &chan->conn_state)) {
3842 __clear_monitor_timer(chan);
3843 if (chan->unacked_frames > 0)
3844 __set_retrans_timer(chan);
3845 clear_bit(CONN_WAIT_F, &chan->conn_state);
/* Piggybacked ack: everything below req_seq is acknowledged. */
3848 chan->expected_ack_seq = req_seq;
3849 l2cap_drop_acked_frames(chan);
3851 tx_seq_offset = __seq_offset(chan, tx_seq, chan->buffer_seq);
3853 /* invalid tx_seq */
3854 if (tx_seq_offset >= chan->tx_win) {
3855 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3859 if (test_bit(CONN_LOCAL_BUSY, &chan->conn_state))
3862 if (tx_seq == chan->expected_tx_seq)
3865 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3866 struct srej_list *first;
3868 first = list_first_entry(&chan->srej_l,
3869 struct srej_list, list);
/* The oldest requested frame arrived: try to close the gap. */
3870 if (tx_seq == first->tx_seq) {
3871 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3872 l2cap_check_srej_gap(chan, tx_seq);
3874 list_del(&first->list);
/* All SREJ'd frames recovered: leave SREJ_SENT state. */
3877 if (list_empty(&chan->srej_l)) {
3878 chan->buffer_seq = chan->buffer_seq_srej;
3879 clear_bit(CONN_SREJ_SENT, &chan->conn_state);
3880 l2cap_send_ack(chan);
3881 BT_DBG("chan %p, Exit SREJ_SENT", chan);
3884 struct srej_list *l;
3886 /* duplicated tx_seq */
3887 if (l2cap_add_to_srej_queue(chan, skb, tx_seq, sar) < 0)
3890 list_for_each_entry(l, &chan->srej_l, list) {
3891 if (l->tx_seq == tx_seq) {
3892 l2cap_resend_srejframe(chan, tx_seq);
3897 err = l2cap_send_srejframe(chan, tx_seq);
3899 l2cap_send_disconn_req(chan->conn, chan, -err);
3904 expected_tx_seq_offset = __seq_offset(chan,
3905 chan->expected_tx_seq, chan->buffer_seq);
3907 /* duplicated tx_seq */
3908 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-order frame: enter SREJ recovery. */
3911 set_bit(CONN_SREJ_SENT, &chan->conn_state);
3913 BT_DBG("chan %p, Enter SREJ", chan);
3915 INIT_LIST_HEAD(&chan->srej_l);
3916 chan->buffer_seq_srej = chan->buffer_seq;
3918 __skb_queue_head_init(&chan->srej_q);
3919 l2cap_add_to_srej_queue(chan, skb, tx_seq, sar);
3921 set_bit(CONN_SEND_PBIT, &chan->conn_state);
3923 err = l2cap_send_srejframe(chan, tx_seq);
3925 l2cap_send_disconn_req(chan->conn, chan, -err);
3929 __clear_ack_timer(chan);
/* In-order fast path (label target, elided): deliver or hold. */
3934 chan->expected_tx_seq = __next_seq(chan, chan->expected_tx_seq);
3936 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3937 bt_cb(skb)->tx_seq = tx_seq;
3938 bt_cb(skb)->sar = sar;
3939 __skb_queue_tail(&chan->srej_q, skb);
3943 err = l2cap_reassemble_sdu(chan, skb, rx_control);
3944 chan->buffer_seq = __next_seq(chan, chan->buffer_seq);
3947 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
3951 if (__is_ctrl_final(chan, rx_control)) {
3952 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3953 l2cap_retransmit_frames(chan);
/* Batch acks: explicit RR every num_to_ack frames, timer otherwise. */
3957 chan->num_acked = (chan->num_acked + 1) % num_to_ack;
3958 if (chan->num_acked == num_to_ack - 1)
3959 l2cap_send_ack(chan);
3961 __set_ack_timer(chan);
/* Handle a received RR (Receive Ready) S-frame: ack outstanding
 * I-frames up to req_seq, answer a P-bit poll with our own status,
 * and on an F-bit final (or plain RR) resume/continue transmission. */
3970 static inline void l2cap_data_channel_rrframe(struct l2cap_chan *chan, u32 rx_control)
3972 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan,
3973 __get_reqseq(chan, rx_control), rx_control);
3975 chan->expected_ack_seq = __get_reqseq(chan, rx_control);
3976 l2cap_drop_acked_frames(chan);
3978 if (__is_ctrl_poll(chan, rx_control)) {
/* Peer polls us: our next frame must carry the F-bit. */
3979 set_bit(CONN_SEND_FBIT, &chan->conn_state);
3980 if (test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
3981 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3982 (chan->unacked_frames > 0))
3983 __set_retrans_timer(chan);
3985 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
3986 l2cap_send_srejtail(chan);
3988 l2cap_send_i_or_rr_or_rnr(chan);
3991 } else if (__is_ctrl_final(chan, rx_control)) {
3992 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
/* F-bit without a pending REJ action: retransmit from req_seq. */
3994 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
3995 l2cap_retransmit_frames(chan);
3998 if (test_bit(CONN_REMOTE_BUSY, &chan->conn_state) &&
3999 (chan->unacked_frames > 0))
4000 __set_retrans_timer(chan);
4002 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4003 if (test_bit(CONN_SREJ_SENT, &chan->conn_state))
4004 l2cap_send_ack(chan);
4006 l2cap_ertm_send(chan);
/* Handle a received REJ S-frame: the peer rejects everything from
 * req_seq onward.  Ack the frames before it, then retransmit; while
 * waiting for a final (WAIT_F) remember the REJ so the F-bit handler
 * does not retransmit a second time. */
4010 static inline void l2cap_data_channel_rejframe(struct l2cap_chan *chan, u32 rx_control)
4012 u16 tx_seq = __get_reqseq(chan, rx_control);
4014 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4016 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4018 chan->expected_ack_seq = tx_seq;
4019 l2cap_drop_acked_frames(chan);
4021 if (__is_ctrl_final(chan, rx_control)) {
4022 if (!test_and_clear_bit(CONN_REJ_ACT, &chan->conn_state))
4023 l2cap_retransmit_frames(chan);
4025 l2cap_retransmit_frames(chan);
4027 if (test_bit(CONN_WAIT_F, &chan->conn_state))
4028 set_bit(CONN_REJ_ACT, &chan->conn_state);
/* Handle a received SREJ S-frame: selectively retransmit the single
 * I-frame req_seq.  P-bit polls also ack earlier frames and demand an
 * F-bit reply; the SREJ_ACT/srej_save_reqseq pair suppresses the
 * duplicate retransmit when the matching final arrives. */
4031 static inline void l2cap_data_channel_srejframe(struct l2cap_chan *chan, u32 rx_control)
4033 u16 tx_seq = __get_reqseq(chan, rx_control);
4035 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4037 clear_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4039 if (__is_ctrl_poll(chan, rx_control)) {
4040 chan->expected_ack_seq = tx_seq;
4041 l2cap_drop_acked_frames(chan);
4043 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4044 l2cap_retransmit_one_frame(chan, tx_seq);
4046 l2cap_ertm_send(chan);
4048 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4049 chan->srej_save_reqseq = tx_seq;
4050 set_bit(CONN_SREJ_ACT, &chan->conn_state);
4052 } else if (__is_ctrl_final(chan, rx_control)) {
/* Final for an SREJ we already served: do not retransmit again. */
4053 if (test_bit(CONN_SREJ_ACT, &chan->conn_state) &&
4054 chan->srej_save_reqseq == tx_seq)
4055 clear_bit(CONN_SREJ_ACT, &chan->conn_state);
4057 l2cap_retransmit_one_frame(chan, tx_seq);
4059 l2cap_retransmit_one_frame(chan, tx_seq);
4060 if (test_bit(CONN_WAIT_F, &chan->conn_state)) {
4061 chan->srej_save_reqseq = tx_seq;
4062 set_bit(CONN_SREJ_ACT, &chan->conn_state);
/* Handle a received RNR (Receive Not Ready): mark the peer busy, ack
 * up to req_seq, stop retransmitting, and answer polls -- with the
 * SREJ tail while in SREJ recovery, plain RR/RNR status otherwise. */
4067 static inline void l2cap_data_channel_rnrframe(struct l2cap_chan *chan, u32 rx_control)
4069 u16 tx_seq = __get_reqseq(chan, rx_control);
4071 BT_DBG("chan %p, req_seq %d ctrl 0x%8.8x", chan, tx_seq, rx_control);
4073 set_bit(CONN_REMOTE_BUSY, &chan->conn_state);
4074 chan->expected_ack_seq = tx_seq;
4075 l2cap_drop_acked_frames(chan);
4077 if (__is_ctrl_poll(chan, rx_control))
4078 set_bit(CONN_SEND_FBIT, &chan->conn_state);
4080 if (!test_bit(CONN_SREJ_SENT, &chan->conn_state)) {
/* Peer is busy: pause the retransmission timer. */
4081 __clear_retrans_timer(chan);
4082 if (__is_ctrl_poll(chan, rx_control))
4083 l2cap_send_rr_or_rnr(chan, L2CAP_CTRL_FINAL);
4087 if (__is_ctrl_poll(chan, rx_control)) {
4088 l2cap_send_srejtail(chan);
4090 rx_control = __set_ctrl_super(chan, L2CAP_SUPER_RR);
4091 l2cap_send_sframe(chan, rx_control);
/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler.  An
 * F-bit answering our poll clears WAIT_F and re-arms the retransmit
 * timer if unacked I-frames remain.  The skb carries no payload and
 * is freed after dispatch (free elided from view). */
4095 static inline int l2cap_data_channel_sframe(struct l2cap_chan *chan, u32 rx_control, struct sk_buff *skb)
4097 BT_DBG("chan %p rx_control 0x%8.8x len %d", chan, rx_control, skb->len);
4099 if (__is_ctrl_final(chan, rx_control) &&
4100 test_bit(CONN_WAIT_F, &chan->conn_state)) {
4101 __clear_monitor_timer(chan);
4102 if (chan->unacked_frames > 0)
4103 __set_retrans_timer(chan);
4104 clear_bit(CONN_WAIT_F, &chan->conn_state);
4107 switch (__get_ctrl_super(chan, rx_control)) {
4108 case L2CAP_SUPER_RR:
4109 l2cap_data_channel_rrframe(chan, rx_control);
4112 case L2CAP_SUPER_REJ:
4113 l2cap_data_channel_rejframe(chan, rx_control);
4116 case L2CAP_SUPER_SREJ:
4117 l2cap_data_channel_srejframe(chan, rx_control);
4120 case L2CAP_SUPER_RNR:
4121 l2cap_data_channel_rnrframe(chan, rx_control);
/* Entry point for one received ERTM PDU (also used as the socket
 * backlog handler).  Strips the control field, checks FCS and payload
 * length against MPS, validates the piggybacked req_seq against the
 * window of unacked frames, then routes to the I- or S-frame handler.
 * Protocol violations tear the channel down with ECONNRESET. */
4129 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4131 struct l2cap_chan *chan = l2cap_pi(sk)->chan;
4134 int len, next_tx_seq_offset, req_seq_offset;
4136 control = __get_control(chan, skb->data);
4137 skb_pull(skb, __ctrl_size(chan));
4141 * We can just drop the corrupted I-frame here.
4142 * Receiver will miss it and start proper recovery
4143 * procedures and ask retransmission.
4145 if (l2cap_check_fcs(chan, skb))
/* Payload length excludes the SDU-length prefix and the FCS. */
4148 if (__is_sar_start(chan, control) && !__is_sframe(chan, control))
4149 len -= L2CAP_SDULEN_SIZE;
4151 if (chan->fcs == L2CAP_FCS_CRC16)
4152 len -= L2CAP_FCS_SIZE;
4154 if (len > chan->mps) {
4155 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4159 req_seq = __get_reqseq(chan, control);
4161 req_seq_offset = __seq_offset(chan, req_seq, chan->expected_ack_seq);
4163 next_tx_seq_offset = __seq_offset(chan, chan->next_tx_seq,
4164 chan->expected_ack_seq);
4166 /* check for invalid req-seq */
4167 if (req_seq_offset > next_tx_seq_offset) {
4168 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4172 if (!__is_sframe(chan, control)) {
/* I-frames must carry a payload (len > 0, check elided). */
4174 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4178 l2cap_data_channel_iframe(chan, control, skb);
/* S-frames must carry no payload. */
4182 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4186 l2cap_data_channel_sframe(chan, control, skb);
/* Deliver a data PDU to the connection-oriented channel identified by
 * @cid.  Per channel mode:
 *  - BASIC: bounds-check against imtu and hand to the socket,
 *  - ERTM: run the full state machine (or queue on the socket backlog
 *    when userspace holds the lock),
 *  - STREAMING: verify FCS/length, discard partial SDUs on sequence
 *    gaps (no retransmission in this mode), then reassemble. */
4196 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4198 struct l2cap_chan *chan;
4199 struct sock *sk = NULL;
4204 chan = l2cap_get_chan_by_scid(conn, cid);
4206 BT_DBG("unknown cid 0x%4.4x", cid);
4212 BT_DBG("chan %p, len %d", chan, skb->len);
4214 if (chan->state != BT_CONNECTED)
4217 switch (chan->mode) {
4218 case L2CAP_MODE_BASIC:
4219 /* If socket recv buffers overflows we drop data here
4220 * which is *bad* because L2CAP has to be reliable.
4221 * But we don't have any other choice. L2CAP doesn't
4222 * provide flow control mechanism. */
4224 if (chan->imtu < skb->len)
4227 if (!chan->ops->recv(chan->data, skb))
4231 case L2CAP_MODE_ERTM:
4232 if (!sock_owned_by_user(sk)) {
4233 l2cap_ertm_data_rcv(sk, skb);
/* Socket locked by user: defer via the backlog queue. */
4235 if (sk_add_backlog(sk, skb))
4241 case L2CAP_MODE_STREAMING:
4242 control = __get_control(chan, skb->data);
4243 skb_pull(skb, __ctrl_size(chan));
4246 if (l2cap_check_fcs(chan, skb))
4249 if (__is_sar_start(chan, control))
4250 len -= L2CAP_SDULEN_SIZE;
4252 if (chan->fcs == L2CAP_FCS_CRC16)
4253 len -= L2CAP_FCS_SIZE;
/* Streaming mode carries I-frames only; S-frames are invalid. */
4255 if (len > chan->mps || len < 0 || __is_sframe(chan, control))
4258 tx_seq = __get_txseq(chan, control);
4260 if (chan->expected_tx_seq != tx_seq) {
4261 /* Frame(s) missing - must discard partial SDU */
4262 kfree_skb(chan->sdu);
4264 chan->sdu_last_frag = NULL;
4267 /* TODO: Notify userland of missing data */
4270 chan->expected_tx_seq = __next_seq(chan, tx_seq);
4272 if (l2cap_reassemble_sdu(chan, skb, control) == -EMSGSIZE)
4273 l2cap_send_disconn_req(chan->conn, chan, ECONNRESET);
4278 BT_DBG("chan %p: bad mode 0x%2.2x", chan, chan->mode);
/* Deliver a connectionless (CID 0x0002) PDU to a socket bound to the
 * given PSM on the local address; silently dropped when none matches,
 * the state is wrong, or the payload exceeds the socket's imtu. */
4292 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4294 struct sock *sk = NULL;
4295 struct l2cap_chan *chan;
4297 chan = l2cap_global_chan_by_psm(0, psm, conn->src);
4305 BT_DBG("sk %p, len %d", sk, skb->len);
4307 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4310 if (chan->imtu < skb->len)
4313 if (!chan->ops->recv(chan->data, skb))
/* Deliver an ATT fixed-channel (LE data, CID 0x0004) PDU to the socket
 * listening on that CID for the local address; mirrors the
 * connectionless path's state and imtu checks. */
4325 static inline int l2cap_att_channel(struct l2cap_conn *conn, __le16 cid, struct sk_buff *skb)
4327 struct sock *sk = NULL;
4328 struct l2cap_chan *chan;
4330 chan = l2cap_global_chan_by_scid(0, cid, conn->src);
4338 BT_DBG("sk %p, len %d", sk, skb->len);
4340 if (chan->state != BT_BOUND && chan->state != BT_CONNECTED)
4343 if (chan->imtu < skb->len)
4346 if (!chan->ops->recv(chan->data, skb))
/* Top-level demultiplexer for a complete L2CAP frame: strip the basic
 * header, verify the advertised length, and route by destination CID
 * (signaling, connectionless, ATT, SMP, or a data channel). */
4358 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4360 struct l2cap_hdr *lh = (void *) skb->data;
4364 skb_pull(skb, L2CAP_HDR_SIZE);
4365 cid = __le16_to_cpu(lh->cid);
4366 len = __le16_to_cpu(lh->len);
/* Header length must match what actually arrived after reassembly. */
4368 if (len != skb->len) {
4373 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4376 case L2CAP_CID_LE_SIGNALING:
4377 case L2CAP_CID_SIGNALING:
4378 l2cap_sig_channel(conn, skb);
4381 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry a 16-bit PSM before the payload. */
4382 psm = get_unaligned_le16(skb->data);
4384 l2cap_conless_channel(conn, psm, skb);
4387 case L2CAP_CID_LE_DATA:
4388 l2cap_att_channel(conn, cid, skb);
/* SMP fixed channel: a failure there kills the whole link. */
4392 if (smp_sig_channel(conn, skb))
4393 l2cap_conn_del(conn->hcon, EACCES);
4397 l2cap_data_channel(conn, cid, skb);
4402 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback asked before accepting an incoming ACL connection:
 * scan listening channels and build the link-mode mask (accept and,
 * per-channel, master role).  Sockets bound to this adapter's own
 * address take precedence over wildcard (BDADDR_ANY) listeners. */
4404 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4406 int exact = 0, lm1 = 0, lm2 = 0;
4407 struct l2cap_chan *c;
4409 if (type != ACL_LINK)
4412 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4414 /* Find listening sockets and check their link_mode */
4415 read_lock(&chan_list_lock);
4416 list_for_each_entry(c, &chan_list, global_l) {
4417 struct sock *sk = c->sk;
4419 if (c->state != BT_LISTEN)
/* Exact local-address match (lm1) beats wildcard match (lm2). */
4422 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4423 lm1 |= HCI_LM_ACCEPT;
4424 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4425 lm1 |= HCI_LM_MASTER;
4427 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4428 lm2 |= HCI_LM_ACCEPT;
4429 if (test_bit(FLAG_ROLE_SWITCH, &c->flags))
4430 lm2 |= HCI_LM_MASTER;
4433 read_unlock(&chan_list_lock);
4435 return exact ? lm1 : lm2;
/* HCI callback on ACL/LE connection completion: on success attach an
 * L2CAP connection object and start pending channels; on failure tear
 * down any L2CAP state with the translated HCI error. */
4438 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4440 struct l2cap_conn *conn;
4442 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4444 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4448 conn = l2cap_conn_add(hcon, status);
4450 l2cap_conn_ready(conn);
4452 l2cap_conn_del(hcon, bt_to_errno(status));
4457 static int l2cap_disconn_ind(struct hci_conn *hcon)
4459 struct l2cap_conn *conn = hcon->l2cap_data;
4461 BT_DBG("hcon %p", hcon);
4463 if ((hcon->type != ACL_LINK && hcon->type != LE_LINK) || !conn)
4464 return HCI_ERROR_REMOTE_USER_TERM;
4466 return conn->disc_reason;
4469 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4471 BT_DBG("hcon %p reason %d", hcon, reason);
4473 if (!(hcon->type == ACL_LINK || hcon->type == LE_LINK))
4476 l2cap_conn_del(hcon, bt_to_errno(reason));
/* React to an encryption change on a connection-oriented channel.
 * Losing encryption: medium-security channels get a grace timer,
 * high-security channels are closed immediately.  Gaining encryption
 * cancels a pending medium-security grace timer. */
4481 static inline void l2cap_check_encryption(struct l2cap_chan *chan, u8 encrypt)
4483 if (chan->chan_type != L2CAP_CHAN_CONN_ORIENTED)
4486 if (encrypt == 0x00) {
4487 if (chan->sec_level == BT_SECURITY_MEDIUM) {
4488 __clear_chan_timer(chan);
4489 __set_chan_timer(chan, L2CAP_ENC_TIMEOUT);
4490 } else if (chan->sec_level == BT_SECURITY_HIGH)
4491 l2cap_chan_close(chan, ECONNREFUSED);
4493 if (chan->sec_level == BT_SECURITY_MEDIUM)
4494 __clear_chan_timer(chan);
/* HCI callback after an authentication/encryption attempt completes.
 * Walks every channel on the connection and advances its state
 * machine: LE data channels become ready, channels in BT_CONNECT send
 * their deferred Connection Request, and channels in BT_CONNECT2
 * (incoming, awaiting security) send the Connection Response --
 * possibly deferring to userspace when defer_setup is on. */
4498 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4500 struct l2cap_conn *conn = hcon->l2cap_data;
4501 struct l2cap_chan *chan;
4506 BT_DBG("conn %p", conn);
/* LE links: security is SMP's business; distribute keys and stop
 * the SMP security timer. */
4508 if (hcon->type == LE_LINK) {
4509 smp_distribute_keys(conn, 0);
4510 del_timer(&conn->security_timer);
4513 read_lock(&conn->chan_lock);
4515 list_for_each_entry(chan, &conn->chan_l, list) {
4516 struct sock *sk = chan->sk;
4520 BT_DBG("chan->scid %d", chan->scid);
4522 if (chan->scid == L2CAP_CID_LE_DATA) {
4523 if (!status && encrypt) {
4524 chan->sec_level = hcon->sec_level;
4525 l2cap_chan_ready(sk);
4532 if (test_bit(CONF_CONNECT_PEND, &chan->conf_state)) {
4537 if (!status && (chan->state == BT_CONNECTED ||
4538 chan->state == BT_CONFIG)) {
4539 l2cap_check_encryption(chan, encrypt);
/* Security done for an outgoing channel: send the queued
 * Connection Request now. */
4544 if (chan->state == BT_CONNECT) {
4546 struct l2cap_conn_req req;
4547 req.scid = cpu_to_le16(chan->scid);
4548 req.psm = chan->psm;
4550 chan->ident = l2cap_get_ident(conn);
4551 set_bit(CONF_CONNECT_PEND, &chan->conf_state);
4553 l2cap_send_cmd(conn, chan->ident,
4554 L2CAP_CONN_REQ, sizeof(req), &req);
/* Security failed: schedule disconnect instead. */
4556 __clear_chan_timer(chan);
4557 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4559 } else if (chan->state == BT_CONNECT2) {
4560 struct l2cap_conn_rsp rsp;
/* defer_setup: keep the peer pending and wake the listener
 * so userspace can accept() before we answer. */
4564 if (bt_sk(sk)->defer_setup) {
4565 struct sock *parent = bt_sk(sk)->parent;
4566 res = L2CAP_CR_PEND;
4567 stat = L2CAP_CS_AUTHOR_PEND;
4569 parent->sk_data_ready(parent, 0);
4571 l2cap_state_change(chan, BT_CONFIG);
4572 res = L2CAP_CR_SUCCESS;
4573 stat = L2CAP_CS_NO_INFO;
4576 l2cap_state_change(chan, BT_DISCONN);
4577 __set_chan_timer(chan, L2CAP_DISC_TIMEOUT);
4578 res = L2CAP_CR_SEC_BLOCK;
4579 stat = L2CAP_CS_NO_INFO;
4582 rsp.scid = cpu_to_le16(chan->dcid);
4583 rsp.dcid = cpu_to_le16(chan->scid);
4584 rsp.result = cpu_to_le16(res);
4585 rsp.status = cpu_to_le16(stat);
4586 l2cap_send_cmd(conn, chan->ident, L2CAP_CONN_RSP,
4593 read_unlock(&conn->chan_lock);
/*
 * HCI core callback: an ACL data packet arrived for @hcon.  Reassembles
 * fragmented L2CAP frames (ACL start + continuation fragments, flagged
 * via ACL_CONT) into conn->rx_skb and hands each complete frame to
 * l2cap_recv_frame().  Malformed fragment sequences mark the connection
 * unreliable with ECOMM.
 */
4598 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4600 struct l2cap_conn *conn = hcon->l2cap_data;
/* No L2CAP state yet for this link: create it on first data. */
4603 conn = l2cap_conn_add(hcon, 0);
4608 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4610 if (!(flags & ACL_CONT)) {
4611 struct l2cap_hdr *hdr;
4612 struct l2cap_chan *chan;
/* A start fragment while rx_skb is still populated means the previous
 * reassembly never completed: drop the stale buffer. */
4617 BT_ERR("Unexpected start frame (len %d)", skb->len);
4618 kfree_skb(conn->rx_skb);
4619 conn->rx_skb = NULL;
4621 l2cap_conn_unreliable(conn, ECOMM);
4624 /* Start fragment always begin with Basic L2CAP header */
4625 if (skb->len < L2CAP_HDR_SIZE) {
4626 BT_ERR("Frame is too short (len %d)", skb->len);
4627 l2cap_conn_unreliable(conn, ECOMM);
4631 hdr = (struct l2cap_hdr *) skb->data;
/* Total frame length = payload length from the header + header itself. */
4632 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4633 cid = __le16_to_cpu(hdr->cid);
4635 if (len == skb->len) {
4636 /* Complete frame received */
4637 l2cap_recv_frame(conn, skb);
4641 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4643 if (skb->len > len) {
4644 BT_ERR("Frame is too long (len %d, expected len %d)",
4646 l2cap_conn_unreliable(conn, ECOMM);
/* Check the destination channel's receive MTU before committing to a
 * reassembly buffer of the full frame size. */
4650 chan = l2cap_get_chan_by_scid(conn, cid);
4652 if (chan && chan->sk) {
4653 struct sock *sk = chan->sk;
4655 if (chan->imtu < len - L2CAP_HDR_SIZE) {
4656 BT_ERR("Frame exceeding recv MTU (len %d, "
4660 l2cap_conn_unreliable(conn, ECOMM);
4666 /* Allocate skb for the complete frame (with header) */
4667 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4671 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Bytes still expected from continuation fragments. */
4673 conn->rx_len = len - skb->len;
4675 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation fragment with no reassembly in progress. */
4677 if (!conn->rx_len) {
4678 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4679 l2cap_conn_unreliable(conn, ECOMM);
4683 if (skb->len > conn->rx_len) {
4684 BT_ERR("Fragment is too long (len %d, expected %d)",
4685 skb->len, conn->rx_len);
4686 kfree_skb(conn->rx_skb);
4687 conn->rx_skb = NULL;
4689 l2cap_conn_unreliable(conn, ECOMM);
/* Append this fragment to the reassembly buffer. */
4693 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4695 conn->rx_len -= skb->len;
4697 if (!conn->rx_len) {
4698 /* Complete frame received */
4699 l2cap_recv_frame(conn, conn->rx_skb);
4700 conn->rx_skb = NULL;
/*
 * debugfs seq_file show handler: print one line per known L2CAP channel
 * (source/destination bdaddr, state, PSM, CIDs, MTUs, security level
 * and channel mode) while holding the global channel list lock.
 */
4709 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4711 struct l2cap_chan *c;
4713 read_lock_bh(&chan_list_lock);
4715 list_for_each_entry(c, &chan_list, global_l) {
4716 struct sock *sk = c->sk;
4718 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d %d\n",
4719 batostr(&bt_sk(sk)->src),
4720 batostr(&bt_sk(sk)->dst),
4721 c->state, __le16_to_cpu(c->psm),
4722 c->scid, c->dcid, c->imtu, c->omtu,
4723 c->sec_level, c->mode),
4726 read_unlock_bh(&chan_list_lock);
/* debugfs open handler: bind the single-record seq_file show routine. */
4731 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4733 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the read-only "l2cap" debugfs entry. */
4736 static const struct file_operations l2cap_debugfs_fops = {
4737 .open = l2cap_debugfs_open,
4739 .llseek = seq_lseek,
4740 .release = single_release,
/* Dentry of the "l2cap" debugfs file, created in l2cap_init(). */
4743 static struct dentry *l2cap_debugfs;
/* Callbacks registered with the HCI core for the L2CAP protocol. */
4745 static struct hci_proto l2cap_hci_proto = {
4747 .id = HCI_PROTO_L2CAP,
4748 .connect_ind = l2cap_connect_ind,
4749 .connect_cfm = l2cap_connect_cfm,
4750 .disconn_ind = l2cap_disconn_ind,
4751 .disconn_cfm = l2cap_disconn_cfm,
4752 .security_cfm = l2cap_security_cfm,
4753 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: register the L2CAP socket family, then the L2CAP HCI
 * protocol handler, then create the read-only debugfs entry.  A failed
 * protocol registration rolls back the socket registration; a missing
 * debugfs file is only logged, not fatal.
 */
4756 int __init l2cap_init(void)
4760 err = l2cap_init_sockets();
4764 err = hci_register_proto(&l2cap_hci_proto);
4766 BT_ERR("L2CAP protocol registration failed");
4767 bt_sock_unregister(BTPROTO_L2CAP);
4772 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4773 bt_debugfs, NULL, &l2cap_debugfs_fops);
4775 BT_ERR("Failed to create L2CAP debug file");
/* Error path: undo the socket registration done above. */
4781 l2cap_cleanup_sockets();
/*
 * Module exit: remove the debugfs entry, unregister the HCI protocol
 * handler (failure is only logged) and release the socket family.
 */
4785 void l2cap_exit(void)
4787 debugfs_remove(l2cap_debugfs);
4789 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4790 BT_ERR("L2CAP protocol unregistration failed");
4792 l2cap_cleanup_sockets();
/* Module parameter: allow disabling Enhanced Retransmission Mode
 * (readable/writable by root via sysfs, mode 0644). */
4795 module_param(disable_ertm, bool, 0644);
4796 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");