2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core and sockets. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
/* Module version string reported on load. */
58 #define VERSION "2.15"
/* Module parameter: when non-zero, ERTM/streaming modes are never offered. */
60 static int disable_ertm = 0;
/* Feature mask advertised in L2CAP Information responses. */
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed-channel bitmap; 0x02 = signalling channel supported. */
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static const struct proto_ops l2cap_sock_ops;
/* Workqueue used by the ERTM local-busy handling path. */
67 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
69 static struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for routines defined later in this file. */
73 static void l2cap_busy_work(struct work_struct *work);
75 static void __l2cap_sock_close(struct sock *sk, int reason);
76 static void l2cap_sock_close(struct sock *sk);
77 static void l2cap_sock_kill(struct sock *sk);
79 static int l2cap_build_conf_req(struct sock *sk, void *data);
80 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
81 u8 code, u8 ident, u16 dlen, void *data);
83 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
85 /* ---- L2CAP timers ---- */
86 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
88 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
89 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
92 static void l2cap_sock_clear_timer(struct sock *sk)
94 BT_DBG("sock %p state %d", sk, sk->sk_state);
95 sk_stop_timer(sk, &sk->sk_timer);
/* Socket timer expiry callback (runs in timer/softirq context).
 * NOTE(review): the listing is truncated here (original line numbers skip);
 * locking, the `reason` declaration/default and the cleanup tail are not
 * visible in this view. */
98 static void l2cap_sock_timeout(unsigned long arg)
100 struct sock *sk = (struct sock *) arg;
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
107 if (sock_owned_by_user(sk)) {
108 /* sk is owned by user. Try again later */
109 l2cap_sock_set_timer(sk, HZ / 5);
/* Connected/config or authenticated-connect states report connection refused. */
115 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
116 reason = ECONNREFUSED;
117 else if (sk->sk_state == BT_CONNECT &&
118 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
119 reason = ECONNREFUSED;
123 __l2cap_sock_close(sk, reason);
131 /* ---- L2CAP channels ---- */
/* Walk the per-connection channel list looking for a matching destination
 * CID. Caller must hold the list lock. */
132 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
135 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
136 if (l2cap_pi(s)->dcid == cid)
/* As above, but match on the source CID. Caller must hold the list lock. */
142 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->scid == cid)
152 /* Find channel with given SCID.
153 * Returns locked socket */
/* Locking wrapper over __l2cap_get_chan_by_scid(); takes the list read
 * lock and (per the contract above) locks the found socket. */
154 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
158 s = __l2cap_get_chan_by_scid(l, cid);
161 read_unlock(&l->lock);
/* Find the channel whose outstanding signalling request used `ident`.
 * Caller must hold the list lock. */
165 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
168 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
169 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper over __l2cap_get_chan_by_ident(); returns the socket
 * found (locked, per the pattern of the SCID variant above). */
175 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
179 s = __l2cap_get_chan_by_ident(l, ident);
182 read_unlock(&l->lock);
/* Allocate the first free dynamic CID in [L2CAP_CID_DYN_START,
 * L2CAP_CID_DYN_END). Caller must hold the channel-list lock. */
186 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
188 u16 cid = L2CAP_CID_DYN_START;
190 for (; cid < L2CAP_CID_DYN_END; cid++) {
191 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push `sk` onto the head of the doubly-linked channel list.
 * NOTE(review): the write-lock/unlock and head assignment are not visible
 * in this truncated view. */
198 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
203 l2cap_pi(l->head)->prev_c = sk;
205 l2cap_pi(sk)->next_c = l->head;
206 l2cap_pi(sk)->prev_c = NULL;
/* Remove `sk` from the channel list, fixing up neighbour links under the
 * list write lock (bh-safe variant since this can race with softirq). */
210 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
212 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
214 write_lock_bh(&l->lock);
219 l2cap_pi(next)->prev_c = prev;
221 l2cap_pi(prev)->next_c = next;
222 write_unlock_bh(&l->lock);
/* Attach channel `sk` to connection `conn`, assigning CIDs according to
 * the socket type, and queue it on `parent`'s accept queue when given.
 * Caller must hold the channel-list write lock. */
227 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
229 struct l2cap_chan_list *l = &conn->chan_list;
231 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
232 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = "Remote User Terminated Connection" default disconnect reason. */
234 conn->disc_reason = 0x13;
236 l2cap_pi(sk)->conn = conn;
238 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
239 /* Alloc CID for connection-oriented socket */
240 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
241 } else if (sk->sk_type == SOCK_DGRAM) {
242 /* Connectionless socket */
243 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
244 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
245 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
247 /* Raw socket can send/recv signalling messages only */
248 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
249 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
250 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
253 __l2cap_chan_link(l, sk);
256 bt_accept_enqueue(parent, sk);
/* Delete channel `sk` from its connection, tearing down ERTM state.
260 * Must be called on the locked socket. */
261 static void l2cap_chan_del(struct sock *sk, int err)
263 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
264 struct sock *parent = bt_sk(sk)->parent;
266 l2cap_sock_clear_timer(sk);
268 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
271 /* Unlink from channel list */
272 l2cap_chan_unlink(&conn->chan_list, sk);
273 l2cap_pi(sk)->conn = NULL;
/* Drop the hci_conn reference taken when the channel was added. */
274 hci_conn_put(conn->hcon);
277 sk->sk_state = BT_CLOSED;
278 sock_set_flag(sk, SOCK_ZAPPED);
/* If still on a parent's accept queue, unlink and wake the listener. */
284 bt_accept_unlink(sk);
285 parent->sk_data_ready(parent, 0);
287 sk->sk_state_change(sk);
289 skb_queue_purge(TX_QUEUE(sk));
/* ERTM channels additionally carry timers and SREJ/busy queues to free. */
291 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
292 struct srej_list *l, *tmp;
294 del_timer(&l2cap_pi(sk)->retrans_timer);
295 del_timer(&l2cap_pi(sk)->monitor_timer);
296 del_timer(&l2cap_pi(sk)->ack_timer);
298 skb_queue_purge(SREJ_QUEUE(sk));
299 skb_queue_purge(BUSY_QUEUE(sk));
301 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
308 /* Service level security */
/* Map the channel's PSM/sec_level to an HCI authentication type and ask
 * the HCI layer to enforce it. PSM 0x0001 (SDP) never requires bonding. */
309 static inline int l2cap_check_security(struct sock *sk)
311 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
314 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
315 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
316 auth_type = HCI_AT_NO_BONDING_MITM;
318 auth_type = HCI_AT_NO_BONDING;
/* SDP channels get downgraded from LOW to the dedicated SDP level. */
320 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
321 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
323 switch (l2cap_pi(sk)->sec_level) {
324 case BT_SECURITY_HIGH:
325 auth_type = HCI_AT_GENERAL_BONDING_MITM;
327 case BT_SECURITY_MEDIUM:
328 auth_type = HCI_AT_GENERAL_BONDING;
331 auth_type = HCI_AT_NO_BONDING;
336 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved 1..128 range under conn->lock. */
340 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
344 /* Get next available identificator.
345 * 1 - 128 are used by kernel.
346 * 129 - 199 are reserved.
347 * 200 - 254 are used by utilities like l2ping, etc.
350 spin_lock_bh(&conn->lock);
352 if (++conn->tx_ident > 128)
357 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command and transmit it on the ACL link.
 * NOTE(review): the NULL check on the built skb is not visible in this
 * truncated view. */
362 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
364 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
366 BT_DBG("code 0x%2.2x", code);
371 hci_send_acl(conn->hcon, skb, 0);
/* Build and send an ERTM S-frame carrying `control`, appending F/P bits
 * from pending connection state and an optional CRC16 FCS. */
374 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
377 struct l2cap_hdr *lh;
378 struct l2cap_conn *conn = pi->conn;
379 struct sock *sk = (struct sock *)pi;
/* Header + 2 bytes of control field; +2 more below when FCS is on. */
380 int count, hlen = L2CAP_HDR_SIZE + 2;
382 if (sk->sk_state != BT_CONNECTED)
385 if (pi->fcs == L2CAP_FCS_CRC16)
388 BT_DBG("pi %p, control 0x%2.2x", pi, control);
390 count = min_t(unsigned int, conn->mtu, hlen);
391 control |= L2CAP_CTRL_FRAME_TYPE;
/* Consume pending Final bit, if armed. */
393 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
394 control |= L2CAP_CTRL_FINAL;
395 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Consume pending Poll bit, if armed. */
398 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
399 control |= L2CAP_CTRL_POLL;
400 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
403 skb = bt_skb_alloc(count, GFP_ATOMIC);
407 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
408 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
409 lh->cid = cpu_to_le16(pi->dcid);
410 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything up to (but not including) the FCS field itself. */
412 if (pi->fcs == L2CAP_FCS_CRC16) {
413 u16 fcs = crc16(0, (u8 *)lh, count - 2);
414 put_unaligned_le16(fcs, skb_put(skb, 2));
417 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send a Receiver Ready, or Receiver Not Ready while locally busy,
 * acknowledging up to buffer_seq. */
420 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
422 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
423 control |= L2CAP_SUPER_RCV_NOT_READY;
424 pi->conn_state |= L2CAP_CONN_RNR_SENT;
426 control |= L2CAP_SUPER_RCV_READY;
428 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
430 l2cap_send_sframe(pi, control);
433 static inline int __l2cap_no_conn_pending(struct sock *sk)
435 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: send a Connect Request if the peer's
 * feature mask is already known, otherwise query it first (with a
 * timeout) and defer the connect until the Information response. */
438 static void l2cap_do_start(struct sock *sk)
440 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
442 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature request in flight but not answered yet: wait. */
443 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
446 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
447 struct l2cap_conn_req req;
448 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
449 req.psm = l2cap_pi(sk)->psm;
451 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
452 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
454 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
455 L2CAP_CONN_REQ, sizeof(req), &req);
/* First channel on this connection: ask for the peer feature mask. */
458 struct l2cap_info_req req;
459 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
461 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
462 conn->info_ident = l2cap_get_ident(conn);
464 mod_timer(&conn->info_timer, jiffies +
465 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
467 l2cap_send_cmd(conn, conn->info_ident,
468 L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether `mode` is supported by both peers: intersect the remote
 * feature mask with the local one (ERTM/streaming included unless the
 * disable_ertm parameter suppressed them). */
472 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
474 u32 local_feat_mask = l2cap_feat_mask;
476 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
479 case L2CAP_MODE_ERTM:
480 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
481 case L2CAP_MODE_STREAMING:
482 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Abort the channel: purge pending TX, stop ERTM timers, issue a
 * Disconnect Request and move the socket to BT_DISCONN with `err`. */
488 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
490 struct l2cap_disconn_req req;
495 skb_queue_purge(TX_QUEUE(sk));
497 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
498 del_timer(&l2cap_pi(sk)->retrans_timer);
499 del_timer(&l2cap_pi(sk)->monitor_timer);
500 del_timer(&l2cap_pi(sk)->ack_timer);
503 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
504 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
505 l2cap_send_cmd(conn, l2cap_get_ident(conn),
506 L2CAP_DISCONN_REQ, sizeof(req), &req);
508 sk->sk_state = BT_DISCONN;
512 /* ---- L2CAP connections ---- */
/* Progress every channel on this connection once security/features allow:
 * send Connect Requests for BT_CONNECT channels, answer pending incoming
 * connects (BT_CONNECT2), and start configuration. Channels whose mode is
 * unsupported are collected on a local list and closed after the channel
 * list lock is released (closing needs the socket lock). */
513 static void l2cap_conn_start(struct l2cap_conn *conn)
515 struct l2cap_chan_list *l = &conn->chan_list;
516 struct sock_del_list del, *tmp1, *tmp2;
519 BT_DBG("conn %p", conn);
521 INIT_LIST_HEAD(&del.list);
525 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets take part in the state machine. */
528 if (sk->sk_type != SOCK_SEQPACKET &&
529 sk->sk_type != SOCK_STREAM) {
534 if (sk->sk_state == BT_CONNECT) {
535 struct l2cap_conn_req req;
537 if (!l2cap_check_security(sk) ||
538 !__l2cap_no_conn_pending(sk)) {
/* Required mode unsupported by peer and mandated locally: defer close. */
543 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
545 && l2cap_pi(sk)->conf_state &
546 L2CAP_CONF_STATE2_DEVICE) {
547 tmp1 = kzalloc(sizeof(struct sock_del_list),
550 list_add_tail(&tmp1->list, &del.list);
555 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
556 req.psm = l2cap_pi(sk)->psm;
558 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
559 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
561 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
562 L2CAP_CONN_REQ, sizeof(req), &req);
564 } else if (sk->sk_state == BT_CONNECT2) {
565 struct l2cap_conn_rsp rsp;
567 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
568 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
570 if (l2cap_check_security(sk)) {
/* Deferred setup: report pending and let userspace accept first. */
571 if (bt_sk(sk)->defer_setup) {
572 struct sock *parent = bt_sk(sk)->parent;
573 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
574 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
575 parent->sk_data_ready(parent, 0);
578 sk->sk_state = BT_CONFIG;
579 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
580 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
583 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
584 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
587 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
588 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Skip config if one was already sent or the connect isn't accepted. */
590 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
591 rsp.result != L2CAP_CR_SUCCESS) {
596 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
597 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
598 l2cap_build_conf_req(sk, buf), buf);
599 l2cap_pi(sk)->num_conf_req++;
605 read_unlock(&l->lock);
/* Now close the channels collected above, with proper socket locking. */
607 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
608 bh_lock_sock(tmp1->sk);
609 __l2cap_sock_close(tmp1->sk, ECONNRESET);
610 bh_unlock_sock(tmp1->sk);
611 list_del(&tmp1->list);
/* ACL link is up: mark raw/dgram channels connected immediately and
 * start the L2CAP state machine for connection-oriented ones. */
616 static void l2cap_conn_ready(struct l2cap_conn *conn)
618 struct l2cap_chan_list *l = &conn->chan_list;
621 BT_DBG("conn %p", conn);
625 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
628 if (sk->sk_type != SOCK_SEQPACKET &&
629 sk->sk_type != SOCK_STREAM) {
630 l2cap_sock_clear_timer(sk);
631 sk->sk_state = BT_CONNECTED;
632 sk->sk_state_change(sk);
633 } else if (sk->sk_state == BT_CONNECT)
639 read_unlock(&l->lock);
642 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate `err` to channels that requested force_reliable semantics. */
643 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
645 struct l2cap_chan_list *l = &conn->chan_list;
648 BT_DBG("conn %p", conn);
652 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
653 if (l2cap_pi(sk)->force_reliable)
657 read_unlock(&l->lock);
660 static void l2cap_info_timeout(unsigned long arg)
662 struct l2cap_conn *conn = (void *) arg;
664 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
665 conn->info_ident = 0;
667 l2cap_conn_start(conn);
/* Get-or-create the l2cap_conn attached to an ACL connection. Returns
 * the existing conn when present; otherwise allocates, initializes locks
 * and the info timer, and links it to the hci_conn. */
670 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
672 struct l2cap_conn *conn = hcon->l2cap_data;
677 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
681 hcon->l2cap_data = conn;
684 BT_DBG("hcon %p conn %p", hcon, conn);
686 conn->mtu = hcon->hdev->acl_mtu;
687 conn->src = &hcon->hdev->bdaddr;
688 conn->dst = &hcon->dst;
692 spin_lock_init(&conn->lock);
693 rwlock_init(&conn->chan_list.lock);
695 setup_timer(&conn->info_timer, l2cap_info_timeout,
696 (unsigned long) conn);
/* 0x13 = "Remote User Terminated Connection" default disconnect reason. */
698 conn->disc_reason = 0x13;
/* Tear down the l2cap_conn on ACL disconnect: free any partial RX skb,
 * delete every remaining channel with `err`, stop the info timer and
 * detach from the hci_conn. */
703 static void l2cap_conn_del(struct hci_conn *hcon, int err)
705 struct l2cap_conn *conn = hcon->l2cap_data;
711 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
713 kfree_skb(conn->rx_skb);
716 while ((sk = conn->chan_list.head)) {
718 l2cap_chan_del(sk, err);
723 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
724 del_timer_sync(&conn->info_timer);
726 hcon->l2cap_data = NULL;
730 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
732 struct l2cap_chan_list *l = &conn->chan_list;
733 write_lock_bh(&l->lock);
734 __l2cap_chan_add(conn, sk, parent);
735 write_unlock_bh(&l->lock);
738 /* ---- Socket interface ---- */
/* Exact-match lookup: socket bound to this (psm, source bdaddr) pair.
 * Caller must hold l2cap_sk_list.lock. */
739 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
742 struct hlist_node *node;
743 sk_for_each(sk, node, &l2cap_sk_list.head)
744 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
751 /* Find socket with psm and source bdaddr.
752 * Returns closest match.
/* Prefer an exact source-address match; fall back to a BDADDR_ANY
 * wildcard listener (kept in sk1). Caller holds l2cap_sk_list.lock. */
754 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
756 struct sock *sk = NULL, *sk1 = NULL;
757 struct hlist_node *node;
759 sk_for_each(sk, node, &l2cap_sk_list.head) {
760 if (state && sk->sk_state != state)
763 if (l2cap_pi(sk)->psm == psm) {
765 if (!bacmp(&bt_sk(sk)->src, src))
769 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node non-NULL means the loop broke on an exact match. */
773 return node ? sk : sk1;
776 /* Find socket with given address (psm, src).
777 * Returns locked socket */
/* Locking wrapper: read-lock the global socket list, look up, and (per
 * the contract above) lock the found socket before returning. */
778 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
781 read_lock(&l2cap_sk_list.lock);
782 s = __l2cap_get_sock_by_psm(state, psm, src);
785 read_unlock(&l2cap_sk_list.lock);
/* Socket destructor: drop any skbs still queued for receive/transmit. */
789 static void l2cap_sock_destruct(struct sock *sk)
793 skb_queue_purge(&sk->sk_receive_queue);
794 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the listener closed and zapped. */
797 static void l2cap_sock_cleanup_listen(struct sock *parent)
801 BT_DBG("parent %p", parent);
803 /* Close not yet accepted channels */
804 while ((sk = bt_accept_dequeue(parent, NULL)))
805 l2cap_sock_close(sk);
807 parent->sk_state = BT_CLOSED;
808 sock_set_flag(parent, SOCK_ZAPPED);
811 /* Kill socket (only if zapped and orphan)
812 * Must be called on unlocked socket.
814 static void l2cap_sock_kill(struct sock *sk)
/* Bail unless zapped and no longer attached to a struct socket. */
816 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
819 BT_DBG("sk %p state %d", sk, sk->sk_state);
821 /* Kill poor orphan */
822 bt_sock_unlink(&l2cap_sk_list, sk);
823 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listeners clean their accept queue; established
 * connection-oriented channels send a Disconnect Request; a pending
 * incoming connect (CONNECT2) is answered with a reject first; everything
 * else is deleted/zapped directly. Caller holds the socket lock. */
827 static void __l2cap_sock_close(struct sock *sk, int reason)
829 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
831 switch (sk->sk_state) {
833 l2cap_sock_cleanup_listen(sk);
838 if (sk->sk_type == SOCK_SEQPACKET ||
839 sk->sk_type == SOCK_STREAM) {
840 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Bound the disconnect handshake by the send timeout. */
842 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
843 l2cap_send_disconn_req(conn, sk, reason);
845 l2cap_chan_del(sk, reason);
849 if (sk->sk_type == SOCK_SEQPACKET ||
850 sk->sk_type == SOCK_STREAM) {
851 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
852 struct l2cap_conn_rsp rsp;
/* Pick the reject code the remote will see for this pending connect. */
855 if (bt_sk(sk)->defer_setup)
856 result = L2CAP_CR_SEC_BLOCK;
858 result = L2CAP_CR_BAD_PSM;
860 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
861 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
862 rsp.result = cpu_to_le16(result);
863 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
864 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
865 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
867 l2cap_chan_del(sk, reason);
872 l2cap_chan_del(sk, reason);
876 sock_set_flag(sk, SOCK_ZAPPED);
881 /* Must be called on unlocked socket. */
/* Stop the socket timer and close with ECONNRESET. NOTE(review): the
 * lock/unlock and final l2cap_sock_kill() are not visible in this view. */
882 static void l2cap_sock_close(struct sock *sk)
884 l2cap_sock_clear_timer(sk);
886 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize per-socket L2CAP state. Children inherit their parameters
 * from `parent`; fresh sockets get defaults (ERTM for SOCK_STREAM unless
 * disabled, basic mode otherwise). */
891 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
893 struct l2cap_pinfo *pi = l2cap_pi(sk);
898 sk->sk_type = parent->sk_type;
899 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
901 pi->imtu = l2cap_pi(parent)->imtu;
902 pi->omtu = l2cap_pi(parent)->omtu;
903 pi->conf_state = l2cap_pi(parent)->conf_state;
904 pi->mode = l2cap_pi(parent)->mode;
905 pi->fcs = l2cap_pi(parent)->fcs;
906 pi->max_tx = l2cap_pi(parent)->max_tx;
907 pi->tx_win = l2cap_pi(parent)->tx_win;
908 pi->sec_level = l2cap_pi(parent)->sec_level;
909 pi->role_switch = l2cap_pi(parent)->role_switch;
910 pi->force_reliable = l2cap_pi(parent)->force_reliable;
912 pi->imtu = L2CAP_DEFAULT_MTU;
/* Stream sockets default to ERTM and mandate it device-side. */
914 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
915 pi->mode = L2CAP_MODE_ERTM;
916 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
918 pi->mode = L2CAP_MODE_BASIC;
920 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
921 pi->fcs = L2CAP_FCS_CRC16;
922 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
923 pi->sec_level = BT_SECURITY_LOW;
925 pi->force_reliable = 0;
928 /* Default config options */
930 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
931 skb_queue_head_init(TX_QUEUE(sk));
932 skb_queue_head_init(SREJ_QUEUE(sk));
933 skb_queue_head_init(BUSY_QUEUE(sk));
934 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor registered with the socket layer; obj_size makes
 * sk_alloc() allocate the full l2cap_pinfo. */
937 static struct proto l2cap_proto = {
939 .owner = THIS_MODULE,
940 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and initialize a bare L2CAP socket: destructor, send timeout,
 * expiry timer, and registration on the global socket list. */
943 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
947 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
951 sock_init_data(sock, sk);
952 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
954 sk->sk_destruct = l2cap_sock_destruct;
955 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
957 sock_reset_flag(sk, SOCK_ZAPPED);
959 sk->sk_protocol = proto;
960 sk->sk_state = BT_OPEN;
962 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
964 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the requested socket type, enforce
 * CAP_NET_RAW for raw sockets, then allocate and init the sock. */
968 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
973 BT_DBG("sock %p", sock);
975 sock->state = SS_UNCONNECTED;
977 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
978 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
979 return -ESOCKTNOSUPPORT;
/* Raw L2CAP sockets are privileged unless created in-kernel. */
981 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
984 sock->ops = &l2cap_sock_ops;
986 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
990 l2cap_sock_init(sk, NULL);
/* bind(2) backend: validate the sockaddr and PSM, reject duplicate
 * (psm, src) bindings under the list lock, then record the source
 * address and move to BT_BOUND. */
994 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
996 struct sock *sk = sock->sk;
997 struct sockaddr_l2 la;
1000 BT_DBG("sk %p", sk);
1002 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Copy at most sizeof(la); shorter sockaddrs are zero-padded. */
1005 memset(&la, 0, sizeof(la));
1006 len = min_t(unsigned int, sizeof(la), alen);
1007 memcpy(&la, addr, len);
1014 if (sk->sk_state != BT_OPEN) {
1020 __u16 psm = __le16_to_cpu(la.l2_psm);
1022 /* PSM must be odd and lsb of upper byte must be 0 */
1023 if ((psm & 0x0101) != 0x0001) {
1028 /* Restrict usage of well-known PSMs */
1029 if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
1035 write_lock_bh(&l2cap_sk_list.lock);
/* PSM already taken on this source address? */
1037 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1040 /* Save source address */
1041 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1042 l2cap_pi(sk)->psm = la.l2_psm;
1043 l2cap_pi(sk)->sport = la.l2_psm;
1044 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) get the SDP security level. */
1046 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1047 __le16_to_cpu(la.l2_psm) == 0x0003)
1048 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1051 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to the destination, pick an HCI authentication type
 * from socket type / PSM / security level, create (or reuse) the ACL
 * link and attach this channel to it. Returns 0 or a negative errno. */
1058 static int l2cap_do_connect(struct sock *sk)
1060 bdaddr_t *src = &bt_sk(sk)->src;
1061 bdaddr_t *dst = &bt_sk(sk)->dst;
1062 struct l2cap_conn *conn;
1063 struct hci_conn *hcon;
1064 struct hci_dev *hdev;
1068 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1071 hdev = hci_get_route(dst, src);
1073 return -EHOSTUNREACH;
1075 hci_dev_lock_bh(hdev);
/* Raw sockets map security levels onto dedicated-bonding auth types. */
1079 if (sk->sk_type == SOCK_RAW) {
1080 switch (l2cap_pi(sk)->sec_level) {
1081 case BT_SECURITY_HIGH:
1082 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1084 case BT_SECURITY_MEDIUM:
1085 auth_type = HCI_AT_DEDICATED_BONDING;
1088 auth_type = HCI_AT_NO_BONDING;
/* SDP PSM never requires bonding (mirrors l2cap_check_security()). */
1091 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1092 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1093 auth_type = HCI_AT_NO_BONDING_MITM;
1095 auth_type = HCI_AT_NO_BONDING;
1097 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1098 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1100 switch (l2cap_pi(sk)->sec_level) {
1101 case BT_SECURITY_HIGH:
1102 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1104 case BT_SECURITY_MEDIUM:
1105 auth_type = HCI_AT_GENERAL_BONDING;
1108 auth_type = HCI_AT_NO_BONDING;
1113 hcon = hci_connect(hdev, ACL_LINK, dst,
1114 l2cap_pi(sk)->sec_level, auth_type);
1118 conn = l2cap_conn_add(hcon, 0);
1126 /* Update source addr of the socket */
1127 bacpy(src, conn->src);
1129 l2cap_chan_add(conn, sk, NULL);
1131 sk->sk_state = BT_CONNECT;
1132 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link may already be up; raw/dgram channels connect immediately. */
1134 if (hcon->state == BT_CONNECTED) {
1135 if (sk->sk_type != SOCK_SEQPACKET &&
1136 sk->sk_type != SOCK_STREAM) {
1137 l2cap_sock_clear_timer(sk);
1138 sk->sk_state = BT_CONNECTED;
1144 hci_dev_unlock_bh(hdev);
/* connect(2) backend: validate the address, PSM and channel mode for
 * the socket type/state, record the destination, start the connection
 * and optionally block until BT_CONNECTED. */
1149 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1151 struct sock *sk = sock->sk;
1152 struct sockaddr_l2 la;
1155 BT_DBG("sk %p", sk);
1157 if (!addr || alen < sizeof(addr->sa_family) ||
1158 addr->sa_family != AF_BLUETOOTH)
1161 memset(&la, 0, sizeof(la));
1162 len = min_t(unsigned int, sizeof(la), alen);
1163 memcpy(&la, addr, len);
/* Connection-oriented sockets need a PSM (check truncated in this view). */
1170 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1176 switch (l2cap_pi(sk)->mode) {
1177 case L2CAP_MODE_BASIC:
1179 case L2CAP_MODE_ERTM:
1180 case L2CAP_MODE_STREAMING:
1189 switch (sk->sk_state) {
1193 /* Already connecting */
1197 /* Already connected */
1211 /* PSM must be odd and lsb of upper byte must be 0 */
1212 if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
1213 sk->sk_type != SOCK_RAW) {
1218 /* Set destination address and psm */
1219 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1220 l2cap_pi(sk)->psm = la.l2_psm;
1222 err = l2cap_do_connect(sk);
/* Block (honouring O_NONBLOCK) until connected or timeout. */
1227 err = bt_sock_wait_state(sk, BT_CONNECTED,
1228 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2) backend: only bound SEQPACKET/STREAM sockets in a supported
 * mode may listen; auto-assign a dynamic PSM if none was bound. */
1234 static int l2cap_sock_listen(struct socket *sock, int backlog)
1236 struct sock *sk = sock->sk;
1239 BT_DBG("sk %p backlog %d", sk, backlog);
1243 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1244 || sk->sk_state != BT_BOUND) {
1249 switch (l2cap_pi(sk)->mode) {
1250 case L2CAP_MODE_BASIC:
1252 case L2CAP_MODE_ERTM:
1253 case L2CAP_MODE_STREAMING:
/* No PSM bound yet: claim the first free odd dynamic PSM >= 0x1001. */
1262 if (!l2cap_pi(sk)->psm) {
1263 bdaddr_t *src = &bt_sk(sk)->src;
1268 write_lock_bh(&l2cap_sk_list.lock);
1270 for (psm = 0x1001; psm < 0x1100; psm += 2)
1271 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1272 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1273 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1278 write_unlock_bh(&l2cap_sk_list.lock);
1284 sk->sk_max_ack_backlog = backlog;
1285 sk->sk_ack_backlog = 0;
1286 sk->sk_state = BT_LISTEN;
/* accept(2) backend: sleep (wake-one, interruptible) on the listener
 * until a child socket is available or the timeout/signal hits. */
1293 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1295 DECLARE_WAITQUEUE(wait, current);
1296 struct sock *sk = sock->sk, *nsk;
1300 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1302 if (sk->sk_state != BT_LISTEN) {
1307 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1309 BT_DBG("sk %p timeo %ld", sk, timeo);
1311 /* Wait for an incoming connection. (wake-one). */
1312 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1313 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1314 set_current_state(TASK_INTERRUPTIBLE);
/* Sleep with the socket unlocked, then re-check listener state. */
1321 timeo = schedule_timeout(timeo);
1322 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1324 if (sk->sk_state != BT_LISTEN) {
1329 if (signal_pending(current)) {
1330 err = sock_intr_errno(timeo);
1334 set_current_state(TASK_RUNNING);
1335 remove_wait_queue(sk_sleep(sk), &wait);
1340 newsock->state = SS_CONNECTED;
1342 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername backend: fill a sockaddr_l2 with either the
 * peer (psm/dst/dcid) or local (sport/src/scid) endpoint. */
1349 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1351 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1352 struct sock *sk = sock->sk;
1354 BT_DBG("sock %p, sk %p", sock, sk);
1356 addr->sa_family = AF_BLUETOOTH;
1357 *len = sizeof(struct sockaddr_l2);
1360 la->l2_psm = l2cap_pi(sk)->psm;
1361 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1362 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1364 la->l2_psm = l2cap_pi(sk)->sport;
1365 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1366 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Block until every ERTM I-frame has been acknowledged (or the channel
 * is torn down), honouring signals and reporting socket errors. */
1372 static int __l2cap_wait_ack(struct sock *sk)
1374 DECLARE_WAITQUEUE(wait, current);
1378 add_wait_queue(sk_sleep(sk), &wait);
1379 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1380 set_current_state(TASK_INTERRUPTIBLE);
1385 if (signal_pending(current)) {
1386 err = sock_intr_errno(timeo);
1391 timeo = schedule_timeout(timeo);
1394 err = sock_error(sk);
1398 set_current_state(TASK_RUNNING);
1399 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: give up and disconnect after remote_max_tx polls
 * went unanswered; otherwise re-poll with the P bit set. */
1403 static void l2cap_monitor_timeout(unsigned long arg)
1405 struct sock *sk = (void *) arg;
1407 BT_DBG("sk %p", sk);
1410 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1411 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1416 l2cap_pi(sk)->retry_count++;
1417 __mod_monitor_timer();
1419 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: switch to the monitor cycle — first retry,
 * arm the monitor timer, set WAIT_F and poll the peer. */
1423 static void l2cap_retrans_timeout(unsigned long arg)
1425 struct sock *sk = (void *) arg;
1427 BT_DBG("sk %p", sk);
1430 l2cap_pi(sk)->retry_count = 1;
1431 __mod_monitor_timer();
1433 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1435 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Free TX-queue frames acknowledged up to expected_ack_seq and stop the
 * retransmission timer once nothing is outstanding. */
1439 static void l2cap_drop_acked_frames(struct sock *sk)
1441 struct sk_buff *skb;
1443 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1444 l2cap_pi(sk)->unacked_frames) {
/* Stop at the first frame at/after the ack boundary. */
1445 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1448 skb = skb_dequeue(TX_QUEUE(sk));
1451 l2cap_pi(sk)->unacked_frames--;
1454 if (!l2cap_pi(sk)->unacked_frames)
1455 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand a fully-built L2CAP frame to the HCI layer for transmission. */
1458 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1460 struct l2cap_pinfo *pi = l2cap_pi(sk);
1462 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1464 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode TX: drain the queue, stamping each frame's control
 * field with the next TxSeq (mod 64) and recomputing the trailing FCS. */
1467 static void l2cap_streaming_send(struct sock *sk)
1469 struct sk_buff *skb;
1470 struct l2cap_pinfo *pi = l2cap_pi(sk);
1473 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1474 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1475 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1476 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
/* FCS covers the frame minus its own 2-byte trailer. */
1478 if (pi->fcs == L2CAP_FCS_CRC16) {
1479 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1480 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1483 l2cap_do_send(sk, skb);
1485 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
/* Retransmit the single queued I-frame carrying `tx_seq`: locate it in
 * the TX queue, clone it, refresh ReqSeq/F-bit and FCS, then resend.
 * Disconnects if the frame already hit remote_max_tx retries. */
1489 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1491 struct l2cap_pinfo *pi = l2cap_pi(sk);
1492 struct sk_buff *skb, *tx_skb;
1495 skb = skb_peek(TX_QUEUE(sk));
1500 if (bt_cb(skb)->tx_seq == tx_seq)
1503 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1506 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1508 if (pi->remote_max_tx &&
1509 bt_cb(skb)->retries == pi->remote_max_tx) {
1510 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
/* Clone so the original stays queued for future retransmissions. */
1514 tx_skb = skb_clone(skb, GFP_ATOMIC);
1515 bt_cb(skb)->retries++;
1516 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1518 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1519 control |= L2CAP_CTRL_FINAL;
1520 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1523 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1524 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1526 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1528 if (pi->fcs == L2CAP_FCS_CRC16) {
1529 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1530 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1533 l2cap_do_send(sk, tx_skb);
/* ERTM TX pump: send clones of queued I-frames while the transmit
 * window allows, stamping ReqSeq/TxSeq/F-bit and FCS, arming the
 * retransmission timer and advancing sk_send_head/next_tx_seq. */
1536 static int l2cap_ertm_send(struct sock *sk)
1538 struct sk_buff *skb, *tx_skb;
1539 struct l2cap_pinfo *pi = l2cap_pi(sk);
1543 if (sk->sk_state != BT_CONNECTED)
1546 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1548 if (pi->remote_max_tx &&
1549 bt_cb(skb)->retries == pi->remote_max_tx) {
1550 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
/* Clone so the original stays queued until acknowledged. */
1554 tx_skb = skb_clone(skb, GFP_ATOMIC);
1556 bt_cb(skb)->retries++;
1558 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1559 control &= L2CAP_CTRL_SAR;
1561 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1562 control |= L2CAP_CTRL_FINAL;
1563 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1565 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1566 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1567 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed from and written via skb->data rather
 * than tx_skb->data; the clone shares the data buffer so the bytes end
 * up in the transmitted frame, but writing through tx_skb would be the
 * clearer form — confirm against upstream history before changing. */
1570 if (pi->fcs == L2CAP_FCS_CRC16) {
1571 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1572 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1575 l2cap_do_send(sk, tx_skb);
1577 __mod_retrans_timer();
1579 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1580 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1582 pi->unacked_frames++;
1585 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1586 sk->sk_send_head = NULL;
1588 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/*
 * Restart transmission of every unacked frame (REJ recovery): point
 * sk_send_head back at the queue head, rewind next_tx_seq to the last
 * acknowledged sequence, then resend via l2cap_ertm_send().
 */
1596 static int l2cap_retransmit_frames(struct sock *sk)
1598 struct l2cap_pinfo *pi = l2cap_pi(sk);
1601 if (!skb_queue_empty(TX_QUEUE(sk)))
1602 sk->sk_send_head = TX_QUEUE(sk)->next;
1604 pi->next_tx_seq = pi->expected_ack_seq;
1605 ret = l2cap_ertm_send(sk);
/*
 * Acknowledge received I-frames.  When locally busy, send RNR and
 * remember that it was sent; otherwise try to piggy-back the ack on
 * pending outgoing data and only fall back to an explicit RR S-frame
 * when nothing was transmitted.
 */
1609 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1611 struct sock *sk = (struct sock *)pi;
1614 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1616 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1617 control |= L2CAP_SUPER_RCV_NOT_READY;
1618 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1619 l2cap_send_sframe(pi, control);
/* A positive return means I-frames went out carrying the ReqSeq ack. */
1623 if (l2cap_ertm_send(sk) > 0)
1626 control |= L2CAP_SUPER_RCV_READY;
1627 l2cap_send_sframe(pi, control);
/*
 * Send an SREJ S-frame with the Final bit set, requesting the sequence
 * number stored at the tail of the SREJ list.
 */
1630 static void l2cap_send_srejtail(struct sock *sk)
1632 struct srej_list *tail;
1635 control = L2CAP_SUPER_SELECT_REJECT;
1636 control |= L2CAP_CTRL_FINAL;
1638 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1639 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1641 l2cap_send_sframe(l2cap_pi(sk), control);
/*
 * Copy @len bytes of user iovec data into @skb: @count bytes into the
 * linear part, the remainder into frag_list continuation skbs, each at
 * most conn->mtu bytes.  Error paths (freeing, returns) are elided
 * from this listing.
 */
1644 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1646 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1647 struct sk_buff **frag;
1650 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1656 /* Continuation fragments (no L2CAP header) */
1657 frag = &skb_shinfo(skb)->frag_list;
1659 count = min_t(unsigned int, conn->mtu, len);
1661 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1664 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1670 frag = &(*frag)->next;
/*
 * Build a connectionless (SOCK_DGRAM) PDU: basic L2CAP header plus a
 * 2-byte PSM, followed by the user payload.  Returns the skb or an
 * ERR_PTR on allocation/copy failure.
 */
1676 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1678 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1679 struct sk_buff *skb;
/* hlen = L2CAP header + 2 bytes for the PSM field. */
1680 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1681 struct l2cap_hdr *lh;
1683 BT_DBG("sk %p len %d", sk, (int)len);
/* Linear part is capped by the ACL MTU; rest goes to frag_list. */
1685 count = min_t(unsigned int, (conn->mtu - hlen), len);
1686 skb = bt_skb_send_alloc(sk, count + hlen,
1687 msg->msg_flags & MSG_DONTWAIT, &err);
1689 return ERR_PTR(err);
1691 /* Create L2CAP header */
1692 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1693 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1694 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1695 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1697 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1698 if (unlikely(err < 0)) {
1700 return ERR_PTR(err);
1705 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1707 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1708 struct sk_buff *skb;
1709 int err, count, hlen = L2CAP_HDR_SIZE;
1710 struct l2cap_hdr *lh;
1712 BT_DBG("sk %p len %d", sk, (int)len);
1714 count = min_t(unsigned int, (conn->mtu - hlen), len);
1715 skb = bt_skb_send_alloc(sk, count + hlen,
1716 msg->msg_flags & MSG_DONTWAIT, &err);
1718 return ERR_PTR(err);
1720 /* Create L2CAP header */
1721 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1722 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1723 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1725 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1726 if (unlikely(err < 0)) {
1728 return ERR_PTR(err);
/*
 * Build an ERTM/streaming I-frame: L2CAP header + 16-bit control word,
 * optional 2-byte SDU length (SAR start frames, @sdulen != 0), payload,
 * and 2 bytes of FCS space when CRC16 is in use.  The FCS value itself
 * is filled in at transmit time; here it is zeroed.
 */
1733 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1735 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1736 struct sk_buff *skb;
/* hlen = header + control; elided lines add 2 more when sdulen set. */
1737 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1738 struct l2cap_hdr *lh;
1740 BT_DBG("sk %p len %d", sk, (int)len);
1743 return ERR_PTR(-ENOTCONN);
/* Reserve room for the trailing FCS when CRC16 was negotiated. */
1748 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1751 count = min_t(unsigned int, (conn->mtu - hlen), len);
1752 skb = bt_skb_send_alloc(sk, count + hlen,
1753 msg->msg_flags & MSG_DONTWAIT, &err);
1755 return ERR_PTR(err);
1757 /* Create L2CAP header */
1758 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1759 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1760 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1761 put_unaligned_le16(control, skb_put(skb, 2));
1763 put_unaligned_le16(sdulen, skb_put(skb, 2));
1765 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1766 if (unlikely(err < 0)) {
1768 return ERR_PTR(err);
/* Placeholder FCS; real CRC is computed just before transmission. */
1771 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1772 put_unaligned_le16(0, skb_put(skb, 2));
1774 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than remote_mps into START/CONTINUE/END
 * I-frames on a private queue, then splice the queue onto the socket's
 * TX queue atomically so a partial SDU is never exposed.  The START
 * frame carries the total SDU length (@len) as its sdulen field.
 */
1778 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1780 struct l2cap_pinfo *pi = l2cap_pi(sk);
1781 struct sk_buff *skb;
1782 struct sk_buff_head sar_queue;
1786 skb_queue_head_init(&sar_queue);
1787 control = L2CAP_SDU_START;
1788 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1790 return PTR_ERR(skb);
1792 __skb_queue_tail(&sar_queue, skb);
1793 len -= pi->remote_mps;
1794 size += pi->remote_mps;
/* Middle fragments are full remote_mps; the final one is SDU_END. */
1799 if (len > pi->remote_mps) {
1800 control = L2CAP_SDU_CONTINUE;
1801 buflen = pi->remote_mps;
1803 control = L2CAP_SDU_END;
1807 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure drop everything built so far; nothing was queued yet. */
1809 skb_queue_purge(&sar_queue);
1810 return PTR_ERR(skb);
1813 __skb_queue_tail(&sar_queue, skb);
1817 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1818 if (sk->sk_send_head == NULL)
1819 sk->sk_send_head = sar_queue.next;
/*
 * Socket sendmsg entry point.  Dispatches on socket type and channel
 * mode: connectionless PDU for SOCK_DGRAM, a single basic-mode PDU
 * (bounded by omtu) for basic mode, and segmented I-frames for
 * ERTM/streaming.  Error/exit paths are elided from this listing.
 */
1824 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1826 struct sock *sk = sock->sk;
1827 struct l2cap_pinfo *pi = l2cap_pi(sk);
1828 struct sk_buff *skb;
1832 BT_DBG("sock %p, sk %p", sock, sk);
1834 err = sock_error(sk);
1838 if (msg->msg_flags & MSG_OOB)
1843 if (sk->sk_state != BT_CONNECTED) {
1848 /* Connectionless channel */
1849 if (sk->sk_type == SOCK_DGRAM) {
1850 skb = l2cap_create_connless_pdu(sk, msg, len);
1854 l2cap_do_send(sk, skb);
1861 case L2CAP_MODE_BASIC:
1862 /* Check outgoing MTU */
1863 if (len > pi->omtu) {
1868 /* Create a basic PDU */
1869 skb = l2cap_create_basic_pdu(sk, msg, len);
1875 l2cap_do_send(sk, skb);
1879 case L2CAP_MODE_ERTM:
1880 case L2CAP_MODE_STREAMING:
1881 /* Entire SDU fits into one PDU */
1882 if (len <= pi->remote_mps) {
1883 control = L2CAP_SDU_UNSEGMENTED;
1884 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1889 __skb_queue_tail(TX_QUEUE(sk), skb);
1891 if (sk->sk_send_head == NULL)
1892 sk->sk_send_head = skb;
1895 /* Segment SDU into multiples PDUs */
1896 err = l2cap_sar_segment_sdu(sk, msg, len);
1901 if (pi->mode == L2CAP_MODE_STREAMING) {
1902 l2cap_streaming_send(sk);
/* FIX: test the WAIT_F flag with bitwise &.  The old logical && made
 * "pi->conn_state && L2CAP_CONN_WAIT_F" true whenever any flag was
 * set, so sends were wrongly deferred while merely REMOTE_BUSY. */
1904 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1905 pi->conn_state & L2CAP_CONN_WAIT_F) {
1909 err = l2cap_ertm_send(sk);
1917 BT_DBG("bad state %1.1x", pi->mode);
/*
 * Socket recvmsg entry point.  For a deferred-setup channel the first
 * read completes connection setup (sends the pending connect response
 * and the first config request) before delegating to the generic
 * bt_sock receive helpers.
 */
1926 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1928 struct sock *sk = sock->sk;
1932 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1933 struct l2cap_conn_rsp rsp;
1934 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1937 sk->sk_state = BT_CONFIG;
/* Answer the peer's CONN_REQ that was held back by defer_setup. */
1939 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1940 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1941 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1942 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1943 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1944 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only send a config request if one is not already outstanding. */
1946 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
1951 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1952 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1953 l2cap_build_conf_req(sk, buf), buf);
1954 l2cap_pi(sk)->num_conf_req++;
1962 if (sock->type == SOCK_STREAM)
1963 return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
1965 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt: L2CAP_OPTIONS (mtu/mode/fcs/tx window)
 * and L2CAP_LM (link-mode / security-level flags).  Switch labels,
 * error paths and locking lines are elided from this listing.
 */
1968 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1970 struct sock *sk = sock->sk;
1971 struct l2cap_options opts;
1975 BT_DBG("sk %p", sk);
/* Options cannot be changed once the channel is connected. */
1981 if (sk->sk_state == BT_CONNECTED) {
/* Seed opts with current values so a short copy keeps the rest. */
1986 opts.imtu = l2cap_pi(sk)->imtu;
1987 opts.omtu = l2cap_pi(sk)->omtu;
1988 opts.flush_to = l2cap_pi(sk)->flush_to;
1989 opts.mode = l2cap_pi(sk)->mode;
1990 opts.fcs = l2cap_pi(sk)->fcs;
1991 opts.max_tx = l2cap_pi(sk)->max_tx;
1992 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1994 len = min_t(unsigned int, sizeof(opts), optlen);
1995 if (copy_from_user((char *) &opts, optval, len)) {
2000 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
2005 l2cap_pi(sk)->mode = opts.mode;
2006 switch (l2cap_pi(sk)->mode) {
2007 case L2CAP_MODE_BASIC:
2008 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
2010 case L2CAP_MODE_ERTM:
2011 case L2CAP_MODE_STREAMING:
2020 l2cap_pi(sk)->imtu = opts.imtu;
2021 l2cap_pi(sk)->omtu = opts.omtu;
2022 l2cap_pi(sk)->fcs = opts.fcs;
2023 l2cap_pi(sk)->max_tx = opts.max_tx;
2024 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
/* L2CAP_LM: map legacy link-mode bits onto sec_level / flags. */
2028 if (get_user(opt, (u32 __user *) optval)) {
2033 if (opt & L2CAP_LM_AUTH)
2034 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
2035 if (opt & L2CAP_LM_ENCRYPT)
2036 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
2037 if (opt & L2CAP_LM_SECURE)
2038 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
2040 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
2041 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/*
 * SOL_BLUETOOTH setsockopt: BT_SECURITY and BT_DEFER_SETUP; legacy
 * SOL_L2CAP options are delegated to l2cap_sock_setsockopt_old().
 * Switch labels and error paths are elided from this listing.
 */
2053 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2055 struct sock *sk = sock->sk;
2056 struct bt_security sec;
2060 BT_DBG("sk %p", sk);
2062 if (level == SOL_L2CAP)
2063 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2065 if (level != SOL_BLUETOOTH)
2066 return -ENOPROTOOPT;
/* BT_SECURITY only applies to connection-oriented and raw sockets. */
2072 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2073 && sk->sk_type != SOCK_RAW) {
2078 sec.level = BT_SECURITY_LOW;
2080 len = min_t(unsigned int, sizeof(sec), optlen);
2081 if (copy_from_user((char *) &sec, optval, len)) {
2086 if (sec.level < BT_SECURITY_LOW ||
2087 sec.level > BT_SECURITY_HIGH) {
2092 l2cap_pi(sk)->sec_level = sec.level;
2095 case BT_DEFER_SETUP:
/* Defer-setup only makes sense before the channel is active. */
2096 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2101 if (get_user(opt, (u32 __user *) optval)) {
2106 bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt: L2CAP_OPTIONS, L2CAP_LM (reconstructed
 * from sec_level and flags) and L2CAP_CONNINFO.  Switch labels, breaks
 * and error paths are elided from this listing.
 */
2118 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2120 struct sock *sk = sock->sk;
2121 struct l2cap_options opts;
2122 struct l2cap_conninfo cinfo;
2126 BT_DBG("sk %p", sk);
2128 if (get_user(len, optlen))
2135 opts.imtu = l2cap_pi(sk)->imtu;
2136 opts.omtu = l2cap_pi(sk)->omtu;
2137 opts.flush_to = l2cap_pi(sk)->flush_to;
2138 opts.mode = l2cap_pi(sk)->mode;
2139 opts.fcs = l2cap_pi(sk)->fcs;
2140 opts.max_tx = l2cap_pi(sk)->max_tx;
2141 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2143 len = min_t(unsigned int, len, sizeof(opts));
2144 if (copy_to_user(optval, (char *) &opts, len))
/* L2CAP_LM: translate sec_level back into legacy link-mode bits. */
2150 switch (l2cap_pi(sk)->sec_level) {
2151 case BT_SECURITY_LOW:
2152 opt = L2CAP_LM_AUTH;
2154 case BT_SECURITY_MEDIUM:
2155 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2157 case BT_SECURITY_HIGH:
2158 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2166 if (l2cap_pi(sk)->role_switch)
2167 opt |= L2CAP_LM_MASTER;
2169 if (l2cap_pi(sk)->force_reliable)
2170 opt |= L2CAP_LM_RELIABLE;
2172 if (put_user(opt, (u32 __user *) optval))
2176 case L2CAP_CONNINFO:
/* CONNINFO is valid once connected (or deferred-setup pending). */
2177 if (sk->sk_state != BT_CONNECTED &&
2178 !(sk->sk_state == BT_CONNECT2 &&
2179 bt_sk(sk)->defer_setup)) {
2184 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2185 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2187 len = min_t(unsigned int, len, sizeof(cinfo));
2188 if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * SOL_BLUETOOTH getsockopt: BT_SECURITY and BT_DEFER_SETUP; legacy
 * SOL_L2CAP options go through l2cap_sock_getsockopt_old().
 */
2202 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2204 struct sock *sk = sock->sk;
2205 struct bt_security sec;
2208 BT_DBG("sk %p", sk);
2210 if (level == SOL_L2CAP)
2211 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2213 if (level != SOL_BLUETOOTH)
2214 return -ENOPROTOOPT;
2216 if (get_user(len, optlen))
2223 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2224 && sk->sk_type != SOCK_RAW) {
2229 sec.level = l2cap_pi(sk)->sec_level;
2231 len = min_t(unsigned int, len, sizeof(sec));
2232 if (copy_to_user(optval, (char *) &sec, len))
2237 case BT_DEFER_SETUP:
2238 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2243 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * Shut down the socket: for ERTM first wait for outstanding acks, then
 * close the channel and, with SO_LINGER set, wait for BT_CLOSED.
 */
2257 static int l2cap_sock_shutdown(struct socket *sock, int how)
2259 struct sock *sk = sock->sk;
2262 BT_DBG("sock %p, sk %p", sock, sk);
2268 if (!sk->sk_shutdown) {
/* ERTM: drain unacked frames before tearing the channel down. */
2269 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2270 err = __l2cap_wait_ack(sk);
2272 sk->sk_shutdown = SHUTDOWN_MASK;
2273 l2cap_sock_clear_timer(sk);
2274 __l2cap_sock_close(sk, 0);
2276 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2277 err = bt_sock_wait_state(sk, BT_CLOSED,
2281 if (!err && sk->sk_err)
/*
 * Release the socket: full shutdown (read+write) then kill the sock.
 */
2288 static int l2cap_sock_release(struct socket *sock)
2290 struct sock *sk = sock->sk;
2293 BT_DBG("sock %p, sk %p", sock, sk);
2298 err = l2cap_sock_shutdown(sock, 2);
2301 l2cap_sock_kill(sk);
/*
 * Mark a channel fully configured: clear config state and the setup
 * timer, then wake whichever side is waiting — connect() for outgoing
 * channels, accept() (via the parent) for incoming ones.
 */
2305 static void l2cap_chan_ready(struct sock *sk)
2307 struct sock *parent = bt_sk(sk)->parent;
2309 BT_DBG("sk %p, parent %p", sk, parent);
2311 l2cap_pi(sk)->conf_state = 0;
2312 l2cap_sock_clear_timer(sk);
2315 /* Outgoing channel.
2316 * Wake up socket sleeping on connect.
2318 sk->sk_state = BT_CONNECTED;
2319 sk->sk_state_change(sk);
2321 /* Incoming channel.
2322 * Wake up socket sleeping on accept.
2324 parent->sk_data_ready(parent, 0);
2328 /* Copy frame to all raw sockets on that connection */
2329 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2331 struct l2cap_chan_list *l = &conn->chan_list;
2332 struct sk_buff *nskb;
2335 BT_DBG("conn %p", conn);
/* Walk the channel list under the read lock; clone per raw socket. */
2337 read_lock(&l->lock);
2338 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2339 if (sk->sk_type != SOCK_RAW)
2342 /* Don't send frame to the socket it came from */
2345 nskb = skb_clone(skb, GFP_ATOMIC);
/* Nonzero means the receive queue rejected the clone (elided free). */
2349 if (sock_queue_rcv_skb(sk, nskb))
2352 read_unlock(&l->lock);
2355 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling PDU on the signalling CID: L2CAP header, command
 * header (code/ident/len) and @dlen bytes of payload, spilling into
 * frag_list skbs when it exceeds the ACL MTU.
 */
2356 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2357 u8 code, u8 ident, u16 dlen, void *data)
2359 struct sk_buff *skb, **frag;
2360 struct l2cap_cmd_hdr *cmd;
2361 struct l2cap_hdr *lh;
2364 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2365 conn, code, ident, dlen);
2367 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2368 count = min_t(unsigned int, conn->mtu, len);
2370 skb = bt_skb_alloc(count, GFP_ATOMIC);
2374 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2375 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2376 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2378 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2381 cmd->len = cpu_to_le16(dlen);
/* First chunk of payload goes in the linear part of the skb. */
2384 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2385 memcpy(skb_put(skb, count), data, count);
2391 /* Continuation fragments (no L2CAP header) */
2392 frag = &skb_shinfo(skb)->frag_list;
2394 count = min_t(unsigned int, conn->mtu, len);
2396 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2400 memcpy(skb_put(*frag, count), data, count);
2405 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr: return its total size and
 * deliver type/len/value.  1-, 2- and 4-byte values are read inline;
 * longer options are returned as a pointer cast into *val.
 */
2415 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2417 struct l2cap_conf_opt *opt = *ptr;
2420 len = L2CAP_CONF_OPT_SIZE + opt->len;
2428 *val = *((u8 *) opt->val);
2432 *val = get_unaligned_le16(opt->val);
2436 *val = get_unaligned_le32(opt->val);
/* Larger options: hand back a pointer to the raw bytes. */
2440 *val = (unsigned long) opt->val;
2444 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option (type/len/value) at *ptr and advance
 * the cursor.  Mirrors l2cap_get_conf_opt: small values inline, larger
 * ones memcpy'd from the pointer passed in @val.
 */
2448 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2450 struct l2cap_conf_opt *opt = *ptr;
2452 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2459 *((u8 *) opt->val) = val;
2463 put_unaligned_le16(val, opt->val);
2467 put_unaligned_le32(val, opt->val);
2471 memcpy(opt->val, (void *) val, len);
2475 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * Ack-timer callback: flush a pending acknowledgement for the channel.
 */
2478 static void l2cap_ack_timeout(unsigned long arg)
2480 struct sock *sk = (void *) arg;
2483 l2cap_send_ack(l2cap_pi(sk));
/*
 * Initialise per-channel ERTM state: zero the sequence counters, set
 * up the retransmission/monitor/ack timers, the SREJ and busy queues,
 * the local-busy work item, and install the ERTM backlog handler.
 */
2487 static inline void l2cap_ertm_init(struct sock *sk)
2489 l2cap_pi(sk)->expected_ack_seq = 0;
2490 l2cap_pi(sk)->unacked_frames = 0;
2491 l2cap_pi(sk)->buffer_seq = 0;
2492 l2cap_pi(sk)->num_acked = 0;
2493 l2cap_pi(sk)->frames_sent = 0;
2495 setup_timer(&l2cap_pi(sk)->retrans_timer,
2496 l2cap_retrans_timeout, (unsigned long) sk);
2497 setup_timer(&l2cap_pi(sk)->monitor_timer,
2498 l2cap_monitor_timeout, (unsigned long) sk);
2499 setup_timer(&l2cap_pi(sk)->ack_timer,
2500 l2cap_ack_timeout, (unsigned long) sk);
2502 __skb_queue_head_init(SREJ_QUEUE(sk));
2503 __skb_queue_head_init(BUSY_QUEUE(sk));
2505 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2507 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/*
 * Pick the channel mode: keep ERTM/streaming only when the remote's
 * feature mask supports it, otherwise fall back to basic mode.
 */
2510 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2513 case L2CAP_MODE_STREAMING:
2514 case L2CAP_MODE_ERTM:
2515 if (l2cap_mode_supported(mode, remote_feat_mask))
2519 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing configuration request for the channel into @data:
 * MTU option (basic mode), or an RFC option describing ERTM/streaming
 * parameters plus an FCS option when we can turn CRC16 off.  Returns
 * the request length (return lines elided from this listing).
 */
2523 static int l2cap_build_conf_req(struct sock *sk, void *data)
2525 struct l2cap_pinfo *pi = l2cap_pi(sk);
2526 struct l2cap_conf_req *req = data;
2527 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2528 void *ptr = req->data;
2530 BT_DBG("sk %p", sk);
/* Only the first request may renegotiate the mode downwards. */
2532 if (pi->num_conf_req || pi->num_conf_rsp)
2536 case L2CAP_MODE_STREAMING:
2537 case L2CAP_MODE_ERTM:
2538 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
2543 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2549 case L2CAP_MODE_BASIC:
2550 if (pi->imtu != L2CAP_DEFAULT_MTU)
2551 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
/* Peers without ERTM/streaming don't need an RFC option at all. */
2553 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2554 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
2557 rfc.mode = L2CAP_MODE_BASIC;
2559 rfc.max_transmit = 0;
2560 rfc.retrans_timeout = 0;
2561 rfc.monitor_timeout = 0;
2562 rfc.max_pdu_size = 0;
2564 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2565 (unsigned long) &rfc);
2568 case L2CAP_MODE_ERTM:
2569 rfc.mode = L2CAP_MODE_ERTM;
2570 rfc.txwin_size = pi->tx_win;
2571 rfc.max_transmit = pi->max_tx;
/* Timeouts are set by the acceptor; we leave them zero here. */
2572 rfc.retrans_timeout = 0;
2573 rfc.monitor_timeout = 0;
2574 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap the PDU size so header + payload + FCS fits the ACL MTU. */
2575 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2576 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2578 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2579 (unsigned long) &rfc);
2581 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2584 if (pi->fcs == L2CAP_FCS_NONE ||
2585 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2586 pi->fcs = L2CAP_FCS_NONE;
2587 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2591 case L2CAP_MODE_STREAMING:
2592 rfc.mode = L2CAP_MODE_STREAMING;
2594 rfc.max_transmit = 0;
2595 rfc.retrans_timeout = 0;
2596 rfc.monitor_timeout = 0;
2597 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2598 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2599 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2601 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2602 (unsigned long) &rfc);
2604 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2607 if (pi->fcs == L2CAP_FCS_NONE ||
2608 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2609 pi->fcs = L2CAP_FCS_NONE;
2610 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2615 /* FIXME: Need actual value of the flush timeout */
2616 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2617 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2619 req->dcid = cpu_to_le16(pi->dcid);
2620 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated configuration request (pi->conf_req)
 * and build our response into @data: accept/adjust MTU and RFC mode,
 * echo unknown non-hint options as L2CAP_CONF_UNKNOWN, and refuse the
 * connection when modes cannot be reconciled.  Switch openers, breaks
 * and the return line are elided from this listing.
 */
2625 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2627 struct l2cap_pinfo *pi = l2cap_pi(sk);
2628 struct l2cap_conf_rsp *rsp = data;
2629 void *ptr = rsp->data;
2630 void *req = pi->conf_req;
2631 int len = pi->conf_len;
2632 int type, hint, olen;
2634 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2635 u16 mtu = L2CAP_DEFAULT_MTU;
2636 u16 result = L2CAP_CONF_SUCCESS;
2638 BT_DBG("sk %p", sk);
2640 while (len >= L2CAP_CONF_OPT_SIZE) {
2641 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2643 hint = type & L2CAP_CONF_HINT;
2644 type &= L2CAP_CONF_MASK;
2647 case L2CAP_CONF_MTU:
2651 case L2CAP_CONF_FLUSH_TO:
2655 case L2CAP_CONF_QOS:
2658 case L2CAP_CONF_RFC:
2659 if (olen == sizeof(rfc))
2660 memcpy(&rfc, (void *) val, olen);
2663 case L2CAP_CONF_FCS:
2664 if (val == L2CAP_FCS_NONE)
2665 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: list it and reply CONF_UNKNOWN. */
2673 result = L2CAP_CONF_UNKNOWN;
2674 *((u8 *) ptr++) = type;
2679 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2683 case L2CAP_MODE_STREAMING:
2684 case L2CAP_MODE_ERTM:
2685 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2686 pi->mode = l2cap_select_mode(rfc.mode,
2687 pi->conn->feat_mask);
2691 if (pi->mode != rfc.mode)
2692 return -ECONNREFUSED;
2698 if (pi->mode != rfc.mode) {
2699 result = L2CAP_CONF_UNACCEPT;
2700 rfc.mode = pi->mode;
/* Second round with still-mismatched modes: give up. */
2702 if (pi->num_conf_rsp == 1)
2703 return -ECONNREFUSED;
2705 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2706 sizeof(rfc), (unsigned long) &rfc);
2710 if (result == L2CAP_CONF_SUCCESS) {
2711 /* Configure output options and let the other side know
2712 * which ones we don't like. */
2714 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2715 result = L2CAP_CONF_UNACCEPT;
2718 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2720 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2723 case L2CAP_MODE_BASIC:
2724 pi->fcs = L2CAP_FCS_NONE;
2725 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2728 case L2CAP_MODE_ERTM:
2729 pi->remote_tx_win = rfc.txwin_size;
2730 pi->remote_max_tx = rfc.max_transmit;
2732 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2733 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2735 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* FIX: the rfc timeout fields are little-endian on the wire, so a
 * host-order constant must be converted with cpu_to_le16(); the old
 * le16_to_cpu() produced byte-swapped timeouts on big-endian hosts. */
2737 rfc.retrans_timeout =
2738 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2739 rfc.monitor_timeout =
2740 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2742 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2744 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2745 sizeof(rfc), (unsigned long) &rfc);
2749 case L2CAP_MODE_STREAMING:
2750 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2751 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2753 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2755 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2757 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2758 sizeof(rfc), (unsigned long) &rfc);
2763 result = L2CAP_CONF_UNACCEPT;
2765 memset(&rfc, 0, sizeof(rfc));
2766 rfc.mode = pi->mode;
2769 if (result == L2CAP_CONF_SUCCESS)
2770 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2772 rsp->scid = cpu_to_le16(pi->dcid);
2773 rsp->result = cpu_to_le16(result);
2774 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's configuration response and rebuild our request in
 * @data for another round: clamp MTU to the allowed minimum, echo
 * flush-timeout, and adopt the RFC parameters (timeouts, MPS) the
 * peer dictated — unless STATE2_DEVICE pins the mode, in which case a
 * mode change refuses the connection.
 */
2779 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2781 struct l2cap_pinfo *pi = l2cap_pi(sk);
2782 struct l2cap_conf_req *req = data;
2783 void *ptr = req->data;
2786 struct l2cap_conf_rfc rfc;
2788 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2790 while (len >= L2CAP_CONF_OPT_SIZE) {
2791 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2794 case L2CAP_CONF_MTU:
2795 if (val < L2CAP_DEFAULT_MIN_MTU) {
2796 *result = L2CAP_CONF_UNACCEPT;
2797 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2800 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2803 case L2CAP_CONF_FLUSH_TO:
2805 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2809 case L2CAP_CONF_RFC:
2810 if (olen == sizeof(rfc))
2811 memcpy(&rfc, (void *)val, olen);
/* A locked-down device must not let the peer change the mode. */
2813 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2814 rfc.mode != pi->mode)
2815 return -ECONNREFUSED;
2819 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2820 sizeof(rfc), (unsigned long) &rfc);
2825 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2826 return -ECONNREFUSED;
2828 pi->mode = rfc.mode;
2830 if (*result == L2CAP_CONF_SUCCESS) {
2832 case L2CAP_MODE_ERTM:
2833 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2834 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2835 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2837 case L2CAP_MODE_STREAMING:
2838 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2842 req->dcid = cpu_to_le16(pi->dcid);
2843 req->flags = cpu_to_le16(0x0000);
/*
 * Fill a minimal configuration response header (scid/result/flags)
 * into @data; the response length is returned (line elided).
 */
2848 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2850 struct l2cap_conf_rsp *rsp = data;
2851 void *ptr = rsp->data;
2853 BT_DBG("sk %p", sk);
2855 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2856 rsp->result = cpu_to_le16(result);
2857 rsp->flags = cpu_to_le16(flags);
/*
 * Extract the final RFC parameters from a successful config response:
 * for ERTM adopt retransmission/monitor timeouts and MPS, for
 * streaming just the MPS.  No-op for basic mode.
 */
2862 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2864 struct l2cap_pinfo *pi = l2cap_pi(sk);
2867 struct l2cap_conf_rfc rfc;
2869 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2871 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2874 while (len >= L2CAP_CONF_OPT_SIZE) {
2875 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2878 case L2CAP_CONF_RFC:
2879 if (olen == sizeof(rfc))
2880 memcpy(&rfc, (void *)val, olen);
2887 case L2CAP_MODE_ERTM:
2888 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2889 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2890 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2892 case L2CAP_MODE_STREAMING:
2893 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle a command-reject.  If it answers our outstanding information
 * request, treat the features exchange as done and start pending
 * connections anyway.
 */
2897 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2899 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; other reasons are ignored. */
2901 if (rej->reason != 0x0000)
2904 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2905 cmd->ident == conn->info_ident) {
2906 del_timer(&conn->info_timer);
2908 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2909 conn->info_ident = 0;
2911 l2cap_conn_start(conn);
/*
 * Handle an incoming connection request: find a listener for the PSM,
 * enforce link security (except for SDP), allocate and initialise a
 * child socket, register the channel, and reply with success, pending
 * (security or defer_setup) or an error.  May also kick off the
 * feature-mask exchange and the first configuration request.
 * Labels, braces and unlock paths are elided from this listing.
 */
2917 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2919 struct l2cap_chan_list *list = &conn->chan_list;
2920 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2921 struct l2cap_conn_rsp rsp;
2922 struct sock *parent, *sk = NULL;
2923 int result, status = L2CAP_CS_NO_INFO;
2925 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2926 __le16 psm = req->psm;
2928 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2930 /* Check if we have socket listening on psm */
2931 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2933 result = L2CAP_CR_BAD_PSM;
2937 /* Check if the ACL is secure enough (if not SDP) */
2938 if (psm != cpu_to_le16(0x0001) &&
2939 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05 = authentication failure, reported at disconnect time. */
2940 conn->disc_reason = 0x05;
2941 result = L2CAP_CR_SEC_BLOCK;
2945 result = L2CAP_CR_NO_MEM;
2947 /* Check for backlog size */
2948 if (sk_acceptq_is_full(parent)) {
2949 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2953 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2957 write_lock_bh(&list->lock);
2959 /* Check if we already have channel with that dcid */
2960 if (__l2cap_get_chan_by_dcid(list, scid)) {
2961 write_unlock_bh(&list->lock);
2962 sock_set_flag(sk, SOCK_ZAPPED);
2963 l2cap_sock_kill(sk);
2967 hci_conn_hold(conn->hcon);
2969 l2cap_sock_init(sk, parent);
2970 bacpy(&bt_sk(sk)->src, conn->src);
2971 bacpy(&bt_sk(sk)->dst, conn->dst);
2972 l2cap_pi(sk)->psm = psm;
2973 l2cap_pi(sk)->dcid = scid;
2975 __l2cap_chan_add(conn, sk, parent);
2976 dcid = l2cap_pi(sk)->scid;
2978 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2980 l2cap_pi(sk)->ident = cmd->ident;
/* Reply immediately only after the feature exchange has finished. */
2982 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2983 if (l2cap_check_security(sk)) {
2984 if (bt_sk(sk)->defer_setup) {
2985 sk->sk_state = BT_CONNECT2;
2986 result = L2CAP_CR_PEND;
2987 status = L2CAP_CS_AUTHOR_PEND;
2988 parent->sk_data_ready(parent, 0);
2990 sk->sk_state = BT_CONFIG;
2991 result = L2CAP_CR_SUCCESS;
2992 status = L2CAP_CS_NO_INFO;
2995 sk->sk_state = BT_CONNECT2;
2996 result = L2CAP_CR_PEND;
2997 status = L2CAP_CS_AUTHEN_PEND;
3000 sk->sk_state = BT_CONNECT2;
3001 result = L2CAP_CR_PEND;
3002 status = L2CAP_CS_NO_INFO;
3005 write_unlock_bh(&list->lock);
3008 bh_unlock_sock(parent);
3011 rsp.scid = cpu_to_le16(scid);
3012 rsp.dcid = cpu_to_le16(dcid);
3013 rsp.result = cpu_to_le16(result);
3014 rsp.status = cpu_to_le16(status);
3015 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info yet: kick off the feature-mask request. */
3017 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
3018 struct l2cap_info_req info;
3019 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3021 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
3022 conn->info_ident = l2cap_get_ident(conn);
3024 mod_timer(&conn->info_timer, jiffies +
3025 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
3027 l2cap_send_cmd(conn, conn->info_ident,
3028 L2CAP_INFO_REQ, sizeof(info), &info);
/* Accepted immediately: start configuration right away. */
3031 if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
3032 result == L2CAP_CR_SUCCESS) {
3034 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3035 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3036 l2cap_build_conf_req(sk, buf), buf);
3037 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle a connection response to our connect request: on success
 * record the remote CID, move to BT_CONFIG and send the first config
 * request; on pending just flag CONNECT_PEND; on failure tear the
 * channel down (deferred via a short timer when the sock is
 * user-locked).  Braces and unlock lines are elided from this listing.
 */
3043 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3045 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3046 u16 scid, dcid, result, status;
3050 scid = __le16_to_cpu(rsp->scid);
3051 dcid = __le16_to_cpu(rsp->dcid);
3052 result = __le16_to_cpu(rsp->result);
3053 status = __le16_to_cpu(rsp->status);
3055 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid==0 responses (errors) are matched by command ident instead. */
3058 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3062 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
3068 case L2CAP_CR_SUCCESS:
3069 sk->sk_state = BT_CONFIG;
3070 l2cap_pi(sk)->ident = 0;
3071 l2cap_pi(sk)->dcid = dcid;
3072 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
3074 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
3077 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3079 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3080 l2cap_build_conf_req(sk, req), req);
3081 l2cap_pi(sk)->num_conf_req++;
3085 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3089 /* don't delete l2cap channel if sk is owned by user */
3090 if (sock_owned_by_user(sk)) {
3091 sk->sk_state = BT_DISCONN;
3092 l2cap_sock_clear_timer(sk);
3093 l2cap_sock_set_timer(sk, HZ / 5);
3097 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Choose the channel's FCS setting once configuration completes: off
 * outside ERTM/streaming, CRC16 unless both sides requested no FCS.
 */
3105 static inline void set_default_fcs(struct l2cap_pinfo *pi)
3107 /* FCS is enabled only in ERTM or streaming mode, if one or both
3110 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3111 pi->fcs = L2CAP_FCS_NONE;
3112 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3113 pi->fcs = L2CAP_FCS_CRC16;
/*
 * Handle a configuration request: accumulate (possibly multi-fragment)
 * option data in pi->conf_req, reject overflows, answer fragments with
 * an empty continuation response, and on the final fragment parse the
 * options, respond, and — once both directions are configured — bring
 * the channel up (initialising ERTM state when needed).  Labels,
 * braces and unlock lines are elided from this listing.
 */
3116 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3118 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3124 dcid = __le16_to_cpu(req->dcid);
3125 flags = __le16_to_cpu(req->flags);
3127 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3129 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3133 if (sk->sk_state == BT_DISCONN)
3136 /* Reject if config buffer is too small. */
3137 len = cmd_len - sizeof(*req);
3138 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3139 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3140 l2cap_build_conf_rsp(sk, rsp,
3141 L2CAP_CONF_REJECT, flags), rsp);
/* Append this fragment to the per-channel accumulation buffer. */
3146 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3147 l2cap_pi(sk)->conf_len += len;
3149 if (flags & 0x0001) {
3150 /* Incomplete config. Send empty response. */
3151 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3152 l2cap_build_conf_rsp(sk, rsp,
3153 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3157 /* Complete config. */
3158 len = l2cap_parse_conf_req(sk, rsp);
/* Negative return = irreconcilable config; abort the channel. */
3160 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3164 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3165 l2cap_pi(sk)->num_conf_rsp++;
3167 /* Reset config buffer. */
3168 l2cap_pi(sk)->conf_len = 0;
3170 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: channel becomes operational. */
3173 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
3174 set_default_fcs(l2cap_pi(sk));
3176 sk->sk_state = BT_CONNECTED;
3178 l2cap_pi(sk)->next_tx_seq = 0;
3179 l2cap_pi(sk)->expected_tx_seq = 0;
3180 __skb_queue_head_init(TX_QUEUE(sk));
3181 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3182 l2cap_ertm_init(sk);
3184 l2cap_chan_ready(sk);
3188 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3190 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3191 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3192 l2cap_build_conf_req(sk, buf), buf);
3193 l2cap_pi(sk)->num_conf_req++;
/* Handle an incoming L2CAP Configuration Response. On UNACCEPT, re-parses
 * the peer's counter-proposal and retries (bounded by L2CAP_CONF_MAX_CONF_RSP);
 * other failures disconnect the channel. Marks the input direction done and
 * brings the channel up once the output direction is also configured. */
3201 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3203 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3204 u16 scid, flags, result;
/* NOTE(review): cmd->len is a wire-format __le16; the other fields below go
 * through __le16_to_cpu but this one does not — looks like a byte-order bug
 * on big-endian hosts. Verify against the cmd_len handling in the caller. */
3206 int len = cmd->len - sizeof(*rsp);
3208 scid = __le16_to_cpu(rsp->scid);
3209 flags = __le16_to_cpu(rsp->flags);
3210 result = __le16_to_cpu(rsp->result);
3212 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3213 scid, flags, result);
3215 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3220 case L2CAP_CONF_SUCCESS:
3221 l2cap_conf_rfc_get(sk, rsp->data, len);
3224 case L2CAP_CONF_UNACCEPT:
/* Only retry a bounded number of times to avoid an endless config loop. */
3225 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3228 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3229 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3233 /* throw out any old stored conf requests */
3234 result = L2CAP_CONF_SUCCESS;
3235 len = l2cap_parse_conf_rsp(sk, rsp->data,
3238 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3242 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3243 L2CAP_CONF_REQ, len, req);
3244 l2cap_pi(sk)->num_conf_req++;
3245 if (result != L2CAP_CONF_SUCCESS)
/* Rejected / retries exhausted: give the peer a short grace timer, then
 * initiate disconnection. */
3251 sk->sk_err = ECONNRESET;
3252 l2cap_sock_set_timer(sk, HZ * 5);
3253 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3260 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3262 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3263 set_default_fcs(l2cap_pi(sk));
3265 sk->sk_state = BT_CONNECTED;
3266 l2cap_pi(sk)->next_tx_seq = 0;
3267 l2cap_pi(sk)->expected_tx_seq = 0;
3268 __skb_queue_head_init(TX_QUEUE(sk));
3269 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3270 l2cap_ertm_init(sk);
3272 l2cap_chan_ready(sk);
/* Handle an incoming L2CAP Disconnection Request: acknowledge with a
 * Disconnection Response, shut the socket down, and delete the channel —
 * unless the socket is currently locked by userspace, in which case
 * deletion is deferred via a short timer. */
3280 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3282 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3283 struct l2cap_disconn_rsp rsp;
3287 scid = __le16_to_cpu(req->scid);
3288 dcid = __le16_to_cpu(req->dcid);
3290 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid, hence the lookup by dcid here. */
3292 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3296 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3297 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3298 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3300 sk->sk_shutdown = SHUTDOWN_MASK;
3302 /* don't delete l2cap channel if sk is owned by user */
3303 if (sock_owned_by_user(sk)) {
3304 sk->sk_state = BT_DISCONN;
3305 l2cap_sock_clear_timer(sk);
/* HZ/5: retry channel deletion shortly, once userspace releases the sock. */
3306 l2cap_sock_set_timer(sk, HZ / 5);
3311 l2cap_chan_del(sk, ECONNRESET);
3314 l2cap_sock_kill(sk);
/* Handle an incoming L2CAP Disconnection Response: the peer confirmed our
 * disconnect, so delete the channel (deferred via timer if the socket is
 * currently locked by userspace). */
3318 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3320 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3324 scid = __le16_to_cpu(rsp->scid);
3325 dcid = __le16_to_cpu(rsp->dcid);
3327 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3329 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3333 /* don't delete l2cap channel if sk is owned by user */
3334 if (sock_owned_by_user(sk)) {
3335 sk->sk_state = BT_DISCONN;
3336 l2cap_sock_clear_timer(sk);
3337 l2cap_sock_set_timer(sk, HZ / 5);
/* err == 0: this was a clean, locally initiated disconnect. */
3342 l2cap_chan_del(sk, 0);
3345 l2cap_sock_kill(sk);
/* Handle an incoming L2CAP Information Request. Answers feature-mask and
 * fixed-channels queries; any other type gets a NOTSUPP response. */
3349 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3351 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3354 type = __le16_to_cpu(req->type);
3356 BT_DBG("type 0x%4.4x", type);
3358 if (type == L2CAP_IT_FEAT_MASK) {
3360 u32 feat_mask = l2cap_feat_mask;
3361 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3362 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3363 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming support on top of the base feature mask. */
3365 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3367 put_unaligned_le32(feat_mask, rsp->data);
3368 l2cap_send_cmd(conn, cmd->ident,
3369 L2CAP_INFO_RSP, sizeof(buf), buf);
3370 } else if (type == L2CAP_IT_FIXED_CHAN) {
3372 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3373 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3374 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel bitmap is an 8-byte blob placed right after the 4-byte
 * info_rsp header (type + result). */
3375 memcpy(buf + 4, l2cap_fixed_chan, 8);
3376 l2cap_send_cmd(conn, cmd->ident,
3377 L2CAP_INFO_RSP, sizeof(buf), buf);
3379 struct l2cap_info_rsp rsp;
3380 rsp.type = cpu_to_le16(type);
3381 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3382 l2cap_send_cmd(conn, cmd->ident,
3383 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an incoming L2CAP Information Response. Caches the peer's feature
 * mask, chains a fixed-channels query when the peer supports it, and kicks
 * off pending channel establishment (l2cap_conn_start) once the info
 * exchange is finished (or failed). */
3389 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3391 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3394 type = __le16_to_cpu(rsp->type);
3395 result = __le16_to_cpu(rsp->result);
3397 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* A response arrived, so the info-request guard timer can be stopped. */
3399 del_timer(&conn->info_timer);
3401 if (result != L2CAP_IR_SUCCESS) {
/* Peer refused: treat the exchange as done and proceed without features. */
3402 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3403 conn->info_ident = 0;
3405 l2cap_conn_start(conn);
3410 if (type == L2CAP_IT_FEAT_MASK) {
3411 conn->feat_mask = get_unaligned_le32(rsp->data);
3413 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* Follow up with a fixed-channels query before starting channels. */
3414 struct l2cap_info_req req;
3415 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3417 conn->info_ident = l2cap_get_ident(conn);
3419 l2cap_send_cmd(conn, conn->info_ident,
3420 L2CAP_INFO_REQ, sizeof(req), &req);
3422 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3423 conn->info_ident = 0;
3425 l2cap_conn_start(conn);
3427 } else if (type == L2CAP_IT_FIXED_CHAN) {
3428 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3429 conn->info_ident = 0;
3431 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signaling channel: iterate over the concatenated
 * commands in the skb, dispatch each to its handler, and answer any handler
 * failure or unknown opcode with a Command Reject. */
3437 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3439 u8 *data = skb->data;
3441 struct l2cap_cmd_hdr cmd;
/* Mirror raw signaling traffic to any raw sockets first. */
3444 l2cap_raw_recv(conn, skb);
3446 while (len >= L2CAP_CMD_HDR_SIZE) {
3448 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3449 data += L2CAP_CMD_HDR_SIZE;
3450 len -= L2CAP_CMD_HDR_SIZE;
3452 cmd_len = le16_to_cpu(cmd.len);
3454 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject commands claiming more payload than remains, and commands with
 * the reserved ident 0. */
3456 if (cmd_len > len || !cmd.ident) {
3457 BT_DBG("corrupted command");
3462 case L2CAP_COMMAND_REJ:
3463 l2cap_command_rej(conn, &cmd, data);
3466 case L2CAP_CONN_REQ:
3467 err = l2cap_connect_req(conn, &cmd, data);
3470 case L2CAP_CONN_RSP:
3471 err = l2cap_connect_rsp(conn, &cmd, data);
3474 case L2CAP_CONF_REQ:
3475 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3478 case L2CAP_CONF_RSP:
3479 err = l2cap_config_rsp(conn, &cmd, data);
3482 case L2CAP_DISCONN_REQ:
3483 err = l2cap_disconnect_req(conn, &cmd, data);
3486 case L2CAP_DISCONN_RSP:
3487 err = l2cap_disconnect_rsp(conn, &cmd, data);
3490 case L2CAP_ECHO_REQ:
/* Echo simply reflects the request payload back to the sender. */
3491 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3494 case L2CAP_ECHO_RSP:
3497 case L2CAP_INFO_REQ:
3498 err = l2cap_information_req(conn, &cmd, data);
3501 case L2CAP_INFO_RSP:
3502 err = l2cap_information_rsp(conn, &cmd, data);
3506 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3512 struct l2cap_cmd_rej rej;
3513 BT_DBG("error %d", err);
3515 /* FIXME: Map err to a valid reason */
3516 rej.reason = cpu_to_le16(0);
3517 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailer of a received ERTM/streaming frame.
 * Returns 0 on match (or when FCS is disabled), nonzero on mismatch. */
3527 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3529 u16 our_fcs, rcv_fcs;
/* CRC covers the Basic L2CAP header plus the 2-byte control field. */
3530 int hdr_size = L2CAP_HDR_SIZE + 2;
3532 if (pi->fcs == L2CAP_FCS_CRC16) {
/* Trim the 2 FCS bytes off skb->len first; the bytes stay in the buffer,
 * so skb->data + skb->len now points exactly at the received FCS. */
3533 skb_trim(skb, skb->len - 2);
3534 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3535 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3537 if (our_fcs != rcv_fcs)
/* Answer a peer poll (P-bit) with whatever is appropriate: an RNR when we
 * are locally busy, retransmitted/pending I-frames otherwise, and a plain
 * RR if nothing was sent at all. */
3543 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3545 struct l2cap_pinfo *pi = l2cap_pi(sk);
3548 pi->frames_sent = 0;
3550 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3552 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3553 control |= L2CAP_SUPER_RCV_NOT_READY;
3554 l2cap_send_sframe(pi, control);
3555 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3558 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3559 l2cap_retransmit_frames(sk);
3561 l2cap_ertm_send(sk);
/* If neither RNR nor any I-frame went out, still acknowledge with an RR. */
3563 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3564 pi->frames_sent == 0) {
3565 control |= L2CAP_SUPER_RCV_READY;
3566 l2cap_send_sframe(pi, control);
/* Insert an out-of-order I-frame into the SREJ queue, keeping the queue
 * sorted by tx_seq relative to buffer_seq (sequence numbers are modulo 64).
 * Duplicate tx_seq values are detected and not queued twice. */
3570 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3572 struct sk_buff *next_skb;
3573 struct l2cap_pinfo *pi = l2cap_pi(sk);
3574 int tx_seq_offset, next_tx_seq_offset;
3576 bt_cb(skb)->tx_seq = tx_seq;
3577 bt_cb(skb)->sar = sar;
3579 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: nothing to compare against, append directly. */
3581 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Normalize tx_seq into a 0..63 distance from buffer_seq so modulo
 * wrap-around compares correctly. */
3585 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3586 if (tx_seq_offset < 0)
3587 tx_seq_offset += 64;
3590 if (bt_cb(next_skb)->tx_seq == tx_seq)
3593 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3594 pi->buffer_seq) % 64;
3595 if (next_tx_seq_offset < 0)
3596 next_tx_seq_offset += 64;
3598 if (next_tx_seq_offset > tx_seq_offset) {
3599 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3603 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3606 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3608 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble ERTM I-frames into SDUs according to their SAR bits and hand
 * complete SDUs to the socket receive queue. The SAR_RETRY state lets the
 * final fragment be retried later (local-busy handling) without redoing the
 * earlier reassembly steps. Protocol violations disconnect the channel. */
3613 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3615 struct l2cap_pinfo *pi = l2cap_pi(sk);
3616 struct sk_buff *_skb;
3619 switch (control & L2CAP_CTRL_SAR) {
3620 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented SDU while a segmented one is in progress is a
 * protocol violation. */
3621 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3624 err = sock_queue_rcv_skb(sk, skb);
3630 case L2CAP_SDU_START:
3631 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
/* First two payload bytes of a START frame carry the total SDU length. */
3634 pi->sdu_len = get_unaligned_le16(skb->data);
3636 if (pi->sdu_len > pi->imtu)
3639 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3643 /* pull sdu_len bytes only after alloc, because of Local Busy
3644 * condition we have to be sure that this will be executed
3645 * only once, i.e., when alloc does not fail */
3648 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3650 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3651 pi->partial_sdu_len = skb->len;
3654 case L2CAP_SDU_CONTINUE:
3655 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3661 pi->partial_sdu_len += skb->len;
3662 if (pi->partial_sdu_len > pi->sdu_len)
3665 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* SDU END fragment follows. */
3670 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* Skip the bookkeeping below on a retry: it already ran the first time. */
3676 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3677 pi->partial_sdu_len += skb->len;
3679 if (pi->partial_sdu_len > pi->imtu)
3682 if (pi->partial_sdu_len != pi->sdu_len)
3685 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* Queue a clone so the assembled SDU can be retried if delivery fails. */
3688 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3690 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3694 err = sock_queue_rcv_skb(sk, _skb);
3697 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3701 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3702 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3716 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* Try to drain the local-busy queue into the socket. If everything drains,
 * exit the local-busy condition: either poll the peer (RR+P) when an RNR was
 * previously sent, or simply clear the busy flags. Returns nonzero while
 * frames remain queued. */
3721 static int l2cap_try_push_rx_skb(struct sock *sk)
3723 struct l2cap_pinfo *pi = l2cap_pi(sk);
3724 struct sk_buff *skb;
3728 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3729 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3730 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Still busy: put the frame back at the head and bail out. */
3732 skb_queue_head(BUSY_QUEUE(sk), skb);
/* Receive-sequence space is modulo 64. */
3736 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3739 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* We told the peer "not ready" earlier; poll it now so it resumes. */
3742 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3743 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3744 l2cap_send_sframe(pi, control);
3745 l2cap_pi(sk)->retry_count = 1;
3747 del_timer(&pi->retrans_timer);
3748 __mod_monitor_timer();
3750 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3753 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3754 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3756 BT_DBG("sk %p, Exit local busy", sk);
/* Workqueue handler for the local-busy condition: repeatedly tries to push
 * queued frames to the socket, sleeping between attempts, until the queue
 * drains, a signal arrives, a socket error occurs, or the retry budget
 * (L2CAP_LOCAL_BUSY_TRIES) is exhausted — which disconnects the channel. */
3763 static void l2cap_busy_work(struct work_struct *work)
3764 DECLARE_WAITQUEUE(wait, current);
3765 struct l2cap_pinfo *pi =
3766 container_of(work, struct l2cap_pinfo, busy_work);
/* struct sock is the first member of l2cap_pinfo, so this cast is valid. */
3767 struct sock *sk = (struct sock *)pi;
3768 int n_tries = 0, timeo = HZ/5, err;
3769 struct sk_buff *skb;
3772 add_wait_queue(sk_sleep(sk), &wait);
3773 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3774 set_current_state(TASK_INTERRUPTIBLE);
3776 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3778 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3785 if (signal_pending(current)) {
3786 err = sock_intr_errno(timeo);
3791 timeo = schedule_timeout(timeo);
3794 err = sock_error(sk);
3798 if (l2cap_try_push_rx_skb(sk) == 0)
3802 set_current_state(TASK_RUNNING);
3803 remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver a received ERTM frame to the socket, entering the local-busy
 * state when the receive buffer is full: queue the frame, tell the peer
 * RNR, and let the busy workqueue drain the backlog asynchronously. */
3808 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3810 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Already busy: just append to the busy queue and retry the drain. */
3813 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3814 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3815 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3816 return l2cap_try_push_rx_skb(sk);
3821 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3823 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3827 /* Busy Condition */
3828 BT_DBG("sk %p, Enter local busy", sk);
3830 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3831 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3832 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop transmitting until we recover. */
3834 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3835 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3836 l2cap_send_sframe(pi, sctrl);
3838 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3840 del_timer(&pi->ack_timer);
3842 queue_work(_busy_wq, &pi->busy_work);
/* Reassemble streaming-mode frames into SDUs by SAR bits. Streaming mode is
 * best-effort: malformed sequences drop state rather than disconnecting. */
3847 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3849 struct l2cap_pinfo *pi = l2cap_pi(sk);
3850 struct sk_buff *_skb;
3854 * TODO: We have to notify the userland if some data is lost with the
3858 switch (control & L2CAP_CTRL_SAR) {
3859 case L2CAP_SDU_UNSEGMENTED:
3860 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3865 err = sock_queue_rcv_skb(sk, skb);
3871 case L2CAP_SDU_START:
3872 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes of a START frame carry the total SDU length. */
3877 pi->sdu_len = get_unaligned_le16(skb->data);
3880 if (pi->sdu_len > pi->imtu) {
3885 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3891 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3893 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3894 pi->partial_sdu_len = skb->len;
3898 case L2CAP_SDU_CONTINUE:
3899 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3902 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3904 pi->partial_sdu_len += skb->len;
3905 if (pi->partial_sdu_len > pi->sdu_len)
/* SDU END fragment follows. */
3913 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3916 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3918 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3919 pi->partial_sdu_len += skb->len;
3921 if (pi->partial_sdu_len > pi->imtu)
3924 if (pi->partial_sdu_len == pi->sdu_len) {
/* NOTE(review): skb_clone may return NULL under GFP_ATOMIC; the result is
 * passed to sock_queue_rcv_skb without a check here — verify against the
 * elided surrounding lines. */
3925 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3926 err = sock_queue_rcv_skb(sk, _skb);
/* After a missing frame arrives, deliver the now-contiguous run of frames
 * from the head of the SREJ queue, advancing buffer_seq_srej (modulo 64)
 * for each frame pushed up. */
3941 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3943 struct sk_buff *skb;
3946 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Queue is sorted by tx_seq; stop at the first remaining gap. */
3947 if (bt_cb(skb)->tx_seq != tx_seq)
3950 skb = skb_dequeue(SREJ_QUEUE(sk));
3951 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3952 l2cap_ertm_reassembly_sdu(sk, skb, control);
3953 l2cap_pi(sk)->buffer_seq_srej =
3954 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3955 tx_seq = (tx_seq + 1) % 64;
/* Re-send SREJ S-frames for every sequence number still outstanding in the
 * SREJ list up to (and including) tx_seq; entries up to tx_seq are freed,
 * later ones are re-queued at the tail after being re-requested. */
3959 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3961 struct l2cap_pinfo *pi = l2cap_pi(sk);
3962 struct srej_list *l, *tmp;
3965 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3966 if (l->tx_seq == tx_seq) {
3971 control = L2CAP_SUPER_SELECT_REJECT;
3972 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3973 l2cap_send_sframe(pi, control);
/* Move the entry to the tail so the list stays in request order. */
3975 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send one SREJ S-frame per missing sequence number between expected_tx_seq
 * and the received tx_seq, recording each request in the SREJ list. */
3979 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3981 struct l2cap_pinfo *pi = l2cap_pi(sk);
3982 struct srej_list *new;
3985 while (tx_seq != pi->expected_tx_seq) {
3986 control = L2CAP_SUPER_SELECT_REJECT;
3987 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3988 l2cap_send_sframe(pi, control);
/* NOTE(review): kzalloc result is dereferenced without a NULL check —
 * GFP_ATOMIC can fail and this would oops on OOM. */
3990 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3991 new->tx_seq = pi->expected_tx_seq;
3992 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3993 list_add_tail(&new->list, SREJ_LIST(sk));
/* Finally skip past the frame that actually arrived. */
3995 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* Core ERTM receive path for I-frames: validates tx_seq against the receive
 * window, handles in-order delivery, out-of-order buffering via the SREJ
 * machinery, duplicate detection, F-bit processing, and periodic acking
 * (one ack per tx_win/6 + 1 frames). */
3998 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4000 struct l2cap_pinfo *pi = l2cap_pi(sk);
4001 u8 tx_seq = __get_txseq(rx_control);
4002 u8 req_seq = __get_reqseq(rx_control);
4003 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
4004 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every sixth of the transmit window. */
4005 int num_to_ack = (pi->tx_win/6) + 1;
4008 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
/* F-bit answering our earlier poll: stop the monitor timer and leave the
 * WAIT_F state. */
4011 if (L2CAP_CTRL_FINAL & rx_control &&
4012 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4013 del_timer(&pi->monitor_timer);
4014 if (pi->unacked_frames > 0)
4015 __mod_retrans_timer();
4016 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* The piggy-backed req_seq acknowledges our outgoing frames. */
4019 pi->expected_ack_seq = req_seq;
4020 l2cap_drop_acked_frames(sk);
4022 if (tx_seq == pi->expected_tx_seq)
4025 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
4026 if (tx_seq_offset < 0)
4027 tx_seq_offset += 64;
4029 /* invalid tx_seq */
4030 if (tx_seq_offset >= pi->tx_win) {
4031 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* NOTE(review): conn_state is a bitmask ORed/ANDed everywhere else in this
 * file; comparing with == only matches when LOCAL_BUSY is the sole bit set.
 * This likely should be (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) — confirm
 * against upstream history. */
4035 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
4038 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4039 struct srej_list *first;
4041 first = list_first_entry(SREJ_LIST(sk),
4042 struct srej_list, list);
/* This frame fills the oldest outstanding gap. */
4043 if (tx_seq == first->tx_seq) {
4044 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4045 l2cap_check_srej_gap(sk, tx_seq);
4047 list_del(&first->list);
/* All gaps filled: leave SREJ_SENT recovery. */
4050 if (list_empty(SREJ_LIST(sk))) {
4051 pi->buffer_seq = pi->buffer_seq_srej;
4052 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
4054 BT_DBG("sk %p, Exit SREJ_SENT", sk);
4057 struct srej_list *l;
4059 /* duplicated tx_seq */
4060 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
4063 list_for_each_entry(l, SREJ_LIST(sk), list) {
4064 if (l->tx_seq == tx_seq) {
4065 l2cap_resend_srejframe(sk, tx_seq);
4069 l2cap_send_srejframe(sk, tx_seq);
4072 expected_tx_seq_offset =
4073 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4074 if (expected_tx_seq_offset < 0)
4075 expected_tx_seq_offset += 64;
4077 /* duplicated tx_seq */
4078 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-order frame: enter SREJ recovery. */
4081 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4083 BT_DBG("sk %p, Enter SREJ", sk);
4085 INIT_LIST_HEAD(SREJ_LIST(sk));
4086 pi->buffer_seq_srej = pi->buffer_seq;
4088 __skb_queue_head_init(SREJ_QUEUE(sk));
4089 __skb_queue_head_init(BUSY_QUEUE(sk));
4090 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4092 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4094 l2cap_send_srejframe(sk, tx_seq);
4096 del_timer(&pi->ack_timer);
/* In-order frame: advance the expected sequence and deliver. */
4101 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4103 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4104 bt_cb(skb)->tx_seq = tx_seq;
4105 bt_cb(skb)->sar = sar;
4106 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4110 err = l2cap_push_rx_skb(sk, skb, rx_control);
4114 if (rx_control & L2CAP_CTRL_FINAL) {
4115 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4116 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4118 l2cap_retransmit_frames(sk);
4123 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4124 if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver Ready (RR) S-frame: process the acknowledgement, and
 * react to the P/F bits — answer a poll, complete a retransmission cycle on
 * a final bit, or resume sending after remote-busy clears. */
4134 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
4136 struct l2cap_pinfo *pi = l2cap_pi(sk);
4138 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
4141 pi->expected_ack_seq = __get_reqseq(rx_control);
4142 l2cap_drop_acked_frames(sk);
4144 if (rx_control & L2CAP_CTRL_POLL) {
/* Peer polled us: our next response must carry the F-bit. */
4145 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4146 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4147 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4148 (pi->unacked_frames > 0))
4149 __mod_retrans_timer();
4151 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4152 l2cap_send_srejtail(sk);
4154 l2cap_send_i_or_rr_or_rnr(sk);
4157 } else if (rx_control & L2CAP_CTRL_FINAL) {
4158 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4160 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4161 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4163 l2cap_retransmit_frames(sk);
4166 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4167 (pi->unacked_frames > 0))
4168 __mod_retrans_timer();
4170 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4171 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4174 l2cap_ertm_send(sk);
/* Handle a Reject (REJ) S-frame: drop acked frames and retransmit from the
 * rejected sequence number; REJ_ACT tracking avoids a double retransmission
 * when the F-bit answer to an outstanding poll arrives later. */
4179 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4181 struct l2cap_pinfo *pi = l2cap_pi(sk);
4182 u8 tx_seq = __get_reqseq(rx_control);
4184 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4186 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4188 pi->expected_ack_seq = tx_seq;
4189 l2cap_drop_acked_frames(sk);
4191 if (rx_control & L2CAP_CTRL_FINAL) {
4192 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4193 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4195 l2cap_retransmit_frames(sk);
4197 l2cap_retransmit_frames(sk);
4199 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4200 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the requested
 * frame. P and F bit variants additionally manage acknowledgements and the
 * SREJ_ACT state that suppresses duplicate retransmissions while a poll is
 * outstanding. */
4203 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4205 struct l2cap_pinfo *pi = l2cap_pi(sk);
4206 u8 tx_seq = __get_reqseq(rx_control);
4208 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4210 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4212 if (rx_control & L2CAP_CTRL_POLL) {
4213 pi->expected_ack_seq = tx_seq;
4214 l2cap_drop_acked_frames(sk);
4216 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4217 l2cap_retransmit_one_frame(sk, tx_seq);
4219 l2cap_ertm_send(sk);
/* Remember which seq was re-requested so the later F-bit reply does not
 * trigger a second retransmission. */
4221 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4222 pi->srej_save_reqseq = tx_seq;
4223 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4225 } else if (rx_control & L2CAP_CTRL_FINAL) {
4226 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4227 pi->srej_save_reqseq == tx_seq)
4228 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4230 l2cap_retransmit_one_frame(sk, tx_seq);
4232 l2cap_retransmit_one_frame(sk, tx_seq);
4233 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4234 pi->srej_save_reqseq = tx_seq;
4235 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, stop the
 * retransmission timer, and answer a poll as required (RR/RNR with F-bit,
 * or the SREJ tail while in SREJ recovery). */
4240 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4242 struct l2cap_pinfo *pi = l2cap_pi(sk);
4243 u8 tx_seq = __get_reqseq(rx_control);
4245 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4247 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4248 pi->expected_ack_seq = tx_seq;
4249 l2cap_drop_acked_frames(sk);
4251 if (rx_control & L2CAP_CTRL_POLL)
4252 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4254 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* Peer can't receive: no point retransmitting until it recovers. */
4255 del_timer(&pi->retrans_timer);
4256 if (rx_control & L2CAP_CTRL_POLL)
4257 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4261 if (rx_control & L2CAP_CTRL_POLL)
4262 l2cap_send_srejtail(sk);
4264 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler after common
 * F-bit processing (leave WAIT_F and stop the monitor timer when the frame
 * answers our poll). */
4267 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4269 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4271 if (L2CAP_CTRL_FINAL & rx_control &&
4272 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4273 del_timer(&l2cap_pi(sk)->monitor_timer);
4274 if (l2cap_pi(sk)->unacked_frames > 0)
4275 __mod_retrans_timer();
4276 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4279 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4280 case L2CAP_SUPER_RCV_READY:
4281 l2cap_data_channel_rrframe(sk, rx_control);
4284 case L2CAP_SUPER_REJECT:
4285 l2cap_data_channel_rejframe(sk, rx_control);
4288 case L2CAP_SUPER_SELECT_REJECT:
4289 l2cap_data_channel_srejframe(sk, rx_control);
4292 case L2CAP_SUPER_RCV_NOT_READY:
4293 l2cap_data_channel_rnrframe(sk, rx_control);
/* Entry point for a received ERTM frame: verify FCS, strip the SAR length
 * field on SDU-start I-frames, validate payload length against MPS and the
 * req_seq against the unacked window, then route to the I-frame or S-frame
 * handler. Invalid frames disconnect the channel (except FCS failures,
 * which are silently dropped and recovered by retransmission). */
4301 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4303 struct l2cap_pinfo *pi = l2cap_pi(sk);
4306 int len, next_tx_seq_offset, req_seq_offset;
4308 control = get_unaligned_le16(skb->data);
4313 * We can just drop the corrupted I-frame here.
4314 * Receiver will miss it and start proper recovery
4315 * procedures and ask retransmission.
4317 if (l2cap_check_fcs(pi, skb))
/* SDU-start frames carry a 2-byte SDU length after the control field. */
4320 if (__is_sar_start(control) && __is_iframe(control))
4323 if (pi->fcs == L2CAP_FCS_CRC16)
4326 if (len > pi->mps) {
4327 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* req_seq must fall inside [expected_ack_seq, next_tx_seq] modulo 64;
 * anything else acknowledges frames we never sent. */
4331 req_seq = __get_reqseq(control);
4332 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4333 if (req_seq_offset < 0)
4334 req_seq_offset += 64;
4336 next_tx_seq_offset =
4337 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4338 if (next_tx_seq_offset < 0)
4339 next_tx_seq_offset += 64;
4341 /* check for invalid req-seq */
4342 if (req_seq_offset > next_tx_seq_offset) {
4343 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4347 if (__is_iframe(control)) {
4349 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4353 l2cap_data_channel_iframe(sk, control, skb);
4357 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4361 l2cap_data_channel_sframe(sk, control, skb);
/* Deliver a data frame to the channel identified by cid, per channel mode:
 * basic mode queues directly (with MTU check), ERTM goes through the full
 * reliable receive path (or the socket backlog when userspace holds the
 * lock), and streaming mode does best-effort in-sequence delivery. */
4371 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4374 struct l2cap_pinfo *pi;
4379 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4381 BT_DBG("unknown cid 0x%4.4x", cid);
4387 BT_DBG("sk %p, len %d", sk, skb->len);
4389 if (sk->sk_state != BT_CONNECTED)
4393 case L2CAP_MODE_BASIC:
4394 /* If socket recv buffers overflows we drop data here
4395 * which is *bad* because L2CAP has to be reliable.
4396 * But we don't have any other choice. L2CAP doesn't
4397 * provide flow control mechanism. */
4399 if (pi->imtu < skb->len)
4402 if (!sock_queue_rcv_skb(sk, skb))
4406 case L2CAP_MODE_ERTM:
4407 if (!sock_owned_by_user(sk)) {
4408 l2cap_ertm_data_rcv(sk, skb);
/* Socket locked by userspace: defer via the backlog queue. */
4410 if (sk_add_backlog(sk, skb))
4416 case L2CAP_MODE_STREAMING:
4417 control = get_unaligned_le16(skb->data);
4421 if (l2cap_check_fcs(pi, skb))
4424 if (__is_sar_start(control))
4427 if (pi->fcs == L2CAP_FCS_CRC16)
4430 if (len > pi->mps || len < 0 || __is_sframe(control))
4433 tx_seq = __get_txseq(control);
/* Streaming: no retransmission — just track (and skip over) losses. */
4435 if (pi->expected_tx_seq == tx_seq)
4436 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4438 pi->expected_tx_seq = (tx_seq + 1) % 64;
4440 l2cap_streaming_reassembly_sdu(sk, skb, control);
4445 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (G-frame) payload to the socket bound to the
 * given PSM on this adapter, subject to state and MTU checks. */
4459 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4463 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4467 BT_DBG("sk %p, len %d", sk, skb->len);
4469 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4472 if (l2cap_pi(sk)->imtu < skb->len)
4475 if (!sock_queue_rcv_skb(sk, skb))
/* Route a complete L2CAP frame by CID: signaling channel, connectionless
 * channel (PSM-prefixed), or a connection-oriented data channel. */
4487 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4489 struct l2cap_hdr *lh = (void *) skb->data;
4493 skb_pull(skb, L2CAP_HDR_SIZE);
4494 cid = __le16_to_cpu(lh->cid);
4495 len = __le16_to_cpu(lh->len);
/* Header length must exactly match the remaining payload. */
4497 if (len != skb->len) {
4502 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4505 case L2CAP_CID_SIGNALING:
4506 l2cap_sig_channel(conn, skb);
4509 case L2CAP_CID_CONN_LESS:
/* Connectionless frames start with a 2-byte PSM. */
4510 psm = get_unaligned_le16(skb->data);
4512 l2cap_conless_channel(conn, psm, skb);
4516 l2cap_data_channel(conn, cid, skb);
4521 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being set up. Scan the
 * listening L2CAP sockets and return the link-mode bits (accept/master),
 * preferring sockets bound exactly to this adapter's address over
 * wildcard (BDADDR_ANY) listeners. */
4523 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4525 int exact = 0, lm1 = 0, lm2 = 0;
4526 register struct sock *sk;
4527 struct hlist_node *node;
4529 if (type != ACL_LINK)
4532 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4534 /* Find listening sockets and check their link_mode */
4535 read_lock(&l2cap_sk_list.lock);
4536 sk_for_each(sk, node, &l2cap_sk_list.head) {
4537 if (sk->sk_state != BT_LISTEN)
/* lm1 accumulates exact-address matches, lm2 wildcard matches. */
4540 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4541 lm1 |= HCI_LM_ACCEPT;
4542 if (l2cap_pi(sk)->role_switch)
4543 lm1 |= HCI_LM_MASTER;
4545 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4546 lm2 |= HCI_LM_ACCEPT;
4547 if (l2cap_pi(sk)->role_switch)
4548 lm2 |= HCI_LM_MASTER;
4551 read_unlock(&l2cap_sk_list.lock);
4553 return exact ? lm1 : lm2;
/* HCI callback: ACL link establishment finished. On success attach an
 * l2cap_conn and start it; on failure tear everything down with the
 * translated error. */
4556 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4558 struct l2cap_conn *conn;
4560 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4562 if (hcon->type != ACL_LINK)
4566 conn = l2cap_conn_add(hcon, status);
4568 l2cap_conn_ready(conn);
4570 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the HCI reason code to use when this ACL link is
 * disconnected (stored on the l2cap_conn during signaling). */
4575 static int l2cap_disconn_ind(struct hci_conn *hcon)
4577 struct l2cap_conn *conn = hcon->l2cap_data;
4579 BT_DBG("hcon %p", hcon);
4581 if (hcon->type != ACL_LINK || !conn)
4584 return conn->disc_reason;
/* HCI callback: ACL link went down — delete the L2CAP connection and
 * propagate the translated error to all its channels. */
4587 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4589 BT_DBG("hcon %p reason %d", hcon, reason);
4591 if (hcon->type != ACL_LINK)
4594 l2cap_conn_del(hcon, bt_err(reason));
4599 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4601 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4604 if (encrypt == 0x00) {
4605 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4606 l2cap_sock_clear_timer(sk);
4607 l2cap_sock_set_timer(sk, HZ * 5);
4608 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4609 __l2cap_sock_close(sk, ECONNREFUSED);
4611 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4612 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption procedure completed. Walk every
 * channel on the connection: established channels get an encryption check;
 * channels blocked in BT_CONNECT now send their Connection Request;
 * channels in BT_CONNECT2 answer the peer's pending request with success
 * or security-block depending on status. */
4616 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4618 struct l2cap_chan_list *l;
4619 struct l2cap_conn *conn = hcon->l2cap_data;
4625 l = &conn->chan_list;
4627 BT_DBG("conn %p", conn);
4629 read_lock(&l->lock);
4631 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Skip channels whose own connect is still pending elsewhere. */
4634 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4639 if (!status && (sk->sk_state == BT_CONNECTED ||
4640 sk->sk_state == BT_CONFIG)) {
4641 l2cap_check_encryption(sk, encrypt);
/* Security done: the deferred Connection Request can go out now. */
4646 if (sk->sk_state == BT_CONNECT) {
4648 struct l2cap_conn_req req;
4649 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4650 req.psm = l2cap_pi(sk)->psm;
4652 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4653 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4655 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4656 L2CAP_CONN_REQ, sizeof(req), &req);
4658 l2cap_sock_clear_timer(sk);
4659 l2cap_sock_set_timer(sk, HZ / 10);
4661 } else if (sk->sk_state == BT_CONNECT2) {
4662 struct l2cap_conn_rsp rsp;
4666 sk->sk_state = BT_CONFIG;
4667 result = L2CAP_CR_SUCCESS;
4669 sk->sk_state = BT_DISCONN;
4670 l2cap_sock_set_timer(sk, HZ / 10);
4671 result = L2CAP_CR_SEC_BLOCK;
4674 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4675 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4676 rsp.result = cpu_to_le16(result);
4677 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4678 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4679 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4685 read_unlock(&l->lock);
/* HCI callback: ACL data arrived. Reassemble fragmented L2CAP frames across
 * ACL packets: an ACL_START fragment carries the Basic L2CAP header (which
 * gives the total frame length); continuation fragments are appended to
 * conn->rx_skb until rx_len reaches zero, then the complete frame is passed
 * to l2cap_recv_frame. Any inconsistency marks the connection unreliable. */
4690 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4692 struct l2cap_conn *conn = hcon->l2cap_data;
4694 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4697 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4699 if (flags & ACL_START) {
4700 struct l2cap_hdr *hdr;
/* A new start while a reassembly is in progress: drop the old partial. */
4706 BT_ERR("Unexpected start frame (len %d)", skb->len);
4707 kfree_skb(conn->rx_skb);
4708 conn->rx_skb = NULL;
4710 l2cap_conn_unreliable(conn, ECOMM);
4713 /* Start fragment always begin with Basic L2CAP header */
4714 if (skb->len < L2CAP_HDR_SIZE) {
4715 BT_ERR("Frame is too short (len %d)", skb->len);
4716 l2cap_conn_unreliable(conn, ECOMM);
4720 hdr = (struct l2cap_hdr *) skb->data;
4721 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4722 cid = __le16_to_cpu(hdr->cid);
4724 if (len == skb->len) {
4725 /* Complete frame received */
4726 l2cap_recv_frame(conn, skb);
4730 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4732 if (skb->len > len) {
4733 BT_ERR("Frame is too long (len %d, expected len %d)",
4735 l2cap_conn_unreliable(conn, ECOMM);
/* Pre-check the destination channel's MTU so we can refuse oversized
 * frames before allocating the reassembly buffer. */
4739 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4741 if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
4742 BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
4743 len, l2cap_pi(sk)->imtu);
4745 l2cap_conn_unreliable(conn, ECOMM);
4752 /* Allocate skb for the complete frame (with header) */
4753 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4757 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4759 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4761 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4763 if (!conn->rx_len) {
4764 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4765 l2cap_conn_unreliable(conn, ECOMM);
4769 if (skb->len > conn->rx_len) {
4770 BT_ERR("Fragment is too long (len %d, expected %d)",
4771 skb->len, conn->rx_len);
4772 kfree_skb(conn->rx_skb);
4773 conn->rx_skb = NULL;
4775 l2cap_conn_unreliable(conn, ECOMM);
4779 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4781 conn->rx_len -= skb->len;
4783 if (!conn->rx_len) {
4784 /* Complete frame received */
4785 l2cap_recv_frame(conn, conn->rx_skb);
4786 conn->rx_skb = NULL;
/*
 * debugfs seq_file show handler: dumps one line per L2CAP socket with
 * addresses, state, PSM, CIDs (elided columns), MTUs and security level.
 *
 * NOTE(review): elided view — the 'sk' declaration, the scid/dcid
 * seq_printf arguments and the return statement are not visible here.
 */
4795 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4798 struct hlist_node *node;
/* _bh variant: the socket list is also touched from softirq context. */
4800 read_lock_bh(&l2cap_sk_list.lock);
4802 sk_for_each(sk, node, &l2cap_sk_list.head) {
4803 struct l2cap_pinfo *pi = l2cap_pi(sk);
4805 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4806 batostr(&bt_sk(sk)->src),
4807 batostr(&bt_sk(sk)->dst),
/* PSM is stored little-endian on the wire; convert for display. */
4808 sk->sk_state, __le16_to_cpu(pi->psm),
4810 pi->imtu, pi->omtu, pi->sec_level);
4813 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the seq_file single-show handler (braces elided here). */
4818 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4820 return single_open(file, l2cap_debugfs_show, inode->i_private);
/*
 * File operations for the "l2cap" debugfs entry, built on the seq_file
 * single_open/single_release pair.
 * NOTE(review): the .read = seq_read line is elided from this view.
 */
4823 static const struct file_operations l2cap_debugfs_fops = {
4824 .open = l2cap_debugfs_open,
4826 .llseek = seq_lseek,
4827 .release = single_release,
/* Dentry of the debugfs file; created in l2cap_init, removed in l2cap_exit. */
4830 static struct dentry *l2cap_debugfs;
/*
 * Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets.
 * L2CAP-specific handlers plus generic bt_sock_* / sock_no_* fallbacks
 * for poll, ioctl, mmap and socketpair.
 */
4832 static const struct proto_ops l2cap_sock_ops = {
4833 .family = PF_BLUETOOTH,
4834 .owner = THIS_MODULE,
4835 .release = l2cap_sock_release,
4836 .bind = l2cap_sock_bind,
4837 .connect = l2cap_sock_connect,
4838 .listen = l2cap_sock_listen,
4839 .accept = l2cap_sock_accept,
4840 .getname = l2cap_sock_getname,
4841 .sendmsg = l2cap_sock_sendmsg,
4842 .recvmsg = l2cap_sock_recvmsg,
4843 .poll = bt_sock_poll,
4844 .ioctl = bt_sock_ioctl,
4845 .mmap = sock_no_mmap,
4846 .socketpair = sock_no_socketpair,
4847 .shutdown = l2cap_sock_shutdown,
4848 .setsockopt = l2cap_sock_setsockopt,
4849 .getsockopt = l2cap_sock_getsockopt
/* Registered with bt_sock_register(); creates new L2CAP sockets. */
4852 static const struct net_proto_family l2cap_sock_family_ops = {
4853 .family = PF_BLUETOOTH,
4854 .owner = THIS_MODULE,
4855 .create = l2cap_sock_create,
/*
 * HCI protocol hooks: how the HCI core delivers connection events,
 * security confirmations and ACL data to the L2CAP layer.
 */
4858 static struct hci_proto l2cap_hci_proto = {
4860 .id = HCI_PROTO_L2CAP,
4861 .connect_ind = l2cap_connect_ind,
4862 .connect_cfm = l2cap_connect_cfm,
4863 .disconn_ind = l2cap_disconn_ind,
4864 .disconn_cfm = l2cap_disconn_cfm,
4865 .security_cfm = l2cap_security_cfm,
4866 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: register the proto, create the busy-state workqueue,
 * register the socket family and the HCI protocol, then create the
 * debugfs entry. Failures unwind in reverse order.
 *
 * NOTE(review): elided view — the error-check lines after
 * proto_register/create_singlethread_workqueue, the goto targets and the
 * return statements are not visible; verify the unwind order in the full
 * file (only the proto_unregister error label is visible below).
 */
4869 static int __init l2cap_init(void)
4873 err = proto_register(&l2cap_proto, 0);
4877 _busy_wq = create_singlethread_workqueue("l2cap");
4881 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4883 BT_ERR("L2CAP socket registration failed");
4887 err = hci_register_proto(&l2cap_hci_proto);
4889 BT_ERR("L2CAP protocol registration failed");
/* Unwind the socket registration done above before failing. */
4890 bt_sock_unregister(BTPROTO_L2CAP);
/* Debugfs is optional: failure is logged, not fatal. */
4895 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4896 bt_debugfs, NULL, &l2cap_debugfs_fops);
4898 BT_ERR("Failed to create L2CAP debug file");
4901 BT_INFO("L2CAP ver %s", VERSION);
4902 BT_INFO("L2CAP socket layer initialized");
/* Error label target (label line elided): undo proto_register. */
4907 proto_unregister(&l2cap_proto);
/*
 * Module exit: tear down in reverse order of l2cap_init — debugfs entry,
 * workqueue (flushed so no busy-work runs after destroy), socket family,
 * HCI protocol hooks, and finally the proto itself.
 */
4911 static void __exit l2cap_exit(void)
4913 debugfs_remove(l2cap_debugfs);
4915 flush_workqueue(_busy_wq);
4916 destroy_workqueue(_busy_wq);
4918 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4919 BT_ERR("L2CAP socket unregistration failed");
4921 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4922 BT_ERR("L2CAP protocol unregistration failed");
4924 proto_unregister(&l2cap_proto);
/*
 * Exported no-op: modules that only use L2CAP sockets call this so that
 * symbol resolution pulls this module in automatically (braces elided
 * from this view).
 */
4927 void l2cap_load(void)
4929 /* Dummy function to trigger automatic L2CAP module loading by
4930 * other modules that use L2CAP sockets but don't use any other
4931 * symbols from it. */
4933 EXPORT_SYMBOL(l2cap_load);
4935 module_init(l2cap_init);
4936 module_exit(l2cap_exit);
/*
 * NOTE(review): disable_ertm is declared 'static int' at the top of the
 * file but registered here with type 'bool'; later kernels require the
 * variable itself to be bool for module_param(..., bool, ...) — confirm
 * against the target kernel version.
 */
4938 module_param(disable_ertm, bool, 0644);
4939 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4941 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4942 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4943 MODULE_VERSION(VERSION);
4944 MODULE_LICENSE("GPL");
/* Alias lets the socket layer auto-load this module for BTPROTO_L2CAP (0). */
4945 MODULE_ALIAS("bt-proto-0");