2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2009-2010 Gustavo F. Padovan <gustavo@padovan.org>
5 Copyright (C) 2010 Google Inc.
7 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
9 This program is free software; you can redistribute it and/or modify
10 it under the terms of the GNU General Public License version 2 as
11 published by the Free Software Foundation;
13 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
14 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
15 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
16 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
17 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
18 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
19 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
20 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
22 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
23 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
24 SOFTWARE IS DISCLAIMED.
27 /* Bluetooth L2CAP core and sockets. */
29 #include <linux/module.h>
31 #include <linux/types.h>
32 #include <linux/capability.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/interrupt.h>
41 #include <linux/socket.h>
42 #include <linux/skbuff.h>
43 #include <linux/list.h>
44 #include <linux/device.h>
45 #include <linux/debugfs.h>
46 #include <linux/seq_file.h>
47 #include <linux/uaccess.h>
48 #include <linux/crc16.h>
51 #include <asm/system.h>
52 #include <asm/unaligned.h>
54 #include <net/bluetooth/bluetooth.h>
55 #include <net/bluetooth/hci_core.h>
56 #include <net/bluetooth/l2cap.h>
58 #define VERSION "2.15"
60 static int disable_ertm;
62 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
63 static u8 l2cap_fixed_chan[8] = { 0x02, };
65 static const struct proto_ops l2cap_sock_ops;
67 static struct workqueue_struct *_busy_wq;
69 static struct bt_sock_list l2cap_sk_list = {
70 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
73 static void l2cap_busy_work(struct work_struct *work);
75 static void __l2cap_sock_close(struct sock *sk, int reason);
76 static void l2cap_sock_close(struct sock *sk);
77 static void l2cap_sock_kill(struct sock *sk);
79 static int l2cap_build_conf_req(struct sock *sk, void *data);
80 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
81 u8 code, u8 ident, u16 dlen, void *data);
83 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
85 /* ---- L2CAP timers ---- */
86 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
88 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
89 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
92 static void l2cap_sock_clear_timer(struct sock *sk)
94 BT_DBG("sock %p state %d", sk, sk->sk_state);
95 sk_stop_timer(sk, &sk->sk_timer);
98 static void l2cap_sock_timeout(unsigned long arg)
100 struct sock *sk = (struct sock *) arg;
103 BT_DBG("sock %p state %d", sk, sk->sk_state);
107 if (sock_owned_by_user(sk)) {
108 /* sk is owned by user. Try again later */
109 l2cap_sock_set_timer(sk, HZ / 5);
115 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
116 reason = ECONNREFUSED;
117 else if (sk->sk_state == BT_CONNECT &&
118 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
119 reason = ECONNREFUSED;
123 __l2cap_sock_close(sk, reason);
131 /* ---- L2CAP channels ---- */
132 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
135 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
136 if (l2cap_pi(s)->dcid == cid)
142 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
145 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
146 if (l2cap_pi(s)->scid == cid)
152 /* Find channel with given SCID.
153 * Returns locked socket */
154 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
158 s = __l2cap_get_chan_by_scid(l, cid);
161 read_unlock(&l->lock);
165 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
168 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
169 if (l2cap_pi(s)->ident == ident)
175 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
179 s = __l2cap_get_chan_by_ident(l, ident);
182 read_unlock(&l->lock);
186 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
188 u16 cid = L2CAP_CID_DYN_START;
190 for (; cid < L2CAP_CID_DYN_END; cid++) {
191 if (!__l2cap_get_chan_by_scid(l, cid))
198 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
203 l2cap_pi(l->head)->prev_c = sk;
205 l2cap_pi(sk)->next_c = l->head;
206 l2cap_pi(sk)->prev_c = NULL;
210 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
212 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
214 write_lock_bh(&l->lock);
219 l2cap_pi(next)->prev_c = prev;
221 l2cap_pi(prev)->next_c = next;
222 write_unlock_bh(&l->lock);
227 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
229 struct l2cap_chan_list *l = &conn->chan_list;
231 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
232 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
234 conn->disc_reason = 0x13;
236 l2cap_pi(sk)->conn = conn;
238 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
239 /* Alloc CID for connection-oriented socket */
240 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
241 } else if (sk->sk_type == SOCK_DGRAM) {
242 /* Connectionless socket */
243 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
244 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
245 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
247 /* Raw socket can send/recv signalling messages only */
248 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
249 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
250 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
253 __l2cap_chan_link(l, sk);
256 bt_accept_enqueue(parent, sk);
260 * Must be called on the locked socket. */
261 static void l2cap_chan_del(struct sock *sk, int err)
263 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
264 struct sock *parent = bt_sk(sk)->parent;
266 l2cap_sock_clear_timer(sk);
268 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
271 /* Unlink from channel list */
272 l2cap_chan_unlink(&conn->chan_list, sk);
273 l2cap_pi(sk)->conn = NULL;
274 hci_conn_put(conn->hcon);
277 sk->sk_state = BT_CLOSED;
278 sock_set_flag(sk, SOCK_ZAPPED);
284 bt_accept_unlink(sk);
285 parent->sk_data_ready(parent, 0);
287 sk->sk_state_change(sk);
289 skb_queue_purge(TX_QUEUE(sk));
291 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
292 struct srej_list *l, *tmp;
294 del_timer(&l2cap_pi(sk)->retrans_timer);
295 del_timer(&l2cap_pi(sk)->monitor_timer);
296 del_timer(&l2cap_pi(sk)->ack_timer);
298 skb_queue_purge(SREJ_QUEUE(sk));
299 skb_queue_purge(BUSY_QUEUE(sk));
301 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
308 static inline u8 l2cap_get_auth_type(struct sock *sk)
310 if (sk->sk_type == SOCK_RAW) {
311 switch (l2cap_pi(sk)->sec_level) {
312 case BT_SECURITY_HIGH:
313 return HCI_AT_DEDICATED_BONDING_MITM;
314 case BT_SECURITY_MEDIUM:
315 return HCI_AT_DEDICATED_BONDING;
317 return HCI_AT_NO_BONDING;
319 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
320 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
321 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
323 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
324 return HCI_AT_NO_BONDING_MITM;
326 return HCI_AT_NO_BONDING;
328 switch (l2cap_pi(sk)->sec_level) {
329 case BT_SECURITY_HIGH:
330 return HCI_AT_GENERAL_BONDING_MITM;
331 case BT_SECURITY_MEDIUM:
332 return HCI_AT_GENERAL_BONDING;
334 return HCI_AT_NO_BONDING;
339 /* Service level security */
340 static inline int l2cap_check_security(struct sock *sk)
342 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
345 auth_type = l2cap_get_auth_type(sk);
347 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
351 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
355 /* Get next available identificator.
356 * 1 - 128 are used by kernel.
357 * 129 - 199 are reserved.
358 * 200 - 254 are used by utilities like l2ping, etc.
361 spin_lock_bh(&conn->lock);
363 if (++conn->tx_ident > 128)
368 spin_unlock_bh(&conn->lock);
373 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
375 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
377 BT_DBG("code 0x%2.2x", code);
382 hci_send_acl(conn->hcon, skb, 0);
385 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
388 struct l2cap_hdr *lh;
389 struct l2cap_conn *conn = pi->conn;
390 struct sock *sk = (struct sock *)pi;
391 int count, hlen = L2CAP_HDR_SIZE + 2;
393 if (sk->sk_state != BT_CONNECTED)
396 if (pi->fcs == L2CAP_FCS_CRC16)
399 BT_DBG("pi %p, control 0x%2.2x", pi, control);
401 count = min_t(unsigned int, conn->mtu, hlen);
402 control |= L2CAP_CTRL_FRAME_TYPE;
404 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
405 control |= L2CAP_CTRL_FINAL;
406 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
409 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
410 control |= L2CAP_CTRL_POLL;
411 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
414 skb = bt_skb_alloc(count, GFP_ATOMIC);
418 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
419 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
420 lh->cid = cpu_to_le16(pi->dcid);
421 put_unaligned_le16(control, skb_put(skb, 2));
423 if (pi->fcs == L2CAP_FCS_CRC16) {
424 u16 fcs = crc16(0, (u8 *)lh, count - 2);
425 put_unaligned_le16(fcs, skb_put(skb, 2));
428 hci_send_acl(pi->conn->hcon, skb, 0);
431 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
433 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
434 control |= L2CAP_SUPER_RCV_NOT_READY;
435 pi->conn_state |= L2CAP_CONN_RNR_SENT;
437 control |= L2CAP_SUPER_RCV_READY;
439 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
441 l2cap_send_sframe(pi, control);
444 static inline int __l2cap_no_conn_pending(struct sock *sk)
446 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
449 static void l2cap_do_start(struct sock *sk)
451 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
453 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
454 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
457 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
458 struct l2cap_conn_req req;
459 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
460 req.psm = l2cap_pi(sk)->psm;
462 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
463 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
465 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
466 L2CAP_CONN_REQ, sizeof(req), &req);
469 struct l2cap_info_req req;
470 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
472 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
473 conn->info_ident = l2cap_get_ident(conn);
475 mod_timer(&conn->info_timer, jiffies +
476 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
478 l2cap_send_cmd(conn, conn->info_ident,
479 L2CAP_INFO_REQ, sizeof(req), &req);
483 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
485 u32 local_feat_mask = l2cap_feat_mask;
487 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
490 case L2CAP_MODE_ERTM:
491 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
492 case L2CAP_MODE_STREAMING:
493 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
499 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
501 struct l2cap_disconn_req req;
506 skb_queue_purge(TX_QUEUE(sk));
508 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
509 del_timer(&l2cap_pi(sk)->retrans_timer);
510 del_timer(&l2cap_pi(sk)->monitor_timer);
511 del_timer(&l2cap_pi(sk)->ack_timer);
514 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
515 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
516 l2cap_send_cmd(conn, l2cap_get_ident(conn),
517 L2CAP_DISCONN_REQ, sizeof(req), &req);
519 sk->sk_state = BT_DISCONN;
523 /* ---- L2CAP connections ---- */
524 static void l2cap_conn_start(struct l2cap_conn *conn)
526 struct l2cap_chan_list *l = &conn->chan_list;
527 struct sock_del_list del, *tmp1, *tmp2;
530 BT_DBG("conn %p", conn);
532 INIT_LIST_HEAD(&del.list);
536 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
539 if (sk->sk_type != SOCK_SEQPACKET &&
540 sk->sk_type != SOCK_STREAM) {
545 if (sk->sk_state == BT_CONNECT) {
546 struct l2cap_conn_req req;
548 if (!l2cap_check_security(sk) ||
549 !__l2cap_no_conn_pending(sk)) {
554 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
556 && l2cap_pi(sk)->conf_state &
557 L2CAP_CONF_STATE2_DEVICE) {
558 tmp1 = kzalloc(sizeof(struct sock_del_list),
561 list_add_tail(&tmp1->list, &del.list);
566 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
567 req.psm = l2cap_pi(sk)->psm;
569 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
570 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
572 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
573 L2CAP_CONN_REQ, sizeof(req), &req);
575 } else if (sk->sk_state == BT_CONNECT2) {
576 struct l2cap_conn_rsp rsp;
578 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
579 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
581 if (l2cap_check_security(sk)) {
582 if (bt_sk(sk)->defer_setup) {
583 struct sock *parent = bt_sk(sk)->parent;
584 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
585 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
586 parent->sk_data_ready(parent, 0);
589 sk->sk_state = BT_CONFIG;
590 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
591 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
594 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
595 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
598 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
599 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
601 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
602 rsp.result != L2CAP_CR_SUCCESS) {
607 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
608 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
609 l2cap_build_conf_req(sk, buf), buf);
610 l2cap_pi(sk)->num_conf_req++;
616 read_unlock(&l->lock);
618 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
619 bh_lock_sock(tmp1->sk);
620 __l2cap_sock_close(tmp1->sk, ECONNRESET);
621 bh_unlock_sock(tmp1->sk);
622 list_del(&tmp1->list);
627 static void l2cap_conn_ready(struct l2cap_conn *conn)
629 struct l2cap_chan_list *l = &conn->chan_list;
632 BT_DBG("conn %p", conn);
636 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
639 if (sk->sk_type != SOCK_SEQPACKET &&
640 sk->sk_type != SOCK_STREAM) {
641 l2cap_sock_clear_timer(sk);
642 sk->sk_state = BT_CONNECTED;
643 sk->sk_state_change(sk);
644 } else if (sk->sk_state == BT_CONNECT)
650 read_unlock(&l->lock);
653 /* Notify sockets that we cannot guaranty reliability anymore */
654 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
656 struct l2cap_chan_list *l = &conn->chan_list;
659 BT_DBG("conn %p", conn);
663 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
664 if (l2cap_pi(sk)->force_reliable)
668 read_unlock(&l->lock);
671 static void l2cap_info_timeout(unsigned long arg)
673 struct l2cap_conn *conn = (void *) arg;
675 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
676 conn->info_ident = 0;
678 l2cap_conn_start(conn);
681 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
683 struct l2cap_conn *conn = hcon->l2cap_data;
688 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
692 hcon->l2cap_data = conn;
695 BT_DBG("hcon %p conn %p", hcon, conn);
697 conn->mtu = hcon->hdev->acl_mtu;
698 conn->src = &hcon->hdev->bdaddr;
699 conn->dst = &hcon->dst;
703 spin_lock_init(&conn->lock);
704 rwlock_init(&conn->chan_list.lock);
706 setup_timer(&conn->info_timer, l2cap_info_timeout,
707 (unsigned long) conn);
709 conn->disc_reason = 0x13;
714 static void l2cap_conn_del(struct hci_conn *hcon, int err)
716 struct l2cap_conn *conn = hcon->l2cap_data;
722 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
724 kfree_skb(conn->rx_skb);
727 while ((sk = conn->chan_list.head)) {
729 l2cap_chan_del(sk, err);
734 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
735 del_timer_sync(&conn->info_timer);
737 hcon->l2cap_data = NULL;
741 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
743 struct l2cap_chan_list *l = &conn->chan_list;
744 write_lock_bh(&l->lock);
745 __l2cap_chan_add(conn, sk, parent);
746 write_unlock_bh(&l->lock);
749 /* ---- Socket interface ---- */
750 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
753 struct hlist_node *node;
754 sk_for_each(sk, node, &l2cap_sk_list.head)
755 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
762 /* Find socket with psm and source bdaddr.
763 * Returns closest match.
765 static struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
767 struct sock *sk = NULL, *sk1 = NULL;
768 struct hlist_node *node;
770 read_lock(&l2cap_sk_list.lock);
772 sk_for_each(sk, node, &l2cap_sk_list.head) {
773 if (state && sk->sk_state != state)
776 if (l2cap_pi(sk)->psm == psm) {
778 if (!bacmp(&bt_sk(sk)->src, src))
782 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
787 read_unlock(&l2cap_sk_list.lock);
789 return node ? sk : sk1;
792 static void l2cap_sock_destruct(struct sock *sk)
796 skb_queue_purge(&sk->sk_receive_queue);
797 skb_queue_purge(&sk->sk_write_queue);
800 static void l2cap_sock_cleanup_listen(struct sock *parent)
804 BT_DBG("parent %p", parent);
806 /* Close not yet accepted channels */
807 while ((sk = bt_accept_dequeue(parent, NULL)))
808 l2cap_sock_close(sk);
810 parent->sk_state = BT_CLOSED;
811 sock_set_flag(parent, SOCK_ZAPPED);
814 /* Kill socket (only if zapped and orphan)
815 * Must be called on unlocked socket.
817 static void l2cap_sock_kill(struct sock *sk)
819 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
822 BT_DBG("sk %p state %d", sk, sk->sk_state);
824 /* Kill poor orphan */
825 bt_sock_unlink(&l2cap_sk_list, sk);
826 sock_set_flag(sk, SOCK_DEAD);
830 static void __l2cap_sock_close(struct sock *sk, int reason)
832 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
834 switch (sk->sk_state) {
836 l2cap_sock_cleanup_listen(sk);
841 if (sk->sk_type == SOCK_SEQPACKET ||
842 sk->sk_type == SOCK_STREAM) {
843 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
845 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
846 l2cap_send_disconn_req(conn, sk, reason);
848 l2cap_chan_del(sk, reason);
852 if (sk->sk_type == SOCK_SEQPACKET ||
853 sk->sk_type == SOCK_STREAM) {
854 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
855 struct l2cap_conn_rsp rsp;
858 if (bt_sk(sk)->defer_setup)
859 result = L2CAP_CR_SEC_BLOCK;
861 result = L2CAP_CR_BAD_PSM;
862 sk->sk_state = BT_DISCONN;
864 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
865 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
866 rsp.result = cpu_to_le16(result);
867 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
868 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
869 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
871 l2cap_chan_del(sk, reason);
876 l2cap_chan_del(sk, reason);
880 sock_set_flag(sk, SOCK_ZAPPED);
885 /* Must be called on unlocked socket. */
886 static void l2cap_sock_close(struct sock *sk)
888 l2cap_sock_clear_timer(sk);
890 __l2cap_sock_close(sk, ECONNRESET);
895 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
897 struct l2cap_pinfo *pi = l2cap_pi(sk);
902 sk->sk_type = parent->sk_type;
903 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
905 pi->imtu = l2cap_pi(parent)->imtu;
906 pi->omtu = l2cap_pi(parent)->omtu;
907 pi->conf_state = l2cap_pi(parent)->conf_state;
908 pi->mode = l2cap_pi(parent)->mode;
909 pi->fcs = l2cap_pi(parent)->fcs;
910 pi->max_tx = l2cap_pi(parent)->max_tx;
911 pi->tx_win = l2cap_pi(parent)->tx_win;
912 pi->sec_level = l2cap_pi(parent)->sec_level;
913 pi->role_switch = l2cap_pi(parent)->role_switch;
914 pi->force_reliable = l2cap_pi(parent)->force_reliable;
916 pi->imtu = L2CAP_DEFAULT_MTU;
918 if (!disable_ertm && sk->sk_type == SOCK_STREAM) {
919 pi->mode = L2CAP_MODE_ERTM;
920 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
922 pi->mode = L2CAP_MODE_BASIC;
924 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
925 pi->fcs = L2CAP_FCS_CRC16;
926 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
927 pi->sec_level = BT_SECURITY_LOW;
929 pi->force_reliable = 0;
932 /* Default config options */
934 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
935 skb_queue_head_init(TX_QUEUE(sk));
936 skb_queue_head_init(SREJ_QUEUE(sk));
937 skb_queue_head_init(BUSY_QUEUE(sk));
938 INIT_LIST_HEAD(SREJ_LIST(sk));
941 static struct proto l2cap_proto = {
943 .owner = THIS_MODULE,
944 .obj_size = sizeof(struct l2cap_pinfo)
947 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
951 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
955 sock_init_data(sock, sk);
956 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
958 sk->sk_destruct = l2cap_sock_destruct;
959 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
961 sock_reset_flag(sk, SOCK_ZAPPED);
963 sk->sk_protocol = proto;
964 sk->sk_state = BT_OPEN;
966 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
968 bt_sock_link(&l2cap_sk_list, sk);
972 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
977 BT_DBG("sock %p", sock);
979 sock->state = SS_UNCONNECTED;
981 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
982 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
983 return -ESOCKTNOSUPPORT;
985 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
988 sock->ops = &l2cap_sock_ops;
990 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
994 l2cap_sock_init(sk, NULL);
998 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
1000 struct sock *sk = sock->sk;
1001 struct sockaddr_l2 la;
1004 BT_DBG("sk %p", sk);
1006 if (!addr || addr->sa_family != AF_BLUETOOTH)
1009 memset(&la, 0, sizeof(la));
1010 len = min_t(unsigned int, sizeof(la), alen);
1011 memcpy(&la, addr, len);
1018 if (sk->sk_state != BT_OPEN) {
1024 __u16 psm = __le16_to_cpu(la.l2_psm);
1026 /* PSM must be odd and lsb of upper byte must be 0 */
1027 if ((psm & 0x0101) != 0x0001) {
1032 /* Restrict usage of well-known PSMs */
1033 if (psm < 0x1001 && !capable(CAP_NET_BIND_SERVICE)) {
1039 write_lock_bh(&l2cap_sk_list.lock);
1041 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1044 /* Save source address */
1045 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1046 l2cap_pi(sk)->psm = la.l2_psm;
1047 l2cap_pi(sk)->sport = la.l2_psm;
1048 sk->sk_state = BT_BOUND;
1050 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1051 __le16_to_cpu(la.l2_psm) == 0x0003)
1052 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1055 write_unlock_bh(&l2cap_sk_list.lock);
1062 static int l2cap_do_connect(struct sock *sk)
1064 bdaddr_t *src = &bt_sk(sk)->src;
1065 bdaddr_t *dst = &bt_sk(sk)->dst;
1066 struct l2cap_conn *conn;
1067 struct hci_conn *hcon;
1068 struct hci_dev *hdev;
1072 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1075 hdev = hci_get_route(dst, src);
1077 return -EHOSTUNREACH;
1079 hci_dev_lock_bh(hdev);
1083 auth_type = l2cap_get_auth_type(sk);
1085 hcon = hci_connect(hdev, ACL_LINK, dst,
1086 l2cap_pi(sk)->sec_level, auth_type);
1090 conn = l2cap_conn_add(hcon, 0);
1098 /* Update source addr of the socket */
1099 bacpy(src, conn->src);
1101 l2cap_chan_add(conn, sk, NULL);
1103 sk->sk_state = BT_CONNECT;
1104 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1106 if (hcon->state == BT_CONNECTED) {
1107 if (sk->sk_type != SOCK_SEQPACKET &&
1108 sk->sk_type != SOCK_STREAM) {
1109 l2cap_sock_clear_timer(sk);
1110 if (l2cap_check_security(sk))
1111 sk->sk_state = BT_CONNECTED;
1117 hci_dev_unlock_bh(hdev);
1122 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1124 struct sock *sk = sock->sk;
1125 struct sockaddr_l2 la;
1128 BT_DBG("sk %p", sk);
1130 if (!addr || alen < sizeof(addr->sa_family) ||
1131 addr->sa_family != AF_BLUETOOTH)
1134 memset(&la, 0, sizeof(la));
1135 len = min_t(unsigned int, sizeof(la), alen);
1136 memcpy(&la, addr, len);
1143 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1149 switch (l2cap_pi(sk)->mode) {
1150 case L2CAP_MODE_BASIC:
1152 case L2CAP_MODE_ERTM:
1153 case L2CAP_MODE_STREAMING:
1162 switch (sk->sk_state) {
1166 /* Already connecting */
1170 /* Already connected */
1184 /* PSM must be odd and lsb of upper byte must be 0 */
1185 if ((__le16_to_cpu(la.l2_psm) & 0x0101) != 0x0001 &&
1186 sk->sk_type != SOCK_RAW) {
1191 /* Set destination address and psm */
1192 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1193 l2cap_pi(sk)->psm = la.l2_psm;
1195 err = l2cap_do_connect(sk);
1200 err = bt_sock_wait_state(sk, BT_CONNECTED,
1201 sock_sndtimeo(sk, flags & O_NONBLOCK));
1207 static int l2cap_sock_listen(struct socket *sock, int backlog)
1209 struct sock *sk = sock->sk;
1212 BT_DBG("sk %p backlog %d", sk, backlog);
1216 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1217 || sk->sk_state != BT_BOUND) {
1222 switch (l2cap_pi(sk)->mode) {
1223 case L2CAP_MODE_BASIC:
1225 case L2CAP_MODE_ERTM:
1226 case L2CAP_MODE_STREAMING:
1235 if (!l2cap_pi(sk)->psm) {
1236 bdaddr_t *src = &bt_sk(sk)->src;
1241 write_lock_bh(&l2cap_sk_list.lock);
1243 for (psm = 0x1001; psm < 0x1100; psm += 2)
1244 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1245 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1246 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1251 write_unlock_bh(&l2cap_sk_list.lock);
1257 sk->sk_max_ack_backlog = backlog;
1258 sk->sk_ack_backlog = 0;
1259 sk->sk_state = BT_LISTEN;
1266 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1268 DECLARE_WAITQUEUE(wait, current);
1269 struct sock *sk = sock->sk, *nsk;
1273 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1275 if (sk->sk_state != BT_LISTEN) {
1280 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1282 BT_DBG("sk %p timeo %ld", sk, timeo);
1284 /* Wait for an incoming connection. (wake-one). */
1285 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1286 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1287 set_current_state(TASK_INTERRUPTIBLE);
1294 timeo = schedule_timeout(timeo);
1295 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1297 if (sk->sk_state != BT_LISTEN) {
1302 if (signal_pending(current)) {
1303 err = sock_intr_errno(timeo);
1307 set_current_state(TASK_RUNNING);
1308 remove_wait_queue(sk_sleep(sk), &wait);
1313 newsock->state = SS_CONNECTED;
1315 BT_DBG("new socket %p", nsk);
1322 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1324 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1325 struct sock *sk = sock->sk;
1327 BT_DBG("sock %p, sk %p", sock, sk);
1329 addr->sa_family = AF_BLUETOOTH;
1330 *len = sizeof(struct sockaddr_l2);
1333 la->l2_psm = l2cap_pi(sk)->psm;
1334 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1335 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1337 la->l2_psm = l2cap_pi(sk)->sport;
1338 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1339 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1345 static int __l2cap_wait_ack(struct sock *sk)
1347 DECLARE_WAITQUEUE(wait, current);
1351 add_wait_queue(sk_sleep(sk), &wait);
1352 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1353 set_current_state(TASK_INTERRUPTIBLE);
1358 if (signal_pending(current)) {
1359 err = sock_intr_errno(timeo);
1364 timeo = schedule_timeout(timeo);
1367 err = sock_error(sk);
1371 set_current_state(TASK_RUNNING);
1372 remove_wait_queue(sk_sleep(sk), &wait);
1376 static void l2cap_monitor_timeout(unsigned long arg)
1378 struct sock *sk = (void *) arg;
1380 BT_DBG("sk %p", sk);
1383 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1384 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1389 l2cap_pi(sk)->retry_count++;
1390 __mod_monitor_timer();
1392 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1396 static void l2cap_retrans_timeout(unsigned long arg)
1398 struct sock *sk = (void *) arg;
1400 BT_DBG("sk %p", sk);
1403 l2cap_pi(sk)->retry_count = 1;
1404 __mod_monitor_timer();
1406 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1408 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1412 static void l2cap_drop_acked_frames(struct sock *sk)
1414 struct sk_buff *skb;
1416 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1417 l2cap_pi(sk)->unacked_frames) {
1418 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1421 skb = skb_dequeue(TX_QUEUE(sk));
1424 l2cap_pi(sk)->unacked_frames--;
1427 if (!l2cap_pi(sk)->unacked_frames)
1428 del_timer(&l2cap_pi(sk)->retrans_timer);
1431 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1433 struct l2cap_pinfo *pi = l2cap_pi(sk);
1435 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1437 hci_send_acl(pi->conn->hcon, skb, 0);
1440 static void l2cap_streaming_send(struct sock *sk)
1442 struct sk_buff *skb;
1443 struct l2cap_pinfo *pi = l2cap_pi(sk);
1446 while ((skb = skb_dequeue(TX_QUEUE(sk)))) {
1447 control = get_unaligned_le16(skb->data + L2CAP_HDR_SIZE);
1448 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1449 put_unaligned_le16(control, skb->data + L2CAP_HDR_SIZE);
1451 if (pi->fcs == L2CAP_FCS_CRC16) {
1452 fcs = crc16(0, (u8 *)skb->data, skb->len - 2);
1453 put_unaligned_le16(fcs, skb->data + skb->len - 2);
1456 l2cap_do_send(sk, skb);
1458 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1462 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1464 struct l2cap_pinfo *pi = l2cap_pi(sk);
1465 struct sk_buff *skb, *tx_skb;
1468 skb = skb_peek(TX_QUEUE(sk));
1473 if (bt_cb(skb)->tx_seq == tx_seq)
1476 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1479 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1481 if (pi->remote_max_tx &&
1482 bt_cb(skb)->retries == pi->remote_max_tx) {
1483 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1487 tx_skb = skb_clone(skb, GFP_ATOMIC);
1488 bt_cb(skb)->retries++;
1489 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1491 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1492 control |= L2CAP_CTRL_FINAL;
1493 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1496 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1497 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1499 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1501 if (pi->fcs == L2CAP_FCS_CRC16) {
1502 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1503 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1506 l2cap_do_send(sk, tx_skb);
1509 static int l2cap_ertm_send(struct sock *sk)
1511 struct sk_buff *skb, *tx_skb;
1512 struct l2cap_pinfo *pi = l2cap_pi(sk);
1516 if (sk->sk_state != BT_CONNECTED)
1519 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1521 if (pi->remote_max_tx &&
1522 bt_cb(skb)->retries == pi->remote_max_tx) {
1523 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1527 tx_skb = skb_clone(skb, GFP_ATOMIC);
1529 bt_cb(skb)->retries++;
1531 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1532 control &= L2CAP_CTRL_SAR;
1534 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1535 control |= L2CAP_CTRL_FINAL;
1536 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1538 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1539 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1540 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1543 if (pi->fcs == L2CAP_FCS_CRC16) {
1544 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1545 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1548 l2cap_do_send(sk, tx_skb);
1550 __mod_retrans_timer();
1552 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1553 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1555 pi->unacked_frames++;
1558 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1559 sk->sk_send_head = NULL;
1561 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1569 static int l2cap_retransmit_frames(struct sock *sk)
1571 struct l2cap_pinfo *pi = l2cap_pi(sk);
1574 if (!skb_queue_empty(TX_QUEUE(sk)))
1575 sk->sk_send_head = TX_QUEUE(sk)->next;
1577 pi->next_tx_seq = pi->expected_ack_seq;
1578 ret = l2cap_ertm_send(sk);
1582 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1584 struct sock *sk = (struct sock *)pi;
1587 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1589 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1590 control |= L2CAP_SUPER_RCV_NOT_READY;
1591 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1592 l2cap_send_sframe(pi, control);
1596 if (l2cap_ertm_send(sk) > 0)
1599 control |= L2CAP_SUPER_RCV_READY;
1600 l2cap_send_sframe(pi, control);
1603 static void l2cap_send_srejtail(struct sock *sk)
1605 struct srej_list *tail;
1608 control = L2CAP_SUPER_SELECT_REJECT;
1609 control |= L2CAP_CTRL_FINAL;
1611 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1612 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1614 l2cap_send_sframe(l2cap_pi(sk), control);
1617 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1619 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1620 struct sk_buff **frag;
1623 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1629 /* Continuation fragments (no L2CAP header) */
1630 frag = &skb_shinfo(skb)->frag_list;
1632 count = min_t(unsigned int, conn->mtu, len);
1634 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1637 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1643 frag = &(*frag)->next;
1649 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1651 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1652 struct sk_buff *skb;
1653 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1654 struct l2cap_hdr *lh;
1656 BT_DBG("sk %p len %d", sk, (int)len);
1658 count = min_t(unsigned int, (conn->mtu - hlen), len);
1659 skb = bt_skb_send_alloc(sk, count + hlen,
1660 msg->msg_flags & MSG_DONTWAIT, &err);
1662 return ERR_PTR(err);
1664 /* Create L2CAP header */
1665 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1666 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1667 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1668 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1670 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1671 if (unlikely(err < 0)) {
1673 return ERR_PTR(err);
1678 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1680 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1681 struct sk_buff *skb;
1682 int err, count, hlen = L2CAP_HDR_SIZE;
1683 struct l2cap_hdr *lh;
1685 BT_DBG("sk %p len %d", sk, (int)len);
1687 count = min_t(unsigned int, (conn->mtu - hlen), len);
1688 skb = bt_skb_send_alloc(sk, count + hlen,
1689 msg->msg_flags & MSG_DONTWAIT, &err);
1691 return ERR_PTR(err);
1693 /* Create L2CAP header */
1694 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1695 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1696 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1698 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1699 if (unlikely(err < 0)) {
1701 return ERR_PTR(err);
1706 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1708 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1709 struct sk_buff *skb;
1710 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1711 struct l2cap_hdr *lh;
1713 BT_DBG("sk %p len %d", sk, (int)len);
1716 return ERR_PTR(-ENOTCONN);
1721 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1724 count = min_t(unsigned int, (conn->mtu - hlen), len);
1725 skb = bt_skb_send_alloc(sk, count + hlen,
1726 msg->msg_flags & MSG_DONTWAIT, &err);
1728 return ERR_PTR(err);
1730 /* Create L2CAP header */
1731 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1732 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1733 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1734 put_unaligned_le16(control, skb_put(skb, 2));
1736 put_unaligned_le16(sdulen, skb_put(skb, 2));
1738 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1739 if (unlikely(err < 0)) {
1741 return ERR_PTR(err);
1744 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1745 put_unaligned_le16(0, skb_put(skb, 2));
1747 bt_cb(skb)->retries = 0;
1751 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1753 struct l2cap_pinfo *pi = l2cap_pi(sk);
1754 struct sk_buff *skb;
1755 struct sk_buff_head sar_queue;
1759 skb_queue_head_init(&sar_queue);
1760 control = L2CAP_SDU_START;
1761 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1763 return PTR_ERR(skb);
1765 __skb_queue_tail(&sar_queue, skb);
1766 len -= pi->remote_mps;
1767 size += pi->remote_mps;
1772 if (len > pi->remote_mps) {
1773 control = L2CAP_SDU_CONTINUE;
1774 buflen = pi->remote_mps;
1776 control = L2CAP_SDU_END;
1780 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1782 skb_queue_purge(&sar_queue);
1783 return PTR_ERR(skb);
1786 __skb_queue_tail(&sar_queue, skb);
1790 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1791 if (sk->sk_send_head == NULL)
1792 sk->sk_send_head = sar_queue.next;
1797 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1799 struct sock *sk = sock->sk;
1800 struct l2cap_pinfo *pi = l2cap_pi(sk);
1801 struct sk_buff *skb;
1805 BT_DBG("sock %p, sk %p", sock, sk);
1807 err = sock_error(sk);
1811 if (msg->msg_flags & MSG_OOB)
1816 if (sk->sk_state != BT_CONNECTED) {
1821 /* Connectionless channel */
1822 if (sk->sk_type == SOCK_DGRAM) {
1823 skb = l2cap_create_connless_pdu(sk, msg, len);
1827 l2cap_do_send(sk, skb);
1834 case L2CAP_MODE_BASIC:
1835 /* Check outgoing MTU */
1836 if (len > pi->omtu) {
1841 /* Create a basic PDU */
1842 skb = l2cap_create_basic_pdu(sk, msg, len);
1848 l2cap_do_send(sk, skb);
1852 case L2CAP_MODE_ERTM:
1853 case L2CAP_MODE_STREAMING:
1854 /* Entire SDU fits into one PDU */
1855 if (len <= pi->remote_mps) {
1856 control = L2CAP_SDU_UNSEGMENTED;
1857 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1862 __skb_queue_tail(TX_QUEUE(sk), skb);
1864 if (sk->sk_send_head == NULL)
1865 sk->sk_send_head = skb;
1868 /* Segment SDU into multiples PDUs */
1869 err = l2cap_sar_segment_sdu(sk, msg, len);
1874 if (pi->mode == L2CAP_MODE_STREAMING) {
1875 l2cap_streaming_send(sk);
1877 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
1878 (pi->conn_state & L2CAP_CONN_WAIT_F)) {
1882 err = l2cap_ertm_send(sk);
1890 BT_DBG("bad state %1.1x", pi->mode);
1899 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1901 struct sock *sk = sock->sk;
1905 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1906 struct l2cap_conn_rsp rsp;
1907 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1910 sk->sk_state = BT_CONFIG;
1912 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1913 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1914 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1915 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1916 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1917 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1919 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
1924 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1925 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1926 l2cap_build_conf_req(sk, buf), buf);
1927 l2cap_pi(sk)->num_conf_req++;
1935 if (sock->type == SOCK_STREAM)
1936 return bt_sock_stream_recvmsg(iocb, sock, msg, len, flags);
1938 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1941 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1943 struct sock *sk = sock->sk;
1944 struct l2cap_options opts;
1948 BT_DBG("sk %p", sk);
1954 if (sk->sk_state == BT_CONNECTED) {
1959 opts.imtu = l2cap_pi(sk)->imtu;
1960 opts.omtu = l2cap_pi(sk)->omtu;
1961 opts.flush_to = l2cap_pi(sk)->flush_to;
1962 opts.mode = l2cap_pi(sk)->mode;
1963 opts.fcs = l2cap_pi(sk)->fcs;
1964 opts.max_tx = l2cap_pi(sk)->max_tx;
1965 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1967 len = min_t(unsigned int, sizeof(opts), optlen);
1968 if (copy_from_user((char *) &opts, optval, len)) {
1973 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1978 l2cap_pi(sk)->mode = opts.mode;
1979 switch (l2cap_pi(sk)->mode) {
1980 case L2CAP_MODE_BASIC:
1981 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1983 case L2CAP_MODE_ERTM:
1984 case L2CAP_MODE_STREAMING:
1993 l2cap_pi(sk)->imtu = opts.imtu;
1994 l2cap_pi(sk)->omtu = opts.omtu;
1995 l2cap_pi(sk)->fcs = opts.fcs;
1996 l2cap_pi(sk)->max_tx = opts.max_tx;
1997 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
2001 if (get_user(opt, (u32 __user *) optval)) {
2006 if (opt & L2CAP_LM_AUTH)
2007 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
2008 if (opt & L2CAP_LM_ENCRYPT)
2009 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
2010 if (opt & L2CAP_LM_SECURE)
2011 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
2013 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
2014 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
2026 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2028 struct sock *sk = sock->sk;
2029 struct bt_security sec;
2033 BT_DBG("sk %p", sk);
2035 if (level == SOL_L2CAP)
2036 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2038 if (level != SOL_BLUETOOTH)
2039 return -ENOPROTOOPT;
2045 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2046 && sk->sk_type != SOCK_RAW) {
2051 sec.level = BT_SECURITY_LOW;
2053 len = min_t(unsigned int, sizeof(sec), optlen);
2054 if (copy_from_user((char *) &sec, optval, len)) {
2059 if (sec.level < BT_SECURITY_LOW ||
2060 sec.level > BT_SECURITY_HIGH) {
2065 l2cap_pi(sk)->sec_level = sec.level;
2068 case BT_DEFER_SETUP:
2069 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2074 if (get_user(opt, (u32 __user *) optval)) {
2079 bt_sk(sk)->defer_setup = opt;
2091 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2093 struct sock *sk = sock->sk;
2094 struct l2cap_options opts;
2095 struct l2cap_conninfo cinfo;
2099 BT_DBG("sk %p", sk);
2101 if (get_user(len, optlen))
2108 opts.imtu = l2cap_pi(sk)->imtu;
2109 opts.omtu = l2cap_pi(sk)->omtu;
2110 opts.flush_to = l2cap_pi(sk)->flush_to;
2111 opts.mode = l2cap_pi(sk)->mode;
2112 opts.fcs = l2cap_pi(sk)->fcs;
2113 opts.max_tx = l2cap_pi(sk)->max_tx;
2114 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2116 len = min_t(unsigned int, len, sizeof(opts));
2117 if (copy_to_user(optval, (char *) &opts, len))
2123 switch (l2cap_pi(sk)->sec_level) {
2124 case BT_SECURITY_LOW:
2125 opt = L2CAP_LM_AUTH;
2127 case BT_SECURITY_MEDIUM:
2128 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2130 case BT_SECURITY_HIGH:
2131 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2139 if (l2cap_pi(sk)->role_switch)
2140 opt |= L2CAP_LM_MASTER;
2142 if (l2cap_pi(sk)->force_reliable)
2143 opt |= L2CAP_LM_RELIABLE;
2145 if (put_user(opt, (u32 __user *) optval))
2149 case L2CAP_CONNINFO:
2150 if (sk->sk_state != BT_CONNECTED &&
2151 !(sk->sk_state == BT_CONNECT2 &&
2152 bt_sk(sk)->defer_setup)) {
2157 memset(&cinfo, 0, sizeof(cinfo));
2158 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2159 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2161 len = min_t(unsigned int, len, sizeof(cinfo));
2162 if (copy_to_user(optval, (char *) &cinfo, len))
2176 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2178 struct sock *sk = sock->sk;
2179 struct bt_security sec;
2182 BT_DBG("sk %p", sk);
2184 if (level == SOL_L2CAP)
2185 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2187 if (level != SOL_BLUETOOTH)
2188 return -ENOPROTOOPT;
2190 if (get_user(len, optlen))
2197 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2198 && sk->sk_type != SOCK_RAW) {
2203 sec.level = l2cap_pi(sk)->sec_level;
2205 len = min_t(unsigned int, len, sizeof(sec));
2206 if (copy_to_user(optval, (char *) &sec, len))
2211 case BT_DEFER_SETUP:
2212 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2217 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2231 static int l2cap_sock_shutdown(struct socket *sock, int how)
2233 struct sock *sk = sock->sk;
2236 BT_DBG("sock %p, sk %p", sock, sk);
2242 if (!sk->sk_shutdown) {
2243 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2244 err = __l2cap_wait_ack(sk);
2246 sk->sk_shutdown = SHUTDOWN_MASK;
2247 l2cap_sock_clear_timer(sk);
2248 __l2cap_sock_close(sk, 0);
2250 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2251 err = bt_sock_wait_state(sk, BT_CLOSED,
2255 if (!err && sk->sk_err)
2262 static int l2cap_sock_release(struct socket *sock)
2264 struct sock *sk = sock->sk;
2267 BT_DBG("sock %p, sk %p", sock, sk);
2272 err = l2cap_sock_shutdown(sock, 2);
2275 l2cap_sock_kill(sk);
2279 static void l2cap_chan_ready(struct sock *sk)
2281 struct sock *parent = bt_sk(sk)->parent;
2283 BT_DBG("sk %p, parent %p", sk, parent);
2285 l2cap_pi(sk)->conf_state = 0;
2286 l2cap_sock_clear_timer(sk);
2289 /* Outgoing channel.
2290 * Wake up socket sleeping on connect.
2292 sk->sk_state = BT_CONNECTED;
2293 sk->sk_state_change(sk);
2295 /* Incoming channel.
2296 * Wake up socket sleeping on accept.
2298 parent->sk_data_ready(parent, 0);
2302 /* Copy frame to all raw sockets on that connection */
2303 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2305 struct l2cap_chan_list *l = &conn->chan_list;
2306 struct sk_buff *nskb;
2309 BT_DBG("conn %p", conn);
2311 read_lock(&l->lock);
2312 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2313 if (sk->sk_type != SOCK_RAW)
2316 /* Don't send frame to the socket it came from */
2319 nskb = skb_clone(skb, GFP_ATOMIC);
2323 if (sock_queue_rcv_skb(sk, nskb))
2326 read_unlock(&l->lock);
2329 /* ---- L2CAP signalling commands ---- */
2330 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2331 u8 code, u8 ident, u16 dlen, void *data)
2333 struct sk_buff *skb, **frag;
2334 struct l2cap_cmd_hdr *cmd;
2335 struct l2cap_hdr *lh;
2338 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2339 conn, code, ident, dlen);
2341 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2342 count = min_t(unsigned int, conn->mtu, len);
2344 skb = bt_skb_alloc(count, GFP_ATOMIC);
2348 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2349 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2350 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2352 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2355 cmd->len = cpu_to_le16(dlen);
2358 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2359 memcpy(skb_put(skb, count), data, count);
2365 /* Continuation fragments (no L2CAP header) */
2366 frag = &skb_shinfo(skb)->frag_list;
2368 count = min_t(unsigned int, conn->mtu, len);
2370 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2374 memcpy(skb_put(*frag, count), data, count);
2379 frag = &(*frag)->next;
2389 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2391 struct l2cap_conf_opt *opt = *ptr;
2394 len = L2CAP_CONF_OPT_SIZE + opt->len;
2402 *val = *((u8 *) opt->val);
2406 *val = get_unaligned_le16(opt->val);
2410 *val = get_unaligned_le32(opt->val);
2414 *val = (unsigned long) opt->val;
2418 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2422 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2424 struct l2cap_conf_opt *opt = *ptr;
2426 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2433 *((u8 *) opt->val) = val;
2437 put_unaligned_le16(val, opt->val);
2441 put_unaligned_le32(val, opt->val);
2445 memcpy(opt->val, (void *) val, len);
2449 *ptr += L2CAP_CONF_OPT_SIZE + len;
2452 static void l2cap_ack_timeout(unsigned long arg)
2454 struct sock *sk = (void *) arg;
2457 l2cap_send_ack(l2cap_pi(sk));
2461 static inline void l2cap_ertm_init(struct sock *sk)
2463 l2cap_pi(sk)->expected_ack_seq = 0;
2464 l2cap_pi(sk)->unacked_frames = 0;
2465 l2cap_pi(sk)->buffer_seq = 0;
2466 l2cap_pi(sk)->num_acked = 0;
2467 l2cap_pi(sk)->frames_sent = 0;
2469 setup_timer(&l2cap_pi(sk)->retrans_timer,
2470 l2cap_retrans_timeout, (unsigned long) sk);
2471 setup_timer(&l2cap_pi(sk)->monitor_timer,
2472 l2cap_monitor_timeout, (unsigned long) sk);
2473 setup_timer(&l2cap_pi(sk)->ack_timer,
2474 l2cap_ack_timeout, (unsigned long) sk);
2476 __skb_queue_head_init(SREJ_QUEUE(sk));
2477 __skb_queue_head_init(BUSY_QUEUE(sk));
2479 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2481 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
2484 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2487 case L2CAP_MODE_STREAMING:
2488 case L2CAP_MODE_ERTM:
2489 if (l2cap_mode_supported(mode, remote_feat_mask))
2493 return L2CAP_MODE_BASIC;
2497 static int l2cap_build_conf_req(struct sock *sk, void *data)
2499 struct l2cap_pinfo *pi = l2cap_pi(sk);
2500 struct l2cap_conf_req *req = data;
2501 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2502 void *ptr = req->data;
2504 BT_DBG("sk %p", sk);
2506 if (pi->num_conf_req || pi->num_conf_rsp)
2510 case L2CAP_MODE_STREAMING:
2511 case L2CAP_MODE_ERTM:
2512 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
2517 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2523 case L2CAP_MODE_BASIC:
2524 if (pi->imtu != L2CAP_DEFAULT_MTU)
2525 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2527 if (!(pi->conn->feat_mask & L2CAP_FEAT_ERTM) &&
2528 !(pi->conn->feat_mask & L2CAP_FEAT_STREAMING))
2531 rfc.mode = L2CAP_MODE_BASIC;
2533 rfc.max_transmit = 0;
2534 rfc.retrans_timeout = 0;
2535 rfc.monitor_timeout = 0;
2536 rfc.max_pdu_size = 0;
2538 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2539 (unsigned long) &rfc);
2542 case L2CAP_MODE_ERTM:
2543 rfc.mode = L2CAP_MODE_ERTM;
2544 rfc.txwin_size = pi->tx_win;
2545 rfc.max_transmit = pi->max_tx;
2546 rfc.retrans_timeout = 0;
2547 rfc.monitor_timeout = 0;
2548 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2549 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2550 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2552 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2553 (unsigned long) &rfc);
2555 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2558 if (pi->fcs == L2CAP_FCS_NONE ||
2559 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2560 pi->fcs = L2CAP_FCS_NONE;
2561 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2565 case L2CAP_MODE_STREAMING:
2566 rfc.mode = L2CAP_MODE_STREAMING;
2568 rfc.max_transmit = 0;
2569 rfc.retrans_timeout = 0;
2570 rfc.monitor_timeout = 0;
2571 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2572 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2573 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2575 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2576 (unsigned long) &rfc);
2578 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2581 if (pi->fcs == L2CAP_FCS_NONE ||
2582 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2583 pi->fcs = L2CAP_FCS_NONE;
2584 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2589 /* FIXME: Need actual value of the flush timeout */
2590 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2591 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2593 req->dcid = cpu_to_le16(pi->dcid);
2594 req->flags = cpu_to_le16(0);
2599 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2601 struct l2cap_pinfo *pi = l2cap_pi(sk);
2602 struct l2cap_conf_rsp *rsp = data;
2603 void *ptr = rsp->data;
2604 void *req = pi->conf_req;
2605 int len = pi->conf_len;
2606 int type, hint, olen;
2608 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2609 u16 mtu = L2CAP_DEFAULT_MTU;
2610 u16 result = L2CAP_CONF_SUCCESS;
2612 BT_DBG("sk %p", sk);
2614 while (len >= L2CAP_CONF_OPT_SIZE) {
2615 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2617 hint = type & L2CAP_CONF_HINT;
2618 type &= L2CAP_CONF_MASK;
2621 case L2CAP_CONF_MTU:
2625 case L2CAP_CONF_FLUSH_TO:
2629 case L2CAP_CONF_QOS:
2632 case L2CAP_CONF_RFC:
2633 if (olen == sizeof(rfc))
2634 memcpy(&rfc, (void *) val, olen);
2637 case L2CAP_CONF_FCS:
2638 if (val == L2CAP_FCS_NONE)
2639 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2647 result = L2CAP_CONF_UNKNOWN;
2648 *((u8 *) ptr++) = type;
2653 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2657 case L2CAP_MODE_STREAMING:
2658 case L2CAP_MODE_ERTM:
2659 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2660 pi->mode = l2cap_select_mode(rfc.mode,
2661 pi->conn->feat_mask);
2665 if (pi->mode != rfc.mode)
2666 return -ECONNREFUSED;
2672 if (pi->mode != rfc.mode) {
2673 result = L2CAP_CONF_UNACCEPT;
2674 rfc.mode = pi->mode;
2676 if (pi->num_conf_rsp == 1)
2677 return -ECONNREFUSED;
2679 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2680 sizeof(rfc), (unsigned long) &rfc);
2684 if (result == L2CAP_CONF_SUCCESS) {
2685 /* Configure output options and let the other side know
2686 * which ones we don't like. */
2688 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2689 result = L2CAP_CONF_UNACCEPT;
2692 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2694 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2697 case L2CAP_MODE_BASIC:
2698 pi->fcs = L2CAP_FCS_NONE;
2699 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2702 case L2CAP_MODE_ERTM:
2703 pi->remote_tx_win = rfc.txwin_size;
2704 pi->remote_max_tx = rfc.max_transmit;
2706 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2707 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2709 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2711 rfc.retrans_timeout =
2712 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2713 rfc.monitor_timeout =
2714 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2716 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2718 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2719 sizeof(rfc), (unsigned long) &rfc);
2723 case L2CAP_MODE_STREAMING:
2724 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2725 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2727 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2729 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2731 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2732 sizeof(rfc), (unsigned long) &rfc);
2737 result = L2CAP_CONF_UNACCEPT;
2739 memset(&rfc, 0, sizeof(rfc));
2740 rfc.mode = pi->mode;
2743 if (result == L2CAP_CONF_SUCCESS)
2744 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2746 rsp->scid = cpu_to_le16(pi->dcid);
2747 rsp->result = cpu_to_le16(result);
2748 rsp->flags = cpu_to_le16(0x0000);
2753 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2755 struct l2cap_pinfo *pi = l2cap_pi(sk);
2756 struct l2cap_conf_req *req = data;
2757 void *ptr = req->data;
2760 struct l2cap_conf_rfc rfc;
2762 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2764 while (len >= L2CAP_CONF_OPT_SIZE) {
2765 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2768 case L2CAP_CONF_MTU:
2769 if (val < L2CAP_DEFAULT_MIN_MTU) {
2770 *result = L2CAP_CONF_UNACCEPT;
2771 pi->imtu = L2CAP_DEFAULT_MIN_MTU;
2774 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2777 case L2CAP_CONF_FLUSH_TO:
2779 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2783 case L2CAP_CONF_RFC:
2784 if (olen == sizeof(rfc))
2785 memcpy(&rfc, (void *)val, olen);
2787 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2788 rfc.mode != pi->mode)
2789 return -ECONNREFUSED;
2793 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2794 sizeof(rfc), (unsigned long) &rfc);
2799 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2800 return -ECONNREFUSED;
2802 pi->mode = rfc.mode;
2804 if (*result == L2CAP_CONF_SUCCESS) {
2806 case L2CAP_MODE_ERTM:
2807 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2808 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2809 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2811 case L2CAP_MODE_STREAMING:
2812 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2816 req->dcid = cpu_to_le16(pi->dcid);
2817 req->flags = cpu_to_le16(0x0000);
2822 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2824 struct l2cap_conf_rsp *rsp = data;
2825 void *ptr = rsp->data;
2827 BT_DBG("sk %p", sk);
2829 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2830 rsp->result = cpu_to_le16(result);
2831 rsp->flags = cpu_to_le16(flags);
2836 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2838 struct l2cap_pinfo *pi = l2cap_pi(sk);
2841 struct l2cap_conf_rfc rfc;
2843 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2845 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2848 while (len >= L2CAP_CONF_OPT_SIZE) {
2849 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2852 case L2CAP_CONF_RFC:
2853 if (olen == sizeof(rfc))
2854 memcpy(&rfc, (void *)val, olen);
2861 case L2CAP_MODE_ERTM:
2862 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2863 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2864 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2866 case L2CAP_MODE_STREAMING:
2867 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2871 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2873 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2875 if (rej->reason != 0x0000)
2878 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2879 cmd->ident == conn->info_ident) {
2880 del_timer(&conn->info_timer);
2882 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2883 conn->info_ident = 0;
2885 l2cap_conn_start(conn);
2891 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2893 struct l2cap_chan_list *list = &conn->chan_list;
2894 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2895 struct l2cap_conn_rsp rsp;
2896 struct sock *parent, *sk = NULL;
2897 int result, status = L2CAP_CS_NO_INFO;
2899 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2900 __le16 psm = req->psm;
2902 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2904 /* Check if we have socket listening on psm */
2905 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2907 result = L2CAP_CR_BAD_PSM;
2911 bh_lock_sock(parent);
2913 /* Check if the ACL is secure enough (if not SDP) */
2914 if (psm != cpu_to_le16(0x0001) &&
2915 !hci_conn_check_link_mode(conn->hcon)) {
2916 conn->disc_reason = 0x05;
2917 result = L2CAP_CR_SEC_BLOCK;
2921 result = L2CAP_CR_NO_MEM;
2923 /* Check for backlog size */
2924 if (sk_acceptq_is_full(parent)) {
2925 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2929 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2933 write_lock_bh(&list->lock);
2935 /* Check if we already have channel with that dcid */
2936 if (__l2cap_get_chan_by_dcid(list, scid)) {
2937 write_unlock_bh(&list->lock);
2938 sock_set_flag(sk, SOCK_ZAPPED);
2939 l2cap_sock_kill(sk);
2943 hci_conn_hold(conn->hcon);
2945 l2cap_sock_init(sk, parent);
2946 bacpy(&bt_sk(sk)->src, conn->src);
2947 bacpy(&bt_sk(sk)->dst, conn->dst);
2948 l2cap_pi(sk)->psm = psm;
2949 l2cap_pi(sk)->dcid = scid;
2951 __l2cap_chan_add(conn, sk, parent);
2952 dcid = l2cap_pi(sk)->scid;
2954 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2956 l2cap_pi(sk)->ident = cmd->ident;
2958 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2959 if (l2cap_check_security(sk)) {
2960 if (bt_sk(sk)->defer_setup) {
2961 sk->sk_state = BT_CONNECT2;
2962 result = L2CAP_CR_PEND;
2963 status = L2CAP_CS_AUTHOR_PEND;
2964 parent->sk_data_ready(parent, 0);
2966 sk->sk_state = BT_CONFIG;
2967 result = L2CAP_CR_SUCCESS;
2968 status = L2CAP_CS_NO_INFO;
2971 sk->sk_state = BT_CONNECT2;
2972 result = L2CAP_CR_PEND;
2973 status = L2CAP_CS_AUTHEN_PEND;
2976 sk->sk_state = BT_CONNECT2;
2977 result = L2CAP_CR_PEND;
2978 status = L2CAP_CS_NO_INFO;
2981 write_unlock_bh(&list->lock);
2984 bh_unlock_sock(parent);
2987 rsp.scid = cpu_to_le16(scid);
2988 rsp.dcid = cpu_to_le16(dcid);
2989 rsp.result = cpu_to_le16(result);
2990 rsp.status = cpu_to_le16(status);
2991 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2993 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2994 struct l2cap_info_req info;
2995 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2997 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2998 conn->info_ident = l2cap_get_ident(conn);
3000 mod_timer(&conn->info_timer, jiffies +
3001 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
3003 l2cap_send_cmd(conn, conn->info_ident,
3004 L2CAP_INFO_REQ, sizeof(info), &info);
3007 if (sk && !(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
3008 result == L2CAP_CR_SUCCESS) {
3010 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3011 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3012 l2cap_build_conf_req(sk, buf), buf);
3013 l2cap_pi(sk)->num_conf_req++;
3019 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3021 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3022 u16 scid, dcid, result, status;
3026 scid = __le16_to_cpu(rsp->scid);
3027 dcid = __le16_to_cpu(rsp->dcid);
3028 result = __le16_to_cpu(rsp->result);
3029 status = __le16_to_cpu(rsp->status);
3031 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
3034 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3038 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
3044 case L2CAP_CR_SUCCESS:
3045 sk->sk_state = BT_CONFIG;
3046 l2cap_pi(sk)->ident = 0;
3047 l2cap_pi(sk)->dcid = dcid;
3048 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
3050 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
3053 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3055 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3056 l2cap_build_conf_req(sk, req), req);
3057 l2cap_pi(sk)->num_conf_req++;
3061 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3065 /* don't delete l2cap channel if sk is owned by user */
3066 if (sock_owned_by_user(sk)) {
3067 sk->sk_state = BT_DISCONN;
3068 l2cap_sock_clear_timer(sk);
3069 l2cap_sock_set_timer(sk, HZ / 5);
3073 l2cap_chan_del(sk, ECONNREFUSED);
3081 static inline void set_default_fcs(struct l2cap_pinfo *pi)
3083 /* FCS is enabled only in ERTM or streaming mode, if one or both
3086 if (pi->mode != L2CAP_MODE_ERTM && pi->mode != L2CAP_MODE_STREAMING)
3087 pi->fcs = L2CAP_FCS_NONE;
3088 else if (!(pi->conf_state & L2CAP_CONF_NO_FCS_RECV))
3089 pi->fcs = L2CAP_FCS_CRC16;
3092 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3094 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3100 dcid = __le16_to_cpu(req->dcid);
3101 flags = __le16_to_cpu(req->flags);
3103 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3105 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3109 if (sk->sk_state != BT_CONFIG) {
3110 struct l2cap_cmd_rej rej;
3112 rej.reason = cpu_to_le16(0x0002);
3113 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3118 /* Reject if config buffer is too small. */
3119 len = cmd_len - sizeof(*req);
3120 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3121 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3122 l2cap_build_conf_rsp(sk, rsp,
3123 L2CAP_CONF_REJECT, flags), rsp);
3128 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3129 l2cap_pi(sk)->conf_len += len;
3131 if (flags & 0x0001) {
3132 /* Incomplete config. Send empty response. */
3133 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3134 l2cap_build_conf_rsp(sk, rsp,
3135 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3139 /* Complete config. */
3140 len = l2cap_parse_conf_req(sk, rsp);
3142 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3146 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3147 l2cap_pi(sk)->num_conf_rsp++;
3149 /* Reset config buffer. */
3150 l2cap_pi(sk)->conf_len = 0;
3152 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
3155 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
3156 set_default_fcs(l2cap_pi(sk));
3158 sk->sk_state = BT_CONNECTED;
3160 l2cap_pi(sk)->next_tx_seq = 0;
3161 l2cap_pi(sk)->expected_tx_seq = 0;
3162 __skb_queue_head_init(TX_QUEUE(sk));
3163 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3164 l2cap_ertm_init(sk);
3166 l2cap_chan_ready(sk);
3170 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3172 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3173 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3174 l2cap_build_conf_req(sk, buf), buf);
3175 l2cap_pi(sk)->num_conf_req++;
3183 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3185 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3186 u16 scid, flags, result;
3188 int len = cmd->len - sizeof(*rsp);
3190 scid = __le16_to_cpu(rsp->scid);
3191 flags = __le16_to_cpu(rsp->flags);
3192 result = __le16_to_cpu(rsp->result);
3194 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3195 scid, flags, result);
3197 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3202 case L2CAP_CONF_SUCCESS:
3203 l2cap_conf_rfc_get(sk, rsp->data, len);
3206 case L2CAP_CONF_UNACCEPT:
3207 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3210 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3211 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3215 /* throw out any old stored conf requests */
3216 result = L2CAP_CONF_SUCCESS;
3217 len = l2cap_parse_conf_rsp(sk, rsp->data,
3220 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3224 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3225 L2CAP_CONF_REQ, len, req);
3226 l2cap_pi(sk)->num_conf_req++;
3227 if (result != L2CAP_CONF_SUCCESS)
3233 sk->sk_err = ECONNRESET;
3234 l2cap_sock_set_timer(sk, HZ * 5);
3235 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3242 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3244 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3245 set_default_fcs(l2cap_pi(sk));
3247 sk->sk_state = BT_CONNECTED;
3248 l2cap_pi(sk)->next_tx_seq = 0;
3249 l2cap_pi(sk)->expected_tx_seq = 0;
3250 __skb_queue_head_init(TX_QUEUE(sk));
3251 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3252 l2cap_ertm_init(sk);
3254 l2cap_chan_ready(sk);
3262 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3264 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3265 struct l2cap_disconn_rsp rsp;
3269 scid = __le16_to_cpu(req->scid);
3270 dcid = __le16_to_cpu(req->dcid);
3272 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3274 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3278 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3279 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3280 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3282 sk->sk_shutdown = SHUTDOWN_MASK;
3284 /* don't delete l2cap channel if sk is owned by user */
3285 if (sock_owned_by_user(sk)) {
3286 sk->sk_state = BT_DISCONN;
3287 l2cap_sock_clear_timer(sk);
3288 l2cap_sock_set_timer(sk, HZ / 5);
3293 l2cap_chan_del(sk, ECONNRESET);
3296 l2cap_sock_kill(sk);
3300 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3302 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3306 scid = __le16_to_cpu(rsp->scid);
3307 dcid = __le16_to_cpu(rsp->dcid);
3309 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3311 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3315 /* don't delete l2cap channel if sk is owned by user */
3316 if (sock_owned_by_user(sk)) {
3317 sk->sk_state = BT_DISCONN;
3318 l2cap_sock_clear_timer(sk);
3319 l2cap_sock_set_timer(sk, HZ / 5);
3324 l2cap_chan_del(sk, 0);
3327 l2cap_sock_kill(sk);
3331 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3333 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3336 type = __le16_to_cpu(req->type);
3338 BT_DBG("type 0x%4.4x", type);
3340 if (type == L2CAP_IT_FEAT_MASK) {
3342 u32 feat_mask = l2cap_feat_mask;
3343 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3344 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3345 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3347 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3349 put_unaligned_le32(feat_mask, rsp->data);
3350 l2cap_send_cmd(conn, cmd->ident,
3351 L2CAP_INFO_RSP, sizeof(buf), buf);
3352 } else if (type == L2CAP_IT_FIXED_CHAN) {
3354 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3355 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3356 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3357 memcpy(buf + 4, l2cap_fixed_chan, 8);
3358 l2cap_send_cmd(conn, cmd->ident,
3359 L2CAP_INFO_RSP, sizeof(buf), buf);
3361 struct l2cap_info_rsp rsp;
3362 rsp.type = cpu_to_le16(type);
3363 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3364 l2cap_send_cmd(conn, cmd->ident,
3365 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3371 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3373 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3376 type = __le16_to_cpu(rsp->type);
3377 result = __le16_to_cpu(rsp->result);
3379 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3381 del_timer(&conn->info_timer);
3383 if (result != L2CAP_IR_SUCCESS) {
3384 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3385 conn->info_ident = 0;
3387 l2cap_conn_start(conn);
3392 if (type == L2CAP_IT_FEAT_MASK) {
3393 conn->feat_mask = get_unaligned_le32(rsp->data);
3395 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3396 struct l2cap_info_req req;
3397 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3399 conn->info_ident = l2cap_get_ident(conn);
3401 l2cap_send_cmd(conn, conn->info_ident,
3402 L2CAP_INFO_REQ, sizeof(req), &req);
3404 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3405 conn->info_ident = 0;
3407 l2cap_conn_start(conn);
3409 } else if (type == L2CAP_IT_FIXED_CHAN) {
3410 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3411 conn->info_ident = 0;
3413 l2cap_conn_start(conn);
3419 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3421 u8 *data = skb->data;
3423 struct l2cap_cmd_hdr cmd;
3426 l2cap_raw_recv(conn, skb);
3428 while (len >= L2CAP_CMD_HDR_SIZE) {
3430 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3431 data += L2CAP_CMD_HDR_SIZE;
3432 len -= L2CAP_CMD_HDR_SIZE;
3434 cmd_len = le16_to_cpu(cmd.len);
3436 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3438 if (cmd_len > len || !cmd.ident) {
3439 BT_DBG("corrupted command");
3444 case L2CAP_COMMAND_REJ:
3445 l2cap_command_rej(conn, &cmd, data);
3448 case L2CAP_CONN_REQ:
3449 err = l2cap_connect_req(conn, &cmd, data);
3452 case L2CAP_CONN_RSP:
3453 err = l2cap_connect_rsp(conn, &cmd, data);
3456 case L2CAP_CONF_REQ:
3457 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3460 case L2CAP_CONF_RSP:
3461 err = l2cap_config_rsp(conn, &cmd, data);
3464 case L2CAP_DISCONN_REQ:
3465 err = l2cap_disconnect_req(conn, &cmd, data);
3468 case L2CAP_DISCONN_RSP:
3469 err = l2cap_disconnect_rsp(conn, &cmd, data);
3472 case L2CAP_ECHO_REQ:
3473 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3476 case L2CAP_ECHO_RSP:
3479 case L2CAP_INFO_REQ:
3480 err = l2cap_information_req(conn, &cmd, data);
3483 case L2CAP_INFO_RSP:
3484 err = l2cap_information_rsp(conn, &cmd, data);
3488 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3494 struct l2cap_cmd_rej rej;
3495 BT_DBG("error %d", err);
3497 /* FIXME: Map err to a valid reason */
3498 rej.reason = cpu_to_le16(0);
3499 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3509 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3511 u16 our_fcs, rcv_fcs;
3512 int hdr_size = L2CAP_HDR_SIZE + 2;
3514 if (pi->fcs == L2CAP_FCS_CRC16) {
3515 skb_trim(skb, skb->len - 2);
3516 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3517 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3519 if (our_fcs != rcv_fcs)
3525 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3527 struct l2cap_pinfo *pi = l2cap_pi(sk);
3530 pi->frames_sent = 0;
3532 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3534 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3535 control |= L2CAP_SUPER_RCV_NOT_READY;
3536 l2cap_send_sframe(pi, control);
3537 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3540 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3541 l2cap_retransmit_frames(sk);
3543 l2cap_ertm_send(sk);
3545 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3546 pi->frames_sent == 0) {
3547 control |= L2CAP_SUPER_RCV_READY;
3548 l2cap_send_sframe(pi, control);
3552 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3554 struct sk_buff *next_skb;
3555 struct l2cap_pinfo *pi = l2cap_pi(sk);
3556 int tx_seq_offset, next_tx_seq_offset;
3558 bt_cb(skb)->tx_seq = tx_seq;
3559 bt_cb(skb)->sar = sar;
3561 next_skb = skb_peek(SREJ_QUEUE(sk));
3563 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3567 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3568 if (tx_seq_offset < 0)
3569 tx_seq_offset += 64;
3572 if (bt_cb(next_skb)->tx_seq == tx_seq)
3575 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3576 pi->buffer_seq) % 64;
3577 if (next_tx_seq_offset < 0)
3578 next_tx_seq_offset += 64;
3580 if (next_tx_seq_offset > tx_seq_offset) {
3581 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3585 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3588 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3590 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3595 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3597 struct l2cap_pinfo *pi = l2cap_pi(sk);
3598 struct sk_buff *_skb;
3601 switch (control & L2CAP_CTRL_SAR) {
3602 case L2CAP_SDU_UNSEGMENTED:
3603 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3606 err = sock_queue_rcv_skb(sk, skb);
3612 case L2CAP_SDU_START:
3613 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3616 pi->sdu_len = get_unaligned_le16(skb->data);
3618 if (pi->sdu_len > pi->imtu)
3621 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3625 /* pull sdu_len bytes only after alloc, because of Local Busy
3626 * condition we have to be sure that this will be executed
3627 * only once, i.e., when alloc does not fail */
3630 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3632 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3633 pi->partial_sdu_len = skb->len;
3636 case L2CAP_SDU_CONTINUE:
3637 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3643 pi->partial_sdu_len += skb->len;
3644 if (pi->partial_sdu_len > pi->sdu_len)
3647 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3652 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3658 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3659 pi->partial_sdu_len += skb->len;
3661 if (pi->partial_sdu_len > pi->imtu)
3664 if (pi->partial_sdu_len != pi->sdu_len)
3667 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3670 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3672 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3676 err = sock_queue_rcv_skb(sk, _skb);
3679 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3683 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3684 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3698 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3703 static int l2cap_try_push_rx_skb(struct sock *sk)
3705 struct l2cap_pinfo *pi = l2cap_pi(sk);
3706 struct sk_buff *skb;
3710 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3711 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3712 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3714 skb_queue_head(BUSY_QUEUE(sk), skb);
3718 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3721 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
3724 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3725 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3726 l2cap_send_sframe(pi, control);
3727 l2cap_pi(sk)->retry_count = 1;
3729 del_timer(&pi->retrans_timer);
3730 __mod_monitor_timer();
3732 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3735 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3736 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3738 BT_DBG("sk %p, Exit local busy", sk);
3743 static void l2cap_busy_work(struct work_struct *work)
3745 DECLARE_WAITQUEUE(wait, current);
3746 struct l2cap_pinfo *pi =
3747 container_of(work, struct l2cap_pinfo, busy_work);
3748 struct sock *sk = (struct sock *)pi;
3749 int n_tries = 0, timeo = HZ/5, err;
3750 struct sk_buff *skb;
3754 add_wait_queue(sk_sleep(sk), &wait);
3755 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3756 set_current_state(TASK_INTERRUPTIBLE);
3758 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3760 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3767 if (signal_pending(current)) {
3768 err = sock_intr_errno(timeo);
3773 timeo = schedule_timeout(timeo);
3776 err = sock_error(sk);
3780 if (l2cap_try_push_rx_skb(sk) == 0)
3784 set_current_state(TASK_RUNNING);
3785 remove_wait_queue(sk_sleep(sk), &wait);
3790 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3792 struct l2cap_pinfo *pi = l2cap_pi(sk);
3795 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3796 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3797 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3798 return l2cap_try_push_rx_skb(sk);
3803 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3805 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3809 /* Busy Condition */
3810 BT_DBG("sk %p, Enter local busy", sk);
3812 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3813 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3814 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3816 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3817 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3818 l2cap_send_sframe(pi, sctrl);
3820 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3822 del_timer(&pi->ack_timer);
3824 queue_work(_busy_wq, &pi->busy_work);
3829 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3831 struct l2cap_pinfo *pi = l2cap_pi(sk);
3832 struct sk_buff *_skb;
3836 * TODO: We have to notify the userland if some data is lost with the
3840 switch (control & L2CAP_CTRL_SAR) {
3841 case L2CAP_SDU_UNSEGMENTED:
3842 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3847 err = sock_queue_rcv_skb(sk, skb);
3853 case L2CAP_SDU_START:
3854 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3859 pi->sdu_len = get_unaligned_le16(skb->data);
3862 if (pi->sdu_len > pi->imtu) {
3867 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3873 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3875 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3876 pi->partial_sdu_len = skb->len;
3880 case L2CAP_SDU_CONTINUE:
3881 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3884 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3886 pi->partial_sdu_len += skb->len;
3887 if (pi->partial_sdu_len > pi->sdu_len)
3895 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3898 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3900 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3901 pi->partial_sdu_len += skb->len;
3903 if (pi->partial_sdu_len > pi->imtu)
3906 if (pi->partial_sdu_len == pi->sdu_len) {
3907 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3908 err = sock_queue_rcv_skb(sk, _skb);
3923 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3925 struct sk_buff *skb;
3928 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3929 if (bt_cb(skb)->tx_seq != tx_seq)
3932 skb = skb_dequeue(SREJ_QUEUE(sk));
3933 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3934 l2cap_ertm_reassembly_sdu(sk, skb, control);
3935 l2cap_pi(sk)->buffer_seq_srej =
3936 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3937 tx_seq = (tx_seq + 1) % 64;
3941 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3943 struct l2cap_pinfo *pi = l2cap_pi(sk);
3944 struct srej_list *l, *tmp;
3947 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3948 if (l->tx_seq == tx_seq) {
3953 control = L2CAP_SUPER_SELECT_REJECT;
3954 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3955 l2cap_send_sframe(pi, control);
3957 list_add_tail(&l->list, SREJ_LIST(sk));
3961 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3963 struct l2cap_pinfo *pi = l2cap_pi(sk);
3964 struct srej_list *new;
3967 while (tx_seq != pi->expected_tx_seq) {
3968 control = L2CAP_SUPER_SELECT_REJECT;
3969 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3970 l2cap_send_sframe(pi, control);
3972 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3973 new->tx_seq = pi->expected_tx_seq;
3974 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3975 list_add_tail(&new->list, SREJ_LIST(sk));
3977 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3980 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3982 struct l2cap_pinfo *pi = l2cap_pi(sk);
3983 u8 tx_seq = __get_txseq(rx_control);
3984 u8 req_seq = __get_reqseq(rx_control);
3985 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3986 int tx_seq_offset, expected_tx_seq_offset;
3987 int num_to_ack = (pi->tx_win/6) + 1;
3990 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3993 if (L2CAP_CTRL_FINAL & rx_control &&
3994 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3995 del_timer(&pi->monitor_timer);
3996 if (pi->unacked_frames > 0)
3997 __mod_retrans_timer();
3998 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
4001 pi->expected_ack_seq = req_seq;
4002 l2cap_drop_acked_frames(sk);
4004 if (tx_seq == pi->expected_tx_seq)
4007 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
4008 if (tx_seq_offset < 0)
4009 tx_seq_offset += 64;
4011 /* invalid tx_seq */
4012 if (tx_seq_offset >= pi->tx_win) {
4013 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4017 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
4020 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4021 struct srej_list *first;
4023 first = list_first_entry(SREJ_LIST(sk),
4024 struct srej_list, list);
4025 if (tx_seq == first->tx_seq) {
4026 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4027 l2cap_check_srej_gap(sk, tx_seq);
4029 list_del(&first->list);
4032 if (list_empty(SREJ_LIST(sk))) {
4033 pi->buffer_seq = pi->buffer_seq_srej;
4034 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
4036 BT_DBG("sk %p, Exit SREJ_SENT", sk);
4039 struct srej_list *l;
4041 /* duplicated tx_seq */
4042 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
4045 list_for_each_entry(l, SREJ_LIST(sk), list) {
4046 if (l->tx_seq == tx_seq) {
4047 l2cap_resend_srejframe(sk, tx_seq);
4051 l2cap_send_srejframe(sk, tx_seq);
4054 expected_tx_seq_offset =
4055 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4056 if (expected_tx_seq_offset < 0)
4057 expected_tx_seq_offset += 64;
4059 /* duplicated tx_seq */
4060 if (tx_seq_offset < expected_tx_seq_offset)
4063 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4065 BT_DBG("sk %p, Enter SREJ", sk);
4067 INIT_LIST_HEAD(SREJ_LIST(sk));
4068 pi->buffer_seq_srej = pi->buffer_seq;
4070 __skb_queue_head_init(SREJ_QUEUE(sk));
4071 __skb_queue_head_init(BUSY_QUEUE(sk));
4072 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4074 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4076 l2cap_send_srejframe(sk, tx_seq);
4078 del_timer(&pi->ack_timer);
4083 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4085 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4086 bt_cb(skb)->tx_seq = tx_seq;
4087 bt_cb(skb)->sar = sar;
4088 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4092 err = l2cap_push_rx_skb(sk, skb, rx_control);
4096 if (rx_control & L2CAP_CTRL_FINAL) {
4097 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4098 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4100 l2cap_retransmit_frames(sk);
4105 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4106 if (pi->num_acked == num_to_ack - 1)
4116 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
4118 struct l2cap_pinfo *pi = l2cap_pi(sk);
4120 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
4123 pi->expected_ack_seq = __get_reqseq(rx_control);
4124 l2cap_drop_acked_frames(sk);
4126 if (rx_control & L2CAP_CTRL_POLL) {
4127 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4128 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4129 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4130 (pi->unacked_frames > 0))
4131 __mod_retrans_timer();
4133 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4134 l2cap_send_srejtail(sk);
4136 l2cap_send_i_or_rr_or_rnr(sk);
4139 } else if (rx_control & L2CAP_CTRL_FINAL) {
4140 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4142 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4143 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4145 l2cap_retransmit_frames(sk);
4148 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4149 (pi->unacked_frames > 0))
4150 __mod_retrans_timer();
4152 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4153 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
4156 l2cap_ertm_send(sk);
4160 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4162 struct l2cap_pinfo *pi = l2cap_pi(sk);
4163 u8 tx_seq = __get_reqseq(rx_control);
4165 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4167 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4169 pi->expected_ack_seq = tx_seq;
4170 l2cap_drop_acked_frames(sk);
4172 if (rx_control & L2CAP_CTRL_FINAL) {
4173 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4174 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4176 l2cap_retransmit_frames(sk);
4178 l2cap_retransmit_frames(sk);
4180 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4181 pi->conn_state |= L2CAP_CONN_REJ_ACT;
4184 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4186 struct l2cap_pinfo *pi = l2cap_pi(sk);
4187 u8 tx_seq = __get_reqseq(rx_control);
4189 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4191 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4193 if (rx_control & L2CAP_CTRL_POLL) {
4194 pi->expected_ack_seq = tx_seq;
4195 l2cap_drop_acked_frames(sk);
4197 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4198 l2cap_retransmit_one_frame(sk, tx_seq);
4200 l2cap_ertm_send(sk);
4202 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4203 pi->srej_save_reqseq = tx_seq;
4204 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4206 } else if (rx_control & L2CAP_CTRL_FINAL) {
4207 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4208 pi->srej_save_reqseq == tx_seq)
4209 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4211 l2cap_retransmit_one_frame(sk, tx_seq);
4213 l2cap_retransmit_one_frame(sk, tx_seq);
4214 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4215 pi->srej_save_reqseq = tx_seq;
4216 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4221 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4223 struct l2cap_pinfo *pi = l2cap_pi(sk);
4224 u8 tx_seq = __get_reqseq(rx_control);
4226 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4228 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4229 pi->expected_ack_seq = tx_seq;
4230 l2cap_drop_acked_frames(sk);
4232 if (rx_control & L2CAP_CTRL_POLL)
4233 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4235 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4236 del_timer(&pi->retrans_timer);
4237 if (rx_control & L2CAP_CTRL_POLL)
4238 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4242 if (rx_control & L2CAP_CTRL_POLL)
4243 l2cap_send_srejtail(sk);
4245 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
4248 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4250 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4252 if (L2CAP_CTRL_FINAL & rx_control &&
4253 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4254 del_timer(&l2cap_pi(sk)->monitor_timer);
4255 if (l2cap_pi(sk)->unacked_frames > 0)
4256 __mod_retrans_timer();
4257 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4260 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4261 case L2CAP_SUPER_RCV_READY:
4262 l2cap_data_channel_rrframe(sk, rx_control);
4265 case L2CAP_SUPER_REJECT:
4266 l2cap_data_channel_rejframe(sk, rx_control);
4269 case L2CAP_SUPER_SELECT_REJECT:
4270 l2cap_data_channel_srejframe(sk, rx_control);
4273 case L2CAP_SUPER_RCV_NOT_READY:
4274 l2cap_data_channel_rnrframe(sk, rx_control);
4282 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4284 struct l2cap_pinfo *pi = l2cap_pi(sk);
4287 int len, next_tx_seq_offset, req_seq_offset;
4289 control = get_unaligned_le16(skb->data);
4294 * We can just drop the corrupted I-frame here.
4295 * Receiver will miss it and start proper recovery
4296 * procedures and ask retransmission.
4298 if (l2cap_check_fcs(pi, skb))
4301 if (__is_sar_start(control) && __is_iframe(control))
4304 if (pi->fcs == L2CAP_FCS_CRC16)
4307 if (len > pi->mps) {
4308 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4312 req_seq = __get_reqseq(control);
4313 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4314 if (req_seq_offset < 0)
4315 req_seq_offset += 64;
4317 next_tx_seq_offset =
4318 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4319 if (next_tx_seq_offset < 0)
4320 next_tx_seq_offset += 64;
4322 /* check for invalid req-seq */
4323 if (req_seq_offset > next_tx_seq_offset) {
4324 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4328 if (__is_iframe(control)) {
4330 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4334 l2cap_data_channel_iframe(sk, control, skb);
4338 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4342 l2cap_data_channel_sframe(sk, control, skb);
4352 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4355 struct l2cap_pinfo *pi;
4360 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4362 BT_DBG("unknown cid 0x%4.4x", cid);
4368 BT_DBG("sk %p, len %d", sk, skb->len);
4370 if (sk->sk_state != BT_CONNECTED)
4374 case L2CAP_MODE_BASIC:
4375 /* If socket recv buffers overflows we drop data here
4376 * which is *bad* because L2CAP has to be reliable.
4377 * But we don't have any other choice. L2CAP doesn't
4378 * provide flow control mechanism. */
4380 if (pi->imtu < skb->len)
4383 if (!sock_queue_rcv_skb(sk, skb))
4387 case L2CAP_MODE_ERTM:
4388 if (!sock_owned_by_user(sk)) {
4389 l2cap_ertm_data_rcv(sk, skb);
4391 if (sk_add_backlog(sk, skb))
4397 case L2CAP_MODE_STREAMING:
4398 control = get_unaligned_le16(skb->data);
4402 if (l2cap_check_fcs(pi, skb))
4405 if (__is_sar_start(control))
4408 if (pi->fcs == L2CAP_FCS_CRC16)
4411 if (len > pi->mps || len < 0 || __is_sframe(control))
4414 tx_seq = __get_txseq(control);
4416 if (pi->expected_tx_seq == tx_seq)
4417 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4419 pi->expected_tx_seq = (tx_seq + 1) % 64;
4421 l2cap_streaming_reassembly_sdu(sk, skb, control);
4426 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
4440 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4444 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4450 BT_DBG("sk %p, len %d", sk, skb->len);
4452 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4455 if (l2cap_pi(sk)->imtu < skb->len)
4458 if (!sock_queue_rcv_skb(sk, skb))
4470 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4472 struct l2cap_hdr *lh = (void *) skb->data;
4476 skb_pull(skb, L2CAP_HDR_SIZE);
4477 cid = __le16_to_cpu(lh->cid);
4478 len = __le16_to_cpu(lh->len);
4480 if (len != skb->len) {
4485 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4488 case L2CAP_CID_SIGNALING:
4489 l2cap_sig_channel(conn, skb);
4492 case L2CAP_CID_CONN_LESS:
4493 psm = get_unaligned_le16(skb->data);
4495 l2cap_conless_channel(conn, psm, skb);
4499 l2cap_data_channel(conn, cid, skb);
4504 /* ---- L2CAP interface with lower layer (HCI) ---- */
4506 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4508 int exact = 0, lm1 = 0, lm2 = 0;
4509 register struct sock *sk;
4510 struct hlist_node *node;
4512 if (type != ACL_LINK)
4515 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4517 /* Find listening sockets and check their link_mode */
4518 read_lock(&l2cap_sk_list.lock);
4519 sk_for_each(sk, node, &l2cap_sk_list.head) {
4520 if (sk->sk_state != BT_LISTEN)
4523 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4524 lm1 |= HCI_LM_ACCEPT;
4525 if (l2cap_pi(sk)->role_switch)
4526 lm1 |= HCI_LM_MASTER;
4528 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4529 lm2 |= HCI_LM_ACCEPT;
4530 if (l2cap_pi(sk)->role_switch)
4531 lm2 |= HCI_LM_MASTER;
4534 read_unlock(&l2cap_sk_list.lock);
4536 return exact ? lm1 : lm2;
4539 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4541 struct l2cap_conn *conn;
4543 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4545 if (hcon->type != ACL_LINK)
4549 conn = l2cap_conn_add(hcon, status);
4551 l2cap_conn_ready(conn);
4553 l2cap_conn_del(hcon, bt_err(status));
4558 static int l2cap_disconn_ind(struct hci_conn *hcon)
4560 struct l2cap_conn *conn = hcon->l2cap_data;
4562 BT_DBG("hcon %p", hcon);
4564 if (hcon->type != ACL_LINK || !conn)
4567 return conn->disc_reason;
4570 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4572 BT_DBG("hcon %p reason %d", hcon, reason);
4574 if (hcon->type != ACL_LINK)
4577 l2cap_conn_del(hcon, bt_err(reason));
4582 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4584 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4587 if (encrypt == 0x00) {
4588 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4589 l2cap_sock_clear_timer(sk);
4590 l2cap_sock_set_timer(sk, HZ * 5);
4591 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4592 __l2cap_sock_close(sk, ECONNREFUSED);
4594 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4595 l2cap_sock_clear_timer(sk);
4599 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4601 struct l2cap_chan_list *l;
4602 struct l2cap_conn *conn = hcon->l2cap_data;
4608 l = &conn->chan_list;
4610 BT_DBG("conn %p", conn);
4612 read_lock(&l->lock);
4614 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
4617 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4622 if (!status && (sk->sk_state == BT_CONNECTED ||
4623 sk->sk_state == BT_CONFIG)) {
4624 l2cap_check_encryption(sk, encrypt);
4629 if (sk->sk_state == BT_CONNECT) {
4631 struct l2cap_conn_req req;
4632 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4633 req.psm = l2cap_pi(sk)->psm;
4635 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4636 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4638 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4639 L2CAP_CONN_REQ, sizeof(req), &req);
4641 l2cap_sock_clear_timer(sk);
4642 l2cap_sock_set_timer(sk, HZ / 10);
4644 } else if (sk->sk_state == BT_CONNECT2) {
4645 struct l2cap_conn_rsp rsp;
4649 sk->sk_state = BT_CONFIG;
4650 result = L2CAP_CR_SUCCESS;
4652 sk->sk_state = BT_DISCONN;
4653 l2cap_sock_set_timer(sk, HZ / 10);
4654 result = L2CAP_CR_SEC_BLOCK;
4657 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4658 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4659 rsp.result = cpu_to_le16(result);
4660 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4661 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4662 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4668 read_unlock(&l->lock);
4673 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4675 struct l2cap_conn *conn = hcon->l2cap_data;
4677 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4680 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4682 if (flags & ACL_START) {
4683 struct l2cap_hdr *hdr;
4689 BT_ERR("Unexpected start frame (len %d)", skb->len);
4690 kfree_skb(conn->rx_skb);
4691 conn->rx_skb = NULL;
4693 l2cap_conn_unreliable(conn, ECOMM);
4696 /* Start fragment always begin with Basic L2CAP header */
4697 if (skb->len < L2CAP_HDR_SIZE) {
4698 BT_ERR("Frame is too short (len %d)", skb->len);
4699 l2cap_conn_unreliable(conn, ECOMM);
4703 hdr = (struct l2cap_hdr *) skb->data;
4704 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4705 cid = __le16_to_cpu(hdr->cid);
4707 if (len == skb->len) {
4708 /* Complete frame received */
4709 l2cap_recv_frame(conn, skb);
4713 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4715 if (skb->len > len) {
4716 BT_ERR("Frame is too long (len %d, expected len %d)",
4718 l2cap_conn_unreliable(conn, ECOMM);
4722 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4724 if (sk && l2cap_pi(sk)->imtu < len - L2CAP_HDR_SIZE) {
4725 BT_ERR("Frame exceeding recv MTU (len %d, MTU %d)",
4726 len, l2cap_pi(sk)->imtu);
4728 l2cap_conn_unreliable(conn, ECOMM);
4735 /* Allocate skb for the complete frame (with header) */
4736 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4740 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4742 conn->rx_len = len - skb->len;
4744 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4746 if (!conn->rx_len) {
4747 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4748 l2cap_conn_unreliable(conn, ECOMM);
4752 if (skb->len > conn->rx_len) {
4753 BT_ERR("Fragment is too long (len %d, expected %d)",
4754 skb->len, conn->rx_len);
4755 kfree_skb(conn->rx_skb);
4756 conn->rx_skb = NULL;
4758 l2cap_conn_unreliable(conn, ECOMM);
4762 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4764 conn->rx_len -= skb->len;
4766 if (!conn->rx_len) {
4767 /* Complete frame received */
4768 l2cap_recv_frame(conn, conn->rx_skb);
4769 conn->rx_skb = NULL;
4778 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4781 struct hlist_node *node;
4783 read_lock_bh(&l2cap_sk_list.lock);
4785 sk_for_each(sk, node, &l2cap_sk_list.head) {
4786 struct l2cap_pinfo *pi = l2cap_pi(sk);
4788 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4789 batostr(&bt_sk(sk)->src),
4790 batostr(&bt_sk(sk)->dst),
4791 sk->sk_state, __le16_to_cpu(pi->psm),
4793 pi->imtu, pi->omtu, pi->sec_level);
4796 read_unlock_bh(&l2cap_sk_list.lock);
4801 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4803 return single_open(file, l2cap_debugfs_show, inode->i_private);
4806 static const struct file_operations l2cap_debugfs_fops = {
4807 .open = l2cap_debugfs_open,
4809 .llseek = seq_lseek,
4810 .release = single_release,
4813 static struct dentry *l2cap_debugfs;
4815 static const struct proto_ops l2cap_sock_ops = {
4816 .family = PF_BLUETOOTH,
4817 .owner = THIS_MODULE,
4818 .release = l2cap_sock_release,
4819 .bind = l2cap_sock_bind,
4820 .connect = l2cap_sock_connect,
4821 .listen = l2cap_sock_listen,
4822 .accept = l2cap_sock_accept,
4823 .getname = l2cap_sock_getname,
4824 .sendmsg = l2cap_sock_sendmsg,
4825 .recvmsg = l2cap_sock_recvmsg,
4826 .poll = bt_sock_poll,
4827 .ioctl = bt_sock_ioctl,
4828 .mmap = sock_no_mmap,
4829 .socketpair = sock_no_socketpair,
4830 .shutdown = l2cap_sock_shutdown,
4831 .setsockopt = l2cap_sock_setsockopt,
4832 .getsockopt = l2cap_sock_getsockopt
4835 static const struct net_proto_family l2cap_sock_family_ops = {
4836 .family = PF_BLUETOOTH,
4837 .owner = THIS_MODULE,
4838 .create = l2cap_sock_create,
4841 static struct hci_proto l2cap_hci_proto = {
4843 .id = HCI_PROTO_L2CAP,
4844 .connect_ind = l2cap_connect_ind,
4845 .connect_cfm = l2cap_connect_cfm,
4846 .disconn_ind = l2cap_disconn_ind,
4847 .disconn_cfm = l2cap_disconn_cfm,
4848 .security_cfm = l2cap_security_cfm,
4849 .recv_acldata = l2cap_recv_acldata
4852 static int __init l2cap_init(void)
4856 err = proto_register(&l2cap_proto, 0);
4860 _busy_wq = create_singlethread_workqueue("l2cap");
4862 proto_unregister(&l2cap_proto);
4866 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4868 BT_ERR("L2CAP socket registration failed");
4872 err = hci_register_proto(&l2cap_hci_proto);
4874 BT_ERR("L2CAP protocol registration failed");
4875 bt_sock_unregister(BTPROTO_L2CAP);
4880 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4881 bt_debugfs, NULL, &l2cap_debugfs_fops);
4883 BT_ERR("Failed to create L2CAP debug file");
4886 BT_INFO("L2CAP ver %s", VERSION);
4887 BT_INFO("L2CAP socket layer initialized");
4892 destroy_workqueue(_busy_wq);
4893 proto_unregister(&l2cap_proto);
4897 static void __exit l2cap_exit(void)
4899 debugfs_remove(l2cap_debugfs);
4901 flush_workqueue(_busy_wq);
4902 destroy_workqueue(_busy_wq);
4904 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4905 BT_ERR("L2CAP socket unregistration failed");
4907 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4908 BT_ERR("L2CAP protocol unregistration failed");
4910 proto_unregister(&l2cap_proto);
4913 void l2cap_load(void)
4915 /* Dummy function to trigger automatic L2CAP module loading by
4916 * other modules that use L2CAP sockets but don't use any other
4917 * symbols from it. */
4919 EXPORT_SYMBOL(l2cap_load);
4921 module_init(l2cap_init);
4922 module_exit(l2cap_exit);
4924 module_param(disable_ertm, bool, 0644);
4925 MODULE_PARM_DESC(disable_ertm, "Disable enhanced retransmission mode");
4927 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4928 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4929 MODULE_VERSION(VERSION);
4930 MODULE_LICENSE("GPL");
4931 MODULE_ALIAS("bt-proto-0");