2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
65 static struct workqueue_struct *_busy_wq;
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static int l2cap_build_conf_req(struct sock *sk, void *data);
78 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
79 u8 code, u8 ident, u16 dlen, void *data);
81 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
83 /* ---- L2CAP timers ---- */
/* Socket timer callback: choose an errno appropriate to the current
 * connection state and tear the channel down.
 * NOTE(review): this extract is missing lines (e.g. the 'reason'
 * declaration, default assignment, and socket locking) — confirm
 * against the full file. */
84 static void l2cap_sock_timeout(unsigned long arg)
86 	struct sock *sk = (struct sock *) arg;
89 	BT_DBG("sock %p state %d", sk, sk->sk_state);
/* A timeout while connected/configuring, or while connecting before
 * security setup finished, is reported as ECONNREFUSED. */
93 	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
94 		reason = ECONNREFUSED;
95 	else if (sk->sk_state == BT_CONNECT &&
96 			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
97 		reason = ECONNREFUSED;
101 	__l2cap_sock_close(sk, reason);
/* (Re)arm the per-socket timer to fire 'timeout' jiffies from now. */
109 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
111 	BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
112 	sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel the per-socket timer (pairs with l2cap_sock_set_timer). */
115 static void l2cap_sock_clear_timer(struct sock *sk)
117 	BT_DBG("sock %p state %d", sk, sk->sk_state);
118 	sk_stop_timer(sk, &sk->sk_timer);
121 /* ---- L2CAP channels ---- */
/* Walk the connection's channel list and return the socket whose
 * destination CID matches, or NULL. Caller must hold l->lock. */
122 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
125 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
126 		if (l2cap_pi(s)->dcid == cid)
/* Walk the connection's channel list and return the socket whose
 * source CID matches, or NULL. Caller must hold l->lock. */
132 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
135 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
136 		if (l2cap_pi(s)->scid == cid)
142 /* Find channel with given SCID.
143 * Returns locked socket */
/* Locked wrapper around __l2cap_get_chan_by_scid: takes the list read
 * lock for the lookup; per the comment above, the returned socket is
 * locked (the bh_lock_sock call is not visible in this extract). */
144 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
148 	s = __l2cap_get_chan_by_scid(l, cid);
151 	read_unlock(&l->lock);
/* Find the channel whose pending signalling command ident matches.
 * Caller must hold l->lock. */
155 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
158 	for (s = l->head; s; s = l2cap_pi(s)->next_c) {
159 		if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident (read lock held for
 * the lookup; socket locking not visible in this extract). */
165 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
169 	s = __l2cap_get_chan_by_ident(l, ident);
172 	read_unlock(&l->lock);
/* Allocate the first free dynamic source CID in
 * [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). Caller must hold l->lock. */
176 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
178 	u16 cid = L2CAP_CID_DYN_START;
180 	for (; cid < L2CAP_CID_DYN_END; cid++) {
181 		if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the doubly-linked channel list.
 * Caller must hold the list write lock. */
188 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
193 		l2cap_pi(l->head)->prev_c = sk;
195 	l2cap_pi(sk)->next_c = l->head;
196 	l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the channel list, fixing up neighbours' links.
 * Takes the list write lock itself (bh-safe variant). */
200 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
202 	struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
204 	write_lock_bh(&l->lock);
209 		l2cap_pi(next)->prev_c = prev;
211 		l2cap_pi(prev)->next_c = next;
212 	write_unlock_bh(&l->lock);
/* Attach a socket to an L2CAP connection: assign CIDs according to
 * socket type, link it into the channel list, and (if accepted from a
 * listener) enqueue it on the parent's accept queue.
 * Caller must hold the channel list write lock (see l2cap_chan_add). */
217 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
219 	struct l2cap_chan_list *l = &conn->chan_list;
221 	BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
222 			l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
224 	conn->disc_reason = 0x13;
226 	l2cap_pi(sk)->conn = conn;
228 	if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
229 		/* Alloc CID for connection-oriented socket */
230 		l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
231 	} else if (sk->sk_type == SOCK_DGRAM) {
232 		/* Connectionless socket */
233 		l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
234 		l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
235 		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
237 		/* Raw socket can send/recv signalling messages only */
238 		l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
239 		l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
240 		l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
243 	__l2cap_chan_link(l, sk);
246 		bt_accept_enqueue(parent, sk);
250 * Must be called on the locked socket. */
/* Detach a channel from its connection and mark it closed: stop the
 * socket timer, unlink from the channel list, drop the hci_conn
 * reference, notify the parent (for accepted sockets) or the state
 * change handler, and purge all ERTM state (queues, timers, SREJ list).
 * Per the comment above, must be called with the socket locked. */
251 static void l2cap_chan_del(struct sock *sk, int err)
253 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
254 	struct sock *parent = bt_sk(sk)->parent;
256 	l2cap_sock_clear_timer(sk);
258 	BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
261 		/* Unlink from channel list */
262 		l2cap_chan_unlink(&conn->chan_list, sk);
263 		l2cap_pi(sk)->conn = NULL;
264 		hci_conn_put(conn->hcon);
267 	sk->sk_state = BT_CLOSED;
268 	sock_set_flag(sk, SOCK_ZAPPED);
274 		bt_accept_unlink(sk);
275 		parent->sk_data_ready(parent, 0);
277 		sk->sk_state_change(sk);
279 	skb_queue_purge(TX_QUEUE(sk));
/* ERTM-only cleanup: kill retransmission machinery and buffered frames. */
281 	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
282 		struct srej_list *l, *tmp;
284 		del_timer(&l2cap_pi(sk)->retrans_timer);
285 		del_timer(&l2cap_pi(sk)->monitor_timer);
286 		del_timer(&l2cap_pi(sk)->ack_timer);
288 		skb_queue_purge(SREJ_QUEUE(sk));
289 		skb_queue_purge(BUSY_QUEUE(sk));
/* NOTE(review): the list_del/kfree body of this loop is missing from
 * this extract — confirm against the full file. */
291 		list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
298 /* Service level security */
/* Map the channel's PSM and requested security level to an HCI
 * authentication type, then ask the HCI layer to enforce it.
 * PSM 0x0001 (SDP) never requires bonding. Returns the result of
 * hci_conn_security() (truthy when security is already satisfied). */
299 static inline int l2cap_check_security(struct sock *sk)
301 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
304 	if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
305 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
306 			auth_type = HCI_AT_NO_BONDING_MITM;
308 			auth_type = HCI_AT_NO_BONDING;
/* SDP connections are downgraded from LOW to the dedicated SDP level. */
310 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
311 			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
313 		switch (l2cap_pi(sk)->sec_level) {
314 		case BT_SECURITY_HIGH:
315 			auth_type = HCI_AT_GENERAL_BONDING_MITM;
317 		case BT_SECURITY_MEDIUM:
318 			auth_type = HCI_AT_GENERAL_BONDING;
321 			auth_type = HCI_AT_NO_BONDING;
326 	return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling command identifier for this connection,
 * wrapping within the kernel-reserved range 1..128 (see comment below).
 * Serialized by conn->lock. */
330 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
334 	/* Get next available identificator.
335 	 *    1 - 128 are used by kernel.
336 	 *  129 - 199 are reserved.
337 	 *  200 - 254 are used by utilities like l2ping, etc.
340 	spin_lock_bh(&conn->lock);
342 	if (++conn->tx_ident > 128)
347 	spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command PDU and hand it to the ACL layer.
 * NOTE(review): the NULL-check on the built skb is missing from this
 * extract — confirm against the full file. */
352 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
354 	struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
356 	BT_DBG("code 0x%2.2x", code);
361 	hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame carrying 'control', consuming any
 * pending F-bit/P-bit state, and appending a CRC16 FCS when the channel
 * was configured with L2CAP_FCS_CRC16. No-op unless BT_CONNECTED. */
364 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
367 	struct l2cap_hdr *lh;
368 	struct l2cap_conn *conn = pi->conn;
369 	struct sock *sk = (struct sock *)pi;
370 	int count, hlen = L2CAP_HDR_SIZE + 2;
372 	if (sk->sk_state != BT_CONNECTED)
/* FCS adds 2 bytes to the header length (hlen += 2, line elided here). */
375 	if (pi->fcs == L2CAP_FCS_CRC16)
378 	BT_DBG("pi %p, control 0x%2.2x", pi, control);
380 	count = min_t(unsigned int, conn->mtu, hlen);
381 	control |= L2CAP_CTRL_FRAME_TYPE;
/* One-shot flags: F-bit and P-bit are cleared once consumed. */
383 	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
384 		control |= L2CAP_CTRL_FINAL;
385 		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
388 	if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
389 		control |= L2CAP_CTRL_POLL;
390 		pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
393 	skb = bt_skb_alloc(count, GFP_ATOMIC);
397 	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
398 	lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
399 	lh->cid = cpu_to_le16(pi->dcid);
400 	put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything written so far (header + control). */
402 	if (pi->fcs == L2CAP_FCS_CRC16) {
403 		u16 fcs = crc16(0, (u8 *)lh, count - 2);
404 		put_unaligned_le16(fcs, skb_put(skb, 2));
407 	hci_send_acl(pi->conn->hcon, skb, 0);
/* Send a Receiver-Ready, or Receiver-Not-Ready when we are locally
 * busy (also recording that an RNR went out), acknowledging frames up
 * to buffer_seq. */
410 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
412 	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
413 		control |= L2CAP_SUPER_RCV_NOT_READY;
414 		pi->conn_state |= L2CAP_CONN_RNR_SENT;
416 		control |= L2CAP_SUPER_RCV_READY;
418 	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
420 	l2cap_send_sframe(pi, control);
/* True when no L2CAP Connect Request is currently outstanding. */
423 static inline int __l2cap_no_conn_pending(struct sock *sk)
425 	return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment. If the peer's feature mask is
 * already known (or being fetched), send a Connect Request once
 * security is satisfied and none is pending; otherwise issue an
 * Information Request for the feature mask first and start the info
 * timer so establishment resumes when the response (or timeout)
 * arrives. */
428 static void l2cap_do_start(struct sock *sk)
430 	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
432 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature mask request still in flight — wait for it. */
433 		if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
436 		if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
437 			struct l2cap_conn_req req;
438 			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
439 			req.psm  = l2cap_pi(sk)->psm;
441 			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
442 			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
444 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
445 					L2CAP_CONN_REQ, sizeof(req), &req);
448 		struct l2cap_info_req req;
449 		req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
451 		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
452 		conn->info_ident = l2cap_get_ident(conn);
454 		mod_timer(&conn->info_timer, jiffies +
455 					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
457 		l2cap_send_cmd(conn, conn->info_ident,
458 					L2CAP_INFO_REQ, sizeof(req), &req);
/* Check whether an L2CAP channel mode is supported by both the local
 * feature mask (ERTM/streaming forced on here — presumably gated by
 * enable_ertm on an elided line; confirm against the full file) and
 * the remote feature mask. */
462 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
464 	u32 local_feat_mask = l2cap_feat_mask;
466 		local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
469 	case L2CAP_MODE_ERTM:
470 		return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
471 	case L2CAP_MODE_STREAMING:
472 		return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Abort the channel: flush the transmit queue, stop ERTM timers, send
 * an L2CAP Disconnect Request for this channel's CID pair and move the
 * socket to BT_DISCONN. 'err' is recorded as sk_err on an elided line
 * — confirm against the full file. */
478 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
480 	struct l2cap_disconn_req req;
485 	skb_queue_purge(TX_QUEUE(sk));
487 	if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
488 		del_timer(&l2cap_pi(sk)->retrans_timer);
489 		del_timer(&l2cap_pi(sk)->monitor_timer);
490 		del_timer(&l2cap_pi(sk)->ack_timer);
493 	req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
494 	req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
495 	l2cap_send_cmd(conn, l2cap_get_ident(conn),
496 			L2CAP_DISCONN_REQ, sizeof(req), &req);
498 	sk->sk_state = BT_DISCONN;
502 /* ---- L2CAP connections ---- */
/* Drive every connection-oriented channel on this link forward once
 * the link is usable: send Connect Requests for channels in
 * BT_CONNECT (deferring sockets whose mode the pair cannot support
 * onto a local kill-list), and answer channels in BT_CONNECT2 with a
 * Connect Response (pending for deferred setup or while
 * authentication completes, success otherwise, followed by the first
 * Configure Request). Channels on the kill-list are closed after the
 * list read lock is dropped, since closing needs the socket lock. */
503 static void l2cap_conn_start(struct l2cap_conn *conn)
505 	struct l2cap_chan_list *l = &conn->chan_list;
506 	struct sock_del_list del, *tmp1, *tmp2;
509 	BT_DBG("conn %p", conn);
511 	INIT_LIST_HEAD(&del.list);
515 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only SEQPACKET/STREAM channels participate in connect/config. */
518 		if (sk->sk_type != SOCK_SEQPACKET &&
519 				sk->sk_type != SOCK_STREAM) {
524 		if (sk->sk_state == BT_CONNECT) {
525 			struct l2cap_conn_req req;
527 			if (!l2cap_check_security(sk) ||
528 					!__l2cap_no_conn_pending(sk)) {
/* Mode mandated by the application but unsupported by this
 * device pair: queue the socket for teardown. */
533 			if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
535 					&& l2cap_pi(sk)->conf_state &
536 					L2CAP_CONF_STATE2_DEVICE) {
537 				tmp1 = kzalloc(sizeof(struct sock_del_list),
540 				list_add_tail(&tmp1->list, &del.list);
545 			req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
546 			req.psm  = l2cap_pi(sk)->psm;
548 			l2cap_pi(sk)->ident = l2cap_get_ident(conn);
549 			l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
551 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
552 				L2CAP_CONN_REQ, sizeof(req), &req);
554 		} else if (sk->sk_state == BT_CONNECT2) {
555 			struct l2cap_conn_rsp rsp;
557 			rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
558 			rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
560 			if (l2cap_check_security(sk)) {
561 				if (bt_sk(sk)->defer_setup) {
562 					struct sock *parent = bt_sk(sk)->parent;
563 					rsp.result = cpu_to_le16(L2CAP_CR_PEND);
564 					rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
565 					parent->sk_data_ready(parent, 0);
568 					sk->sk_state = BT_CONFIG;
569 					rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
570 					rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
573 				rsp.result = cpu_to_le16(L2CAP_CR_PEND);
574 				rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
577 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
578 				L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Only a successful, not-yet-configured channel proceeds to
 * the Configure Request exchange. */
580 			if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT ||
581 					rsp.result != L2CAP_CR_SUCCESS) {
586 			l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
587 			l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
588 						l2cap_build_conf_req(sk, buf), buf);
589 			l2cap_pi(sk)->num_conf_req++;
595 	read_unlock(&l->lock);
597 	list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
598 		bh_lock_sock(tmp1->sk);
599 		__l2cap_sock_close(tmp1->sk, ECONNRESET);
600 		bh_unlock_sock(tmp1->sk);
601 		list_del(&tmp1->list);
/* ACL link is up: mark raw/datagram channels connected immediately and
 * start the L2CAP handshake (l2cap_do_start, on an elided line) for
 * channels waiting in BT_CONNECT. */
606 static void l2cap_conn_ready(struct l2cap_conn *conn)
608 	struct l2cap_chan_list *l = &conn->chan_list;
611 	BT_DBG("conn %p", conn);
615 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
618 		if (sk->sk_type != SOCK_SEQPACKET &&
619 				sk->sk_type != SOCK_STREAM) {
620 			l2cap_sock_clear_timer(sk);
621 			sk->sk_state = BT_CONNECTED;
622 			sk->sk_state_change(sk);
623 		} else if (sk->sk_state == BT_CONNECT)
629 	read_unlock(&l->lock);
632 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate a link reliability failure to every channel that asked for
 * force_reliable semantics (the sk_err assignment is elided in this
 * extract — confirm against the full file). */
633 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
635 	struct l2cap_chan_list *l = &conn->chan_list;
638 	BT_DBG("conn %p", conn);
642 	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
643 		if (l2cap_pi(sk)->force_reliable)
647 	read_unlock(&l->lock);
/* Information Request timed out: give up on the feature mask exchange
 * and proceed with channel establishment anyway. */
650 static void l2cap_info_timeout(unsigned long arg)
652 	struct l2cap_conn *conn = (void *) arg;
654 	conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
655 	conn->info_ident = 0;
657 	l2cap_conn_start(conn);
/* Get or create the L2CAP connection object hanging off an hci_conn:
 * reuses an existing hcon->l2cap_data, otherwise allocates and
 * initializes a fresh one (MTU from the adapter, addresses from the
 * link, locks, info timer, default disconnect reason 0x13). */
660 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
662 	struct l2cap_conn *conn = hcon->l2cap_data;
667 	conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
671 	hcon->l2cap_data = conn;
674 	BT_DBG("hcon %p conn %p", hcon, conn);
676 	conn->mtu = hcon->hdev->acl_mtu;
677 	conn->src = &hcon->hdev->bdaddr;
678 	conn->dst = &hcon->dst;
682 	spin_lock_init(&conn->lock);
683 	rwlock_init(&conn->chan_list.lock);
685 	setup_timer(&conn->info_timer, l2cap_info_timeout,
686 						(unsigned long) conn);
688 	conn->disc_reason = 0x13;
/* Tear down the L2CAP connection object: free any partially reassembled
 * rx skb, delete every remaining channel with 'err', stop the info
 * timer if it was armed, and detach from the hci_conn. The kfree of
 * conn itself is elided in this extract — confirm against the full
 * file. */
693 static void l2cap_conn_del(struct hci_conn *hcon, int err)
695 	struct l2cap_conn *conn = hcon->l2cap_data;
701 	BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
703 	kfree_skb(conn->rx_skb);
706 	while ((sk = conn->chan_list.head)) {
708 		l2cap_chan_del(sk, err);
713 	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
714 		del_timer_sync(&conn->info_timer);
716 	hcon->l2cap_data = NULL;
/* Locked wrapper: add a channel to the connection under the channel
 * list write lock. */
720 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
722 	struct l2cap_chan_list *l = &conn->chan_list;
723 	write_lock_bh(&l->lock);
724 	__l2cap_chan_add(conn, sk, parent);
725 	write_unlock_bh(&l->lock);
728 /* ---- Socket interface ---- */
/* Exact-match lookup in the global socket list by bound (sport, src
 * bdaddr) pair; used to detect bind conflicts. Caller must hold
 * l2cap_sk_list.lock. */
729 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
732 	struct hlist_node *node;
733 	sk_for_each(sk, node, &l2cap_sk_list.head)
734 		if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
741 /* Find socket with psm and source bdaddr.
742 * Returns closest match.
/* Closest-match PSM lookup: prefer a socket bound to the exact source
 * address; fall back (via sk1) to one bound to BDADDR_ANY. 'state'
 * of 0 matches any socket state. Caller must hold l2cap_sk_list.lock. */
744 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
746 	struct sock *sk = NULL, *sk1 = NULL;
747 	struct hlist_node *node;
749 	sk_for_each(sk, node, &l2cap_sk_list.head) {
750 		if (state && sk->sk_state != state)
753 		if (l2cap_pi(sk)->psm == psm) {
755 			if (!bacmp(&bt_sk(sk)->src, src))
759 			if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop broke on an exact match. */
763 	return node ? sk : sk1;
766 /* Find socket with given address (psm, src).
767 * Returns locked socket */
/* Locked wrapper around __l2cap_get_sock_by_psm; per the comment above
 * the returned socket is locked (bh_lock_sock elided in this extract). */
768 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
771 	read_lock(&l2cap_sk_list.lock);
772 	s = __l2cap_get_sock_by_psm(state, psm, src);
775 	read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any skbs still queued on the socket. */
779 static void l2cap_sock_destruct(struct sock *sk)
783 	skb_queue_purge(&sk->sk_receive_queue);
784 	skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the listener itself closed and zapped. */
787 static void l2cap_sock_cleanup_listen(struct sock *parent)
791 	BT_DBG("parent %p", parent);
793 	/* Close not yet accepted channels */
794 	while ((sk = bt_accept_dequeue(parent, NULL)))
795 		l2cap_sock_close(sk);
797 	parent->sk_state  = BT_CLOSED;
798 	sock_set_flag(parent, SOCK_ZAPPED);
801 /* Kill socket (only if zapped and orphan)
802 * Must be called on unlocked socket.
/* Free a socket, but only when it is both zapped and orphaned (no
 * struct socket attached). Per the comment above, call on an unlocked
 * socket. The final sock_put is elided in this extract. */
804 static void l2cap_sock_kill(struct sock *sk)
806 	if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
809 	BT_DBG("sk %p state %d", sk, sk->sk_state);
811 	/* Kill poor orphan */
812 	bt_sock_unlink(&l2cap_sk_list, sk);
813 	sock_set_flag(sk, SOCK_DEAD);
/* State-machine driven close. Listening sockets sweep their accept
 * queue; connected/configuring connection-oriented channels send a
 * Disconnect Request (with a send timeout armed); a BT_CONNECT2
 * channel that never completed setup answers the pending Connect
 * Request with a refusal before deletion; everything else is deleted
 * or simply zapped. Case labels between branches are elided in this
 * extract. */
817 static void __l2cap_sock_close(struct sock *sk, int reason)
819 	BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
821 	switch (sk->sk_state) {
823 		l2cap_sock_cleanup_listen(sk);
828 		if (sk->sk_type == SOCK_SEQPACKET ||
829 				sk->sk_type == SOCK_STREAM) {
830 			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
832 			l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
833 			l2cap_send_disconn_req(conn, sk, reason);
835 			l2cap_chan_del(sk, reason);
839 		if (sk->sk_type == SOCK_SEQPACKET ||
840 				sk->sk_type == SOCK_STREAM) {
841 			struct l2cap_conn *conn = l2cap_pi(sk)->conn;
842 			struct l2cap_conn_rsp rsp;
/* Deferred-setup channels are refused for security reasons,
 * otherwise report a bad PSM. */
845 			if (bt_sk(sk)->defer_setup)
846 				result = L2CAP_CR_SEC_BLOCK;
848 				result = L2CAP_CR_BAD_PSM;
850 			rsp.scid   = cpu_to_le16(l2cap_pi(sk)->dcid);
851 			rsp.dcid   = cpu_to_le16(l2cap_pi(sk)->scid);
852 			rsp.result = cpu_to_le16(result);
853 			rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
854 			l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
855 					L2CAP_CONN_RSP, sizeof(rsp), &rsp);
857 		l2cap_chan_del(sk, reason);
862 		l2cap_chan_del(sk, reason);
866 		sock_set_flag(sk, SOCK_ZAPPED);
871 /* Must be called on unlocked socket. */
/* Public close entry point (unlocked socket): stop the timer and close
 * with ECONNRESET; the surrounding lock/kill calls are elided in this
 * extract. */
872 static void l2cap_sock_close(struct sock *sk)
874 	l2cap_sock_clear_timer(sk);
876 	__l2cap_sock_close(sk, ECONNRESET);
/* Initialize L2CAP-specific socket state. A child accepted from a
 * listener inherits its parent's configuration wholesale; a fresh
 * socket gets defaults (basic mode unless ERTM is enabled for stream
 * sockets, default MTU/flush timeout, CRC16 FCS, low security), and
 * all ERTM queues/lists are initialized. */
881 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
883 	struct l2cap_pinfo *pi = l2cap_pi(sk);
888 		sk->sk_type = parent->sk_type;
889 		bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
891 		pi->imtu = l2cap_pi(parent)->imtu;
892 		pi->omtu = l2cap_pi(parent)->omtu;
893 		pi->conf_state = l2cap_pi(parent)->conf_state;
894 		pi->mode = l2cap_pi(parent)->mode;
895 		pi->fcs  = l2cap_pi(parent)->fcs;
896 		pi->max_tx = l2cap_pi(parent)->max_tx;
897 		pi->tx_win = l2cap_pi(parent)->tx_win;
898 		pi->sec_level = l2cap_pi(parent)->sec_level;
899 		pi->role_switch = l2cap_pi(parent)->role_switch;
900 		pi->force_reliable = l2cap_pi(parent)->force_reliable;
902 		pi->imtu = L2CAP_DEFAULT_MTU;
904 		if (enable_ertm && sk->sk_type == SOCK_STREAM) {
905 			pi->mode = L2CAP_MODE_ERTM;
906 			pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
908 			pi->mode = L2CAP_MODE_BASIC;
910 		pi->max_tx = L2CAP_DEFAULT_MAX_TX;
911 		pi->fcs  = L2CAP_FCS_CRC16;
912 		pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
913 		pi->sec_level = BT_SECURITY_LOW;
915 		pi->force_reliable = 0;
918 	/* Default config options */
920 	pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
921 	skb_queue_head_init(TX_QUEUE(sk));
922 	skb_queue_head_init(SREJ_QUEUE(sk));
923 	skb_queue_head_init(BUSY_QUEUE(sk));
924 	INIT_LIST_HEAD(SREJ_LIST(sk));
927 static struct proto l2cap_proto = {
929 .owner = THIS_MODULE,
930 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP struct sock: destructor,
 * connect timeout, timer, and registration in the global socket list.
 * Returns NULL on allocation failure (check elided in this extract). */
933 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
937 	sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
941 	sock_init_data(sock, sk);
942 	INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
944 	sk->sk_destruct = l2cap_sock_destruct;
945 	sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
947 	sock_reset_flag(sk, SOCK_ZAPPED);
949 	sk->sk_protocol = proto;
950 	sk->sk_state = BT_OPEN;
952 	setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
954 	bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend for PF_BLUETOOTH/BTPROTO_L2CAP: validate the
 * socket type, require CAP_NET_RAW for user-space raw sockets, then
 * allocate and initialize the sock. */
958 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
963 	BT_DBG("sock %p", sock);
965 	sock->state = SS_UNCONNECTED;
967 	if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
968 			sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
969 		return -ESOCKTNOSUPPORT;
971 	if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
974 	sock->ops = &l2cap_sock_ops;
976 	sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
980 	l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, require
 * CAP_NET_BIND_SERVICE for privileged PSMs (< 0x1001), reject a
 * duplicate (psm, bdaddr) binding under the socket-list write lock,
 * then record the source address/PSM and move to BT_BOUND. SDP and
 * RFCOMM PSMs (0x0001/0x0003) default to SDP-level security. */
984 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
986 	struct sock *sk = sock->sk;
987 	struct sockaddr_l2 la;
992 	if (!addr || addr->sa_family != AF_BLUETOOTH)
995 	memset(&la, 0, sizeof(la));
996 	len = min_t(unsigned int, sizeof(la), alen);
997 	memcpy(&la, addr, len);
1004 	if (sk->sk_state != BT_OPEN) {
1009 	if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
1010 				!capable(CAP_NET_BIND_SERVICE)) {
1015 	write_lock_bh(&l2cap_sk_list.lock);
1017 	if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1020 		/* Save source address */
1021 		bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1022 		l2cap_pi(sk)->psm   = la.l2_psm;
1023 		l2cap_pi(sk)->sport = la.l2_psm;
1024 		sk->sk_state = BT_BOUND;
1026 		if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1027 					__le16_to_cpu(la.l2_psm) == 0x0003)
1028 			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1031 	write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the outgoing ACL link for a connecting socket: route to an
 * adapter, derive the HCI auth type from socket type / PSM / security
 * level (raw sockets use dedicated bonding, SDP never bonds), create
 * the hci_conn and its l2cap_conn, attach the channel, and either
 * finish immediately (link already up: raw/dgram go straight to
 * BT_CONNECTED, others call l2cap_do_start on an elided line) or leave
 * the socket in BT_CONNECT with a timeout armed. */
1038 static int l2cap_do_connect(struct sock *sk)
1040 	bdaddr_t *src = &bt_sk(sk)->src;
1041 	bdaddr_t *dst = &bt_sk(sk)->dst;
1042 	struct l2cap_conn *conn;
1043 	struct hci_conn *hcon;
1044 	struct hci_dev *hdev;
1048 	BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1051 	hdev = hci_get_route(dst, src);
1053 		return -EHOSTUNREACH;
1055 	hci_dev_lock_bh(hdev);
1059 	if (sk->sk_type == SOCK_RAW) {
1060 		switch (l2cap_pi(sk)->sec_level) {
1061 		case BT_SECURITY_HIGH:
1062 			auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1064 		case BT_SECURITY_MEDIUM:
1065 			auth_type = HCI_AT_DEDICATED_BONDING;
1068 			auth_type = HCI_AT_NO_BONDING;
1071 	} else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1072 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1073 			auth_type = HCI_AT_NO_BONDING_MITM;
1075 			auth_type = HCI_AT_NO_BONDING;
1077 		if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1078 			l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1080 		switch (l2cap_pi(sk)->sec_level) {
1081 		case BT_SECURITY_HIGH:
1082 			auth_type = HCI_AT_GENERAL_BONDING_MITM;
1084 		case BT_SECURITY_MEDIUM:
1085 			auth_type = HCI_AT_GENERAL_BONDING;
1088 			auth_type = HCI_AT_NO_BONDING;
1093 	hcon = hci_connect(hdev, ACL_LINK, dst,
1094 					l2cap_pi(sk)->sec_level, auth_type);
1098 	conn = l2cap_conn_add(hcon, 0);
1106 	/* Update source addr of the socket */
1107 	bacpy(src, conn->src);
1109 	l2cap_chan_add(conn, sk, NULL);
1111 	sk->sk_state = BT_CONNECT;
1112 	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1114 	if (hcon->state == BT_CONNECTED) {
1115 		if (sk->sk_type != SOCK_SEQPACKET &&
1116 				sk->sk_type != SOCK_STREAM) {
1117 			l2cap_sock_clear_timer(sk);
1118 			sk->sk_state = BT_CONNECTED;
1124 	hci_dev_unlock_bh(hdev);
/* connect(2): validate the address, require a PSM or CID for
 * connection-oriented types (condition partially elided), reject
 * unsupported channel modes, check the socket state (labels elided),
 * record the destination, start the connect, and optionally block
 * until BT_CONNECTED subject to the socket send timeout. */
1129 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1131 	struct sock *sk = sock->sk;
1132 	struct sockaddr_l2 la;
1135 	BT_DBG("sk %p", sk);
1137 	if (!addr || alen < sizeof(addr->sa_family) ||
1138 			addr->sa_family != AF_BLUETOOTH)
1141 	memset(&la, 0, sizeof(la));
1142 	len = min_t(unsigned int, sizeof(la), alen);
1143 	memcpy(&la, addr, len);
1150 	if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1156 	switch (l2cap_pi(sk)->mode) {
1157 	case L2CAP_MODE_BASIC:
1159 	case L2CAP_MODE_ERTM:
1160 	case L2CAP_MODE_STREAMING:
1169 	switch (sk->sk_state) {
1173 		/* Already connecting */
1177 		/* Already connected */
1191 	/* Set destination address and psm */
1192 	bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1193 	l2cap_pi(sk)->psm = la.l2_psm;
1195 	err = l2cap_do_connect(sk);
1200 	err = bt_sock_wait_state(sk, BT_CONNECTED,
1201 			sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET/STREAM sockets in a supported mode
 * may listen. A socket bound without a PSM is auto-assigned the first
 * free odd dynamic PSM in [0x1001, 0x1100) under the socket-list write
 * lock, then the backlog is set and the state moves to BT_LISTEN. */
1207 static int l2cap_sock_listen(struct socket *sock, int backlog)
1209 	struct sock *sk = sock->sk;
1212 	BT_DBG("sk %p backlog %d", sk, backlog);
1216 	if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1217 			|| sk->sk_state != BT_BOUND) {
1222 	switch (l2cap_pi(sk)->mode) {
1223 	case L2CAP_MODE_BASIC:
1225 	case L2CAP_MODE_ERTM:
1226 	case L2CAP_MODE_STREAMING:
1235 	if (!l2cap_pi(sk)->psm) {
1236 		bdaddr_t *src = &bt_sk(sk)->src;
1241 		write_lock_bh(&l2cap_sk_list.lock);
/* Dynamic PSMs are odd in the low byte, hence the stride of 2. */
1243 		for (psm = 0x1001; psm < 0x1100; psm += 2)
1244 			if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1245 				l2cap_pi(sk)->psm   = cpu_to_le16(psm);
1246 				l2cap_pi(sk)->sport = cpu_to_le16(psm);
1251 		write_unlock_bh(&l2cap_sk_list.lock);
1257 	sk->sk_max_ack_backlog = backlog;
1258 	sk->sk_ack_backlog = 0;
1259 	sk->sk_state = BT_LISTEN;
/* accept(2): classic wake-one sleep loop on the listener's wait queue.
 * Drops the socket lock around schedule_timeout (release elided in
 * this extract), re-checks the listen state after every wakeup, and
 * honors O_NONBLOCK and signals via the receive timeout. */
1266 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1268 	DECLARE_WAITQUEUE(wait, current);
1269 	struct sock *sk = sock->sk, *nsk;
1273 	lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1275 	if (sk->sk_state != BT_LISTEN) {
1280 	timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1282 	BT_DBG("sk %p timeo %ld", sk, timeo);
1284 	/* Wait for an incoming connection. (wake-one). */
1285 	add_wait_queue_exclusive(sk_sleep(sk), &wait);
1286 	while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1287 		set_current_state(TASK_INTERRUPTIBLE);
1294 		timeo = schedule_timeout(timeo);
1295 		lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1297 		if (sk->sk_state != BT_LISTEN) {
1302 		if (signal_pending(current)) {
1303 			err = sock_intr_errno(timeo);
1307 	set_current_state(TASK_RUNNING);
1308 	remove_wait_queue(sk_sleep(sk), &wait);
1313 	newsock->state = SS_CONNECTED;
1315 	BT_DBG("new socket %p", nsk);
/* getsockname/getpeername backend: fill a sockaddr_l2 with either the
 * peer's (psm, dst bdaddr, dcid) or the local (sport, src bdaddr,
 * scid) depending on 'peer' (the branch condition is elided in this
 * extract). */
1322 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1324 	struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1325 	struct sock *sk = sock->sk;
1327 	BT_DBG("sock %p, sk %p", sock, sk);
1329 	addr->sa_family = AF_BLUETOOTH;
1330 	*len = sizeof(struct sockaddr_l2);
1333 		la->l2_psm = l2cap_pi(sk)->psm;
1334 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1335 		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1337 		la->l2_psm = l2cap_pi(sk)->sport;
1338 		bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1339 		la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Block until every outstanding ERTM I-frame has been acknowledged (or
 * the channel detaches), honoring signals, a timeout (initialization
 * elided in this extract), and pending socket errors. */
1345 static int __l2cap_wait_ack(struct sock *sk)
1347 	DECLARE_WAITQUEUE(wait, current);
1351 	add_wait_queue(sk_sleep(sk), &wait);
1352 	while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1353 		set_current_state(TASK_INTERRUPTIBLE);
1358 		if (signal_pending(current)) {
1359 			err = sock_intr_errno(timeo);
1364 		timeo = schedule_timeout(timeo);
1367 		err = sock_error(sk);
1371 	set_current_state(TASK_RUNNING);
1372 	remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer expired: if we have already polled the peer
 * remote_max_tx times, abort the channel; otherwise re-arm the monitor
 * timer and poll again with an RR/RNR carrying the P-bit. */
1376 static void l2cap_monitor_timeout(unsigned long arg)
1378 	struct sock *sk = (void *) arg;
1380 	BT_DBG("sk %p", sk);
1383 	if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1384 		l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1389 	l2cap_pi(sk)->retry_count++;
1390 	__mod_monitor_timer();
1392 	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer expired: enter the WAIT_F recovery state,
 * start the monitor timer with retry_count reset to 1, and poll the
 * peer (RR/RNR with P-bit). */
1396 static void l2cap_retrans_timeout(unsigned long arg)
1398 	struct sock *sk = (void *) arg;
1400 	BT_DBG("sk %p", sk);
1403 	l2cap_pi(sk)->retry_count = 1;
1404 	__mod_monitor_timer();
1406 	l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1408 	l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Free transmitted I-frames from the head of the TX queue up to (but
 * not including) expected_ack_seq; stop the retransmission timer once
 * nothing remains unacknowledged. */
1412 static void l2cap_drop_acked_frames(struct sock *sk)
1414 	struct sk_buff *skb;
1416 	while ((skb = skb_peek(TX_QUEUE(sk))) &&
1417 			l2cap_pi(sk)->unacked_frames) {
1418 		if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1421 		skb = skb_dequeue(TX_QUEUE(sk));
1424 		l2cap_pi(sk)->unacked_frames--;
1427 	if (!l2cap_pi(sk)->unacked_frames)
1428 		del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand a fully built L2CAP frame to the HCI ACL transmit path. */
1431 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1433 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1435 	BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1437 	hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming-mode transmit: for each queued PDU, clone it, stamp the
 * next TxSeq into the control field of the clone, recompute the CRC16
 * FCS over the clone if enabled, send it, then advance sk_send_head
 * and free the original (streaming mode never retransmits). TxSeq
 * wraps modulo 64. */
1440 static void l2cap_streaming_send(struct sock *sk)
1442 	struct sk_buff *skb, *tx_skb;
1443 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1446 	while ((skb = sk->sk_send_head)) {
1447 		tx_skb = skb_clone(skb, GFP_ATOMIC);
1449 		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1450 		control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1451 		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1453 		if (pi->fcs == L2CAP_FCS_CRC16) {
1454 			fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1455 			put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1458 		l2cap_do_send(sk, tx_skb);
1460 		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1462 		if (skb_queue_is_last(TX_QUEUE(sk), skb))
1463 			sk->sk_send_head = NULL;
1465 			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1467 		skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the single queued I-frame whose TxSeq matches 'tx_seq'
 * (as requested by an SREJ): locate it in the TX queue, abort the
 * channel if it has already been sent remote_max_tx times, otherwise
 * clone it, refresh the control field (F-bit if pending, current
 * ReqSeq, requested TxSeq), recompute the FCS on the clone, and send. */
1472 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1474 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1475 	struct sk_buff *skb, *tx_skb;
1478 	skb = skb_peek(TX_QUEUE(sk));
1483 		if (bt_cb(skb)->tx_seq == tx_seq)
1486 		if (skb_queue_is_last(TX_QUEUE(sk), skb))
1489 	} while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1491 	if (pi->remote_max_tx &&
1492 			bt_cb(skb)->retries == pi->remote_max_tx) {
1493 		l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1497 	tx_skb = skb_clone(skb, GFP_ATOMIC);
1498 	bt_cb(skb)->retries++;
1499 	control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1501 	if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1502 		control |= L2CAP_CTRL_FINAL;
1503 		pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1506 	control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1507 			| (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1509 	put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1511 	if (pi->fcs == L2CAP_FCS_CRC16) {
1512 		fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1513 		put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1516 	l2cap_do_send(sk, tx_skb);
/* ERTM transmit path: while there is something at sk_send_head and the
 * transmit window is open, clone the next I-frame, stamp the control
 * field (SAR bits kept, pending F-bit consumed, current ReqSeq and the
 * next TxSeq inserted), recompute the CRC16 FCS when enabled, send the
 * clone, arm the retransmission timer, record the frame's TxSeq
 * (modulo 64) and bump the unacked count, then advance sk_send_head.
 * Originals stay queued for retransmission. Returns <0 when not
 * connected (the success return is elided in this extract).
 *
 * FIX: the FCS was computed over and written into skb->data — the
 * *original* queued frame — instead of the clone tx_skb that is
 * actually transmitted. That corrupts the retained copy (the CRC is
 * written over its last two payload bytes before the clone's data was
 * even updated) and sends a frame whose FCS does not match its final
 * control field. Both sibling paths (l2cap_streaming_send,
 * l2cap_retransmit_one_frame) correctly use tx_skb; this now matches
 * them. */
1519 static int l2cap_ertm_send(struct sock *sk)
1521 	struct sk_buff *skb, *tx_skb;
1522 	struct l2cap_pinfo *pi = l2cap_pi(sk);
1526 	if (sk->sk_state != BT_CONNECTED)
1529 	while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
/* Frame already transmitted the maximum number of times: give up. */
1531 		if (pi->remote_max_tx &&
1532 				bt_cb(skb)->retries == pi->remote_max_tx) {
1533 			l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1537 		tx_skb = skb_clone(skb, GFP_ATOMIC);
1539 		bt_cb(skb)->retries++;
1541 		control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1542 		control &= L2CAP_CTRL_SAR;
1544 		if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1545 			control |= L2CAP_CTRL_FINAL;
1546 			pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1548 		control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1549 				| (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1550 		put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1553 		if (pi->fcs == L2CAP_FCS_CRC16) {
1554 			fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1555 			put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1558 		l2cap_do_send(sk, tx_skb);
1560 		__mod_retrans_timer();
1562 		bt_cb(skb)->tx_seq = pi->next_tx_seq;
1563 		pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1565 		pi->unacked_frames++;
1568 		if (skb_queue_is_last(TX_QUEUE(sk), skb))
1569 			sk->sk_send_head = NULL;
1571 			sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Rewind transmission to the last acknowledged frame and resend from
 * there: reset sk_send_head to the queue head and next_tx_seq to
 * expected_ack_seq, then reuse the normal ERTM send path.
 * Fragmented excerpt: sampled lines only.
 */
1579 static int l2cap_retransmit_frames(struct sock *sk)
1581 struct l2cap_pinfo *pi = l2cap_pi(sk);
1584 if (!skb_queue_empty(TX_QUEUE(sk)))
1585 sk->sk_send_head = TX_QUEUE(sk)->next;
1587 pi->next_tx_seq = pi->expected_ack_seq;
1588 ret = l2cap_ertm_send(sk);
/* Acknowledge received I-frames: send RNR while locally busy, otherwise
 * try to piggyback the ack on pending I-frames via l2cap_ertm_send();
 * only if nothing was sent, emit an explicit RR S-frame.
 * Fragmented excerpt: sampled lines only.
 */
1592 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1594 struct sock *sk = (struct sock *)pi;
1597 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1599 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1600 control |= L2CAP_SUPER_RCV_NOT_READY;
1601 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1602 l2cap_send_sframe(pi, control);
/* >0 means I-frames went out carrying the ReqSeq ack already. */
1606 if (l2cap_ertm_send(sk) > 0)
1609 control |= L2CAP_SUPER_RCV_READY;
1610 l2cap_send_sframe(pi, control);
/* Send a SREJ S-frame with the F-bit set for the last (tail) entry of
 * the pending SREJ list.  Fragmented excerpt: sampled lines only.
 */
1613 static void l2cap_send_srejtail(struct sock *sk)
1615 struct srej_list *tail;
1618 control = L2CAP_SUPER_SELECT_REJECT;
1619 control |= L2CAP_CTRL_FINAL;
1621 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1622 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1624 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy user iovec data into skb: the first `count` bytes into the head
 * skb, the remainder into newly allocated continuation fragments chained
 * on frag_list (no L2CAP header on fragments), each capped at conn->mtu.
 * Fragmented excerpt: sampled lines only.
 */
1627 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1629 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1630 struct sk_buff **frag;
1633 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1639 /* Continuation fragments (no L2CAP header) */
1640 frag = &skb_shinfo(skb)->frag_list;
1642 count = min_t(unsigned int, conn->mtu, len);
1644 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1647 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1653 frag = &(*frag)->next;
/* Build a connectionless (SOCK_DGRAM) PDU: basic L2CAP header plus a
 * 2-byte PSM, then the user payload via l2cap_skbuff_fromiovec().
 * Returns the skb or ERR_PTR on failure.
 * Fragmented excerpt: sampled lines only.
 */
1659 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1661 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1662 struct sk_buff *skb;
1663 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1664 struct l2cap_hdr *lh;
1666 BT_DBG("sk %p len %d", sk, (int)len);
1668 count = min_t(unsigned int, (conn->mtu - hlen), len);
1669 skb = bt_skb_send_alloc(sk, count + hlen,
1670 msg->msg_flags & MSG_DONTWAIT, &err);
1672 return ERR_PTR(-ENOMEM);
1674 /* Create L2CAP header */
1675 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1676 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1677 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1678 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1680 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1681 if (unlikely(err < 0)) {
1683 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload.  Returns the skb or ERR_PTR on failure.
 * Fragmented excerpt: sampled lines only.
 */
1688 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1690 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1691 struct sk_buff *skb;
1692 int err, count, hlen = L2CAP_HDR_SIZE;
1693 struct l2cap_hdr *lh;
1695 BT_DBG("sk %p len %d", sk, (int)len);
1697 count = min_t(unsigned int, (conn->mtu - hlen), len);
1698 skb = bt_skb_send_alloc(sk, count + hlen,
1699 msg->msg_flags & MSG_DONTWAIT, &err);
1701 return ERR_PTR(-ENOMEM);
1703 /* Create L2CAP header */
1704 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1705 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1706 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1708 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1709 if (unlikely(err < 0)) {
1711 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header + 16-bit control
 * field, optional 16-bit SDU length (SAR start frames), payload, and a
 * zero placeholder for the FCS (filled in at transmit time).
 * Returns the skb or ERR_PTR on failure.
 * Fragmented excerpt: sampled lines only.
 */
1716 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1718 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1719 struct sk_buff *skb;
1720 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1721 struct l2cap_hdr *lh;
1723 BT_DBG("sk %p len %d", sk, (int)len);
1726 return ERR_PTR(-ENOTCONN);
/* FCS adds 2 more bytes of header budget when CRC16 is in use. */
1731 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1734 count = min_t(unsigned int, (conn->mtu - hlen), len);
1735 skb = bt_skb_send_alloc(sk, count + hlen,
1736 msg->msg_flags & MSG_DONTWAIT, &err);
1738 return ERR_PTR(-ENOMEM);
1740 /* Create L2CAP header */
1741 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1742 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1743 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1744 put_unaligned_le16(control, skb_put(skb, 2));
1746 put_unaligned_le16(sdulen, skb_put(skb, 2));
1748 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1749 if (unlikely(err < 0)) {
1751 return ERR_PTR(err);
/* Reserve the FCS slot; the real CRC is written on (re)transmit. */
1754 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1755 put_unaligned_le16(0, skb_put(skb, 2));
1757 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into START/CONTINUE/END
 * I-frames on a temporary queue, then splice the whole queue onto
 * TX_QUEUE(sk) atomically (so a partial build never reaches the wire).
 * Fragmented excerpt: sampled lines only.
 */
1761 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1763 struct l2cap_pinfo *pi = l2cap_pi(sk);
1764 struct sk_buff *skb;
1765 struct sk_buff_head sar_queue;
1769 skb_queue_head_init(&sar_queue);
1770 control = L2CAP_SDU_START;
/* START frame carries the total SDU length in its sdulen field. */
1771 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1773 return PTR_ERR(skb);
1775 __skb_queue_tail(&sar_queue, skb);
1776 len -= pi->remote_mps;
1777 size += pi->remote_mps;
1782 if (len > pi->remote_mps) {
1783 control = L2CAP_SDU_CONTINUE;
1784 buflen = pi->remote_mps;
1786 control = L2CAP_SDU_END;
1790 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure drop everything built so far; nothing was queued yet. */
1792 skb_queue_purge(&sar_queue);
1793 return PTR_ERR(skb);
1796 __skb_queue_tail(&sar_queue, skb);
1800 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1801 if (sk->sk_send_head == NULL)
1802 sk->sk_send_head = sar_queue.next;
/* sendmsg() entry point: dispatch on socket type / channel mode.
 * SOCK_DGRAM → connectionless PDU; basic mode → single PDU bounded by
 * omtu; ERTM/streaming → single I-frame or SAR segmentation, then kick
 * the transmit engine.  Fragmented excerpt: sampled lines only.
 */
1807 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1809 struct sock *sk = sock->sk;
1810 struct l2cap_pinfo *pi = l2cap_pi(sk);
1811 struct sk_buff *skb;
1815 BT_DBG("sock %p, sk %p", sock, sk);
1817 err = sock_error(sk);
1821 if (msg->msg_flags & MSG_OOB)
1826 if (sk->sk_state != BT_CONNECTED) {
1831 /* Connectionless channel */
1832 if (sk->sk_type == SOCK_DGRAM) {
1833 skb = l2cap_create_connless_pdu(sk, msg, len);
1837 l2cap_do_send(sk, skb);
1844 case L2CAP_MODE_BASIC:
1845 /* Check outgoing MTU */
1846 if (len > pi->omtu) {
1851 /* Create a basic PDU */
1852 skb = l2cap_create_basic_pdu(sk, msg, len);
1858 l2cap_do_send(sk, skb);
1862 case L2CAP_MODE_ERTM:
1863 case L2CAP_MODE_STREAMING:
1864 /* Entire SDU fits into one PDU */
1865 if (len <= pi->remote_mps) {
1866 control = L2CAP_SDU_UNSEGMENTED;
1867 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1872 __skb_queue_tail(TX_QUEUE(sk), skb);
1874 if (sk->sk_send_head == NULL)
1875 sk->sk_send_head = skb;
1878 /* Segment SDU into multiples PDUs */
1879 err = l2cap_sar_segment_sdu(sk, msg, len);
1884 if (pi->mode == L2CAP_MODE_STREAMING) {
1885 l2cap_streaming_send(sk);
/* BUG FIX: the second test must be a bitwise flag check.  The original
 * read "pi->conn_state && L2CAP_CONN_WAIT_F", a logical AND that is
 * true whenever *any* conn_state bit is set, so transmission was
 * skipped far too often.  Test the WAIT_F bit itself.
 */
1887 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1888 pi->conn_state & L2CAP_CONN_WAIT_F) {
1892 err = l2cap_ertm_send(sk);
1900 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() entry point.  A first read on a deferred-setup BT_CONNECT2
 * socket completes channel setup: send the delayed connect response and
 * (if not already sent) the first configure request, then fall through
 * to the generic bluetooth recvmsg.  Fragmented excerpt: sampled lines.
 */
1909 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1911 struct sock *sk = sock->sk;
1915 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1916 struct l2cap_conn_rsp rsp;
1917 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1920 sk->sk_state = BT_CONFIG;
1922 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1923 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1924 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1925 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1926 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1927 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1929 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) {
1934 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
1935 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
1936 l2cap_build_conf_req(sk, buf), buf);
1937 l2cap_pi(sk)->num_conf_req++;
1945 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt: L2CAP_OPTIONS (imtu/omtu/flush/mode/fcs/
 * max_tx/txwin) and L2CAP_LM link-mode flags.  Options struct is
 * pre-filled with current values so a short copy_from_user keeps the
 * rest unchanged.  Fragmented excerpt: sampled lines only.
 */
1948 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1950 struct sock *sk = sock->sk;
1951 struct l2cap_options opts;
1955 BT_DBG("sk %p", sk);
1961 opts.imtu = l2cap_pi(sk)->imtu;
1962 opts.omtu = l2cap_pi(sk)->omtu;
1963 opts.flush_to = l2cap_pi(sk)->flush_to;
1964 opts.mode = l2cap_pi(sk)->mode;
1965 opts.fcs = l2cap_pi(sk)->fcs;
1966 opts.max_tx = l2cap_pi(sk)->max_tx;
1967 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1969 len = min_t(unsigned int, sizeof(opts), optlen);
1970 if (copy_from_user((char *) &opts, optval, len)) {
1975 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1980 l2cap_pi(sk)->mode = opts.mode;
1981 switch (l2cap_pi(sk)->mode) {
1982 case L2CAP_MODE_BASIC:
1983 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1985 case L2CAP_MODE_ERTM:
1986 case L2CAP_MODE_STREAMING:
1995 l2cap_pi(sk)->imtu = opts.imtu;
1996 l2cap_pi(sk)->omtu = opts.omtu;
1997 l2cap_pi(sk)->fcs = opts.fcs;
1998 l2cap_pi(sk)->max_tx = opts.max_tx;
1999 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
2003 if (get_user(opt, (u32 __user *) optval)) {
/* Successive checks escalate sec_level; the strongest flag wins. */
2008 if (opt & L2CAP_LM_AUTH)
2009 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
2010 if (opt & L2CAP_LM_ENCRYPT)
2011 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
2012 if (opt & L2CAP_LM_SECURE)
2013 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
2015 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
2016 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* SOL_BLUETOOTH setsockopt: BT_SECURITY (level range-checked) and
 * BT_DEFER_SETUP (only while bound/listening).  SOL_L2CAP is routed to
 * the legacy handler.  Fragmented excerpt: sampled lines only.
 */
2028 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
2030 struct sock *sk = sock->sk;
2031 struct bt_security sec;
2035 BT_DBG("sk %p", sk);
2037 if (level == SOL_L2CAP)
2038 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
2040 if (level != SOL_BLUETOOTH)
2041 return -ENOPROTOOPT;
2047 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2048 && sk->sk_type != SOCK_RAW) {
2053 sec.level = BT_SECURITY_LOW;
2055 len = min_t(unsigned int, sizeof(sec), optlen);
2056 if (copy_from_user((char *) &sec, optval, len)) {
2061 if (sec.level < BT_SECURITY_LOW ||
2062 sec.level > BT_SECURITY_HIGH) {
2067 l2cap_pi(sk)->sec_level = sec.level;
2070 case BT_DEFER_SETUP:
2071 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2076 if (get_user(opt, (u32 __user *) optval)) {
2081 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt: L2CAP_OPTIONS, L2CAP_LM (sec_level
 * mapped back to LM flags) and L2CAP_CONNINFO (handle + device class;
 * only valid once connected or deferred-accepting).
 * Fragmented excerpt: sampled lines only.
 */
2093 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2095 struct sock *sk = sock->sk;
2096 struct l2cap_options opts;
2097 struct l2cap_conninfo cinfo;
2101 BT_DBG("sk %p", sk);
2103 if (get_user(len, optlen))
2110 opts.imtu = l2cap_pi(sk)->imtu;
2111 opts.omtu = l2cap_pi(sk)->omtu;
2112 opts.flush_to = l2cap_pi(sk)->flush_to;
2113 opts.mode = l2cap_pi(sk)->mode;
2114 opts.fcs = l2cap_pi(sk)->fcs;
2115 opts.max_tx = l2cap_pi(sk)->max_tx;
2116 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2118 len = min_t(unsigned int, len, sizeof(opts));
2119 if (copy_to_user(optval, (char *) &opts, len))
2125 switch (l2cap_pi(sk)->sec_level) {
2126 case BT_SECURITY_LOW:
2127 opt = L2CAP_LM_AUTH;
2129 case BT_SECURITY_MEDIUM:
2130 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2132 case BT_SECURITY_HIGH:
2133 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2141 if (l2cap_pi(sk)->role_switch)
2142 opt |= L2CAP_LM_MASTER;
2144 if (l2cap_pi(sk)->force_reliable)
2145 opt |= L2CAP_LM_RELIABLE;
2147 if (put_user(opt, (u32 __user *) optval))
2151 case L2CAP_CONNINFO:
2152 if (sk->sk_state != BT_CONNECTED &&
2153 !(sk->sk_state == BT_CONNECT2 &&
2154 bt_sk(sk)->defer_setup)) {
2159 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2160 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2162 len = min_t(unsigned int, len, sizeof(cinfo));
2163 if (copy_to_user(optval, (char *) &cinfo, len))
/* SOL_BLUETOOTH getsockopt: BT_SECURITY and BT_DEFER_SETUP; SOL_L2CAP
 * goes to the legacy handler.  Fragmented excerpt: sampled lines only.
 */
2177 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2179 struct sock *sk = sock->sk;
2180 struct bt_security sec;
2183 BT_DBG("sk %p", sk);
2185 if (level == SOL_L2CAP)
2186 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2188 if (level != SOL_BLUETOOTH)
2189 return -ENOPROTOOPT;
2191 if (get_user(len, optlen))
2198 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2199 && sk->sk_type != SOCK_RAW) {
2204 sec.level = l2cap_pi(sk)->sec_level;
2206 len = min_t(unsigned int, len, sizeof(sec));
2207 if (copy_to_user(optval, (char *) &sec, len))
2212 case BT_DEFER_SETUP:
2213 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2218 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown(): in ERTM mode first wait for outstanding frames to be
 * acked, then close the channel; honour SO_LINGER by waiting for
 * BT_CLOSED.  Fragmented excerpt: sampled lines only.
 */
2232 static int l2cap_sock_shutdown(struct socket *sock, int how)
2234 struct sock *sk = sock->sk;
2237 BT_DBG("sock %p, sk %p", sock, sk);
2243 if (!sk->sk_shutdown) {
2244 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2245 err = __l2cap_wait_ack(sk);
2247 sk->sk_shutdown = SHUTDOWN_MASK;
2248 l2cap_sock_clear_timer(sk);
2249 __l2cap_sock_close(sk, 0);
2251 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2252 err = bt_sock_wait_state(sk, BT_CLOSED,
2256 if (!err && sk->sk_err)
/* release(): full shutdown (both directions) followed by socket kill.
 * Fragmented excerpt: sampled lines only.
 */
2263 static int l2cap_sock_release(struct socket *sock)
2265 struct sock *sk = sock->sk;
2268 BT_DBG("sock %p, sk %p", sock, sk);
2273 err = l2cap_sock_shutdown(sock, 2);
2276 l2cap_sock_kill(sk);
/* Channel configuration complete: clear conf state and timers, then
 * wake whichever side is waiting — the connecting socket itself
 * (outgoing) or the listening parent (incoming accept).
 * Fragmented excerpt: sampled lines only.
 */
2280 static void l2cap_chan_ready(struct sock *sk)
2282 struct sock *parent = bt_sk(sk)->parent;
2284 BT_DBG("sk %p, parent %p", sk, parent);
2286 l2cap_pi(sk)->conf_state = 0;
2287 l2cap_sock_clear_timer(sk);
2290 /* Outgoing channel.
2291 * Wake up socket sleeping on connect.
2293 sk->sk_state = BT_CONNECTED;
2294 sk->sk_state_change(sk);
2296 /* Incoming channel.
2297 * Wake up socket sleeping on accept.
2299 parent->sk_data_ready(parent, 0);
2303 /* Copy frame to all raw sockets on that connection */
/* Clones the skb per SOCK_RAW socket on the connection's channel list
 * and queues it for receive; drops silently if the clone fails or the
 * receive queue rejects it.  Fragmented excerpt: sampled lines only.
 */
2304 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2306 struct l2cap_chan_list *l = &conn->chan_list;
2307 struct sk_buff *nskb;
2310 BT_DBG("conn %p", conn);
2312 read_lock(&l->lock);
2313 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2314 if (sk->sk_type != SOCK_RAW)
2317 /* Don't send frame to the socket it came from */
2320 nskb = skb_clone(skb, GFP_ATOMIC);
2324 if (sock_queue_rcv_skb(sk, nskb))
2327 read_unlock(&l->lock);
2330 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel (CID 0x0001) command skb: L2CAP header +
 * command header + payload, spilling payload beyond conn->mtu into
 * frag_list continuation skbs.  Fragmented excerpt: sampled lines only.
 */
2331 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2332 u8 code, u8 ident, u16 dlen, void *data)
2334 struct sk_buff *skb, **frag;
2335 struct l2cap_cmd_hdr *cmd;
2336 struct l2cap_hdr *lh;
2339 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2340 conn, code, ident, dlen);
2342 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2343 count = min_t(unsigned int, conn->mtu, len);
2345 skb = bt_skb_alloc(count, GFP_ATOMIC);
2349 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2350 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2351 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2353 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2356 cmd->len = cpu_to_le16(dlen);
2359 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2360 memcpy(skb_put(skb, count), data, count);
2366 /* Continuation fragments (no L2CAP header) */
2367 frag = &skb_shinfo(skb)->frag_list;
2369 count = min_t(unsigned int, conn->mtu, len);
2371 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2375 memcpy(skb_put(*frag, count), data, count);
2380 frag = &(*frag)->next;
/* Decode one TLV configuration option at *ptr: returns the consumed
 * length; *val is widened from 1/2/4-byte little-endian payloads, or a
 * pointer to the raw bytes for larger options.
 * Fragmented excerpt: sampled lines only.
 */
2390 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2392 struct l2cap_conf_opt *opt = *ptr;
2395 len = L2CAP_CONF_OPT_SIZE + opt->len;
2403 *val = *((u8 *) opt->val);
2407 *val = __le16_to_cpu(*((__le16 *) opt->val));
2411 *val = __le32_to_cpu(*((__le32 *) opt->val));
2415 *val = (unsigned long) opt->val;
2419 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one TLV configuration option at *ptr, converting 1/2/4-byte
 * values to little-endian; larger values are memcpy'd from the pointer
 * passed in val.  *ptr is advanced past the option.
 * Fragmented excerpt: sampled lines only.
 */
2423 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2425 struct l2cap_conf_opt *opt = *ptr;
2427 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2434 *((u8 *) opt->val) = val;
2438 *((__le16 *) opt->val) = cpu_to_le16(val);
2442 *((__le32 *) opt->val) = cpu_to_le32(val);
2446 memcpy(opt->val, (void *) val, len);
2450 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack-timer callback: send a (possibly piggybacked) acknowledgement
 * for frames received but not yet acked.  Fragmented excerpt.
 */
2453 static void l2cap_ack_timeout(unsigned long arg)
2455 struct sock *sk = (void *) arg;
2458 l2cap_send_ack(l2cap_pi(sk));
/* Initialise ERTM per-channel state: sequence counters, the retrans/
 * monitor/ack timers, the SREJ and busy queues, the local-busy work
 * item, and the ERTM backlog receive handler.
 * Fragmented excerpt: sampled lines only.
 */
2462 static inline void l2cap_ertm_init(struct sock *sk)
2464 l2cap_pi(sk)->expected_ack_seq = 0;
2465 l2cap_pi(sk)->unacked_frames = 0;
2466 l2cap_pi(sk)->buffer_seq = 0;
2467 l2cap_pi(sk)->num_acked = 0;
2468 l2cap_pi(sk)->frames_sent = 0;
2470 setup_timer(&l2cap_pi(sk)->retrans_timer,
2471 l2cap_retrans_timeout, (unsigned long) sk);
2472 setup_timer(&l2cap_pi(sk)->monitor_timer,
2473 l2cap_monitor_timeout, (unsigned long) sk);
2474 setup_timer(&l2cap_pi(sk)->ack_timer,
2475 l2cap_ack_timeout, (unsigned long) sk);
2477 __skb_queue_head_init(SREJ_QUEUE(sk));
2478 __skb_queue_head_init(BUSY_QUEUE(sk));
2480 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2482 sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/* Pick a channel mode: keep ERTM/streaming only if the remote's feature
 * mask supports it, otherwise fall back to basic mode.
 * Fragmented excerpt: sampled lines only.
 */
2485 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2488 case L2CAP_MODE_STREAMING:
2489 case L2CAP_MODE_ERTM:
2490 if (l2cap_mode_supported(mode, remote_feat_mask))
2494 return L2CAP_MODE_BASIC;
/* Build our outgoing configure request into `data`: MTU option (basic
 * mode, when not default), an RFC option describing the chosen mode,
 * and an FCS=none option when both sides allow dropping the CRC.
 * Returns the request length.  Fragmented excerpt: sampled lines only.
 */
2498 static int l2cap_build_conf_req(struct sock *sk, void *data)
2500 struct l2cap_pinfo *pi = l2cap_pi(sk);
2501 struct l2cap_conf_req *req = data;
2502 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2503 void *ptr = req->data;
2505 BT_DBG("sk %p", sk);
2507 if (pi->num_conf_req || pi->num_conf_rsp)
2511 case L2CAP_MODE_STREAMING:
2512 case L2CAP_MODE_ERTM:
2513 if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)
/* First request only: downgrade mode if the remote can't do it. */
2518 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2524 case L2CAP_MODE_BASIC:
2525 if (pi->imtu != L2CAP_DEFAULT_MTU)
2526 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2528 rfc.mode = L2CAP_MODE_BASIC;
2530 rfc.max_transmit = 0;
2531 rfc.retrans_timeout = 0;
2532 rfc.monitor_timeout = 0;
2533 rfc.max_pdu_size = 0;
2537 case L2CAP_MODE_ERTM:
2538 rfc.mode = L2CAP_MODE_ERTM;
2539 rfc.txwin_size = pi->tx_win;
2540 rfc.max_transmit = pi->max_tx;
2541 rfc.retrans_timeout = 0;
2542 rfc.monitor_timeout = 0;
2543 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap MPS so an I-frame (hdr+control+FCS ≈ 10 bytes) fits the link MTU. */
2544 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2545 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2547 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2550 if (pi->fcs == L2CAP_FCS_NONE ||
2551 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2552 pi->fcs = L2CAP_FCS_NONE;
2553 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2557 case L2CAP_MODE_STREAMING:
2558 rfc.mode = L2CAP_MODE_STREAMING;
2560 rfc.max_transmit = 0;
2561 rfc.retrans_timeout = 0;
2562 rfc.monitor_timeout = 0;
2563 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2564 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2565 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2567 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2570 if (pi->fcs == L2CAP_FCS_NONE ||
2571 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2572 pi->fcs = L2CAP_FCS_NONE;
2573 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2578 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
2579 (unsigned long) &rfc);
2581 /* FIXME: Need actual value of the flush timeout */
2582 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2583 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2585 req->dcid = cpu_to_le16(pi->dcid);
2586 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated configure request (pi->conf_req) and
 * build our response into `data`: walk the TLV options, negotiate mode/
 * MTU/FCS, and echo an RFC option with our accepted parameters.
 * Returns the response length or -ECONNREFUSED.
 * Fragmented excerpt: sampled lines only.
 */
2591 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2593 struct l2cap_pinfo *pi = l2cap_pi(sk);
2594 struct l2cap_conf_rsp *rsp = data;
2595 void *ptr = rsp->data;
2596 void *req = pi->conf_req;
2597 int len = pi->conf_len;
2598 int type, hint, olen;
2600 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2601 u16 mtu = L2CAP_DEFAULT_MTU;
2602 u16 result = L2CAP_CONF_SUCCESS;
2604 BT_DBG("sk %p", sk);
2606 while (len >= L2CAP_CONF_OPT_SIZE) {
2607 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2609 hint = type & L2CAP_CONF_HINT;
2610 type &= L2CAP_CONF_MASK;
2613 case L2CAP_CONF_MTU:
2617 case L2CAP_CONF_FLUSH_TO:
2621 case L2CAP_CONF_QOS:
2624 case L2CAP_CONF_RFC:
2625 if (olen == sizeof(rfc))
2626 memcpy(&rfc, (void *) val, olen);
2629 case L2CAP_CONF_FCS:
2630 if (val == L2CAP_FCS_NONE)
2631 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint options are listed back with CONF_UNKNOWN. */
2639 result = L2CAP_CONF_UNKNOWN;
2640 *((u8 *) ptr++) = type;
2645 if (pi->num_conf_rsp || pi->num_conf_req > 1)
2649 case L2CAP_MODE_STREAMING:
2650 case L2CAP_MODE_ERTM:
2651 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2652 pi->mode = l2cap_select_mode(rfc.mode,
2653 pi->conn->feat_mask);
2657 if (pi->mode != rfc.mode)
2658 return -ECONNREFUSED;
2664 if (pi->mode != rfc.mode) {
2665 result = L2CAP_CONF_UNACCEPT;
2666 rfc.mode = pi->mode;
2668 if (pi->num_conf_rsp == 1)
2669 return -ECONNREFUSED;
2671 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2672 sizeof(rfc), (unsigned long) &rfc);
2676 if (result == L2CAP_CONF_SUCCESS) {
2677 /* Configure output options and let the other side know
2678 * which ones we don't like. */
2680 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2681 result = L2CAP_CONF_UNACCEPT;
2684 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2686 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2689 case L2CAP_MODE_BASIC:
2690 pi->fcs = L2CAP_FCS_NONE;
2691 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2694 case L2CAP_MODE_ERTM:
2695 pi->remote_tx_win = rfc.txwin_size;
2696 pi->remote_max_tx = rfc.max_transmit;
/* BUG FIX (endianness): rfc.max_pdu_size is wire-order (__le16); it
 * must be converted with le16_to_cpu() before comparing against the
 * host-order MTU, and values *stored* into the rfc struct (which is
 * echoed back on the wire) must use cpu_to_le16(), not le16_to_cpu().
 * On little-endian hardware the original was a no-op; on big-endian
 * it negotiated byte-swapped MPS and timeout values.
 */
2697 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2698 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2700 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2702 rfc.retrans_timeout =
2703 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2704 rfc.monitor_timeout =
2705 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2707 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2709 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2710 sizeof(rfc), (unsigned long) &rfc);
2714 case L2CAP_MODE_STREAMING:
/* Same endianness fix as the ERTM branch above. */
2715 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2716 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2718 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2720 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2722 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2723 sizeof(rfc), (unsigned long) &rfc);
2728 result = L2CAP_CONF_UNACCEPT;
2730 memset(&rfc, 0, sizeof(rfc));
2731 rfc.mode = pi->mode;
2734 if (result == L2CAP_CONF_SUCCESS)
2735 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2737 rsp->scid = cpu_to_le16(pi->dcid);
2738 rsp->result = cpu_to_le16(result);
2739 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configure *response* and build a follow-up request
 * into `data`, adopting accepted values (MTU, flush timeout, RFC
 * timeouts/MPS) into pi.  Returns the new request length or
 * -ECONNREFUSED on an unacceptable mode.
 * Fragmented excerpt: sampled lines only.
 */
2744 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2746 struct l2cap_pinfo *pi = l2cap_pi(sk);
2747 struct l2cap_conf_req *req = data;
2748 void *ptr = req->data;
2751 struct l2cap_conf_rfc rfc;
2753 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2755 while (len >= L2CAP_CONF_OPT_SIZE) {
2756 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2759 case L2CAP_CONF_MTU:
2760 if (val < L2CAP_DEFAULT_MIN_MTU) {
2761 *result = L2CAP_CONF_UNACCEPT;
2762 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2765 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2768 case L2CAP_CONF_FLUSH_TO:
2770 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2774 case L2CAP_CONF_RFC:
2775 if (olen == sizeof(rfc))
2776 memcpy(&rfc, (void *)val, olen);
2778 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2779 rfc.mode != pi->mode)
2780 return -ECONNREFUSED;
2784 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2785 sizeof(rfc), (unsigned long) &rfc);
/* Basic mode is non-negotiable: refuse any other proposed mode. */
2790 if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
2791 return -ECONNREFUSED;
2793 pi->mode = rfc.mode;
2795 if (*result == L2CAP_CONF_SUCCESS) {
2797 case L2CAP_MODE_ERTM:
2798 pi->remote_tx_win = rfc.txwin_size;
2799 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2800 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2801 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2803 case L2CAP_MODE_STREAMING:
2804 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2808 req->dcid = cpu_to_le16(pi->dcid);
2809 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal configure response header (scid/result/flags) into
 * `data`; used for empty/continuation/reject responses.
 * Fragmented excerpt: sampled lines only.
 */
2814 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2816 struct l2cap_conf_rsp *rsp = data;
2817 void *ptr = rsp->data;
2819 BT_DBG("sk %p", sk);
2821 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2822 rsp->result = cpu_to_le16(result);
2823 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from a successful configure response and adopt
 * its ERTM/streaming parameters (tx window, timeouts, MPS) into pi.
 * No-op for basic mode.  Fragmented excerpt: sampled lines only.
 */
2828 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2830 struct l2cap_pinfo *pi = l2cap_pi(sk);
2833 struct l2cap_conf_rfc rfc;
2835 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2837 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2840 while (len >= L2CAP_CONF_OPT_SIZE) {
2841 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2844 case L2CAP_CONF_RFC:
2845 if (olen == sizeof(rfc))
2846 memcpy(&rfc, (void *)val, olen);
2853 case L2CAP_MODE_ERTM:
2854 pi->remote_tx_win = rfc.txwin_size;
2855 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2856 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2857 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2859 case L2CAP_MODE_STREAMING:
2860 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle an incoming Command Reject: if it answers our pending info
 * (feature-mask) request, treat features as done and continue bringing
 * channels up.  Fragmented excerpt: sampled lines only.
 */
2864 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2866 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2868 if (rej->reason != 0x0000)
2871 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2872 cmd->ident == conn->info_ident) {
2873 del_timer(&conn->info_timer);
2875 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2876 conn->info_ident = 0;
2878 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listener on the PSM,
 * check security and backlog, allocate and register the child socket,
 * pick a result/status (success, pending auth, or deferred), send the
 * Connection Response, and kick off info/config exchanges as needed.
 * Fragmented excerpt: sampled lines only.
 */
2884 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2886 struct l2cap_chan_list *list = &conn->chan_list;
2887 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2888 struct l2cap_conn_rsp rsp;
2889 struct sock *parent, *uninitialized_var(sk);
2890 int result, status = L2CAP_CS_NO_INFO;
2892 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2893 __le16 psm = req->psm;
2895 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2897 /* Check if we have socket listening on psm */
2898 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2900 result = L2CAP_CR_BAD_PSM;
2904 /* Check if the ACL is secure enough (if not SDP) */
2905 if (psm != cpu_to_le16(0x0001) &&
2906 !hci_conn_check_link_mode(conn->hcon)) {
2907 conn->disc_reason = 0x05;
2908 result = L2CAP_CR_SEC_BLOCK;
2912 result = L2CAP_CR_NO_MEM;
2914 /* Check for backlog size */
2915 if (sk_acceptq_is_full(parent)) {
2916 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2920 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2924 write_lock_bh(&list->lock);
2926 /* Check if we already have channel with that dcid */
2927 if (__l2cap_get_chan_by_dcid(list, scid)) {
2928 write_unlock_bh(&list->lock);
2929 sock_set_flag(sk, SOCK_ZAPPED);
2930 l2cap_sock_kill(sk);
2934 hci_conn_hold(conn->hcon);
2936 l2cap_sock_init(sk, parent);
2937 bacpy(&bt_sk(sk)->src, conn->src);
2938 bacpy(&bt_sk(sk)->dst, conn->dst);
2939 l2cap_pi(sk)->psm = psm;
2940 l2cap_pi(sk)->dcid = scid;
2942 __l2cap_chan_add(conn, sk, parent);
2943 dcid = l2cap_pi(sk)->scid;
2945 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2947 l2cap_pi(sk)->ident = cmd->ident;
2949 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2950 if (l2cap_check_security(sk)) {
2951 if (bt_sk(sk)->defer_setup) {
2952 sk->sk_state = BT_CONNECT2;
2953 result = L2CAP_CR_PEND;
2954 status = L2CAP_CS_AUTHOR_PEND;
2955 parent->sk_data_ready(parent, 0);
2957 sk->sk_state = BT_CONFIG;
2958 result = L2CAP_CR_SUCCESS;
2959 status = L2CAP_CS_NO_INFO;
2962 sk->sk_state = BT_CONNECT2;
2963 result = L2CAP_CR_PEND;
2964 status = L2CAP_CS_AUTHEN_PEND;
2967 sk->sk_state = BT_CONNECT2;
2968 result = L2CAP_CR_PEND;
2969 status = L2CAP_CS_NO_INFO;
2972 write_unlock_bh(&list->lock);
2975 bh_unlock_sock(parent);
2978 rsp.scid = cpu_to_le16(scid);
2979 rsp.dcid = cpu_to_le16(dcid);
2980 rsp.result = cpu_to_le16(result);
2981 rsp.status = cpu_to_le16(status);
2982 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2984 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2985 struct l2cap_info_req info;
2986 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2988 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2989 conn->info_ident = l2cap_get_ident(conn);
2991 mod_timer(&conn->info_timer, jiffies +
2992 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2994 l2cap_send_cmd(conn, conn->info_ident,
2995 L2CAP_INFO_REQ, sizeof(info), &info);
2998 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT) &&
2999 result == L2CAP_CR_SUCCESS) {
3001 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3002 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3003 l2cap_build_conf_req(sk, buf), buf);
3004 l2cap_pi(sk)->num_conf_req++;
/* Handle a Connection Response to our request: locate the channel by
 * scid (or by ident while still pending), then either move to BT_CONFIG
 * and send the first configure request, stay pending, or tear the
 * channel down on refusal.  Fragmented excerpt: sampled lines only.
 */
3010 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3012 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
3013 u16 scid, dcid, result, status;
3017 scid = __le16_to_cpu(rsp->scid);
3018 dcid = __le16_to_cpu(rsp->dcid);
3019 result = __le16_to_cpu(rsp->result);
3020 status = __le16_to_cpu(rsp->status);
3022 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
3025 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3029 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
3035 case L2CAP_CR_SUCCESS:
3036 sk->sk_state = BT_CONFIG;
3037 l2cap_pi(sk)->ident = 0;
3038 l2cap_pi(sk)->dcid = dcid;
3039 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
3041 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)
3044 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
3046 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3047 l2cap_build_conf_req(sk, req), req);
3048 l2cap_pi(sk)->num_conf_req++;
3052 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3056 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a Configure Request: accumulate option data across continuation
 * packets (flags bit 0), reject oversized buffers, then parse the
 * complete request, send our response, and — once both directions are
 * configured — finalise FCS choice, reset sequence state and mark the
 * channel ready.  Fragmented excerpt: sampled lines only.
 */
3064 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3066 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3072 dcid = __le16_to_cpu(req->dcid);
3073 flags = __le16_to_cpu(req->flags);
3075 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3077 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3081 if (sk->sk_state != BT_CONFIG) {
3082 struct l2cap_cmd_rej rej;
3084 rej.reason = cpu_to_le16(0x0002);
3085 l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,
3090 /* Reject if config buffer is too small. */
3091 len = cmd_len - sizeof(*req);
3092 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3093 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3094 l2cap_build_conf_rsp(sk, rsp,
3095 L2CAP_CONF_REJECT, flags), rsp);
3100 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3101 l2cap_pi(sk)->conf_len += len;
3103 if (flags & 0x0001) {
3104 /* Incomplete config. Send empty response. */
3105 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3106 l2cap_build_conf_rsp(sk, rsp,
3107 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3111 /* Complete config. */
3112 len = l2cap_parse_conf_req(sk, rsp);
3114 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3118 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3119 l2cap_pi(sk)->num_conf_rsp++;
3121 /* Reset config buffer. */
3122 l2cap_pi(sk)->conf_len = 0;
3124 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
3127 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* FCS stays CRC16 unless both peers agreed to drop it. */
3128 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3129 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3130 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3132 sk->sk_state = BT_CONNECTED;
3134 l2cap_pi(sk)->next_tx_seq = 0;
3135 l2cap_pi(sk)->expected_tx_seq = 0;
3136 __skb_queue_head_init(TX_QUEUE(sk));
3137 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3138 l2cap_ertm_init(sk);
3140 l2cap_chan_ready(sk);
3144 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3146 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3147 l2cap_build_conf_req(sk, buf), buf);
3148 l2cap_pi(sk)->num_conf_req++;
/* Handle an incoming L2CAP Configure Response.
 * On UNACCEPT, re-negotiates by parsing the peer's counter-proposal and
 * sending a fresh Configure Request (bounded by num_conf_rsp retries);
 * on any other failure the channel is torn down.  When OUTPUT_DONE is
 * already set, completes the handshake exactly as l2cap_config_req does.
 */
3156 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3158 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3159 u16 scid, flags, result;
/* NOTE(review): cmd->len is a __le16 used here without le16_to_cpu();
 * harmless on little-endian hosts but suspect on big-endian — verify. */
3161 int len = cmd->len - sizeof(*rsp);
3163 scid = __le16_to_cpu(rsp->scid);
3164 flags = __le16_to_cpu(rsp->flags);
3165 result = __le16_to_cpu(rsp->result);
3167 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3168 scid, flags, result);
3170 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3175 case L2CAP_CONF_SUCCESS:
3176 l2cap_conf_rfc_get(sk, rsp->data, len);
3179 case L2CAP_CONF_UNACCEPT:
/* Peer rejected our options: retry with its counter-proposal,
 * but only a bounded number of times. */
3180 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3183 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3184 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3188 /* throw out any old stored conf requests */
3189 result = L2CAP_CONF_SUCCESS;
3190 len = l2cap_parse_conf_rsp(sk, rsp->data,
3193 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3197 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3198 L2CAP_CONF_REQ, len, req);
3199 l2cap_pi(sk)->num_conf_req++;
3200 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: flag the error and disconnect. */
3206 sk->sk_err = ECONNRESET;
3207 l2cap_sock_set_timer(sk, HZ * 5);
3208 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3215 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
/* Remote side accepted and our side already finished: channel is up. */
3217 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3218 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3219 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3220 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3222 sk->sk_state = BT_CONNECTED;
3223 l2cap_pi(sk)->next_tx_seq = 0;
3224 l2cap_pi(sk)->expected_tx_seq = 0;
3225 __skb_queue_head_init(TX_QUEUE(sk));
3226 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3227 l2cap_ertm_init(sk);
3229 l2cap_chan_ready(sk);
/* Handle a peer-initiated Disconnection Request: acknowledge with a
 * Disconnection Response, shut the socket down and remove the channel.
 */
3237 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3239 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3240 struct l2cap_disconn_rsp rsp;
3244 scid = __le16_to_cpu(req->scid);
3245 dcid = __le16_to_cpu(req->dcid);
3247 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid: look the channel up by it. */
3249 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Echo the pair back with the roles swapped to our point of view. */
3253 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3254 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3255 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3257 sk->sk_shutdown = SHUTDOWN_MASK;
3259 l2cap_chan_del(sk, ECONNRESET);
3262 l2cap_sock_kill(sk);
/* Handle a Disconnection Response to our own request: the channel is
 * gone on both sides, so remove it with no error.
 */
3266 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3268 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3272 scid = __le16_to_cpu(rsp->scid);
3273 dcid = __le16_to_cpu(rsp->dcid);
3275 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3277 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3281 l2cap_chan_del(sk, 0);
3284 l2cap_sock_kill(sk);
/* Handle an Information Request: report our feature mask or fixed
 * channel map; any other info type gets a NOTSUPP response.
 */
3288 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3290 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3293 type = __le16_to_cpu(req->type);
3295 BT_DBG("type 0x%4.4x", type);
3297 if (type == L2CAP_IT_FEAT_MASK) {
3299 u32 feat_mask = l2cap_feat_mask;
3300 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3301 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3302 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming only when enabled (enable_ertm param). */
3304 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3306 put_unaligned_le32(feat_mask, rsp->data);
3307 l2cap_send_cmd(conn, cmd->ident,
3308 L2CAP_INFO_RSP, sizeof(buf), buf);
3309 } else if (type == L2CAP_IT_FIXED_CHAN) {
3311 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3312 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3313 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed channel bitmap follows the 4-byte rsp header. */
3314 memcpy(buf + 4, l2cap_fixed_chan, 8);
3315 l2cap_send_cmd(conn, cmd->ident,
3316 L2CAP_INFO_RSP, sizeof(buf), buf);
3318 struct l2cap_info_rsp rsp;
3319 rsp.type = cpu_to_le16(type);
3320 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3321 l2cap_send_cmd(conn, cmd->ident,
3322 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response from the peer.
 * After the feature mask arrives, optionally chain a fixed-channel
 * query; once discovery is finished, mark it done and kick off any
 * channels that were waiting on it via l2cap_conn_start().
 */
3328 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3330 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3333 type = __le16_to_cpu(rsp->type);
3334 result = __le16_to_cpu(rsp->result);
3336 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* A response arrived: stop the info request timeout. */
3338 del_timer(&conn->info_timer);
3340 if (type == L2CAP_IT_FEAT_MASK) {
3341 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: ask for its channel map next. */
3343 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3344 struct l2cap_info_req req;
3345 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3347 conn->info_ident = l2cap_get_ident(conn);
3349 l2cap_send_cmd(conn, conn->info_ident,
3350 L2CAP_INFO_REQ, sizeof(req), &req);
3352 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3353 conn->info_ident = 0;
3355 l2cap_conn_start(conn);
3357 } else if (type == L2CAP_IT_FIXED_CHAN) {
3358 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3359 conn->info_ident = 0;
3361 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signaling channel (CID 0x0001).
 * Iterates over every command in the C-frame, dispatching to the
 * per-command handlers; a handler error is answered with a Command
 * Reject.  The raw frame is also mirrored to raw sockets first.
 */
3367 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3369 u8 *data = skb->data;
3371 struct l2cap_cmd_hdr cmd;
3374 l2cap_raw_recv(conn, skb);
/* One C-frame may carry several commands back to back. */
3376 while (len >= L2CAP_CMD_HDR_SIZE) {
3378 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3379 data += L2CAP_CMD_HDR_SIZE;
3380 len -= L2CAP_CMD_HDR_SIZE;
3382 cmd_len = le16_to_cpu(cmd.len);
3384 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Bail out on truncated payloads or the reserved ident 0. */
3386 if (cmd_len > len || !cmd.ident) {
3387 BT_DBG("corrupted command");
3392 case L2CAP_COMMAND_REJ:
3393 l2cap_command_rej(conn, &cmd, data);
3396 case L2CAP_CONN_REQ:
3397 err = l2cap_connect_req(conn, &cmd, data);
3400 case L2CAP_CONN_RSP:
3401 err = l2cap_connect_rsp(conn, &cmd, data);
3404 case L2CAP_CONF_REQ:
3405 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3408 case L2CAP_CONF_RSP:
3409 err = l2cap_config_rsp(conn, &cmd, data);
3412 case L2CAP_DISCONN_REQ:
3413 err = l2cap_disconnect_req(conn, &cmd, data);
3416 case L2CAP_DISCONN_RSP:
3417 err = l2cap_disconnect_rsp(conn, &cmd, data);
3420 case L2CAP_ECHO_REQ:
3421 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3424 case L2CAP_ECHO_RSP:
3427 case L2CAP_INFO_REQ:
3428 err = l2cap_information_req(conn, &cmd, data);
3431 case L2CAP_INFO_RSP:
3432 err = l2cap_information_rsp(conn, &cmd, data);
3436 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Handler failed: answer with a Command Reject. */
3442 struct l2cap_cmd_rej rej;
3443 BT_DBG("error %d", err);
3445 /* FIXME: Map err to a valid reason */
3446 rej.reason = cpu_to_le16(0);
3447 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify (and strip) the 2-byte CRC16 FCS trailer of a received frame
 * when the channel uses L2CAP_FCS_CRC16.  The CRC covers the L2CAP
 * header plus control field (hdr_size) and the payload.
 */
3457 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3459 u16 our_fcs, rcv_fcs;
3460 int hdr_size = L2CAP_HDR_SIZE + 2;
3462 if (pi->fcs == L2CAP_FCS_CRC16) {
/* Trim first so skb->len now points just before the FCS bytes. */
3463 skb_trim(skb, skb->len - 2);
3464 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3465 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3467 if (our_fcs != rcv_fcs)
/* Respond to a peer poll: send pending I-frames if possible, an RNR
 * when we are locally busy, or an RR when there is nothing to send.
 */
3473 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3475 struct l2cap_pinfo *pi = l2cap_pi(sk);
3478 pi->frames_sent = 0;
3480 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
/* Locally busy: tell the peer to hold off (RNR). */
3482 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3483 control |= L2CAP_SUPER_RCV_NOT_READY;
3484 l2cap_send_sframe(pi, control);
3485 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3488 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3489 l2cap_retransmit_frames(sk);
3491 l2cap_ertm_send(sk);
/* Nothing went out and we are not busy: acknowledge with an RR. */
3493 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3494 pi->frames_sent == 0) {
3495 control |= L2CAP_SUPER_RCV_READY;
3496 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the
 * queue ordered by tx_seq (modulo-64 distance from buffer_seq).
 * Duplicates are detected by matching tx_seq against queued frames.
 */
3500 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3502 struct sk_buff *next_skb;
3503 struct l2cap_pinfo *pi = l2cap_pi(sk);
3504 int tx_seq_offset, next_tx_seq_offset;
3506 bt_cb(skb)->tx_seq = tx_seq;
3507 bt_cb(skb)->sar = sar;
3509 next_skb = skb_peek(SREJ_QUEUE(sk));
3511 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Sequence numbers wrap at 64; compare by offset from buffer_seq. */
3515 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3516 if (tx_seq_offset < 0)
3517 tx_seq_offset += 64;
3520 if (bt_cb(next_skb)->tx_seq == tx_seq)
3523 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3524 pi->buffer_seq) % 64;
3525 if (next_tx_seq_offset < 0)
3526 next_tx_seq_offset += 64;
/* Found the first queued frame that sorts after us: insert before it. */
3528 if (next_tx_seq_offset > tx_seq_offset) {
3529 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3533 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3536 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3538 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble an ERTM SDU from its SAR segments and deliver it to the
 * socket receive queue.  Tracks segmentation state in conn_state
 * (SAR_SDU while a segmented SDU is in flight, SAR_RETRY when delivery
 * of a completed SDU must be retried after a receive-buffer failure).
 * Protocol violations end with a disconnect request.
 */
3543 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3545 struct l2cap_pinfo *pi = l2cap_pi(sk);
3546 struct sk_buff *_skb;
3549 switch (control & L2CAP_CTRL_SAR) {
3550 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while a segmented SDU is open is an error. */
3551 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3554 err = sock_queue_rcv_skb(sk, skb);
3560 case L2CAP_SDU_START:
3561 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
/* First two payload bytes of a START segment carry the SDU length. */
3564 pi->sdu_len = get_unaligned_le16(skb->data);
3566 if (pi->sdu_len > pi->imtu)
3569 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3573 /* pull sdu_len bytes only after alloc, because of Local Busy
3574 * condition we have to be sure that this will be executed
3575 * only once, i.e., when alloc does not fail */
3578 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3580 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3581 pi->partial_sdu_len = skb->len;
3584 case L2CAP_SDU_CONTINUE:
3585 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3591 pi->partial_sdu_len += skb->len;
3592 if (pi->partial_sdu_len > pi->sdu_len)
3595 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* SDU_END segment: finish reassembly and deliver. */
3600 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* On a retry the END payload was already accounted/copied; skip it. */
3606 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3607 pi->partial_sdu_len += skb->len;
3609 if (pi->partial_sdu_len > pi->imtu)
3612 if (pi->partial_sdu_len != pi->sdu_len)
3615 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3618 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3620 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3624 err = sock_queue_rcv_skb(sk, _skb);
/* Queueing failed (receiver busy): keep the SDU and retry later. */
3627 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3631 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3632 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3646 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* Drain the local-busy backlog toward the socket.  If everything was
 * delivered, leave the local busy state: either poll the peer with an
 * RR+P (when an RNR had been sent) or simply clear the busy flags.
 * Returns non-zero while frames remain queued.
 */
3651 static int l2cap_try_push_rx_skb(struct sock *sk)
3653 struct l2cap_pinfo *pi = l2cap_pi(sk);
3654 struct sk_buff *skb;
3658 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3659 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3660 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Still can't deliver: put the frame back and stay busy. */
3662 skb_queue_head(BUSY_QUEUE(sk), skb);
3666 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3669 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* We told the peer RNR earlier: poll it (RR with P-bit) and wait
 * for the F-bit response before resuming. */
3672 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3673 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3674 l2cap_send_sframe(pi, control);
3675 l2cap_pi(sk)->retry_count = 1;
3677 del_timer(&pi->retrans_timer);
3678 __mod_monitor_timer();
3680 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3683 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3684 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3686 BT_DBG("sk %p, Exit local busy", sk);
/* Workqueue handler for the local-busy condition: repeatedly tries to
 * push the receive backlog, sleeping between attempts, and gives up
 * (disconnecting the channel) after L2CAP_LOCAL_BUSY_TRIES rounds,
 * a pending signal, or a socket error.
 */
3691 static void l2cap_busy_work(struct work_struct *work)
3693 DECLARE_WAITQUEUE(wait, current);
3694 struct l2cap_pinfo *pi =
3695 container_of(work, struct l2cap_pinfo, busy_work);
/* The socket embeds l2cap_pinfo at its start, so this cast is valid. */
3696 struct sock *sk = (struct sock *)pi;
3697 int n_tries = 0, timeo = HZ/5, err;
3698 struct sk_buff *skb;
3702 add_wait_queue(sk_sleep(sk), &wait);
3703 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3704 set_current_state(TASK_INTERRUPTIBLE);
/* Too many failed rounds: the receiver is stuck — tear down. */
3706 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3708 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3715 if (signal_pending(current)) {
3716 err = sock_intr_errno(timeo);
3721 timeo = schedule_timeout(timeo);
3724 err = sock_error(sk);
3728 if (l2cap_try_push_rx_skb(sk) == 0)
3732 set_current_state(TASK_RUNNING);
3733 remove_wait_queue(sk_sleep(sk), &wait);
/* Deliver a received I-frame to the reassembly path, entering the
 * local-busy state (RNR to the peer + deferred busy_work) when the
 * socket cannot accept more data right now.
 */
3738 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3740 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Already busy: just append to the backlog and retry the drain. */
3743 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3744 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3745 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3746 return l2cap_try_push_rx_skb(sk);
3751 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3753 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3757 /* Busy Condition */
3758 BT_DBG("sk %p, Enter local busy", sk);
3760 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3761 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3762 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop sending (RNR with our current reqseq). */
3764 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3765 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3766 l2cap_send_sframe(pi, sctrl);
3768 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3770 del_timer(&pi->ack_timer);
/* Defer draining the backlog to process context. */
3772 queue_work(_busy_wq, &pi->busy_work);
/* Streaming-mode SDU reassembly.  Unlike ERTM, streaming mode is
 * best-effort: malformed sequences reset state rather than tearing the
 * channel down, and loss is tolerated.
 */
3777 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3779 struct l2cap_pinfo *pi = l2cap_pi(sk);
3780 struct sk_buff *_skb;
3784 * TODO: We have to notify the userland if some data is lost with the
3788 switch (control & L2CAP_CTRL_SAR) {
3789 case L2CAP_SDU_UNSEGMENTED:
3790 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3795 err = sock_queue_rcv_skb(sk, skb);
3801 case L2CAP_SDU_START:
3802 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* START payload begins with the 16-bit total SDU length. */
3807 pi->sdu_len = get_unaligned_le16(skb->data);
3810 if (pi->sdu_len > pi->imtu) {
3815 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3821 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3823 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3824 pi->partial_sdu_len = skb->len;
3828 case L2CAP_SDU_CONTINUE:
3829 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3832 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3834 pi->partial_sdu_len += skb->len;
3835 if (pi->partial_sdu_len > pi->sdu_len)
/* SDU_END: close the SDU and deliver only if the length matches. */
3843 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3846 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3848 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3849 pi->partial_sdu_len += skb->len;
3851 if (pi->partial_sdu_len > pi->imtu)
3854 if (pi->partial_sdu_len == pi->sdu_len) {
3855 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3856 err = sock_queue_rcv_skb(sk, _skb);
/* After a missing frame arrives, release any consecutively-numbered
 * frames buffered in the SREJ queue up to the next remaining gap.
 */
3871 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3873 struct sk_buff *skb;
3876 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Queue is sorted; stop at the first non-consecutive frame. */
3877 if (bt_cb(skb)->tx_seq != tx_seq)
3880 skb = skb_dequeue(SREJ_QUEUE(sk));
3881 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3882 l2cap_ertm_reassembly_sdu(sk, skb, control);
3883 l2cap_pi(sk)->buffer_seq_srej =
3884 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3885 tx_seq = (tx_seq + 1) % 64;
/* A duplicate of an already-SREJ'd frame arrived: re-issue SREJ
 * S-frames for every sequence number still outstanding, rotating
 * each entry to the list tail as it is resent.
 */
3889 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3891 struct l2cap_pinfo *pi = l2cap_pi(sk);
3892 struct srej_list *l, *tmp;
3895 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* Reached the entry for the frame that just arrived: done. */
3896 if (l->tx_seq == tx_seq) {
3901 control = L2CAP_SUPER_SELECT_REJECT;
3902 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3903 l2cap_send_sframe(pi, control);
3905 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send SREJ S-frames for every sequence number between expected_tx_seq
 * and the tx_seq that actually arrived, recording each missing number
 * in the SREJ list; expected_tx_seq ends up one past tx_seq.
 */
3909 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3911 struct l2cap_pinfo *pi = l2cap_pi(sk);
3912 struct srej_list *new;
3915 while (tx_seq != pi->expected_tx_seq) {
3916 control = L2CAP_SUPER_SELECT_REJECT;
3917 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3918 l2cap_send_sframe(pi, control);
/* NOTE(review): GFP_ATOMIC allocation is not checked for NULL here;
 * a failed kzalloc would oops on the next line — needs handling. */
3920 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3921 new->tx_seq = pi->expected_tx_seq;
3922 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3923 list_add_tail(&new->list, SREJ_LIST(sk));
3925 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* Process a received ERTM I-frame.
 * Handles the F-bit/WAIT_F interaction, acknowledgement bookkeeping,
 * out-of-sequence detection with SREJ-based recovery, and finally
 * pushes in-sequence frames up to the reassembly layer, acking every
 * num_to_ack frames.
 *
 * FIX: conn_state is a bit mask, so testing it with '==' against
 * L2CAP_CONN_LOCAL_BUSY only matched when LOCAL_BUSY happened to be
 * the sole flag set (e.g. it missed LOCAL_BUSY|RNR_SENT).  Use a
 * bitwise AND, consistent with every other conn_state test in this
 * file, so busy handling triggers whenever the flag is present.
 */
3928 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3930 struct l2cap_pinfo *pi = l2cap_pi(sk);
3931 u8 tx_seq = __get_txseq(rx_control);
3932 u8 req_seq = __get_reqseq(rx_control);
3933 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3934 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every sixth of the transmit window. */
3935 int num_to_ack = (pi->tx_win/6) + 1;
3938 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
/* F-bit answers our earlier poll: stop the monitor timer. */
3941 if (L2CAP_CTRL_FINAL & rx_control &&
3942 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3943 del_timer(&pi->monitor_timer);
3944 if (pi->unacked_frames > 0)
3945 __mod_retrans_timer();
3946 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3949 pi->expected_ack_seq = req_seq;
3950 l2cap_drop_acked_frames(sk);
3952 if (tx_seq == pi->expected_tx_seq)
3955 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3956 if (tx_seq_offset < 0)
3957 tx_seq_offset += 64;
3959 /* invalid tx_seq */
3960 if (tx_seq_offset >= pi->tx_win) {
3961 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3965 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
/* SREJ recovery already in progress for earlier gaps. */
3968 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3969 struct srej_list *first;
3971 first = list_first_entry(SREJ_LIST(sk),
3972 struct srej_list, list);
3973 if (tx_seq == first->tx_seq) {
3974 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3975 l2cap_check_srej_gap(sk, tx_seq);
3977 list_del(&first->list);
/* All gaps filled: leave SREJ recovery. */
3980 if (list_empty(SREJ_LIST(sk))) {
3981 pi->buffer_seq = pi->buffer_seq_srej;
3982 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3984 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3987 struct srej_list *l;
3989 /* duplicated tx_seq */
3990 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3993 list_for_each_entry(l, SREJ_LIST(sk), list) {
3994 if (l->tx_seq == tx_seq) {
3995 l2cap_resend_srejframe(sk, tx_seq);
3999 l2cap_send_srejframe(sk, tx_seq);
4002 expected_tx_seq_offset =
4003 (pi->expected_tx_seq - pi->buffer_seq) % 64;
4004 if (expected_tx_seq_offset < 0)
4005 expected_tx_seq_offset += 64;
4007 /* duplicated tx_seq */
4008 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: start SREJ recovery. */
4011 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
4013 BT_DBG("sk %p, Enter SREJ", sk);
4015 INIT_LIST_HEAD(SREJ_LIST(sk));
4016 pi->buffer_seq_srej = pi->buffer_seq;
4018 __skb_queue_head_init(SREJ_QUEUE(sk));
4019 __skb_queue_head_init(BUSY_QUEUE(sk));
4020 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
4022 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
4024 l2cap_send_srejframe(sk, tx_seq);
4026 del_timer(&pi->ack_timer);
/* In-sequence frame path. */
4031 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4033 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4034 bt_cb(skb)->tx_seq = tx_seq;
4035 bt_cb(skb)->sar = sar;
4036 __skb_queue_tail(SREJ_QUEUE(sk), skb);
4040 err = l2cap_push_rx_skb(sk, skb, rx_control);
4044 if (rx_control & L2CAP_CTRL_FINAL) {
4045 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4046 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4048 l2cap_retransmit_frames(sk);
4053 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4054 if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver Ready (RR) S-frame: ack outstanding I-frames and
 * react to the P/F bits — answer a poll, finish retransmission after a
 * final bit, or resume normal sending.
 */
4064 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
4066 struct l2cap_pinfo *pi = l2cap_pi(sk);
4068 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
4071 pi->expected_ack_seq = __get_reqseq(rx_control);
4072 l2cap_drop_acked_frames(sk);
/* Peer is polling us (P-bit): respond with the F-bit set. */
4074 if (rx_control & L2CAP_CTRL_POLL) {
4075 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4076 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4077 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4078 (pi->unacked_frames > 0))
4079 __mod_retrans_timer();
4081 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4082 l2cap_send_srejtail(sk);
4084 l2cap_send_i_or_rr_or_rnr(sk);
/* F-bit closes a REJ exchange: retransmit if not already done. */
4087 } else if (rx_control & L2CAP_CTRL_FINAL) {
4088 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4090 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4091 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4093 l2cap_retransmit_frames(sk);
4096 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4097 (pi->unacked_frames > 0))
4098 __mod_retrans_timer();
4100 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4101 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4104 l2cap_ertm_send(sk);
/* Handle a Reject (REJ) S-frame: the peer wants everything from
 * req_seq retransmitted.  Ack what it did receive, retransmit, and
 * remember an in-flight poll so duplicate REJs are ignored (REJ_ACT).
 */
4109 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4111 struct l2cap_pinfo *pi = l2cap_pi(sk);
4112 u8 tx_seq = __get_reqseq(rx_control);
4114 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4116 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4118 pi->expected_ack_seq = tx_seq;
4119 l2cap_drop_acked_frames(sk);
4121 if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit REJ: retransmit only if no REJ action is pending. */
4122 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4123 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4125 l2cap_retransmit_frames(sk);
4127 l2cap_retransmit_frames(sk);
4129 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4130 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the
 * frame with the requested sequence number, tracking the P/F bits and
 * the SREJ_ACT state to suppress duplicate retransmissions.
 */
4133 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4135 struct l2cap_pinfo *pi = l2cap_pi(sk);
4136 u8 tx_seq = __get_reqseq(rx_control);
4138 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4140 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* SREJ with P-bit: also acts as an ack up to tx_seq and a poll. */
4142 if (rx_control & L2CAP_CTRL_POLL) {
4143 pi->expected_ack_seq = tx_seq;
4144 l2cap_drop_acked_frames(sk);
4146 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4147 l2cap_retransmit_one_frame(sk, tx_seq);
4149 l2cap_ertm_send(sk);
4151 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4152 pi->srej_save_reqseq = tx_seq;
4153 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4155 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F-bit: skip the retransmit if this SREJ was already acted on. */
4156 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4157 pi->srej_save_reqseq == tx_seq)
4158 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4160 l2cap_retransmit_one_frame(sk, tx_seq);
4162 l2cap_retransmit_one_frame(sk, tx_seq);
4163 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4164 pi->srej_save_reqseq = tx_seq;
4165 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, ack
 * what it acknowledged, stop retransmitting, and answer a poll with
 * the appropriate final-bit frame.
 */
4170 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4172 struct l2cap_pinfo *pi = l2cap_pi(sk);
4173 u8 tx_seq = __get_reqseq(rx_control);
4175 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4177 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4178 pi->expected_ack_seq = tx_seq;
4179 l2cap_drop_acked_frames(sk);
4181 if (rx_control & L2CAP_CTRL_POLL)
4182 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
/* No SREJ recovery pending: halt retransmissions while peer is busy. */
4184 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4185 del_timer(&pi->retrans_timer);
4186 if (rx_control & L2CAP_CTRL_POLL)
4187 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4191 if (rx_control & L2CAP_CTRL_POLL)
4192 l2cap_send_srejtail(sk);
4194 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame to the RR/REJ/SREJ/RNR handler after
 * performing the common F-bit/WAIT_F bookkeeping.
 */
4197 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4199 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F-bit answers our poll: stop the monitor timer. */
4201 if (L2CAP_CTRL_FINAL & rx_control &&
4202 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4203 del_timer(&l2cap_pi(sk)->monitor_timer);
4204 if (l2cap_pi(sk)->unacked_frames > 0)
4205 __mod_retrans_timer();
4206 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4209 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4210 case L2CAP_SUPER_RCV_READY:
4211 l2cap_data_channel_rrframe(sk, rx_control);
4214 case L2CAP_SUPER_REJECT:
4215 l2cap_data_channel_rejframe(sk, rx_control);
4218 case L2CAP_SUPER_SELECT_REJECT:
4219 l2cap_data_channel_srejframe(sk, rx_control);
4222 case L2CAP_SUPER_RCV_NOT_READY:
4223 l2cap_data_channel_rnrframe(sk, rx_control);
/* Validate a received ERTM frame (FCS, length vs MPS, req_seq window)
 * and route it to the I-frame or S-frame handler.  Frames with an
 * invalid req_seq or inconsistent framing cause a disconnect.
 */
4231 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4233 struct l2cap_pinfo *pi = l2cap_pi(sk);
4236 int len, next_tx_seq_offset, req_seq_offset;
4238 control = get_unaligned_le16(skb->data);
4243 * We can just drop the corrupted I-frame here.
4244 * Receiver will miss it and start proper recovery
4245 * procedures and ask retransmission.
4247 if (l2cap_check_fcs(pi, skb))
/* A SAR_START I-frame also carries the 2-byte SDU length. */
4250 if (__is_sar_start(control) && __is_iframe(control))
4253 if (pi->fcs == L2CAP_FCS_CRC16)
4256 if (len > pi->mps) {
4257 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* req_seq must lie within [expected_ack_seq, next_tx_seq], mod 64. */
4261 req_seq = __get_reqseq(control);
4262 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4263 if (req_seq_offset < 0)
4264 req_seq_offset += 64;
4266 next_tx_seq_offset =
4267 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4268 if (next_tx_seq_offset < 0)
4269 next_tx_seq_offset += 64;
4271 /* check for invalid req-seq */
4272 if (req_seq_offset > next_tx_seq_offset) {
4273 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4277 if (__is_iframe(control)) {
4279 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4283 l2cap_data_channel_iframe(sk, control, skb);
4287 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4291 l2cap_data_channel_sframe(sk, control, skb);
/* Deliver a received frame on a connection-oriented data channel,
 * dispatching per channel mode: basic (plain queue), ERTM (full
 * reliable state machine, via backlog when the socket is owned), or
 * streaming (best-effort with sequence resync).
 */
4301 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4304 struct l2cap_pinfo *pi;
4309 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4311 BT_DBG("unknown cid 0x%4.4x", cid);
4317 BT_DBG("sk %p, len %d", sk, skb->len);
4319 if (sk->sk_state != BT_CONNECTED)
4323 case L2CAP_MODE_BASIC:
4324 /* If socket recv buffers overflows we drop data here
4325 * which is *bad* because L2CAP has to be reliable.
4326 * But we don't have any other choice. L2CAP doesn't
4327 * provide flow control mechanism. */
4329 if (pi->imtu < skb->len)
4332 if (!sock_queue_rcv_skb(sk, skb))
4336 case L2CAP_MODE_ERTM:
/* If user space holds the socket lock, defer to the backlog. */
4337 if (!sock_owned_by_user(sk)) {
4338 l2cap_ertm_data_rcv(sk, skb);
4340 if (sk_add_backlog(sk, skb))
4346 case L2CAP_MODE_STREAMING:
4347 control = get_unaligned_le16(skb->data);
4351 if (l2cap_check_fcs(pi, skb))
4354 if (__is_sar_start(control))
4357 if (pi->fcs == L2CAP_FCS_CRC16)
/* Streaming mode carries I-frames only; S-frames are invalid. */
4360 if (len > pi->mps || len < 0 || __is_sframe(control))
4363 tx_seq = __get_txseq(control);
/* Tolerate loss: resynchronize expected_tx_seq on a gap. */
4365 if (pi->expected_tx_seq == tx_seq)
4366 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4368 pi->expected_tx_seq = (tx_seq + 1) % 64;
4370 l2cap_streaming_reassembly_sdu(sk, skb, control);
4375 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (CID 0x0002) frame to the socket bound to
 * the given PSM, subject to state and MTU checks.
 */
4389 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4393 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4397 BT_DBG("sk %p, len %d", sk, skb->len);
4399 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4402 if (l2cap_pi(sk)->imtu < skb->len)
4405 if (!sock_queue_rcv_skb(sk, skb))
/* Entry point for a complete reassembled L2CAP frame: validate the
 * basic header length and route by CID to the signaling handler, the
 * connectionless handler, or a data channel.
 */
4417 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4419 struct l2cap_hdr *lh = (void *) skb->data;
4423 skb_pull(skb, L2CAP_HDR_SIZE);
4424 cid = __le16_to_cpu(lh->cid);
4425 len = __le16_to_cpu(lh->len);
/* Header-declared length must match the actual payload. */
4427 if (len != skb->len) {
4432 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4435 case L2CAP_CID_SIGNALING:
4436 l2cap_sig_channel(conn, skb);
4439 case L2CAP_CID_CONN_LESS:
4440 psm = get_unaligned_le16(skb->data);
4442 l2cap_conless_channel(conn, psm, skb);
4446 l2cap_data_channel(conn, cid, skb);
4451 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: should we accept an incoming ACL connection?
 * Scans listening L2CAP sockets; an exact local-address match wins
 * over BDADDR_ANY wildcards.  Returns the accept/role-switch mask.
 */
4453 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4455 int exact = 0, lm1 = 0, lm2 = 0;
4456 register struct sock *sk;
4457 struct hlist_node *node;
/* L2CAP runs over ACL links only. */
4459 if (type != ACL_LINK)
4462 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4464 /* Find listening sockets and check their link_mode */
4465 read_lock(&l2cap_sk_list.lock);
4466 sk_for_each(sk, node, &l2cap_sk_list.head) {
4467 if (sk->sk_state != BT_LISTEN)
4470 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4471 lm1 |= HCI_LM_ACCEPT;
4472 if (l2cap_pi(sk)->role_switch)
4473 lm1 |= HCI_LM_MASTER;
4475 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4476 lm2 |= HCI_LM_ACCEPT;
4477 if (l2cap_pi(sk)->role_switch)
4478 lm2 |= HCI_LM_MASTER;
4481 read_unlock(&l2cap_sk_list.lock);
4483 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed.  On success set up
 * the L2CAP connection object; on failure tear it down.
 */
4486 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4488 struct l2cap_conn *conn;
4490 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4492 if (hcon->type != ACL_LINK)
4496 conn = l2cap_conn_add(hcon, status);
4498 l2cap_conn_ready(conn);
4500 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: supply the reason code to use when the ACL link is
 * about to be disconnected (recorded earlier in conn->disc_reason).
 */
4505 static int l2cap_disconn_ind(struct hci_conn *hcon)
4507 struct l2cap_conn *conn = hcon->l2cap_data;
4509 BT_DBG("hcon %p", hcon);
4511 if (hcon->type != ACL_LINK || !conn)
4514 return conn->disc_reason;
/* HCI callback: the ACL link went down — destroy the L2CAP connection
 * and error out all of its channels.
 */
4517 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4519 BT_DBG("hcon %p reason %d", hcon, reason);
4521 if (hcon->type != ACL_LINK)
4524 l2cap_conn_del(hcon, bt_err(reason));
/* React to a change of link encryption for a connection-oriented
 * socket: losing encryption on a MEDIUM-security channel arms a grace
 * timer, on a HIGH-security channel closes it immediately; regaining
 * encryption cancels the grace timer.
 */
4529 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4531 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4534 if (encrypt == 0x00) {
4535 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4536 l2cap_sock_clear_timer(sk);
4537 l2cap_sock_set_timer(sk, HZ * 5);
4538 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4539 __l2cap_sock_close(sk, ECONNREFUSED);
4541 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4542 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption completed for the link.
 * Walks every channel on the connection and advances the ones that
 * were waiting on security: BT_CONNECT channels now send their
 * Connection Request, BT_CONNECT2 channels send the deferred
 * Connection Response (success or security block).
 */
4546 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4548 struct l2cap_chan_list *l;
4549 struct l2cap_conn *conn = hcon->l2cap_data;
4555 l = &conn->chan_list;
4557 BT_DBG("conn %p", conn);
4559 read_lock(&l->lock);
4561 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Skip channels whose connect is still pending elsewhere. */
4564 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4569 if (!status && (sk->sk_state == BT_CONNECTED ||
4570 sk->sk_state == BT_CONFIG)) {
4571 l2cap_check_encryption(sk, encrypt);
4576 if (sk->sk_state == BT_CONNECT) {
4578 struct l2cap_conn_req req;
4579 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4580 req.psm = l2cap_pi(sk)->psm;
4582 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4583 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4585 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4586 L2CAP_CONN_REQ, sizeof(req), &req);
4588 l2cap_sock_clear_timer(sk);
4589 l2cap_sock_set_timer(sk, HZ / 10);
4591 } else if (sk->sk_state == BT_CONNECT2) {
4592 struct l2cap_conn_rsp rsp;
4596 sk->sk_state = BT_CONFIG;
4597 result = L2CAP_CR_SUCCESS;
/* Security failed: reject the pending incoming connection. */
4599 sk->sk_state = BT_DISCONN;
4600 l2cap_sock_set_timer(sk, HZ / 10);
4601 result = L2CAP_CR_SEC_BLOCK;
4604 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4605 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4606 rsp.result = cpu_to_le16(result);
4607 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4608 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4609 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4615 read_unlock(&l->lock);
/* HCI callback: ACL data arrived.  Reassembles L2CAP frames that span
 * multiple ACL packets: an ACL_START packet may carry the whole frame
 * (fast path) or start a conn->rx_skb accumulation that continuation
 * packets complete; malformed sequences mark the connection unreliable.
 */
4620 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4622 struct l2cap_conn *conn = hcon->l2cap_data;
4624 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4627 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4629 if (flags & ACL_START) {
4630 struct l2cap_hdr *hdr;
/* A new start while a frame is still being reassembled: drop the
 * partial frame and flag unreliable delivery. */
4634 BT_ERR("Unexpected start frame (len %d)", skb->len);
4635 kfree_skb(conn->rx_skb);
4636 conn->rx_skb = NULL;
4638 l2cap_conn_unreliable(conn, ECOMM);
4642 BT_ERR("Frame is too short (len %d)", skb->len);
4643 l2cap_conn_unreliable(conn, ECOMM);
4647 hdr = (struct l2cap_hdr *) skb->data;
4648 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4650 if (len == skb->len) {
4651 /* Complete frame received */
4652 l2cap_recv_frame(conn, skb);
4656 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4658 if (skb->len > len) {
4659 BT_ERR("Frame is too long (len %d, expected len %d)",
4661 l2cap_conn_unreliable(conn, ECOMM);
4665 /* Allocate skb for the complete frame (with header) */
4666 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4670 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4672 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4674 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4676 if (!conn->rx_len) {
4677 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4678 l2cap_conn_unreliable(conn, ECOMM);
4682 if (skb->len > conn->rx_len) {
4683 BT_ERR("Fragment is too long (len %d, expected %d)",
4684 skb->len, conn->rx_len);
4685 kfree_skb(conn->rx_skb);
4686 conn->rx_skb = NULL;
4688 l2cap_conn_unreliable(conn, ECOMM);
4692 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4694 conn->rx_len -= skb->len;
4696 if (!conn->rx_len) {
4697 /* Complete frame received */
4698 l2cap_recv_frame(conn, conn->rx_skb);
4699 conn->rx_skb = NULL;
/* debugfs seq_file dump: one line per L2CAP socket with addresses,
 * state, PSM, CIDs, MTUs and security level.
 */
4708 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4711 struct hlist_node *node;
4713 read_lock_bh(&l2cap_sk_list.lock);
4715 sk_for_each(sk, node, &l2cap_sk_list.head) {
4716 struct l2cap_pinfo *pi = l2cap_pi(sk);
4718 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4719 batostr(&bt_sk(sk)->src),
4720 batostr(&bt_sk(sk)->dst),
4721 sk->sk_state, __le16_to_cpu(pi->psm),
4723 pi->imtu, pi->omtu, pi->sec_level);
4726 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the show handler through the seq_file
 * single_open() helper; inode->i_private is passed through as the
 * show callback's private data. */
4731 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4733 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry; standard seq_file
 * single_open/seq_lseek/single_release trio.
 * NOTE(review): excerpt elides the `.read = seq_read` member and the
 * closing brace of the initializer. */
4736 static const struct file_operations l2cap_debugfs_fops = {
4737 .open = l2cap_debugfs_open,
4739 .llseek = seq_lseek,
4740 .release = single_release,
/* Dentry for the debugfs file: created in l2cap_init(), removed in
 * l2cap_exit(). */
4743 static struct dentry *l2cap_debugfs;
/* Socket operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets.  L2CAP
 * provides its own release/bind/connect/listen/accept/sendmsg/recvmsg
 * and socket options; poll and ioctl reuse the generic Bluetooth
 * helpers; mmap/socketpair are unsupported (sock_no_*).
 * NOTE(review): excerpt elides the closing brace of the initializer. */
4745 static const struct proto_ops l2cap_sock_ops = {
4746 .family = PF_BLUETOOTH,
4747 .owner = THIS_MODULE,
4748 .release = l2cap_sock_release,
4749 .bind = l2cap_sock_bind,
4750 .connect = l2cap_sock_connect,
4751 .listen = l2cap_sock_listen,
4752 .accept = l2cap_sock_accept,
4753 .getname = l2cap_sock_getname,
4754 .sendmsg = l2cap_sock_sendmsg,
4755 .recvmsg = l2cap_sock_recvmsg,
4756 .poll = bt_sock_poll,
4757 .ioctl = bt_sock_ioctl,
4758 .mmap = sock_no_mmap,
4759 .socketpair = sock_no_socketpair,
4760 .shutdown = l2cap_sock_shutdown,
4761 .setsockopt = l2cap_sock_setsockopt,
4762 .getsockopt = l2cap_sock_getsockopt
/* Protocol-family hook registered with bt_sock_register() in
 * l2cap_init(); routes socket(PF_BLUETOOTH, ..., BTPROTO_L2CAP)
 * creation to l2cap_sock_create(). */
4765 static const struct net_proto_family l2cap_sock_family_ops = {
4766 .family = PF_BLUETOOTH,
4767 .owner = THIS_MODULE,
4768 .create = l2cap_sock_create,
/* HCI protocol descriptor: callbacks the HCI core invokes for
 * connection setup/teardown events, security results and inbound ACL
 * data on behalf of L2CAP.  Registered in l2cap_init().
 * NOTE(review): excerpt elides the `.name` member line and the
 * closing brace of the initializer. */
4771 static struct hci_proto l2cap_hci_proto = {
4773 .id = HCI_PROTO_L2CAP,
4774 .connect_ind = l2cap_connect_ind,
4775 .connect_cfm = l2cap_connect_cfm,
4776 .disconn_ind = l2cap_disconn_ind,
4777 .disconn_cfm = l2cap_disconn_cfm,
4778 .security_cfm = l2cap_security_cfm,
4779 .recv_acldata = l2cap_recv_acldata
/* Module init: registers, in order, the proto, the busy-work
 * workqueue, the Bluetooth socket family, and the HCI protocol
 * hooks; finally creates the debugfs file (failure there is only
 * logged, not fatal).  On HCI registration failure the socket family
 * is unregistered before unwinding.
 *
 * NOTE(review): excerpt elides the error-unwind branches
 * (gotos/returns and the `error:` label body is only partly
 * visible); comments annotate the visible statements only. */
4782 static int __init l2cap_init(void)
4786 err = proto_register(&l2cap_proto, 0);
/* Single-threaded workqueue used by the ERTM busy-state machinery;
 * destroyed in l2cap_exit(). */
4790 _busy_wq = create_singlethread_workqueue("l2cap");
4794 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4796 BT_ERR("L2CAP socket registration failed");
4800 err = hci_register_proto(&l2cap_hci_proto);
4802 BT_ERR("L2CAP protocol registration failed");
/* Undo the socket-family registration done above before failing. */
4803 bt_sock_unregister(BTPROTO_L2CAP);
4808 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4809 bt_debugfs, NULL, &l2cap_debugfs_fops);
4811 BT_ERR("Failed to create L2CAP debug file");
4814 BT_INFO("L2CAP ver %s", VERSION);
4815 BT_INFO("L2CAP socket layer initialized");
/* Error unwind: release the proto registration from the top. */
4820 proto_unregister(&l2cap_proto);
/* Module exit: tear down everything l2cap_init() set up — debugfs
 * file, busy workqueue (flushed before destruction so queued work
 * finishes), socket family, HCI protocol hooks, and the proto
 * itself.  Unregistration failures are logged but cannot be
 * propagated from an exit handler. */
4824 static void __exit l2cap_exit(void)
4826 debugfs_remove(l2cap_debugfs);
/* Drain pending busy-state work items before destroying the queue. */
4828 flush_workqueue(_busy_wq);
4829 destroy_workqueue(_busy_wq);
4831 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4832 BT_ERR("L2CAP socket unregistration failed");
4834 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4835 BT_ERR("L2CAP protocol unregistration failed");
4837 proto_unregister(&l2cap_proto);
/* Exported no-op: modules that only create L2CAP sockets (and use no
 * other L2CAP symbol) call this so that symbol resolution pulls in —
 * and modprobe auto-loads — this module. */
4840 void l2cap_load(void)
4842 /* Dummy function to trigger automatic L2CAP module loading by
4843 * other modules that use L2CAP sockets but don't use any other
4844 * symbols from it. */
4846 EXPORT_SYMBOL(l2cap_load);
4848 module_init(l2cap_init);
4849 module_exit(l2cap_exit);
/* NOTE(review): enable_ertm is declared `static int` near the top of
 * the file but registered here with type `bool`; modern kernels
 * require the backing variable of a bool module_param to actually be
 * `bool` — confirm against the target kernel version. */
4851 module_param(enable_ertm, bool, 0644);
4852 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4854 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4855 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4856 MODULE_VERSION(VERSION);
4857 MODULE_LICENSE("GPL");
/* Alias lets the Bluetooth core auto-load this module by protocol
 * number (bt-proto-0 == BTPROTO_L2CAP). */
4858 MODULE_ALIAS("bt-proto-0");