2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
65 static struct workqueue_struct *_busy_wq;
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
/*
 * Socket timer callback: picks an errno describing why the operation timed
 * out and tears the socket down.  NOTE(review): extraction dropped lines
 * here (locking and the default 'reason' initialization, presumably
 * ETIMEDOUT) -- confirm against the full file.
 */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
86 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* A connected/configuring channel that times out reads as a refusal. */
90 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
91 reason = ECONNREFUSED;
/* Connecting above the SDP security level also maps to ECONNREFUSED. */
92 else if (sk->sk_state == BT_CONNECT &&
93 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
94 reason = ECONNREFUSED;
98 __l2cap_sock_close(sk, reason);
/* Arm the per-socket timer 'timeout' jiffies from now. */
106 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
108 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
109 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if any. */
112 static void l2cap_sock_clear_timer(struct sock *sk)
114 BT_DBG("sock %p state %d", sk, sk->sk_state);
115 sk_stop_timer(sk, &sk->sk_timer);
118 /* ---- L2CAP channels ---- */
/* Walk a connection's channel list looking for a destination CID match.
 * Caller must hold the list lock ("__" prefix = lock held). */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
/* Same walk, matching on the source CID instead. */
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
139 /* Find channel with given SCID.
140 * Returns locked socket */
141 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
145 s = __l2cap_get_chan_by_scid(l, cid);
148 read_unlock(&l->lock);
/* Lookup by pending signalling-command identifier (lock held by caller). */
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around the ident lookup above. */
162 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 s = __l2cap_get_chan_by_ident(l, ident);
169 read_unlock(&l->lock);
/* Pick the first free dynamic CID in [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END).
 * Linear scan; returns 0 on exhaustion (per dropped tail lines -- TODO confirm). */
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
175 u16 cid = L2CAP_CID_DYN_START;
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push a channel socket onto the head of the connection's list
 * (doubly linked via l2cap_pinfo next_c/prev_c). */
185 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
190 l2cap_pi(l->head)->prev_c = sk;
192 l2cap_pi(sk)->next_c = l->head;
193 l2cap_pi(sk)->prev_c = NULL;
/* Unlink a channel socket from the list, taking the write lock itself. */
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
199 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
201 write_lock_bh(&l->lock);
206 l2cap_pi(next)->prev_c = prev;
208 l2cap_pi(prev)->next_c = next;
209 write_unlock_bh(&l->lock);
/*
 * Attach a channel socket to a connection: assign CIDs according to the
 * socket type, link it into the channel list and, for incoming channels,
 * queue it on the parent's accept queue.  Channel-list write lock must be
 * held by the caller (see l2cap_chan_add()).
 */
214 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
216 struct l2cap_chan_list *l = &conn->chan_list;
218 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
219 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: presumably HCI "remote user terminated connection" -- TODO confirm. */
221 conn->disc_reason = 0x13;
223 l2cap_pi(sk)->conn = conn;
225 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
226 /* Alloc CID for connection-oriented socket */
227 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
228 } else if (sk->sk_type == SOCK_DGRAM) {
229 /* Connectionless socket */
230 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
231 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
232 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 /* Raw socket can send/recv signalling messages only */
235 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
236 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
240 __l2cap_chan_link(l, sk);
243 bt_accept_enqueue(parent, sk);
/*
 * Detach a channel from its connection, mark the socket closed/zapped and
 * wake anyone waiting on it.  Drops one hci_conn reference.
 */
247 * Must be called on the locked socket. */
248 static void l2cap_chan_del(struct sock *sk, int err)
250 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
251 struct sock *parent = bt_sk(sk)->parent;
253 l2cap_sock_clear_timer(sk);
255 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
258 /* Unlink from channel list */
259 l2cap_chan_unlink(&conn->chan_list, sk);
260 l2cap_pi(sk)->conn = NULL;
261 hci_conn_put(conn->hcon);
264 sk->sk_state = BT_CLOSED;
265 sock_set_flag(sk, SOCK_ZAPPED);
/* If still on a parent's accept queue, remove and notify the listener. */
271 bt_accept_unlink(sk);
272 parent->sk_data_ready(parent, 0);
274 sk->sk_state_change(sk);
277 /* Service level security */
/*
 * Map the channel's PSM and requested security level to an HCI
 * authentication type, then ask the HCI layer to enforce it.
 * PSM 0x0001 (SDP) never bonds; everything else uses general bonding.
 */
278 static inline int l2cap_check_security(struct sock *sk)
280 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
283 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
284 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
285 auth_type = HCI_AT_NO_BONDING_MITM;
287 auth_type = HCI_AT_NO_BONDING;
/* SDP channels are downgraded from LOW to the dedicated SDP level. */
289 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
290 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
292 switch (l2cap_pi(sk)->sec_level) {
293 case BT_SECURITY_HIGH:
294 auth_type = HCI_AT_GENERAL_BONDING_MITM;
296 case BT_SECURITY_MEDIUM:
297 auth_type = HCI_AT_GENERAL_BONDING;
300 auth_type = HCI_AT_NO_BONDING;
305 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection. */
309 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
313 /* Get next available identificator.
314 * 1 - 128 are used by kernel.
315 * 315 - 199 are reserved.
316 * 200 - 254 are used by utilities like l2ping, etc.
319 spin_lock_bh(&conn->lock);
/* Wrap back to the start of the kernel range after ident 128. */
321 if (++conn->tx_ident > 128)
326 spin_unlock_bh(&conn->lock);
/* Build a signalling command PDU and push it out over the ACL link. */
331 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
333 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
335 BT_DBG("code 0x%2.2x", code);
340 hci_send_acl(conn->hcon, skb, 0);
/*
 * Build and transmit an ERTM S-frame carrying 'control'.  Appends the
 * F-bit / P-bit if one is owed, and a CRC16 FCS when the channel uses it.
 */
343 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
346 struct l2cap_hdr *lh;
347 struct l2cap_conn *conn = pi->conn;
348 int count, hlen = L2CAP_HDR_SIZE + 2;
/* FCS adds two more bytes to the frame (hlen bump in dropped line). */
350 if (pi->fcs == L2CAP_FCS_CRC16)
353 BT_DBG("pi %p, control 0x%2.2x", pi, control);
355 count = min_t(unsigned int, conn->mtu, hlen);
356 control |= L2CAP_CTRL_FRAME_TYPE;
358 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
359 control |= L2CAP_CTRL_FINAL;
360 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
363 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
364 control |= L2CAP_CTRL_POLL;
365 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
368 skb = bt_skb_alloc(count, GFP_ATOMIC);
372 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
373 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
374 lh->cid = cpu_to_le16(pi->dcid);
375 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers the whole frame so far (header + control). */
377 if (pi->fcs == L2CAP_FCS_CRC16) {
378 u16 fcs = crc16(0, (u8 *)lh, count - 2);
379 put_unaligned_le16(fcs, skb_put(skb, 2));
382 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RR, or RNR while locally busy, acknowledging buffer_seq. */
385 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
387 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
388 control |= L2CAP_SUPER_RCV_NOT_READY;
389 pi->conn_state |= L2CAP_CONN_RNR_SENT;
391 control |= L2CAP_SUPER_RCV_READY;
393 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
395 l2cap_send_sframe(pi, control);
/* True when no Connect Request is outstanding for this channel. */
398 static inline int __l2cap_no_conn_pending(struct sock *sk)
400 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/*
 * Kick off channel establishment.  If the remote feature mask is already
 * known, send a Connect Request (once security allows); otherwise first
 * issue an Information Request for the feature mask and start its timer.
 */
403 static void l2cap_do_start(struct sock *sk)
405 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
407 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange in flight but not finished: wait for the response. */
408 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
411 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
412 struct l2cap_conn_req req;
413 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
414 req.psm = l2cap_pi(sk)->psm;
416 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
417 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
419 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
420 L2CAP_CONN_REQ, sizeof(req), &req);
423 struct l2cap_info_req req;
424 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
426 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
427 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the info response. */
429 mod_timer(&conn->info_timer, jiffies +
430 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
432 l2cap_send_cmd(conn, conn->info_ident,
433 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send a Disconnect Request for this channel's CID pair. */
437 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
439 struct l2cap_disconn_req req;
441 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
442 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
443 l2cap_send_cmd(conn, l2cap_get_ident(conn),
444 L2CAP_DISCONN_REQ, sizeof(req), &req);
447 /* ---- L2CAP connections ---- */
/*
 * After the feature-mask exchange completes, walk every channel on the
 * connection and advance its state machine: outgoing channels get their
 * Connect Request, incoming ones get a Connect Response (success, pending
 * authorization, or pending authentication).
 */
448 static void l2cap_conn_start(struct l2cap_conn *conn)
450 struct l2cap_chan_list *l = &conn->chan_list;
453 BT_DBG("conn %p", conn);
457 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets participate here. */
460 if (sk->sk_type != SOCK_SEQPACKET &&
461 sk->sk_type != SOCK_STREAM) {
466 if (sk->sk_state == BT_CONNECT) {
467 if (l2cap_check_security(sk) &&
468 __l2cap_no_conn_pending(sk)) {
469 struct l2cap_conn_req req;
470 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
471 req.psm = l2cap_pi(sk)->psm;
473 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
474 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
476 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
477 L2CAP_CONN_REQ, sizeof(req), &req);
479 } else if (sk->sk_state == BT_CONNECT2) {
480 struct l2cap_conn_rsp rsp;
481 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
482 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
484 if (l2cap_check_security(sk)) {
/* defer_setup: hold at "authorization pending" until userspace accepts. */
485 if (bt_sk(sk)->defer_setup) {
486 struct sock *parent = bt_sk(sk)->parent;
487 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
488 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
489 parent->sk_data_ready(parent, 0);
492 sk->sk_state = BT_CONFIG;
493 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
494 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
497 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
498 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
501 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
502 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
508 read_unlock(&l->lock);
/*
 * The ACL link is up: mark non-connection-oriented channels connected and
 * start establishment for channels waiting in BT_CONNECT.
 */
511 static void l2cap_conn_ready(struct l2cap_conn *conn)
513 struct l2cap_chan_list *l = &conn->chan_list;
516 BT_DBG("conn %p", conn);
520 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
523 if (sk->sk_type != SOCK_SEQPACKET &&
524 sk->sk_type != SOCK_STREAM) {
525 l2cap_sock_clear_timer(sk);
526 sk->sk_state = BT_CONNECTED;
527 sk->sk_state_change(sk);
528 } else if (sk->sk_state == BT_CONNECT)
534 read_unlock(&l->lock);
537 /* Notify sockets that we cannot guaranty reliability anymore */
538 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
540 struct l2cap_chan_list *l = &conn->chan_list;
543 BT_DBG("conn %p", conn);
/* Only channels that asked for force_reliable see the error. */
547 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
548 if (l2cap_pi(sk)->force_reliable)
552 read_unlock(&l->lock);
/* Info-request timer expired: give up on the feature mask and proceed. */
555 static void l2cap_info_timeout(unsigned long arg)
557 struct l2cap_conn *conn = (void *) arg;
559 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
560 conn->info_ident = 0;
562 l2cap_conn_start(conn);
/*
 * Create (or return the existing) L2CAP connection object for an HCI link.
 * Allocated atomically because this runs from HCI event context.
 */
565 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
567 struct l2cap_conn *conn = hcon->l2cap_data;
572 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
576 hcon->l2cap_data = conn;
579 BT_DBG("hcon %p conn %p", hcon, conn);
/* Outgoing fragmentation is bounded by the controller's ACL MTU. */
581 conn->mtu = hcon->hdev->acl_mtu;
582 conn->src = &hcon->hdev->bdaddr;
583 conn->dst = &hcon->dst;
587 spin_lock_init(&conn->lock);
588 rwlock_init(&conn->chan_list.lock);
590 setup_timer(&conn->info_timer, l2cap_info_timeout,
591 (unsigned long) conn);
/* 0x13: presumably HCI "remote user terminated" default -- TODO confirm. */
593 conn->disc_reason = 0x13;
/*
 * Tear down an L2CAP connection: free any partial reassembly buffer,
 * delete every channel with 'err', stop the info timer and detach from
 * the HCI link.
 */
598 static void l2cap_conn_del(struct hci_conn *hcon, int err)
600 struct l2cap_conn *conn = hcon->l2cap_data;
606 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
608 kfree_skb(conn->rx_skb);
611 while ((sk = conn->chan_list.head)) {
613 l2cap_chan_del(sk, err);
618 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
619 del_timer_sync(&conn->info_timer);
621 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
625 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
627 struct l2cap_chan_list *l = &conn->chan_list;
628 write_lock_bh(&l->lock);
629 __l2cap_chan_add(conn, sk, parent);
630 write_unlock_bh(&l->lock);
633 /* ---- Socket interface ---- */
/* Exact-match lookup: socket bound to this PSM and source address.
 * Caller holds l2cap_sk_list.lock. */
634 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
637 struct hlist_node *node;
638 sk_for_each(sk, node, &l2cap_sk_list.head)
639 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
646 /* Find socket with psm and source bdaddr.
647 * Returns closest match.
/* Prefers an exact source-address match; falls back to a BDADDR_ANY
 * binding (tracked in sk1). */
649 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
651 struct sock *sk = NULL, *sk1 = NULL;
652 struct hlist_node *node;
654 sk_for_each(sk, node, &l2cap_sk_list.head) {
655 if (state && sk->sk_state != state)
658 if (l2cap_pi(sk)->psm == psm) {
660 if (!bacmp(&bt_sk(sk)->src, src))
664 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop broke on an exact match. */
668 return node ? sk : sk1;
671 /* Find socket with given address (psm, src).
672 * Returns locked socket */
673 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
676 read_lock(&l2cap_sk_list.lock);
677 s = __l2cap_get_sock_by_psm(state, psm, src);
680 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct hook: release any queued receive/transmit skbs. */
684 static void l2cap_sock_destruct(struct sock *sk)
688 skb_queue_purge(&sk->sk_receive_queue);
689 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then zap it. */
692 static void l2cap_sock_cleanup_listen(struct sock *parent)
696 BT_DBG("parent %p", parent);
698 /* Close not yet accepted channels */
699 while ((sk = bt_accept_dequeue(parent, NULL)))
700 l2cap_sock_close(sk);
702 parent->sk_state = BT_CLOSED;
703 sock_set_flag(parent, SOCK_ZAPPED);
706 /* Kill socket (only if zapped and orphan)
707 * Must be called on unlocked socket.
709 static void l2cap_sock_kill(struct sock *sk)
/* Bail unless the socket is zapped AND has no attached struct socket. */
711 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
714 BT_DBG("sk %p state %d", sk, sk->sk_state);
716 /* Kill poor orphan */
717 bt_sock_unlink(&l2cap_sk_list, sk);
718 sock_set_flag(sk, SOCK_DEAD);
/*
 * State-dependent close: listeners clean their accept queue; connected
 * channels send a Disconnect Request and wait; half-open incoming
 * channels answer the pending Connect Request with a rejection first.
 */
722 static void __l2cap_sock_close(struct sock *sk, int reason)
724 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
726 switch (sk->sk_state) {
728 l2cap_sock_cleanup_listen(sk);
733 if (sk->sk_type == SOCK_SEQPACKET ||
734 sk->sk_type == SOCK_STREAM) {
735 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
737 sk->sk_state = BT_DISCONN;
738 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
739 l2cap_send_disconn_req(conn, sk);
741 l2cap_chan_del(sk, reason);
745 if (sk->sk_type == SOCK_SEQPACKET ||
746 sk->sk_type == SOCK_STREAM) {
747 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
748 struct l2cap_conn_rsp rsp;
/* With defer_setup the refusal reads as a security block, else bad PSM. */
751 if (bt_sk(sk)->defer_setup)
752 result = L2CAP_CR_SEC_BLOCK;
754 result = L2CAP_CR_BAD_PSM;
756 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
757 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
758 rsp.result = cpu_to_le16(result);
759 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
760 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
761 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
763 l2cap_chan_del(sk, reason);
768 l2cap_chan_del(sk, reason);
772 sock_set_flag(sk, SOCK_ZAPPED);
777 /* Must be called on unlocked socket. */
778 static void l2cap_sock_close(struct sock *sk)
780 l2cap_sock_clear_timer(sk);
782 __l2cap_sock_close(sk, ECONNRESET);
/*
 * Initialize a fresh L2CAP socket.  Children of a listening socket
 * inherit its settings; stand-alone sockets get the module defaults
 * (ERTM for SOCK_STREAM when enable_ertm is set, basic mode otherwise).
 */
787 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
789 struct l2cap_pinfo *pi = l2cap_pi(sk);
794 sk->sk_type = parent->sk_type;
795 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
797 pi->imtu = l2cap_pi(parent)->imtu;
798 pi->omtu = l2cap_pi(parent)->omtu;
799 pi->mode = l2cap_pi(parent)->mode;
800 pi->fcs = l2cap_pi(parent)->fcs;
801 pi->max_tx = l2cap_pi(parent)->max_tx;
802 pi->tx_win = l2cap_pi(parent)->tx_win;
803 pi->sec_level = l2cap_pi(parent)->sec_level;
804 pi->role_switch = l2cap_pi(parent)->role_switch;
805 pi->force_reliable = l2cap_pi(parent)->force_reliable;
807 pi->imtu = L2CAP_DEFAULT_MTU;
809 if (enable_ertm && sk->sk_type == SOCK_STREAM)
810 pi->mode = L2CAP_MODE_ERTM;
812 pi->mode = L2CAP_MODE_BASIC;
813 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
814 pi->fcs = L2CAP_FCS_CRC16;
815 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
816 pi->sec_level = BT_SECURITY_LOW;
818 pi->force_reliable = 0;
821 /* Default config options */
823 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* ERTM bookkeeping queues/lists live in the protocol-private area. */
824 skb_queue_head_init(TX_QUEUE(sk));
825 skb_queue_head_init(SREJ_QUEUE(sk));
826 skb_queue_head_init(BUSY_QUEUE(sk));
827 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: only obj_size matters, to size l2cap_pinfo. */
830 static struct proto l2cap_proto = {
832 .owner = THIS_MODULE,
833 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate a struct sock, wire up callbacks/timer and register it
 * in the global L2CAP socket list. */
836 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
840 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
844 sock_init_data(sock, sk);
845 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
847 sk->sk_destruct = l2cap_sock_destruct;
848 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
850 sock_reset_flag(sk, SOCK_ZAPPED);
852 sk->sk_protocol = proto;
853 sk->sk_state = BT_OPEN;
855 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
857 bt_sock_link(&l2cap_sk_list, sk);
/*
 * socket(2) backend: validate the socket type, require CAP_NET_RAW for
 * raw sockets, then allocate and initialize the sock.
 */
861 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
866 BT_DBG("sock %p", sock);
868 sock->state = SS_UNCONNECTED;
870 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
871 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
872 return -ESOCKTNOSUPPORT;
874 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
877 sock->ops = &l2cap_sock_ops;
879 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
883 l2cap_sock_init(sk, NULL);
/*
 * bind(2) backend: copy in the (possibly short) sockaddr_l2, enforce
 * CAP_NET_BIND_SERVICE for privileged PSMs (< 0x1001), reject duplicate
 * psm/src bindings, and record the source address.
 */
887 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
889 struct sock *sk = sock->sk;
890 struct sockaddr_l2 la;
895 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Tolerate short sockaddrs: copy only what the caller supplied. */
898 memset(&la, 0, sizeof(la));
899 len = min_t(unsigned int, sizeof(la), alen);
900 memcpy(&la, addr, len);
907 if (sk->sk_state != BT_OPEN) {
912 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
913 !capable(CAP_NET_BIND_SERVICE)) {
918 write_lock_bh(&l2cap_sk_list.lock);
920 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
923 /* Save source address */
924 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
925 l2cap_pi(sk)->psm = la.l2_psm;
926 l2cap_pi(sk)->sport = la.l2_psm;
927 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) default to the SDP security level. */
929 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
930 __le16_to_cpu(la.l2_psm) == 0x0003)
931 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
934 write_unlock_bh(&l2cap_sk_list.lock);
/*
 * Resolve a route to the destination, create/reuse the ACL link with the
 * right authentication requirements, attach the channel to the connection
 * and either complete immediately (link already up) or arm the connect
 * timer.  Returns 0 or a negative errno.
 */
941 static int l2cap_do_connect(struct sock *sk)
943 bdaddr_t *src = &bt_sk(sk)->src;
944 bdaddr_t *dst = &bt_sk(sk)->dst;
945 struct l2cap_conn *conn;
946 struct hci_conn *hcon;
947 struct hci_dev *hdev;
951 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
954 hdev = hci_get_route(dst, src);
956 return -EHOSTUNREACH;
958 hci_dev_lock_bh(hdev);
/* Raw sockets use dedicated bonding; SDP never bonds; everything else
 * maps sec_level to general bonding (mirrors l2cap_check_security()). */
962 if (sk->sk_type == SOCK_RAW) {
963 switch (l2cap_pi(sk)->sec_level) {
964 case BT_SECURITY_HIGH:
965 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
967 case BT_SECURITY_MEDIUM:
968 auth_type = HCI_AT_DEDICATED_BONDING;
971 auth_type = HCI_AT_NO_BONDING;
974 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
975 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
976 auth_type = HCI_AT_NO_BONDING_MITM;
978 auth_type = HCI_AT_NO_BONDING;
980 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
981 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
983 switch (l2cap_pi(sk)->sec_level) {
984 case BT_SECURITY_HIGH:
985 auth_type = HCI_AT_GENERAL_BONDING_MITM;
987 case BT_SECURITY_MEDIUM:
988 auth_type = HCI_AT_GENERAL_BONDING;
991 auth_type = HCI_AT_NO_BONDING;
996 hcon = hci_connect(hdev, ACL_LINK, dst,
997 l2cap_pi(sk)->sec_level, auth_type);
1001 conn = l2cap_conn_add(hcon, 0);
1009 /* Update source addr of the socket */
1010 bacpy(src, conn->src);
1012 l2cap_chan_add(conn, sk, NULL);
1014 sk->sk_state = BT_CONNECT;
1015 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link already up: non-connection-oriented sockets finish right away. */
1017 if (hcon->state == BT_CONNECTED) {
1018 if (sk->sk_type != SOCK_SEQPACKET &&
1019 sk->sk_type != SOCK_STREAM) {
1020 l2cap_sock_clear_timer(sk);
1021 sk->sk_state = BT_CONNECTED;
1027 hci_dev_unlock_bh(hdev);
/*
 * connect(2) backend: validate the address and channel mode, stash the
 * destination psm/bdaddr, start the connection and (unless non-blocking)
 * wait for BT_CONNECTED.
 */
1032 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1034 struct sock *sk = sock->sk;
1035 struct sockaddr_l2 la;
1038 BT_DBG("sk %p", sk);
1040 if (!addr || alen < sizeof(addr->sa_family) ||
1041 addr->sa_family != AF_BLUETOOTH)
1044 memset(&la, 0, sizeof(la));
1045 len = min_t(unsigned int, sizeof(la), alen);
1046 memcpy(&la, addr, len);
1053 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
/* ERTM/streaming only allowed when the module enables them (dropped
 * guard lines, presumably checking enable_ertm -- TODO confirm). */
1059 switch (l2cap_pi(sk)->mode) {
1060 case L2CAP_MODE_BASIC:
1062 case L2CAP_MODE_ERTM:
1063 case L2CAP_MODE_STREAMING:
1072 switch (sk->sk_state) {
1076 /* Already connecting */
1080 /* Already connected */
1093 /* Set destination address and psm */
1094 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1095 l2cap_pi(sk)->psm = la.l2_psm;
1097 err = l2cap_do_connect(sk);
1102 err = bt_sock_wait_state(sk, BT_CONNECTED,
1103 sock_sndtimeo(sk, flags & O_NONBLOCK));
/*
 * listen(2) backend: only bound SEQPACKET/STREAM sockets may listen; if
 * no PSM was bound, auto-allocate an odd dynamic PSM in [0x1001,0x1100).
 */
1109 static int l2cap_sock_listen(struct socket *sock, int backlog)
1111 struct sock *sk = sock->sk;
1114 BT_DBG("sk %p backlog %d", sk, backlog);
1118 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1119 || sk->sk_state != BT_BOUND) {
1124 switch (l2cap_pi(sk)->mode) {
1125 case L2CAP_MODE_BASIC:
1127 case L2CAP_MODE_ERTM:
1128 case L2CAP_MODE_STREAMING:
1137 if (!l2cap_pi(sk)->psm) {
1138 bdaddr_t *src = &bt_sk(sk)->src;
1143 write_lock_bh(&l2cap_sk_list.lock);
/* Valid PSMs are odd, hence the += 2 stride. */
1145 for (psm = 0x1001; psm < 0x1100; psm += 2)
1146 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1147 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1148 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1153 write_unlock_bh(&l2cap_sk_list.lock);
1159 sk->sk_max_ack_backlog = backlog;
1160 sk->sk_ack_backlog = 0;
1161 sk->sk_state = BT_LISTEN;
/*
 * accept(2) backend: sleep (interruptibly, wake-one) on the listener's
 * wait queue until a child socket shows up, honoring the receive timeout
 * and O_NONBLOCK.
 */
1168 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1170 DECLARE_WAITQUEUE(wait, current);
1171 struct sock *sk = sock->sk, *nsk;
1175 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1177 if (sk->sk_state != BT_LISTEN) {
1182 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1184 BT_DBG("sk %p timeo %ld", sk, timeo);
1186 /* Wait for an incoming connection. (wake-one). */
1187 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1188 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1189 set_current_state(TASK_INTERRUPTIBLE);
/* Socket lock is released around the schedule (dropped release_sock
 * line -- TODO confirm), then re-taken before re-checking state. */
1196 timeo = schedule_timeout(timeo);
1197 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1199 if (sk->sk_state != BT_LISTEN) {
1204 if (signal_pending(current)) {
1205 err = sock_intr_errno(timeo);
1209 set_current_state(TASK_RUNNING);
1210 remove_wait_queue(sk_sleep(sk), &wait);
1215 newsock->state = SS_CONNECTED;
1217 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername backend: peer != 0 reports the remote
 * (psm/dst/dcid), otherwise the local binding (sport/src/scid). */
1224 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1226 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1227 struct sock *sk = sock->sk;
1229 BT_DBG("sock %p, sk %p", sock, sk);
1231 addr->sa_family = AF_BLUETOOTH;
1232 *len = sizeof(struct sockaddr_l2);
1235 la->l2_psm = l2cap_pi(sk)->psm;
1236 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1237 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1239 la->l2_psm = l2cap_pi(sk)->sport;
1240 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1241 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Block until every transmitted ERTM I-frame has been acknowledged,
 * the connection goes away, a signal arrives, or the socket errors. */
1247 static int __l2cap_wait_ack(struct sock *sk)
1249 DECLARE_WAITQUEUE(wait, current);
1253 add_wait_queue(sk_sleep(sk), &wait);
1254 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1255 set_current_state(TASK_INTERRUPTIBLE);
1260 if (signal_pending(current)) {
1261 err = sock_intr_errno(timeo);
1266 timeo = schedule_timeout(timeo);
1269 err = sock_error(sk);
1273 set_current_state(TASK_RUNNING);
1274 remove_wait_queue(sk_sleep(sk), &wait);
/*
 * ERTM monitor timer: the peer did not answer our poll.  Give up and
 * disconnect after remote_max_tx retries, otherwise poll again (RR/RNR
 * with the P bit) and re-arm.
 */
1278 static void l2cap_monitor_timeout(unsigned long arg)
1280 struct sock *sk = (void *) arg;
1283 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1284 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1289 l2cap_pi(sk)->retry_count++;
1290 __mod_monitor_timer();
1292 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: switch to the monitor timer, enter the
 * WAIT_F state and poll the peer. */
1296 static void l2cap_retrans_timeout(unsigned long arg)
1298 struct sock *sk = (void *) arg;
1301 l2cap_pi(sk)->retry_count = 1;
1302 __mod_monitor_timer();
1304 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1306 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Drop acknowledged frames off the head of the TX queue, up to (not
 * including) expected_ack_seq; stop the retrans timer when none remain. */
1310 static void l2cap_drop_acked_frames(struct sock *sk)
1312 struct sk_buff *skb;
1314 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1315 l2cap_pi(sk)->unacked_frames) {
1316 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1319 skb = skb_dequeue(TX_QUEUE(sk));
1322 l2cap_pi(sk)->unacked_frames--;
1325 if (!l2cap_pi(sk)->unacked_frames)
1326 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one frame to the HCI layer on this channel's ACL link. */
1329 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1331 struct l2cap_pinfo *pi = l2cap_pi(sk);
1333 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1335 hci_send_acl(pi->conn->hcon, skb, 0);
/*
 * Streaming mode transmit: stamp each queued frame with the next TxSeq,
 * recompute the FCS over the clone, send it, and free the original --
 * streaming mode keeps nothing for retransmission.
 */
1338 static int l2cap_streaming_send(struct sock *sk)
1340 struct sk_buff *skb, *tx_skb;
1341 struct l2cap_pinfo *pi = l2cap_pi(sk);
1344 while ((skb = sk->sk_send_head)) {
1345 tx_skb = skb_clone(skb, GFP_ATOMIC);
1347 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1348 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1349 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS goes in the last two bytes, over everything before it. */
1351 if (pi->fcs == L2CAP_FCS_CRC16) {
1352 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1353 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1356 l2cap_do_send(sk, tx_skb);
/* TxSeq is modulo 64 per the ERTM control-field layout. */
1358 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1360 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1361 sk->sk_send_head = NULL;
1363 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1365 skb = skb_dequeue(TX_QUEUE(sk));
/*
 * Retransmit the single I-frame whose TxSeq matches 'tx_seq' (SREJ
 * recovery): find it in the TX queue, refresh ReqSeq/FCS on a clone and
 * resend; disconnect if it already hit remote_max_tx retries.
 */
1371 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1373 struct l2cap_pinfo *pi = l2cap_pi(sk);
1374 struct sk_buff *skb, *tx_skb;
1377 skb = skb_peek(TX_QUEUE(sk));
1382 if (bt_cb(skb)->tx_seq == tx_seq)
1385 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1388 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1390 if (pi->remote_max_tx &&
1391 bt_cb(skb)->retries == pi->remote_max_tx) {
1392 l2cap_send_disconn_req(pi->conn, sk);
1396 tx_skb = skb_clone(skb, GFP_ATOMIC);
1397 bt_cb(skb)->retries++;
1398 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1399 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1400 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1401 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1403 if (pi->fcs == L2CAP_FCS_CRC16) {
1404 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1405 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1408 l2cap_do_send(sk, tx_skb);
1411 static int l2cap_ertm_send(struct sock *sk)
1413 struct sk_buff *skb, *tx_skb;
1414 struct l2cap_pinfo *pi = l2cap_pi(sk);
1418 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1421 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1422 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1424 if (pi->remote_max_tx &&
1425 bt_cb(skb)->retries == pi->remote_max_tx) {
1426 l2cap_send_disconn_req(pi->conn, sk);
1430 tx_skb = skb_clone(skb, GFP_ATOMIC);
1432 bt_cb(skb)->retries++;
1434 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1435 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1436 control |= L2CAP_CTRL_FINAL;
1437 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1439 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1440 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1441 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1444 if (pi->fcs == L2CAP_FCS_CRC16) {
1445 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1446 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1449 l2cap_do_send(sk, tx_skb);
1451 __mod_retrans_timer();
1453 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1454 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1456 pi->unacked_frames++;
1459 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1460 sk->sk_send_head = NULL;
1462 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/*
 * Rewind the send pointer to the oldest unacknowledged frame and resend
 * everything from there (REJ recovery).  Serialized by the channel's
 * send lock.
 */
1470 static int l2cap_retransmit_frames(struct sock *sk)
1472 struct l2cap_pinfo *pi = l2cap_pi(sk);
1475 spin_lock_bh(&pi->send_lock);
1477 if (!skb_queue_empty(TX_QUEUE(sk)))
1478 sk->sk_send_head = TX_QUEUE(sk)->next;
/* Restart numbering from the last sequence the peer acknowledged. */
1480 pi->next_tx_seq = pi->expected_ack_seq;
1481 ret = l2cap_ertm_send(sk);
1483 spin_unlock_bh(&pi->send_lock);
/*
 * Acknowledge received I-frames: send RNR when locally busy, otherwise
 * try to piggyback the ack on pending I-frames and fall back to an RR
 * S-frame when nothing was sent.
 */
1488 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1490 struct sock *sk = (struct sock *)pi;
1494 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1496 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1497 control |= L2CAP_SUPER_RCV_NOT_READY;
1498 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1499 l2cap_send_sframe(pi, control);
1503 spin_lock_bh(&pi->send_lock);
1504 nframes = l2cap_ertm_send(sk);
1505 spin_unlock_bh(&pi->send_lock);
1510 control |= L2CAP_SUPER_RCV_READY;
1511 l2cap_send_sframe(pi, control);
/* Send a final SREJ for the newest entry on the SREJ list (F bit set). */
1514 static void l2cap_send_srejtail(struct sock *sk)
1516 struct srej_list *tail;
1519 control = L2CAP_SUPER_SELECT_REJECT;
1520 control |= L2CAP_CTRL_FINAL;
1522 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1523 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1525 l2cap_send_sframe(l2cap_pi(sk), control);
1528 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1530 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1531 struct sk_buff **frag;
1534 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1540 /* Continuation fragments (no L2CAP header) */
1541 frag = &skb_shinfo(skb)->frag_list;
1543 count = min_t(unsigned int, conn->mtu, len);
1545 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1548 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1554 frag = &(*frag)->next;
/* Build a connectionless (SOCK_DGRAM) PDU: basic L2CAP header followed
 * by a 2-byte PSM, then the user payload.  Returns the skb or an
 * ERR_PTR on allocation/copy failure. */
1560 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1562 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1563 struct sk_buff *skb;
/* hlen = L2CAP header + 2 bytes for the PSM field. */
1564 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1565 struct l2cap_hdr *lh;
1567 BT_DBG("sk %p len %d", sk, (int)len);
/* First fragment carries at most (mtu - hlen) payload bytes. */
1569 count = min_t(unsigned int, (conn->mtu - hlen), len);
1570 skb = bt_skb_send_alloc(sk, count + hlen,
1571 msg->msg_flags & MSG_DONTWAIT, &err);
1573 return ERR_PTR(-ENOMEM);
1575 /* Create L2CAP header */
1576 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1577 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
/* Header length field covers payload + PSM, not the header itself. */
1578 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1579 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1581 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1582 if (unlikely(err < 0)) {
1584 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload (no control, SAR or FCS fields).  Returns the skb or an
 * ERR_PTR on failure. */
1589 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1591 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1592 struct sk_buff *skb;
1593 int err, count, hlen = L2CAP_HDR_SIZE;
1594 struct l2cap_hdr *lh;
1596 BT_DBG("sk %p len %d", sk, (int)len);
/* First fragment carries at most (mtu - header) payload bytes. */
1598 count = min_t(unsigned int, (conn->mtu - hlen), len);
1599 skb = bt_skb_send_alloc(sk, count + hlen,
1600 msg->msg_flags & MSG_DONTWAIT, &err);
1602 return ERR_PTR(-ENOMEM);
1604 /* Create L2CAP header */
1605 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1606 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1607 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1609 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1610 if (unlikely(err < 0)) {
1612 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field, optional 16-bit SDU length (only for SDU-start frames, when
 * @sdulen is non-zero), payload, and a zero FCS placeholder when CRC16
 * is negotiated (the real FCS is filled in at transmit time).
 * Returns the skb or an ERR_PTR. */
1617 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1619 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1620 struct sk_buff *skb;
/* hlen = L2CAP header + 2-byte control field; SAR/FCS bytes are added
 * below when applicable (elided lines adjust hlen for sdulen). */
1621 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1622 struct l2cap_hdr *lh;
1624 BT_DBG("sk %p len %d", sk, (int)len);
1627 return ERR_PTR(-ENOTCONN);
/* Reserve room for the 2-byte FCS when CRC16 is in use. */
1632 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1635 count = min_t(unsigned int, (conn->mtu - hlen), len);
1636 skb = bt_skb_send_alloc(sk, count + hlen,
1637 msg->msg_flags & MSG_DONTWAIT, &err);
1639 return ERR_PTR(-ENOMEM);
1641 /* Create L2CAP header */
1642 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1643 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1644 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1645 put_unaligned_le16(control, skb_put(skb, 2));
/* SDU length field is present only on the first segment of an SDU. */
1647 put_unaligned_le16(sdulen, skb_put(skb, 2));
1649 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1650 if (unlikely(err < 0)) {
1652 return ERR_PTR(err);
/* FCS placeholder; computed over the frame just before transmission. */
1655 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1656 put_unaligned_le16(0, skb_put(skb, 2));
1658 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START / CONTINUE /
 * END sequence of I-frames, building them on a private queue first so
 * a mid-sequence allocation failure leaves TX_QUEUE untouched, then
 * splicing the whole sequence onto the socket's TX queue. */
1662 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1664 struct l2cap_pinfo *pi = l2cap_pi(sk);
1665 struct sk_buff *skb;
1666 struct sk_buff_head sar_queue;
1670 skb_queue_head_init(&sar_queue);
1671 control = L2CAP_SDU_START;
/* The START frame carries the total SDU length in its sdulen field. */
1672 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1674 return PTR_ERR(skb);
1676 __skb_queue_tail(&sar_queue, skb);
1677 len -= pi->remote_mps;
1678 size += pi->remote_mps;
/* More than one segment still to go: CONTINUE; otherwise END. */
1683 if (len > pi->remote_mps) {
1684 control = L2CAP_SDU_CONTINUE;
1685 buflen = pi->remote_mps;
1687 control = L2CAP_SDU_END;
1691 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Failure mid-SDU: drop the partial sequence, never queue it. */
1693 skb_queue_purge(&sar_queue);
1694 return PTR_ERR(skb);
1697 __skb_queue_tail(&sar_queue, skb);
1701 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1702 spin_lock_bh(&pi->send_lock);
1703 if (sk->sk_send_head == NULL)
1704 sk->sk_send_head = sar_queue.next;
1705 spin_unlock_bh(&pi->send_lock);
/* sendmsg() entry point.  Dispatches on socket type and channel mode:
 * SOCK_DGRAM -> connectionless PDU; basic mode -> single PDU bounded by
 * the outgoing MTU; ERTM/streaming -> single I-frame if the SDU fits in
 * the remote MPS, otherwise SAR segmentation, then kick the transmit
 * engine.  Requires a connected socket; MSG_OOB is not supported. */
1710 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1712 struct sock *sk = sock->sk;
1713 struct l2cap_pinfo *pi = l2cap_pi(sk);
1714 struct sk_buff *skb;
1718 BT_DBG("sock %p, sk %p", sock, sk);
1720 err = sock_error(sk);
/* L2CAP has no out-of-band data concept. */
1724 if (msg->msg_flags & MSG_OOB)
1729 if (sk->sk_state != BT_CONNECTED) {
1734 /* Connectionless channel */
1735 if (sk->sk_type == SOCK_DGRAM) {
1736 skb = l2cap_create_connless_pdu(sk, msg, len);
1740 l2cap_do_send(sk, skb);
1747 case L2CAP_MODE_BASIC:
1748 /* Check outgoing MTU */
1749 if (len > pi->omtu) {
1754 /* Create a basic PDU */
1755 skb = l2cap_create_basic_pdu(sk, msg, len);
1761 l2cap_do_send(sk, skb);
1765 case L2CAP_MODE_ERTM:
1766 case L2CAP_MODE_STREAMING:
1767 /* Entire SDU fits into one PDU */
1768 if (len <= pi->remote_mps) {
1769 control = L2CAP_SDU_UNSEGMENTED;
1770 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1775 __skb_queue_tail(TX_QUEUE(sk), skb);
/* Only ERTM transmits concurrently from timer context, so only
 * ERTM needs send_lock around sk_send_head updates. */
1777 if (pi->mode == L2CAP_MODE_ERTM)
1778 spin_lock_bh(&pi->send_lock);
1780 if (sk->sk_send_head == NULL)
1781 sk->sk_send_head = skb;
1783 if (pi->mode == L2CAP_MODE_ERTM)
1784 spin_unlock_bh(&pi->send_lock);
1786 /* Segment SDU into multiples PDUs */
1787 err = l2cap_sar_segment_sdu(sk, msg, len);
/* Streaming mode sends unconditionally; ERTM respects the TX window. */
1792 if (pi->mode == L2CAP_MODE_STREAMING) {
1793 err = l2cap_streaming_send(sk);
1795 spin_lock_bh(&pi->send_lock);
1796 err = l2cap_ertm_send(sk);
1797 spin_unlock_bh(&pi->send_lock);
1805 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() entry point.  A deferred-setup socket completes the pending
 * connection on first read by sending the delayed connect response;
 * actual data reception is delegated to the generic bt_sock_recvmsg(). */
1814 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1816 struct sock *sk = sock->sk;
1820 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1821 struct l2cap_conn_rsp rsp;
1823 sk->sk_state = BT_CONFIG;
/* In the response, scid/dcid are from the remote's point of view. */
1825 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1826 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1827 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1828 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1829 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1830 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1838 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler: L2CAP_OPTIONS (mtu/mode/fcs/
 * tx window etc.) and L2CAP_LM link-mode flags mapped onto the newer
 * security levels. */
1841 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1843 struct sock *sk = sock->sk;
1844 struct l2cap_options opts;
1848 BT_DBG("sk %p", sk);
/* Seed opts with current values so a short user buffer only
 * overwrites the leading fields. */
1854 opts.imtu = l2cap_pi(sk)->imtu;
1855 opts.omtu = l2cap_pi(sk)->omtu;
1856 opts.flush_to = l2cap_pi(sk)->flush_to;
1857 opts.mode = l2cap_pi(sk)->mode;
1858 opts.fcs = l2cap_pi(sk)->fcs;
1859 opts.max_tx = l2cap_pi(sk)->max_tx;
1860 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1862 len = min_t(unsigned int, sizeof(opts), optlen);
1863 if (copy_from_user((char *) &opts, optval, len)) {
1868 l2cap_pi(sk)->mode = opts.mode;
/* Only the modes below are accepted (rejection path is elided). */
1869 switch (l2cap_pi(sk)->mode) {
1870 case L2CAP_MODE_BASIC:
1872 case L2CAP_MODE_ERTM:
1873 case L2CAP_MODE_STREAMING:
1882 l2cap_pi(sk)->imtu = opts.imtu;
1883 l2cap_pi(sk)->omtu = opts.omtu;
1884 l2cap_pi(sk)->fcs = opts.fcs;
1885 l2cap_pi(sk)->max_tx = opts.max_tx;
1886 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1890 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy link-mode bits to security levels; the strongest
 * requested flag wins (checked in ascending order). */
1895 if (opt & L2CAP_LM_AUTH)
1896 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1897 if (opt & L2CAP_LM_ENCRYPT)
1898 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1899 if (opt & L2CAP_LM_SECURE)
1900 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1902 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1903 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* SOL_BLUETOOTH setsockopt handler: BT_SECURITY and BT_DEFER_SETUP.
 * SOL_L2CAP is forwarded to the legacy handler for compatibility. */
1915 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1917 struct sock *sk = sock->sk;
1918 struct bt_security sec;
1922 BT_DBG("sk %p", sk);
1924 if (level == SOL_L2CAP)
1925 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1927 if (level != SOL_BLUETOOTH)
1928 return -ENOPROTOOPT;
/* BT_SECURITY only applies to connection-oriented or raw sockets. */
1934 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1935 && sk->sk_type != SOCK_RAW) {
1940 sec.level = BT_SECURITY_LOW;
1942 len = min_t(unsigned int, sizeof(sec), optlen);
1943 if (copy_from_user((char *) &sec, optval, len)) {
1948 if (sec.level < BT_SECURITY_LOW ||
1949 sec.level > BT_SECURITY_HIGH) {
1954 l2cap_pi(sk)->sec_level = sec.level;
1957 case BT_DEFER_SETUP:
/* Deferred setup can only be toggled before the socket is connected. */
1958 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1963 if (get_user(opt, (u32 __user *) optval)) {
1968 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler: L2CAP_OPTIONS, the link-mode
 * bits reconstructed from the security level, and L2CAP_CONNINFO. */
1980 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1982 struct sock *sk = sock->sk;
1983 struct l2cap_options opts;
1984 struct l2cap_conninfo cinfo;
1988 BT_DBG("sk %p", sk);
1990 if (get_user(len, optlen))
1997 opts.imtu = l2cap_pi(sk)->imtu;
1998 opts.omtu = l2cap_pi(sk)->omtu;
1999 opts.flush_to = l2cap_pi(sk)->flush_to;
2000 opts.mode = l2cap_pi(sk)->mode;
2001 opts.fcs = l2cap_pi(sk)->fcs;
2002 opts.max_tx = l2cap_pi(sk)->max_tx;
2003 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2005 len = min_t(unsigned int, len, sizeof(opts));
2006 if (copy_to_user(optval, (char *) &opts, len))
/* Reverse-map the security level onto the legacy L2CAP_LM_* bits. */
2012 switch (l2cap_pi(sk)->sec_level) {
2013 case BT_SECURITY_LOW:
2014 opt = L2CAP_LM_AUTH;
2016 case BT_SECURITY_MEDIUM:
2017 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2019 case BT_SECURITY_HIGH:
2020 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2028 if (l2cap_pi(sk)->role_switch)
2029 opt |= L2CAP_LM_MASTER;
2031 if (l2cap_pi(sk)->force_reliable)
2032 opt |= L2CAP_LM_RELIABLE;
2034 if (put_user(opt, (u32 __user *) optval))
2038 case L2CAP_CONNINFO:
/* Connection info is valid once connected, or while a deferred-setup
 * connect is pending (BT_CONNECT2 + defer_setup). */
2039 if (sk->sk_state != BT_CONNECTED &&
2040 !(sk->sk_state == BT_CONNECT2 &&
2041 bt_sk(sk)->defer_setup)) {
2046 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2047 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2049 len = min_t(unsigned int, len, sizeof(cinfo));
2050 if (copy_to_user(optval, (char *) &cinfo, len))
/* SOL_BLUETOOTH getsockopt handler: BT_SECURITY and BT_DEFER_SETUP.
 * SOL_L2CAP is forwarded to the legacy handler. */
2064 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2066 struct sock *sk = sock->sk;
2067 struct bt_security sec;
2070 BT_DBG("sk %p", sk);
2072 if (level == SOL_L2CAP)
2073 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2075 if (level != SOL_BLUETOOTH)
2076 return -ENOPROTOOPT;
2078 if (get_user(len, optlen))
/* BT_SECURITY only applies to connection-oriented or raw sockets. */
2085 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2086 && sk->sk_type != SOCK_RAW) {
2091 sec.level = l2cap_pi(sk)->sec_level;
2093 len = min_t(unsigned int, len, sizeof(sec));
2094 if (copy_to_user(optval, (char *) &sec, len))
2099 case BT_DEFER_SETUP:
2100 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2105 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() entry point.  For ERTM channels, first wait until the
 * peer has acked all outstanding frames, then close the channel and,
 * if SO_LINGER is set, wait for it to reach BT_CLOSED. */
2119 static int l2cap_sock_shutdown(struct socket *sock, int how)
2121 struct sock *sk = sock->sk;
2124 BT_DBG("sock %p, sk %p", sock, sk);
2130 if (!sk->sk_shutdown) {
/* Don't tear down an ERTM channel with unacked data in flight. */
2131 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2132 err = __l2cap_wait_ack(sk);
2134 sk->sk_shutdown = SHUTDOWN_MASK;
2135 l2cap_sock_clear_timer(sk);
2136 __l2cap_sock_close(sk, 0);
2138 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2139 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release() entry point: full shutdown (both directions) followed by
 * killing the socket. */
2146 static int l2cap_sock_release(struct socket *sock)
2148 struct sock *sk = sock->sk;
2151 BT_DBG("sock %p, sk %p", sock, sk);
2156 err = l2cap_sock_shutdown(sock, 2);
2159 l2cap_sock_kill(sk);
/* Mark a channel as fully configured.  An outgoing channel wakes the
 * connect()ing thread; an incoming one (bt parent set) notifies the
 * listener so accept() can pick it up. */
2163 static void l2cap_chan_ready(struct sock *sk)
2165 struct sock *parent = bt_sk(sk)->parent;
2167 BT_DBG("sk %p, parent %p", sk, parent);
/* Configuration is complete; drop conf state and the config timer. */
2169 l2cap_pi(sk)->conf_state = 0;
2170 l2cap_sock_clear_timer(sk);
2173 /* Outgoing channel.
2174 * Wake up socket sleeping on connect.
2176 sk->sk_state = BT_CONNECTED;
2177 sk->sk_state_change(sk);
2179 /* Incoming channel.
2180 * Wake up socket sleeping on accept.
2182 parent->sk_data_ready(parent, 0);
2186 /* Copy frame to all raw sockets on that connection */
2187 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2189 struct l2cap_chan_list *l = &conn->chan_list;
2190 struct sk_buff *nskb;
2193 BT_DBG("conn %p", conn);
/* Walk the per-connection channel list under its read lock. */
2195 read_lock(&l->lock);
2196 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2197 if (sk->sk_type != SOCK_RAW)
2200 /* Don't send frame to the socket it came from */
/* Clone rather than share: each raw socket owns its own copy. */
2203 nskb = skb_clone(skb, GFP_ATOMIC);
/* If the receive queue rejects the clone, it must be freed
 * (free path is in the elided lines). */
2207 if (sock_queue_rcv_skb(sk, nskb))
2210 read_unlock(&l->lock);
2213 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel PDU: L2CAP header (CID 0x0001), command
 * header (code/ident/len), then @dlen bytes of @data, fragmented into
 * a frag_list when larger than the connection MTU.  Returns the skb or
 * NULL on allocation failure (error paths are in elided lines). */
2214 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2215 u8 code, u8 ident, u16 dlen, void *data)
2217 struct sk_buff *skb, **frag;
2218 struct l2cap_cmd_hdr *cmd;
2219 struct l2cap_hdr *lh;
2222 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2223 conn, code, ident, dlen);
2225 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2226 count = min_t(unsigned int, conn->mtu, len);
2228 skb = bt_skb_alloc(count, GFP_ATOMIC);
2232 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
/* L2CAP length field covers command header + payload. */
2233 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2234 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2236 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2239 cmd->len = cpu_to_le16(dlen);
/* Remaining room in the first skb after both headers. */
2242 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2243 memcpy(skb_put(skb, count), data, count);
2249 /* Continuation fragments (no L2CAP header) */
2250 frag = &skb_shinfo(skb)->frag_list;
2252 count = min_t(unsigned int, conn->mtu, len);
2254 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2258 memcpy(skb_put(*frag, count), data, count);
2263 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: return its type, payload
 * length and value (1/2/4-byte values converted from little endian;
 * larger values returned as a pointer).  Advances *ptr past the option
 * (advance is in elided lines) and returns the consumed length. */
2273 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2275 struct l2cap_conf_opt *opt = *ptr;
2278 len = L2CAP_CONF_OPT_SIZE + opt->len;
2286 *val = *((u8 *) opt->val);
2290 *val = __le16_to_cpu(*((__le16 *) opt->val));
2294 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-length option: hand back a pointer to the raw bytes. */
2298 *val = (unsigned long) opt->val;
2302 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type, len, value) at *ptr, encoding
 * 1/2/4-byte values in little endian and copying larger values raw,
 * then advance *ptr past the option. */
2306 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2308 struct l2cap_conf_opt *opt = *ptr;
2310 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2317 *((u8 *) opt->val) = val;
2321 *((__le16 *) opt->val) = cpu_to_le16(val);
2325 *((__le32 *) opt->val) = cpu_to_le32(val);
/* Variable-length option: val is a pointer to the payload. */
2329 memcpy(opt->val, (void *) val, len);
2333 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: acknowledge received frames that have not
 * been acked by piggybacking yet. */
2336 static void l2cap_ack_timeout(unsigned long arg)
2338 struct sock *sk = (void *) arg;
2341 l2cap_send_ack(l2cap_pi(sk));
/* Initialise per-channel ERTM state: sequence counters, the retrans/
 * monitor/ack timers, the SREJ and busy queues, the send lock and the
 * local-busy workqueue item. */
2345 static inline void l2cap_ertm_init(struct sock *sk)
2347 l2cap_pi(sk)->expected_ack_seq = 0;
2348 l2cap_pi(sk)->unacked_frames = 0;
2349 l2cap_pi(sk)->buffer_seq = 0;
2350 l2cap_pi(sk)->num_acked = 0;
2351 l2cap_pi(sk)->frames_sent = 0;
2353 setup_timer(&l2cap_pi(sk)->retrans_timer,
2354 l2cap_retrans_timeout, (unsigned long) sk);
2355 setup_timer(&l2cap_pi(sk)->monitor_timer,
2356 l2cap_monitor_timeout, (unsigned long) sk);
2357 setup_timer(&l2cap_pi(sk)->ack_timer,
2358 l2cap_ack_timeout, (unsigned long) sk);
2360 __skb_queue_head_init(SREJ_QUEUE(sk));
2361 __skb_queue_head_init(BUSY_QUEUE(sk));
2362 spin_lock_init(&l2cap_pi(sk)->send_lock);
/* Deferred processing while the local receive side is busy. */
2364 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* Check whether @mode is supported by both ends: non-zero iff the mode's
 * feature bit is set in the peer's feature mask AND in the local mask
 * (ERTM/streaming bits are added locally when enable_ertm is set —
 * the condition guarding line 2371 is in the elided lines). */
2367 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2369 u32 local_feat_mask = l2cap_feat_mask;
2371 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2374 case L2CAP_MODE_ERTM:
2375 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2376 case L2CAP_MODE_STREAMING:
2377 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick the channel mode to use: keep the requested ERTM/streaming mode
 * when the peer supports it, otherwise fall back to basic mode. */
2383 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2386 case L2CAP_MODE_STREAMING:
2387 case L2CAP_MODE_ERTM:
2388 if (l2cap_mode_supported(mode, remote_feat_mask))
2392 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into @data: select the
 * channel mode (downgrading if the peer lacks ERTM/streaming), then
 * append MTU, RFC and FCS options as appropriate.  Returns the total
 * request length (return is in elided lines). */
2396 static int l2cap_build_conf_req(struct sock *sk, void *data)
2398 struct l2cap_pinfo *pi = l2cap_pi(sk);
2399 struct l2cap_conf_req *req = data;
2400 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2401 void *ptr = req->data;
2403 BT_DBG("sk %p", sk);
/* Mode selection only happens on the first config round. */
2405 if (pi->num_conf_req || pi->num_conf_rsp)
2409 case L2CAP_MODE_STREAMING:
2410 case L2CAP_MODE_ERTM:
/* Mode was explicitly requested by the application (state 2 device):
 * refuse the connection rather than downgrade. */
2411 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2412 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2413 l2cap_send_disconn_req(pi->conn, sk);
2416 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2422 case L2CAP_MODE_BASIC:
/* MTU option is only needed when it differs from the default. */
2423 if (pi->imtu != L2CAP_DEFAULT_MTU)
2424 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2427 case L2CAP_MODE_ERTM:
2428 rfc.mode = L2CAP_MODE_ERTM;
2429 rfc.txwin_size = pi->tx_win;
2430 rfc.max_transmit = pi->max_tx;
/* Timeouts are filled in by the responder per the spec. */
2431 rfc.retrans_timeout = 0;
2432 rfc.monitor_timeout = 0;
2433 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap the MPS so a maximal PDU (hdr+control+sdulen+FCS = 10 bytes
 * of overhead) still fits in the ACL MTU. */
2434 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2435 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2437 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2438 sizeof(rfc), (unsigned long) &rfc);
2440 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* Request no-FCS when we prefer it or the peer already offered it. */
2443 if (pi->fcs == L2CAP_FCS_NONE ||
2444 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2445 pi->fcs = L2CAP_FCS_NONE;
2446 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2450 case L2CAP_MODE_STREAMING:
2451 rfc.mode = L2CAP_MODE_STREAMING;
2453 rfc.max_transmit = 0;
2454 rfc.retrans_timeout = 0;
2455 rfc.monitor_timeout = 0;
2456 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2457 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2458 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2460 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2461 sizeof(rfc), (unsigned long) &rfc);
2463 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2466 if (pi->fcs == L2CAP_FCS_NONE ||
2467 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2468 pi->fcs = L2CAP_FCS_NONE;
2469 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2474 /* FIXME: Need actual value of the flush timeout */
2475 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2476 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2478 req->dcid = cpu_to_le16(pi->dcid);
2479 req->flags = cpu_to_le16(0);
/* Parse the peer's buffered configuration request (pi->conf_req) and
 * build our response into @data: walk the options, negotiate the mode,
 * validate the MTU, and echo back an RFC option with our settings.
 * Returns the response length (return is in elided lines) or
 * -ECONNREFUSED when the requested mode cannot be accepted.
 *
 * FIX: the RFC option echoed to the peer is a little-endian wire
 * structure, so host values must be stored with cpu_to_le16(), not
 * read with le16_to_cpu().  The original used le16_to_cpu() when
 * assigning max_pdu_size / retrans_timeout / monitor_timeout, which
 * sends byte-swapped values on big-endian hosts. */
2484 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2486 struct l2cap_pinfo *pi = l2cap_pi(sk);
2487 struct l2cap_conf_rsp *rsp = data;
2488 void *ptr = rsp->data;
2489 void *req = pi->conf_req;
2490 int len = pi->conf_len;
2491 int type, hint, olen;
2493 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2494 u16 mtu = L2CAP_DEFAULT_MTU;
2495 u16 result = L2CAP_CONF_SUCCESS;
2497 BT_DBG("sk %p", sk);
/* First pass: decode every option the peer sent. */
2499 while (len >= L2CAP_CONF_OPT_SIZE) {
2500 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; unknown non-hints must be rejected. */
2502 hint = type & L2CAP_CONF_HINT;
2503 type &= L2CAP_CONF_MASK;
2506 case L2CAP_CONF_MTU:
2510 case L2CAP_CONF_FLUSH_TO:
2514 case L2CAP_CONF_QOS:
2517 case L2CAP_CONF_RFC:
2518 if (olen == sizeof(rfc))
2519 memcpy(&rfc, (void *) val, olen);
2522 case L2CAP_CONF_FCS:
2523 if (val == L2CAP_FCS_NONE)
2524 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* Unknown non-hint option: collect its type for the reject list. */
2532 result = L2CAP_CONF_UNKNOWN;
2533 *((u8 *) ptr++) = type;
/* Mode negotiation happens only on the first config exchange. */
2538 if (pi->num_conf_rsp || pi->num_conf_req)
2542 case L2CAP_MODE_STREAMING:
2543 case L2CAP_MODE_ERTM:
2544 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2545 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2546 return -ECONNREFUSED;
2549 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* Peer asked for a different mode than we settled on: propose ours,
 * and give up after the second disagreement. */
2554 if (pi->mode != rfc.mode) {
2555 result = L2CAP_CONF_UNACCEPT;
2556 rfc.mode = pi->mode;
2558 if (pi->num_conf_rsp == 1)
2559 return -ECONNREFUSED;
2561 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2562 sizeof(rfc), (unsigned long) &rfc);
2566 if (result == L2CAP_CONF_SUCCESS) {
2567 /* Configure output options and let the other side know
2568 * which ones we don't like. */
2570 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2571 result = L2CAP_CONF_UNACCEPT;
2574 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2576 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2579 case L2CAP_MODE_BASIC:
2580 pi->fcs = L2CAP_FCS_NONE;
2581 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2584 case L2CAP_MODE_ERTM:
2585 pi->remote_tx_win = rfc.txwin_size;
2586 pi->remote_max_tx = rfc.max_transmit;
/* Cap the peer's MPS to what fits in the ACL MTU (10 bytes of
 * L2CAP/control/FCS overhead).
 * NOTE(review): rfc.max_pdu_size here is still little-endian wire
 * format while pi->conn->mtu is host order — on big-endian hosts
 * this comparison is suspect; confirm against the upstream fix. */
2587 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2588 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2590 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* We are the responder: fill in the spec-mandated timeouts,
 * stored little-endian in the echoed RFC option. */
2592 rfc.retrans_timeout =
2593 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2594 rfc.monitor_timeout =
2595 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2597 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2599 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2600 sizeof(rfc), (unsigned long) &rfc);
2604 case L2CAP_MODE_STREAMING:
2605 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2606 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2608 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2610 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2612 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2613 sizeof(rfc), (unsigned long) &rfc);
/* Any other mode: reject with a basic RFC carrying our mode. */
2618 result = L2CAP_CONF_UNACCEPT;
2620 memset(&rfc, 0, sizeof(rfc));
2621 rfc.mode = pi->mode;
2624 if (result == L2CAP_CONF_SUCCESS)
2625 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2627 rsp->scid = cpu_to_le16(pi->dcid);
2628 rsp->result = cpu_to_le16(result);
2629 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration response @rsp and build our follow-up
 * request into @data, adopting the values the peer proposed (MTU,
 * flush timeout, RFC).  On success, latch the negotiated ERTM/streaming
 * parameters into the channel.  Returns the new request length
 * (return is in elided lines) or -ECONNREFUSED on a mode conflict. */
2634 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2636 struct l2cap_pinfo *pi = l2cap_pi(sk);
2637 struct l2cap_conf_req *req = data;
2638 void *ptr = req->data;
2641 struct l2cap_conf_rfc rfc;
2643 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2645 while (len >= L2CAP_CONF_OPT_SIZE) {
2646 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2649 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: mark unacceptable and
 * counter with the minimum instead. */
2650 if (val < L2CAP_DEFAULT_MIN_MTU) {
2651 *result = L2CAP_CONF_UNACCEPT;
2652 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2655 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2658 case L2CAP_CONF_FLUSH_TO:
2660 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2664 case L2CAP_CONF_RFC:
2665 if (olen == sizeof(rfc))
2666 memcpy(&rfc, (void *)val, olen);
/* A state-2 device fixed its mode up front; the peer may not
 * override it in the response. */
2668 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2669 rfc.mode != pi->mode)
2670 return -ECONNREFUSED;
2672 pi->mode = rfc.mode;
2675 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2676 sizeof(rfc), (unsigned long) &rfc);
/* Response accepted: record the negotiated mode parameters. */
2681 if (*result == L2CAP_CONF_SUCCESS) {
2683 case L2CAP_MODE_ERTM:
2684 pi->remote_tx_win = rfc.txwin_size;
2685 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2686 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2687 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2689 case L2CAP_MODE_STREAMING:
2690 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2694 req->dcid = cpu_to_le16(pi->dcid);
2695 req->flags = cpu_to_le16(0x0000);
/* Build a minimal configuration response (scid, result, flags) with no
 * options; returns the response length (return is in elided lines). */
2700 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2702 struct l2cap_conf_rsp *rsp = data;
2703 void *ptr = rsp->data;
2705 BT_DBG("sk %p", sk);
/* The response's source CID is the peer's CID, i.e. our dcid. */
2707 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2708 rsp->result = cpu_to_le16(result);
2709 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from an accepted configuration response and
 * latch the negotiated ERTM/streaming parameters into the channel.
 * No-op for basic-mode channels. */
2714 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2716 struct l2cap_pinfo *pi = l2cap_pi(sk);
2719 struct l2cap_conf_rfc rfc;
2721 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2723 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2726 while (len >= L2CAP_CONF_OPT_SIZE) {
2727 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2730 case L2CAP_CONF_RFC:
2731 if (olen == sizeof(rfc))
2732 memcpy(&rfc, (void *)val, olen);
/* rfc fields are little-endian wire format: convert on read. */
2739 case L2CAP_MODE_ERTM:
2740 pi->remote_tx_win = rfc.txwin_size;
2741 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2742 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2743 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2745 case L2CAP_MODE_STREAMING:
2746 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject.  If it rejects our outstanding feature-mask
 * information request (matching ident), treat the exchange as done and
 * proceed with connection setup anyway. */
2750 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2752 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; other reasons are ignored here. */
2754 if (rej->reason != 0x0000)
2757 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2758 cmd->ident == conn->info_ident) {
2759 del_timer(&conn->info_timer);
2761 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2762 conn->info_ident = 0;
2764 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listener for the PSM,
 * enforce link security (except for SDP), allocate and register a child
 * socket, pick result/status based on security and defer_setup, send
 * the Connection Response, and kick off a feature-mask info exchange
 * when the response is left pending. */
2770 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2772 struct l2cap_chan_list *list = &conn->chan_list;
2773 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2774 struct l2cap_conn_rsp rsp;
2775 struct sock *sk, *parent;
2776 int result, status = L2CAP_CS_NO_INFO;
2778 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2779 __le16 psm = req->psm;
2781 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2783 /* Check if we have socket listening on psm */
2784 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2786 result = L2CAP_CR_BAD_PSM;
2790 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2791 if (psm != cpu_to_le16(0x0001) &&
2792 !hci_conn_check_link_mode(conn->hcon)) {
2793 conn->disc_reason = 0x05;
2794 result = L2CAP_CR_SEC_BLOCK;
2798 result = L2CAP_CR_NO_MEM;
2800 /* Check for backlog size */
2801 if (sk_acceptq_is_full(parent)) {
2802 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2806 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2810 write_lock_bh(&list->lock);
2812 /* Check if we already have channel with that dcid */
2813 if (__l2cap_get_chan_by_dcid(list, scid)) {
2814 write_unlock_bh(&list->lock);
2815 sock_set_flag(sk, SOCK_ZAPPED);
2816 l2cap_sock_kill(sk);
2820 hci_conn_hold(conn->hcon);
2822 l2cap_sock_init(sk, parent);
2823 bacpy(&bt_sk(sk)->src, conn->src);
2824 bacpy(&bt_sk(sk)->dst, conn->dst);
2825 l2cap_pi(sk)->psm = psm;
/* The peer's source CID is our destination CID and vice versa. */
2826 l2cap_pi(sk)->dcid = scid;
2828 __l2cap_chan_add(conn, sk, parent);
2829 dcid = l2cap_pi(sk)->scid;
2831 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Remember the request ident for a possibly deferred response. */
2833 l2cap_pi(sk)->ident = cmd->ident;
2835 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2836 if (l2cap_check_security(sk)) {
2837 if (bt_sk(sk)->defer_setup) {
2838 sk->sk_state = BT_CONNECT2;
2839 result = L2CAP_CR_PEND;
2840 status = L2CAP_CS_AUTHOR_PEND;
2841 parent->sk_data_ready(parent, 0);
2843 sk->sk_state = BT_CONFIG;
2844 result = L2CAP_CR_SUCCESS;
2845 status = L2CAP_CS_NO_INFO;
2848 sk->sk_state = BT_CONNECT2;
2849 result = L2CAP_CR_PEND;
2850 status = L2CAP_CS_AUTHEN_PEND;
/* Feature exchange not finished yet: answer "pending". */
2853 sk->sk_state = BT_CONNECT2;
2854 result = L2CAP_CR_PEND;
2855 status = L2CAP_CS_NO_INFO;
2858 write_unlock_bh(&list->lock);
2861 bh_unlock_sock(parent);
2864 rsp.scid = cpu_to_le16(scid);
2865 rsp.dcid = cpu_to_le16(dcid);
2866 rsp.result = cpu_to_le16(result);
2867 rsp.status = cpu_to_le16(status);
2868 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending with no info yet: start the feature-mask info request. */
2870 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2871 struct l2cap_info_req info;
2872 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2874 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2875 conn->info_ident = l2cap_get_ident(conn);
2877 mod_timer(&conn->info_timer, jiffies +
2878 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2880 l2cap_send_cmd(conn, conn->info_ident,
2881 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response: locate our channel (by scid, or by
 * ident when the response carries no valid scid), then either start
 * configuration (success), mark the connection pending, or tear the
 * channel down (refused). */
2887 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2889 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2890 u16 scid, dcid, result, status;
2894 scid = __le16_to_cpu(rsp->scid);
2895 dcid = __le16_to_cpu(rsp->dcid);
2896 result = __le16_to_cpu(rsp->result);
2897 status = __le16_to_cpu(rsp->status);
2899 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2902 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
/* Fallback lookup by the request ident we stored earlier. */
2906 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2912 case L2CAP_CR_SUCCESS:
2913 sk->sk_state = BT_CONFIG;
2914 l2cap_pi(sk)->ident = 0;
2915 l2cap_pi(sk)->dcid = dcid;
2916 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2917 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
/* Kick off configuration immediately. */
2919 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2920 l2cap_build_conf_req(sk, req), req);
2921 l2cap_pi(sk)->num_conf_req++;
2925 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
/* Any non-success, non-pending result refuses the connection. */
2929 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a Configuration Request: accumulate (possibly fragmented)
 * option data in the channel's conf_req buffer; when the final fragment
 * arrives, parse it, send our response, and if both directions are
 * configured, finish channel setup (FCS selection, sequence reset,
 * ERTM init).  Also sends our own config request if not yet sent. */
2937 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2939 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2945 dcid = __le16_to_cpu(req->dcid);
2946 flags = __le16_to_cpu(req->flags);
2948 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2950 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2954 if (sk->sk_state == BT_DISCONN)
2957 /* Reject if config buffer is too small. */
2958 len = cmd_len - sizeof(*req);
2959 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2960 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2961 l2cap_build_conf_rsp(sk, rsp,
2962 L2CAP_CONF_REJECT, flags), rsp);
/* Buffer the fragment. */
2967 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2968 l2cap_pi(sk)->conf_len += len;
/* Continuation flag set: more fragments will follow. */
2970 if (flags & 0x0001) {
2971 /* Incomplete config. Send empty response. */
2972 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2973 l2cap_build_conf_rsp(sk, rsp,
2974 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2978 /* Complete config. */
2979 len = l2cap_parse_conf_req(sk, rsp);
/* Negative length means the request was unacceptable: disconnect. */
2981 l2cap_send_disconn_req(conn, sk);
2985 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2986 l2cap_pi(sk)->num_conf_rsp++;
2988 /* Reset config buffer. */
2989 l2cap_pi(sk)->conf_len = 0;
2991 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: finalise the channel. */
2994 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* CRC16 unless both sides agreed on no FCS. */
2995 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2996 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2997 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2999 sk->sk_state = BT_CONNECTED;
3001 l2cap_pi(sk)->next_tx_seq = 0;
3002 l2cap_pi(sk)->expected_tx_seq = 0;
3003 __skb_queue_head_init(TX_QUEUE(sk));
3004 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3005 l2cap_ertm_init(sk);
3007 l2cap_chan_ready(sk);
/* We haven't sent our own config request yet: do it now. */
3011 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3013 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3014 l2cap_build_conf_req(sk, buf), buf);
3015 l2cap_pi(sk)->num_conf_req++;
/* Handle a Configuration Response: on success record negotiated RFC
 * parameters; on "unacceptable parameters" re-negotiate (bounded by
 * L2CAP_CONF_MAX_CONF_RSP attempts); otherwise disconnect.  When both
 * directions are done, finalise the channel like l2cap_config_req. */
3023 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3025 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3026 u16 scid, flags, result;
/* NOTE(review): cmd->len is a little-endian wire field
 * (cpu_to_le16 in l2cap_build_cmd) used here without conversion —
 * likely an endianness bug on big-endian hosts; confirm whether the
 * signalling dispatcher converts it in place before this call. */
3028 int len = cmd->len - sizeof(*rsp);
3030 scid = __le16_to_cpu(rsp->scid);
3031 flags = __le16_to_cpu(rsp->flags);
3032 result = __le16_to_cpu(rsp->result);
3034 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3035 scid, flags, result);
3037 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3042 case L2CAP_CONF_SUCCESS:
3043 l2cap_conf_rfc_get(sk, rsp->data, len);
3046 case L2CAP_CONF_UNACCEPT:
3047 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* The peer's counter-proposal must fit our request buffer. */
3050 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3051 l2cap_send_disconn_req(conn, sk);
3055 /* throw out any old stored conf requests */
3056 result = L2CAP_CONF_SUCCESS;
3057 len = l2cap_parse_conf_rsp(sk, rsp->data,
3060 l2cap_send_disconn_req(conn, sk);
3064 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3065 L2CAP_CONF_REQ, len, req);
3066 l2cap_pi(sk)->num_conf_req++;
3067 if (result != L2CAP_CONF_SUCCESS)
/* Retry budget exhausted or hard failure: disconnect. */
3073 sk->sk_state = BT_DISCONN;
3074 sk->sk_err = ECONNRESET;
3075 l2cap_sock_set_timer(sk, HZ * 5);
3076 l2cap_send_disconn_req(conn, sk);
3083 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3085 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
/* CRC16 unless both sides agreed on no FCS. */
3086 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3087 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3088 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3090 sk->sk_state = BT_CONNECTED;
3091 l2cap_pi(sk)->next_tx_seq = 0;
3092 l2cap_pi(sk)->expected_tx_seq = 0;
3093 __skb_queue_head_init(TX_QUEUE(sk));
3094 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3095 l2cap_ertm_init(sk);
3097 l2cap_chan_ready(sk);
/* Handle a Disconnection Request: acknowledge with a Disconnection
 * Response, flush all queues, stop ERTM timers, and remove/kill the
 * channel. */
3105 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3107 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3108 struct l2cap_disconn_rsp rsp;
3112 scid = __le16_to_cpu(req->scid);
3113 dcid = __le16_to_cpu(req->dcid);
3115 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The request's dcid is our local (source) CID. */
3117 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3121 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3122 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3123 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3125 sk->sk_shutdown = SHUTDOWN_MASK;
3127 skb_queue_purge(TX_QUEUE(sk));
/* ERTM channels also carry SREJ/busy queues and three timers. */
3129 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3130 skb_queue_purge(SREJ_QUEUE(sk));
3131 skb_queue_purge(BUSY_QUEUE(sk));
3132 del_timer(&l2cap_pi(sk)->retrans_timer);
3133 del_timer(&l2cap_pi(sk)->monitor_timer);
3134 del_timer(&l2cap_pi(sk)->ack_timer);
3137 l2cap_chan_del(sk, ECONNRESET);
3140 l2cap_sock_kill(sk);
/* Handle the peer's Disconnection Response to a request we sent earlier:
 * the disconnect is now complete, so purge queues, stop ERTM timers and
 * delete the channel with no error (err == 0: local, clean close). */
3144 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3146 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3150 scid = __le16_to_cpu(rsp->scid);
3151 dcid = __le16_to_cpu(rsp->dcid);
3153 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3155 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3159 skb_queue_purge(TX_QUEUE(sk));
/* Same ERTM cleanup as in l2cap_disconnect_req(). */
3161 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3162 skb_queue_purge(SREJ_QUEUE(sk));
3163 skb_queue_purge(BUSY_QUEUE(sk));
3164 del_timer(&l2cap_pi(sk)->retrans_timer);
3165 del_timer(&l2cap_pi(sk)->monitor_timer);
3166 del_timer(&l2cap_pi(sk)->ack_timer);
3169 l2cap_chan_del(sk, 0);
3172 l2cap_sock_kill(sk);
/* Answer an L2CAP Information Request.  Two query types are supported:
 * the extended feature mask and the fixed-channel map; anything else is
 * answered with L2CAP_IR_NOTSUPP. */
3176 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3178 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3181 type = __le16_to_cpu(req->type);
3183 BT_DBG("type 0x%4.4x", type);
3185 if (type == L2CAP_IT_FEAT_MASK) {
3187 u32 feat_mask = l2cap_feat_mask;
3188 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3189 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3190 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming on top of the static feature mask
 * (gated by enable_ertm in the elided surrounding code). */
3192 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Feature mask lives right after the 4-byte rsp header; may be unaligned. */
3194 put_unaligned_le32(feat_mask, rsp->data);
3195 l2cap_send_cmd(conn, cmd->ident,
3196 L2CAP_INFO_RSP, sizeof(buf), buf);
3197 } else if (type == L2CAP_IT_FIXED_CHAN) {
3199 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3200 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3201 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed channel bitmap follows the 4-byte response header. */
3202 memcpy(buf + 4, l2cap_fixed_chan, 8);
3203 l2cap_send_cmd(conn, cmd->ident,
3204 L2CAP_INFO_RSP, sizeof(buf), buf);
3206 struct l2cap_info_rsp rsp;
3207 rsp.type = cpu_to_le16(type);
3208 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3209 l2cap_send_cmd(conn, cmd->ident,
3210 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response from the peer.  After learning the
 * feature mask we either chain a fixed-channel query (if the peer
 * advertises fixed channels) or mark discovery done and kick off any
 * pending channel connections. */
3216 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3218 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3221 type = __le16_to_cpu(rsp->type);
3222 result = __le16_to_cpu(rsp->result);
3224 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* A response arrived, so stop the info-request timeout. */
3226 del_timer(&conn->info_timer);
3228 if (type == L2CAP_IT_FEAT_MASK) {
3229 conn->feat_mask = get_unaligned_le32(rsp->data);
3231 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3232 struct l2cap_info_req req;
3233 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3235 conn->info_ident = l2cap_get_ident(conn);
3237 l2cap_send_cmd(conn, conn->info_ident,
3238 L2CAP_INFO_REQ, sizeof(req), &req);
3240 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3241 conn->info_ident = 0;
/* Discovery finished: start channels that were waiting on it. */
3243 l2cap_conn_start(conn);
3245 } else if (type == L2CAP_IT_FIXED_CHAN) {
3246 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3247 conn->info_ident = 0;
3249 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signaling channel (CID 0x0001): iterate over all
 * command PDUs packed in one skb, dispatch each to its handler, and send
 * a Command Reject for any handler failure or unknown opcode. */
3255 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3257 u8 *data = skb->data;
3259 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signaling traffic. */
3262 l2cap_raw_recv(conn, skb);
3264 while (len >= L2CAP_CMD_HDR_SIZE) {
3266 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3267 data += L2CAP_CMD_HDR_SIZE;
3268 len -= L2CAP_CMD_HDR_SIZE;
3270 cmd_len = le16_to_cpu(cmd.len);
3272 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Reject PDUs whose declared length overruns the skb or whose
 * identifier is the reserved value 0. */
3274 if (cmd_len > len || !cmd.ident) {
3275 BT_DBG("corrupted command");
3280 case L2CAP_COMMAND_REJ:
3281 l2cap_command_rej(conn, &cmd, data);
3284 case L2CAP_CONN_REQ:
3285 err = l2cap_connect_req(conn, &cmd, data);
3288 case L2CAP_CONN_RSP:
3289 err = l2cap_connect_rsp(conn, &cmd, data);
3292 case L2CAP_CONF_REQ:
3293 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3296 case L2CAP_CONF_RSP:
3297 err = l2cap_config_rsp(conn, &cmd, data);
3300 case L2CAP_DISCONN_REQ:
3301 err = l2cap_disconnect_req(conn, &cmd, data);
3304 case L2CAP_DISCONN_RSP:
3305 err = l2cap_disconnect_rsp(conn, &cmd, data);
3308 case L2CAP_ECHO_REQ:
/* Echo the request payload straight back. */
3309 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3312 case L2CAP_ECHO_RSP:
3315 case L2CAP_INFO_REQ:
3316 err = l2cap_information_req(conn, &cmd, data);
3319 case L2CAP_INFO_RSP:
3320 err = l2cap_information_rsp(conn, &cmd, data);
3324 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3330 struct l2cap_cmd_rej rej;
3331 BT_DBG("error %d", err);
3333 /* FIXME: Map err to a valid reason */
3334 rej.reason = cpu_to_le16(0);
3335 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the CRC16 FCS trailer of an ERTM/streaming frame.  The skb is
 * trimmed by 2 bytes first, so skb->data + skb->len then points exactly
 * at the (just trimmed) received FCS; the CRC is computed over the L2CAP
 * header (hdr_size bytes before skb->data) plus the remaining payload. */
3345 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3347 u16 our_fcs, rcv_fcs;
/* Basic L2CAP header + 2-byte control field are covered by the FCS. */
3348 int hdr_size = L2CAP_HDR_SIZE + 2;
3350 if (pi->fcs == L2CAP_FCS_CRC16) {
3351 skb_trim(skb, skb->len - 2);
3352 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3353 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3355 if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) from the peer with the F bit set: send RNR if we are
 * locally busy, otherwise flush pending I-frames, and fall back to a plain
 * RR if nothing was transmitted so the peer still gets its F-bit reply. */
3361 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3363 struct l2cap_pinfo *pi = l2cap_pi(sk);
3366 pi->frames_sent = 0;
3367 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3369 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3371 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* Locally busy: reply RNR with F=1 and remember we sent it. */
3372 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3373 l2cap_send_sframe(pi, control);
3374 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3375 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3378 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3379 __mod_retrans_timer();
3381 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* Try to carry the F bit on outgoing I-frames. */
3383 spin_lock_bh(&pi->send_lock);
3384 l2cap_ertm_send(sk);
3385 spin_unlock_bh(&pi->send_lock);
/* No I-frame went out: answer the poll with an explicit RR. */
3387 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3388 pi->frames_sent == 0) {
3389 control |= L2CAP_SUPER_RCV_READY;
3390 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the queue
 * ordered by tx_seq.  Duplicates are detected by equal tx_seq (the elided
 * path returns an error for them); otherwise the skb is placed before the
 * first queued frame with a larger tx_seq, or appended at the tail. */
3394 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3396 struct sk_buff *next_skb;
/* Stash sequence/SAR metadata in the skb control block for reassembly. */
3398 bt_cb(skb)->tx_seq = tx_seq;
3399 bt_cb(skb)->sar = sar;
3401 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: trivial append. */
3403 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3408 if (bt_cb(next_skb)->tx_seq == tx_seq)
3411 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3412 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3416 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3419 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3421 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble an SDU from ERTM I-frames according to the SAR bits of the
 * control field, and deliver completed SDUs to the socket receive queue.
 * Protocol violations (unexpected SAR state, oversize SDU) fall through to
 * the elided failure path which disconnects the channel. */
3426 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3428 struct l2cap_pinfo *pi = l2cap_pi(sk);
3429 struct sk_buff *_skb;
3432 switch (control & L2CAP_CTRL_SAR) {
3433 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while a segmented SDU is in progress is invalid. */
3434 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3437 err = sock_queue_rcv_skb(sk, skb);
3443 case L2CAP_SDU_START:
3444 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
/* First two payload bytes of a START frame carry the total SDU length. */
3447 pi->sdu_len = get_unaligned_le16(skb->data);
3449 if (pi->sdu_len > pi->imtu)
3452 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3456 /* pull sdu_len bytes only after alloc, because of Local Busy
3457 * condition we have to be sure that this will be executed
3458 * only once, i.e., when alloc does not fail */
3461 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3463 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3464 pi->partial_sdu_len = skb->len;
3467 case L2CAP_SDU_CONTINUE:
3468 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3474 pi->partial_sdu_len += skb->len;
/* Running total must never exceed the announced SDU length. */
3475 if (pi->partial_sdu_len > pi->sdu_len)
3478 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* (SDU END case) */
3483 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* SAR_RETRY means a previous delivery attempt of this completed SDU
 * failed (e.g. receive buffer full); skip re-appending the payload. */
3489 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3490 pi->partial_sdu_len += skb->len;
3492 if (pi->partial_sdu_len > pi->imtu)
3495 if (pi->partial_sdu_len != pi->sdu_len)
3498 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3501 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
/* Clone failed: remember to retry delivery on the next attempt. */
3503 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3507 err = sock_queue_rcv_skb(sk, _skb);
3510 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3514 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3515 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
/* Failure path: tear the channel down. */
3529 l2cap_send_disconn_req(pi->conn, sk);
/* Workqueue handler that resolves a local-busy condition: wait (with
 * bounded retries) until frames parked on BUSY_QUEUE can be pushed into
 * the socket, then poll the peer (RR with P=1) to resume the ERTM flow
 * and clear the LOCAL_BUSY/RNR_SENT state. */
3534 static void l2cap_busy_work(struct work_struct *work)
3536 DECLARE_WAITQUEUE(wait, current);
3537 struct l2cap_pinfo *pi =
3538 container_of(work, struct l2cap_pinfo, busy_work);
/* struct sock is the first member of l2cap_pinfo, so this cast is valid. */
3539 struct sock *sk = (struct sock *)pi;
3540 int n_tries = 0, timeo = HZ/5, err;
3541 struct sk_buff *skb;
3546 add_wait_queue(sk_sleep(sk), &wait);
3547 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3548 set_current_state(TASK_INTERRUPTIBLE);
/* Give up after a bounded number of attempts and disconnect. */
3550 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3552 l2cap_send_disconn_req(pi->conn, sk);
3559 if (signal_pending(current)) {
3560 err = sock_intr_errno(timeo);
3565 timeo = schedule_timeout(timeo);
3568 err = sock_error(sk);
/* Drain the busy queue through the normal reassembly path. */
3572 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3573 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3574 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Still can't deliver: put the frame back and keep waiting. */
3576 skb_queue_head(BUSY_QUEUE(sk), skb);
/* ERTM sequence numbers are modulo 64. */
3580 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3587 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* We previously sent RNR: poll the peer so it resumes sending. */
3590 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3591 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3592 l2cap_send_sframe(pi, control);
3593 l2cap_pi(sk)->retry_count = 1;
3595 del_timer(&pi->retrans_timer);
3596 __mod_monitor_timer();
3598 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3601 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3602 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3604 set_current_state(TASK_RUNNING);
3605 remove_wait_queue(sk_sleep(sk), &wait);
/* Push one received ERTM frame toward the socket.  While locally busy,
 * frames are parked on BUSY_QUEUE.  If reassembly reports a busy error
 * (elided branch), enter the local-busy state: send RNR to the peer and
 * schedule l2cap_busy_work to recover. */
3610 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3612 struct l2cap_pinfo *pi = l2cap_pi(sk);
3615 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3616 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3617 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3621 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3623 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3627 /* Busy Condition */
3628 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3629 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3630 __skb_queue_tail(BUSY_QUEUE(sk), skb);
/* Tell the peer to stop sending (RNR with our current buffer_seq). */
3632 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3633 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3634 l2cap_send_sframe(pi, sctrl);
3636 pi->conn_state |= L2CAP_CONN_RNR_SENT;
/* Recovery happens asynchronously in l2cap_busy_work(). */
3638 queue_work(_busy_wq, &pi->busy_work);
/* Reassemble SDUs in streaming mode.  Unlike ERTM, streaming mode is
 * best-effort: inconsistent SAR state or oversize SDUs drop the partial
 * SDU (elided paths) rather than disconnecting the channel. */
3643 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3645 struct l2cap_pinfo *pi = l2cap_pi(sk);
3646 struct sk_buff *_skb;
3650 * TODO: We have to notify the userland if some data is lost with the
3654 switch (control & L2CAP_CTRL_SAR) {
3655 case L2CAP_SDU_UNSEGMENTED:
3656 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3661 err = sock_queue_rcv_skb(sk, skb);
3667 case L2CAP_SDU_START:
3668 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* Total SDU length is carried in the first two payload bytes. */
3673 pi->sdu_len = get_unaligned_le16(skb->data);
3676 if (pi->sdu_len > pi->imtu) {
3681 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3687 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3689 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3690 pi->partial_sdu_len = skb->len;
3694 case L2CAP_SDU_CONTINUE:
3695 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3698 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3700 pi->partial_sdu_len += skb->len;
3701 if (pi->partial_sdu_len > pi->sdu_len)
/* (SDU END case) */
3709 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3712 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3714 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3715 pi->partial_sdu_len += skb->len;
3717 if (pi->partial_sdu_len > pi->imtu)
3720 if (pi->partial_sdu_len == pi->sdu_len) {
/* NOTE(review): skb_clone(GFP_ATOMIC) can return NULL; no NULL
 * check is visible here before sock_queue_rcv_skb() — confirm the
 * elided lines guard this, otherwise it is a potential crash. */
3721 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3722 err = sock_queue_rcv_skb(sk, _skb);
/* After a missing frame is finally received, deliver from the SREJ queue
 * every consecutive buffered frame starting at tx_seq, advancing both the
 * expected sequence and buffer_seq_srej (modulo 64) as the gap closes. */
3737 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3739 struct sk_buff *skb;
3742 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Queue is tx_seq-ordered: stop at the first remaining hole. */
3743 if (bt_cb(skb)->tx_seq != tx_seq)
3746 skb = skb_dequeue(SREJ_QUEUE(sk));
3747 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3748 l2cap_ertm_reassembly_sdu(sk, skb, control);
3749 l2cap_pi(sk)->buffer_seq_srej =
3750 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3751 tx_seq = (tx_seq + 1) % 64;
/* Re-send SREJ S-frames for every outstanding sequence number up to and
 * including tx_seq.  Matching entries are consumed (elided free/del);
 * non-matching ones are re-sent and moved to the back of the SREJ list. */
3755 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3757 struct l2cap_pinfo *pi = l2cap_pi(sk);
3758 struct srej_list *l, *tmp;
3761 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3762 if (l->tx_seq == tx_seq) {
3767 control = L2CAP_SUPER_SELECT_REJECT;
3768 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3769 l2cap_send_sframe(pi, control);
/* Rotate the entry to the list tail to keep the request order. */
3771 list_add_tail(&l->list, SREJ_LIST(sk));
3775 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3777 struct l2cap_pinfo *pi = l2cap_pi(sk);
3778 struct srej_list *new;
3781 while (tx_seq != pi->expected_tx_seq) {
3782 control = L2CAP_SUPER_SELECT_REJECT;
3783 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3784 l2cap_send_sframe(pi, control);
3786 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3787 new->tx_seq = pi->expected_tx_seq;
3788 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3789 list_add_tail(&new->list, SREJ_LIST(sk));
3791 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* Core ERTM I-frame receive path: acknowledge the req_seq it carries,
 * validate tx_seq against the receive window, run the SREJ recovery state
 * machine for out-of-sequence frames, deliver in-sequence frames, and
 * send an acknowledgment after every num_to_ack frames. */
3794 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3796 struct l2cap_pinfo *pi = l2cap_pi(sk);
3797 u8 tx_seq = __get_txseq(rx_control);
3798 u8 req_seq = __get_reqseq(rx_control);
3799 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3800 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every sixth of the transmit window. */
3801 int num_to_ack = (pi->tx_win/6) + 1;
3804 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F=1 while waiting for it ends the WAIT_F (poll outstanding) state. */
3806 if (L2CAP_CTRL_FINAL & rx_control &&
3807 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3808 del_timer(&pi->monitor_timer);
3809 if (pi->unacked_frames > 0)
3810 __mod_retrans_timer();
3811 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3814 pi->expected_ack_seq = req_seq;
3815 l2cap_drop_acked_frames(sk);
3817 if (tx_seq == pi->expected_tx_seq)
/* Offset of tx_seq within the modulo-64 receive window. */
3820 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3821 if (tx_seq_offset < 0)
3822 tx_seq_offset += 64;
3824 /* invalid tx_seq */
3825 if (tx_seq_offset >= pi->tx_win) {
3826 l2cap_send_disconn_req(pi->conn, sk);
/* NOTE(review): conn_state is a flag bitmask everywhere else in this
 * file; '==' here only matches when LOCAL_BUSY is the *only* flag set.
 * This likely should be '& L2CAP_CONN_LOCAL_BUSY' — confirm against
 * the upstream history before changing. */
3830 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3833 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3834 struct srej_list *first;
3836 first = list_first_entry(SREJ_LIST(sk),
3837 struct srej_list, list);
3838 if (tx_seq == first->tx_seq) {
/* The oldest requested frame arrived: try to close the gap. */
3839 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3840 l2cap_check_srej_gap(sk, tx_seq);
3842 list_del(&first->list);
/* All SREJs satisfied: leave recovery and resync buffer_seq. */
3845 if (list_empty(SREJ_LIST(sk))) {
3846 pi->buffer_seq = pi->buffer_seq_srej;
3847 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3851 struct srej_list *l;
3853 /* duplicated tx_seq */
3854 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3857 list_for_each_entry(l, SREJ_LIST(sk), list) {
3858 if (l->tx_seq == tx_seq) {
3859 l2cap_resend_srejframe(sk, tx_seq);
3863 l2cap_send_srejframe(sk, tx_seq);
3866 expected_tx_seq_offset =
3867 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3868 if (expected_tx_seq_offset < 0)
3869 expected_tx_seq_offset += 64;
3871 /* duplicated tx_seq */
3872 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter SREJ recovery. */
3875 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3877 INIT_LIST_HEAD(SREJ_LIST(sk));
3878 pi->buffer_seq_srej = pi->buffer_seq;
3880 __skb_queue_head_init(SREJ_QUEUE(sk));
3881 __skb_queue_head_init(BUSY_QUEUE(sk));
3882 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3884 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3886 l2cap_send_srejframe(sk, tx_seq);
/* In-sequence path (expected label elided above). */
3891 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3893 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3894 bt_cb(skb)->tx_seq = tx_seq;
3895 bt_cb(skb)->sar = sar;
3896 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3900 if (rx_control & L2CAP_CTRL_FINAL) {
3901 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3902 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3904 l2cap_retransmit_frames(sk);
3907 err = l2cap_push_rx_skb(sk, skb, rx_control);
3913 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3914 if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver Ready (RR) S-frame: ack frames up to req_seq, then
 * react to the P/F bits — answer a poll, finish a poll/retransmit cycle
 * on F=1, or simply resume transmission. */
3924 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3926 struct l2cap_pinfo *pi = l2cap_pi(sk);
3928 pi->expected_ack_seq = __get_reqseq(rx_control);
3929 l2cap_drop_acked_frames(sk);
3931 if (rx_control & L2CAP_CTRL_POLL) {
3932 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3933 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3934 (pi->unacked_frames > 0))
3935 __mod_retrans_timer();
3937 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* In SREJ recovery: answer the poll with the tail SREJ. */
3938 l2cap_send_srejtail(sk);
3940 l2cap_send_i_or_rr_or_rnr(sk);
3943 } else if (rx_control & L2CAP_CTRL_FINAL) {
3944 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3946 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3947 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3949 l2cap_retransmit_frames(sk);
3952 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3953 (pi->unacked_frames > 0))
3954 __mod_retrans_timer();
3956 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3957 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
/* Plain RR: push any queued I-frames under the send lock. */
3960 spin_lock_bh(&pi->send_lock);
3961 l2cap_ertm_send(sk);
3962 spin_unlock_bh(&pi->send_lock);
/* Handle a Reject (REJ) S-frame: the peer asks for a go-back-N
 * retransmission starting at req_seq.  REJ_ACT suppresses duplicate
 * retransmission when the matching F=1 frame arrives later. */
3967 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3969 struct l2cap_pinfo *pi = l2cap_pi(sk);
3970 u8 tx_seq = __get_reqseq(rx_control);
3972 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3974 pi->expected_ack_seq = tx_seq;
3975 l2cap_drop_acked_frames(sk);
3977 if (rx_control & L2CAP_CTRL_FINAL) {
/* If we already retransmitted for this REJ, don't repeat it. */
3978 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3979 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3981 l2cap_retransmit_frames(sk);
3983 l2cap_retransmit_frames(sk);
3985 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3986 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the frame
 * with sequence number req_seq.  P and F bits select whether acked frames
 * are dropped first and whether SREJ_ACT bookkeeping applies. */
3989 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3991 struct l2cap_pinfo *pi = l2cap_pi(sk);
3992 u8 tx_seq = __get_reqseq(rx_control);
3994 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3996 if (rx_control & L2CAP_CTRL_POLL) {
3997 pi->expected_ack_seq = tx_seq;
3998 l2cap_drop_acked_frames(sk);
3999 l2cap_retransmit_one_frame(sk, tx_seq);
4001 spin_lock_bh(&pi->send_lock);
4002 l2cap_ertm_send(sk);
4003 spin_unlock_bh(&pi->send_lock);
/* Remember which seq the poll asked for, to match a later F=1. */
4005 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4006 pi->srej_save_reqseq = tx_seq;
4007 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4009 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F=1 answering our poll for the same seq: already handled. */
4010 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4011 pi->srej_save_reqseq == tx_seq)
4012 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4014 l2cap_retransmit_one_frame(sk, tx_seq);
4016 l2cap_retransmit_one_frame(sk, tx_seq);
4017 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4018 pi->srej_save_reqseq = tx_seq;
4019 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready (RNR) S-frame: mark the remote busy, ack
 * up to req_seq, and stop retransmitting until the peer reports ready.
 * A poll bit still requires an immediate answer. */
4024 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4026 struct l2cap_pinfo *pi = l2cap_pi(sk);
4027 u8 tx_seq = __get_reqseq(rx_control);
4029 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4030 pi->expected_ack_seq = tx_seq;
4031 l2cap_drop_acked_frames(sk);
4033 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* Remote is busy: no point in running the retransmission timer. */
4034 del_timer(&pi->retrans_timer);
4035 if (rx_control & L2CAP_CTRL_POLL)
4036 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4040 if (rx_control & L2CAP_CTRL_POLL)
4041 l2cap_send_srejtail(sk);
4043 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received S-frame to the handler for its supervisory function
 * (RR / REJ / SREJ / RNR).  An F=1 bit that answers an outstanding poll
 * clears WAIT_F and restarts the retransmission timer if needed. */
4046 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4048 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4050 if (L2CAP_CTRL_FINAL & rx_control &&
4051 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4052 del_timer(&l2cap_pi(sk)->monitor_timer);
4053 if (l2cap_pi(sk)->unacked_frames > 0)
4054 __mod_retrans_timer();
4055 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4058 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4059 case L2CAP_SUPER_RCV_READY:
4060 l2cap_data_channel_rrframe(sk, rx_control);
4063 case L2CAP_SUPER_REJECT:
4064 l2cap_data_channel_rejframe(sk, rx_control);
4067 case L2CAP_SUPER_SELECT_REJECT:
4068 l2cap_data_channel_srejframe(sk, rx_control);
4071 case L2CAP_SUPER_RCV_NOT_READY:
4072 l2cap_data_channel_rnrframe(sk, rx_control);
/* Receive path for a connection-oriented data channel (CID lookup), with
 * per-mode handling: basic mode queues straight to the socket, ERTM runs
 * full sequence/FCS validation and the I/S-frame state machines, and
 * streaming mode validates loosely and tolerates loss. */
4080 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4083 struct l2cap_pinfo *pi;
4086 int next_tx_seq_offset, req_seq_offset;
4088 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4090 BT_DBG("unknown cid 0x%4.4x", cid);
4096 BT_DBG("sk %p, len %d", sk, skb->len);
4098 if (sk->sk_state != BT_CONNECTED)
4102 case L2CAP_MODE_BASIC:
4103 /* If socket recv buffers overflows we drop data here
4104 * which is *bad* because L2CAP has to be reliable.
4105 * But we don't have any other choice. L2CAP doesn't
4106 * provide flow control mechanism. */
4108 if (pi->imtu < skb->len)
4111 if (!sock_queue_rcv_skb(sk, skb))
4115 case L2CAP_MODE_ERTM:
/* 2-byte control field leads the ERTM payload. */
4116 control = get_unaligned_le16(skb->data)
4120 if (__is_sar_start(control) && __is_iframe(control))
4123 if (pi->fcs == L2CAP_FCS_CRC16)
4127 * We can just drop the corrupted I-frame here.
4128 * Receiver will miss it and start proper recovery
4129 * procedures and ask retransmission.
/* Payload larger than the negotiated MPS is a protocol violation. */
4131 if (len > pi->mps) {
4132 l2cap_send_disconn_req(pi->conn, sk);
4136 if (l2cap_check_fcs(pi, skb))
4139 req_seq = __get_reqseq(control);
4140 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4141 if (req_seq_offset < 0)
4142 req_seq_offset += 64;
4144 next_tx_seq_offset =
4145 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4146 if (next_tx_seq_offset < 0)
4147 next_tx_seq_offset += 64;
4149 /* check for invalid req-seq */
/* The peer may not ack frames we never sent. */
4150 if (req_seq_offset > next_tx_seq_offset) {
4151 l2cap_send_disconn_req(pi->conn, sk);
4155 if (__is_iframe(control)) {
4157 l2cap_send_disconn_req(pi->conn, sk);
4161 l2cap_data_channel_iframe(sk, control, skb);
4164 l2cap_send_disconn_req(pi->conn, sk);
4168 l2cap_data_channel_sframe(sk, control, skb);
4173 case L2CAP_MODE_STREAMING:
4174 control = get_unaligned_le16(skb->data)
4178 if (__is_sar_start(control))
4181 if (pi->fcs == L2CAP_FCS_CRC16)
/* Streaming mode carries only I-frames of bounded size. */
4184 if (len > pi->mps || len < 4 || __is_sframe(control))
4187 if (l2cap_check_fcs(pi, skb))
4190 tx_seq = __get_txseq(control);
/* Accept whatever arrives; just track sequence for loss detection. */
4192 if (pi->expected_tx_seq == tx_seq)
4193 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4195 pi->expected_tx_seq = (tx_seq + 1) % 64;
4197 l2cap_streaming_reassembly_sdu(sk, skb, control);
4202 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Receive path for the connectionless channel (CID 0x0002): deliver the
 * datagram to the socket bound to the embedded PSM, subject to state and
 * incoming-MTU checks. */
4216 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
/* PSM 0 means "any address" is acceptable for the lookup. */
4220 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4224 BT_DBG("sk %p, len %d", sk, skb->len);
4226 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4229 if (l2cap_pi(sk)->imtu < skb->len)
4232 if (!sock_queue_rcv_skb(sk, skb))
/* Top-level demultiplexer for a complete, reassembled L2CAP frame: strip
 * the basic header, sanity-check the length, and route by CID to the
 * signaling, connectionless or connection-oriented receive path. */
4244 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4246 struct l2cap_hdr *lh = (void *) skb->data;
4250 skb_pull(skb, L2CAP_HDR_SIZE);
4251 cid = __le16_to_cpu(lh->cid);
4252 len = __le16_to_cpu(lh->len);
/* Header length must exactly match the remaining payload. */
4254 if (len != skb->len) {
4259 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4262 case L2CAP_CID_SIGNALING:
4263 l2cap_sig_channel(conn, skb);
4266 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the PSM as their first two bytes. */
4267 psm = get_unaligned_le16(skb->data);
4269 l2cap_conless_channel(conn, psm, skb);
4273 l2cap_data_channel(conn, cid, skb);
4278 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an ACL connection is being requested by bdaddr.  Scan the
 * listening L2CAP sockets and return the link-mode flags (accept / master
 * role) — an exact local-address match takes precedence over wildcard
 * (BDADDR_ANY) listeners. */
4280 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4282 int exact = 0, lm1 = 0, lm2 = 0;
4283 register struct sock *sk;
4284 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
4286 if (type != ACL_LINK)
4289 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4291 /* Find listening sockets and check their link_mode */
4292 read_lock(&l2cap_sk_list.lock);
4293 sk_for_each(sk, node, &l2cap_sk_list.head) {
4294 if (sk->sk_state != BT_LISTEN)
4297 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4298 lm1 |= HCI_LM_ACCEPT;
4299 if (l2cap_pi(sk)->role_switch)
4300 lm1 |= HCI_LM_MASTER;
4302 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4303 lm2 |= HCI_LM_ACCEPT;
4304 if (l2cap_pi(sk)->role_switch)
4305 lm2 |= HCI_LM_MASTER;
4308 read_unlock(&l2cap_sk_list.lock);
4310 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed.  On success attach an
 * L2CAP connection object and mark it ready; on failure tear down any
 * partial state with the mapped error. */
4313 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4315 struct l2cap_conn *conn;
4317 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4319 if (hcon->type != ACL_LINK)
4323 conn = l2cap_conn_add(hcon, status);
4325 l2cap_conn_ready(conn);
4327 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: the link is about to be disconnected; report the reason
 * code previously recorded on the L2CAP connection. */
4332 static int l2cap_disconn_ind(struct hci_conn *hcon)
4334 struct l2cap_conn *conn = hcon->l2cap_data;
4336 BT_DBG("hcon %p", hcon);
4338 if (hcon->type != ACL_LINK || !conn)
4341 return conn->disc_reason;
/* HCI callback: the ACL link is gone; destroy the L2CAP connection and
 * propagate the mapped error to all of its channels. */
4344 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4346 BT_DBG("hcon %p reason %d", hcon, reason);
4348 if (hcon->type != ACL_LINK)
4351 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connected channel, according to its
 * security level: MEDIUM gets a grace timer when encryption drops, HIGH
 * is closed immediately, and re-enabling encryption cancels the timer. */
4356 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
/* Only connection-oriented socket types carry security requirements here. */
4358 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4361 if (encrypt == 0x00) {
4362 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4363 l2cap_sock_clear_timer(sk);
/* 5 second grace period before the channel is killed. */
4364 l2cap_sock_set_timer(sk, HZ * 5);
4365 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4366 __l2cap_sock_close(sk, ECONNREFUSED);
4368 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4369 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption procedure finished for the link.
 * Walk every channel on the connection and advance its state machine:
 * established channels get an encryption check, BT_CONNECT channels send
 * their pending Connection Request, and BT_CONNECT2 channels answer the
 * peer with success or a security block depending on status. */
4373 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4375 struct l2cap_chan_list *l;
4376 struct l2cap_conn *conn = hcon->l2cap_data;
4382 l = &conn->chan_list;
4384 BT_DBG("conn %p", conn);
4386 read_lock(&l->lock);
4388 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Skip channels whose connect is pending elsewhere. */
4391 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4396 if (!status && (sk->sk_state == BT_CONNECTED ||
4397 sk->sk_state == BT_CONFIG)) {
4398 l2cap_check_encryption(sk, encrypt);
4403 if (sk->sk_state == BT_CONNECT) {
/* Security now satisfied: fire the deferred Connection Request. */
4405 struct l2cap_conn_req req;
4406 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4407 req.psm = l2cap_pi(sk)->psm;
4409 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4410 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4412 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4413 L2CAP_CONN_REQ, sizeof(req), &req);
4415 l2cap_sock_clear_timer(sk);
4416 l2cap_sock_set_timer(sk, HZ / 10);
4418 } else if (sk->sk_state == BT_CONNECT2) {
4419 struct l2cap_conn_rsp rsp;
4423 sk->sk_state = BT_CONFIG;
4424 result = L2CAP_CR_SUCCESS;
4426 sk->sk_state = BT_DISCONN;
4427 l2cap_sock_set_timer(sk, HZ / 10);
/* Security procedure failed: refuse the incoming connection. */
4428 result = L2CAP_CR_SEC_BLOCK;
4431 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4432 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4433 rsp.result = cpu_to_le16(result);
4434 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4435 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4436 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4442 read_unlock(&l->lock);
/* HCI callback: raw ACL data arrived.  Reassemble L2CAP frames that are
 * fragmented across ACL packets: ACL_START packets carry the L2CAP header
 * and announce the total length; continuation fragments are appended to
 * conn->rx_skb until rx_len reaches zero, then the complete frame is
 * passed to l2cap_recv_frame(). */
4447 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4449 struct l2cap_conn *conn = hcon->l2cap_data;
4451 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4454 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4456 if (flags & ACL_START) {
4457 struct l2cap_hdr *hdr;
/* A start while reassembly is in progress means we lost fragments. */
4461 BT_ERR("Unexpected start frame (len %d)", skb->len);
4462 kfree_skb(conn->rx_skb);
4463 conn->rx_skb = NULL;
4465 l2cap_conn_unreliable(conn, ECOMM);
4469 BT_ERR("Frame is too short (len %d)", skb->len);
4470 l2cap_conn_unreliable(conn, ECOMM);
4474 hdr = (struct l2cap_hdr *) skb->data;
4475 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4477 if (len == skb->len) {
4478 /* Complete frame received */
4479 l2cap_recv_frame(conn, skb);
4483 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4485 if (skb->len > len) {
4486 BT_ERR("Frame is too long (len %d, expected len %d)",
4488 l2cap_conn_unreliable(conn, ECOMM);
4492 /* Allocate skb for the complete frame (with header) */
4493 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4497 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Bytes still expected from continuation fragments. */
4499 conn->rx_len = len - skb->len;
4501 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4503 if (!conn->rx_len) {
4504 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4505 l2cap_conn_unreliable(conn, ECOMM);
4509 if (skb->len > conn->rx_len) {
4510 BT_ERR("Fragment is too long (len %d, expected %d)",
4511 skb->len, conn->rx_len);
4512 kfree_skb(conn->rx_skb);
4513 conn->rx_skb = NULL;
4515 l2cap_conn_unreliable(conn, ECOMM);
4519 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4521 conn->rx_len -= skb->len;
4523 if (!conn->rx_len) {
4524 /* Complete frame received */
4525 l2cap_recv_frame(conn, conn->rx_skb);
4526 conn->rx_skb = NULL;
/* seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: print one
 * line per L2CAP socket with addresses, state, PSM, CIDs, MTUs and
 * security level. */
4535 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4538 struct hlist_node *node;
4540 read_lock_bh(&l2cap_sk_list.lock);
4542 sk_for_each(sk, node, &l2cap_sk_list.head) {
4543 struct l2cap_pinfo *pi = l2cap_pi(sk);
4545 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4546 batostr(&bt_sk(sk)->src),
4547 batostr(&bt_sk(sk)->dst),
4548 sk->sk_state, __le16_to_cpu(pi->psm),
4550 pi->imtu, pi->omtu, pi->sec_level);
4553 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open hook: wire the file up to the seq_file single_show helper. */
4558 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4560 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the l2cap debugfs entry (seq_file based). */
4563 static const struct file_operations l2cap_debugfs_fops = {
4564 .open = l2cap_debugfs_open,
4566 .llseek = seq_lseek,
4567 .release = single_release,
/* Dentry of the debugfs file, kept for removal in l2cap_exit(). */
4570 static struct dentry *l2cap_debugfs;
/* proto_ops for PF_BLUETOOTH/BTPROTO_L2CAP sockets: L2CAP-specific
 * implementations plus generic bt_sock_* and sock_no_* fallbacks. */
4572 static const struct proto_ops l2cap_sock_ops = {
4573 .family = PF_BLUETOOTH,
4574 .owner = THIS_MODULE,
4575 .release = l2cap_sock_release,
4576 .bind = l2cap_sock_bind,
4577 .connect = l2cap_sock_connect,
4578 .listen = l2cap_sock_listen,
4579 .accept = l2cap_sock_accept,
4580 .getname = l2cap_sock_getname,
4581 .sendmsg = l2cap_sock_sendmsg,
4582 .recvmsg = l2cap_sock_recvmsg,
4583 .poll = bt_sock_poll,
4584 .ioctl = bt_sock_ioctl,
4585 .mmap = sock_no_mmap,
4586 .socketpair = sock_no_socketpair,
4587 .shutdown = l2cap_sock_shutdown,
4588 .setsockopt = l2cap_sock_setsockopt,
4589 .getsockopt = l2cap_sock_getsockopt
/* Socket-family creation hook registered with bt_sock_register(). */
4592 static const struct net_proto_family l2cap_sock_family_ops = {
4593 .family = PF_BLUETOOTH,
4594 .owner = THIS_MODULE,
4595 .create = l2cap_sock_create,
/* HCI protocol descriptor: callbacks through which the HCI core feeds
 * connection events and ACL data into L2CAP. */
4598 static struct hci_proto l2cap_hci_proto = {
4600 .id = HCI_PROTO_L2CAP,
4601 .connect_ind = l2cap_connect_ind,
4602 .connect_cfm = l2cap_connect_cfm,
4603 .disconn_ind = l2cap_disconn_ind,
4604 .disconn_cfm = l2cap_disconn_cfm,
4605 .security_cfm = l2cap_security_cfm,
4606 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, create the local-busy workqueue,
 * register the Bluetooth socket family and the HCI protocol hooks, and
 * create the debugfs entry.  Registrations are unwound in reverse order
 * on failure (error paths partly elided here). */
4609 static int __init l2cap_init(void)
4613 err = proto_register(&l2cap_proto, 0);
/* Single-threaded workqueue used by l2cap_busy_work(). */
4617 _busy_wq = create_singlethread_workqueue("l2cap");
4621 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4623 BT_ERR("L2CAP socket registration failed");
4627 err = hci_register_proto(&l2cap_hci_proto);
4629 BT_ERR("L2CAP protocol registration failed");
4630 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs failure is non-fatal: just log it. */
4635 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4636 bt_debugfs, NULL, &l2cap_debugfs_fops);
4638 BT_ERR("Failed to create L2CAP debug file");
4641 BT_INFO("L2CAP ver %s", VERSION);
4642 BT_INFO("L2CAP socket layer initialized");
4647 proto_unregister(&l2cap_proto);
/* Module exit: undo everything l2cap_init() set up — remove debugfs,
 * drain and destroy the busy workqueue, then unregister the socket
 * family, HCI protocol hooks and the proto. */
4651 static void __exit l2cap_exit(void)
4653 debugfs_remove(l2cap_debugfs);
/* Make sure no busy_work is still running before the module goes away. */
4655 flush_workqueue(_busy_wq);
4656 destroy_workqueue(_busy_wq);
4658 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4659 BT_ERR("L2CAP socket unregistration failed");
4661 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4662 BT_ERR("L2CAP protocol unregistration failed");
4664 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol lets dependent modules pull in
 * l2cap via the module loader without using any other export. */
4667 void l2cap_load(void)
4669 /* Dummy function to trigger automatic L2CAP module loading by
4670 * other modules that use L2CAP sockets but don't use any other
4671 * symbols from it. */
4673 EXPORT_SYMBOL(l2cap_load);
/* Module entry/exit points, parameters and metadata. */
4675 module_init(l2cap_init);
4676 module_exit(l2cap_exit);
/* ERTM support is opt-in at module load time (default off, see head). */
4678 module_param(enable_ertm, bool, 0644);
4679 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4681 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4682 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4683 MODULE_VERSION(VERSION);
4684 MODULE_LICENSE("GPL");
/* Auto-load when the Bluetooth core requests protocol id 0 (L2CAP). */
4685 MODULE_ALIAS("bt-proto-0");