2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
/* ERTM (Enhanced Retransmission Mode) is enabled by default only when the
 * extended-features kernel config option is set. */
58 #ifdef CONFIG_BT_L2CAP_EXT_FEATURES
59 static int enable_ertm = 1;
61 static int enable_ertm = 0;
/* Module-wide ERTM tuning defaults, inherited by new sockets in
 * l2cap_sock_init(). */
63 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
64 static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
/* Feature mask / fixed-channel map advertised in information responses. */
66 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
67 static u8 l2cap_fixed_chan[8] = { 0x02, };
69 static const struct proto_ops l2cap_sock_ops;
/* Workqueue used to drain the local-busy backlog (see l2cap_busy_work). */
71 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
73 static struct bt_sock_list l2cap_sk_list = {
74 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
77 static void l2cap_busy_work(struct work_struct *work);
/* Forward declarations for the socket close/kill paths. */
79 static void __l2cap_sock_close(struct sock *sk, int reason);
80 static void l2cap_sock_close(struct sock *sk);
81 static void l2cap_sock_kill(struct sock *sk);
83 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
84 u8 code, u8 ident, u16 dlen, void *data);
86 /* ---- L2CAP timers ---- */
/* Socket timer callback: picks a close reason from the current socket state
 * and tears the channel down.  Runs from timer (softirq) context via
 * sk->sk_timer, armed by l2cap_sock_set_timer(). */
87 static void l2cap_sock_timeout(unsigned long arg)
89 struct sock *sk = (struct sock *) arg;
92 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* A timeout while connected/configuring, or while connecting with real
 * (non-SDP) security, is reported as ECONNREFUSED.
 * NOTE(review): the default reason for other states is assigned on a line
 * elided from this chunk — presumably ETIMEDOUT; confirm against full file. */
96 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
97 reason = ECONNREFUSED;
98 else if (sk->sk_state == BT_CONNECT &&
99 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
100 reason = ECONNREFUSED;
104 __l2cap_sock_close(sk, reason);
/* Arm sk->sk_timer to fire l2cap_sock_timeout() after @timeout jiffies. */
114 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
115 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
116 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
118 static void l2cap_sock_clear_timer(struct sock *sk)
120 BT_DBG("sock %p state %d", sk, sk->sk_state);
121 sk_stop_timer(sk, &sk->sk_timer);
124 /* ---- L2CAP channels ---- */
/* Linear search of the connection's channel list for a destination CID.
 * Caller must hold l->lock. */
125 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
128 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
129 if (l2cap_pi(s)->dcid == cid)
/* Linear search of the connection's channel list for a source CID.
 * Caller must hold l->lock. */
135 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
138 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
139 if (l2cap_pi(s)->scid == cid)
145 /* Find channel with given SCID.
146 * Returns locked socket */
/* Locking wrapper around __l2cap_get_chan_by_scid(): takes l->lock for
 * reading, bh-locks the found socket (on an elided line), then releases
 * the list lock. */
147 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
151 s = __l2cap_get_chan_by_scid(l, cid);
154 read_unlock(&l->lock);
/* Find the channel whose pending signalling request used @ident.
 * Caller must hold l->lock. */
158 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
161 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
162 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(); returns the socket
 * locked (bh-lock taken on an elided line). */
168 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
172 s = __l2cap_get_chan_by_ident(l, ident);
175 read_unlock(&l->lock);
/* Allocate the first free dynamic CID in [L2CAP_CID_DYN_START,
 * L2CAP_CID_DYN_END) for this connection.  Caller must hold the channel
 * list lock to keep the scan race-free. */
179 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
181 u16 cid = L2CAP_CID_DYN_START;
183 for (; cid < L2CAP_CID_DYN_END; cid++) {
184 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push @sk at the head of the connection's doubly-linked channel list.
 * Caller must hold the list write lock. */
191 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
196 l2cap_pi(l->head)->prev_c = sk;
198 l2cap_pi(sk)->next_c = l->head;
199 l2cap_pi(sk)->prev_c = NULL;
/* Remove @sk from the connection's channel list, fixing up neighbour
 * links under the list write lock. */
203 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
205 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
207 write_lock_bh(&l->lock);
212 l2cap_pi(next)->prev_c = prev;
214 l2cap_pi(prev)->next_c = next;
215 write_unlock_bh(&l->lock);
/* Attach socket @sk to connection @conn, assigning CIDs by socket type,
 * and enqueue it on @parent's accept queue if this is an incoming channel.
 * Caller must hold the channel list write lock (see l2cap_chan_add()). */
220 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
222 struct l2cap_chan_list *l = &conn->chan_list;
224 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
225 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = "remote user terminated connection" — default HCI disconnect
 * reason until something more specific is known. */
227 conn->disc_reason = 0x13;
229 l2cap_pi(sk)->conn = conn;
231 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
232 /* Alloc CID for connection-oriented socket */
233 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
234 } else if (sk->sk_type == SOCK_DGRAM) {
235 /* Connectionless socket */
236 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
237 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
238 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
240 /* Raw socket can send/recv signalling messages only */
241 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
242 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
243 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
246 __l2cap_chan_link(l, sk);
249 bt_accept_enqueue(parent, sk);
/* Detach a channel from its connection and mark the socket closed,
 * propagating @err to userspace and waking any accepting parent. */
253 * Must be called on the locked socket. */
254 static void l2cap_chan_del(struct sock *sk, int err)
256 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
257 struct sock *parent = bt_sk(sk)->parent;
259 l2cap_sock_clear_timer(sk);
261 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
264 /* Unlink from channel list */
265 l2cap_chan_unlink(&conn->chan_list, sk);
266 l2cap_pi(sk)->conn = NULL;
/* Drop the hci_conn reference taken when the channel was added. */
267 hci_conn_put(conn->hcon);
270 sk->sk_state = BT_CLOSED;
271 sock_set_flag(sk, SOCK_ZAPPED);
/* Pending (never-accepted) child: unlink from the parent's accept queue
 * and notify the listener so poll()/accept() wake up. */
277 bt_accept_unlink(sk);
278 parent->sk_data_ready(parent, 0);
280 sk->sk_state_change(sk);
283 /* Service level security */
/* Map the socket's security level (and PSM) to an HCI authentication
 * requirement and ask the HCI layer to enforce it.  Returns nonzero when
 * the link already satisfies the requirement (see hci_conn_security()). */
284 static inline int l2cap_check_security(struct sock *sk)
286 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* PSM 0x0001 is SDP: never bond for it, and downgrade LOW to the
 * dedicated SDP security level. */
289 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
290 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
291 auth_type = HCI_AT_NO_BONDING_MITM;
293 auth_type = HCI_AT_NO_BONDING;
295 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
296 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
298 switch (l2cap_pi(sk)->sec_level) {
299 case BT_SECURITY_HIGH:
300 auth_type = HCI_AT_GENERAL_BONDING_MITM;
302 case BT_SECURITY_MEDIUM:
303 auth_type = HCI_AT_GENERAL_BONDING;
306 auth_type = HCI_AT_NO_BONDING;
311 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for @conn, wrapping
 * within the kernel-reserved 1..128 range under conn->lock. */
315 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
319 /* Get next available identificator.
320 * 1 - 128 are used by kernel.
321 * 321 - 199 are reserved.
322 * 200 - 254 are used by utilities like l2ping, etc.
325 spin_lock_bh(&conn->lock);
327 if (++conn->tx_ident > 128)
332 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command and push it out over ACL.
 * Silently drops the command if skb allocation fails (check elided). */
337 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
339 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
341 BT_DBG("code 0x%2.2x", code);
346 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame (supervisory frame) with the given
 * control field.  Consumes the pending F/P bits from conn_state and
 * appends an FCS when CRC16 checksumming is negotiated. */
349 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
352 struct l2cap_hdr *lh;
353 struct l2cap_conn *conn = pi->conn;
354 int count, hlen = L2CAP_HDR_SIZE + 2;
/* NOTE(review): hlen is presumably extended by 2 for the FCS on an elided
 * line — confirm against full file. */
356 if (pi->fcs == L2CAP_FCS_CRC16)
359 BT_DBG("pi %p, control 0x%2.2x", pi, control);
361 count = min_t(unsigned int, conn->mtu, hlen);
362 control |= L2CAP_CTRL_FRAME_TYPE;
/* Piggy-back a pending Final bit, then clear the request. */
364 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
365 control |= L2CAP_CTRL_FINAL;
366 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Likewise for a pending Poll bit. */
369 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
370 control |= L2CAP_CTRL_POLL;
371 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
374 skb = bt_skb_alloc(count, GFP_ATOMIC);
378 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
379 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
380 lh->cid = cpu_to_le16(pi->dcid);
381 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the 2 trailing FCS bytes. */
383 if (pi->fcs == L2CAP_FCS_CRC16) {
384 u16 fcs = crc16(0, (u8 *)lh, count - 2);
385 put_unaligned_le16(fcs, skb_put(skb, 2));
388 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send a Receiver-Ready, or Receiver-Not-Ready when we are locally busy,
 * acknowledging frames up to buffer_seq. */
391 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
393 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
394 control |= L2CAP_SUPER_RCV_NOT_READY;
395 pi->conn_state |= L2CAP_CONN_RNR_SENT;
397 control |= L2CAP_SUPER_RCV_READY;
399 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
401 l2cap_send_sframe(pi, control);
/* Kick off channel establishment: send a Connect Request if the remote
 * feature mask is already known and security passes, otherwise issue an
 * Information Request (feature-mask query) first, with a timeout. */
404 static void l2cap_do_start(struct sock *sk)
406 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
408 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask exchange still in flight — wait for it to finish. */
409 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
412 if (l2cap_check_security(sk)) {
413 struct l2cap_conn_req req;
414 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
415 req.psm = l2cap_pi(sk)->psm;
417 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
419 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
420 L2CAP_CONN_REQ, sizeof(req), &req);
423 struct l2cap_info_req req;
424 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
426 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
427 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the info response; l2cap_info_timeout() gives up. */
429 mod_timer(&conn->info_timer, jiffies +
430 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
432 l2cap_send_cmd(conn, conn->info_ident,
433 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send a Disconnect Request for @sk's channel over @conn. */
437 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
439 struct l2cap_disconn_req req;
441 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
442 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
443 l2cap_send_cmd(conn, l2cap_get_ident(conn),
444 L2CAP_DISCONN_REQ, sizeof(req), &req);
447 /* ---- L2CAP connections ---- */
/* Walk every channel on @conn and advance its state machine: issue
 * Connect Requests for BT_CONNECT channels and (possibly deferred)
 * Connect Responses for BT_CONNECT2 channels, gated on security. */
448 static void l2cap_conn_start(struct l2cap_conn *conn)
450 struct l2cap_chan_list *l = &conn->chan_list;
453 BT_DBG("conn %p", conn);
457 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets take part in connect signalling. */
460 if (sk->sk_type != SOCK_SEQPACKET &&
461 sk->sk_type != SOCK_STREAM) {
466 if (sk->sk_state == BT_CONNECT) {
467 if (l2cap_check_security(sk)) {
468 struct l2cap_conn_req req;
469 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
470 req.psm = l2cap_pi(sk)->psm;
472 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
474 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
475 L2CAP_CONN_REQ, sizeof(req), &req);
477 } else if (sk->sk_state == BT_CONNECT2) {
478 struct l2cap_conn_rsp rsp;
479 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
480 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
482 if (l2cap_check_security(sk)) {
/* defer_setup: report "authorization pending" and let the
 * listener decide via accept(). */
483 if (bt_sk(sk)->defer_setup) {
484 struct sock *parent = bt_sk(sk)->parent;
485 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
486 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
487 parent->sk_data_ready(parent, 0);
490 sk->sk_state = BT_CONFIG;
491 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
492 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not settled yet: answer "pending, authentication
 * pending" for now. */
495 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
496 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
499 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
500 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
506 read_unlock(&l->lock);
/* Called when the underlying ACL link is up: mark non-connection-oriented
 * sockets connected immediately and start channel setup for the rest. */
509 static void l2cap_conn_ready(struct l2cap_conn *conn)
511 struct l2cap_chan_list *l = &conn->chan_list;
514 BT_DBG("conn %p", conn);
518 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
521 if (sk->sk_type != SOCK_SEQPACKET &&
522 sk->sk_type != SOCK_STREAM) {
/* RAW/DGRAM sockets need no L2CAP-level handshake. */
523 l2cap_sock_clear_timer(sk);
524 sk->sk_state = BT_CONNECTED;
525 sk->sk_state_change(sk);
526 } else if (sk->sk_state == BT_CONNECT)
532 read_unlock(&l->lock);
535 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate @err to every channel that asked for reliable delivery
 * (force_reliable); best-effort channels are left untouched. */
536 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
538 struct l2cap_chan_list *l = &conn->chan_list;
541 BT_DBG("conn %p", conn);
545 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
546 if (l2cap_pi(sk)->force_reliable)
550 read_unlock(&l->lock);
/* Information-request timer expired: give up on the feature-mask query,
 * mark the exchange done and resume channel establishment. */
553 static void l2cap_info_timeout(unsigned long arg)
555 struct l2cap_conn *conn = (void *) arg;
557 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
558 conn->info_ident = 0;
560 l2cap_conn_start(conn);
/* Get-or-create the L2CAP connection object attached to an HCI link.
 * Returns the existing conn if one is already attached, otherwise
 * allocates and initializes a new one (GFP_ATOMIC). */
563 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
565 struct l2cap_conn *conn = hcon->l2cap_data;
570 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
574 hcon->l2cap_data = conn;
577 BT_DBG("hcon %p conn %p", hcon, conn);
579 conn->mtu = hcon->hdev->acl_mtu;
580 conn->src = &hcon->hdev->bdaddr;
581 conn->dst = &hcon->dst;
585 spin_lock_init(&conn->lock);
586 rwlock_init(&conn->chan_list.lock);
588 setup_timer(&conn->info_timer, l2cap_info_timeout,
589 (unsigned long) conn);
/* 0x13 = "remote user terminated connection" default disconnect reason. */
591 conn->disc_reason = 0x13;
596 static void l2cap_conn_del(struct hci_conn *hcon, int err)
598 struct l2cap_conn *conn = hcon->l2cap_data;
604 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
606 kfree_skb(conn->rx_skb);
609 while ((sk = conn->chan_list.head)) {
611 l2cap_chan_del(sk, err);
616 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
617 del_timer_sync(&conn->info_timer);
619 hcon->l2cap_data = NULL;
/* Locked wrapper: add @sk to @conn's channel list (see __l2cap_chan_add). */
623 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
625 struct l2cap_chan_list *l = &conn->chan_list;
626 write_lock_bh(&l->lock);
627 __l2cap_chan_add(conn, sk, parent);
628 write_unlock_bh(&l->lock);
631 /* ---- Socket interface ---- */
/* Exact-match lookup: socket bound to (source PSM, source bdaddr).
 * Caller must hold l2cap_sk_list.lock. */
632 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
635 struct hlist_node *node;
636 sk_for_each(sk, node, &l2cap_sk_list.head)
637 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
644 /* Find socket with psm and source bdaddr.
645 * Returns closest match.
/* Prefer a socket bound to the exact source address; fall back to one
 * bound to BDADDR_ANY (wildcard), tracked in sk1.
 * Caller must hold l2cap_sk_list.lock. */
647 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
649 struct sock *sk = NULL, *sk1 = NULL;
650 struct hlist_node *node;
652 sk_for_each(sk, node, &l2cap_sk_list.head) {
653 if (state && sk->sk_state != state)
656 if (l2cap_pi(sk)->psm == psm) {
658 if (!bacmp(&bt_sk(sk)->src, src))
662 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop broke on an exact match. */
666 return node ? sk : sk1;
669 /* Find socket with given address (psm, src).
670 * Returns locked socket */
/* Locking wrapper around __l2cap_get_sock_by_psm(); the found socket is
 * bh-locked on an elided line before the list lock is released. */
671 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
674 read_lock(&l2cap_sk_list.lock);
675 s = __l2cap_get_sock_by_psm(state, psm, src);
678 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any skbs still queued on the socket. */
682 static void l2cap_sock_destruct(struct sock *sk)
686 skb_queue_purge(&sk->sk_receive_queue);
687 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
690 static void l2cap_sock_cleanup_listen(struct sock *parent)
694 BT_DBG("parent %p", parent);
696 /* Close not yet accepted channels */
697 while ((sk = bt_accept_dequeue(parent, NULL)))
698 l2cap_sock_close(sk);
700 parent->sk_state = BT_CLOSED;
701 sock_set_flag(parent, SOCK_ZAPPED);
704 /* Kill socket (only if zapped and orphan)
705 * Must be called on unlocked socket.
/* Final teardown: only proceeds once the socket is zapped and has no
 * owning struct socket; unlinks it from the global list and drops the
 * last reference (sock_put on an elided line). */
707 static void l2cap_sock_kill(struct sock *sk)
709 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
712 BT_DBG("sk %p state %d", sk, sk->sk_state);
714 /* Kill poor orphan */
715 bt_sock_unlink(&l2cap_sk_list, sk);
716 sock_set_flag(sk, SOCK_DEAD);
/* State-machine close: listening sockets drain their accept queue;
 * connected/configuring channels send a Disconnect Request and wait;
 * half-open incoming channels (BT_CONNECT2) answer the pending Connect
 * Request with a refusal before being deleted.  Caller holds the lock. */
720 static void __l2cap_sock_close(struct sock *sk, int reason)
722 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
724 switch (sk->sk_state) {
726 l2cap_sock_cleanup_listen(sk);
731 if (sk->sk_type == SOCK_SEQPACKET ||
732 sk->sk_type == SOCK_STREAM) {
733 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Graceful disconnect: enter BT_DISCONN and bound the wait with the
 * socket send timeout. */
735 sk->sk_state = BT_DISCONN;
736 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
737 l2cap_send_disconn_req(conn, sk);
739 l2cap_chan_del(sk, reason);
743 if (sk->sk_type == SOCK_SEQPACKET ||
744 sk->sk_type == SOCK_STREAM) {
745 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
746 struct l2cap_conn_rsp rsp;
/* With defer_setup the remote was told "authorization pending", so
 * refuse on security grounds; otherwise claim an unknown PSM. */
749 if (bt_sk(sk)->defer_setup)
750 result = L2CAP_CR_SEC_BLOCK;
752 result = L2CAP_CR_BAD_PSM;
754 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
755 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
756 rsp.result = cpu_to_le16(result);
757 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
758 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
759 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
761 l2cap_chan_del(sk, reason);
766 l2cap_chan_del(sk, reason);
770 sock_set_flag(sk, SOCK_ZAPPED);
775 /* Must be called on unlocked socket. */
/* Lock-taking wrapper: stop the socket timer and close with ECONNRESET
 * (lock/unlock and the kill call are on elided lines). */
776 static void l2cap_sock_close(struct sock *sk)
778 l2cap_sock_clear_timer(sk);
780 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a fresh L2CAP socket.  With a @parent (incoming connection)
 * all tunables are inherited from the listener; otherwise module-wide
 * defaults apply.  Also sets default config options and the ERTM queues. */
785 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
787 struct l2cap_pinfo *pi = l2cap_pi(sk);
792 sk->sk_type = parent->sk_type;
793 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
795 pi->imtu = l2cap_pi(parent)->imtu;
796 pi->omtu = l2cap_pi(parent)->omtu;
797 pi->mode = l2cap_pi(parent)->mode;
798 pi->fcs = l2cap_pi(parent)->fcs;
799 pi->max_tx = l2cap_pi(parent)->max_tx;
800 pi->tx_win = l2cap_pi(parent)->tx_win;
801 pi->sec_level = l2cap_pi(parent)->sec_level;
802 pi->role_switch = l2cap_pi(parent)->role_switch;
803 pi->force_reliable = l2cap_pi(parent)->force_reliable;
805 pi->imtu = L2CAP_DEFAULT_MTU;
/* SOCK_STREAM defaults to ERTM when the module enables it; everything
 * else starts in basic mode. */
807 if (enable_ertm && sk->sk_type == SOCK_STREAM)
808 pi->mode = L2CAP_MODE_ERTM;
810 pi->mode = L2CAP_MODE_BASIC;
811 pi->max_tx = max_transmit;
812 pi->fcs = L2CAP_FCS_CRC16;
813 pi->tx_win = tx_window;
814 pi->sec_level = BT_SECURITY_LOW;
816 pi->force_reliable = 0;
819 /* Default config options */
821 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* ERTM bookkeeping: transmit, selective-reject and busy queues/list. */
822 skb_queue_head_init(TX_QUEUE(sk));
823 skb_queue_head_init(SREJ_QUEUE(sk));
824 skb_queue_head_init(BUSY_QUEUE(sk));
825 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes sk_alloc()'s allocation for l2cap_pinfo. */
828 static struct proto l2cap_proto = {
830 .owner = THIS_MODULE,
831 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and pre-initialize an L2CAP struct sock: destructor, send
 * timeout, state, timer, and linkage into the global socket list. */
834 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
838 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
842 sock_init_data(sock, sk);
843 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
845 sk->sk_destruct = l2cap_sock_destruct;
846 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
848 sock_reset_flag(sk, SOCK_ZAPPED);
850 sk->sk_protocol = proto;
851 sk->sk_state = BT_OPEN;
853 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
855 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type, require CAP_NET_RAW for
 * user-created raw sockets, then allocate and initialize the sock. */
859 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
864 BT_DBG("sock %p", sock);
866 sock->state = SS_UNCONNECTED;
868 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
869 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
870 return -ESOCKTNOSUPPORT;
/* !kern: in-kernel creators bypass the capability check. */
872 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
875 sock->ops = &l2cap_sock_ops;
877 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
881 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the sockaddr, require CAP_NET_BIND_SERVICE for
 * privileged PSMs (< 0x1001), reject duplicate (psm, bdaddr) bindings,
 * and record the source address/PSM on the socket. */
885 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
887 struct sock *sk = sock->sk;
888 struct sockaddr_l2 la;
893 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Copy at most sizeof(la); shorter sockaddrs leave the rest zeroed. */
896 memset(&la, 0, sizeof(la));
897 len = min_t(unsigned int, sizeof(la), alen);
898 memcpy(&la, addr, len);
905 if (sk->sk_state != BT_OPEN) {
910 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
911 !capable(CAP_NET_BIND_SERVICE)) {
916 write_lock_bh(&l2cap_sk_list.lock);
918 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
921 /* Save source address */
922 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
923 l2cap_pi(sk)->psm = la.l2_psm;
924 l2cap_pi(sk)->sport = la.l2_psm;
925 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) get the relaxed SDP security level. */
927 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
928 __le16_to_cpu(la.l2_psm) == 0x0003)
929 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
932 write_unlock_bh(&l2cap_sk_list.lock);
939 static int l2cap_do_connect(struct sock *sk)
941 bdaddr_t *src = &bt_sk(sk)->src;
942 bdaddr_t *dst = &bt_sk(sk)->dst;
943 struct l2cap_conn *conn;
944 struct hci_conn *hcon;
945 struct hci_dev *hdev;
949 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
952 hdev = hci_get_route(dst, src);
954 return -EHOSTUNREACH;
956 hci_dev_lock_bh(hdev);
960 if (sk->sk_type == SOCK_RAW) {
961 switch (l2cap_pi(sk)->sec_level) {
962 case BT_SECURITY_HIGH:
963 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
965 case BT_SECURITY_MEDIUM:
966 auth_type = HCI_AT_DEDICATED_BONDING;
969 auth_type = HCI_AT_NO_BONDING;
972 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
973 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
974 auth_type = HCI_AT_NO_BONDING_MITM;
976 auth_type = HCI_AT_NO_BONDING;
978 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
979 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
981 switch (l2cap_pi(sk)->sec_level) {
982 case BT_SECURITY_HIGH:
983 auth_type = HCI_AT_GENERAL_BONDING_MITM;
985 case BT_SECURITY_MEDIUM:
986 auth_type = HCI_AT_GENERAL_BONDING;
989 auth_type = HCI_AT_NO_BONDING;
994 hcon = hci_connect(hdev, ACL_LINK, dst,
995 l2cap_pi(sk)->sec_level, auth_type);
999 conn = l2cap_conn_add(hcon, 0);
1007 /* Update source addr of the socket */
1008 bacpy(src, conn->src);
1010 l2cap_chan_add(conn, sk, NULL);
1012 sk->sk_state = BT_CONNECT;
1013 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1015 if (hcon->state == BT_CONNECTED) {
1016 if (sk->sk_type != SOCK_SEQPACKET &&
1017 sk->sk_type != SOCK_STREAM) {
1018 l2cap_sock_clear_timer(sk);
1019 sk->sk_state = BT_CONNECTED;
1025 hci_dev_unlock_bh(hdev);
1030 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1032 struct sock *sk = sock->sk;
1033 struct sockaddr_l2 la;
1036 BT_DBG("sk %p", sk);
1038 if (!addr || alen < sizeof(addr->sa_family) ||
1039 addr->sa_family != AF_BLUETOOTH)
1042 memset(&la, 0, sizeof(la));
1043 len = min_t(unsigned int, sizeof(la), alen);
1044 memcpy(&la, addr, len);
1051 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1057 switch (l2cap_pi(sk)->mode) {
1058 case L2CAP_MODE_BASIC:
1060 case L2CAP_MODE_ERTM:
1061 case L2CAP_MODE_STREAMING:
1070 switch (sk->sk_state) {
1074 /* Already connecting */
1078 /* Already connected */
1091 /* Set destination address and psm */
1092 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1093 l2cap_pi(sk)->psm = la.l2_psm;
1095 err = l2cap_do_connect(sk);
1100 err = bt_sock_wait_state(sk, BT_CONNECTED,
1101 sock_sndtimeo(sk, flags & O_NONBLOCK));
1107 static int l2cap_sock_listen(struct socket *sock, int backlog)
1109 struct sock *sk = sock->sk;
1112 BT_DBG("sk %p backlog %d", sk, backlog);
1116 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1117 || sk->sk_state != BT_BOUND) {
1122 switch (l2cap_pi(sk)->mode) {
1123 case L2CAP_MODE_BASIC:
1125 case L2CAP_MODE_ERTM:
1126 case L2CAP_MODE_STREAMING:
1135 if (!l2cap_pi(sk)->psm) {
1136 bdaddr_t *src = &bt_sk(sk)->src;
1141 write_lock_bh(&l2cap_sk_list.lock);
1143 for (psm = 0x1001; psm < 0x1100; psm += 2)
1144 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1145 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1146 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1151 write_unlock_bh(&l2cap_sk_list.lock);
1157 sk->sk_max_ack_backlog = backlog;
1158 sk->sk_ack_backlog = 0;
1159 sk->sk_state = BT_LISTEN;
1166 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1168 DECLARE_WAITQUEUE(wait, current);
1169 struct sock *sk = sock->sk, *nsk;
1173 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1175 if (sk->sk_state != BT_LISTEN) {
1180 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1182 BT_DBG("sk %p timeo %ld", sk, timeo);
1184 /* Wait for an incoming connection. (wake-one). */
1185 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1186 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1187 set_current_state(TASK_INTERRUPTIBLE);
1194 timeo = schedule_timeout(timeo);
1195 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1197 if (sk->sk_state != BT_LISTEN) {
1202 if (signal_pending(current)) {
1203 err = sock_intr_errno(timeo);
1207 set_current_state(TASK_RUNNING);
1208 remove_wait_queue(sk_sleep(sk), &wait);
1213 newsock->state = SS_CONNECTED;
1215 BT_DBG("new socket %p", nsk);
1222 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1224 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1225 struct sock *sk = sock->sk;
1227 BT_DBG("sock %p, sk %p", sock, sk);
1229 addr->sa_family = AF_BLUETOOTH;
1230 *len = sizeof(struct sockaddr_l2);
1233 la->l2_psm = l2cap_pi(sk)->psm;
1234 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1235 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1237 la->l2_psm = l2cap_pi(sk)->sport;
1238 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1239 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1245 static int __l2cap_wait_ack(struct sock *sk)
1247 DECLARE_WAITQUEUE(wait, current);
1251 add_wait_queue(sk->sk_sleep, &wait);
1252 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1253 set_current_state(TASK_INTERRUPTIBLE);
1258 if (signal_pending(current)) {
1259 err = sock_intr_errno(timeo);
1264 timeo = schedule_timeout(timeo);
1267 err = sock_error(sk);
1271 set_current_state(TASK_RUNNING);
1272 remove_wait_queue(sk->sk_sleep, &wait);
1276 static void l2cap_monitor_timeout(unsigned long arg)
1278 struct sock *sk = (void *) arg;
1281 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1282 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1287 l2cap_pi(sk)->retry_count++;
1288 __mod_monitor_timer();
1290 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1294 static void l2cap_retrans_timeout(unsigned long arg)
1296 struct sock *sk = (void *) arg;
1299 l2cap_pi(sk)->retry_count = 1;
1300 __mod_monitor_timer();
1302 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1304 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1308 static void l2cap_drop_acked_frames(struct sock *sk)
1310 struct sk_buff *skb;
1312 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1313 l2cap_pi(sk)->unacked_frames) {
1314 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1317 skb = skb_dequeue(TX_QUEUE(sk));
1320 l2cap_pi(sk)->unacked_frames--;
1323 if (!l2cap_pi(sk)->unacked_frames)
1324 del_timer(&l2cap_pi(sk)->retrans_timer);
1329 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1331 struct l2cap_pinfo *pi = l2cap_pi(sk);
1333 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1335 hci_send_acl(pi->conn->hcon, skb, 0);
1338 static int l2cap_streaming_send(struct sock *sk)
1340 struct sk_buff *skb, *tx_skb;
1341 struct l2cap_pinfo *pi = l2cap_pi(sk);
1344 while ((skb = sk->sk_send_head)) {
1345 tx_skb = skb_clone(skb, GFP_ATOMIC);
1347 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1348 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1349 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1351 if (pi->fcs == L2CAP_FCS_CRC16) {
1352 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1353 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1356 l2cap_do_send(sk, tx_skb);
1358 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1360 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1361 sk->sk_send_head = NULL;
1363 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1365 skb = skb_dequeue(TX_QUEUE(sk));
1371 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1373 struct l2cap_pinfo *pi = l2cap_pi(sk);
1374 struct sk_buff *skb, *tx_skb;
1377 skb = skb_peek(TX_QUEUE(sk));
1382 if (bt_cb(skb)->tx_seq == tx_seq)
1385 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1388 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1390 if (pi->remote_max_tx &&
1391 bt_cb(skb)->retries == pi->remote_max_tx) {
1392 l2cap_send_disconn_req(pi->conn, sk);
1396 tx_skb = skb_clone(skb, GFP_ATOMIC);
1397 bt_cb(skb)->retries++;
1398 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1399 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1400 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1401 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1403 if (pi->fcs == L2CAP_FCS_CRC16) {
1404 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1405 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1408 l2cap_do_send(sk, tx_skb);
1411 static int l2cap_ertm_send(struct sock *sk)
1413 struct sk_buff *skb, *tx_skb;
1414 struct l2cap_pinfo *pi = l2cap_pi(sk);
1418 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1421 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1422 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1424 if (pi->remote_max_tx &&
1425 bt_cb(skb)->retries == pi->remote_max_tx) {
1426 l2cap_send_disconn_req(pi->conn, sk);
1430 tx_skb = skb_clone(skb, GFP_ATOMIC);
1432 bt_cb(skb)->retries++;
1434 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1435 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1436 control |= L2CAP_CTRL_FINAL;
1437 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1439 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1440 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1441 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1444 if (pi->fcs == L2CAP_FCS_CRC16) {
1445 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1446 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1449 l2cap_do_send(sk, tx_skb);
1451 __mod_retrans_timer();
1453 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1454 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1456 pi->unacked_frames++;
1459 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1460 sk->sk_send_head = NULL;
1462 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1470 static int l2cap_retransmit_frames(struct sock *sk)
1472 struct l2cap_pinfo *pi = l2cap_pi(sk);
1475 spin_lock_bh(&pi->send_lock);
1477 if (!skb_queue_empty(TX_QUEUE(sk)))
1478 sk->sk_send_head = TX_QUEUE(sk)->next;
1480 pi->next_tx_seq = pi->expected_ack_seq;
1481 ret = l2cap_ertm_send(sk);
1483 spin_unlock_bh(&pi->send_lock);
1488 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1490 struct sock *sk = (struct sock *)pi;
1494 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1496 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1497 control |= L2CAP_SUPER_RCV_NOT_READY;
1498 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1499 l2cap_send_sframe(pi, control);
1503 spin_lock_bh(&pi->send_lock);
1504 nframes = l2cap_ertm_send(sk);
1505 spin_unlock_bh(&pi->send_lock);
1510 control |= L2CAP_SUPER_RCV_READY;
1511 l2cap_send_sframe(pi, control);
1514 static void l2cap_send_srejtail(struct sock *sk)
1516 struct srej_list *tail;
1519 control = L2CAP_SUPER_SELECT_REJECT;
1520 control |= L2CAP_CTRL_FINAL;
1522 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1523 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1525 l2cap_send_sframe(l2cap_pi(sk), control);
1528 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1530 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1531 struct sk_buff **frag;
1534 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1540 /* Continuation fragments (no L2CAP header) */
1541 frag = &skb_shinfo(skb)->frag_list;
1543 count = min_t(unsigned int, conn->mtu, len);
1545 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1548 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1554 frag = &(*frag)->next;
1560 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1562 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1563 struct sk_buff *skb;
1564 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1565 struct l2cap_hdr *lh;
1567 BT_DBG("sk %p len %d", sk, (int)len);
1569 count = min_t(unsigned int, (conn->mtu - hlen), len);
1570 skb = bt_skb_send_alloc(sk, count + hlen,
1571 msg->msg_flags & MSG_DONTWAIT, &err);
1573 return ERR_PTR(-ENOMEM);
1575 /* Create L2CAP header */
1576 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1577 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1578 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1579 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1581 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1582 if (unlikely(err < 0)) {
1584 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header followed by the user payload.
 * Returns the skb or an ERR_PTR on allocation/copy failure. */
1589 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1591 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1592 struct sk_buff *skb;
1593 int err, count, hlen = L2CAP_HDR_SIZE;
1594 struct l2cap_hdr *lh;
1596 BT_DBG("sk %p len %d", sk, (int)len);
1598 count = min_t(unsigned int, (conn->mtu - hlen), len);
1599 skb = bt_skb_send_alloc(sk, count + hlen,
1600 msg->msg_flags & MSG_DONTWAIT, &err);
1602 return ERR_PTR(-ENOMEM);
1604 /* Create L2CAP header */
1605 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1606 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1607 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1609 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1610 if (unlikely(err < 0)) {
1612 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control field,
 * optional 2-byte SDU length (SAR start frames), payload and optional FCS
 * placeholder.  Returns the skb or an ERR_PTR. */
1617 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1619 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1620 struct sk_buff *skb;
/* hlen: basic header + control field; grows below for SAR/FCS */
1621 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1622 struct l2cap_hdr *lh;
1624 BT_DBG("sk %p len %d", sk, (int)len);
1627 return ERR_PTR(-ENOTCONN);
/* FCS adds a trailing CRC16 to the PDU length */
1632 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1635 count = min_t(unsigned int, (conn->mtu - hlen), len);
1636 skb = bt_skb_send_alloc(sk, count + hlen,
1637 msg->msg_flags & MSG_DONTWAIT, &err);
1639 return ERR_PTR(-ENOMEM);
1641 /* Create L2CAP header */
1642 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1643 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1644 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1645 put_unaligned_le16(control, skb_put(skb, 2));
/* SDU length field is only present on SAR start frames */
1647 put_unaligned_le16(sdulen, skb_put(skb, 2));
1649 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1650 if (unlikely(err < 0)) {
1652 return ERR_PTR(err);
/* FCS placeholder; real CRC is filled in at transmit time */
1655 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1656 put_unaligned_le16(0, skb_put(skb, 2));
1658 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START / CONTINUE* / END
 * sequence of I-frames, queue them on a local list and splice the list onto
 * the socket's TX queue atomically. */
1662 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1664 struct l2cap_pinfo *pi = l2cap_pi(sk);
1665 struct sk_buff *skb;
1666 struct sk_buff_head sar_queue;
1670 skb_queue_head_init(&sar_queue);
1671 control = L2CAP_SDU_START;
/* START frame carries the total SDU length as its sdulen field */
1672 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1674 return PTR_ERR(skb);
1676 __skb_queue_tail(&sar_queue, skb);
1677 len -= pi->remote_mps;
1678 size += pi->remote_mps;
1683 if (len > pi->remote_mps) {
1684 control = L2CAP_SDU_CONTINUE;
1685 buflen = pi->remote_mps;
1687 control = L2CAP_SDU_END;
1691 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* abort: free everything queued so far */
1693 skb_queue_purge(&sar_queue);
1694 return PTR_ERR(skb);
1697 __skb_queue_tail(&sar_queue, skb);
1701 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
/* sk_send_head marks the next unsent frame; set it under send_lock */
1702 spin_lock_bh(&pi->send_lock);
1703 if (sk->sk_send_head == NULL)
1704 sk->sk_send_head = sar_queue.next;
1705 spin_unlock_bh(&pi->send_lock);
/* sendmsg() entry point.  Dispatches on socket type and channel mode:
 * SOCK_DGRAM -> connectionless PDU; BASIC -> single PDU bounded by omtu;
 * ERTM/STREAMING -> either one I-frame or SAR segmentation, then kick the
 * mode-specific transmit engine. */
1710 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1712 struct sock *sk = sock->sk;
1713 struct l2cap_pinfo *pi = l2cap_pi(sk);
1714 struct sk_buff *skb;
1718 BT_DBG("sock %p, sk %p", sock, sk);
1720 err = sock_error(sk);
/* L2CAP has no out-of-band data */
1724 if (msg->msg_flags & MSG_OOB)
1729 if (sk->sk_state != BT_CONNECTED) {
1734 /* Connectionless channel */
1735 if (sk->sk_type == SOCK_DGRAM) {
1736 skb = l2cap_create_connless_pdu(sk, msg, len);
1740 l2cap_do_send(sk, skb);
1747 case L2CAP_MODE_BASIC:
1748 /* Check outgoing MTU */
1749 if (len > pi->omtu) {
1754 /* Create a basic PDU */
1755 skb = l2cap_create_basic_pdu(sk, msg, len);
1761 l2cap_do_send(sk, skb);
1765 case L2CAP_MODE_ERTM:
1766 case L2CAP_MODE_STREAMING:
1767 /* Entire SDU fits into one PDU */
1768 if (len <= pi->remote_mps) {
1769 control = L2CAP_SDU_UNSEGMENTED;
1770 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1775 __skb_queue_tail(TX_QUEUE(sk), skb);
/* only ERTM needs send_lock; streaming has no retransmit state */
1777 if (pi->mode == L2CAP_MODE_ERTM)
1778 spin_lock_bh(&pi->send_lock);
1780 if (sk->sk_send_head == NULL)
1781 sk->sk_send_head = skb;
1783 if (pi->mode == L2CAP_MODE_ERTM)
1784 spin_unlock_bh(&pi->send_lock);
1786 /* Segment SDU into multiples PDUs */
1787 err = l2cap_sar_segment_sdu(sk, msg, len);
/* queueing done: push frames out via the mode's engine */
1792 if (pi->mode == L2CAP_MODE_STREAMING) {
1793 err = l2cap_streaming_send(sk);
1795 spin_lock_bh(&pi->send_lock);
1796 err = l2cap_ertm_send(sk);
1797 spin_unlock_bh(&pi->send_lock);
1805 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() entry point.  For a deferred-setup connection the first read
 * completes the handshake by sending the pending connect response; actual
 * data reception is delegated to the generic bt_sock_recvmsg(). */
1814 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1816 struct sock *sk = sock->sk;
1820 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1821 struct l2cap_conn_rsp rsp;
1823 sk->sk_state = BT_CONFIG;
1825 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1826 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1827 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1828 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* reply with the ident saved from the original CONN_REQ */
1829 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1830 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1838 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler: L2CAP_OPTIONS (mtu/mode/fcs/...)
 * and L2CAP_LM link-mode flags. */
1841 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1843 struct sock *sk = sock->sk;
1844 struct l2cap_options opts;
1848 BT_DBG("sk %p", sk);
/* Preload current values so a short user buffer leaves the rest intact */
1854 opts.imtu = l2cap_pi(sk)->imtu;
1855 opts.omtu = l2cap_pi(sk)->omtu;
1856 opts.flush_to = l2cap_pi(sk)->flush_to;
1857 opts.mode = l2cap_pi(sk)->mode;
1858 opts.fcs = l2cap_pi(sk)->fcs;
1859 opts.max_tx = l2cap_pi(sk)->max_tx;
1860 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1862 len = min_t(unsigned int, sizeof(opts), optlen);
1863 if (copy_from_user((char *) &opts, optval, len)) {
1868 l2cap_pi(sk)->mode = opts.mode;
/* only BASIC/ERTM/STREAMING are accepted as modes */
1869 switch (l2cap_pi(sk)->mode) {
1870 case L2CAP_MODE_BASIC:
1872 case L2CAP_MODE_ERTM:
1873 case L2CAP_MODE_STREAMING:
1882 l2cap_pi(sk)->imtu = opts.imtu;
1883 l2cap_pi(sk)->omtu = opts.omtu;
1884 l2cap_pi(sk)->fcs = opts.fcs;
1885 l2cap_pi(sk)->max_tx = opts.max_tx;
1886 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1890 if (get_user(opt, (u32 __user *) optval)) {
/* map legacy link-mode bits onto the newer security levels */
1895 if (opt & L2CAP_LM_AUTH)
1896 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1897 if (opt & L2CAP_LM_ENCRYPT)
1898 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1899 if (opt & L2CAP_LM_SECURE)
1900 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1902 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1903 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt() entry point.  SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP. */
1915 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1917 struct sock *sk = sock->sk;
1918 struct bt_security sec;
1922 BT_DBG("sk %p", sk);
1924 if (level == SOL_L2CAP)
1925 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1927 if (level != SOL_BLUETOOTH)
1928 return -ENOPROTOOPT;
/* security level only makes sense for connection-oriented or raw sockets */
1934 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1935 && sk->sk_type != SOCK_RAW) {
1940 sec.level = BT_SECURITY_LOW;
1942 len = min_t(unsigned int, sizeof(sec), optlen);
1943 if (copy_from_user((char *) &sec, optval, len)) {
1948 if (sec.level < BT_SECURITY_LOW ||
1949 sec.level > BT_SECURITY_HIGH) {
1954 l2cap_pi(sk)->sec_level = sec.level;
1957 case BT_DEFER_SETUP:
/* defer_setup may only be toggled before the channel is connected */
1958 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1963 if (get_user(opt, (u32 __user *) optval)) {
1968 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler: L2CAP_OPTIONS, reconstructed
 * L2CAP_LM flags and L2CAP_CONNINFO (HCI handle + device class). */
1980 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1982 struct sock *sk = sock->sk;
1983 struct l2cap_options opts;
1984 struct l2cap_conninfo cinfo;
1988 BT_DBG("sk %p", sk);
1990 if (get_user(len, optlen))
1997 opts.imtu = l2cap_pi(sk)->imtu;
1998 opts.omtu = l2cap_pi(sk)->omtu;
1999 opts.flush_to = l2cap_pi(sk)->flush_to;
2000 opts.mode = l2cap_pi(sk)->mode;
2001 opts.fcs = l2cap_pi(sk)->fcs;
2002 opts.max_tx = l2cap_pi(sk)->max_tx;
2003 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2005 len = min_t(unsigned int, len, sizeof(opts));
2006 if (copy_to_user(optval, (char *) &opts, len))
/* translate sec_level back into the legacy link-mode bitmask */
2012 switch (l2cap_pi(sk)->sec_level) {
2013 case BT_SECURITY_LOW:
2014 opt = L2CAP_LM_AUTH;
2016 case BT_SECURITY_MEDIUM:
2017 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2019 case BT_SECURITY_HIGH:
2020 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2028 if (l2cap_pi(sk)->role_switch)
2029 opt |= L2CAP_LM_MASTER;
2031 if (l2cap_pi(sk)->force_reliable)
2032 opt |= L2CAP_LM_RELIABLE;
2034 if (put_user(opt, (u32 __user *) optval))
2038 case L2CAP_CONNINFO:
/* conninfo is valid once connected, or in deferred-setup CONNECT2 */
2039 if (sk->sk_state != BT_CONNECTED &&
2040 !(sk->sk_state == BT_CONNECT2 &&
2041 bt_sk(sk)->defer_setup)) {
2046 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2047 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2049 len = min_t(unsigned int, len, sizeof(cinfo));
2050 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt() entry point.  SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH returns BT_SECURITY and BT_DEFER_SETUP. */
2064 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2066 struct sock *sk = sock->sk;
2067 struct bt_security sec;
2070 BT_DBG("sk %p", sk);
2072 if (level == SOL_L2CAP)
2073 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2075 if (level != SOL_BLUETOOTH)
2076 return -ENOPROTOOPT;
2078 if (get_user(len, optlen))
2085 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2086 && sk->sk_type != SOCK_RAW) {
2091 sec.level = l2cap_pi(sk)->sec_level;
2093 len = min_t(unsigned int, len, sizeof(sec));
2094 if (copy_to_user(optval, (char *) &sec, len))
2099 case BT_DEFER_SETUP:
2100 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2105 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() entry point.  For ERTM, wait for outstanding frames to be
 * acked first, then close the channel; honour SO_LINGER by waiting for
 * BT_CLOSED up to sk_lingertime. */
2119 static int l2cap_sock_shutdown(struct socket *sock, int how)
2121 struct sock *sk = sock->sk;
2124 BT_DBG("sock %p, sk %p", sock, sk);
2130 if (!sk->sk_shutdown) {
/* drain the ERTM unacked queue before tearing the channel down */
2131 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2132 err = __l2cap_wait_ack(sk);
2134 sk->sk_shutdown = SHUTDOWN_MASK;
2135 l2cap_sock_clear_timer(sk);
2136 __l2cap_sock_close(sk, 0);
2138 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2139 err = bt_sock_wait_state(sk, BT_CLOSED,
/* release() entry point: full shutdown (both directions) then kill the
 * socket. */
2146 static int l2cap_sock_release(struct socket *sock)
2148 struct sock *sk = sock->sk;
2151 BT_DBG("sock %p, sk %p", sock, sk);
2156 err = l2cap_sock_shutdown(sock, 2);
2159 l2cap_sock_kill(sk);
/* Mark a channel as fully configured: clear config state and wake whoever
 * is waiting — the connecting thread for outgoing channels, the listening
 * parent for incoming ones. */
2163 static void l2cap_chan_ready(struct sock *sk)
2165 struct sock *parent = bt_sk(sk)->parent;
2167 BT_DBG("sk %p, parent %p", sk, parent);
2169 l2cap_pi(sk)->conf_state = 0;
2170 l2cap_sock_clear_timer(sk);
2173 /* Outgoing channel.
2174 * Wake up socket sleeping on connect.
2176 sk->sk_state = BT_CONNECTED;
2177 sk->sk_state_change(sk);
2179 /* Incoming channel.
2180 * Wake up socket sleeping on accept.
2182 parent->sk_data_ready(parent, 0);
2186 /* Copy frame to all raw sockets on that connection */
2187 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2189 struct l2cap_chan_list *l = &conn->chan_list;
2190 struct sk_buff *nskb;
2193 BT_DBG("conn %p", conn);
/* walk the connection's channel list under its read lock */
2195 read_lock(&l->lock);
2196 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2197 if (sk->sk_type != SOCK_RAW)
2200 /* Don't send frame to the socket it came from */
/* each raw socket gets its own clone of the frame */
2203 nskb = skb_clone(skb, GFP_ATOMIC);
/* queue failure: drop the clone for this socket only */
2207 if (sock_queue_rcv_skb(sk, nskb))
2210 read_unlock(&l->lock);
2213 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel PDU: L2CAP header (CID 0x0001), command
 * header, then 'dlen' bytes of command data, fragmented to conn->mtu. */
2214 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2215 u8 code, u8 ident, u16 dlen, void *data)
2217 struct sk_buff *skb, **frag;
2218 struct l2cap_cmd_hdr *cmd;
2219 struct l2cap_hdr *lh;
2222 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2223 conn, code, ident, dlen);
2225 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2226 count = min_t(unsigned int, conn->mtu, len);
2228 skb = bt_skb_alloc(count, GFP_ATOMIC);
2232 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2233 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2234 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2236 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2239 cmd->len = cpu_to_le16(dlen);
/* first fragment: whatever fits after the two headers */
2242 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2243 memcpy(skb_put(skb, count), data, count);
2249 /* Continuation fragments (no L2CAP header) */
2250 frag = &skb_shinfo(skb)->frag_list;
2252 count = min_t(unsigned int, conn->mtu, len);
2254 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2258 memcpy(skb_put(*frag, count), data, count);
2263 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: returns its total encoded
 * length and fills type/olen/val.  1/2/4-byte values are converted to host
 * order; longer values are returned as a pointer. */
2273 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2275 struct l2cap_conf_opt *opt = *ptr;
2278 len = L2CAP_CONF_OPT_SIZE + opt->len;
2286 *val = *((u8 *) opt->val);
2290 *val = __le16_to_cpu(*((__le16 *) opt->val));
2294 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* variable-length option: hand back a pointer to the raw bytes */
2298 *val = (unsigned long) opt->val;
2302 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr (counterpart of
 * l2cap_get_conf_opt) and advance *ptr past it.  1/2/4-byte values are
 * stored little-endian; longer values are memcpy'd from val-as-pointer. */
2306 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2308 struct l2cap_conf_opt *opt = *ptr;
2310 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2317 *((u8 *) opt->val) = val;
2321 *((__le16 *) opt->val) = cpu_to_le16(val);
2325 *((__le32 *) opt->val) = cpu_to_le32(val);
2329 memcpy(opt->val, (void *) val, len);
2333 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: a pending acknowledgement was not piggybacked
 * in time, so send it explicitly. */
2336 static void l2cap_ack_timeout(unsigned long arg)
2338 struct sock *sk = (void *) arg;
2341 l2cap_send_ack(l2cap_pi(sk));
/* Initialize per-channel ERTM state: sequence counters, the retransmission /
 * monitor / ack timers, the SREJ and busy queues, the transmit lock and the
 * local-busy workqueue item. */
2345 static inline void l2cap_ertm_init(struct sock *sk)
2347 l2cap_pi(sk)->expected_ack_seq = 0;
2348 l2cap_pi(sk)->unacked_frames = 0;
2349 l2cap_pi(sk)->buffer_seq = 0;
2350 l2cap_pi(sk)->num_acked = 0;
2351 l2cap_pi(sk)->frames_sent = 0;
2353 setup_timer(&l2cap_pi(sk)->retrans_timer,
2354 l2cap_retrans_timeout, (unsigned long) sk);
2355 setup_timer(&l2cap_pi(sk)->monitor_timer,
2356 l2cap_monitor_timeout, (unsigned long) sk);
2357 setup_timer(&l2cap_pi(sk)->ack_timer,
2358 l2cap_ack_timeout, (unsigned long) sk);
2360 __skb_queue_head_init(SREJ_QUEUE(sk));
2361 __skb_queue_head_init(BUSY_QUEUE(sk));
2362 spin_lock_init(&l2cap_pi(sk)->send_lock);
2364 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* Non-zero if 'mode' is supported by both the local stack and the remote
 * feature mask.  Basic mode falls through to the default case. */
2367 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2369 u32 local_feat_mask = l2cap_feat_mask;
/* extended modes are only advertised when enable_ertm is set */
2371 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2374 case L2CAP_MODE_ERTM:
2375 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2376 case L2CAP_MODE_STREAMING:
2377 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick the channel mode to use: keep the requested ERTM/streaming mode when
 * the remote supports it, otherwise fall back to basic mode. */
2383 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2386 case L2CAP_MODE_STREAMING:
2387 case L2CAP_MODE_ERTM:
2388 if (l2cap_mode_supported(mode, remote_feat_mask))
2392 return L2CAP_MODE_BASIC;
/* Build an outgoing configuration request into 'data' based on the
 * channel's mode: MTU option for basic mode, RFC (and optionally FCS)
 * options for ERTM/streaming.  Returns the encoded request length. */
2396 static int l2cap_build_conf_req(struct sock *sk, void *data)
2398 struct l2cap_pinfo *pi = l2cap_pi(sk);
2399 struct l2cap_conf_req *req = data;
2400 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2401 void *ptr = req->data;
2403 BT_DBG("sk %p", sk);
/* only (re)negotiate the mode on the very first request/response */
2405 if (pi->num_conf_req || pi->num_conf_rsp)
2409 case L2CAP_MODE_STREAMING:
2410 case L2CAP_MODE_ERTM:
2411 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2412 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2413 l2cap_send_disconn_req(pi->conn, sk);
2416 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2422 case L2CAP_MODE_BASIC:
/* MTU option only needed when differing from the spec default */
2423 if (pi->imtu != L2CAP_DEFAULT_MTU)
2424 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2427 case L2CAP_MODE_ERTM:
2428 rfc.mode = L2CAP_MODE_ERTM;
2429 rfc.txwin_size = pi->tx_win;
2430 rfc.max_transmit = pi->max_tx;
/* timeouts are chosen by the acceptor; request zero */
2431 rfc.retrans_timeout = 0;
2432 rfc.monitor_timeout = 0;
2433 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* clamp MPS so PDU + overhead (10 bytes) fits the ACL MTU */
2434 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2435 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2437 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2438 sizeof(rfc), (unsigned long) &rfc);
2440 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2443 if (pi->fcs == L2CAP_FCS_NONE ||
2444 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2445 pi->fcs = L2CAP_FCS_NONE;
2446 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2450 case L2CAP_MODE_STREAMING:
2451 rfc.mode = L2CAP_MODE_STREAMING;
/* streaming mode has no retransmissions or timers */
2453 rfc.max_transmit = 0;
2454 rfc.retrans_timeout = 0;
2455 rfc.monitor_timeout = 0;
2456 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2457 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2458 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2460 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2461 sizeof(rfc), (unsigned long) &rfc);
2463 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2466 if (pi->fcs == L2CAP_FCS_NONE ||
2467 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2468 pi->fcs = L2CAP_FCS_NONE;
2469 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2474 /* FIXME: Need actual value of the flush timeout */
2475 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2476 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2478 req->dcid = cpu_to_le16(pi->dcid);
2479 req->flags = cpu_to_le16(0);
/* Parse the peer's configuration request (accumulated in pi->conf_req) and
 * build our response into 'data'.  Negotiates MTU, mode (RFC option) and
 * FCS; returns the encoded response length or -ECONNREFUSED. */
2484 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2486 struct l2cap_pinfo *pi = l2cap_pi(sk);
2487 struct l2cap_conf_rsp *rsp = data;
2488 void *ptr = rsp->data;
2489 void *req = pi->conf_req;
2490 int len = pi->conf_len;
2491 int type, hint, olen;
2493 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2494 u16 mtu = L2CAP_DEFAULT_MTU;
2495 u16 result = L2CAP_CONF_SUCCESS;
2497 BT_DBG("sk %p", sk);
2499 while (len >= L2CAP_CONF_OPT_SIZE) {
2500 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* hint bit: unknown hinted options are silently ignored */
2502 hint = type & L2CAP_CONF_HINT;
2503 type &= L2CAP_CONF_MASK;
2506 case L2CAP_CONF_MTU:
2510 case L2CAP_CONF_FLUSH_TO:
2514 case L2CAP_CONF_QOS:
2517 case L2CAP_CONF_RFC:
2518 if (olen == sizeof(rfc))
2519 memcpy(&rfc, (void *) val, olen);
2522 case L2CAP_CONF_FCS:
2523 if (val == L2CAP_FCS_NONE)
2524 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
/* unknown non-hint option: reject and echo its type back */
2532 result = L2CAP_CONF_UNKNOWN;
2533 *((u8 *) ptr++) = type;
2538 if (pi->num_conf_rsp || pi->num_conf_req)
2542 case L2CAP_MODE_STREAMING:
2543 case L2CAP_MODE_ERTM:
2544 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2545 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2546 return -ECONNREFUSED;
2549 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
/* mode mismatch: counter-propose ours, give up after one retry */
2554 if (pi->mode != rfc.mode) {
2555 result = L2CAP_CONF_UNACCEPT;
2556 rfc.mode = pi->mode;
2558 if (pi->num_conf_rsp == 1)
2559 return -ECONNREFUSED;
2561 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2562 sizeof(rfc), (unsigned long) &rfc);
2566 if (result == L2CAP_CONF_SUCCESS) {
2567 /* Configure output options and let the other side know
2568 * which ones we don't like. */
2570 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2571 result = L2CAP_CONF_UNACCEPT;
2574 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2576 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2579 case L2CAP_MODE_BASIC:
2580 pi->fcs = L2CAP_FCS_NONE;
2581 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2584 case L2CAP_MODE_ERTM:
2585 pi->remote_tx_win = rfc.txwin_size;
2586 pi->remote_max_tx = rfc.max_transmit;
/* NOTE(review): rfc.max_pdu_size is __le16 — the comparison below mixes
 * wire and host order, and le16_to_cpu() is applied to a host-endian
 * value (should be cpu_to_le16()).  Harmless on little-endian only. */
2587 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2588 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2590 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): these should be cpu_to_le16() — the values are host
 * constants being stored into __le16 wire fields. */
2592 rfc.retrans_timeout =
2593 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2594 rfc.monitor_timeout =
2595 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2597 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2599 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2600 sizeof(rfc), (unsigned long) &rfc);
2604 case L2CAP_MODE_STREAMING:
/* NOTE(review): same endianness mix-up as the ERTM branch above */
2605 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2606 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2608 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2610 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2612 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2613 sizeof(rfc), (unsigned long) &rfc);
2618 result = L2CAP_CONF_UNACCEPT;
2620 memset(&rfc, 0, sizeof(rfc));
2621 rfc.mode = pi->mode;
2624 if (result == L2CAP_CONF_SUCCESS)
2625 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2627 rsp->scid = cpu_to_le16(pi->dcid);
2628 rsp->result = cpu_to_le16(result);
2629 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration response and build a follow-up request
 * into 'data'.  Adopts accepted values (MTU, flush timeout, RFC) and, on
 * success, records the negotiated ERTM/streaming parameters. */
2634 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2636 struct l2cap_pinfo *pi = l2cap_pi(sk);
2637 struct l2cap_conf_req *req = data;
2638 void *ptr = req->data;
2641 struct l2cap_conf_rfc rfc;
2643 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2645 while (len >= L2CAP_CONF_OPT_SIZE) {
2646 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2649 case L2CAP_CONF_MTU:
/* peer rejected our MTU: fall back to the minimum and retry */
2650 if (val < L2CAP_DEFAULT_MIN_MTU) {
2651 *result = L2CAP_CONF_UNACCEPT;
2652 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2655 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2658 case L2CAP_CONF_FLUSH_TO:
2660 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2664 case L2CAP_CONF_RFC:
2665 if (olen == sizeof(rfc))
2666 memcpy(&rfc, (void *)val, olen);
/* a state-2 device must not let the peer change the mode */
2668 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2669 rfc.mode != pi->mode)
2670 return -ECONNREFUSED;
2672 pi->mode = rfc.mode;
2675 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2676 sizeof(rfc), (unsigned long) &rfc);
2681 if (*result == L2CAP_CONF_SUCCESS) {
2683 case L2CAP_MODE_ERTM:
2684 pi->remote_tx_win = rfc.txwin_size;
2685 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2686 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2687 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2689 case L2CAP_MODE_STREAMING:
2690 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2694 req->dcid = cpu_to_le16(pi->dcid);
2695 req->flags = cpu_to_le16(0x0000);
/* Build a minimal configuration response (scid/result/flags, no options)
 * into 'data'; returns the encoded length. */
2700 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2702 struct l2cap_conf_rsp *rsp = data;
2703 void *ptr = rsp->data;
2705 BT_DBG("sk %p", sk);
2707 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2708 rsp->result = cpu_to_le16(result);
2709 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from a successful configuration response and
 * record the negotiated ERTM/streaming parameters.  No-op for basic mode. */
2714 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2716 struct l2cap_pinfo *pi = l2cap_pi(sk);
2719 struct l2cap_conf_rfc rfc;
2721 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2723 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2726 while (len >= L2CAP_CONF_OPT_SIZE) {
2727 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2730 case L2CAP_CONF_RFC:
2731 if (olen == sizeof(rfc))
2732 memcpy(&rfc, (void *)val, olen);
2739 case L2CAP_MODE_ERTM:
2740 pi->remote_tx_win = rfc.txwin_size;
2741 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2742 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2743 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2745 case L2CAP_MODE_STREAMING:
2746 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject on the signalling channel.  If it rejects our
 * pending information request, treat feature discovery as done and start
 * the queued connections anyway. */
2750 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2752 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; anything else is ignored here */
2754 if (rej->reason != 0x0000)
2757 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2758 cmd->ident == conn->info_ident) {
2759 del_timer(&conn->info_timer);
2761 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2762 conn->info_ident = 0;
2764 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listener for the PSM,
 * check security and backlog, allocate and register a child socket, then
 * answer with success/pending/refusal and kick off feature discovery if
 * still needed. */
2770 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2772 struct l2cap_chan_list *list = &conn->chan_list;
2773 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2774 struct l2cap_conn_rsp rsp;
2775 struct sock *sk, *parent;
2776 int result, status = L2CAP_CS_NO_INFO;
2778 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2779 __le16 psm = req->psm;
2781 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2783 /* Check if we have socket listening on psm */
2784 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2786 result = L2CAP_CR_BAD_PSM;
2790 /* Check if the ACL is secure enough (if not SDP) */
2791 if (psm != cpu_to_le16(0x0001) &&
2792 !hci_conn_check_link_mode(conn->hcon)) {
/* 0x05: HCI "authentication failure" disconnect reason */
2793 conn->disc_reason = 0x05;
2794 result = L2CAP_CR_SEC_BLOCK;
2798 result = L2CAP_CR_NO_MEM;
2800 /* Check for backlog size */
2801 if (sk_acceptq_is_full(parent)) {
2802 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2806 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2810 write_lock_bh(&list->lock);
2812 /* Check if we already have channel with that dcid */
2813 if (__l2cap_get_chan_by_dcid(list, scid)) {
2814 write_unlock_bh(&list->lock);
2815 sock_set_flag(sk, SOCK_ZAPPED);
2816 l2cap_sock_kill(sk);
2820 hci_conn_hold(conn->hcon);
2822 l2cap_sock_init(sk, parent);
2823 bacpy(&bt_sk(sk)->src, conn->src);
2824 bacpy(&bt_sk(sk)->dst, conn->dst);
2825 l2cap_pi(sk)->psm = psm;
2826 l2cap_pi(sk)->dcid = scid;
2828 __l2cap_chan_add(conn, sk, parent);
2829 dcid = l2cap_pi(sk)->scid;
2831 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* save ident so a deferred response can reuse it later */
2833 l2cap_pi(sk)->ident = cmd->ident;
2835 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2836 if (l2cap_check_security(sk)) {
2837 if (bt_sk(sk)->defer_setup) {
2838 sk->sk_state = BT_CONNECT2;
2839 result = L2CAP_CR_PEND;
2840 status = L2CAP_CS_AUTHOR_PEND;
2841 parent->sk_data_ready(parent, 0);
2843 sk->sk_state = BT_CONFIG;
2844 result = L2CAP_CR_SUCCESS;
2845 status = L2CAP_CS_NO_INFO;
2848 sk->sk_state = BT_CONNECT2;
2849 result = L2CAP_CR_PEND;
2850 status = L2CAP_CS_AUTHEN_PEND;
/* feature discovery not finished yet: answer "pending" */
2853 sk->sk_state = BT_CONNECT2;
2854 result = L2CAP_CR_PEND;
2855 status = L2CAP_CS_NO_INFO;
2858 write_unlock_bh(&list->lock);
2861 bh_unlock_sock(parent);
2864 rsp.scid = cpu_to_le16(scid);
2865 rsp.dcid = cpu_to_le16(dcid);
2866 rsp.result = cpu_to_le16(result);
2867 rsp.status = cpu_to_le16(status);
2868 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2870 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2871 struct l2cap_info_req info;
2872 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2874 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2875 conn->info_ident = l2cap_get_ident(conn);
2877 mod_timer(&conn->info_timer, jiffies +
2878 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2880 l2cap_send_cmd(conn, conn->info_ident,
2881 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response: on success move to BT_CONFIG and send our
 * first configuration request; on pending just mark the state; anything
 * else tears the channel down with ECONNREFUSED. */
2887 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2889 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2890 u16 scid, dcid, result, status;
2894 scid = __le16_to_cpu(rsp->scid);
2895 dcid = __le16_to_cpu(rsp->dcid);
2896 result = __le16_to_cpu(rsp->result);
2897 status = __le16_to_cpu(rsp->status);
2899 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid==0 means the peer refused before assigning us a channel;
 * fall back to matching by command ident */
2902 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2906 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2912 case L2CAP_CR_SUCCESS:
2913 sk->sk_state = BT_CONFIG;
2914 l2cap_pi(sk)->ident = 0;
2915 l2cap_pi(sk)->dcid = dcid;
2916 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2918 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2920 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2921 l2cap_build_conf_req(sk, req), req);
2922 l2cap_pi(sk)->num_conf_req++;
2926 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2930 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a Configuration Request: accumulate (possibly multi-fragment)
 * option data in the channel's conf_req buffer, parse it when complete,
 * send our response, and bring the channel up once both directions are
 * configured. */
2938 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2940 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2946 dcid = __le16_to_cpu(req->dcid);
2947 flags = __le16_to_cpu(req->flags);
2949 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2951 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2955 if (sk->sk_state == BT_DISCONN)
2958 /* Reject if config buffer is too small. */
2959 len = cmd_len - sizeof(*req);
2960 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2961 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2962 l2cap_build_conf_rsp(sk, rsp,
2963 L2CAP_CONF_REJECT, flags), rsp);
/* append this fragment's options to the accumulated buffer */
2968 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2969 l2cap_pi(sk)->conf_len += len;
/* C-flag (0x0001) set: more fragments follow */
2971 if (flags & 0x0001) {
2972 /* Incomplete config. Send empty response. */
2973 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2974 l2cap_build_conf_rsp(sk, rsp,
2975 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2979 /* Complete config. */
2980 len = l2cap_parse_conf_req(sk, rsp);
2982 l2cap_send_disconn_req(conn, sk);
2986 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2987 l2cap_pi(sk)->num_conf_rsp++;
2989 /* Reset config buffer. */
2990 l2cap_pi(sk)->conf_len = 0;
2992 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* both sides configured: FCS defaults to CRC16 unless both opted out */
2995 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2996 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2997 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2998 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3000 sk->sk_state = BT_CONNECTED;
3002 l2cap_pi(sk)->next_tx_seq = 0;
3003 l2cap_pi(sk)->expected_tx_seq = 0;
3004 __skb_queue_head_init(TX_QUEUE(sk));
3005 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3006 l2cap_ertm_init(sk);
3008 l2cap_chan_ready(sk);
/* we answered the peer first; now send our own config request */
3012 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3014 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3015 l2cap_build_conf_req(sk, buf), buf);
3016 l2cap_pi(sk)->num_conf_req++;
/* Handle a Configuration Response: on success record the negotiated RFC
 * values; on unaccept retry with a new request (bounded by
 * L2CAP_CONF_MAX_CONF_RSP); otherwise disconnect.  Brings the channel up
 * once both directions are configured. */
3024 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3026 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3027 u16 scid, flags, result;
/* NOTE(review): cmd->len is a wire-order __le16 — this arithmetic lacks
 * le16_to_cpu() and is wrong on big-endian hosts; compare the
 * le16-converted cmd_len used by l2cap_config_req(). */
3029 int len = cmd->len - sizeof(*rsp);
3031 scid = __le16_to_cpu(rsp->scid);
3032 flags = __le16_to_cpu(rsp->flags);
3033 result = __le16_to_cpu(rsp->result);
3035 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3036 scid, flags, result);
3038 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3043 case L2CAP_CONF_SUCCESS:
3044 l2cap_conf_rfc_get(sk, rsp->data, len);
3047 case L2CAP_CONF_UNACCEPT:
3048 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* response options must fit in our request buffer */
3051 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3052 l2cap_send_disconn_req(conn, sk);
3056 /* throw out any old stored conf requests */
3057 result = L2CAP_CONF_SUCCESS;
3058 len = l2cap_parse_conf_rsp(sk, rsp->data,
3061 l2cap_send_disconn_req(conn, sk);
3065 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3066 L2CAP_CONF_REQ, len, req);
3067 l2cap_pi(sk)->num_conf_req++;
3068 if (result != L2CAP_CONF_SUCCESS)
/* reject/failure: give the peer 5s to answer our disconnect */
3074 sk->sk_state = BT_DISCONN;
3075 sk->sk_err = ECONNRESET;
3076 l2cap_sock_set_timer(sk, HZ * 5);
3077 l2cap_send_disconn_req(conn, sk);
3084 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3086 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3087 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3088 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3089 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3091 sk->sk_state = BT_CONNECTED;
3092 l2cap_pi(sk)->next_tx_seq = 0;
3093 l2cap_pi(sk)->expected_tx_seq = 0;
3094 __skb_queue_head_init(TX_QUEUE(sk));
3095 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3096 l2cap_ertm_init(sk);
3098 l2cap_chan_ready(sk);
/* Handle a Disconnection Request: acknowledge it, purge all transmit and
 * ERTM state (queues and timers), then delete and kill the channel. */
3106 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3108 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3109 struct l2cap_disconn_rsp rsp;
3113 scid = __le16_to_cpu(req->scid);
3114 dcid = __le16_to_cpu(req->dcid);
3116 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* the peer's dcid is our local scid */
3118 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3122 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3123 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3124 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3126 sk->sk_shutdown = SHUTDOWN_MASK;
3128 skb_queue_purge(TX_QUEUE(sk));
3130 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3131 skb_queue_purge(SREJ_QUEUE(sk));
3132 skb_queue_purge(BUSY_QUEUE(sk));
3133 del_timer(&l2cap_pi(sk)->retrans_timer);
3134 del_timer(&l2cap_pi(sk)->monitor_timer);
3135 del_timer(&l2cap_pi(sk)->ack_timer);
3138 l2cap_chan_del(sk, ECONNRESET);
3141 l2cap_sock_kill(sk);
/* Handle an L2CAP Disconnection Response: the disconnect we initiated has
 * been acknowledged.  Tear down queues/timers and delete the channel with
 * error 0 (clean, locally-requested close — contrast l2cap_disconnect_req). */
3145 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3147 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3151 scid = __le16_to_cpu(rsp->scid);
3152 dcid = __le16_to_cpu(rsp->dcid);
3154 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
/* We initiated the disconnect, so our channel is keyed by scid here. */
3156 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3160 skb_queue_purge(TX_QUEUE(sk));
3162 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3163 skb_queue_purge(SREJ_QUEUE(sk));
3164 skb_queue_purge(BUSY_QUEUE(sk));
3165 del_timer(&l2cap_pi(sk)->retrans_timer);
3166 del_timer(&l2cap_pi(sk)->monitor_timer);
3167 del_timer(&l2cap_pi(sk)->ack_timer);
3170 l2cap_chan_del(sk, 0);
3173 l2cap_sock_kill(sk);
/* Handle an L2CAP Information Request.  Supported queries are the extended
 * feature mask and the fixed-channel map; anything else is answered with
 * L2CAP_IR_NOTSUPP. */
3177 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3179 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3182 type = __le16_to_cpu(req->type);
3184 BT_DBG("type 0x%4.4x", type);
3186 if (type == L2CAP_IT_FEAT_MASK) {
3188 u32 feat_mask = l2cap_feat_mask;
3189 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3190 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3191 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming support on top of the base feature mask. */
3193 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Feature mask goes into the variable data area; may be unaligned. */
3195 put_unaligned_le32(feat_mask, rsp->data);
3196 l2cap_send_cmd(conn, cmd->ident,
3197 L2CAP_INFO_RSP, sizeof(buf), buf);
3198 } else if (type == L2CAP_IT_FIXED_CHAN) {
3200 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3201 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3202 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed channel bitmap follows the 4-byte rsp header. */
3203 memcpy(buf + 4, l2cap_fixed_chan, 8);
3204 l2cap_send_cmd(conn, cmd->ident,
3205 L2CAP_INFO_RSP, sizeof(buf), buf);
/* Unknown info type: reply "not supported" with the type echoed back. */
3207 struct l2cap_info_rsp rsp;
3208 rsp.type = cpu_to_le16(type);
3209 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3210 l2cap_send_cmd(conn, cmd->ident,
3211 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an L2CAP Information Response during connection setup.  A feature
 * mask answer may trigger a follow-up fixed-channel query; once the info
 * exchange is complete, mark it done and start pending channels. */
3217 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3219 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3222 type = __le16_to_cpu(rsp->type);
3223 result = __le16_to_cpu(rsp->result);
3225 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* The response arrived, so the info-request guard timer can be stopped. */
3227 del_timer(&conn->info_timer);
3229 if (type == L2CAP_IT_FEAT_MASK) {
3230 conn->feat_mask = get_unaligned_le32(rsp->data);
/* Peer supports fixed channels: chain a second info request for the map. */
3232 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3233 struct l2cap_info_req req;
3234 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3236 conn->info_ident = l2cap_get_ident(conn);
3238 l2cap_send_cmd(conn, conn->info_ident,
3239 L2CAP_INFO_REQ, sizeof(req), &req);
/* No fixed-channel support: info exchange is finished, start channels. */
3241 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3242 conn->info_ident = 0;
3244 l2cap_conn_start(conn);
3246 } else if (type == L2CAP_IT_FIXED_CHAN) {
3247 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3248 conn->info_ident = 0;
3250 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signalling channel: walk the skb, which may carry
 * several concatenated commands, and dispatch each to its handler.  Any
 * handler error is answered with a Command Reject. */
3256 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3258 u8 *data = skb->data;
3260 struct l2cap_cmd_hdr cmd;
/* Give raw (SOCK_RAW) listeners a copy of the signalling traffic first. */
3263 l2cap_raw_recv(conn, skb);
3265 while (len >= L2CAP_CMD_HDR_SIZE) {
3267 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3268 data += L2CAP_CMD_HDR_SIZE;
3269 len -= L2CAP_CMD_HDR_SIZE;
3271 cmd_len = le16_to_cpu(cmd.len);
3273 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Guard against truncated commands and the reserved ident value 0. */
3275 if (cmd_len > len || !cmd.ident) {
3276 BT_DBG("corrupted command");
3281 case L2CAP_COMMAND_REJ:
3282 l2cap_command_rej(conn, &cmd, data);
3285 case L2CAP_CONN_REQ:
3286 err = l2cap_connect_req(conn, &cmd, data);
3289 case L2CAP_CONN_RSP:
3290 err = l2cap_connect_rsp(conn, &cmd, data);
3293 case L2CAP_CONF_REQ:
3294 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3297 case L2CAP_CONF_RSP:
3298 err = l2cap_config_rsp(conn, &cmd, data);
3301 case L2CAP_DISCONN_REQ:
3302 err = l2cap_disconnect_req(conn, &cmd, data);
3305 case L2CAP_DISCONN_RSP:
3306 err = l2cap_disconnect_rsp(conn, &cmd, data);
3309 case L2CAP_ECHO_REQ:
/* Echo: bounce the request payload straight back to the sender. */
3310 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3313 case L2CAP_ECHO_RSP:
3316 case L2CAP_INFO_REQ:
3317 err = l2cap_information_req(conn, &cmd, data);
3320 case L2CAP_INFO_RSP:
3321 err = l2cap_information_rsp(conn, &cmd, data);
3325 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Handler failed: tell the peer via Command Reject (reason unmapped). */
3331 struct l2cap_cmd_rej rej;
3332 BT_DBG("error %d", err);
3334 /* FIXME: Map err to a valid reason */
3335 rej.reason = cpu_to_le16(0);
3336 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify the 2-byte CRC16 FCS trailing an ERTM/streaming frame.
 * skb_trim() drops the FCS from the payload length, but the bytes are
 * still present in the buffer, so reading at skb->data + skb->len picks
 * up the received FCS.  The CRC covers the L2CAP header (at data -
 * hdr_size) plus the payload.  Nonzero return means FCS mismatch. */
3346 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3348 u16 our_fcs, rcv_fcs;
/* L2CAP basic header plus the 2-byte ERTM control field. */
3349 int hdr_size = L2CAP_HDR_SIZE + 2;
3351 if (pi->fcs == L2CAP_FCS_CRC16) {
3352 skb_trim(skb, skb->len - 2);
3353 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3354 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3356 if (our_fcs != rcv_fcs)
/* Answer a poll (P=1) with F=1: send RNR if we are locally busy, otherwise
 * flush pending I-frames; if nothing was sent, fall back to an RR so the
 * peer always gets its final response. */
3362 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3364 struct l2cap_pinfo *pi = l2cap_pi(sk);
3367 pi->frames_sent = 0;
/* Request that the next outgoing frame carries the F (final) bit. */
3368 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3370 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3372 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* Locally busy: tell the peer to stop sending (RNR with F set). */
3373 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3374 l2cap_send_sframe(pi, control);
3375 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3376 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3379 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3380 __mod_retrans_timer();
/* Try to carry the F bit on a pending I-frame first. */
3382 spin_lock_bh(&pi->send_lock);
3383 l2cap_ertm_send(sk);
3384 spin_unlock_bh(&pi->send_lock);
/* No I-frame went out: respond with a plain RR instead. */
3386 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3387 pi->frames_sent == 0) {
3388 control |= L2CAP_SUPER_RCV_READY;
3389 l2cap_send_sframe(pi, control);
/* Insert an out-of-sequence I-frame into the SREJ queue, keeping the queue
 * ordered by tx_seq.  Returns nonzero (via the duplicate path) when a frame
 * with the same tx_seq is already queued. */
3393 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3395 struct sk_buff *next_skb;
3397 bt_cb(skb)->tx_seq = tx_seq;
3398 bt_cb(skb)->sar = sar;
3400 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: frame simply becomes the tail. */
3402 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Duplicate tx_seq already stored — caller treats this as an error. */
3407 if (bt_cb(next_skb)->tx_seq == tx_seq)
/* NOTE(review): a plain ">" compare on mod-64 sequence numbers does not
 * account for wraparound and can misplace frames near the seq boundary —
 * TODO confirm against the upstream wraparound fix. */
3410 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3411 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3415 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3418 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3420 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble an SDU from ERTM I-frames according to the SAR bits in the
 * control field: unsegmented frames go straight to the socket; START
 * allocates the reassembly buffer, CONTINUE appends, END delivers a clone
 * of the assembled SDU.  On protocol violation the channel is disconnected. */
3425 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3427 struct l2cap_pinfo *pi = l2cap_pi(sk);
3428 struct sk_buff *_skb;
3431 switch (control & L2CAP_CTRL_SAR) {
3432 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame in the middle of a segmented SDU is an error. */
3433 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3436 err = sock_queue_rcv_skb(sk, skb);
3442 case L2CAP_SDU_START:
3443 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
/* First two payload bytes of a START frame carry the total SDU length. */
3446 pi->sdu_len = get_unaligned_le16(skb->data);
3448 if (pi->sdu_len > pi->imtu)
3451 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3455 /* pull sdu_len bytes only after alloc, because of Local Busy
3456 * condition we have to be sure that this will be executed
3457 * only once, i.e., when alloc does not fail */
3460 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3462 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3463 pi->partial_sdu_len = skb->len;
3466 case L2CAP_SDU_CONTINUE:
3467 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* Running total must never exceed the announced SDU length. */
3473 pi->partial_sdu_len += skb->len;
3474 if (pi->partial_sdu_len > pi->sdu_len)
3477 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* END segment (default SAR case below this point). */
3482 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* On a retried delivery the last fragment was already appended, so skip
 * the length bookkeeping and copy. */
3488 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3489 pi->partial_sdu_len += skb->len;
3491 if (pi->partial_sdu_len > pi->imtu)
3494 if (pi->partial_sdu_len != pi->sdu_len)
3497 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3500 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
/* Clone failed: remember to retry delivery on the next attempt. */
3502 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3506 err = sock_queue_rcv_skb(sk, _skb);
3509 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3513 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3514 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
/* Protocol violation path: drop the channel. */
3528 l2cap_send_disconn_req(pi->conn, sk);
/* Workqueue handler for the ERTM local-busy condition: repeatedly try to
 * drain BUSY_QUEUE into the socket receive queue, sleeping between
 * attempts; give up (disconnect) after L2CAP_LOCAL_BUSY_TRIES.  When the
 * queue empties, clear the busy state and poll the peer (RR with P=1) so
 * transmission resumes. */
3533 static void l2cap_busy_work(struct work_struct *work)
3535 DECLARE_WAITQUEUE(wait, current);
3536 struct l2cap_pinfo *pi =
3537 container_of(work, struct l2cap_pinfo, busy_work);
/* struct sock is the first member of l2cap_pinfo, so this cast is valid. */
3538 struct sock *sk = (struct sock *)pi;
3539 int n_tries = 0, timeo = HZ/5, err;
3540 struct sk_buff *skb;
3545 add_wait_queue(sk->sk_sleep, &wait);
3546 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3547 set_current_state(TASK_INTERRUPTIBLE);
/* Receiver stayed busy too long: give up and disconnect the channel. */
3549 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3551 l2cap_send_disconn_req(pi->conn, sk);
3558 if (signal_pending(current)) {
3559 err = sock_intr_errno(timeo);
3564 timeo = schedule_timeout(timeo);
3567 err = sock_error(sk);
/* Try to push every queued frame through reassembly/delivery. */
3571 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3572 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3573 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Still can't deliver: requeue at the head and retry later. */
3575 skb_queue_head(BUSY_QUEUE(sk), skb);
3579 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3586 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* We previously sent RNR: poll the peer with RR/P=1 to restart flow. */
3589 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3590 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3591 l2cap_send_sframe(pi, control);
3592 l2cap_pi(sk)->retry_count = 1;
3594 del_timer(&pi->retrans_timer);
3595 __mod_monitor_timer();
3597 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3600 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3601 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3603 set_current_state(TASK_RUNNING);
3604 remove_wait_queue(sk->sk_sleep, &wait);
/* Deliver a received ERTM I-frame.  If already in local-busy state, just
 * park the frame on BUSY_QUEUE.  Otherwise attempt reassembly; on failure
 * (e.g. receive buffer full) enter the busy condition: queue the frame,
 * send RNR to the peer, and kick the busy workqueue to retry delivery. */
3609 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3611 struct l2cap_pinfo *pi = l2cap_pi(sk);
3614 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3615 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3616 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3620 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Delivered: advance the receive window. */
3622 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3626 /* Busy Condition */
3627 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3628 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3629 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3631 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3632 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3633 l2cap_send_sframe(pi, sctrl);
3635 pi->conn_state |= L2CAP_CONN_RNR_SENT;
/* Defer further delivery attempts to l2cap_busy_work(). */
3637 queue_work(_busy_wq, &pi->busy_work);
/* Reassemble an SDU in streaming mode.  Unlike ERTM there is no
 * retransmission, so malformed or oversized segments are simply dropped
 * rather than causing a disconnect. */
3642 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3644 struct l2cap_pinfo *pi = l2cap_pi(sk);
3645 struct sk_buff *_skb;
3649 * TODO: We have to notify the userland if some data is lost with the
3653 switch (control & L2CAP_CTRL_SAR) {
3654 case L2CAP_SDU_UNSEGMENTED:
/* Unsegmented frame mid-SDU: abandon the partial SDU and reset SAR state. */
3655 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3660 err = sock_queue_rcv_skb(sk, skb);
3666 case L2CAP_SDU_START:
3667 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes of a START frame carry the total SDU length. */
3672 pi->sdu_len = get_unaligned_le16(skb->data);
3675 if (pi->sdu_len > pi->imtu) {
3680 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3686 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3688 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3689 pi->partial_sdu_len = skb->len;
3693 case L2CAP_SDU_CONTINUE:
3694 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3697 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3699 pi->partial_sdu_len += skb->len;
3700 if (pi->partial_sdu_len > pi->sdu_len)
/* END segment (default SAR case below this point). */
3708 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3711 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3713 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3714 pi->partial_sdu_len += skb->len;
3716 if (pi->partial_sdu_len > pi->imtu)
3719 if (pi->partial_sdu_len == pi->sdu_len) {
/* NOTE(review): skb_clone(GFP_ATOMIC) can return NULL and the result is
 * passed to sock_queue_rcv_skb without a visible check here — verify a
 * NULL check exists in the elided lines or upstream. */
3720 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3721 err = sock_queue_rcv_skb(sk, _skb);
/* After a missing frame arrives, drain the in-order run now available at
 * the head of the SREJ queue: deliver each consecutive frame to reassembly
 * and advance buffer_seq_srej (mod 64) until the next gap. */
3736 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3738 struct sk_buff *skb;
3741 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Stop at the first gap in the sequence. */
3742 if (bt_cb(skb)->tx_seq != tx_seq)
3745 skb = skb_dequeue(SREJ_QUEUE(sk));
3746 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3747 l2cap_ertm_reassembly_sdu(sk, skb, control);
3748 l2cap_pi(sk)->buffer_seq_srej =
3749 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
/* Walk the outstanding-SREJ list: entries up to and including tx_seq are
 * satisfied and freed; for every remaining entry, resend its SREJ S-frame
 * and re-append it so the list keeps tracking unanswered requests. */
3754 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3756 struct l2cap_pinfo *pi = l2cap_pi(sk);
3757 struct srej_list *l, *tmp;
3760 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3761 if (l->tx_seq == tx_seq) {
3766 control = L2CAP_SUPER_SELECT_REJECT;
3767 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3768 l2cap_send_sframe(pi, control);
/* Keep the entry: move it to the tail of the pending-SREJ list. */
3770 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send one SREJ S-frame for every sequence number between expected_tx_seq
 * and the just-received tx_seq, recording each request in SREJ_LIST so it
 * can be matched when the retransmission arrives. */
3774 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3776 struct l2cap_pinfo *pi = l2cap_pi(sk);
3777 struct srej_list *new;
3780 while (tx_seq != pi->expected_tx_seq) {
3781 control = L2CAP_SUPER_SELECT_REJECT;
3782 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3783 l2cap_send_sframe(pi, control);
/* NOTE(review): kzalloc(GFP_ATOMIC) can fail and the result is
 * dereferenced without a visible NULL check — verify against upstream. */
3785 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3786 new->tx_seq = pi->expected_tx_seq++;
3787 list_add_tail(&new->list, SREJ_LIST(sk));
/* Skip past the frame that did arrive. */
3789 pi->expected_tx_seq++;
/* Core ERTM receive path for I-frames: validates tx_seq against the
 * receive window, handles the SREJ recovery state machine for
 * out-of-sequence frames, delivers in-sequence frames, and acknowledges
 * roughly every tx_win/6 frames. */
3792 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3794 struct l2cap_pinfo *pi = l2cap_pi(sk);
3795 u8 tx_seq = __get_txseq(rx_control);
3796 u8 req_seq = __get_reqseq(rx_control);
3797 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
/* NOTE(review): these are u8, so the "< 0" checks below can never fire;
 * upstream later made the offsets int — confirm intended behavior. */
3798 u8 tx_seq_offset, expected_tx_seq_offset;
3799 int num_to_ack = (pi->tx_win/6) + 1;
3802 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F=1 answers our poll: stop the monitor timer and leave WAIT_F state. */
3804 if (L2CAP_CTRL_FINAL & rx_control &&
3805 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3806 del_timer(&pi->monitor_timer);
3807 if (pi->unacked_frames > 0)
3808 __mod_retrans_timer();
3809 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* The piggy-backed ReqSeq acknowledges our transmitted frames. */
3812 pi->expected_ack_seq = req_seq;
3813 l2cap_drop_acked_frames(sk);
3815 if (tx_seq == pi->expected_tx_seq)
3818 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3819 if (tx_seq_offset < 0)
3820 tx_seq_offset += 64;
3822 /* invalid tx_seq */
3823 if (tx_seq_offset >= pi->tx_win) {
3824 l2cap_send_disconn_req(pi->conn, sk);
/* NOTE(review): "==" compares the whole state word against a single flag;
 * a bitwise test (conn_state & L2CAP_CONN_LOCAL_BUSY) is almost certainly
 * intended — this matches a known upstream fix. */
3828 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3831 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3832 struct srej_list *first;
3834 first = list_first_entry(SREJ_LIST(sk),
3835 struct srej_list, list);
/* Arrived frame is the oldest one we SREJ'd: queue it, drain the gap. */
3836 if (tx_seq == first->tx_seq) {
3837 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3838 l2cap_check_srej_gap(sk, tx_seq);
3840 list_del(&first->list);
/* All SREJs satisfied: leave recovery and resync buffer_seq. */
3843 if (list_empty(SREJ_LIST(sk))) {
3844 pi->buffer_seq = pi->buffer_seq_srej;
3845 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3849 struct srej_list *l;
3851 /* duplicated tx_seq */
3852 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3855 list_for_each_entry(l, SREJ_LIST(sk), list) {
3856 if (l->tx_seq == tx_seq) {
3857 l2cap_resend_srejframe(sk, tx_seq);
3861 l2cap_send_srejframe(sk, tx_seq);
3864 expected_tx_seq_offset =
3865 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3866 if (expected_tx_seq_offset < 0)
3867 expected_tx_seq_offset += 64;
3869 /* duplicated tx_seq */
3870 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter SREJ recovery. */
3873 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3875 INIT_LIST_HEAD(SREJ_LIST(sk));
3876 pi->buffer_seq_srej = pi->buffer_seq;
3878 __skb_queue_head_init(SREJ_QUEUE(sk));
3879 __skb_queue_head_init(BUSY_QUEUE(sk));
3880 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3882 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3884 l2cap_send_srejframe(sk, tx_seq);
/* Expected in-sequence frame path. */
3889 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3891 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3892 bt_cb(skb)->tx_seq = tx_seq;
3893 bt_cb(skb)->sar = sar;
3894 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3898 if (rx_control & L2CAP_CTRL_FINAL) {
3899 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3900 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3902 l2cap_retransmit_frames(sk);
3905 err = l2cap_push_rx_skb(sk, skb, rx_control);
/* Acknowledge after every num_to_ack frames to keep the window moving. */
3911 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3912 if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver Ready S-frame: credit the acknowledged frames, then
 * branch on P/F bits — a poll demands a final response, a final clears
 * remote-busy and may trigger retransmission, otherwise just resume
 * sending. */
3922 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3924 struct l2cap_pinfo *pi = l2cap_pi(sk);
3926 pi->expected_ack_seq = __get_reqseq(rx_control);
3927 l2cap_drop_acked_frames(sk);
3929 if (rx_control & L2CAP_CTRL_POLL) {
3930 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3931 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3932 (pi->unacked_frames > 0))
3933 __mod_retrans_timer();
3935 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* In SREJ recovery, the F-bit answer rides on the tail SREJ frame. */
3936 l2cap_send_srejtail(sk);
3938 l2cap_send_i_or_rr_or_rnr(sk);
3939 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3942 } else if (rx_control & L2CAP_CTRL_FINAL) {
3943 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3945 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3946 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3948 l2cap_retransmit_frames(sk);
3951 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3952 (pi->unacked_frames > 0))
3953 __mod_retrans_timer();
3955 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3956 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
/* Remote is ready again: flush anything waiting in the TX queue. */
3959 spin_lock_bh(&pi->send_lock);
3960 l2cap_ertm_send(sk);
3961 spin_unlock_bh(&pi->send_lock);
/* Handle a Reject S-frame: the peer requests go-back-N retransmission
 * starting at ReqSeq.  Frames below ReqSeq are implicitly acknowledged. */
3966 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3968 struct l2cap_pinfo *pi = l2cap_pi(sk);
3969 u8 tx_seq = __get_reqseq(rx_control);
3971 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3973 pi->expected_ack_seq = tx_seq;
3974 l2cap_drop_acked_frames(sk);
3976 if (rx_control & L2CAP_CTRL_FINAL) {
/* REJ with F=1: only retransmit if we haven't already acted on a REJ. */
3977 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3978 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3980 l2cap_retransmit_frames(sk);
3982 l2cap_retransmit_frames(sk);
/* While a poll is outstanding, remember we already honored this REJ. */
3984 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3985 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject S-frame: retransmit exactly the frame the
 * peer asks for (ReqSeq).  P and F bits control acknowledgement credit
 * and duplicate-suppression via SREJ_ACT/srej_save_reqseq. */
3988 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3990 struct l2cap_pinfo *pi = l2cap_pi(sk);
3991 u8 tx_seq = __get_reqseq(rx_control);
3993 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3995 if (rx_control & L2CAP_CTRL_POLL) {
/* SREJ with P=1 also acknowledges everything below tx_seq. */
3996 pi->expected_ack_seq = tx_seq;
3997 l2cap_drop_acked_frames(sk);
3998 l2cap_retransmit_one_frame(sk, tx_seq);
4000 spin_lock_bh(&pi->send_lock);
4001 l2cap_ertm_send(sk);
4002 spin_unlock_bh(&pi->send_lock);
4004 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4005 pi->srej_save_reqseq = tx_seq;
4006 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4008 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* Suppress a duplicate retransmission if this SREJ was already honored. */
4009 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4010 pi->srej_save_reqseq == tx_seq)
4011 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4013 l2cap_retransmit_one_frame(sk, tx_seq);
4015 l2cap_retransmit_one_frame(sk, tx_seq);
4016 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4017 pi->srej_save_reqseq = tx_seq;
4018 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready S-frame: mark the peer busy, credit the
 * acknowledged frames, stop retransmitting, and answer a poll with the
 * required final response. */
4023 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4025 struct l2cap_pinfo *pi = l2cap_pi(sk);
4026 u8 tx_seq = __get_reqseq(rx_control);
4028 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4029 pi->expected_ack_seq = tx_seq;
4030 l2cap_drop_acked_frames(sk);
4032 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* Peer can't receive: pointless to keep the retransmission timer armed. */
4033 del_timer(&pi->retrans_timer);
4034 if (rx_control & L2CAP_CTRL_POLL)
4035 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
/* In SREJ recovery the poll is answered via the SREJ tail frame. */
4039 if (rx_control & L2CAP_CTRL_POLL)
4040 l2cap_send_srejtail(sk);
4042 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch a received ERTM S-frame (RR/REJ/SREJ/RNR) to its handler.
 * An F=1 bit answering our poll first clears WAIT_F and stops the
 * monitor timer, mirroring the I-frame path. */
4045 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4047 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4049 if (L2CAP_CTRL_FINAL & rx_control &&
4050 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4051 del_timer(&l2cap_pi(sk)->monitor_timer);
4052 if (l2cap_pi(sk)->unacked_frames > 0)
4053 __mod_retrans_timer();
4054 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4057 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4058 case L2CAP_SUPER_RCV_READY:
4059 l2cap_data_channel_rrframe(sk, rx_control);
4062 case L2CAP_SUPER_REJECT:
4063 l2cap_data_channel_rejframe(sk, rx_control);
4066 case L2CAP_SUPER_SELECT_REJECT:
4067 l2cap_data_channel_srejframe(sk, rx_control);
4070 case L2CAP_SUPER_RCV_NOT_READY:
4071 l2cap_data_channel_rnrframe(sk, rx_control);
/* Entry point for data received on a connection-oriented channel.  Looks
 * up the channel by CID and demultiplexes by channel mode: basic mode
 * queues straight to the socket, ERTM validates control/FCS/ReqSeq and
 * feeds the I-/S-frame handlers, streaming mode skips ahead on loss. */
4079 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4082 struct l2cap_pinfo *pi;
/* NOTE(review): the offsets are u8, so the "< 0" checks below can never
 * be true; upstream later used int — confirm intended behavior. */
4084 u8 tx_seq, req_seq, next_tx_seq_offset, req_seq_offset;
4086 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4088 BT_DBG("unknown cid 0x%4.4x", cid);
4094 BT_DBG("sk %p, len %d", sk, skb->len);
4096 if (sk->sk_state != BT_CONNECTED)
4100 case L2CAP_MODE_BASIC:
4101 /* If socket recv buffers overflows we drop data here
4102 * which is *bad* because L2CAP has to be reliable.
4103 * But we don't have any other choice. L2CAP doesn't
4104 * provide flow control mechanism. */
4106 if (pi->imtu < skb->len)
4109 if (!sock_queue_rcv_skb(sk, skb))
4113 case L2CAP_MODE_ERTM:
4114 control = get_unaligned_le16(skb->data)

;
4118 if (__is_sar_start(control))
4121 if (pi->fcs == L2CAP_FCS_CRC16)
4125 * We can just drop the corrupted I-frame here.
4126 * Receiver will miss it and start proper recovery
4127 * procedures and ask retransmission.
/* Oversized payload violates the negotiated MPS: disconnect. */
4129 if (len > pi->mps) {
4130 l2cap_send_disconn_req(pi->conn, sk);
4134 if (l2cap_check_fcs(pi, skb))
4137 req_seq = __get_reqseq(control);
4138 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4139 if (req_seq_offset < 0)
4140 req_seq_offset += 64;
4142 next_tx_seq_offset =
4143 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4144 if (next_tx_seq_offset < 0)
4145 next_tx_seq_offset += 64;
4147 /* check for invalid req-seq */
/* A ReqSeq acknowledging frames we never sent is a protocol error. */
4148 if (req_seq_offset > next_tx_seq_offset) {
4149 l2cap_send_disconn_req(pi->conn, sk);
4153 if (__is_iframe(control)) {
4155 l2cap_send_disconn_req(pi->conn, sk);
4159 l2cap_data_channel_iframe(sk, control, skb);
4162 l2cap_send_disconn_req(pi->conn, sk);
4166 l2cap_data_channel_sframe(sk, control, skb);
4171 case L2CAP_MODE_STREAMING:
4172 control = get_unaligned_le16(skb->data);
4176 if (__is_sar_start(control))
4179 if (pi->fcs == L2CAP_FCS_CRC16)
4182 if (len > pi->mps || len < 4 || __is_sframe(control))
4185 if (l2cap_check_fcs(pi, skb))
4188 tx_seq = __get_txseq(control);
/* Streaming never retransmits: on loss, just resync to tx_seq + 1. */
4190 if (pi->expected_tx_seq == tx_seq)
4191 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4193 pi->expected_tx_seq = (tx_seq + 1) % 64;
4195 l2cap_streaming_reassembly_sdu(sk, skb, control);
4200 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Deliver a connectionless (CID 2) frame: find a socket bound to the PSM,
 * check its state and MTU, and queue the skb to its receive buffer. */
4214 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4218 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4222 BT_DBG("sk %p, len %d", sk, skb->len);
4224 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4227 if (l2cap_pi(sk)->imtu < skb->len)
4230 if (!sock_queue_rcv_skb(sk, skb))
/* Route one complete L2CAP frame by destination CID: signalling channel,
 * connectionless channel (PSM-addressed), or a connection-oriented data
 * channel.  Validates the header length against the skb first. */
4242 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4244 struct l2cap_hdr *lh = (void *) skb->data;
4248 skb_pull(skb, L2CAP_HDR_SIZE);
4249 cid = __le16_to_cpu(lh->cid);
4250 len = __le16_to_cpu(lh->len);
/* Header length must match what actually arrived. */
4252 if (len != skb->len) {
4257 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4260 case L2CAP_CID_SIGNALING:
4261 l2cap_sig_channel(conn, skb);
4264 case L2CAP_CID_CONN_LESS:
/* Connectionless frames carry the target PSM as their first 2 bytes. */
4265 psm = get_unaligned_le16(skb->data);
4267 l2cap_conless_channel(conn, psm, skb);
4271 l2cap_data_channel(conn, cid, skb);
4276 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an incoming ACL connection is being set up.  Scan the
 * listening L2CAP sockets and build the link-mode mask (accept/master);
 * an exact local-address match takes precedence over wildcard binds. */
4278 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4280 int exact = 0, lm1 = 0, lm2 = 0;
4281 register struct sock *sk;
4282 struct hlist_node *node;
/* L2CAP only runs over ACL links. */
4284 if (type != ACL_LINK)
4287 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4289 /* Find listening sockets and check their link_mode */
4290 read_lock(&l2cap_sk_list.lock);
4291 sk_for_each(sk, node, &l2cap_sk_list.head) {
4292 if (sk->sk_state != BT_LISTEN)
/* lm1: sockets bound to this adapter's own address (exact match). */
4295 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4296 lm1 |= HCI_LM_ACCEPT;
4297 if (l2cap_pi(sk)->role_switch)
4298 lm1 |= HCI_LM_MASTER;
/* lm2: sockets bound to the wildcard address. */
4300 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4301 lm2 |= HCI_LM_ACCEPT;
4302 if (l2cap_pi(sk)->role_switch)
4303 lm2 |= HCI_LM_MASTER;
4306 read_unlock(&l2cap_sk_list.lock);
4308 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt completed.  On success attach an
 * l2cap_conn and mark it ready; on failure tear down with the mapped
 * errno. */
4311 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4313 struct l2cap_conn *conn;
4315 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4317 if (hcon->type != ACL_LINK)
4321 conn = l2cap_conn_add(hcon, status);
4323 l2cap_conn_ready(conn);
4325 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: link is about to go down; report the disconnect reason
 * previously recorded on the l2cap_conn. */
4330 static int l2cap_disconn_ind(struct hci_conn *hcon)
4332 struct l2cap_conn *conn = hcon->l2cap_data;
4334 BT_DBG("hcon %p", hcon);
4336 if (hcon->type != ACL_LINK || !conn)
4339 return conn->disc_reason;
/* HCI callback: ACL link went down; destroy the l2cap_conn with the HCI
 * reason translated to an errno. */
4342 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4344 BT_DBG("hcon %p reason %d", hcon, reason);
4346 if (hcon->type != ACL_LINK)
4349 l2cap_conn_del(hcon, bt_err(reason));
/* React to a change in link encryption for one channel.  Encryption lost:
 * MEDIUM security gets a 5 s grace timer, HIGH security closes the socket
 * immediately.  Encryption (re)established: cancel any pending timer. */
4354 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
/* Only connection-oriented socket types carry a security requirement here. */
4356 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4359 if (encrypt == 0x00) {
4360 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4361 l2cap_sock_clear_timer(sk);
4362 l2cap_sock_set_timer(sk, HZ * 5);
4363 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4364 __l2cap_sock_close(sk, ECONNREFUSED);
4366 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4367 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption procedure finished.  Walk every
 * channel on the connection: established channels get an encryption
 * re-check; channels that were waiting on security either proceed with
 * their Connection Request (BT_CONNECT) or answer the pending incoming
 * request (BT_CONNECT2) with success or security-block. */
4371 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4373 struct l2cap_chan_list *l;
4374 struct l2cap_conn *conn = hcon->l2cap_data;
4380 l = &conn->chan_list;
4382 BT_DBG("conn %p", conn);
4384 read_lock(&l->lock);
4386 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Skip channels still in the middle of their own connect procedure. */
4389 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4394 if (!status && (sk->sk_state == BT_CONNECTED ||
4395 sk->sk_state == BT_CONFIG)) {
4396 l2cap_check_encryption(sk, encrypt);
4401 if (sk->sk_state == BT_CONNECT) {
/* Security done: now send the deferred Connection Request. */
4403 struct l2cap_conn_req req;
4404 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4405 req.psm = l2cap_pi(sk)->psm;
4407 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4409 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4410 L2CAP_CONN_REQ, sizeof(req), &req);
4412 l2cap_sock_clear_timer(sk);
4413 l2cap_sock_set_timer(sk, HZ / 10);
4415 } else if (sk->sk_state == BT_CONNECT2) {
4416 struct l2cap_conn_rsp rsp;
4420 sk->sk_state = BT_CONFIG;
4421 result = L2CAP_CR_SUCCESS;
/* Security failed: refuse the incoming connection. */
4423 sk->sk_state = BT_DISCONN;
4424 l2cap_sock_set_timer(sk, HZ / 10);
4425 result = L2CAP_CR_SEC_BLOCK;
4428 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4429 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4430 rsp.result = cpu_to_le16(result);
4431 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4432 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4433 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4439 read_unlock(&l->lock);
/* HCI callback: ACL data arrived.  Reassemble L2CAP frames from ACL
 * fragments: an ACL_START fragment carries the L2CAP header and may be a
 * complete frame; otherwise a reassembly skb is allocated and
 * continuation fragments are appended until rx_len reaches zero. */
4444 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4446 struct l2cap_conn *conn = hcon->l2cap_data;
4448 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4451 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4453 if (flags & ACL_START) {
4454 struct l2cap_hdr *hdr;
/* A new start while a frame is still pending: drop the stale partial. */
4458 BT_ERR("Unexpected start frame (len %d)", skb->len);
4459 kfree_skb(conn->rx_skb);
4460 conn->rx_skb = NULL;
4462 l2cap_conn_unreliable(conn, ECOMM);
4466 BT_ERR("Frame is too short (len %d)", skb->len);
4467 l2cap_conn_unreliable(conn, ECOMM);
4471 hdr = (struct l2cap_hdr *) skb->data;
4472 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4474 if (len == skb->len) {
4475 /* Complete frame received */
4476 l2cap_recv_frame(conn, skb);
4480 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4482 if (skb->len > len) {
4483 BT_ERR("Frame is too long (len %d, expected len %d)",
4485 l2cap_conn_unreliable(conn, ECOMM);
4489 /* Allocate skb for the complete frame (with header) */
4490 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4494 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* rx_len tracks how many bytes are still expected. */
4496 conn->rx_len = len - skb->len;
4498 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* Continuation with nothing pending is a framing error. */
4500 if (!conn->rx_len) {
4501 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4502 l2cap_conn_unreliable(conn, ECOMM);
4506 if (skb->len > conn->rx_len) {
4507 BT_ERR("Fragment is too long (len %d, expected %d)",
4508 skb->len, conn->rx_len);
4509 kfree_skb(conn->rx_skb);
4510 conn->rx_skb = NULL;
4512 l2cap_conn_unreliable(conn, ECOMM);
4516 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4518 conn->rx_len -= skb->len;
4520 if (!conn->rx_len) {
4521 /* Complete frame received */
4522 l2cap_recv_frame(conn, conn->rx_skb);
4523 conn->rx_skb = NULL;
/* debugfs seq_file show: dump one line per L2CAP socket — addresses,
 * state, PSM, CIDs, MTUs and security level. */
4532 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4535 struct hlist_node *node;
4537 read_lock_bh(&l2cap_sk_list.lock);
4539 sk_for_each(sk, node, &l2cap_sk_list.head) {
4540 struct l2cap_pinfo *pi = l2cap_pi(sk);
4542 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4543 batostr(&bt_sk(sk)->src),
4544 batostr(&bt_sk(sk)->dst),
4545 sk->sk_state, __le16_to_cpu(pi->psm),
4547 pi->imtu, pi->omtu, pi->sec_level);
4550 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the seq_file single-show helper to our show routine. */
4555 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4557 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (read-only seq_file). */
4560 static const struct file_operations l2cap_debugfs_fops = {
4561 .open = l2cap_debugfs_open,
4563 .llseek = seq_lseek,
4564 .release = single_release,
/* Dentry of the debugfs file, kept for removal in l2cap_exit(). */
4567 static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets; mmap
 * and socketpair are unsupported and stubbed with sock_no_*. */
4569 static const struct proto_ops l2cap_sock_ops = {
4570 .family = PF_BLUETOOTH,
4571 .owner = THIS_MODULE,
4572 .release = l2cap_sock_release,
4573 .bind = l2cap_sock_bind,
4574 .connect = l2cap_sock_connect,
4575 .listen = l2cap_sock_listen,
4576 .accept = l2cap_sock_accept,
4577 .getname = l2cap_sock_getname,
4578 .sendmsg = l2cap_sock_sendmsg,
4579 .recvmsg = l2cap_sock_recvmsg,
4580 .poll = bt_sock_poll,
4581 .ioctl = bt_sock_ioctl,
4582 .mmap = sock_no_mmap,
4583 .socketpair = sock_no_socketpair,
4584 .shutdown = l2cap_sock_shutdown,
4585 .setsockopt = l2cap_sock_setsockopt,
4586 .getsockopt = l2cap_sock_getsockopt
/* Registered with bt_sock_register(): creates new L2CAP sockets. */
4589 static const struct net_proto_family l2cap_sock_family_ops = {
4590 .family = PF_BLUETOOTH,
4591 .owner = THIS_MODULE,
4592 .create = l2cap_sock_create,
/* HCI protocol hooks: how the HCI core delivers link events and ACL data
 * up into L2CAP. */
4595 static struct hci_proto l2cap_hci_proto = {
4597 .id = HCI_PROTO_L2CAP,
4598 .connect_ind = l2cap_connect_ind,
4599 .connect_cfm = l2cap_connect_cfm,
4600 .disconn_ind = l2cap_disconn_ind,
4601 .disconn_cfm = l2cap_disconn_cfm,
4602 .security_cfm = l2cap_security_cfm,
4603 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, create the local-busy workqueue,
 * register the socket family and the HCI protocol hooks, then create the
 * debugfs entry.  Later failures unwind the earlier registrations. */
4606 static int __init l2cap_init(void)
4610 err = proto_register(&l2cap_proto, 0);
/* Single-threaded workqueue serving l2cap_busy_work items. */
4614 _busy_wq = create_singlethread_workqueue("l2cap");
4618 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4620 BT_ERR("L2CAP socket registration failed");
4624 err = hci_register_proto(&l2cap_hci_proto);
4626 BT_ERR("L2CAP protocol registration failed");
4627 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs failure is non-fatal: just log it. */
4632 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4633 bt_debugfs, NULL, &l2cap_debugfs_fops);
4635 BT_ERR("Failed to create L2CAP debug file");
4638 BT_INFO("L2CAP ver %s", VERSION);
4639 BT_INFO("L2CAP socket layer initialized");
/* Error path: undo proto_register(). */
4644 proto_unregister(&l2cap_proto);
/* Module exit: remove debugfs, drain and destroy the busy workqueue, then
 * unregister the socket family, HCI protocol hooks and the proto. */
4648 static void __exit l2cap_exit(void)
4650 debugfs_remove(l2cap_debugfs);
/* Make sure no busy_work items are still running before teardown. */
4652 flush_workqueue(_busy_wq);
4653 destroy_workqueue(_busy_wq);
4655 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4656 BT_ERR("L2CAP socket unregistration failed");
4658 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4659 BT_ERR("L2CAP protocol unregistration failed");
4661 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol lets dependent modules pull in
 * l2cap via the module loader without using any other symbol. */
4664 void l2cap_load(void)
4666 /* Dummy function to trigger automatic L2CAP module loading by
4667 * other modules that use L2CAP sockets but don't use any other
4668 * symbols from it. */
4671 EXPORT_SYMBOL(l2cap_load);
/* Module entry points, tunable parameters and metadata. */
4673 module_init(l2cap_init);
4674 module_exit(l2cap_exit);
4676 module_param(enable_ertm, bool, 0644);
4677 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4679 module_param(max_transmit, uint, 0644);
4680 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4682 module_param(tx_window, uint, 0644);
4683 MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");
4685 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4686 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4687 MODULE_VERSION(VERSION);
4688 MODULE_LICENSE("GPL");
/* "bt-proto-0" = BTPROTO_L2CAP, enabling automatic module loading. */
4689 MODULE_ALIAS("bt-proto-0");