/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
65 static struct workqueue_struct *_busy_wq;
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
86 BT_DBG("sock %p state %d", sk, sk->sk_state);
90 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
91 reason = ECONNREFUSED;
92 else if (sk->sk_state == BT_CONNECT &&
93 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
94 reason = ECONNREFUSED;
98 __l2cap_sock_close(sk, reason);
106 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
108 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
109 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
112 static void l2cap_sock_clear_timer(struct sock *sk)
114 BT_DBG("sock %p state %d", sk, sk->sk_state);
115 sk_stop_timer(sk, &sk->sk_timer);
118 /* ---- L2CAP channels ---- */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
139 /* Find channel with given SCID.
140 * Returns locked socket */
141 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
145 s = __l2cap_get_chan_by_scid(l, cid);
148 read_unlock(&l->lock);
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
162 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 s = __l2cap_get_chan_by_ident(l, ident);
169 read_unlock(&l->lock);
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
175 u16 cid = L2CAP_CID_DYN_START;
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
185 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
190 l2cap_pi(l->head)->prev_c = sk;
192 l2cap_pi(sk)->next_c = l->head;
193 l2cap_pi(sk)->prev_c = NULL;
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
199 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
201 write_lock_bh(&l->lock);
206 l2cap_pi(next)->prev_c = prev;
208 l2cap_pi(prev)->next_c = next;
209 write_unlock_bh(&l->lock);
214 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
216 struct l2cap_chan_list *l = &conn->chan_list;
218 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
219 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
221 conn->disc_reason = 0x13;
223 l2cap_pi(sk)->conn = conn;
225 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
226 /* Alloc CID for connection-oriented socket */
227 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
228 } else if (sk->sk_type == SOCK_DGRAM) {
229 /* Connectionless socket */
230 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
231 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
232 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 /* Raw socket can send/recv signalling messages only */
235 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
236 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
240 __l2cap_chan_link(l, sk);
243 bt_accept_enqueue(parent, sk);
247 * Must be called on the locked socket. */
248 static void l2cap_chan_del(struct sock *sk, int err)
250 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
251 struct sock *parent = bt_sk(sk)->parent;
253 l2cap_sock_clear_timer(sk);
255 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
258 /* Unlink from channel list */
259 l2cap_chan_unlink(&conn->chan_list, sk);
260 l2cap_pi(sk)->conn = NULL;
261 hci_conn_put(conn->hcon);
264 sk->sk_state = BT_CLOSED;
265 sock_set_flag(sk, SOCK_ZAPPED);
271 bt_accept_unlink(sk);
272 parent->sk_data_ready(parent, 0);
274 sk->sk_state_change(sk);
276 skb_queue_purge(TX_QUEUE(sk));
278 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
279 struct srej_list *l, *tmp;
281 del_timer(&l2cap_pi(sk)->retrans_timer);
282 del_timer(&l2cap_pi(sk)->monitor_timer);
283 del_timer(&l2cap_pi(sk)->ack_timer);
285 skb_queue_purge(SREJ_QUEUE(sk));
286 skb_queue_purge(BUSY_QUEUE(sk));
288 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
295 /* Service level security */
296 static inline int l2cap_check_security(struct sock *sk)
298 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
301 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
302 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
303 auth_type = HCI_AT_NO_BONDING_MITM;
305 auth_type = HCI_AT_NO_BONDING;
307 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
308 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
310 switch (l2cap_pi(sk)->sec_level) {
311 case BT_SECURITY_HIGH:
312 auth_type = HCI_AT_GENERAL_BONDING_MITM;
314 case BT_SECURITY_MEDIUM:
315 auth_type = HCI_AT_GENERAL_BONDING;
318 auth_type = HCI_AT_NO_BONDING;
323 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
327 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
331 /* Get next available identificator.
332 * 1 - 128 are used by kernel.
333 * 129 - 199 are reserved.
334 * 200 - 254 are used by utilities like l2ping, etc.
337 spin_lock_bh(&conn->lock);
339 if (++conn->tx_ident > 128)
344 spin_unlock_bh(&conn->lock);
349 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
351 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
353 BT_DBG("code 0x%2.2x", code);
358 hci_send_acl(conn->hcon, skb, 0);
361 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
364 struct l2cap_hdr *lh;
365 struct l2cap_conn *conn = pi->conn;
366 struct sock *sk = (struct sock *)pi;
367 int count, hlen = L2CAP_HDR_SIZE + 2;
369 if (sk->sk_state != BT_CONNECTED)
372 if (pi->fcs == L2CAP_FCS_CRC16)
375 BT_DBG("pi %p, control 0x%2.2x", pi, control);
377 count = min_t(unsigned int, conn->mtu, hlen);
378 control |= L2CAP_CTRL_FRAME_TYPE;
380 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
381 control |= L2CAP_CTRL_FINAL;
382 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
385 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
386 control |= L2CAP_CTRL_POLL;
387 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
390 skb = bt_skb_alloc(count, GFP_ATOMIC);
394 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
395 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
396 lh->cid = cpu_to_le16(pi->dcid);
397 put_unaligned_le16(control, skb_put(skb, 2));
399 if (pi->fcs == L2CAP_FCS_CRC16) {
400 u16 fcs = crc16(0, (u8 *)lh, count - 2);
401 put_unaligned_le16(fcs, skb_put(skb, 2));
404 hci_send_acl(pi->conn->hcon, skb, 0);
407 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
409 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
410 control |= L2CAP_SUPER_RCV_NOT_READY;
411 pi->conn_state |= L2CAP_CONN_RNR_SENT;
413 control |= L2CAP_SUPER_RCV_READY;
415 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
417 l2cap_send_sframe(pi, control);
420 static inline int __l2cap_no_conn_pending(struct sock *sk)
422 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
425 static void l2cap_do_start(struct sock *sk)
427 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
429 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
430 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
433 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
434 struct l2cap_conn_req req;
435 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
436 req.psm = l2cap_pi(sk)->psm;
438 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
439 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
441 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
442 L2CAP_CONN_REQ, sizeof(req), &req);
445 struct l2cap_info_req req;
446 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
448 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
449 conn->info_ident = l2cap_get_ident(conn);
451 mod_timer(&conn->info_timer, jiffies +
452 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
454 l2cap_send_cmd(conn, conn->info_ident,
455 L2CAP_INFO_REQ, sizeof(req), &req);
459 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
461 struct l2cap_disconn_req req;
466 skb_queue_purge(TX_QUEUE(sk));
468 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
469 del_timer(&l2cap_pi(sk)->retrans_timer);
470 del_timer(&l2cap_pi(sk)->monitor_timer);
471 del_timer(&l2cap_pi(sk)->ack_timer);
474 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
475 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
476 l2cap_send_cmd(conn, l2cap_get_ident(conn),
477 L2CAP_DISCONN_REQ, sizeof(req), &req);
479 sk->sk_state = BT_DISCONN;
482 /* ---- L2CAP connections ---- */
483 static void l2cap_conn_start(struct l2cap_conn *conn)
485 struct l2cap_chan_list *l = &conn->chan_list;
488 BT_DBG("conn %p", conn);
492 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
495 if (sk->sk_type != SOCK_SEQPACKET &&
496 sk->sk_type != SOCK_STREAM) {
501 if (sk->sk_state == BT_CONNECT) {
502 if (l2cap_check_security(sk) &&
503 __l2cap_no_conn_pending(sk)) {
504 struct l2cap_conn_req req;
505 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
506 req.psm = l2cap_pi(sk)->psm;
508 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
509 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
511 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
512 L2CAP_CONN_REQ, sizeof(req), &req);
514 } else if (sk->sk_state == BT_CONNECT2) {
515 struct l2cap_conn_rsp rsp;
516 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
517 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
519 if (l2cap_check_security(sk)) {
520 if (bt_sk(sk)->defer_setup) {
521 struct sock *parent = bt_sk(sk)->parent;
522 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
523 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
524 parent->sk_data_ready(parent, 0);
527 sk->sk_state = BT_CONFIG;
528 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
529 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
532 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
533 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
536 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
537 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
543 read_unlock(&l->lock);
546 static void l2cap_conn_ready(struct l2cap_conn *conn)
548 struct l2cap_chan_list *l = &conn->chan_list;
551 BT_DBG("conn %p", conn);
555 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
558 if (sk->sk_type != SOCK_SEQPACKET &&
559 sk->sk_type != SOCK_STREAM) {
560 l2cap_sock_clear_timer(sk);
561 sk->sk_state = BT_CONNECTED;
562 sk->sk_state_change(sk);
563 } else if (sk->sk_state == BT_CONNECT)
569 read_unlock(&l->lock);
572 /* Notify sockets that we cannot guaranty reliability anymore */
573 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
575 struct l2cap_chan_list *l = &conn->chan_list;
578 BT_DBG("conn %p", conn);
582 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
583 if (l2cap_pi(sk)->force_reliable)
587 read_unlock(&l->lock);
590 static void l2cap_info_timeout(unsigned long arg)
592 struct l2cap_conn *conn = (void *) arg;
594 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
595 conn->info_ident = 0;
597 l2cap_conn_start(conn);
600 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
602 struct l2cap_conn *conn = hcon->l2cap_data;
607 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
611 hcon->l2cap_data = conn;
614 BT_DBG("hcon %p conn %p", hcon, conn);
616 conn->mtu = hcon->hdev->acl_mtu;
617 conn->src = &hcon->hdev->bdaddr;
618 conn->dst = &hcon->dst;
622 spin_lock_init(&conn->lock);
623 rwlock_init(&conn->chan_list.lock);
625 setup_timer(&conn->info_timer, l2cap_info_timeout,
626 (unsigned long) conn);
628 conn->disc_reason = 0x13;
633 static void l2cap_conn_del(struct hci_conn *hcon, int err)
635 struct l2cap_conn *conn = hcon->l2cap_data;
641 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
643 kfree_skb(conn->rx_skb);
646 while ((sk = conn->chan_list.head)) {
648 l2cap_chan_del(sk, err);
653 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
654 del_timer_sync(&conn->info_timer);
656 hcon->l2cap_data = NULL;
660 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
662 struct l2cap_chan_list *l = &conn->chan_list;
663 write_lock_bh(&l->lock);
664 __l2cap_chan_add(conn, sk, parent);
665 write_unlock_bh(&l->lock);
668 /* ---- Socket interface ---- */
669 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
672 struct hlist_node *node;
673 sk_for_each(sk, node, &l2cap_sk_list.head)
674 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
681 /* Find socket with psm and source bdaddr.
682 * Returns closest match.
684 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
686 struct sock *sk = NULL, *sk1 = NULL;
687 struct hlist_node *node;
689 sk_for_each(sk, node, &l2cap_sk_list.head) {
690 if (state && sk->sk_state != state)
693 if (l2cap_pi(sk)->psm == psm) {
695 if (!bacmp(&bt_sk(sk)->src, src))
699 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
703 return node ? sk : sk1;
706 /* Find socket with given address (psm, src).
707 * Returns locked socket */
708 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
711 read_lock(&l2cap_sk_list.lock);
712 s = __l2cap_get_sock_by_psm(state, psm, src);
715 read_unlock(&l2cap_sk_list.lock);
719 static void l2cap_sock_destruct(struct sock *sk)
723 skb_queue_purge(&sk->sk_receive_queue);
724 skb_queue_purge(&sk->sk_write_queue);
727 static void l2cap_sock_cleanup_listen(struct sock *parent)
731 BT_DBG("parent %p", parent);
733 /* Close not yet accepted channels */
734 while ((sk = bt_accept_dequeue(parent, NULL)))
735 l2cap_sock_close(sk);
737 parent->sk_state = BT_CLOSED;
738 sock_set_flag(parent, SOCK_ZAPPED);
741 /* Kill socket (only if zapped and orphan)
742 * Must be called on unlocked socket.
744 static void l2cap_sock_kill(struct sock *sk)
746 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
749 BT_DBG("sk %p state %d", sk, sk->sk_state);
751 /* Kill poor orphan */
752 bt_sock_unlink(&l2cap_sk_list, sk);
753 sock_set_flag(sk, SOCK_DEAD);
757 static void __l2cap_sock_close(struct sock *sk, int reason)
759 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
761 switch (sk->sk_state) {
763 l2cap_sock_cleanup_listen(sk);
768 if (sk->sk_type == SOCK_SEQPACKET ||
769 sk->sk_type == SOCK_STREAM) {
770 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
772 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
773 l2cap_send_disconn_req(conn, sk);
775 l2cap_chan_del(sk, reason);
779 if (sk->sk_type == SOCK_SEQPACKET ||
780 sk->sk_type == SOCK_STREAM) {
781 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
782 struct l2cap_conn_rsp rsp;
785 if (bt_sk(sk)->defer_setup)
786 result = L2CAP_CR_SEC_BLOCK;
788 result = L2CAP_CR_BAD_PSM;
790 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
791 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
792 rsp.result = cpu_to_le16(result);
793 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
794 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
795 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
797 l2cap_chan_del(sk, reason);
802 l2cap_chan_del(sk, reason);
806 sock_set_flag(sk, SOCK_ZAPPED);
811 /* Must be called on unlocked socket. */
812 static void l2cap_sock_close(struct sock *sk)
814 l2cap_sock_clear_timer(sk);
816 __l2cap_sock_close(sk, ECONNRESET);
821 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
823 struct l2cap_pinfo *pi = l2cap_pi(sk);
828 sk->sk_type = parent->sk_type;
829 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
831 pi->imtu = l2cap_pi(parent)->imtu;
832 pi->omtu = l2cap_pi(parent)->omtu;
833 pi->mode = l2cap_pi(parent)->mode;
834 pi->fcs = l2cap_pi(parent)->fcs;
835 pi->max_tx = l2cap_pi(parent)->max_tx;
836 pi->tx_win = l2cap_pi(parent)->tx_win;
837 pi->sec_level = l2cap_pi(parent)->sec_level;
838 pi->role_switch = l2cap_pi(parent)->role_switch;
839 pi->force_reliable = l2cap_pi(parent)->force_reliable;
841 pi->imtu = L2CAP_DEFAULT_MTU;
843 if (enable_ertm && sk->sk_type == SOCK_STREAM)
844 pi->mode = L2CAP_MODE_ERTM;
846 pi->mode = L2CAP_MODE_BASIC;
847 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
848 pi->fcs = L2CAP_FCS_CRC16;
849 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
850 pi->sec_level = BT_SECURITY_LOW;
852 pi->force_reliable = 0;
855 /* Default config options */
857 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
858 skb_queue_head_init(TX_QUEUE(sk));
859 skb_queue_head_init(SREJ_QUEUE(sk));
860 skb_queue_head_init(BUSY_QUEUE(sk));
861 INIT_LIST_HEAD(SREJ_LIST(sk));
864 static struct proto l2cap_proto = {
866 .owner = THIS_MODULE,
867 .obj_size = sizeof(struct l2cap_pinfo)
870 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
874 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
878 sock_init_data(sock, sk);
879 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
881 sk->sk_destruct = l2cap_sock_destruct;
882 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
884 sock_reset_flag(sk, SOCK_ZAPPED);
886 sk->sk_protocol = proto;
887 sk->sk_state = BT_OPEN;
889 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
891 bt_sock_link(&l2cap_sk_list, sk);
895 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
900 BT_DBG("sock %p", sock);
902 sock->state = SS_UNCONNECTED;
904 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
905 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
906 return -ESOCKTNOSUPPORT;
908 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
911 sock->ops = &l2cap_sock_ops;
913 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
917 l2cap_sock_init(sk, NULL);
921 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
923 struct sock *sk = sock->sk;
924 struct sockaddr_l2 la;
929 if (!addr || addr->sa_family != AF_BLUETOOTH)
932 memset(&la, 0, sizeof(la));
933 len = min_t(unsigned int, sizeof(la), alen);
934 memcpy(&la, addr, len);
941 if (sk->sk_state != BT_OPEN) {
946 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
947 !capable(CAP_NET_BIND_SERVICE)) {
952 write_lock_bh(&l2cap_sk_list.lock);
954 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
957 /* Save source address */
958 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
959 l2cap_pi(sk)->psm = la.l2_psm;
960 l2cap_pi(sk)->sport = la.l2_psm;
961 sk->sk_state = BT_BOUND;
963 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
964 __le16_to_cpu(la.l2_psm) == 0x0003)
965 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
968 write_unlock_bh(&l2cap_sk_list.lock);
975 static int l2cap_do_connect(struct sock *sk)
977 bdaddr_t *src = &bt_sk(sk)->src;
978 bdaddr_t *dst = &bt_sk(sk)->dst;
979 struct l2cap_conn *conn;
980 struct hci_conn *hcon;
981 struct hci_dev *hdev;
985 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
988 hdev = hci_get_route(dst, src);
990 return -EHOSTUNREACH;
992 hci_dev_lock_bh(hdev);
996 if (sk->sk_type == SOCK_RAW) {
997 switch (l2cap_pi(sk)->sec_level) {
998 case BT_SECURITY_HIGH:
999 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1001 case BT_SECURITY_MEDIUM:
1002 auth_type = HCI_AT_DEDICATED_BONDING;
1005 auth_type = HCI_AT_NO_BONDING;
1008 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1009 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1010 auth_type = HCI_AT_NO_BONDING_MITM;
1012 auth_type = HCI_AT_NO_BONDING;
1014 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1015 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1017 switch (l2cap_pi(sk)->sec_level) {
1018 case BT_SECURITY_HIGH:
1019 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1021 case BT_SECURITY_MEDIUM:
1022 auth_type = HCI_AT_GENERAL_BONDING;
1025 auth_type = HCI_AT_NO_BONDING;
1030 hcon = hci_connect(hdev, ACL_LINK, dst,
1031 l2cap_pi(sk)->sec_level, auth_type);
1035 conn = l2cap_conn_add(hcon, 0);
1043 /* Update source addr of the socket */
1044 bacpy(src, conn->src);
1046 l2cap_chan_add(conn, sk, NULL);
1048 sk->sk_state = BT_CONNECT;
1049 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1051 if (hcon->state == BT_CONNECTED) {
1052 if (sk->sk_type != SOCK_SEQPACKET &&
1053 sk->sk_type != SOCK_STREAM) {
1054 l2cap_sock_clear_timer(sk);
1055 sk->sk_state = BT_CONNECTED;
1061 hci_dev_unlock_bh(hdev);
1066 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1068 struct sock *sk = sock->sk;
1069 struct sockaddr_l2 la;
1072 BT_DBG("sk %p", sk);
1074 if (!addr || alen < sizeof(addr->sa_family) ||
1075 addr->sa_family != AF_BLUETOOTH)
1078 memset(&la, 0, sizeof(la));
1079 len = min_t(unsigned int, sizeof(la), alen);
1080 memcpy(&la, addr, len);
1087 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1093 switch (l2cap_pi(sk)->mode) {
1094 case L2CAP_MODE_BASIC:
1096 case L2CAP_MODE_ERTM:
1097 case L2CAP_MODE_STREAMING:
1106 switch (sk->sk_state) {
1110 /* Already connecting */
1114 /* Already connected */
1127 /* Set destination address and psm */
1128 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1129 l2cap_pi(sk)->psm = la.l2_psm;
1131 err = l2cap_do_connect(sk);
1136 err = bt_sock_wait_state(sk, BT_CONNECTED,
1137 sock_sndtimeo(sk, flags & O_NONBLOCK));
1143 static int l2cap_sock_listen(struct socket *sock, int backlog)
1145 struct sock *sk = sock->sk;
1148 BT_DBG("sk %p backlog %d", sk, backlog);
1152 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1153 || sk->sk_state != BT_BOUND) {
1158 switch (l2cap_pi(sk)->mode) {
1159 case L2CAP_MODE_BASIC:
1161 case L2CAP_MODE_ERTM:
1162 case L2CAP_MODE_STREAMING:
1171 if (!l2cap_pi(sk)->psm) {
1172 bdaddr_t *src = &bt_sk(sk)->src;
1177 write_lock_bh(&l2cap_sk_list.lock);
1179 for (psm = 0x1001; psm < 0x1100; psm += 2)
1180 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1181 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1182 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1187 write_unlock_bh(&l2cap_sk_list.lock);
1193 sk->sk_max_ack_backlog = backlog;
1194 sk->sk_ack_backlog = 0;
1195 sk->sk_state = BT_LISTEN;
1202 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1204 DECLARE_WAITQUEUE(wait, current);
1205 struct sock *sk = sock->sk, *nsk;
1209 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1211 if (sk->sk_state != BT_LISTEN) {
1216 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1218 BT_DBG("sk %p timeo %ld", sk, timeo);
1220 /* Wait for an incoming connection. (wake-one). */
1221 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1222 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1223 set_current_state(TASK_INTERRUPTIBLE);
1230 timeo = schedule_timeout(timeo);
1231 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1233 if (sk->sk_state != BT_LISTEN) {
1238 if (signal_pending(current)) {
1239 err = sock_intr_errno(timeo);
1243 set_current_state(TASK_RUNNING);
1244 remove_wait_queue(sk_sleep(sk), &wait);
1249 newsock->state = SS_CONNECTED;
1251 BT_DBG("new socket %p", nsk);
1258 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1260 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1261 struct sock *sk = sock->sk;
1263 BT_DBG("sock %p, sk %p", sock, sk);
1265 addr->sa_family = AF_BLUETOOTH;
1266 *len = sizeof(struct sockaddr_l2);
1269 la->l2_psm = l2cap_pi(sk)->psm;
1270 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1271 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1273 la->l2_psm = l2cap_pi(sk)->sport;
1274 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1275 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1281 static int __l2cap_wait_ack(struct sock *sk)
1283 DECLARE_WAITQUEUE(wait, current);
1287 add_wait_queue(sk_sleep(sk), &wait);
1288 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1289 set_current_state(TASK_INTERRUPTIBLE);
1294 if (signal_pending(current)) {
1295 err = sock_intr_errno(timeo);
1300 timeo = schedule_timeout(timeo);
1303 err = sock_error(sk);
1307 set_current_state(TASK_RUNNING);
1308 remove_wait_queue(sk_sleep(sk), &wait);
1312 static void l2cap_monitor_timeout(unsigned long arg)
1314 struct sock *sk = (void *) arg;
1317 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1318 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1323 l2cap_pi(sk)->retry_count++;
1324 __mod_monitor_timer();
1326 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1330 static void l2cap_retrans_timeout(unsigned long arg)
1332 struct sock *sk = (void *) arg;
1335 l2cap_pi(sk)->retry_count = 1;
1336 __mod_monitor_timer();
1338 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1340 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
1344 static void l2cap_drop_acked_frames(struct sock *sk)
1346 struct sk_buff *skb;
1348 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1349 l2cap_pi(sk)->unacked_frames) {
1350 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1353 skb = skb_dequeue(TX_QUEUE(sk));
1356 l2cap_pi(sk)->unacked_frames--;
1359 if (!l2cap_pi(sk)->unacked_frames)
1360 del_timer(&l2cap_pi(sk)->retrans_timer);
1363 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1365 struct l2cap_pinfo *pi = l2cap_pi(sk);
1367 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1369 hci_send_acl(pi->conn->hcon, skb, 0);
1372 static int l2cap_streaming_send(struct sock *sk)
1374 struct sk_buff *skb, *tx_skb;
1375 struct l2cap_pinfo *pi = l2cap_pi(sk);
1378 while ((skb = sk->sk_send_head)) {
1379 tx_skb = skb_clone(skb, GFP_ATOMIC);
1381 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1382 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1383 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1385 if (pi->fcs == L2CAP_FCS_CRC16) {
1386 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1387 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1390 l2cap_do_send(sk, tx_skb);
1392 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1394 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1395 sk->sk_send_head = NULL;
1397 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1399 skb = skb_dequeue(TX_QUEUE(sk));
1405 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1407 struct l2cap_pinfo *pi = l2cap_pi(sk);
1408 struct sk_buff *skb, *tx_skb;
1411 skb = skb_peek(TX_QUEUE(sk));
1416 if (bt_cb(skb)->tx_seq == tx_seq)
1419 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1422 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1424 if (pi->remote_max_tx &&
1425 bt_cb(skb)->retries == pi->remote_max_tx) {
1426 l2cap_send_disconn_req(pi->conn, sk);
1430 tx_skb = skb_clone(skb, GFP_ATOMIC);
1431 bt_cb(skb)->retries++;
1432 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1434 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1435 control |= L2CAP_CTRL_FINAL;
1436 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1439 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1440 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1442 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1444 if (pi->fcs == L2CAP_FCS_CRC16) {
1445 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1446 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1449 l2cap_do_send(sk, tx_skb);
1452 static int l2cap_ertm_send(struct sock *sk)
1454 struct sk_buff *skb, *tx_skb;
1455 struct l2cap_pinfo *pi = l2cap_pi(sk);
1459 if (sk->sk_state != BT_CONNECTED)
1462 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1464 if (pi->remote_max_tx &&
1465 bt_cb(skb)->retries == pi->remote_max_tx) {
1466 l2cap_send_disconn_req(pi->conn, sk);
1470 tx_skb = skb_clone(skb, GFP_ATOMIC);
1472 bt_cb(skb)->retries++;
1474 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1475 control &= L2CAP_CTRL_SAR;
1477 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1478 control |= L2CAP_CTRL_FINAL;
1479 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1481 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1482 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1483 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1486 if (pi->fcs == L2CAP_FCS_CRC16) {
1487 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1488 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1491 l2cap_do_send(sk, tx_skb);
1493 __mod_retrans_timer();
1495 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1496 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1498 pi->unacked_frames++;
1501 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1502 sk->sk_send_head = NULL;
1504 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1512 static int l2cap_retransmit_frames(struct sock *sk)
1514 struct l2cap_pinfo *pi = l2cap_pi(sk);
1517 spin_lock_bh(&pi->send_lock);
1519 if (!skb_queue_empty(TX_QUEUE(sk)))
1520 sk->sk_send_head = TX_QUEUE(sk)->next;
1522 pi->next_tx_seq = pi->expected_ack_seq;
1523 ret = l2cap_ertm_send(sk);
1525 spin_unlock_bh(&pi->send_lock);
1530 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1532 struct sock *sk = (struct sock *)pi;
1536 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1538 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1539 control |= L2CAP_SUPER_RCV_NOT_READY;
1540 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1541 l2cap_send_sframe(pi, control);
1545 spin_lock_bh(&pi->send_lock);
1546 nframes = l2cap_ertm_send(sk);
1547 spin_unlock_bh(&pi->send_lock);
1552 control |= L2CAP_SUPER_RCV_READY;
1553 l2cap_send_sframe(pi, control);
1556 static void l2cap_send_srejtail(struct sock *sk)
1558 struct srej_list *tail;
1561 control = L2CAP_SUPER_SELECT_REJECT;
1562 control |= L2CAP_CTRL_FINAL;
1564 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1565 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1567 l2cap_send_sframe(l2cap_pi(sk), control);
1570 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1572 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1573 struct sk_buff **frag;
1576 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1582 /* Continuation fragments (no L2CAP header) */
1583 frag = &skb_shinfo(skb)->frag_list;
1585 count = min_t(unsigned int, conn->mtu, len);
1587 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1590 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1596 frag = &(*frag)->next;
1602 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1604 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1605 struct sk_buff *skb;
1606 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1607 struct l2cap_hdr *lh;
1609 BT_DBG("sk %p len %d", sk, (int)len);
1611 count = min_t(unsigned int, (conn->mtu - hlen), len);
1612 skb = bt_skb_send_alloc(sk, count + hlen,
1613 msg->msg_flags & MSG_DONTWAIT, &err);
1615 return ERR_PTR(-ENOMEM);
1617 /* Create L2CAP header */
1618 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1619 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1620 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1621 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1623 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1624 if (unlikely(err < 0)) {
1626 return ERR_PTR(err);
1631 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1633 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1634 struct sk_buff *skb;
1635 int err, count, hlen = L2CAP_HDR_SIZE;
1636 struct l2cap_hdr *lh;
1638 BT_DBG("sk %p len %d", sk, (int)len);
1640 count = min_t(unsigned int, (conn->mtu - hlen), len);
1641 skb = bt_skb_send_alloc(sk, count + hlen,
1642 msg->msg_flags & MSG_DONTWAIT, &err);
1644 return ERR_PTR(-ENOMEM);
1646 /* Create L2CAP header */
1647 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1648 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1649 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1651 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1652 if (unlikely(err < 0)) {
1654 return ERR_PTR(err);
1659 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1661 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1662 struct sk_buff *skb;
1663 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1664 struct l2cap_hdr *lh;
1666 BT_DBG("sk %p len %d", sk, (int)len);
1669 return ERR_PTR(-ENOTCONN);
1674 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1677 count = min_t(unsigned int, (conn->mtu - hlen), len);
1678 skb = bt_skb_send_alloc(sk, count + hlen,
1679 msg->msg_flags & MSG_DONTWAIT, &err);
1681 return ERR_PTR(-ENOMEM);
1683 /* Create L2CAP header */
1684 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1685 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1686 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1687 put_unaligned_le16(control, skb_put(skb, 2));
1689 put_unaligned_le16(sdulen, skb_put(skb, 2));
1691 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1692 if (unlikely(err < 0)) {
1694 return ERR_PTR(err);
1697 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1698 put_unaligned_le16(0, skb_put(skb, 2));
1700 bt_cb(skb)->retries = 0;
1704 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1706 struct l2cap_pinfo *pi = l2cap_pi(sk);
1707 struct sk_buff *skb;
1708 struct sk_buff_head sar_queue;
1712 skb_queue_head_init(&sar_queue);
1713 control = L2CAP_SDU_START;
1714 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1716 return PTR_ERR(skb);
1718 __skb_queue_tail(&sar_queue, skb);
1719 len -= pi->remote_mps;
1720 size += pi->remote_mps;
1725 if (len > pi->remote_mps) {
1726 control = L2CAP_SDU_CONTINUE;
1727 buflen = pi->remote_mps;
1729 control = L2CAP_SDU_END;
1733 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1735 skb_queue_purge(&sar_queue);
1736 return PTR_ERR(skb);
1739 __skb_queue_tail(&sar_queue, skb);
1743 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1744 spin_lock_bh(&pi->send_lock);
1745 if (sk->sk_send_head == NULL)
1746 sk->sk_send_head = sar_queue.next;
1747 spin_unlock_bh(&pi->send_lock);
/* sendmsg() for L2CAP sockets.  Dispatches on socket type and channel
 * mode: SOCK_DGRAM builds a connectionless PDU; basic mode builds one
 * PDU bounded by the outgoing MTU; ERTM/streaming either sends the SDU
 * as a single unsegmented I-frame or segments it via SAR, then kicks
 * the appropriate transmit path. */
1752 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1754 struct sock *sk = sock->sk;
1755 struct l2cap_pinfo *pi = l2cap_pi(sk);
1756 struct sk_buff *skb;
1760 BT_DBG("sock %p, sk %p", sock, sk);
1762 err = sock_error(sk);
/* Out-of-band data is not supported by L2CAP. */
1766 if (msg->msg_flags & MSG_OOB)
1771 if (sk->sk_state != BT_CONNECTED) {
1776 /* Connectionless channel */
1777 if (sk->sk_type == SOCK_DGRAM) {
1778 skb = l2cap_create_connless_pdu(sk, msg, len)
1782 l2cap_do_send(sk, skb);
1789 case L2CAP_MODE_BASIC:
1790 /* Check outgoing MTU */
1791 if (len > pi->omtu) {
1796 /* Create a basic PDU */
1797 skb = l2cap_create_basic_pdu(sk, msg, len);
1803 l2cap_do_send(sk, skb);
1807 case L2CAP_MODE_ERTM:
1808 case L2CAP_MODE_STREAMING:
1809 /* Entire SDU fits into one PDU */
1810 if (len <= pi->remote_mps) {
1811 control = L2CAP_SDU_UNSEGMENTED;
1812 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1817 __skb_queue_tail(TX_QUEUE(sk), skb);
/* Streaming mode has no retransmission window, so only ERTM
 * needs the send lock around sk_send_head manipulation. */
1819 if (pi->mode == L2CAP_MODE_ERTM)
1820 spin_lock_bh(&pi->send_lock);
1822 if (sk->sk_send_head == NULL)
1823 sk->sk_send_head = skb;
1825 if (pi->mode == L2CAP_MODE_ERTM)
1826 spin_unlock_bh(&pi->send_lock);
1828 /* Segment SDU into multiples PDUs */
1829 err = l2cap_sar_segment_sdu(sk, msg, len);
1834 if (pi->mode == L2CAP_MODE_STREAMING) {
1835 err = l2cap_streaming_send(sk);
1837 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
/* NOTE(review): BUG — "pi->conn_state && L2CAP_CONN_WAIT_F" uses
 * logical && where bitwise & was intended; as written it only tests
 * conn_state != 0, not the WAIT_F flag.  Upstream later fixed this
 * to "pi->conn_state & L2CAP_CONN_WAIT_F". */
1838 pi->conn_state && L2CAP_CONN_WAIT_F) {
1842 spin_lock_bh(&pi->send_lock);
1843 err = l2cap_ertm_send(sk);
1844 spin_unlock_bh(&pi->send_lock);
1852 BT_DBG("bad state %1.1x", pi->mode);
1861 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1863 struct sock *sk = sock->sk;
1867 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1868 struct l2cap_conn_rsp rsp;
1870 sk->sk_state = BT_CONFIG;
1872 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1873 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1874 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1875 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1876 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1877 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1885 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler.  L2CAP_OPTIONS copies channel
 * parameters (MTUs, mode, FCS, max_tx, tx window) from userspace after
 * seeding the struct with current values so a short copy keeps the
 * remaining fields; L2CAP_LM maps link-mode flags onto a security
 * level plus role-switch/reliable booleans. */
1888 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1890 struct sock *sk = sock->sk;
1891 struct l2cap_options opts;
1895 BT_DBG("sk %p", sk);
/* Pre-load with current settings so a partial user copy leaves the
 * untouched fields at their existing values. */
1901 opts.imtu = l2cap_pi(sk)->imtu;
1902 opts.omtu = l2cap_pi(sk)->omtu;
1903 opts.flush_to = l2cap_pi(sk)->flush_to;
1904 opts.mode = l2cap_pi(sk)->mode;
1905 opts.fcs = l2cap_pi(sk)->fcs;
1906 opts.max_tx = l2cap_pi(sk)->max_tx;
1907 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1909 len = min_t(unsigned int, sizeof(opts), optlen);
1910 if (copy_from_user((char *) &opts, optval, len)) {
1915 l2cap_pi(sk)->mode = opts.mode;
/* Only modes this build supports are accepted; ERTM/streaming
 * presumably also require enable_ertm — can't tell from the visible
 * lines, confirm against the stripped branch. */
1916 switch (l2cap_pi(sk)->mode) {
1917 case L2CAP_MODE_BASIC:
1919 case L2CAP_MODE_ERTM:
1920 case L2CAP_MODE_STREAMING:
1929 l2cap_pi(sk)->imtu = opts.imtu;
1930 l2cap_pi(sk)->omtu = opts.omtu;
1931 l2cap_pi(sk)->fcs = opts.fcs;
1932 l2cap_pi(sk)->max_tx = opts.max_tx;
1933 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1937 if (get_user(opt, (u32 __user *) optval)) {
/* Highest requested link-mode flag wins: SECURE > ENCRYPT > AUTH. */
1942 if (opt & L2CAP_LM_AUTH)
1943 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1944 if (opt & L2CAP_LM_ENCRYPT)
1945 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1946 if (opt & L2CAP_LM_SECURE)
1947 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1949 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1950 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt() entry point.  SOL_L2CAP is routed to the legacy
 * handler; SOL_BLUETOOTH handles BT_SECURITY (validated security
 * level, connection-oriented sockets only) and BT_DEFER_SETUP (only
 * before the socket is connected). */
1962 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1964 struct sock *sk = sock->sk;
1965 struct bt_security sec;
1969 BT_DBG("sk %p", sk);
1971 if (level == SOL_L2CAP)
1972 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1974 if (level != SOL_BLUETOOTH)
1975 return -ENOPROTOOPT;
/* BT_SECURITY only applies to connection-oriented (or raw) sockets. */
1981 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1982 && sk->sk_type != SOCK_RAW) {
1987 sec.level = BT_SECURITY_LOW;
1989 len = min_t(unsigned int, sizeof(sec), optlen);
1990 if (copy_from_user((char *) &sec, optval, len)) {
1995 if (sec.level < BT_SECURITY_LOW ||
1996 sec.level > BT_SECURITY_HIGH) {
2001 l2cap_pi(sk)->sec_level = sec.level;
2004 case BT_DEFER_SETUP:
/* Deferred setup can only be toggled before connection. */
2005 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2010 if (get_user(opt, (u32 __user *) optval)) {
2015 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler.  L2CAP_OPTIONS returns current
 * channel parameters; L2CAP_LM reconstructs link-mode flags from the
 * security level plus role-switch/reliable booleans; L2CAP_CONNINFO
 * returns the ACL handle and remote device class for connected (or
 * deferred-setup pending) channels. */
2027 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2029 struct sock *sk = sock->sk;
2030 struct l2cap_options opts;
2031 struct l2cap_conninfo cinfo;
2035 BT_DBG("sk %p", sk);
2037 if (get_user(len, optlen))
2044 opts.imtu = l2cap_pi(sk)->imtu;
2045 opts.omtu = l2cap_pi(sk)->omtu;
2046 opts.flush_to = l2cap_pi(sk)->flush_to;
2047 opts.mode = l2cap_pi(sk)->mode;
2048 opts.fcs = l2cap_pi(sk)->fcs;
2049 opts.max_tx = l2cap_pi(sk)->max_tx;
2050 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
/* Copy at most the user-supplied length; truncation is allowed. */
2052 len = min_t(unsigned int, len, sizeof(opts));
2053 if (copy_to_user(optval, (char *) &opts, len))
/* Map the stored security level back to legacy link-mode bits. */
2059 switch (l2cap_pi(sk)->sec_level) {
2060 case BT_SECURITY_LOW:
2061 opt = L2CAP_LM_AUTH;
2063 case BT_SECURITY_MEDIUM:
2064 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2066 case BT_SECURITY_HIGH:
2067 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2075 if (l2cap_pi(sk)->role_switch)
2076 opt |= L2CAP_LM_MASTER;
2078 if (l2cap_pi(sk)->force_reliable)
2079 opt |= L2CAP_LM_RELIABLE;
2081 if (put_user(opt, (u32 __user *) optval))
2085 case L2CAP_CONNINFO:
/* CONNINFO is valid while connected, or while an incoming deferred
 * connection is still pending. */
2086 if (sk->sk_state != BT_CONNECTED &&
2087 !(sk->sk_state == BT_CONNECT2 &&
2088 bt_sk(sk)->defer_setup)) {
2093 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2094 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2096 len = min_t(unsigned int, len, sizeof(cinfo));
2097 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt() entry point.  SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH reads back BT_SECURITY (connection-oriented sockets
 * only) and BT_DEFER_SETUP (bound/listening sockets only). */
2111 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2113 struct sock *sk = sock->sk;
2114 struct bt_security sec;
2117 BT_DBG("sk %p", sk);
2119 if (level == SOL_L2CAP)
2120 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2122 if (level != SOL_BLUETOOTH)
2123 return -ENOPROTOOPT;
2125 if (get_user(len, optlen))
2132 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2133 && sk->sk_type != SOCK_RAW) {
2138 sec.level = l2cap_pi(sk)->sec_level;
2140 len = min_t(unsigned int, len, sizeof(sec));
2141 if (copy_to_user(optval, (char *) &sec, len))
2146 case BT_DEFER_SETUP:
2147 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2152 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2166 static int l2cap_sock_shutdown(struct socket *sock, int how)
2168 struct sock *sk = sock->sk;
2171 BT_DBG("sock %p, sk %p", sock, sk);
2177 if (!sk->sk_shutdown) {
2178 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2179 err = __l2cap_wait_ack(sk);
2181 sk->sk_shutdown = SHUTDOWN_MASK;
2182 l2cap_sock_clear_timer(sk);
2183 __l2cap_sock_close(sk, 0);
2185 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2186 err = bt_sock_wait_state(sk, BT_CLOSED,
2193 static int l2cap_sock_release(struct socket *sock)
2195 struct sock *sk = sock->sk;
2198 BT_DBG("sock %p, sk %p", sock, sk);
2203 err = l2cap_sock_shutdown(sock, 2);
2206 l2cap_sock_kill(sk);
2210 static void l2cap_chan_ready(struct sock *sk)
2212 struct sock *parent = bt_sk(sk)->parent;
2214 BT_DBG("sk %p, parent %p", sk, parent);
2216 l2cap_pi(sk)->conf_state = 0;
2217 l2cap_sock_clear_timer(sk);
2220 /* Outgoing channel.
2221 * Wake up socket sleeping on connect.
2223 sk->sk_state = BT_CONNECTED;
2224 sk->sk_state_change(sk);
2226 /* Incoming channel.
2227 * Wake up socket sleeping on accept.
2229 parent->sk_data_ready(parent, 0);
2233 /* Copy frame to all raw sockets on that connection */
2234 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2236 struct l2cap_chan_list *l = &conn->chan_list;
2237 struct sk_buff *nskb;
2240 BT_DBG("conn %p", conn);
2242 read_lock(&l->lock);
2243 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2244 if (sk->sk_type != SOCK_RAW)
2247 /* Don't send frame to the socket it came from */
2250 nskb = skb_clone(skb, GFP_ATOMIC);
2254 if (sock_queue_rcv_skb(sk, nskb))
2257 read_unlock(&l->lock);
2260 /* ---- L2CAP signalling commands ---- */
2261 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2262 u8 code, u8 ident, u16 dlen, void *data)
2264 struct sk_buff *skb, **frag;
2265 struct l2cap_cmd_hdr *cmd;
2266 struct l2cap_hdr *lh;
2269 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2270 conn, code, ident, dlen);
2272 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2273 count = min_t(unsigned int, conn->mtu, len);
2275 skb = bt_skb_alloc(count, GFP_ATOMIC);
2279 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2280 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2281 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2283 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2286 cmd->len = cpu_to_le16(dlen);
2289 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2290 memcpy(skb_put(skb, count), data, count);
2296 /* Continuation fragments (no L2CAP header) */
2297 frag = &skb_shinfo(skb)->frag_list;
2299 count = min_t(unsigned int, conn->mtu, len);
2301 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2305 memcpy(skb_put(*frag, count), data, count);
2310 frag = &(*frag)->next;
2320 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2322 struct l2cap_conf_opt *opt = *ptr;
2325 len = L2CAP_CONF_OPT_SIZE + opt->len;
2333 *val = *((u8 *) opt->val);
2337 *val = __le16_to_cpu(*((__le16 *) opt->val));
2341 *val = __le32_to_cpu(*((__le32 *) opt->val));
2345 *val = (unsigned long) opt->val;
2349 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2353 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2355 struct l2cap_conf_opt *opt = *ptr;
2357 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2364 *((u8 *) opt->val) = val;
2368 *((__le16 *) opt->val) = cpu_to_le16(val);
2372 *((__le32 *) opt->val) = cpu_to_le32(val);
2376 memcpy(opt->val, (void *) val, len);
2380 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack timer callback: when the delayed-ack timer fires, send an
 * acknowledgement for frames received so far.  Runs in timer (softirq)
 * context, hence the bh socket lock.
 */
static void l2cap_ack_timeout(unsigned long arg)
{
	struct sock *sk = (void *) arg;

	bh_lock_sock(sk);
	l2cap_send_ack(l2cap_pi(sk));
	bh_unlock_sock(sk);
}
2392 static inline void l2cap_ertm_init(struct sock *sk)
2394 l2cap_pi(sk)->expected_ack_seq = 0;
2395 l2cap_pi(sk)->unacked_frames = 0;
2396 l2cap_pi(sk)->buffer_seq = 0;
2397 l2cap_pi(sk)->num_acked = 0;
2398 l2cap_pi(sk)->frames_sent = 0;
2400 setup_timer(&l2cap_pi(sk)->retrans_timer,
2401 l2cap_retrans_timeout, (unsigned long) sk);
2402 setup_timer(&l2cap_pi(sk)->monitor_timer,
2403 l2cap_monitor_timeout, (unsigned long) sk);
2404 setup_timer(&l2cap_pi(sk)->ack_timer,
2405 l2cap_ack_timeout, (unsigned long) sk);
2407 __skb_queue_head_init(SREJ_QUEUE(sk));
2408 __skb_queue_head_init(BUSY_QUEUE(sk));
2409 spin_lock_init(&l2cap_pi(sk)->send_lock);
2411 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
2414 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2416 u32 local_feat_mask = l2cap_feat_mask;
2418 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2421 case L2CAP_MODE_ERTM:
2422 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2423 case L2CAP_MODE_STREAMING:
2424 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2430 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2433 case L2CAP_MODE_STREAMING:
2434 case L2CAP_MODE_ERTM:
2435 if (l2cap_mode_supported(mode, remote_feat_mask))
2439 return L2CAP_MODE_BASIC;
/* Build the outgoing Configure Request for a channel into `data`.
 * On the first request, validates/downgrades the requested mode
 * against local and remote feature masks.  Adds an MTU option when
 * the inbound MTU differs from the default, and for ERTM/streaming
 * adds an RFC option (and an FCS-off option when both sides allow
 * dropping the CRC16).  Returns the encoded request length. */
2443 static int l2cap_build_conf_req(struct sock *sk, void *data)
2445 struct l2cap_pinfo *pi = l2cap_pi(sk);
2446 struct l2cap_conf_req *req = data;
2447 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2448 void *ptr = req->data;
2450 BT_DBG("sk %p", sk);
/* Only (re)negotiate the mode on the very first request. */
2452 if (pi->num_conf_req || pi->num_conf_rsp)
2456 case L2CAP_MODE_STREAMING:
2457 case L2CAP_MODE_ERTM:
2458 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2459 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2460 l2cap_send_disconn_req(pi->conn, sk);
2463 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2469 case L2CAP_MODE_BASIC:
2470 if (pi->imtu != L2CAP_DEFAULT_MTU)
2471 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2474 case L2CAP_MODE_ERTM:
2475 rfc.mode = L2CAP_MODE_ERTM;
2476 rfc.txwin_size = pi->tx_win;
2477 rfc.max_transmit = pi->max_tx;
/* Timeout fields are left 0 in a request; the responder fills
 * in the values it will use. */
2478 rfc.retrans_timeout = 0;
2479 rfc.monitor_timeout = 0;
2480 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Cap the PDU size so a maximal I-frame (hdr+control+SAR+FCS,
 * 10 bytes of overhead) still fits the ACL MTU. */
2481 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2482 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2484 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2485 sizeof(rfc), (unsigned long) &rfc);
2487 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2490 if (pi->fcs == L2CAP_FCS_NONE ||
2491 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2492 pi->fcs = L2CAP_FCS_NONE;
2493 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2497 case L2CAP_MODE_STREAMING:
2498 rfc.mode = L2CAP_MODE_STREAMING;
2500 rfc.max_transmit = 0;
2501 rfc.retrans_timeout = 0;
2502 rfc.monitor_timeout = 0;
2503 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2504 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2505 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2507 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2508 sizeof(rfc), (unsigned long) &rfc);
2510 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2513 if (pi->fcs == L2CAP_FCS_NONE ||
2514 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2515 pi->fcs = L2CAP_FCS_NONE;
2516 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2521 /* FIXME: Need actual value of the flush timeout */
2522 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2523 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2525 req->dcid = cpu_to_le16(pi->dcid);
2526 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (pi->conf_req) and
 * build our Configure Response into `data`.  Unknown non-hint options
 * yield CONF_UNKNOWN; mode mismatches yield CONF_UNACCEPT with our
 * preferred RFC echoed back; otherwise output options (MTU, RFC with
 * our timeout values filled in) are accepted and recorded.  Returns
 * the encoded response length or -ECONNREFUSED. */
2531 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2533 struct l2cap_pinfo *pi = l2cap_pi(sk);
2534 struct l2cap_conf_rsp *rsp = data;
2535 void *ptr = rsp->data;
2536 void *req = pi->conf_req;
2537 int len = pi->conf_len;
2538 int type, hint, olen;
2540 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2541 u16 mtu = L2CAP_DEFAULT_MTU;
2542 u16 result = L2CAP_CONF_SUCCESS;
2544 BT_DBG("sk %p", sk);
2546 while (len >= L2CAP_CONF_OPT_SIZE) {
2547 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; non-hints must be understood. */
2549 hint = type & L2CAP_CONF_HINT;
2550 type &= L2CAP_CONF_MASK;
2553 case L2CAP_CONF_MTU:
2557 case L2CAP_CONF_FLUSH_TO:
2561 case L2CAP_CONF_QOS:
2564 case L2CAP_CONF_RFC:
2565 if (olen == sizeof(rfc))
2566 memcpy(&rfc, (void *) val, olen);
2569 case L2CAP_CONF_FCS:
2570 if (val == L2CAP_FCS_NONE)
2571 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2579 result = L2CAP_CONF_UNKNOWN;
2580 *((u8 *) ptr++) = type;
/* Mode can only be renegotiated before any req/rsp was exchanged. */
2585 if (pi->num_conf_rsp || pi->num_conf_req)
2589 case L2CAP_MODE_STREAMING:
2590 case L2CAP_MODE_ERTM:
2591 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2592 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2593 return -ECONNREFUSED;
2596 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2601 if (pi->mode != rfc.mode) {
2602 result = L2CAP_CONF_UNACCEPT;
2603 rfc.mode = pi->mode;
/* Give up after the second failed attempt to agree on a mode. */
2605 if (pi->num_conf_rsp == 1)
2606 return -ECONNREFUSED;
2608 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2609 sizeof(rfc), (unsigned long) &rfc);
2613 if (result == L2CAP_CONF_SUCCESS) {
2614 /* Configure output options and let the other side know
2615 * which ones we don't like. */
2617 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2618 result = L2CAP_CONF_UNACCEPT;
2621 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2623 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2626 case L2CAP_MODE_BASIC:
2627 pi->fcs = L2CAP_FCS_NONE;
2628 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2631 case L2CAP_MODE_ERTM:
2632 pi->remote_tx_win = rfc.txwin_size;
2633 pi->remote_max_tx = rfc.max_transmit;
2634 if (rfc.max_pdu_size > pi->conn->mtu - 10)
/* NOTE(review): BUG — the value stored into the little-endian
 * rfc.max_pdu_size field should be cpu_to_le16(...), not
 * le16_to_cpu(...).  Same function, same direction; harmless on
 * little-endian hosts but wrong on big-endian. */
2635 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2637 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* NOTE(review): same endian-direction bug below — these assign
 * host constants into __le16 fields, so cpu_to_le16 was intended. */
2639 rfc.retrans_timeout =
2640 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2641 rfc.monitor_timeout =
2642 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2644 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2646 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2647 sizeof(rfc), (unsigned long) &rfc);
2651 case L2CAP_MODE_STREAMING:
2652 if (rfc.max_pdu_size > pi->conn->mtu - 10)
/* NOTE(review): same cpu_to_le16/le16_to_cpu mix-up as line 2635. */
2653 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2655 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2657 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2659 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2660 sizeof(rfc), (unsigned long) &rfc);
2665 result = L2CAP_CONF_UNACCEPT;
2667 memset(&rfc, 0, sizeof(rfc));
2668 rfc.mode = pi->mode;
2671 if (result == L2CAP_CONF_SUCCESS)
2672 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2674 rsp->scid = cpu_to_le16(pi->dcid);
2675 rsp->result = cpu_to_le16(result);
2676 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response and build the follow-up request
 * into `data`.  Echoes back adjusted MTU/flush-timeout/RFC options; a
 * mode change is refused once we've committed to ours.  On success,
 * records the negotiated ERTM/streaming parameters.  Returns the new
 * request length or -ECONNREFUSED. */
2681 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2683 struct l2cap_pinfo *pi = l2cap_pi(sk);
2684 struct l2cap_conf_req *req = data;
2685 void *ptr = req->data;
2688 struct l2cap_conf_rfc rfc;
2690 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2692 while (len >= L2CAP_CONF_OPT_SIZE) {
2693 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2696 case L2CAP_CONF_MTU:
/* Peer proposed an MTU below the minimum: reject and propose
 * the minimum instead. */
2697 if (val < L2CAP_DEFAULT_MIN_MTU) {
2698 *result = L2CAP_CONF_UNACCEPT;
2699 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2702 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2705 case L2CAP_CONF_FLUSH_TO:
2707 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2711 case L2CAP_CONF_RFC:
2712 if (olen == sizeof(rfc))
2713 memcpy(&rfc, (void *)val, olen);
/* Once this side committed to a mode (STATE2_DEVICE), the peer
 * may not switch it. */
2715 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2716 rfc.mode != pi->mode)
2717 return -ECONNREFUSED;
2719 pi->mode = rfc.mode;
2722 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2723 sizeof(rfc), (unsigned long) &rfc);
2728 if (*result == L2CAP_CONF_SUCCESS) {
2730 case L2CAP_MODE_ERTM:
2731 pi->remote_tx_win = rfc.txwin_size;
2732 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2733 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2734 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2736 case L2CAP_MODE_STREAMING:
2737 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2741 req->dcid = cpu_to_le16(pi->dcid);
2742 req->flags = cpu_to_le16(0x0000);
2747 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2749 struct l2cap_conf_rsp *rsp = data;
2750 void *ptr = rsp->data;
2752 BT_DBG("sk %p", sk);
2754 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2755 rsp->result = cpu_to_le16(result);
2756 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from a successful Configure Response and
 * record the negotiated ERTM/streaming parameters (tx window,
 * timeouts, MPS).  No-op for basic-mode channels. */
2761 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2763 struct l2cap_pinfo *pi = l2cap_pi(sk);
2766 struct l2cap_conf_rfc rfc;
2768 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
/* Basic mode carries no RFC parameters worth recording. */
2770 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2773 while (len >= L2CAP_CONF_OPT_SIZE) {
2774 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2777 case L2CAP_CONF_RFC:
2778 if (olen == sizeof(rfc))
2779 memcpy(&rfc, (void *)val, olen);
2786 case L2CAP_MODE_ERTM:
2787 pi->remote_tx_win = rfc.txwin_size;
2788 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2789 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2790 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2792 case L2CAP_MODE_STREAMING:
2793 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2797 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2799 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2801 if (rej->reason != 0x0000)
2804 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2805 cmd->ident == conn->info_ident) {
2806 del_timer(&conn->info_timer);
2808 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2809 conn->info_ident = 0;
2811 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listener on the PSM,
 * enforce link security (except for SDP), allocate and register the
 * child channel, and answer with success / pending / rejection.  When
 * the reply is pending for lack of feature info, also fire off an
 * Information Request. */
2817 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2819 struct l2cap_chan_list *list = &conn->chan_list;
2820 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2821 struct l2cap_conn_rsp rsp;
2822 struct sock *sk, *parent;
2823 int result, status = L2CAP_CS_NO_INFO;
2825 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2826 __le16 psm = req->psm;
2828 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2830 /* Check if we have socket listening on psm */
2831 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2833 result = L2CAP_CR_BAD_PSM;
2837 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which must remain reachable without security. */
2838 if (psm != cpu_to_le16(0x0001) &&
2839 !hci_conn_check_link_mode(conn->hcon)) {
2840 conn->disc_reason = 0x05;
2841 result = L2CAP_CR_SEC_BLOCK;
2845 result = L2CAP_CR_NO_MEM;
2847 /* Check for backlog size */
2848 if (sk_acceptq_is_full(parent)) {
2849 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2853 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2857 write_lock_bh(&list->lock);
2859 /* Check if we already have channel with that dcid */
2860 if (__l2cap_get_chan_by_dcid(list, scid)) {
2861 write_unlock_bh(&list->lock);
2862 sock_set_flag(sk, SOCK_ZAPPED);
2863 l2cap_sock_kill(sk);
2867 hci_conn_hold(conn->hcon);
2869 l2cap_sock_init(sk, parent);
2870 bacpy(&bt_sk(sk)->src, conn->src);
2871 bacpy(&bt_sk(sk)->dst, conn->dst);
2872 l2cap_pi(sk)->psm = psm;
2873 l2cap_pi(sk)->dcid = scid;
2875 __l2cap_chan_add(conn, sk, parent);
/* Our local source CID becomes the DCID in the response. */
2876 dcid = l2cap_pi(sk)->scid;
2878 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2880 l2cap_pi(sk)->ident = cmd->ident;
2882 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2883 if (l2cap_check_security(sk)) {
2884 if (bt_sk(sk)->defer_setup) {
2885 sk->sk_state = BT_CONNECT2;
2886 result = L2CAP_CR_PEND;
2887 status = L2CAP_CS_AUTHOR_PEND;
2888 parent->sk_data_ready(parent, 0);
2890 sk->sk_state = BT_CONFIG;
2891 result = L2CAP_CR_SUCCESS;
2892 status = L2CAP_CS_NO_INFO;
2895 sk->sk_state = BT_CONNECT2;
2896 result = L2CAP_CR_PEND;
2897 status = L2CAP_CS_AUTHEN_PEND;
2900 sk->sk_state = BT_CONNECT2;
2901 result = L2CAP_CR_PEND;
2902 status = L2CAP_CS_NO_INFO;
2905 write_unlock_bh(&list->lock);
2908 bh_unlock_sock(parent);
2911 rsp.scid = cpu_to_le16(scid);
2912 rsp.dcid = cpu_to_le16(dcid);
2913 rsp.result = cpu_to_le16(result);
2914 rsp.status = cpu_to_le16(status);
2915 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
/* Pending for lack of feature info: kick off the query now. */
2917 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2918 struct l2cap_info_req info;
2919 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2921 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2922 conn->info_ident = l2cap_get_ident(conn);
2924 mod_timer(&conn->info_timer, jiffies +
2925 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2927 l2cap_send_cmd(conn, conn->info_ident,
2928 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response to our outgoing request.  Success moves
 * the channel to BT_CONFIG and sends the first Configure Request;
 * pending marks CONNECT_PEND; any other result tears the channel
 * down with ECONNREFUSED. */
2934 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2936 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2937 u16 scid, dcid, result, status;
2941 scid = __le16_to_cpu(rsp->scid);
2942 dcid = __le16_to_cpu(rsp->dcid);
2943 result = __le16_to_cpu(rsp->result);
2944 status = __le16_to_cpu(rsp->status);
2946 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* A zero scid means the peer rejected before assigning a CID; fall
 * back to matching the pending channel by command ident. */
2949 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2953 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2959 case L2CAP_CR_SUCCESS:
2960 sk->sk_state = BT_CONFIG;
2961 l2cap_pi(sk)->ident = 0;
2962 l2cap_pi(sk)->dcid = dcid;
2963 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2964 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2966 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2967 l2cap_build_conf_req(sk, req), req);
2968 l2cap_pi(sk)->num_conf_req++;
2972 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2976 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle an incoming Configure Request.  Accumulates option data in
 * pi->conf_req across continuation packets (flag bit 0); on the final
 * packet, parses the request, sends our response, and — once both
 * directions are configured — finalises the FCS choice, resets the
 * sequence state and marks the channel ready. */
2984 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2986 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2992 dcid = __le16_to_cpu(req->dcid);
2993 flags = __le16_to_cpu(req->flags);
2995 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2997 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3001 if (sk->sk_state == BT_DISCONN)
3004 /* Reject if config buffer is too small. */
3005 len = cmd_len - sizeof(*req);
3006 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3007 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3008 l2cap_build_conf_rsp(sk, rsp,
3009 L2CAP_CONF_REJECT, flags), rsp);
/* Accumulate the option bytes; the request may span packets. */
3014 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3015 l2cap_pi(sk)->conf_len += len;
3017 if (flags & 0x0001) {
3018 /* Incomplete config. Send empty response. */
3019 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3020 l2cap_build_conf_rsp(sk, rsp,
3021 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3025 /* Complete config. */
3026 len = l2cap_parse_conf_req(sk, rsp);
3028 l2cap_send_disconn_req(conn, sk);
3032 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3033 l2cap_pi(sk)->num_conf_rsp++;
3035 /* Reset config buffer. */
3036 l2cap_pi(sk)->conf_len = 0;
3038 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
3041 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* CRC16 stays on unless both sides explicitly disabled the FCS. */
3042 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3043 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3044 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3046 sk->sk_state = BT_CONNECTED;
3048 l2cap_pi(sk)->next_tx_seq = 0;
3049 l2cap_pi(sk)->expected_tx_seq = 0;
3050 __skb_queue_head_init(TX_QUEUE(sk));
3051 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3052 l2cap_ertm_init(sk);
3054 l2cap_chan_ready(sk);
/* Peer configured first: now send our own Configure Request. */
3058 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3060 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3061 l2cap_build_conf_req(sk, buf), buf);
3062 l2cap_pi(sk)->num_conf_req++;
/* Handle an incoming Configure Response.  Success records the RFC
 * parameters; UNACCEPT triggers (a bounded number of) renegotiation
 * attempts; anything else disconnects.  When both directions finish,
 * finalise the FCS choice, reset sequence state and mark the channel
 * ready. */
3070 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3072 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3073 u16 scid, flags, result;
/* NOTE(review): cmd->len is a __le16 read without __le16_to_cpu —
 * wrong on big-endian hosts; upstream later added the conversion. */
3075 int len = cmd->len - sizeof(*rsp);
3077 scid = __le16_to_cpu(rsp->scid);
3078 flags = __le16_to_cpu(rsp->flags);
3079 result = __le16_to_cpu(rsp->result);
3081 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3082 scid, flags, result);
3084 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3089 case L2CAP_CONF_SUCCESS:
3090 l2cap_conf_rfc_get(sk, rsp->data, len);
3093 case L2CAP_CONF_UNACCEPT:
/* Retry negotiation only a bounded number of times. */
3094 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3097 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3098 l2cap_send_disconn_req(conn, sk);
3102 /* throw out any old stored conf requests */
3103 result = L2CAP_CONF_SUCCESS;
3104 len = l2cap_parse_conf_rsp(sk, rsp->data,
3107 l2cap_send_disconn_req(conn, sk);
3111 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3112 L2CAP_CONF_REQ, len, req);
3113 l2cap_pi(sk)->num_conf_req++;
3114 if (result != L2CAP_CONF_SUCCESS)
3120 sk->sk_err = ECONNRESET;
3121 l2cap_sock_set_timer(sk, HZ * 5);
3122 l2cap_send_disconn_req(conn, sk);
3129 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3131 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
/* CRC16 stays on unless both sides explicitly disabled the FCS. */
3132 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3133 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3134 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3136 sk->sk_state = BT_CONNECTED;
3137 l2cap_pi(sk)->next_tx_seq = 0;
3138 l2cap_pi(sk)->expected_tx_seq = 0;
3139 __skb_queue_head_init(TX_QUEUE(sk));
3140 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3141 l2cap_ertm_init(sk);
3143 l2cap_chan_ready(sk);
/*
 * Handle a peer-initiated Disconnect Request: acknowledge with a
 * Disconnect Response (echoing our scid/dcid pair), mark the socket
 * fully shut down, remove the channel (ECONNRESET) and kill the socket.
 */
3151 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3153 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3154 struct l2cap_disconn_rsp rsp;
3158 scid = __le16_to_cpu(req->scid);
3159 dcid = __le16_to_cpu(req->dcid);
3161 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our local channel id. */
3163 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3167 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3168 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3169 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3171 sk->sk_shutdown = SHUTDOWN_MASK;
3173 l2cap_chan_del(sk, ECONNRESET);
3176 l2cap_sock_kill(sk);
/*
 * Handle a Disconnect Response to a disconnect we initiated: find the
 * channel by our source CID and finish tearing it down (error 0 — this
 * was an orderly, locally requested shutdown).
 */
3180 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3182 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3186 scid = __le16_to_cpu(rsp->scid);
3187 dcid = __le16_to_cpu(rsp->dcid);
3189 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3191 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3195 l2cap_chan_del(sk, 0);
3198 l2cap_sock_kill(sk);
/*
 * Answer an Information Request.  Two query types are supported:
 *  - L2CAP_IT_FEAT_MASK:  report our feature mask (ERTM/streaming bits
 *    are ORed in; presumably gated on enable_ertm in the missing lines —
 *    TODO confirm against full source);
 *  - L2CAP_IT_FIXED_CHAN: report the fixed-channel bitmap.
 * Anything else is answered with L2CAP_IR_NOTSUPP.
 */
3202 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3204 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3207 type = __le16_to_cpu(req->type);
3209 BT_DBG("type 0x%4.4x", type);
3211 if (type == L2CAP_IT_FEAT_MASK) {
3213 u32 feat_mask = l2cap_feat_mask;
3214 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3215 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3216 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3218 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3220 put_unaligned_le32(feat_mask, rsp->data);
3221 l2cap_send_cmd(conn, cmd->ident,
3222 L2CAP_INFO_RSP, sizeof(buf), buf);
3223 } else if (type == L2CAP_IT_FIXED_CHAN) {
3225 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3226 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3227 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel map follows the 4-byte info_rsp header. */
3228 memcpy(buf + 4, l2cap_fixed_chan, 8);
3229 l2cap_send_cmd(conn, cmd->ident,
3230 L2CAP_INFO_RSP, sizeof(buf), buf);
3232 struct l2cap_info_rsp rsp;
3233 rsp.type = cpu_to_le16(type);
3234 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3235 l2cap_send_cmd(conn, cmd->ident,
3236 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an Information Response from the peer.  On a feature-mask reply
 * we cache conn->feat_mask and, if the peer supports fixed channels,
 * chain a second request for the fixed-channel map; otherwise the info
 * exchange is finished and pending channels can start connecting.
 */
3242 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3244 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3247 type = __le16_to_cpu(rsp->type);
3248 result = __le16_to_cpu(rsp->result);
3250 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* A reply arrived, so the info-request guard timer can be stopped. */
3252 del_timer(&conn->info_timer);
3254 if (type == L2CAP_IT_FEAT_MASK) {
3255 conn->feat_mask = get_unaligned_le32(rsp->data);
3257 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3258 struct l2cap_info_req req;
3259 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3261 conn->info_ident = l2cap_get_ident(conn);
3263 l2cap_send_cmd(conn, conn->info_ident,
3264 L2CAP_INFO_REQ, sizeof(req), &req);
3266 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3267 conn->info_ident = 0;
3269 l2cap_conn_start(conn);
3271 } else if (type == L2CAP_IT_FIXED_CHAN) {
3272 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3273 conn->info_ident = 0;
3275 l2cap_conn_start(conn);
/*
 * Demultiplex the L2CAP signalling channel: copy out each command header,
 * validate its length/ident, dispatch to the per-command handler, and send
 * a Command Reject for unknown commands or handler failures.  Raw sockets
 * get a copy of the whole PDU first via l2cap_raw_recv().
 */
3281 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3283 u8 *data = skb->data;
3285 struct l2cap_cmd_hdr cmd;
3288 l2cap_raw_recv(conn, skb);
3290 while (len >= L2CAP_CMD_HDR_SIZE) {
3292 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3293 data += L2CAP_CMD_HDR_SIZE;
3294 len -= L2CAP_CMD_HDR_SIZE;
3296 cmd_len = le16_to_cpu(cmd.len);
3298 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command whose declared length exceeds the remaining PDU, or with
 * a zero ident, is malformed; stop parsing. */
3300 if (cmd_len > len || !cmd.ident) {
3301 BT_DBG("corrupted command");
3306 case L2CAP_COMMAND_REJ:
3307 l2cap_command_rej(conn, &cmd, data);
3310 case L2CAP_CONN_REQ:
3311 err = l2cap_connect_req(conn, &cmd, data);
3314 case L2CAP_CONN_RSP:
3315 err = l2cap_connect_rsp(conn, &cmd, data);
3318 case L2CAP_CONF_REQ:
3319 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3322 case L2CAP_CONF_RSP:
3323 err = l2cap_config_rsp(conn, &cmd, data);
3326 case L2CAP_DISCONN_REQ:
3327 err = l2cap_disconnect_req(conn, &cmd, data);
3330 case L2CAP_DISCONN_RSP:
3331 err = l2cap_disconnect_rsp(conn, &cmd, data);
3334 case L2CAP_ECHO_REQ:
/* Echo simply mirrors the payload back with the same ident. */
3335 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3338 case L2CAP_ECHO_RSP:
3341 case L2CAP_INFO_REQ:
3342 err = l2cap_information_req(conn, &cmd, data);
3345 case L2CAP_INFO_RSP:
3346 err = l2cap_information_rsp(conn, &cmd, data);
3350 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3356 struct l2cap_cmd_rej rej;
3357 BT_DBG("error %d", err);
3359 /* FIXME: Map err to a valid reason */
3360 rej.reason = cpu_to_le16(0);
3361 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Verify the 2-byte CRC16 FCS trailer of an ERTM/streaming frame.
 * The skb is trimmed by 2 so that skb->data + skb->len points at the
 * (still present) FCS bytes; the CRC is computed over the L2CAP header
 * (which sits hdr_size bytes before skb->data) plus the payload.
 * Returns nonzero on mismatch (exact value in the missing lines —
 * presumably -EBADMSG/0; TODO confirm against full source).
 */
3371 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3373 u16 our_fcs, rcv_fcs;
3374 int hdr_size = L2CAP_HDR_SIZE + 2;
3376 if (pi->fcs == L2CAP_FCS_CRC16) {
3377 skb_trim(skb, skb->len - 2);
3378 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3379 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3381 if (our_fcs != rcv_fcs)
/*
 * Reply to a poll: send a Receiver-Not-Ready S-frame if we are locally
 * busy, otherwise try to flush pending I-frames; if nothing was sent at
 * all, answer with a Receiver-Ready S-frame so the poll gets its F-bit
 * response.
 */
3387 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3389 struct l2cap_pinfo *pi = l2cap_pi(sk);
3392 pi->frames_sent = 0;
3394 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3396 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3397 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3398 l2cap_send_sframe(pi, control);
3399 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3400 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3403 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3404 __mod_retrans_timer();
/* Try to push queued I-frames; send_lock serialises transmitters. */
3406 spin_lock_bh(&pi->send_lock);
3407 l2cap_ertm_send(sk);
3408 spin_unlock_bh(&pi->send_lock);
3410 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3411 pi->frames_sent == 0) {
3412 control |= L2CAP_SUPER_RCV_READY;
3413 l2cap_send_sframe(pi, control);
/*
 * Insert an out-of-order I-frame into the SREJ queue, kept sorted by
 * tx_seq modulo 64 relative to buffer_seq.  Duplicate tx_seq values are
 * rejected (the missing lines presumably return an error there — TODO
 * confirm).  Falls through to a tail append when the frame sorts last.
 */
3417 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3419 struct sk_buff *next_skb;
3420 struct l2cap_pinfo *pi = l2cap_pi(sk);
3421 int tx_seq_offset, next_tx_seq_offset;
3423 bt_cb(skb)->tx_seq = tx_seq;
3424 bt_cb(skb)->sar = sar;
3426 next_skb = skb_peek(SREJ_QUEUE(sk));
3428 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Offsets are taken modulo the 64-frame sequence space; negative
 * remainders are normalised into [0, 63]. */
3432 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3433 if (tx_seq_offset < 0)
3434 tx_seq_offset += 64;
3437 if (bt_cb(next_skb)->tx_seq == tx_seq)
3440 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3441 pi->buffer_seq) % 64;
3442 if (next_tx_seq_offset < 0)
3443 next_tx_seq_offset += 64;
3445 if (next_tx_seq_offset > tx_seq_offset) {
3446 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3450 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3453 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3455 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/*
 * Reassemble ERTM I-frames into SDUs according to their SAR bits:
 * unsegmented frames are queued to the socket directly; START allocates
 * pi->sdu and records the declared SDU length; CONTINUE/END append and
 * validate against the declared length; on END a clone is queued to the
 * socket, with SAR_RETRY handling so a failed sock_queue_rcv_skb() (e.g.
 * receive buffer full) can be retried without duplicating data.
 * Protocol violations end in l2cap_send_disconn_req().
 */
3460 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3462 struct l2cap_pinfo *pi = l2cap_pi(sk);
3463 struct sk_buff *_skb;
3466 switch (control & L2CAP_CTRL_SAR) {
3467 case L2CAP_SDU_UNSEGMENTED:
/* A START without a matching END is a violation. */
3468 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3471 err = sock_queue_rcv_skb(sk, skb);
3477 case L2CAP_SDU_START:
3478 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3481 pi->sdu_len = get_unaligned_le16(skb->data);
3483 if (pi->sdu_len > pi->imtu)
3486 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3490 /* pull sdu_len bytes only after alloc, because of Local Busy
3491 * condition we have to be sure that this will be executed
3492 * only once, i.e., when alloc does not fail */
3495 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3497 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3498 pi->partial_sdu_len = skb->len;
3501 case L2CAP_SDU_CONTINUE:
3502 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3508 pi->partial_sdu_len += skb->len;
3509 if (pi->partial_sdu_len > pi->sdu_len)
3512 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3517 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* On retry after a Local Busy recovery, the data was already
 * appended; skip straight to re-queueing. */
3523 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3524 pi->partial_sdu_len += skb->len;
3526 if (pi->partial_sdu_len > pi->imtu)
3529 if (pi->partial_sdu_len != pi->sdu_len)
3532 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3535 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3537 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3541 err = sock_queue_rcv_skb(sk, _skb);
3544 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3548 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3549 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3563 l2cap_send_disconn_req(pi->conn, sk);
/*
 * Workqueue handler that resolves a Local Busy condition: it waits
 * (interruptibly, bounded by L2CAP_LOCAL_BUSY_TRIES) until the frames
 * parked on BUSY_QUEUE can be pushed through reassembly, then sends an
 * RR+Poll S-frame to tell the peer we are ready again and clears the
 * LOCAL_BUSY/RNR_SENT state.
 */
3568 static void l2cap_busy_work(struct work_struct *work)
3570 DECLARE_WAITQUEUE(wait, current);
3571 struct l2cap_pinfo *pi =
3572 container_of(work, struct l2cap_pinfo, busy_work);
/* struct sock is the first member of l2cap_pinfo, so this cast is valid. */
3573 struct sock *sk = (struct sock *)pi;
3574 int n_tries = 0, timeo = HZ/5, err;
3575 struct sk_buff *skb;
3580 add_wait_queue(sk_sleep(sk), &wait);
3581 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3582 set_current_state(TASK_INTERRUPTIBLE);
3584 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
/* Could not drain the busy queue in time: give up and disconnect. */
3586 l2cap_send_disconn_req(pi->conn, sk);
3593 if (signal_pending(current)) {
3594 err = sock_intr_errno(timeo);
3599 timeo = schedule_timeout(timeo);
3602 err = sock_error(sk);
3606 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3607 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3608 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Reassembly failed again: put the frame back and keep waiting. */
3610 skb_queue_head(BUSY_QUEUE(sk), skb);
3614 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3621 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* Tell the peer we are ready again and poll for its state. */
3624 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3625 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3626 l2cap_send_sframe(pi, control);
3627 l2cap_pi(sk)->retry_count = 1;
3629 del_timer(&pi->retrans_timer);
3630 __mod_monitor_timer();
3632 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3635 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3636 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3638 set_current_state(TASK_RUNNING);
3639 remove_wait_queue(sk_sleep(sk), &wait);
/*
 * Push one received I-frame toward the socket.  If we are already in
 * Local Busy, just park it on BUSY_QUEUE.  Otherwise attempt reassembly;
 * a failure (receive buffer pressure) enters the Busy Condition: queue
 * the frame, send an RNR S-frame, and schedule l2cap_busy_work to drain.
 */
3644 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3646 struct l2cap_pinfo *pi = l2cap_pi(sk);
3649 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3650 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3651 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3655 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3657 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3661 /* Busy Condition */
3662 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3663 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3664 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3666 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3667 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3668 l2cap_send_sframe(pi, sctrl);
3670 pi->conn_state |= L2CAP_CONN_RNR_SENT;
/* No point acking while busy; the busy worker takes over from here. */
3672 del_timer(&pi->ack_timer);
3674 queue_work(_busy_wq, &pi->busy_work);
/*
 * Streaming-mode SDU reassembly.  Same SAR state machine as the ERTM
 * variant, but streaming mode is lossy: an interrupted SDU is simply
 * abandoned (SAR_SDU cleared) instead of disconnecting, and an END frame
 * whose total length does not match the declared sdu_len is dropped.
 */
3679 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3681 struct l2cap_pinfo *pi = l2cap_pi(sk);
3682 struct sk_buff *_skb;
3686 * TODO: We have to notify the userland if some data is lost with the
3690 switch (control & L2CAP_CTRL_SAR) {
3691 case L2CAP_SDU_UNSEGMENTED:
3692 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3697 err = sock_queue_rcv_skb(sk, skb);
3703 case L2CAP_SDU_START:
3704 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3709 pi->sdu_len = get_unaligned_le16(skb->data);
3712 if (pi->sdu_len > pi->imtu) {
3717 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3723 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3725 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3726 pi->partial_sdu_len = skb->len;
3730 case L2CAP_SDU_CONTINUE:
3731 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3734 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3736 pi->partial_sdu_len += skb->len;
3737 if (pi->partial_sdu_len > pi->sdu_len)
3745 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3748 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3750 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3751 pi->partial_sdu_len += skb->len;
3753 if (pi->partial_sdu_len > pi->imtu)
/* Only deliver the SDU if it arrived complete. */
3756 if (pi->partial_sdu_len == pi->sdu_len) {
3757 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3758 err = sock_queue_rcv_skb(sk, _skb);
/*
 * After a selectively-rejected frame finally arrives, drain the run of
 * now-in-order frames from the head of the SREJ queue through reassembly,
 * advancing buffer_seq_srej and tx_seq until the next gap.
 */
3773 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3775 struct sk_buff *skb;
3778 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3779 if (bt_cb(skb)->tx_seq != tx_seq)
3782 skb = skb_dequeue(SREJ_QUEUE(sk));
3783 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3784 l2cap_ertm_reassembly_sdu(sk, skb, control);
3785 l2cap_pi(sk)->buffer_seq_srej =
3786 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3787 tx_seq = (tx_seq + 1) % 64;
/*
 * Walk the pending-SREJ list: entries up to and including tx_seq are
 * considered answered (the matching entry's handling sits in the missing
 * lines — presumably freed there; TODO confirm), while the remaining
 * entries get their SREJ S-frame re-sent and are re-queued at the tail.
 */
3791 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3793 struct l2cap_pinfo *pi = l2cap_pi(sk);
3794 struct srej_list *l, *tmp;
3797 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3798 if (l->tx_seq == tx_seq) {
3803 control = L2CAP_SUPER_SELECT_REJECT;
3804 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3805 l2cap_send_sframe(pi, control);
3807 list_add_tail(&l->list, SREJ_LIST(sk));
/*
 * Send a SREJ S-frame for every sequence number between expected_tx_seq
 * and the received (out-of-order) tx_seq, recording each in SREJ_LIST so
 * the retransmissions can be matched up later.
 *
 * NOTE(review): the kzalloc(GFP_ATOMIC) result is dereferenced without a
 * NULL check — an upstream defect; not fixable safely from this partial
 * extraction.
 */
3811 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3813 struct l2cap_pinfo *pi = l2cap_pi(sk);
3814 struct srej_list *new;
3817 while (tx_seq != pi->expected_tx_seq) {
3818 control = L2CAP_SUPER_SELECT_REJECT;
3819 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3820 l2cap_send_sframe(pi, control);
3822 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3823 new->tx_seq = pi->expected_tx_seq;
3824 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3825 list_add_tail(&new->list, SREJ_LIST(sk));
/* Skip past the frame that just arrived out of order. */
3827 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/*
 * Core ERTM receive path for I-frames.  Handles, in order: F-bit response
 * to an outstanding poll; acking of our transmitted frames via req_seq;
 * in-sequence delivery; tx_win validation; SREJ bookkeeping for
 * out-of-sequence frames (gap fill, duplicate detection, first-SREJ
 * setup); and periodic acknowledgement every num_to_ack frames.
 */
3830 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3832 struct l2cap_pinfo *pi = l2cap_pi(sk);
3833 u8 tx_seq = __get_txseq(rx_control);
3834 u8 req_seq = __get_reqseq(rx_control);
3835 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3836 int tx_seq_offset, expected_tx_seq_offset;
/* Ack threshold derived from the negotiated transmit window. */
3837 int num_to_ack = (pi->tx_win/6) + 1;
3840 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3842 if (L2CAP_CTRL_FINAL & rx_control &&
3843 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3844 del_timer(&pi->monitor_timer);
3845 if (pi->unacked_frames > 0)
3846 __mod_retrans_timer();
3847 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3850 pi->expected_ack_seq = req_seq;
3851 l2cap_drop_acked_frames(sk);
3853 if (tx_seq == pi->expected_tx_seq)
3856 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3857 if (tx_seq_offset < 0)
3858 tx_seq_offset += 64;
3860 /* invalid tx_seq */
3861 if (tx_seq_offset >= pi->tx_win) {
3862 l2cap_send_disconn_req(pi->conn, sk);
/* NOTE(review): '==' compares the whole conn_state word against one flag;
 * looks like it should be a '&' bit test — cannot fix from this partial
 * view, flagging only. */
3866 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3869 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3870 struct srej_list *first;
3872 first = list_first_entry(SREJ_LIST(sk),
3873 struct srej_list, list);
3874 if (tx_seq == first->tx_seq) {
3875 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3876 l2cap_check_srej_gap(sk, tx_seq);
3878 list_del(&first->list);
3881 if (list_empty(SREJ_LIST(sk))) {
/* All requested retransmissions arrived: leave SREJ recovery. */
3882 pi->buffer_seq = pi->buffer_seq_srej;
3883 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3887 struct srej_list *l;
3889 /* duplicated tx_seq */
3890 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3893 list_for_each_entry(l, SREJ_LIST(sk), list) {
3894 if (l->tx_seq == tx_seq) {
3895 l2cap_resend_srejframe(sk, tx_seq);
3899 l2cap_send_srejframe(sk, tx_seq);
3902 expected_tx_seq_offset =
3903 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3904 if (expected_tx_seq_offset < 0)
3905 expected_tx_seq_offset += 64;
3907 /* duplicated tx_seq */
3908 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter SREJ recovery. */
3911 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3913 INIT_LIST_HEAD(SREJ_LIST(sk));
3914 pi->buffer_seq_srej = pi->buffer_seq;
3916 __skb_queue_head_init(SREJ_QUEUE(sk));
3917 __skb_queue_head_init(BUSY_QUEUE(sk));
3918 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3920 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3922 l2cap_send_srejframe(sk, tx_seq);
3924 del_timer(&pi->ack_timer);
/* In-sequence frame: deliver (or park in SREJ queue while recovering). */
3929 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3931 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3932 bt_cb(skb)->tx_seq = tx_seq;
3933 bt_cb(skb)->sar = sar;
3934 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3938 err = l2cap_push_rx_skb(sk, skb, rx_control);
3942 if (rx_control & L2CAP_CTRL_FINAL) {
3943 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3944 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3946 l2cap_retransmit_frames(sk);
3951 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3952 if (pi->num_acked == num_to_ack - 1)
/*
 * Handle a Receiver-Ready S-frame: ack our frames up to req_seq, then
 * respond to P-bit polls (srej-tail or I/RR/RNR response), process F-bit
 * final responses (possibly triggering retransmission), and otherwise
 * clear REMOTE_BUSY and resume sending.
 */
3962 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3964 struct l2cap_pinfo *pi = l2cap_pi(sk);
3966 pi->expected_ack_seq = __get_reqseq(rx_control);
3967 l2cap_drop_acked_frames(sk);
3969 if (rx_control & L2CAP_CTRL_POLL) {
3970 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3971 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3972 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3973 (pi->unacked_frames > 0))
3974 __mod_retrans_timer();
3976 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3977 l2cap_send_srejtail(sk);
3979 l2cap_send_i_or_rr_or_rnr(sk);
3982 } else if (rx_control & L2CAP_CTRL_FINAL) {
3983 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3985 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3986 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3988 l2cap_retransmit_frames(sk);
3991 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3992 (pi->unacked_frames > 0))
3993 __mod_retrans_timer();
3995 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3996 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3999 spin_lock_bh(&pi->send_lock);
4000 l2cap_ertm_send(sk);
4001 spin_unlock_bh(&pi->send_lock);
/*
 * Handle a Reject S-frame: ack up to req_seq and retransmit from there.
 * With an F-bit the REJ_ACT guard avoids retransmitting twice for the
 * same reject; while waiting for a final (WAIT_F) the reject is latched
 * as REJ_ACT instead.
 */
4006 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4008 struct l2cap_pinfo *pi = l2cap_pi(sk);
4009 u8 tx_seq = __get_reqseq(rx_control);
4011 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4013 pi->expected_ack_seq = tx_seq;
4014 l2cap_drop_acked_frames(sk);
4016 if (rx_control & L2CAP_CTRL_FINAL) {
4017 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4018 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4020 l2cap_retransmit_frames(sk);
4022 l2cap_retransmit_frames(sk);
4024 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4025 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/*
 * Handle a Selective Reject S-frame: retransmit exactly the requested
 * frame.  A P-bit also acks up to tx_seq and flushes the send queue; an
 * F-bit is deduplicated against srej_save_reqseq via SREJ_ACT so the
 * same frame is not retransmitted twice.
 */
4028 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4030 struct l2cap_pinfo *pi = l2cap_pi(sk);
4031 u8 tx_seq = __get_reqseq(rx_control);
4033 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4035 if (rx_control & L2CAP_CTRL_POLL) {
4036 pi->expected_ack_seq = tx_seq;
4037 l2cap_drop_acked_frames(sk);
4039 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4040 l2cap_retransmit_one_frame(sk, tx_seq);
4042 spin_lock_bh(&pi->send_lock);
4043 l2cap_ertm_send(sk);
4044 spin_unlock_bh(&pi->send_lock);
4046 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4047 pi->srej_save_reqseq = tx_seq;
4048 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4050 } else if (rx_control & L2CAP_CTRL_FINAL) {
4051 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4052 pi->srej_save_reqseq == tx_seq)
4053 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4055 l2cap_retransmit_one_frame(sk, tx_seq);
4057 l2cap_retransmit_one_frame(sk, tx_seq);
4058 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4059 pi->srej_save_reqseq = tx_seq;
4060 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/*
 * Handle a Receiver-Not-Ready S-frame: mark the peer busy, ack up to
 * req_seq and stop retransmitting.  A P-bit still requires an immediate
 * F-bit answer (RR/RNR or srej-tail, depending on SREJ state).
 */
4065 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4067 struct l2cap_pinfo *pi = l2cap_pi(sk);
4068 u8 tx_seq = __get_reqseq(rx_control);
4070 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4071 pi->expected_ack_seq = tx_seq;
4072 l2cap_drop_acked_frames(sk);
4074 if (rx_control & L2CAP_CTRL_POLL)
4075 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4077 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* Peer cannot receive: suspend the retransmission timer. */
4078 del_timer(&pi->retrans_timer);
4079 if (rx_control & L2CAP_CTRL_POLL)
4080 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4084 if (rx_control & L2CAP_CTRL_POLL)
4085 l2cap_send_srejtail(sk);
4087 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/*
 * Dispatch a supervisory (S) frame to the RR/REJ/SREJ/RNR handler after
 * first servicing an F-bit response to an outstanding poll (stop the
 * monitor timer, restart retransmission if frames are still unacked).
 */
4090 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4092 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4094 if (L2CAP_CTRL_FINAL & rx_control &&
4095 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4096 del_timer(&l2cap_pi(sk)->monitor_timer);
4097 if (l2cap_pi(sk)->unacked_frames > 0)
4098 __mod_retrans_timer();
4099 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4102 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4103 case L2CAP_SUPER_RCV_READY:
4104 l2cap_data_channel_rrframe(sk, rx_control);
4107 case L2CAP_SUPER_REJECT:
4108 l2cap_data_channel_rejframe(sk, rx_control);
4111 case L2CAP_SUPER_SELECT_REJECT:
4112 l2cap_data_channel_srejframe(sk, rx_control);
4115 case L2CAP_SUPER_RCV_NOT_READY:
4116 l2cap_data_channel_rnrframe(sk, rx_control);
/*
 * Entry point for data arriving on a connection-oriented channel.  Looks
 * the socket up by CID and then branches on the channel mode:
 *  - BASIC: MTU check, then queue straight to the socket;
 *  - ERTM: validate control field, FCS and req-seq window, then hand
 *    I-frames/S-frames to the respective state machines;
 *  - STREAMING: validate, then resynchronise expected_tx_seq to the
 *    received tx_seq (lossy) and reassemble.
 */
4124 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4127 struct l2cap_pinfo *pi;
4130 int len, next_tx_seq_offset, req_seq_offset;
4132 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4134 BT_DBG("unknown cid 0x%4.4x", cid);
4140 BT_DBG("sk %p, len %d", sk, skb->len);
4142 if (sk->sk_state != BT_CONNECTED)
4146 case L2CAP_MODE_BASIC:
4147 /* If socket recv buffers overflows we drop data here
4148 * which is *bad* because L2CAP has to be reliable.
4149 * But we don't have any other choice. L2CAP doesn't
4150 * provide flow control mechanism. */
4152 if (pi->imtu < skb->len)
4155 if (!sock_queue_rcv_skb(sk, skb))
4159 case L2CAP_MODE_ERTM:
4160 control = get_unaligned_le16(skb->data);
/* A START-of-SDU I-frame carries an extra 2-byte SDU-length field. */
4164 if (__is_sar_start(control) && __is_iframe(control))
4167 if (pi->fcs == L2CAP_FCS_CRC16)
4171 * We can just drop the corrupted I-frame here.
4172 * Receiver will miss it and start proper recovery
4173 * procedures and ask retransmission.
4175 if (len > pi->mps) {
4176 l2cap_send_disconn_req(pi->conn, sk);
4180 if (l2cap_check_fcs(pi, skb))
4183 req_seq = __get_reqseq(control);
4184 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4185 if (req_seq_offset < 0)
4186 req_seq_offset += 64;
4188 next_tx_seq_offset =
4189 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4190 if (next_tx_seq_offset < 0)
4191 next_tx_seq_offset += 64;
4193 /* check for invalid req-seq */
4194 if (req_seq_offset > next_tx_seq_offset) {
4195 l2cap_send_disconn_req(pi->conn, sk);
4199 if (__is_iframe(control)) {
4201 l2cap_send_disconn_req(pi->conn, sk);
4205 l2cap_data_channel_iframe(sk, control, skb);
4208 l2cap_send_disconn_req(pi->conn, sk);
4212 l2cap_data_channel_sframe(sk, control, skb);
4217 case L2CAP_MODE_STREAMING:
4218 control = get_unaligned_le16(skb->data);
4222 if (__is_sar_start(control))
4225 if (pi->fcs == L2CAP_FCS_CRC16)
4228 if (len > pi->mps || len < 0 || __is_sframe(control))
4231 if (l2cap_check_fcs(pi, skb))
4234 tx_seq = __get_txseq(control);
4236 if (pi->expected_tx_seq == tx_seq)
4237 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* Streaming mode tolerates loss: jump forward past the gap. */
4239 pi->expected_tx_seq = (tx_seq + 1) % 64;
4241 l2cap_streaming_reassembly_sdu(sk, skb, control);
4246 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/*
 * Deliver a connectionless (G-frame) payload: find a socket bound to the
 * PSM, check state and MTU, and queue the skb to it.
 */
4260 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4264 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4268 BT_DBG("sk %p, len %d", sk, skb->len);
4270 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4273 if (l2cap_pi(sk)->imtu < skb->len)
4276 if (!sock_queue_rcv_skb(sk, skb))
/*
 * Top-level demultiplexer for a complete L2CAP frame: strip the basic
 * header, verify the declared length, then route by CID to the
 * signalling channel, the connectionless channel (PSM-prefixed), or a
 * connection-oriented data channel.
 */
4288 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4290 struct l2cap_hdr *lh = (void *) skb->data;
4294 skb_pull(skb, L2CAP_HDR_SIZE);
4295 cid = __le16_to_cpu(lh->cid);
4296 len = __le16_to_cpu(lh->len);
4298 if (len != skb->len) {
4303 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4306 case L2CAP_CID_SIGNALING:
4307 l2cap_sig_channel(conn, skb);
4310 case L2CAP_CID_CONN_LESS:
4311 psm = get_unaligned_le16(skb->data);
4313 l2cap_conless_channel(conn, psm, skb);
4317 l2cap_data_channel(conn, cid, skb);
4322 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: an incoming ACL connection is being set up.  Scan the
 * listening L2CAP sockets and compute the accept/role-switch link-mode
 * mask, preferring exact local-address matches (lm1) over wildcard
 * BDADDR_ANY listeners (lm2).
 */
4324 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4326 int exact = 0, lm1 = 0, lm2 = 0;
4327 register struct sock *sk;
4328 struct hlist_node *node;
4330 if (type != ACL_LINK)
4333 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4335 /* Find listening sockets and check their link_mode */
4336 read_lock(&l2cap_sk_list.lock);
4337 sk_for_each(sk, node, &l2cap_sk_list.head) {
4338 if (sk->sk_state != BT_LISTEN)
4341 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4342 lm1 |= HCI_LM_ACCEPT;
4343 if (l2cap_pi(sk)->role_switch)
4344 lm1 |= HCI_LM_MASTER;
4346 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4347 lm2 |= HCI_LM_ACCEPT;
4348 if (l2cap_pi(sk)->role_switch)
4349 lm2 |= HCI_LM_MASTER;
4352 read_unlock(&l2cap_sk_list.lock);
4354 return exact ? lm1 : lm2;
/*
 * HCI callback: ACL connection attempt completed.  On success attach an
 * l2cap_conn and mark it ready; on failure tear it down with the mapped
 * Bluetooth error code.
 */
4357 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4359 struct l2cap_conn *conn;
4361 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4363 if (hcon->type != ACL_LINK)
4367 conn = l2cap_conn_add(hcon, status);
4369 l2cap_conn_ready(conn);
4371 l2cap_conn_del(hcon, bt_err(status));
/*
 * HCI callback: report the disconnect reason recorded for this ACL
 * link's L2CAP connection (default for non-ACL/absent conn is in the
 * missing lines).
 */
4376 static int l2cap_disconn_ind(struct hci_conn *hcon)
4378 struct l2cap_conn *conn = hcon->l2cap_data;
4380 BT_DBG("hcon %p", hcon);
4382 if (hcon->type != ACL_LINK || !conn)
4385 return conn->disc_reason;
/*
 * HCI callback: the ACL link went down — destroy the associated L2CAP
 * connection with the mapped error code.
 */
4388 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4390 BT_DBG("hcon %p reason %d", hcon, reason);
4392 if (hcon->type != ACL_LINK)
4395 l2cap_conn_del(hcon, bt_err(reason));
/*
 * React to an encryption change on a connected channel (SEQPACKET/STREAM
 * only).  Losing encryption gives MEDIUM-security channels a 5s grace
 * timer and closes HIGH-security channels outright; regaining it clears
 * the grace timer.
 */
4400 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4402 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4405 if (encrypt == 0x00) {
4406 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4407 l2cap_sock_clear_timer(sk);
4408 l2cap_sock_set_timer(sk, HZ * 5);
4409 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4410 __l2cap_sock_close(sk, ECONNREFUSED);
4412 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4413 l2cap_sock_clear_timer(sk);
/*
 * HCI callback: an authentication/encryption procedure finished.  Walk
 * every channel on the connection and advance its state machine:
 * connected channels get an encryption re-check, BT_CONNECT channels
 * send their pending Connect Request, and BT_CONNECT2 channels answer
 * the peer's Connect Request with success or a security block.
 */
4417 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4419 struct l2cap_chan_list *l;
4420 struct l2cap_conn *conn = hcon->l2cap_data;
4426 l = &conn->chan_list;
4428 BT_DBG("conn %p", conn);
4430 read_lock(&l->lock);
4432 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
4435 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4440 if (!status && (sk->sk_state == BT_CONNECTED ||
4441 sk->sk_state == BT_CONFIG)) {
4442 l2cap_check_encryption(sk, encrypt);
4447 if (sk->sk_state == BT_CONNECT) {
/* Security is now in place: fire the deferred Connect Request. */
4449 struct l2cap_conn_req req;
4450 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4451 req.psm = l2cap_pi(sk)->psm;
4453 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4454 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4456 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4457 L2CAP_CONN_REQ, sizeof(req), &req);
4459 l2cap_sock_clear_timer(sk);
4460 l2cap_sock_set_timer(sk, HZ / 10);
4462 } else if (sk->sk_state == BT_CONNECT2) {
4463 struct l2cap_conn_rsp rsp;
4467 sk->sk_state = BT_CONFIG;
4468 result = L2CAP_CR_SUCCESS;
4470 sk->sk_state = BT_DISCONN;
4471 l2cap_sock_set_timer(sk, HZ / 10);
4472 result = L2CAP_CR_SEC_BLOCK;
4475 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4476 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4477 rsp.result = cpu_to_le16(result);
4478 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4479 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4480 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4486 read_unlock(&l->lock);
/*
 * HCI callback: reassemble ACL fragments into complete L2CAP frames.
 * ACL_START fragments carry the L2CAP header: if the whole frame fits,
 * deliver immediately, otherwise allocate conn->rx_skb and accumulate;
 * continuation fragments append until rx_len reaches zero.  Any length
 * inconsistency marks the connection unreliable (ECOMM) and resets the
 * reassembly buffer.
 */
4491 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4493 struct l2cap_conn *conn = hcon->l2cap_data;
4495 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4498 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4500 if (flags & ACL_START) {
4501 struct l2cap_hdr *hdr;
/* A new start while reassembly is in progress: drop the stale buffer. */
4505 BT_ERR("Unexpected start frame (len %d)", skb->len);
4506 kfree_skb(conn->rx_skb);
4507 conn->rx_skb = NULL;
4509 l2cap_conn_unreliable(conn, ECOMM);
4513 BT_ERR("Frame is too short (len %d)", skb->len);
4514 l2cap_conn_unreliable(conn, ECOMM);
4518 hdr = (struct l2cap_hdr *) skb->data;
4519 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4521 if (len == skb->len) {
4522 /* Complete frame received */
4523 l2cap_recv_frame(conn, skb);
4527 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4529 if (skb->len > len) {
4530 BT_ERR("Frame is too long (len %d, expected len %d)",
4532 l2cap_conn_unreliable(conn, ECOMM);
4536 /* Allocate skb for the complete frame (with header) */
4537 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4541 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4543 conn->rx_len = len - skb->len;
4545 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4547 if (!conn->rx_len) {
4548 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4549 l2cap_conn_unreliable(conn, ECOMM);
4553 if (skb->len > conn->rx_len) {
4554 BT_ERR("Fragment is too long (len %d, expected %d)",
4555 skb->len, conn->rx_len);
4556 kfree_skb(conn->rx_skb);
4557 conn->rx_skb = NULL;
4559 l2cap_conn_unreliable(conn, ECOMM);
4563 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4565 conn->rx_len -= skb->len;
4567 if (!conn->rx_len) {
4568 /* Complete frame received */
4569 l2cap_recv_frame(conn, conn->rx_skb);
4570 conn->rx_skb = NULL;
/*
 * debugfs seq_file renderer: one line per L2CAP socket with addresses,
 * state, PSM, CIDs, MTUs and security level.
 */
4579 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4582 struct hlist_node *node;
4584 read_lock_bh(&l2cap_sk_list.lock);
4586 sk_for_each(sk, node, &l2cap_sk_list.head) {
4587 struct l2cap_pinfo *pi = l2cap_pi(sk);
4589 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4590 batostr(&bt_sk(sk)->src),
4591 batostr(&bt_sk(sk)->dst),
4592 sk->sk_state, __le16_to_cpu(pi->psm),
4594 pi->imtu, pi->omtu, pi->sec_level);
4597 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open hook: bind the seq_file renderer to the file. */
4602 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4604 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the /sys/kernel/debug/.../l2cap entry. */
4607 static const struct file_operations l2cap_debugfs_fops = {
4608 .open = l2cap_debugfs_open,
4610 .llseek = seq_lseek,
4611 .release = single_release,
/* Dentry of the debugfs file, kept so l2cap_exit() can remove it. */
4614 static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets. */
4616 static const struct proto_ops l2cap_sock_ops = {
4617 .family = PF_BLUETOOTH,
4618 .owner = THIS_MODULE,
4619 .release = l2cap_sock_release,
4620 .bind = l2cap_sock_bind,
4621 .connect = l2cap_sock_connect,
4622 .listen = l2cap_sock_listen,
4623 .accept = l2cap_sock_accept,
4624 .getname = l2cap_sock_getname,
4625 .sendmsg = l2cap_sock_sendmsg,
4626 .recvmsg = l2cap_sock_recvmsg,
4627 .poll = bt_sock_poll,
4628 .ioctl = bt_sock_ioctl,
4629 .mmap = sock_no_mmap,
4630 .socketpair = sock_no_socketpair,
4631 .shutdown = l2cap_sock_shutdown,
4632 .setsockopt = l2cap_sock_setsockopt,
4633 .getsockopt = l2cap_sock_getsockopt
/* Protocol-family hook: creates L2CAP sockets for PF_BLUETOOTH. */
4636 static const struct net_proto_family l2cap_sock_family_ops = {
4637 .family = PF_BLUETOOTH,
4638 .owner = THIS_MODULE,
4639 .create = l2cap_sock_create,
/* HCI protocol registration: routes ACL events and data into L2CAP. */
4642 static struct hci_proto l2cap_hci_proto = {
4644 .id = HCI_PROTO_L2CAP,
4645 .connect_ind = l2cap_connect_ind,
4646 .connect_cfm = l2cap_connect_cfm,
4647 .disconn_ind = l2cap_disconn_ind,
4648 .disconn_cfm = l2cap_disconn_cfm,
4649 .security_cfm = l2cap_security_cfm,
4650 .recv_acldata = l2cap_recv_acldata
/*
 * Module init: register the proto, create the busy-condition workqueue,
 * register the BT socket family and the HCI protocol, and create the
 * debugfs entry.  Later failures unwind the earlier registrations
 * (error paths partly in the missing lines).
 */
4653 static int __init l2cap_init(void)
4657 err = proto_register(&l2cap_proto, 0);
4661 _busy_wq = create_singlethread_workqueue("l2cap");
4665 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4667 BT_ERR("L2CAP socket registration failed");
4671 err = hci_register_proto(&l2cap_hci_proto);
4673 BT_ERR("L2CAP protocol registration failed");
4674 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs is best-effort: failure only logs an error. */
4679 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4680 bt_debugfs, NULL, &l2cap_debugfs_fops);
4682 BT_ERR("Failed to create L2CAP debug file");
4685 BT_INFO("L2CAP ver %s", VERSION);
4686 BT_INFO("L2CAP socket layer initialized");
4691 proto_unregister(&l2cap_proto);
/*
 * Module exit: tear down in reverse order of l2cap_init — debugfs entry,
 * busy workqueue (flushed first so no work is in flight), socket family,
 * HCI protocol, and finally the proto itself.
 */
4695 static void __exit l2cap_exit(void)
4697 debugfs_remove(l2cap_debugfs);
4699 flush_workqueue(_busy_wq);
4700 destroy_workqueue(_busy_wq);
4702 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4703 BT_ERR("L2CAP socket unregistration failed");
4705 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4706 BT_ERR("L2CAP protocol unregistration failed");
4708 proto_unregister(&l2cap_proto);
/* Exported no-op: referencing this symbol auto-loads the module. */
4711 void l2cap_load(void)
4713 /* Dummy function to trigger automatic L2CAP module loading by
4714 * other modules that use L2CAP sockets but don't use any other
4715 * symbols from it. */
4717 EXPORT_SYMBOL(l2cap_load);
/* Module entry/exit points, parameters and metadata. */
4719 module_init(l2cap_init);
4720 module_exit(l2cap_exit);
/* enable_ertm is a writable module parameter gating ERTM support. */
4722 module_param(enable_ertm, bool, 0644);
4723 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4725 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4726 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4727 MODULE_VERSION(VERSION);
4728 MODULE_LICENSE("GPL");
4729 MODULE_ALIAS("bt-proto-0");