2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
/* Module-wide state. */
/* ERTM disabled by default; presumably toggled via a module parameter
 * defined in an elided line — TODO confirm. */
58 static int enable_ertm = 0;
/* Feature mask advertised in L2CAP information responses. */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Supported fixed-channel bitmap; 0x02 presumably marks the signalling
 * channel bit — verify against the Core spec fixed-channel table. */
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
/* Workqueue used by the local-busy handling (see l2cap_busy_work). */
65 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its own rwlock. */
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for routines defined later in this file. */
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
/* sk_timer expiry handler: choose an errno from the connection phase and
 * close the socket.  NOTE(review): socket locking and the default reason
 * value live in lines elided from this listing. */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
86 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* A timeout while connected/configuring, or while connecting above SDP
 * security level, is reported as a refused connection. */
90 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
91 reason = ECONNREFUSED;
92 else if (sk->sk_state == BT_CONNECT &&
93 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
94 reason = ECONNREFUSED;
98 __l2cap_sock_close(sk, reason);
/* (Re)arm the socket timer to fire 'timeout' jiffies from now. */
106 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
108 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
109 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer. */
112 static void l2cap_sock_clear_timer(struct sock *sk)
114 BT_DBG("sock %p state %d", sk, sk->sk_state);
115 sk_stop_timer(sk, &sk->sk_timer);
118 /* ---- L2CAP channels ---- */
/* Walk the connection's channel list for a channel with the given
 * destination CID.  Caller must hold the list lock. */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
/* Walk the connection's channel list for a channel with the given
 * source CID.  Caller must hold the list lock. */
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
139 /* Find channel with given SCID.
140 * Returns locked socket */
/* Locked lookup wrapper around __l2cap_get_chan_by_scid(); takes the list
 * read lock (acquire is in an elided line) and, per the comment above,
 * returns the socket locked. */
141 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
145 s = __l2cap_get_chan_by_scid(l, cid);
148 read_unlock(&l->lock);
/* Find the channel whose outstanding signalling request used 'ident'.
 * Caller must hold the list lock. */
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident() (read_lock acquire is
 * in an elided line). */
162 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 s = __l2cap_get_chan_by_ident(l, ident);
169 read_unlock(&l->lock);
/* Allocate the first free dynamic source CID on this connection by linear
 * scan over [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
175 u16 cid = L2CAP_CID_DYN_START;
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push 'sk' onto the front of the connection's channel list.  Caller must
 * hold the list write lock (taking it is in an elided line or the caller). */
185 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
190 l2cap_pi(l->head)->prev_c = sk;
192 l2cap_pi(sk)->next_c = l->head;
193 l2cap_pi(sk)->prev_c = NULL;
/* Unlink 'sk' from the connection's doubly linked channel list under the
 * list write lock.  (The head-update branch is in elided lines.) */
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
199 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
201 write_lock_bh(&l->lock);
206 l2cap_pi(next)->prev_c = prev;
208 l2cap_pi(prev)->next_c = next;
209 write_unlock_bh(&l->lock);
/* Attach a socket to a connection: assign CIDs according to socket type,
 * link it into the channel list and, if it has a parent listener, queue it
 * on the parent's accept queue.  Caller holds the channel-list write lock. */
214 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
216 struct l2cap_chan_list *l = &conn->chan_list;
218 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
219 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: presumably HCI "remote user terminated" disconnect reason —
 * TODO confirm against the HCI error-code table. */
221 conn->disc_reason = 0x13;
223 l2cap_pi(sk)->conn = conn;
225 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
226 /* Alloc CID for connection-oriented socket */
227 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
228 } else if (sk->sk_type == SOCK_DGRAM) {
229 /* Connectionless socket */
230 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
231 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
232 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 /* Raw socket can send/recv signalling messages only */
235 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
236 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
240 __l2cap_chan_link(l, sk);
243 bt_accept_enqueue(parent, sk);
247 * Must be called on the locked socket. */
/* Delete the channel: unlink from the connection, mark the socket closed
 * and zapped, wake the owner (or the listening parent), and purge all ERTM
 * state (queues, timers, SREJ list).  Must be called on the locked socket;
 * the conn/parent NULL-check branches sit in elided lines. */
248 static void l2cap_chan_del(struct sock *sk, int err)
250 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
251 struct sock *parent = bt_sk(sk)->parent;
253 l2cap_sock_clear_timer(sk);
255 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
258 /* Unlink from channel list */
259 l2cap_chan_unlink(&conn->chan_list, sk);
260 l2cap_pi(sk)->conn = NULL;
/* Drop the hci_conn reference taken when the channel was added. */
261 hci_conn_put(conn->hcon);
264 sk->sk_state = BT_CLOSED;
265 sock_set_flag(sk, SOCK_ZAPPED);
271 bt_accept_unlink(sk);
/* Notify the listening parent that its accept queue changed. */
272 parent->sk_data_ready(parent, 0);
274 sk->sk_state_change(sk);
276 skb_queue_purge(TX_QUEUE(sk));
278 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
279 struct srej_list *l, *tmp;
281 del_timer(&l2cap_pi(sk)->retrans_timer);
282 del_timer(&l2cap_pi(sk)->monitor_timer);
283 del_timer(&l2cap_pi(sk)->ack_timer);
285 skb_queue_purge(SREJ_QUEUE(sk));
286 skb_queue_purge(BUSY_QUEUE(sk));
/* Free any pending selective-reject entries (body elided here). */
288 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
295 /* Service level security */
/* Map the channel's security level (and the special SDP PSM 0x0001) to an
 * HCI authentication type, then ask the HCI layer to enforce it. */
296 static inline int l2cap_check_security(struct sock *sk)
298 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* PSM 0x0001 (SDP) never bonds; downgrade LOW to the SDP level. */
301 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
302 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
303 auth_type = HCI_AT_NO_BONDING_MITM;
305 auth_type = HCI_AT_NO_BONDING;
307 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
308 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
310 switch (l2cap_pi(sk)->sec_level) {
311 case BT_SECURITY_HIGH:
312 auth_type = HCI_AT_GENERAL_BONDING_MITM;
314 case BT_SECURITY_MEDIUM:
315 auth_type = HCI_AT_GENERAL_BONDING;
318 auth_type = HCI_AT_NO_BONDING;
323 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel's 1-128 range, under conn->lock. */
327 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
331 /* Get next available identificator.
332 * 1 - 128 are used by kernel.
333 * 129 - 199 are reserved.
334 * 200 - 254 are used by utilities like l2ping, etc.
337 spin_lock_bh(&conn->lock);
/* Wrap-around assignment back to 1 is in an elided line. */
339 if (++conn->tx_ident > 128)
344 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command and push it out over the ACL link.
 * (The NULL-check on the built skb is in an elided line.) */
349 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
351 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
353 BT_DBG("code 0x%2.2x", code);
358 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM supervisory frame (S-frame) carrying the
 * given control field, folding in pending Final/Poll bits and an optional
 * CRC16 FCS. */
361 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
364 struct l2cap_hdr *lh;
365 struct l2cap_conn *conn = pi->conn;
366 struct sock *sk = (struct sock *)pi;
367 int count, hlen = L2CAP_HDR_SIZE + 2;
369 if (sk->sk_state != BT_CONNECTED)
/* hlen grows by the 2-byte FCS here (increment line elided). */
372 if (pi->fcs == L2CAP_FCS_CRC16)
375 BT_DBG("pi %p, control 0x%2.2x", pi, control);
377 count = min_t(unsigned int, conn->mtu, hlen);
378 control |= L2CAP_CTRL_FRAME_TYPE;
/* Consume a pending F-bit, then a pending P-bit, into this frame. */
380 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
381 control |= L2CAP_CTRL_FINAL;
382 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
385 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
386 control |= L2CAP_CTRL_POLL;
387 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
390 skb = bt_skb_alloc(count, GFP_ATOMIC);
394 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
395 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
396 lh->cid = cpu_to_le16(pi->dcid);
397 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers header + control (everything before the FCS field). */
399 if (pi->fcs == L2CAP_FCS_CRC16) {
400 u16 fcs = crc16(0, (u8 *)lh, count - 2);
401 put_unaligned_le16(fcs, skb_put(skb, 2));
404 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RR (receiver ready) or RNR (receiver not ready) depending on local
 * busy state, acknowledging up to buffer_seq. */
407 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
409 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
410 control |= L2CAP_SUPER_RCV_NOT_READY;
411 pi->conn_state |= L2CAP_CONN_RNR_SENT;
413 control |= L2CAP_SUPER_RCV_READY;
415 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
417 l2cap_send_sframe(pi, control);
/* True when no connect request is currently outstanding for this channel. */
420 static inline int __l2cap_no_conn_pending(struct sock *sk)
422 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: if the feature-mask exchange is done,
 * send a connect request (once security passes); otherwise start the
 * information-request exchange first. */
425 static void l2cap_do_start(struct sock *sk)
427 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
429 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight — wait for its completion. */
430 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
433 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
434 struct l2cap_conn_req req;
435 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
436 req.psm = l2cap_pi(sk)->psm;
438 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
439 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
441 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
442 L2CAP_CONN_REQ, sizeof(req), &req);
/* Feature mask not requested yet: ask for it and arm the info timer. */
445 struct l2cap_info_req req;
446 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
448 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
449 conn->info_ident = l2cap_get_ident(conn);
451 mod_timer(&conn->info_timer, jiffies +
452 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
454 l2cap_send_cmd(conn, conn->info_ident,
455 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send an L2CAP disconnect request for this channel, after purging the TX
 * queue and stopping ERTM timers; moves the socket to BT_DISCONN.  The use
 * of 'err' (stored as sk_err in an elided line, presumably) — TODO confirm. */
459 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
461 struct l2cap_disconn_req req;
466 skb_queue_purge(TX_QUEUE(sk));
468 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
469 del_timer(&l2cap_pi(sk)->retrans_timer);
470 del_timer(&l2cap_pi(sk)->monitor_timer);
471 del_timer(&l2cap_pi(sk)->ack_timer);
474 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
475 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
476 l2cap_send_cmd(conn, l2cap_get_ident(conn),
477 L2CAP_DISCONN_REQ, sizeof(req), &req);
479 sk->sk_state = BT_DISCONN;
483 /* ---- L2CAP connections ---- */
/* Walk all channels on the connection once the feature exchange finished:
 * fire connect requests for channels in BT_CONNECT and answer pending
 * incoming requests for channels in BT_CONNECT2.  Per-socket bh locking
 * lives in lines elided from this listing. */
484 static void l2cap_conn_start(struct l2cap_conn *conn)
486 struct l2cap_chan_list *l = &conn->chan_list;
489 BT_DBG("conn %p", conn);
493 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Only connection-oriented sockets take part in setup. */
496 if (sk->sk_type != SOCK_SEQPACKET &&
497 sk->sk_type != SOCK_STREAM) {
502 if (sk->sk_state == BT_CONNECT) {
503 if (l2cap_check_security(sk) &&
504 __l2cap_no_conn_pending(sk)) {
505 struct l2cap_conn_req req;
506 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
507 req.psm = l2cap_pi(sk)->psm;
509 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
510 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
512 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
513 L2CAP_CONN_REQ, sizeof(req), &req);
515 } else if (sk->sk_state == BT_CONNECT2) {
516 struct l2cap_conn_rsp rsp;
517 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
518 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
520 if (l2cap_check_security(sk)) {
/* Deferred setup: report "pending / authorization pending"
 * and let userspace decide via the parent socket. */
521 if (bt_sk(sk)->defer_setup) {
522 struct sock *parent = bt_sk(sk)->parent;
523 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
524 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
525 parent->sk_data_ready(parent, 0);
528 sk->sk_state = BT_CONFIG;
529 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
530 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not satisfied yet: answer pending/authentication. */
533 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
534 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
537 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
538 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
544 read_unlock(&l->lock);
/* ACL link is up: mark non-connection-oriented sockets connected right
 * away, and start setup for sockets waiting in BT_CONNECT (the call made
 * for that branch is in an elided line — presumably l2cap_do_start). */
547 static void l2cap_conn_ready(struct l2cap_conn *conn)
549 struct l2cap_chan_list *l = &conn->chan_list;
552 BT_DBG("conn %p", conn);
556 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
559 if (sk->sk_type != SOCK_SEQPACKET &&
560 sk->sk_type != SOCK_STREAM) {
561 l2cap_sock_clear_timer(sk);
562 sk->sk_state = BT_CONNECTED;
563 sk->sk_state_change(sk);
564 } else if (sk->sk_state == BT_CONNECT)
570 read_unlock(&l->lock);
573 /* Notify sockets that we cannot guaranty reliability anymore */
/* Link became unreliable: flag an error on every channel that demanded
 * reliability (the error-propagation line is elided). */
574 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
576 struct l2cap_chan_list *l = &conn->chan_list;
579 BT_DBG("conn %p", conn);
583 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
584 if (l2cap_pi(sk)->force_reliable)
588 read_unlock(&l->lock);
/* Information-request timer expired: give up on the feature exchange,
 * mark it done and proceed with connection setup anyway. */
591 static void l2cap_info_timeout(unsigned long arg)
593 struct l2cap_conn *conn = (void *) arg;
595 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
596 conn->info_ident = 0;
598 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an HCI
 * connection.  Allocation failure / early-return checks sit in elided
 * lines; 'status' usage is not visible here. */
601 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
603 struct l2cap_conn *conn = hcon->l2cap_data;
608 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
612 hcon->l2cap_data = conn;
615 BT_DBG("hcon %p conn %p", hcon, conn);
/* Inherit the ACL MTU and addresses from the underlying HCI device. */
617 conn->mtu = hcon->hdev->acl_mtu;
618 conn->src = &hcon->hdev->bdaddr;
619 conn->dst = &hcon->dst;
623 spin_lock_init(&conn->lock);
624 rwlock_init(&conn->chan_list.lock);
626 setup_timer(&conn->info_timer, l2cap_info_timeout,
627 (unsigned long) conn);
/* Default disconnect reason (presumably "remote user terminated"). */
629 conn->disc_reason = 0x13;
/* Tear down the L2CAP connection: free any partial RX skb, kill every
 * channel on the list (per-socket lock/unlock and kfree(conn) are in
 * elided lines), and detach from the hci_conn. */
634 static void l2cap_conn_del(struct hci_conn *hcon, int err)
636 struct l2cap_conn *conn = hcon->l2cap_data;
642 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
/* kfree_skb(NULL) is a no-op, so a partial reassembly buffer is optional. */
644 kfree_skb(conn->rx_skb);
647 while ((sk = conn->chan_list.head)) {
649 l2cap_chan_del(sk, err);
654 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
655 del_timer_sync(&conn->info_timer);
657 hcon->l2cap_data = NULL;
/* Locked wrapper: add the channel under the channel-list write lock. */
661 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
663 struct l2cap_chan_list *l = &conn->chan_list;
664 write_lock_bh(&l->lock);
665 __l2cap_chan_add(conn, sk, parent);
666 write_unlock_bh(&l->lock);
669 /* ---- Socket interface ---- */
/* Exact-match lookup: socket bound to this PSM and source address.
 * Caller must hold l2cap_sk_list.lock. */
670 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
673 struct hlist_node *node;
674 sk_for_each(sk, node, &l2cap_sk_list.head)
675 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
682 /* Find socket with psm and source bdaddr.
683 * Returns closest match.
/* Closest-match PSM lookup: prefer a socket bound to the exact source
 * address; fall back to one bound to BDADDR_ANY (tracked in sk1, whose
 * assignment is in an elided line). */
685 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
687 struct sock *sk = NULL, *sk1 = NULL;
688 struct hlist_node *node;
690 sk_for_each(sk, node, &l2cap_sk_list.head) {
691 if (state && sk->sk_state != state)
694 if (l2cap_pi(sk)->psm == psm) {
696 if (!bacmp(&bt_sk(sk)->src, src))
700 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node non-NULL means the loop broke on an exact match. */
704 return node ? sk : sk1;
707 /* Find socket with given address (psm, src).
708 * Returns locked socket */
/* Locked wrapper around __l2cap_get_sock_by_psm(); per the comment above,
 * returns the socket locked (bh_lock in an elided line). */
709 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
712 read_lock(&l2cap_sk_list.lock);
713 s = __l2cap_get_sock_by_psm(state, psm, src);
716 read_unlock(&l2cap_sk_list.lock);
/* Socket destructor: drop anything still queued for RX or TX. */
720 static void l2cap_sock_destruct(struct sock *sk)
724 skb_queue_purge(&sk->sk_receive_queue);
725 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then zap the
 * parent itself. */
728 static void l2cap_sock_cleanup_listen(struct sock *parent)
732 BT_DBG("parent %p", parent);
734 /* Close not yet accepted channels */
735 while ((sk = bt_accept_dequeue(parent, NULL)))
736 l2cap_sock_close(sk);
738 parent->sk_state = BT_CLOSED;
739 sock_set_flag(parent, SOCK_ZAPPED);
742 /* Kill socket (only if zapped and orphan)
743 * Must be called on unlocked socket.
/* Kill socket (only if zapped and orphan).  Must be called on an unlocked
 * socket; the final sock_put() is in an elided line. */
745 static void l2cap_sock_kill(struct sock *sk)
747 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
750 BT_DBG("sk %p state %d", sk, sk->sk_state);
752 /* Kill poor orphan */
753 bt_sock_unlink(&l2cap_sk_list, sk);
754 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listeners clean their accept queue; established
 * connection-oriented channels send a disconnect request; half-open
 * incoming channels (BT_CONNECT2) are refused with a connect response.
 * The switch's case labels sit in elided lines — the groupings below are
 * inferred from the visible bodies. */
758 static void __l2cap_sock_close(struct sock *sk, int reason)
760 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
762 switch (sk->sk_state) {
764 l2cap_sock_cleanup_listen(sk);
/* Connected/config: start a timed disconnect handshake. */
769 if (sk->sk_type == SOCK_SEQPACKET ||
770 sk->sk_type == SOCK_STREAM) {
771 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
773 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
774 l2cap_send_disconn_req(conn, sk, reason);
776 l2cap_chan_del(sk, reason);
/* Incoming connection not yet accepted: reject it explicitly. */
780 if (sk->sk_type == SOCK_SEQPACKET ||
781 sk->sk_type == SOCK_STREAM) {
782 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
783 struct l2cap_conn_rsp rsp;
786 if (bt_sk(sk)->defer_setup)
787 result = L2CAP_CR_SEC_BLOCK;
789 result = L2CAP_CR_BAD_PSM;
791 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
792 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
793 rsp.result = cpu_to_le16(result);
794 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
795 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
796 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
798 l2cap_chan_del(sk, reason);
/* Other connecting states just drop the channel. */
803 l2cap_chan_del(sk, reason);
/* Default: nothing to tear down, only mark the socket zapped. */
807 sock_set_flag(sk, SOCK_ZAPPED);
812 /* Must be called on unlocked socket. */
/* Public close path (unlocked socket): stop the timer and close with
 * ECONNRESET; locking around the inner call is in elided lines. */
813 static void l2cap_sock_close(struct sock *sk)
815 l2cap_sock_clear_timer(sk);
817 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a fresh L2CAP socket.  With a parent (incoming accept) the
 * child inherits the listener's settings; otherwise defaults are applied.
 * The if/else framing lines are elided from this listing. */
822 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
824 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* --- inherited from parent --- */
829 sk->sk_type = parent->sk_type;
830 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
832 pi->imtu = l2cap_pi(parent)->imtu;
833 pi->omtu = l2cap_pi(parent)->omtu;
834 pi->mode = l2cap_pi(parent)->mode;
835 pi->fcs = l2cap_pi(parent)->fcs;
836 pi->max_tx = l2cap_pi(parent)->max_tx;
837 pi->tx_win = l2cap_pi(parent)->tx_win;
838 pi->sec_level = l2cap_pi(parent)->sec_level;
839 pi->role_switch = l2cap_pi(parent)->role_switch;
840 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* --- defaults for a parentless socket --- */
842 pi->imtu = L2CAP_DEFAULT_MTU;
/* SOCK_STREAM defaults to ERTM only when the module enables it. */
844 if (enable_ertm && sk->sk_type == SOCK_STREAM)
845 pi->mode = L2CAP_MODE_ERTM;
847 pi->mode = L2CAP_MODE_BASIC;
848 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
849 pi->fcs = L2CAP_FCS_CRC16;
850 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
851 pi->sec_level = BT_SECURITY_LOW;
853 pi->force_reliable = 0;
856 /* Default config options */
858 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
859 skb_queue_head_init(TX_QUEUE(sk));
860 skb_queue_head_init(SREJ_QUEUE(sk));
861 skb_queue_head_init(BUSY_QUEUE(sk));
862 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes sk_alloc()'s per-socket area for l2cap_pinfo. */
865 static struct proto l2cap_proto = {
867 .owner = THIS_MODULE,
868 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP socket: hook in the
 * destructor, timeout, timer, and link it into the global socket list.
 * NULL-check on sk_alloc() is in an elided line. */
871 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
875 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
879 sock_init_data(sock, sk);
880 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
882 sk->sk_destruct = l2cap_sock_destruct;
883 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
885 sock_reset_flag(sk, SOCK_ZAPPED);
887 sk->sk_protocol = proto;
888 sk->sk_state = BT_OPEN;
890 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
892 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate socket type, require CAP_NET_RAW for
 * non-kernel raw sockets, then allocate and init the socket. */
896 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
901 BT_DBG("sock %p", sock);
903 sock->state = SS_UNCONNECTED;
905 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
906 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
907 return -ESOCKTNOSUPPORT;
909 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
912 sock->ops = &l2cap_sock_ops;
914 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
918 l2cap_sock_init(sk, NULL);
/* bind(2) backend: copy in the (possibly short) sockaddr_l2, enforce the
 * privileged-PSM range, reject duplicate (psm, bdaddr) bindings, and
 * record the source address.  lock_sock/err plumbing is elided. */
922 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
924 struct sock *sk = sock->sk;
925 struct sockaddr_l2 la;
930 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Tolerate short sockaddrs: zero-fill then copy what the caller gave. */
933 memset(&la, 0, sizeof(la));
934 len = min_t(unsigned int, sizeof(la), alen);
935 memcpy(&la, addr, len);
942 if (sk->sk_state != BT_OPEN) {
/* PSMs below 0x1001 are reserved; require CAP_NET_BIND_SERVICE. */
947 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
948 !capable(CAP_NET_BIND_SERVICE)) {
953 write_lock_bh(&l2cap_sk_list.lock);
955 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
958 /* Save source address */
959 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
960 l2cap_pi(sk)->psm = la.l2_psm;
961 l2cap_pi(sk)->sport = la.l2_psm;
962 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) start at SDP security level. */
964 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
965 __le16_to_cpu(la.l2_psm) == 0x0003)
966 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
969 write_unlock_bh(&l2cap_sk_list.lock);
/* Establish the outgoing connection: route to an HCI device, pick an
 * authentication type from socket type / PSM / security level, create the
 * ACL link and attach the channel.  Error-path labels and the hci_dev_put
 * are in elided lines. */
976 static int l2cap_do_connect(struct sock *sk)
978 bdaddr_t *src = &bt_sk(sk)->src;
979 bdaddr_t *dst = &bt_sk(sk)->dst;
980 struct l2cap_conn *conn;
981 struct hci_conn *hcon;
982 struct hci_dev *hdev;
986 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
989 hdev = hci_get_route(dst, src);
991 return -EHOSTUNREACH;
993 hci_dev_lock_bh(hdev);
/* Raw sockets bond dedicated; SDP never bonds; everything else uses
 * general bonding per its security level (mirrors l2cap_check_security). */
997 if (sk->sk_type == SOCK_RAW) {
998 switch (l2cap_pi(sk)->sec_level) {
999 case BT_SECURITY_HIGH:
1000 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1002 case BT_SECURITY_MEDIUM:
1003 auth_type = HCI_AT_DEDICATED_BONDING;
1006 auth_type = HCI_AT_NO_BONDING;
1009 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1010 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1011 auth_type = HCI_AT_NO_BONDING_MITM;
1013 auth_type = HCI_AT_NO_BONDING;
1015 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1016 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1018 switch (l2cap_pi(sk)->sec_level) {
1019 case BT_SECURITY_HIGH:
1020 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1022 case BT_SECURITY_MEDIUM:
1023 auth_type = HCI_AT_GENERAL_BONDING;
1026 auth_type = HCI_AT_NO_BONDING;
1031 hcon = hci_connect(hdev, ACL_LINK, dst,
1032 l2cap_pi(sk)->sec_level, auth_type);
1036 conn = l2cap_conn_add(hcon, 0);
1044 /* Update source addr of the socket */
1045 bacpy(src, conn->src);
1047 l2cap_chan_add(conn, sk, NULL);
1049 sk->sk_state = BT_CONNECT;
1050 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Link already up: non-connection-oriented sockets finish immediately;
 * the do_start branch for others is in an elided line. */
1052 if (hcon->state == BT_CONNECTED) {
1053 if (sk->sk_type != SOCK_SEQPACKET &&
1054 sk->sk_type != SOCK_STREAM) {
1055 l2cap_sock_clear_timer(sk);
1056 sk->sk_state = BT_CONNECTED;
1062 hci_dev_unlock_bh(hdev);
/* connect(2) backend: validate the address and channel mode, reject bad
 * states, record the destination, start the connection, and optionally
 * block until BT_CONNECTED.  lock_sock/err plumbing and the case labels
 * of both switches are elided from this listing. */
1067 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1069 struct sock *sk = sock->sk;
1070 struct sockaddr_l2 la;
1073 BT_DBG("sk %p", sk);
1075 if (!addr || alen < sizeof(addr->sa_family) ||
1076 addr->sa_family != AF_BLUETOOTH)
1079 memset(&la, 0, sizeof(la));
1080 len = min_t(unsigned int, sizeof(la), alen);
1081 memcpy(&la, addr, len);
/* Connection-oriented sockets need a PSM (the check's RHS is elided). */
1088 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1094 switch (l2cap_pi(sk)->mode) {
1095 case L2CAP_MODE_BASIC:
/* ERTM/streaming presumably allowed only when enable_ertm is set —
 * the guard line is elided; TODO confirm. */
1097 case L2CAP_MODE_ERTM:
1098 case L2CAP_MODE_STREAMING:
1107 switch (sk->sk_state) {
1111 /* Already connecting */
1115 /* Already connected */
1128 /* Set destination address and psm */
1129 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1130 l2cap_pi(sk)->psm = la.l2_psm;
1132 err = l2cap_do_connect(sk);
1137 err = bt_sock_wait_state(sk, BT_CONNECTED,
1138 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2) backend: only bound, connection-oriented sockets in a valid
 * mode may listen; auto-assign an odd PSM from the dynamic range
 * (0x1001..0x10ff) if none was bound. */
1144 static int l2cap_sock_listen(struct socket *sock, int backlog)
1146 struct sock *sk = sock->sk;
1149 BT_DBG("sk %p backlog %d", sk, backlog);
1153 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1154 || sk->sk_state != BT_BOUND) {
1159 switch (l2cap_pi(sk)->mode) {
1160 case L2CAP_MODE_BASIC:
1162 case L2CAP_MODE_ERTM:
1163 case L2CAP_MODE_STREAMING:
/* No PSM bound yet: claim the first free odd dynamic PSM. */
1172 if (!l2cap_pi(sk)->psm) {
1173 bdaddr_t *src = &bt_sk(sk)->src;
1178 write_lock_bh(&l2cap_sk_list.lock);
1180 for (psm = 0x1001; psm < 0x1100; psm += 2)
1181 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1182 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1183 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1188 write_unlock_bh(&l2cap_sk_list.lock);
1194 sk->sk_max_ack_backlog = backlog;
1195 sk->sk_ack_backlog = 0;
1196 sk->sk_state = BT_LISTEN;
/* accept(2) backend: sleep on the listener's wait queue (wake-one) until a
 * child can be dequeued, honoring O_NONBLOCK, signals, and listener state
 * changes.  Some loop-exit branches (timeout, release_sock) are elided. */
1203 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1205 DECLARE_WAITQUEUE(wait, current);
1206 struct sock *sk = sock->sk, *nsk;
1210 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1212 if (sk->sk_state != BT_LISTEN) {
1217 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1219 BT_DBG("sk %p timeo %ld", sk, timeo);
1221 /* Wait for an incoming connection. (wake-one). */
1222 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1223 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1224 set_current_state(TASK_INTERRUPTIBLE);
/* Socket is released while sleeping (elided), then re-locked. */
1231 timeo = schedule_timeout(timeo);
1232 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1234 if (sk->sk_state != BT_LISTEN) {
1239 if (signal_pending(current)) {
1240 err = sock_intr_errno(timeo);
1244 set_current_state(TASK_RUNNING);
1245 remove_wait_queue(sk_sleep(sk), &wait);
1250 newsock->state = SS_CONNECTED;
1252 BT_DBG("new socket %p", nsk);
/* getname(2)/getpeername(2) backend: fill a sockaddr_l2 with the peer
 * (psm/dst/dcid) or local (sport/src/scid) endpoint.  The 'if (peer)'
 * line separating the two branches is elided. */
1259 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1261 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1262 struct sock *sk = sock->sk;
1264 BT_DBG("sock %p, sk %p", sock, sk);
1266 addr->sa_family = AF_BLUETOOTH;
1267 *len = sizeof(struct sockaddr_l2);
1270 la->l2_psm = l2cap_pi(sk)->psm;
1271 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1272 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1274 la->l2_psm = l2cap_pi(sk)->sport;
1275 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1276 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1282 static int __l2cap_wait_ack(struct sock *sk)
1284 DECLARE_WAITQUEUE(wait, current);
1288 add_wait_queue(sk_sleep(sk), &wait);
1289 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1290 set_current_state(TASK_INTERRUPTIBLE);
1295 if (signal_pending(current)) {
1296 err = sock_intr_errno(timeo);
1301 timeo = schedule_timeout(timeo);
1304 err = sock_error(sk);
1308 set_current_state(TASK_RUNNING);
1309 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: after remote_max_tx unanswered polls tear the
 * channel down; otherwise re-arm and poll the peer again with P=1. */
1313 static void l2cap_monitor_timeout(unsigned long arg)
1315 struct sock *sk = (void *) arg;
1317 BT_DBG("sk %p", sk);
1320 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1321 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1326 l2cap_pi(sk)->retry_count++;
1327 __mod_monitor_timer();
1329 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: switch to the monitor/poll phase — start the
 * retry counter, set WAIT_F, and poll the peer. */
1333 static void l2cap_retrans_timeout(unsigned long arg)
1335 struct sock *sk = (void *) arg;
1337 BT_DBG("sk %p", sk);
1340 l2cap_pi(sk)->retry_count = 1;
1341 __mod_monitor_timer();
1343 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1345 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Drop frames from the head of the TX queue that the peer has acked,
 * stopping at expected_ack_seq; cancel the retransmission timer once the
 * window drains.  (kfree_skb of the dequeued frame is in an elided line.) */
1349 static void l2cap_drop_acked_frames(struct sock *sk)
1351 struct sk_buff *skb;
1353 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1354 l2cap_pi(sk)->unacked_frames) {
1355 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1358 skb = skb_dequeue(TX_QUEUE(sk));
1361 l2cap_pi(sk)->unacked_frames--;
1364 if (!l2cap_pi(sk)->unacked_frames)
1365 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand a fully built frame to the HCI layer on this channel's ACL link. */
1368 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1370 struct l2cap_pinfo *pi = l2cap_pi(sk);
1372 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1374 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming-mode transmit: clone each queued frame, stamp the TxSeq into
 * the control field, append the CRC16 FCS if enabled, send it, and free
 * the original (streaming mode never retransmits). */
1377 static int l2cap_streaming_send(struct sock *sk)
1379 struct sk_buff *skb, *tx_skb;
1380 struct l2cap_pinfo *pi = l2cap_pi(sk);
1383 while ((skb = sk->sk_send_head)) {
1384 tx_skb = skb_clone(skb, GFP_ATOMIC);
1386 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1387 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1388 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1390 if (pi->fcs == L2CAP_FCS_CRC16) {
1391 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1392 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1395 l2cap_do_send(sk, tx_skb);
/* TxSeq is modulo-64 per the ERTM/streaming sequence space. */
1397 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1399 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1400 sk->sk_send_head = NULL;
1402 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Original is dropped after send (kfree_skb in an elided line). */
1404 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the single queued I-frame whose TxSeq matches 'tx_seq':
 * locate it in the TX queue, enforce the retry ceiling, rebuild the
 * control field (F-bit, ReqSeq, TxSeq), refresh the FCS and resend. */
1410 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1412 struct l2cap_pinfo *pi = l2cap_pi(sk);
1413 struct sk_buff *skb, *tx_skb;
1416 skb = skb_peek(TX_QUEUE(sk));
1421 if (bt_cb(skb)->tx_seq == tx_seq)
1424 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1427 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
/* Too many (re)transmissions of this frame: abort the channel. */
1429 if (pi->remote_max_tx &&
1430 bt_cb(skb)->retries == pi->remote_max_tx) {
1431 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1435 tx_skb = skb_clone(skb, GFP_ATOMIC);
1436 bt_cb(skb)->retries++;
1437 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1439 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1440 control |= L2CAP_CTRL_FINAL;
1441 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1444 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1445 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1447 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS covers everything except its own trailing two bytes. */
1449 if (pi->fcs == L2CAP_FCS_CRC16) {
1450 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1451 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1454 l2cap_do_send(sk, tx_skb);
/* ERTM transmit path: send queued I-frames until the transmit window
 * fills or the queue drains.  For each frame: clone it, stamp F-bit,
 * ReqSeq and TxSeq into the control field, refresh the FCS, send, and
 * arm the retransmission timer.  (Return-value accounting lines are
 * elided from this listing.) */
1457 static int l2cap_ertm_send(struct sock *sk)
1459 struct sk_buff *skb, *tx_skb;
1460 struct l2cap_pinfo *pi = l2cap_pi(sk);
1464 if (sk->sk_state != BT_CONNECTED)
1467 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
/* Abort the channel once a frame has hit the peer's MaxTx limit. */
1469 if (pi->remote_max_tx &&
1470 bt_cb(skb)->retries == pi->remote_max_tx) {
1471 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1475 tx_skb = skb_clone(skb, GFP_ATOMIC);
1477 bt_cb(skb)->retries++;
/* Keep only the SAR bits; sequence/F fields are rebuilt below. */
1479 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1480 control &= L2CAP_CTRL_SAR;
1482 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1483 control |= L2CAP_CTRL_FINAL;
1484 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1486 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1487 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1488 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FIX: compute and store the FCS through tx_skb — the buffer that is
 * actually transmitted — matching l2cap_streaming_send() and
 * l2cap_retransmit_one_frame().  The original wrote through skb->data,
 * which only worked because skb_clone() shares the data area with the
 * original; referencing the clone is the correct, self-consistent form. */
1491 if (pi->fcs == L2CAP_FCS_CRC16) {
1492 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1493 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1496 l2cap_do_send(sk, tx_skb);
1498 __mod_retrans_timer();
1500 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1501 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1503 pi->unacked_frames++;
1506 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1507 sk->sk_send_head = NULL;
1509 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Rewind the send pointer to the oldest unacked frame and retransmit the
 * whole window, serialized by the send lock. */
1517 static int l2cap_retransmit_frames(struct sock *sk)
1519 struct l2cap_pinfo *pi = l2cap_pi(sk);
1522 spin_lock_bh(&pi->send_lock);
1524 if (!skb_queue_empty(TX_QUEUE(sk)))
1525 sk->sk_send_head = TX_QUEUE(sk)->next;
/* Restart numbering from the last sequence the peer acknowledged. */
1527 pi->next_tx_seq = pi->expected_ack_seq;
1528 ret = l2cap_ertm_send(sk);
1530 spin_unlock_bh(&pi->send_lock);
/* Acknowledge received frames: send RNR when locally busy; otherwise try
 * to piggy-back the ack on pending I-frames via l2cap_ertm_send(), and
 * only fall back to an explicit RR if nothing was sent (the nframes check
 * is in an elided line). */
1535 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1537 struct sock *sk = (struct sock *)pi;
1541 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1543 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1544 control |= L2CAP_SUPER_RCV_NOT_READY;
1545 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1546 l2cap_send_sframe(pi, control);
1550 spin_lock_bh(&pi->send_lock);
1551 nframes = l2cap_ertm_send(sk);
1552 spin_unlock_bh(&pi->send_lock);
1557 control |= L2CAP_SUPER_RCV_READY;
1558 l2cap_send_sframe(pi, control);
/* Send a SREJ S-frame with F=1 for the last entry on the SREJ list. */
1561 static void l2cap_send_srejtail(struct sock *sk)
1563 struct srej_list *tail;
1566 control = L2CAP_SUPER_SELECT_REJECT;
1567 control |= L2CAP_CTRL_FINAL;
1569 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1570 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1572 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy a user iovec into an skb: the first 'count' bytes go into the
 * headed skb, the remainder is split into frag_list continuation
 * fragments of at most conn->mtu bytes each (no L2CAP header on the
 * fragments). Returns 0 on success or a negative errno.
 */
1575 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1577 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1578 struct sk_buff **frag;
1581 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1587 /* Continuation fragments (no L2CAP header) */
1588 frag = &skb_shinfo(skb)->frag_list;
1590 count = min_t(unsigned int, conn->mtu, len);
1592 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1595 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1601 frag = &(*frag)->next;
/* Build a connectionless (SOCK_DGRAM) PDU from a user message.
 * Header is the 4-byte L2CAP header plus a 2-byte PSM prefix
 * (hlen = L2CAP_HDR_SIZE + 2). Returns the skb or an ERR_PTR.
 */
1607 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1609 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1610 struct sk_buff *skb;
1611 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1612 struct l2cap_hdr *lh;
1614 BT_DBG("sk %p len %d", sk, (int)len);
/* First fragment carries at most conn->mtu - hlen payload bytes. */
1616 count = min_t(unsigned int, (conn->mtu - hlen), len);
1617 skb = bt_skb_send_alloc(sk, count + hlen,
1618 msg->msg_flags & MSG_DONTWAIT, &err);
1620 return ERR_PTR(-ENOMEM);
1622 /* Create L2CAP header */
1623 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1624 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
/* L2CAP length covers payload plus the 2-byte PSM. */
1625 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1626 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1628 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1629 if (unlikely(err < 0)) {
1631 return ERR_PTR(err);
/* Build a Basic-mode PDU from a user message: plain 4-byte L2CAP
 * header followed by the payload. Returns the skb or an ERR_PTR.
 */
1636 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1638 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1639 struct sk_buff *skb;
1640 int err, count, hlen = L2CAP_HDR_SIZE;
1641 struct l2cap_hdr *lh;
1643 BT_DBG("sk %p len %d", sk, (int)len);
1645 count = min_t(unsigned int, (conn->mtu - hlen), len);
1646 skb = bt_skb_send_alloc(sk, count + hlen,
1647 msg->msg_flags & MSG_DONTWAIT, &err);
1649 return ERR_PTR(-ENOMEM);
1651 /* Create L2CAP header */
1652 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1653 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1654 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1656 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1657 if (unlikely(err < 0)) {
1659 return ERR_PTR(err);
/* Build an ERTM/Streaming I-frame PDU from a user message.
 * hlen starts at L2CAP header + 2 bytes of control field; a SAR SDU
 * length word and a 2-byte FCS placeholder are appended when used.
 * The FCS is zero-filled here and computed at transmit time.
 * Returns the skb (retries initialised to 0) or an ERR_PTR.
 */
1664 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1666 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1667 struct sk_buff *skb;
1668 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1669 struct l2cap_hdr *lh;
1671 BT_DBG("sk %p len %d", sk, (int)len);
1674 return ERR_PTR(-ENOTCONN);
/* Reserve room for the trailing FCS when CRC16 is negotiated. */
1679 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1682 count = min_t(unsigned int, (conn->mtu - hlen), len);
1683 skb = bt_skb_send_alloc(sk, count + hlen,
1684 msg->msg_flags & MSG_DONTWAIT, &err);
1686 return ERR_PTR(-ENOMEM);
1688 /* Create L2CAP header */
1689 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1690 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1691 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1692 put_unaligned_le16(control, skb_put(skb, 2));
/* SAR start frames carry the total SDU length after the control word. */
1694 put_unaligned_le16(sdulen, skb_put(skb, 2));
1696 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1697 if (unlikely(err < 0)) {
1699 return ERR_PTR(err);
1702 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1703 put_unaligned_le16(0, skb_put(skb, 2));
1705 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START frame,
 * zero or more CONTINUE frames and a final END frame, built on a
 * local queue so a mid-stream allocation failure can purge the
 * partial segmentation without touching the socket TX queue. On
 * success the segments are spliced onto TX_QUEUE(sk) and
 * sk_send_head is initialised under pi->send_lock.
 */
1709 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1711 struct l2cap_pinfo *pi = l2cap_pi(sk);
1712 struct sk_buff *skb;
1713 struct sk_buff_head sar_queue;
1717 skb_queue_head_init(&sar_queue);
1718 control = L2CAP_SDU_START;
/* START frame carries the total SDU length as the SAR sdulen field. */
1719 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1721 return PTR_ERR(skb);
1723 __skb_queue_tail(&sar_queue, skb);
1724 len -= pi->remote_mps;
1725 size += pi->remote_mps;
1730 if (len > pi->remote_mps) {
1731 control = L2CAP_SDU_CONTINUE;
1732 buflen = pi->remote_mps;
1734 control = L2CAP_SDU_END;
1738 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Drop everything built so far on allocation failure. */
1740 skb_queue_purge(&sar_queue);
1741 return PTR_ERR(skb);
1744 __skb_queue_tail(&sar_queue, skb);
1748 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1749 spin_lock_bh(&pi->send_lock);
1750 if (sk->sk_send_head == NULL)
1751 sk->sk_send_head = sar_queue.next;
1752 spin_unlock_bh(&pi->send_lock);
/* sendmsg() for L2CAP sockets.
 * SOCK_DGRAM sockets get a connectionless PDU sent immediately.
 * Connection-oriented sockets dispatch on pi->mode:
 *   BASIC      - one PDU, bounded by the outgoing MTU.
 *   ERTM/STREAMING - whole SDU as one I-frame if it fits the remote
 *     MPS, otherwise SAR-segmented; then the streaming or ERTM
 *     sender is kicked (the latter under pi->send_lock).
 * Returns bytes queued/sent or a negative errno.
 */
1757 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1759 struct sock *sk = sock->sk;
1760 struct l2cap_pinfo *pi = l2cap_pi(sk);
1761 struct sk_buff *skb;
1765 BT_DBG("sock %p, sk %p", sock, sk);
1767 err = sock_error(sk);
1771 if (msg->msg_flags & MSG_OOB)
1776 if (sk->sk_state != BT_CONNECTED) {
1781 /* Connectionless channel */
1782 if (sk->sk_type == SOCK_DGRAM) {
1783 skb = l2cap_create_connless_pdu(sk, msg, len);
1787 l2cap_do_send(sk, skb);
1794 case L2CAP_MODE_BASIC:
1795 /* Check outgoing MTU */
1796 if (len > pi->omtu) {
1801 /* Create a basic PDU */
1802 skb = l2cap_create_basic_pdu(sk, msg, len);
1808 l2cap_do_send(sk, skb);
1812 case L2CAP_MODE_ERTM:
1813 case L2CAP_MODE_STREAMING:
1814 /* Entire SDU fits into one PDU */
1815 if (len <= pi->remote_mps) {
1816 control = L2CAP_SDU_UNSEGMENTED;
1817 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1822 __skb_queue_tail(TX_QUEUE(sk), skb);
1824 if (pi->mode == L2CAP_MODE_ERTM)
1825 spin_lock_bh(&pi->send_lock);
1827 if (sk->sk_send_head == NULL)
1828 sk->sk_send_head = skb;
1830 if (pi->mode == L2CAP_MODE_ERTM)
1831 spin_unlock_bh(&pi->send_lock);
1833 /* Segment SDU into multiples PDUs */
1834 err = l2cap_sar_segment_sdu(sk, msg, len);
1839 if (pi->mode == L2CAP_MODE_STREAMING) {
1840 err = l2cap_streaming_send(sk);
/* FIX: flag test must use bitwise '&'; the original logical '&&'
 * made the second condition true whenever conn_state was nonzero,
 * so new data was wrongly held back while not waiting for an F-bit.
 */
1842 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1843 pi->conn_state & L2CAP_CONN_WAIT_F) {
1847 spin_lock_bh(&pi->send_lock);
1848 err = l2cap_ertm_send(sk);
1849 spin_unlock_bh(&pi->send_lock);
1857 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() for L2CAP sockets. With deferred setup, the first read
 * on a BT_CONNECT2 socket completes the pending connection: move to
 * BT_CONFIG and send the delayed CONN_RSP (success / no-info) using
 * the identifier saved from the connect request. Then fall through
 * to the generic Bluetooth recvmsg.
 */
1866 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1868 struct sock *sk = sock->sk;
1872 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1873 struct l2cap_conn_rsp rsp;
1875 sk->sk_state = BT_CONFIG;
/* In the response, our dcid is the peer's source CID and vice versa. */
1877 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1878 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1879 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1880 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1881 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1882 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1890 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler: L2CAP_OPTIONS (mtu/mode/fcs/
 * tx window etc., pre-seeded with current values so a short copy
 * keeps the rest) and L2CAP_LM link-mode flags mapped onto the
 * newer sec_level / role_switch / force_reliable fields.
 */
1893 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1895 struct sock *sk = sock->sk;
1896 struct l2cap_options opts;
1900 BT_DBG("sk %p", sk);
/* Seed with current settings; partial user buffers override a prefix. */
1906 opts.imtu = l2cap_pi(sk)->imtu;
1907 opts.omtu = l2cap_pi(sk)->omtu;
1908 opts.flush_to = l2cap_pi(sk)->flush_to;
1909 opts.mode = l2cap_pi(sk)->mode;
1910 opts.fcs = l2cap_pi(sk)->fcs;
1911 opts.max_tx = l2cap_pi(sk)->max_tx;
1912 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1914 len = min_t(unsigned int, sizeof(opts), optlen);
1915 if (copy_from_user((char *) &opts, optval, len)) {
1920 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1925 l2cap_pi(sk)->mode = opts.mode;
1926 switch (l2cap_pi(sk)->mode) {
1927 case L2CAP_MODE_BASIC:
1929 case L2CAP_MODE_ERTM:
1930 case L2CAP_MODE_STREAMING:
1939 l2cap_pi(sk)->imtu = opts.imtu;
1940 l2cap_pi(sk)->omtu = opts.omtu;
1941 l2cap_pi(sk)->fcs = opts.fcs;
1942 l2cap_pi(sk)->max_tx = opts.max_tx;
1943 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1947 if (get_user(opt, (u32 __user *) optval)) {
/* Map legacy link-mode bits to the security-level model; the
 * strongest requested level wins because of the check order. */
1952 if (opt & L2CAP_LM_AUTH)
1953 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1954 if (opt & L2CAP_LM_ENCRYPT)
1955 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1956 if (opt & L2CAP_LM_SECURE)
1957 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1959 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1960 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt() entry point: SOL_L2CAP is routed to the legacy
 * handler; otherwise only SOL_BLUETOOTH is accepted
 * (BT_SECURITY and BT_DEFER_SETUP visible here).
 */
1972 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1974 struct sock *sk = sock->sk;
1975 struct bt_security sec;
1979 BT_DBG("sk %p", sk);
1981 if (level == SOL_L2CAP)
1982 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1984 if (level != SOL_BLUETOOTH)
1985 return -ENOPROTOOPT;
/* BT_SECURITY only applies to connection-oriented or raw sockets. */
1991 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1992 && sk->sk_type != SOCK_RAW) {
1997 sec.level = BT_SECURITY_LOW;
1999 len = min_t(unsigned int, sizeof(sec), optlen);
2000 if (copy_from_user((char *) &sec, optval, len)) {
2005 if (sec.level < BT_SECURITY_LOW ||
2006 sec.level > BT_SECURITY_HIGH) {
2011 l2cap_pi(sk)->sec_level = sec.level;
2014 case BT_DEFER_SETUP:
/* Deferred setup can only be toggled before the socket is connected. */
2015 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2020 if (get_user(opt, (u32 __user *) optval)) {
2025 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler: returns L2CAP_OPTIONS,
 * a link-mode bitmask reconstructed from sec_level/role_switch/
 * force_reliable, or L2CAP_CONNINFO (handle + device class) for
 * connected (or deferred-accept) sockets.
 */
2037 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2039 struct sock *sk = sock->sk;
2040 struct l2cap_options opts;
2041 struct l2cap_conninfo cinfo;
2045 BT_DBG("sk %p", sk);
2047 if (get_user(len, optlen))
2054 opts.imtu = l2cap_pi(sk)->imtu;
2055 opts.omtu = l2cap_pi(sk)->omtu;
2056 opts.flush_to = l2cap_pi(sk)->flush_to;
2057 opts.mode = l2cap_pi(sk)->mode;
2058 opts.fcs = l2cap_pi(sk)->fcs;
2059 opts.max_tx = l2cap_pi(sk)->max_tx;
2060 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2062 len = min_t(unsigned int, len, sizeof(opts));
2063 if (copy_to_user(optval, (char *) &opts, len))
/* Translate the security level back into legacy L2CAP_LM_* bits. */
2069 switch (l2cap_pi(sk)->sec_level) {
2070 case BT_SECURITY_LOW:
2071 opt = L2CAP_LM_AUTH;
2073 case BT_SECURITY_MEDIUM:
2074 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2076 case BT_SECURITY_HIGH:
2077 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2085 if (l2cap_pi(sk)->role_switch)
2086 opt |= L2CAP_LM_MASTER;
2088 if (l2cap_pi(sk)->force_reliable)
2089 opt |= L2CAP_LM_RELIABLE;
2091 if (put_user(opt, (u32 __user *) optval))
2095 case L2CAP_CONNINFO:
/* Also allowed on a deferred-setup socket still in BT_CONNECT2. */
2096 if (sk->sk_state != BT_CONNECTED &&
2097 !(sk->sk_state == BT_CONNECT2 &&
2098 bt_sk(sk)->defer_setup)) {
2103 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2104 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2106 len = min_t(unsigned int, len, sizeof(cinfo));
2107 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt() entry point: SOL_L2CAP goes to the legacy handler,
 * SOL_BLUETOOTH exposes BT_SECURITY and BT_DEFER_SETUP.
 */
2121 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2123 struct sock *sk = sock->sk;
2124 struct bt_security sec;
2127 BT_DBG("sk %p", sk);
2129 if (level == SOL_L2CAP)
2130 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2132 if (level != SOL_BLUETOOTH)
2133 return -ENOPROTOOPT;
2135 if (get_user(len, optlen))
2142 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2143 && sk->sk_type != SOCK_RAW) {
2148 sec.level = l2cap_pi(sk)->sec_level;
2150 len = min_t(unsigned int, len, sizeof(sec));
2151 if (copy_to_user(optval, (char *) &sec, len))
2156 case BT_DEFER_SETUP:
2157 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2162 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() for L2CAP sockets. On first shutdown, ERTM sockets
 * first wait for outstanding frames to be acknowledged, then the
 * socket is closed; with SO_LINGER set we additionally wait for
 * BT_CLOSED up to sk_lingertime.
 */
2176 static int l2cap_sock_shutdown(struct socket *sock, int how)
2178 struct sock *sk = sock->sk;
2181 BT_DBG("sock %p, sk %p", sock, sk);
2187 if (!sk->sk_shutdown) {
2188 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2189 err = __l2cap_wait_ack(sk);
2191 sk->sk_shutdown = SHUTDOWN_MASK;
2192 l2cap_sock_clear_timer(sk);
2193 __l2cap_sock_close(sk, 0);
2195 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2196 err = bt_sock_wait_state(sk, BT_CLOSED,
2200 if (!err && sk->sk_err)
/* release() for L2CAP sockets: full shutdown followed by killing
 * the socket (freeing it once it is zapped and unreferenced).
 */
2207 static int l2cap_sock_release(struct socket *sock)
2209 struct sock *sk = sock->sk;
2212 BT_DBG("sock %p, sk %p", sock, sk);
2217 err = l2cap_sock_shutdown(sock, 2);
2220 l2cap_sock_kill(sk);
/* Mark a channel as fully configured and wake whoever waits on it:
 * the connecting thread for an outgoing channel, or the listening
 * parent (via sk_data_ready) for an incoming one.
 */
2224 static void l2cap_chan_ready(struct sock *sk)
2226 struct sock *parent = bt_sk(sk)->parent;
2228 BT_DBG("sk %p, parent %p", sk, parent);
2230 l2cap_pi(sk)->conf_state = 0;
2231 l2cap_sock_clear_timer(sk);
2234 /* Outgoing channel.
2235 * Wake up socket sleeping on connect.
2237 sk->sk_state = BT_CONNECTED;
2238 sk->sk_state_change(sk);
2240 /* Incoming channel.
2241 * Wake up socket sleeping on accept.
2243 parent->sk_data_ready(parent, 0);
2247 /* Copy frame to all raw sockets on that connection */
/* Walks the connection's channel list under the read lock, cloning
 * the skb (GFP_ATOMIC) for every SOCK_RAW socket except the one the
 * frame originated from; clones the receive queue rejects are
 * dropped by sock_queue_rcv_skb itself.
 */
2248 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2250 struct l2cap_chan_list *l = &conn->chan_list;
2251 struct sk_buff *nskb;
2254 BT_DBG("conn %p", conn);
2256 read_lock(&l->lock);
2257 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2258 if (sk->sk_type != SOCK_RAW)
2261 /* Don't send frame to the socket it came from */
2264 nskb = skb_clone(skb, GFP_ATOMIC);
2268 if (sock_queue_rcv_skb(sk, nskb))
2271 read_unlock(&l->lock);
2274 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header (CID =
 * signalling) + command header + payload, with the payload split
 * into frag_list continuation fragments when it exceeds conn->mtu.
 * Returns the skb or NULL on allocation failure.
 */
2275 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2276 u8 code, u8 ident, u16 dlen, void *data)
2278 struct sk_buff *skb, **frag;
2279 struct l2cap_cmd_hdr *cmd;
2280 struct l2cap_hdr *lh;
2283 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2284 conn, code, ident, dlen);
2286 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2287 count = min_t(unsigned int, conn->mtu, len);
2289 skb = bt_skb_alloc(count, GFP_ATOMIC);
2293 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2294 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2295 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2297 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2300 cmd->len = cpu_to_le16(dlen);
/* First fragment holds whatever payload fits after the two headers. */
2303 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2304 memcpy(skb_put(skb, count), data, count);
2310 /* Continuation fragments (no L2CAP header) */
2311 frag = &skb_shinfo(skb)->frag_list;
2313 count = min_t(unsigned int, conn->mtu, len);
2315 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2319 memcpy(skb_put(*frag, count), data, count);
2324 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: returns its total length
 * (header + value) and stores the type and value. 1/2/4-byte values
 * are read (and byte-swapped) inline; any other length is returned
 * as a pointer to the raw option value.
 */
2334 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2336 struct l2cap_conf_opt *opt = *ptr;
2339 len = L2CAP_CONF_OPT_SIZE + opt->len;
2347 *val = *((u8 *) opt->val);
2351 *val = __le16_to_cpu(*((__le16 *) opt->val));
2355 *val = __le32_to_cpu(*((__le32 *) opt->val));
2359 *val = (unsigned long) opt->val;
2363 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr and advance the pointer.
 * 1/2/4-byte values are written little-endian; any other length is
 * copied verbatim from the buffer 'val' points at.
 */
2367 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2369 struct l2cap_conf_opt *opt = *ptr;
2371 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2378 *((u8 *) opt->val) = val;
2382 *((__le16 *) opt->val) = cpu_to_le16(val);
2386 *((__le32 *) opt->val) = cpu_to_le32(val);
2390 memcpy(opt->val, (void *) val, len);
2394 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer callback: sends a pending acknowledgement for frames
 * received but not yet acked when the timer expires.
 */
2397 static void l2cap_ack_timeout(unsigned long arg)
2399 struct sock *sk = (void *) arg;
2402 l2cap_send_ack(l2cap_pi(sk));
/* Initialise ERTM per-channel state: sequence counters, the
 * retransmission / monitor / ack timers, the SREJ and busy queues,
 * the send lock and the local-busy work item.
 */
2406 static inline void l2cap_ertm_init(struct sock *sk)
2408 l2cap_pi(sk)->expected_ack_seq = 0;
2409 l2cap_pi(sk)->unacked_frames = 0;
2410 l2cap_pi(sk)->buffer_seq = 0;
2411 l2cap_pi(sk)->num_acked = 0;
2412 l2cap_pi(sk)->frames_sent = 0;
2414 setup_timer(&l2cap_pi(sk)->retrans_timer,
2415 l2cap_retrans_timeout, (unsigned long) sk);
2416 setup_timer(&l2cap_pi(sk)->monitor_timer,
2417 l2cap_monitor_timeout, (unsigned long) sk);
2418 setup_timer(&l2cap_pi(sk)->ack_timer,
2419 l2cap_ack_timeout, (unsigned long) sk);
2421 __skb_queue_head_init(SREJ_QUEUE(sk));
2422 __skb_queue_head_init(BUSY_QUEUE(sk));
2423 spin_lock_init(&l2cap_pi(sk)->send_lock);
2425 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* Return nonzero if 'mode' is supported by both the local feature
 * mask (extended with ERTM/Streaming when enable_ertm is set) and
 * the peer's feature mask.
 */
2428 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2430 u32 local_feat_mask = l2cap_feat_mask;
2432 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2435 case L2CAP_MODE_ERTM:
2436 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2437 case L2CAP_MODE_STREAMING:
2438 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick the channel mode to use: keep the requested ERTM/Streaming
 * mode if both ends support it, otherwise fall back to Basic mode.
 */
2444 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2447 case L2CAP_MODE_STREAMING:
2448 case L2CAP_MODE_ERTM:
2449 if (l2cap_mode_supported(mode, remote_feat_mask))
2453 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into 'data'. On the
 * first request, an unsupported ERTM/Streaming mode either aborts
 * the connection (state2 device) or falls back via
 * l2cap_select_mode(). Then per mode: Basic adds an MTU option when
 * non-default; ERTM/Streaming add an RFC option (PDU size capped to
 * conn->mtu - 10) and, when the peer lacks FCS support or sent
 * FCS_NONE, an FCS option disabling the checksum.
 */
2457 static int l2cap_build_conf_req(struct sock *sk, void *data)
2459 struct l2cap_pinfo *pi = l2cap_pi(sk);
2460 struct l2cap_conf_req *req = data;
2461 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2462 void *ptr = req->data;
2464 BT_DBG("sk %p", sk);
2466 if (pi->num_conf_req || pi->num_conf_rsp)
2470 case L2CAP_MODE_STREAMING:
2471 case L2CAP_MODE_ERTM:
2472 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2473 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2474 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
2477 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2483 case L2CAP_MODE_BASIC:
2484 if (pi->imtu != L2CAP_DEFAULT_MTU)
2485 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2488 case L2CAP_MODE_ERTM:
2489 rfc.mode = L2CAP_MODE_ERTM;
2490 rfc.txwin_size = pi->tx_win;
2491 rfc.max_transmit = pi->max_tx;
/* Timeouts are proposed as 0; the responder fills in real values. */
2492 rfc.retrans_timeout = 0;
2493 rfc.monitor_timeout = 0;
2494 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2495 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2496 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2498 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2499 sizeof(rfc), (unsigned long) &rfc);
2501 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2504 if (pi->fcs == L2CAP_FCS_NONE ||
2505 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2506 pi->fcs = L2CAP_FCS_NONE;
2507 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2511 case L2CAP_MODE_STREAMING:
2512 rfc.mode = L2CAP_MODE_STREAMING;
2514 rfc.max_transmit = 0;
2515 rfc.retrans_timeout = 0;
2516 rfc.monitor_timeout = 0;
2517 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2518 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2519 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2521 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2522 sizeof(rfc), (unsigned long) &rfc);
2524 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2527 if (pi->fcs == L2CAP_FCS_NONE ||
2528 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2529 pi->fcs = L2CAP_FCS_NONE;
2530 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2535 /* FIXME: Need actual value of the flush timeout */
2536 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2537 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2539 req->dcid = cpu_to_le16(pi->dcid);
2540 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated configuration request (pi->conf_req /
 * pi->conf_len) and build our response into 'data'. Unknown
 * non-hint options yield CONF_UNKNOWN with the offending types
 * echoed back. On the first exchange an unsupported ERTM/Streaming
 * mode refuses the connection (or is renegotiated via
 * l2cap_select_mode()); a mode mismatch is answered with
 * CONF_UNACCEPT carrying our mode. On success the MTU, RFC (with
 * remote tx window / max transmit / MPS and default timeouts) and
 * FCS state are committed. Returns the response length or a
 * negative errno.
 */
2545 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2547 struct l2cap_pinfo *pi = l2cap_pi(sk);
2548 struct l2cap_conf_rsp *rsp = data;
2549 void *ptr = rsp->data;
2550 void *req = pi->conf_req;
2551 int len = pi->conf_len;
2552 int type, hint, olen;
2554 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2555 u16 mtu = L2CAP_DEFAULT_MTU;
2556 u16 result = L2CAP_CONF_SUCCESS;
2558 BT_DBG("sk %p", sk);
2560 while (len >= L2CAP_CONF_OPT_SIZE) {
2561 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2563 hint = type & L2CAP_CONF_HINT;
2564 type &= L2CAP_CONF_MASK;
2567 case L2CAP_CONF_MTU:
2571 case L2CAP_CONF_FLUSH_TO:
2575 case L2CAP_CONF_QOS:
2578 case L2CAP_CONF_RFC:
2579 if (olen == sizeof(rfc))
2580 memcpy(&rfc, (void *) val, olen);
2583 case L2CAP_CONF_FCS:
2584 if (val == L2CAP_FCS_NONE)
2585 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2593 result = L2CAP_CONF_UNKNOWN;
2594 *((u8 *) ptr++) = type;
2599 if (pi->num_conf_rsp || pi->num_conf_req)
2603 case L2CAP_MODE_STREAMING:
2604 case L2CAP_MODE_ERTM:
2605 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2606 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2607 return -ECONNREFUSED;
2610 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2615 if (pi->mode != rfc.mode) {
2616 result = L2CAP_CONF_UNACCEPT;
2617 rfc.mode = pi->mode;
2619 if (pi->num_conf_rsp == 1)
2620 return -ECONNREFUSED;
2622 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2623 sizeof(rfc), (unsigned long) &rfc);
2627 if (result == L2CAP_CONF_SUCCESS) {
2628 /* Configure output options and let the other side know
2629 * which ones we don't like. */
2631 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2632 result = L2CAP_CONF_UNACCEPT;
2635 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2637 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2640 case L2CAP_MODE_BASIC:
2641 pi->fcs = L2CAP_FCS_NONE;
2642 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2645 case L2CAP_MODE_ERTM:
2646 pi->remote_tx_win = rfc.txwin_size;
2647 pi->remote_max_tx = rfc.max_transmit;
/* FIX: rfc.max_pdu_size is a little-endian wire field. Convert it
 * to host order before comparing, and store the capped value with
 * cpu_to_le16() (the original used le16_to_cpu() on the host-order
 * mtu, which is wrong on big-endian machines).
 */
2648 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2649 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2651 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/* FIX: the defaults are host-order constants being stored into
 * __le16 wire fields - cpu_to_le16(), not le16_to_cpu().
 */
2653 rfc.retrans_timeout =
2654 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2655 rfc.monitor_timeout =
2656 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2658 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2660 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2661 sizeof(rfc), (unsigned long) &rfc);
2665 case L2CAP_MODE_STREAMING:
/* FIX: same endianness correction as the ERTM branch above. */
2666 if (le16_to_cpu(rfc.max_pdu_size) > pi->conn->mtu - 10)
2667 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2669 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2671 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2673 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2674 sizeof(rfc), (unsigned long) &rfc);
2679 result = L2CAP_CONF_UNACCEPT;
2681 memset(&rfc, 0, sizeof(rfc));
2682 rfc.mode = pi->mode;
2685 if (result == L2CAP_CONF_SUCCESS)
2686 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2688 rsp->scid = cpu_to_le16(pi->dcid);
2689 rsp->result = cpu_to_le16(result);
2690 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration response and build the follow-up
 * request into 'data': an unacceptable MTU is replaced by the
 * minimum, flush timeout and RFC options are echoed back, and a
 * mode change by the peer is refused once we are a state-2 device.
 * On success the negotiated ERTM/Streaming parameters are stored.
 * Returns the new request length or -ECONNREFUSED.
 */
2695 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2697 struct l2cap_pinfo *pi = l2cap_pi(sk);
2698 struct l2cap_conf_req *req = data;
2699 void *ptr = req->data;
2702 struct l2cap_conf_rfc rfc;
2704 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2706 while (len >= L2CAP_CONF_OPT_SIZE) {
2707 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2710 case L2CAP_CONF_MTU:
2711 if (val < L2CAP_DEFAULT_MIN_MTU) {
2712 *result = L2CAP_CONF_UNACCEPT;
2713 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2716 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2719 case L2CAP_CONF_FLUSH_TO:
2721 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2725 case L2CAP_CONF_RFC:
2726 if (olen == sizeof(rfc))
2727 memcpy(&rfc, (void *)val, olen);
/* Once locked into a mode (state-2), the peer may not change it. */
2729 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2730 rfc.mode != pi->mode)
2731 return -ECONNREFUSED;
2733 pi->mode = rfc.mode;
2736 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2737 sizeof(rfc), (unsigned long) &rfc);
2742 if (*result == L2CAP_CONF_SUCCESS) {
2744 case L2CAP_MODE_ERTM:
2745 pi->remote_tx_win = rfc.txwin_size;
2746 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2747 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2748 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2750 case L2CAP_MODE_STREAMING:
2751 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2755 req->dcid = cpu_to_le16(pi->dcid);
2756 req->flags = cpu_to_le16(0x0000);
/* Fill a minimal (option-free) configuration response with the
 * given result and flags; returns its length for l2cap_send_cmd().
 */
2761 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2763 struct l2cap_conf_rsp *rsp = data;
2764 void *ptr = rsp->data;
2766 BT_DBG("sk %p", sk);
2768 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2769 rsp->result = cpu_to_le16(result);
2770 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from an accepting configuration response
 * and commit the negotiated ERTM/Streaming parameters (tx window,
 * timeouts, MPS). No-op for Basic mode channels.
 */
2775 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2777 struct l2cap_pinfo *pi = l2cap_pi(sk);
2780 struct l2cap_conf_rfc rfc;
2782 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2784 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2787 while (len >= L2CAP_CONF_OPT_SIZE) {
2788 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2791 case L2CAP_CONF_RFC:
2792 if (olen == sizeof(rfc))
2793 memcpy(&rfc, (void *)val, olen);
2800 case L2CAP_MODE_ERTM:
2801 pi->remote_tx_win = rfc.txwin_size;
2802 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2803 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2804 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2806 case L2CAP_MODE_STREAMING:
2807 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject on the signalling channel. A "command not
 * understood" (reason 0) reject of our pending feature-mask
 * information request is treated as "no extended features": the
 * info timer is cancelled and connection setup proceeds.
 */
2811 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2813 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2815 if (rej->reason != 0x0000)
2818 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2819 cmd->ident == conn->info_ident) {
2820 del_timer(&conn->info_timer);
2822 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2823 conn->info_ident = 0;
2825 l2cap_conn_start(conn);
/* Handle an incoming Connection Request. Finds a listening socket
 * for the PSM, enforces link security for non-SDP PSMs, checks the
 * accept backlog and duplicate CIDs, then allocates and initialises
 * a child socket bound to the new channel. The response result is
 * success, pending (authentication or deferred accept) or an error;
 * a pending/no-info result additionally triggers a feature-mask
 * information request if one is not yet done.
 */
2831 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2833 struct l2cap_chan_list *list = &conn->chan_list;
2834 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2835 struct l2cap_conn_rsp rsp;
2836 struct sock *sk, *parent;
2837 int result, status = L2CAP_CS_NO_INFO;
2839 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2840 __le16 psm = req->psm;
2842 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2844 /* Check if we have socket listening on psm */
2845 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2847 result = L2CAP_CR_BAD_PSM;
2851 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is exempt from the link-mode check. */
2852 if (psm != cpu_to_le16(0x0001) &&
2853 !hci_conn_check_link_mode(conn->hcon)) {
2854 conn->disc_reason = 0x05;
2855 result = L2CAP_CR_SEC_BLOCK;
2859 result = L2CAP_CR_NO_MEM;
2861 /* Check for backlog size */
2862 if (sk_acceptq_is_full(parent)) {
2863 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2867 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2871 write_lock_bh(&list->lock);
2873 /* Check if we already have channel with that dcid */
2874 if (__l2cap_get_chan_by_dcid(list, scid)) {
2875 write_unlock_bh(&list->lock);
2876 sock_set_flag(sk, SOCK_ZAPPED);
2877 l2cap_sock_kill(sk);
2881 hci_conn_hold(conn->hcon);
2883 l2cap_sock_init(sk, parent);
2884 bacpy(&bt_sk(sk)->src, conn->src);
2885 bacpy(&bt_sk(sk)->dst, conn->dst);
2886 l2cap_pi(sk)->psm = psm;
2887 l2cap_pi(sk)->dcid = scid;
2889 __l2cap_chan_add(conn, sk, parent);
2890 dcid = l2cap_pi(sk)->scid;
2892 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* Saved so a deferred accept can answer with the same identifier. */
2894 l2cap_pi(sk)->ident = cmd->ident;
2896 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2897 if (l2cap_check_security(sk)) {
2898 if (bt_sk(sk)->defer_setup) {
2899 sk->sk_state = BT_CONNECT2;
2900 result = L2CAP_CR_PEND;
2901 status = L2CAP_CS_AUTHOR_PEND;
2902 parent->sk_data_ready(parent, 0);
2904 sk->sk_state = BT_CONFIG;
2905 result = L2CAP_CR_SUCCESS;
2906 status = L2CAP_CS_NO_INFO;
2909 sk->sk_state = BT_CONNECT2;
2910 result = L2CAP_CR_PEND;
2911 status = L2CAP_CS_AUTHEN_PEND;
2914 sk->sk_state = BT_CONNECT2;
2915 result = L2CAP_CR_PEND;
2916 status = L2CAP_CS_NO_INFO;
2919 write_unlock_bh(&list->lock);
2922 bh_unlock_sock(parent);
2925 rsp.scid = cpu_to_le16(scid);
2926 rsp.dcid = cpu_to_le16(dcid);
2927 rsp.result = cpu_to_le16(result);
2928 rsp.status = cpu_to_le16(status);
2929 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2931 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2932 struct l2cap_info_req info;
2933 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2935 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2936 conn->info_ident = l2cap_get_ident(conn);
2938 mod_timer(&conn->info_timer, jiffies +
2939 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2941 l2cap_send_cmd(conn, conn->info_ident,
2942 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response. The channel is looked up by source
 * CID, falling back to the command identifier (scid may be 0 on a
 * pending/refused response). Success moves to BT_CONFIG and fires
 * our first configuration request; pending marks CONNECT_PEND;
 * anything else tears the channel down with ECONNREFUSED.
 */
2948 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2950 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2951 u16 scid, dcid, result, status;
2955 scid = __le16_to_cpu(rsp->scid);
2956 dcid = __le16_to_cpu(rsp->dcid);
2957 result = __le16_to_cpu(rsp->result);
2958 status = __le16_to_cpu(rsp->status);
2960 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2963 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2967 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2973 case L2CAP_CR_SUCCESS:
2974 sk->sk_state = BT_CONFIG;
2975 l2cap_pi(sk)->ident = 0;
2976 l2cap_pi(sk)->dcid = dcid;
2977 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2978 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2980 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2981 l2cap_build_conf_req(sk, req), req);
2982 l2cap_pi(sk)->num_conf_req++;
2986 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2990 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a Configuration Request. Option data is accumulated in
 * pi->conf_req across continuation fragments (flag bit 0); an
 * overflowing buffer is rejected. Once complete, the request is
 * parsed, the response sent, and - when both input and output
 * configuration are done - the channel is brought up (FCS choice
 * finalised, sequence counters reset, ERTM state initialised).
 * If we have not yet sent our own request, it is sent here.
 */
2998 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3000 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3006 dcid = __le16_to_cpu(req->dcid);
3007 flags = __le16_to_cpu(req->flags);
3009 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3011 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3015 if (sk->sk_state == BT_DISCONN)
3018 /* Reject if config buffer is too small. */
3019 len = cmd_len - sizeof(*req);
3020 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3021 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3022 l2cap_build_conf_rsp(sk, rsp,
3023 L2CAP_CONF_REJECT, flags), rsp);
3028 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3029 l2cap_pi(sk)->conf_len += len;
3031 if (flags & 0x0001) {
3032 /* Incomplete config. Send empty response. */
3033 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3034 l2cap_build_conf_rsp(sk, rsp,
3035 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3039 /* Complete config. */
3040 len = l2cap_parse_conf_req(sk, rsp);
3042 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3046 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3047 l2cap_pi(sk)->num_conf_rsp++;
3049 /* Reset config buffer. */
3050 l2cap_pi(sk)->conf_len = 0;
3052 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
3055 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* Default to CRC16 unless both sides agreed on no FCS. */
3056 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3057 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3058 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3060 sk->sk_state = BT_CONNECTED;
3062 l2cap_pi(sk)->next_tx_seq = 0;
3063 l2cap_pi(sk)->expected_tx_seq = 0;
3064 __skb_queue_head_init(TX_QUEUE(sk));
3065 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3066 l2cap_ertm_init(sk);
3068 l2cap_chan_ready(sk);
3072 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3074 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3075 l2cap_build_conf_req(sk, buf), buf);
3076 l2cap_pi(sk)->num_conf_req++;
/* Handle a Configuration Response. Success commits the negotiated
 * RFC parameters; an unacceptable response triggers up to
 * L2CAP_CONF_MAX_CONF_RSP renegotiation rounds (bounded by the
 * request buffer size); anything else disconnects. When both sides
 * are configured, the channel is brought up as in l2cap_config_req.
 */
3084 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3086 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3087 u16 scid, flags, result;
/* NOTE(review): cmd->len looks like a little-endian wire field
 * (other uses of the header convert with __le16_to_cpu) - verify
 * whether this arithmetic needs le16_to_cpu(cmd->len) on
 * big-endian hosts.
 */
3089 int len = cmd->len - sizeof(*rsp);
3091 scid = __le16_to_cpu(rsp->scid);
3092 flags = __le16_to_cpu(rsp->flags);
3093 result = __le16_to_cpu(rsp->result);
3095 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3096 scid, flags, result);
3098 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3103 case L2CAP_CONF_SUCCESS:
3104 l2cap_conf_rfc_get(sk, rsp->data, len);
3107 case L2CAP_CONF_UNACCEPT:
3108 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3111 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3112 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3116 /* throw out any old stored conf requests */
3117 result = L2CAP_CONF_SUCCESS;
3118 len = l2cap_parse_conf_rsp(sk, rsp->data,
3121 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3125 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3126 L2CAP_CONF_REQ, len, req);
3127 l2cap_pi(sk)->num_conf_req++;
3128 if (result != L2CAP_CONF_SUCCESS)
3134 sk->sk_err = ECONNRESET;
3135 l2cap_sock_set_timer(sk, HZ * 5);
3136 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3143 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3145 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3146 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3147 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3148 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3150 sk->sk_state = BT_CONNECTED;
3151 l2cap_pi(sk)->next_tx_seq = 0;
3152 l2cap_pi(sk)->expected_tx_seq = 0;
3153 __skb_queue_head_init(TX_QUEUE(sk));
3154 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3155 l2cap_ertm_init(sk);
3157 l2cap_chan_ready(sk);
3165 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3167 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3168 struct l2cap_disconn_rsp rsp;
3172 scid = __le16_to_cpu(req->scid);
3173 dcid = __le16_to_cpu(req->dcid);
3175 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3177 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3181 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3182 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3183 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3185 sk->sk_shutdown = SHUTDOWN_MASK;
3187 l2cap_chan_del(sk, ECONNRESET);
3190 l2cap_sock_kill(sk);
3194 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3196 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3200 scid = __le16_to_cpu(rsp->scid);
3201 dcid = __le16_to_cpu(rsp->dcid);
3203 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3205 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3209 l2cap_chan_del(sk, 0);
3212 l2cap_sock_kill(sk);
3216 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3218 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3221 type = __le16_to_cpu(req->type);
3223 BT_DBG("type 0x%4.4x", type);
3225 if (type == L2CAP_IT_FEAT_MASK) {
3227 u32 feat_mask = l2cap_feat_mask;
3228 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3229 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3230 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3232 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3234 put_unaligned_le32(feat_mask, rsp->data);
3235 l2cap_send_cmd(conn, cmd->ident,
3236 L2CAP_INFO_RSP, sizeof(buf), buf);
3237 } else if (type == L2CAP_IT_FIXED_CHAN) {
3239 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3240 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3241 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3242 memcpy(buf + 4, l2cap_fixed_chan, 8);
3243 l2cap_send_cmd(conn, cmd->ident,
3244 L2CAP_INFO_RSP, sizeof(buf), buf);
3246 struct l2cap_info_rsp rsp;
3247 rsp.type = cpu_to_le16(type);
3248 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3249 l2cap_send_cmd(conn, cmd->ident,
3250 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3256 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3258 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3261 type = __le16_to_cpu(rsp->type);
3262 result = __le16_to_cpu(rsp->result);
3264 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3266 del_timer(&conn->info_timer);
3268 if (type == L2CAP_IT_FEAT_MASK) {
3269 conn->feat_mask = get_unaligned_le32(rsp->data);
3271 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3272 struct l2cap_info_req req;
3273 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3275 conn->info_ident = l2cap_get_ident(conn);
3277 l2cap_send_cmd(conn, conn->info_ident,
3278 L2CAP_INFO_REQ, sizeof(req), &req);
3280 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3281 conn->info_ident = 0;
3283 l2cap_conn_start(conn);
3285 } else if (type == L2CAP_IT_FIXED_CHAN) {
3286 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3287 conn->info_ident = 0;
3289 l2cap_conn_start(conn);
3295 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3297 u8 *data = skb->data;
3299 struct l2cap_cmd_hdr cmd;
3302 l2cap_raw_recv(conn, skb);
3304 while (len >= L2CAP_CMD_HDR_SIZE) {
3306 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3307 data += L2CAP_CMD_HDR_SIZE;
3308 len -= L2CAP_CMD_HDR_SIZE;
3310 cmd_len = le16_to_cpu(cmd.len);
3312 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3314 if (cmd_len > len || !cmd.ident) {
3315 BT_DBG("corrupted command");
3320 case L2CAP_COMMAND_REJ:
3321 l2cap_command_rej(conn, &cmd, data);
3324 case L2CAP_CONN_REQ:
3325 err = l2cap_connect_req(conn, &cmd, data);
3328 case L2CAP_CONN_RSP:
3329 err = l2cap_connect_rsp(conn, &cmd, data);
3332 case L2CAP_CONF_REQ:
3333 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3336 case L2CAP_CONF_RSP:
3337 err = l2cap_config_rsp(conn, &cmd, data);
3340 case L2CAP_DISCONN_REQ:
3341 err = l2cap_disconnect_req(conn, &cmd, data);
3344 case L2CAP_DISCONN_RSP:
3345 err = l2cap_disconnect_rsp(conn, &cmd, data);
3348 case L2CAP_ECHO_REQ:
3349 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3352 case L2CAP_ECHO_RSP:
3355 case L2CAP_INFO_REQ:
3356 err = l2cap_information_req(conn, &cmd, data);
3359 case L2CAP_INFO_RSP:
3360 err = l2cap_information_rsp(conn, &cmd, data);
3364 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3370 struct l2cap_cmd_rej rej;
3371 BT_DBG("error %d", err);
3373 /* FIXME: Map err to a valid reason */
3374 rej.reason = cpu_to_le16(0);
3375 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3385 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3387 u16 our_fcs, rcv_fcs;
3388 int hdr_size = L2CAP_HDR_SIZE + 2;
3390 if (pi->fcs == L2CAP_FCS_CRC16) {
3391 skb_trim(skb, skb->len - 2);
3392 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3393 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3395 if (our_fcs != rcv_fcs)
3401 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3403 struct l2cap_pinfo *pi = l2cap_pi(sk);
3406 pi->frames_sent = 0;
3408 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3410 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3411 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3412 l2cap_send_sframe(pi, control);
3413 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3414 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3417 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3418 l2cap_retransmit_frames(sk);
3420 spin_lock_bh(&pi->send_lock);
3421 l2cap_ertm_send(sk);
3422 spin_unlock_bh(&pi->send_lock);
3424 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3425 pi->frames_sent == 0) {
3426 control |= L2CAP_SUPER_RCV_READY;
3427 l2cap_send_sframe(pi, control);
3431 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3433 struct sk_buff *next_skb;
3434 struct l2cap_pinfo *pi = l2cap_pi(sk);
3435 int tx_seq_offset, next_tx_seq_offset;
3437 bt_cb(skb)->tx_seq = tx_seq;
3438 bt_cb(skb)->sar = sar;
3440 next_skb = skb_peek(SREJ_QUEUE(sk));
3442 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3446 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3447 if (tx_seq_offset < 0)
3448 tx_seq_offset += 64;
3451 if (bt_cb(next_skb)->tx_seq == tx_seq)
3454 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3455 pi->buffer_seq) % 64;
3456 if (next_tx_seq_offset < 0)
3457 next_tx_seq_offset += 64;
3459 if (next_tx_seq_offset > tx_seq_offset) {
3460 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3464 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3467 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3469 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3474 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3476 struct l2cap_pinfo *pi = l2cap_pi(sk);
3477 struct sk_buff *_skb;
3480 switch (control & L2CAP_CTRL_SAR) {
3481 case L2CAP_SDU_UNSEGMENTED:
3482 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3485 err = sock_queue_rcv_skb(sk, skb);
3491 case L2CAP_SDU_START:
3492 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3495 pi->sdu_len = get_unaligned_le16(skb->data);
3497 if (pi->sdu_len > pi->imtu)
3500 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3504 /* pull sdu_len bytes only after alloc, because of Local Busy
3505 * condition we have to be sure that this will be executed
3506 * only once, i.e., when alloc does not fail */
3509 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3511 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3512 pi->partial_sdu_len = skb->len;
3515 case L2CAP_SDU_CONTINUE:
3516 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3522 pi->partial_sdu_len += skb->len;
3523 if (pi->partial_sdu_len > pi->sdu_len)
3526 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3531 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3537 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3538 pi->partial_sdu_len += skb->len;
3540 if (pi->partial_sdu_len > pi->imtu)
3543 if (pi->partial_sdu_len != pi->sdu_len)
3546 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3549 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3551 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3555 err = sock_queue_rcv_skb(sk, _skb);
3558 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3562 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3563 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3577 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3582 static void l2cap_busy_work(struct work_struct *work)
3584 DECLARE_WAITQUEUE(wait, current);
3585 struct l2cap_pinfo *pi =
3586 container_of(work, struct l2cap_pinfo, busy_work);
3587 struct sock *sk = (struct sock *)pi;
3588 int n_tries = 0, timeo = HZ/5, err;
3589 struct sk_buff *skb;
3594 add_wait_queue(sk_sleep(sk), &wait);
3595 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3596 set_current_state(TASK_INTERRUPTIBLE);
3598 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3600 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3607 if (signal_pending(current)) {
3608 err = sock_intr_errno(timeo);
3613 timeo = schedule_timeout(timeo);
3616 err = sock_error(sk);
3620 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3621 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3622 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3624 skb_queue_head(BUSY_QUEUE(sk), skb);
3628 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3635 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
3638 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3639 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3640 l2cap_send_sframe(pi, control);
3641 l2cap_pi(sk)->retry_count = 1;
3643 del_timer(&pi->retrans_timer);
3644 __mod_monitor_timer();
3646 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3649 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3650 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3652 BT_DBG("sk %p, Exit local busy", sk);
3654 set_current_state(TASK_RUNNING);
3655 remove_wait_queue(sk_sleep(sk), &wait);
3660 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3662 struct l2cap_pinfo *pi = l2cap_pi(sk);
3665 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3666 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3667 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3671 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3673 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3677 /* Busy Condition */
3678 BT_DBG("sk %p, Enter local busy", sk);
3680 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3681 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3682 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3684 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3685 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3686 l2cap_send_sframe(pi, sctrl);
3688 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3690 del_timer(&pi->ack_timer);
3692 queue_work(_busy_wq, &pi->busy_work);
3697 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3699 struct l2cap_pinfo *pi = l2cap_pi(sk);
3700 struct sk_buff *_skb;
3704 * TODO: We have to notify the userland if some data is lost with the
3708 switch (control & L2CAP_CTRL_SAR) {
3709 case L2CAP_SDU_UNSEGMENTED:
3710 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3715 err = sock_queue_rcv_skb(sk, skb);
3721 case L2CAP_SDU_START:
3722 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3727 pi->sdu_len = get_unaligned_le16(skb->data);
3730 if (pi->sdu_len > pi->imtu) {
3735 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3741 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3743 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3744 pi->partial_sdu_len = skb->len;
3748 case L2CAP_SDU_CONTINUE:
3749 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3752 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3754 pi->partial_sdu_len += skb->len;
3755 if (pi->partial_sdu_len > pi->sdu_len)
3763 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3766 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3768 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3769 pi->partial_sdu_len += skb->len;
3771 if (pi->partial_sdu_len > pi->imtu)
3774 if (pi->partial_sdu_len == pi->sdu_len) {
3775 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3776 err = sock_queue_rcv_skb(sk, _skb);
3791 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3793 struct sk_buff *skb;
3796 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3797 if (bt_cb(skb)->tx_seq != tx_seq)
3800 skb = skb_dequeue(SREJ_QUEUE(sk));
3801 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3802 l2cap_ertm_reassembly_sdu(sk, skb, control);
3803 l2cap_pi(sk)->buffer_seq_srej =
3804 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3805 tx_seq = (tx_seq + 1) % 64;
3809 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3811 struct l2cap_pinfo *pi = l2cap_pi(sk);
3812 struct srej_list *l, *tmp;
3815 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3816 if (l->tx_seq == tx_seq) {
3821 control = L2CAP_SUPER_SELECT_REJECT;
3822 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3823 l2cap_send_sframe(pi, control);
3825 list_add_tail(&l->list, SREJ_LIST(sk));
3829 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3831 struct l2cap_pinfo *pi = l2cap_pi(sk);
3832 struct srej_list *new;
3835 while (tx_seq != pi->expected_tx_seq) {
3836 control = L2CAP_SUPER_SELECT_REJECT;
3837 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3838 l2cap_send_sframe(pi, control);
3840 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3841 new->tx_seq = pi->expected_tx_seq;
3842 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3843 list_add_tail(&new->list, SREJ_LIST(sk));
3845 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3848 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3850 struct l2cap_pinfo *pi = l2cap_pi(sk);
3851 u8 tx_seq = __get_txseq(rx_control);
3852 u8 req_seq = __get_reqseq(rx_control);
3853 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3854 int tx_seq_offset, expected_tx_seq_offset;
3855 int num_to_ack = (pi->tx_win/6) + 1;
3858 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3861 if (L2CAP_CTRL_FINAL & rx_control &&
3862 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3863 del_timer(&pi->monitor_timer);
3864 if (pi->unacked_frames > 0)
3865 __mod_retrans_timer();
3866 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3869 pi->expected_ack_seq = req_seq;
3870 l2cap_drop_acked_frames(sk);
3872 if (tx_seq == pi->expected_tx_seq)
3875 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3876 if (tx_seq_offset < 0)
3877 tx_seq_offset += 64;
3879 /* invalid tx_seq */
3880 if (tx_seq_offset >= pi->tx_win) {
3881 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
3885 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3888 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3889 struct srej_list *first;
3891 first = list_first_entry(SREJ_LIST(sk),
3892 struct srej_list, list);
3893 if (tx_seq == first->tx_seq) {
3894 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3895 l2cap_check_srej_gap(sk, tx_seq);
3897 list_del(&first->list);
3900 if (list_empty(SREJ_LIST(sk))) {
3901 pi->buffer_seq = pi->buffer_seq_srej;
3902 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3904 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3907 struct srej_list *l;
3909 /* duplicated tx_seq */
3910 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3913 list_for_each_entry(l, SREJ_LIST(sk), list) {
3914 if (l->tx_seq == tx_seq) {
3915 l2cap_resend_srejframe(sk, tx_seq);
3919 l2cap_send_srejframe(sk, tx_seq);
3922 expected_tx_seq_offset =
3923 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3924 if (expected_tx_seq_offset < 0)
3925 expected_tx_seq_offset += 64;
3927 /* duplicated tx_seq */
3928 if (tx_seq_offset < expected_tx_seq_offset)
3931 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3933 BT_DBG("sk %p, Enter SREJ", sk);
3935 INIT_LIST_HEAD(SREJ_LIST(sk));
3936 pi->buffer_seq_srej = pi->buffer_seq;
3938 __skb_queue_head_init(SREJ_QUEUE(sk));
3939 __skb_queue_head_init(BUSY_QUEUE(sk));
3940 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3942 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3944 l2cap_send_srejframe(sk, tx_seq);
3946 del_timer(&pi->ack_timer);
3951 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3953 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3954 bt_cb(skb)->tx_seq = tx_seq;
3955 bt_cb(skb)->sar = sar;
3956 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3960 err = l2cap_push_rx_skb(sk, skb, rx_control);
3964 if (rx_control & L2CAP_CTRL_FINAL) {
3965 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3966 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3968 l2cap_retransmit_frames(sk);
3973 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3974 if (pi->num_acked == num_to_ack - 1)
3984 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3986 struct l2cap_pinfo *pi = l2cap_pi(sk);
3988 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
3991 pi->expected_ack_seq = __get_reqseq(rx_control);
3992 l2cap_drop_acked_frames(sk);
3994 if (rx_control & L2CAP_CTRL_POLL) {
3995 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3996 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3997 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3998 (pi->unacked_frames > 0))
3999 __mod_retrans_timer();
4001 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4002 l2cap_send_srejtail(sk);
4004 l2cap_send_i_or_rr_or_rnr(sk);
4007 } else if (rx_control & L2CAP_CTRL_FINAL) {
4008 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4010 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4011 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4013 l2cap_retransmit_frames(sk);
4016 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4017 (pi->unacked_frames > 0))
4018 __mod_retrans_timer();
4020 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4021 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4024 spin_lock_bh(&pi->send_lock);
4025 l2cap_ertm_send(sk);
4026 spin_unlock_bh(&pi->send_lock);
4031 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4033 struct l2cap_pinfo *pi = l2cap_pi(sk);
4034 u8 tx_seq = __get_reqseq(rx_control);
4036 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4038 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4040 pi->expected_ack_seq = tx_seq;
4041 l2cap_drop_acked_frames(sk);
4043 if (rx_control & L2CAP_CTRL_FINAL) {
4044 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4045 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4047 l2cap_retransmit_frames(sk);
4049 l2cap_retransmit_frames(sk);
4051 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4052 pi->conn_state |= L2CAP_CONN_REJ_ACT;
4055 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4057 struct l2cap_pinfo *pi = l2cap_pi(sk);
4058 u8 tx_seq = __get_reqseq(rx_control);
4060 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4062 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4064 if (rx_control & L2CAP_CTRL_POLL) {
4065 pi->expected_ack_seq = tx_seq;
4066 l2cap_drop_acked_frames(sk);
4068 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4069 l2cap_retransmit_one_frame(sk, tx_seq);
4071 spin_lock_bh(&pi->send_lock);
4072 l2cap_ertm_send(sk);
4073 spin_unlock_bh(&pi->send_lock);
4075 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4076 pi->srej_save_reqseq = tx_seq;
4077 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4079 } else if (rx_control & L2CAP_CTRL_FINAL) {
4080 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4081 pi->srej_save_reqseq == tx_seq)
4082 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4084 l2cap_retransmit_one_frame(sk, tx_seq);
4086 l2cap_retransmit_one_frame(sk, tx_seq);
4087 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4088 pi->srej_save_reqseq = tx_seq;
4089 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4094 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4096 struct l2cap_pinfo *pi = l2cap_pi(sk);
4097 u8 tx_seq = __get_reqseq(rx_control);
4099 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4101 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4102 pi->expected_ack_seq = tx_seq;
4103 l2cap_drop_acked_frames(sk);
4105 if (rx_control & L2CAP_CTRL_POLL)
4106 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4108 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4109 del_timer(&pi->retrans_timer);
4110 if (rx_control & L2CAP_CTRL_POLL)
4111 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4115 if (rx_control & L2CAP_CTRL_POLL)
4116 l2cap_send_srejtail(sk);
4118 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
4121 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4123 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4125 if (L2CAP_CTRL_FINAL & rx_control &&
4126 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4127 del_timer(&l2cap_pi(sk)->monitor_timer);
4128 if (l2cap_pi(sk)->unacked_frames > 0)
4129 __mod_retrans_timer();
4130 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4133 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4134 case L2CAP_SUPER_RCV_READY:
4135 l2cap_data_channel_rrframe(sk, rx_control);
4138 case L2CAP_SUPER_REJECT:
4139 l2cap_data_channel_rejframe(sk, rx_control);
4142 case L2CAP_SUPER_SELECT_REJECT:
4143 l2cap_data_channel_srejframe(sk, rx_control);
4146 case L2CAP_SUPER_RCV_NOT_READY:
4147 l2cap_data_channel_rnrframe(sk, rx_control);
4155 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4158 struct l2cap_pinfo *pi;
4161 int len, next_tx_seq_offset, req_seq_offset;
4163 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4165 BT_DBG("unknown cid 0x%4.4x", cid);
4171 BT_DBG("sk %p, len %d", sk, skb->len);
4173 if (sk->sk_state != BT_CONNECTED)
4177 case L2CAP_MODE_BASIC:
4178 /* If socket recv buffers overflows we drop data here
4179 * which is *bad* because L2CAP has to be reliable.
4180 * But we don't have any other choice. L2CAP doesn't
4181 * provide flow control mechanism. */
4183 if (pi->imtu < skb->len)
4186 if (!sock_queue_rcv_skb(sk, skb))
4190 case L2CAP_MODE_ERTM:
4191 control = get_unaligned_le16(skb->data);
4196 * We can just drop the corrupted I-frame here.
4197 * Receiver will miss it and start proper recovery
4198 * procedures and ask retransmission.
4200 if (l2cap_check_fcs(pi, skb))
4203 if (__is_sar_start(control) && __is_iframe(control))
4206 if (pi->fcs == L2CAP_FCS_CRC16)
4209 if (len > pi->mps) {
4210 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4214 req_seq = __get_reqseq(control);
4215 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4216 if (req_seq_offset < 0)
4217 req_seq_offset += 64;
4219 next_tx_seq_offset =
4220 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4221 if (next_tx_seq_offset < 0)
4222 next_tx_seq_offset += 64;
4224 /* check for invalid req-seq */
4225 if (req_seq_offset > next_tx_seq_offset) {
4226 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4230 if (__is_iframe(control)) {
4232 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4236 l2cap_data_channel_iframe(sk, control, skb);
4239 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4243 l2cap_data_channel_sframe(sk, control, skb);
4248 case L2CAP_MODE_STREAMING:
4249 control = get_unaligned_le16(skb->data);
4253 if (l2cap_check_fcs(pi, skb))
4256 if (__is_sar_start(control))
4259 if (pi->fcs == L2CAP_FCS_CRC16)
4262 if (len > pi->mps || len < 0 || __is_sframe(control))
4265 tx_seq = __get_txseq(control);
4267 if (pi->expected_tx_seq == tx_seq)
4268 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4270 pi->expected_tx_seq = (tx_seq + 1) % 64;
4272 l2cap_streaming_reassembly_sdu(sk, skb, control);
4277 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
4291 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4295 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4299 BT_DBG("sk %p, len %d", sk, skb->len);
4301 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4304 if (l2cap_pi(sk)->imtu < skb->len)
4307 if (!sock_queue_rcv_skb(sk, skb))
4319 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4321 struct l2cap_hdr *lh = (void *) skb->data;
4325 skb_pull(skb, L2CAP_HDR_SIZE);
4326 cid = __le16_to_cpu(lh->cid);
4327 len = __le16_to_cpu(lh->len);
4329 if (len != skb->len) {
4334 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4337 case L2CAP_CID_SIGNALING:
4338 l2cap_sig_channel(conn, skb);
4341 case L2CAP_CID_CONN_LESS:
4342 psm = get_unaligned_le16(skb->data);
4344 l2cap_conless_channel(conn, psm, skb);
4348 l2cap_data_channel(conn, cid, skb);
4353 /* ---- L2CAP interface with lower layer (HCI) ---- */
4355 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4357 int exact = 0, lm1 = 0, lm2 = 0;
4358 register struct sock *sk;
4359 struct hlist_node *node;
4361 if (type != ACL_LINK)
4364 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4366 /* Find listening sockets and check their link_mode */
4367 read_lock(&l2cap_sk_list.lock);
4368 sk_for_each(sk, node, &l2cap_sk_list.head) {
4369 if (sk->sk_state != BT_LISTEN)
4372 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4373 lm1 |= HCI_LM_ACCEPT;
4374 if (l2cap_pi(sk)->role_switch)
4375 lm1 |= HCI_LM_MASTER;
4377 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4378 lm2 |= HCI_LM_ACCEPT;
4379 if (l2cap_pi(sk)->role_switch)
4380 lm2 |= HCI_LM_MASTER;
4383 read_unlock(&l2cap_sk_list.lock);
4385 return exact ? lm1 : lm2;
4388 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4390 struct l2cap_conn *conn;
4392 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4394 if (hcon->type != ACL_LINK)
4398 conn = l2cap_conn_add(hcon, status);
4400 l2cap_conn_ready(conn);
4402 l2cap_conn_del(hcon, bt_err(status));
4407 static int l2cap_disconn_ind(struct hci_conn *hcon)
4409 struct l2cap_conn *conn = hcon->l2cap_data;
4411 BT_DBG("hcon %p", hcon);
4413 if (hcon->type != ACL_LINK || !conn)
4416 return conn->disc_reason;
4419 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4421 BT_DBG("hcon %p reason %d", hcon, reason);
4423 if (hcon->type != ACL_LINK)
4426 l2cap_conn_del(hcon, bt_err(reason));
4431 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4433 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4436 if (encrypt == 0x00) {
4437 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4438 l2cap_sock_clear_timer(sk);
4439 l2cap_sock_set_timer(sk, HZ * 5);
4440 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4441 __l2cap_sock_close(sk, ECONNREFUSED);
4443 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4444 l2cap_sock_clear_timer(sk);
4448 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4450 struct l2cap_chan_list *l;
4451 struct l2cap_conn *conn = hcon->l2cap_data;
4457 l = &conn->chan_list;
4459 BT_DBG("conn %p", conn);
4461 read_lock(&l->lock);
4463 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
4466 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4471 if (!status && (sk->sk_state == BT_CONNECTED ||
4472 sk->sk_state == BT_CONFIG)) {
4473 l2cap_check_encryption(sk, encrypt);
4478 if (sk->sk_state == BT_CONNECT) {
4480 struct l2cap_conn_req req;
4481 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4482 req.psm = l2cap_pi(sk)->psm;
4484 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4485 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4487 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4488 L2CAP_CONN_REQ, sizeof(req), &req);
4490 l2cap_sock_clear_timer(sk);
4491 l2cap_sock_set_timer(sk, HZ / 10);
4493 } else if (sk->sk_state == BT_CONNECT2) {
4494 struct l2cap_conn_rsp rsp;
4498 sk->sk_state = BT_CONFIG;
4499 result = L2CAP_CR_SUCCESS;
4501 sk->sk_state = BT_DISCONN;
4502 l2cap_sock_set_timer(sk, HZ / 10);
4503 result = L2CAP_CR_SEC_BLOCK;
4506 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4507 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4508 rsp.result = cpu_to_le16(result);
4509 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4510 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4511 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4517 read_unlock(&l->lock);
4522 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4524 struct l2cap_conn *conn = hcon->l2cap_data;
4526 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4529 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4531 if (flags & ACL_START) {
4532 struct l2cap_hdr *hdr;
4536 BT_ERR("Unexpected start frame (len %d)", skb->len);
4537 kfree_skb(conn->rx_skb);
4538 conn->rx_skb = NULL;
4540 l2cap_conn_unreliable(conn, ECOMM);
4544 BT_ERR("Frame is too short (len %d)", skb->len);
4545 l2cap_conn_unreliable(conn, ECOMM);
4549 hdr = (struct l2cap_hdr *) skb->data;
4550 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4552 if (len == skb->len) {
4553 /* Complete frame received */
4554 l2cap_recv_frame(conn, skb);
4558 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4560 if (skb->len > len) {
4561 BT_ERR("Frame is too long (len %d, expected len %d)",
4563 l2cap_conn_unreliable(conn, ECOMM);
4567 /* Allocate skb for the complete frame (with header) */
4568 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4572 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4574 conn->rx_len = len - skb->len;
4576 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4578 if (!conn->rx_len) {
4579 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4580 l2cap_conn_unreliable(conn, ECOMM);
4584 if (skb->len > conn->rx_len) {
4585 BT_ERR("Fragment is too long (len %d, expected %d)",
4586 skb->len, conn->rx_len);
4587 kfree_skb(conn->rx_skb);
4588 conn->rx_skb = NULL;
4590 l2cap_conn_unreliable(conn, ECOMM);
4594 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4596 conn->rx_len -= skb->len;
4598 if (!conn->rx_len) {
4599 /* Complete frame received */
4600 l2cap_recv_frame(conn, conn->rx_skb);
4601 conn->rx_skb = NULL;
4610 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4613 struct hlist_node *node;
4615 read_lock_bh(&l2cap_sk_list.lock);
4617 sk_for_each(sk, node, &l2cap_sk_list.head) {
4618 struct l2cap_pinfo *pi = l2cap_pi(sk);
4620 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4621 batostr(&bt_sk(sk)->src),
4622 batostr(&bt_sk(sk)->dst),
4623 sk->sk_state, __le16_to_cpu(pi->psm),
4625 pi->imtu, pi->omtu, pi->sec_level);
4628 read_unlock_bh(&l2cap_sk_list.lock);
4633 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4635 return single_open(file, l2cap_debugfs_show, inode->i_private);
4638 static const struct file_operations l2cap_debugfs_fops = {
4639 .open = l2cap_debugfs_open,
4641 .llseek = seq_lseek,
4642 .release = single_release,
4645 static struct dentry *l2cap_debugfs;
4647 static const struct proto_ops l2cap_sock_ops = {
4648 .family = PF_BLUETOOTH,
4649 .owner = THIS_MODULE,
4650 .release = l2cap_sock_release,
4651 .bind = l2cap_sock_bind,
4652 .connect = l2cap_sock_connect,
4653 .listen = l2cap_sock_listen,
4654 .accept = l2cap_sock_accept,
4655 .getname = l2cap_sock_getname,
4656 .sendmsg = l2cap_sock_sendmsg,
4657 .recvmsg = l2cap_sock_recvmsg,
4658 .poll = bt_sock_poll,
4659 .ioctl = bt_sock_ioctl,
4660 .mmap = sock_no_mmap,
4661 .socketpair = sock_no_socketpair,
4662 .shutdown = l2cap_sock_shutdown,
4663 .setsockopt = l2cap_sock_setsockopt,
4664 .getsockopt = l2cap_sock_getsockopt
4667 static const struct net_proto_family l2cap_sock_family_ops = {
4668 .family = PF_BLUETOOTH,
4669 .owner = THIS_MODULE,
4670 .create = l2cap_sock_create,
4673 static struct hci_proto l2cap_hci_proto = {
4675 .id = HCI_PROTO_L2CAP,
4676 .connect_ind = l2cap_connect_ind,
4677 .connect_cfm = l2cap_connect_cfm,
4678 .disconn_ind = l2cap_disconn_ind,
4679 .disconn_cfm = l2cap_disconn_cfm,
4680 .security_cfm = l2cap_security_cfm,
4681 .recv_acldata = l2cap_recv_acldata
4684 static int __init l2cap_init(void)
4688 err = proto_register(&l2cap_proto, 0);
4692 _busy_wq = create_singlethread_workqueue("l2cap");
4696 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4698 BT_ERR("L2CAP socket registration failed");
4702 err = hci_register_proto(&l2cap_hci_proto);
4704 BT_ERR("L2CAP protocol registration failed");
4705 bt_sock_unregister(BTPROTO_L2CAP);
4710 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4711 bt_debugfs, NULL, &l2cap_debugfs_fops);
4713 BT_ERR("Failed to create L2CAP debug file");
4716 BT_INFO("L2CAP ver %s", VERSION);
4717 BT_INFO("L2CAP socket layer initialized");
4722 proto_unregister(&l2cap_proto);
4726 static void __exit l2cap_exit(void)
4728 debugfs_remove(l2cap_debugfs);
4730 flush_workqueue(_busy_wq);
4731 destroy_workqueue(_busy_wq);
4733 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4734 BT_ERR("L2CAP socket unregistration failed");
4736 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4737 BT_ERR("L2CAP protocol unregistration failed");
4739 proto_unregister(&l2cap_proto);
/* Dummy function to trigger automatic L2CAP module loading by
 * other modules that use L2CAP sockets but don't use any other
 * symbols from it. */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
4750 module_init(l2cap_init);
4751 module_exit(l2cap_exit);
4753 module_param(enable_ertm, bool, 0644);
4754 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4756 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4757 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4758 MODULE_VERSION(VERSION);
4759 MODULE_LICENSE("GPL");
4760 MODULE_ALIAS("bt-proto-0");