2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 static int enable_ertm = 0;
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
65 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, guarded by its own rwlock. */
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
/* sk_timer callback: derive a close reason from the socket state and
 * tear the channel down.  NOTE(review): lines are elided in this
 * extract (locking, the default reason assignment, cleanup after
 * __l2cap_sock_close) — the visible body is not the complete function. */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
86 BT_DBG("sock %p state %d", sk, sk->sk_state);
90 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
91 reason = ECONNREFUSED;
92 else if (sk->sk_state == BT_CONNECT &&
93 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
94 reason = ECONNREFUSED;
98 __l2cap_sock_close(sk, reason);
/* Arm (or re-arm) the socket timer to fire `timeout` jiffies from now. */
106 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
108 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
109 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if any. */
112 static void l2cap_sock_clear_timer(struct sock *sk)
114 BT_DBG("sock %p state %d", sk, sk->sk_state);
115 sk_stop_timer(sk, &sk->sk_timer);
118 /* ---- L2CAP channels ---- */
/* Walk the connection's channel list looking for a destination CID
 * match.  Caller must hold the chan_list lock (no locking here). */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
/* As above, but match on the source CID.  Caller holds the list lock. */
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
139 /* Find channel with given SCID.
140 * Returns locked socket */
/* Locking wrapper: takes the list read lock around the raw lookup.
 * NOTE(review): the bh_lock_sock() on the found socket is in elided
 * lines of this extract. */
141 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
145 s = __l2cap_get_chan_by_scid(l, cid);
148 read_unlock(&l->lock);
/* Find the channel whose outstanding signalling request used `ident`.
 * Caller holds the list lock. */
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
162 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 s = __l2cap_get_chan_by_ident(l, ident);
169 read_unlock(&l->lock);
/* Allocate the first free dynamic source CID by linear scan of the
 * existing channels in [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
175 u16 cid = L2CAP_CID_DYN_START;
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push `sk` onto the head of the doubly linked channel list.
 * Caller is expected to hold the list write lock. */
185 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
190 l2cap_pi(l->head)->prev_c = sk;
192 l2cap_pi(sk)->next_c = l->head;
193 l2cap_pi(sk)->prev_c = NULL;
/* Remove `sk` from the channel list, fixing up neighbours' links.
 * Takes the list write lock itself (bh variant). */
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
199 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
201 write_lock_bh(&l->lock);
206 l2cap_pi(next)->prev_c = prev;
208 l2cap_pi(prev)->next_c = next;
209 write_unlock_bh(&l->lock);
/* Attach socket `sk` to connection `conn`: pick CIDs according to the
 * socket type, link it into the channel list, and (when accepting) queue
 * it on the listening parent.  Caller holds the chan_list write lock. */
214 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
216 struct l2cap_chan_list *l = &conn->chan_list;
218 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
219 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = "remote user terminated connection" default disconnect reason */
221 conn->disc_reason = 0x13;
223 l2cap_pi(sk)->conn = conn;
225 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
226 /* Alloc CID for connection-oriented socket */
227 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
228 } else if (sk->sk_type == SOCK_DGRAM) {
229 /* Connectionless socket */
230 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
231 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
232 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 /* Raw socket can send/recv signalling messages only */
235 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
236 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
240 __l2cap_chan_link(l, sk);
243 bt_accept_enqueue(parent, sk);
/* Detach the channel from its connection and mark the socket closed.
 * Also cancels the ERTM timers and purges the ERTM queues when that
 * mode is active. */
247 * Must be called on the locked socket. */
248 static void l2cap_chan_del(struct sock *sk, int err)
250 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
251 struct sock *parent = bt_sk(sk)->parent;
253 l2cap_sock_clear_timer(sk);
255 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
258 /* Unlink from channel list */
259 l2cap_chan_unlink(&conn->chan_list, sk);
260 l2cap_pi(sk)->conn = NULL;
/* drop the ACL reference this channel held */
261 hci_conn_put(conn->hcon);
264 sk->sk_state = BT_CLOSED;
265 sock_set_flag(sk, SOCK_ZAPPED);
/* if still on a listener's accept queue, unlink and wake the parent */
271 bt_accept_unlink(sk);
272 parent->sk_data_ready(parent, 0);
274 sk->sk_state_change(sk);
276 skb_queue_purge(TX_QUEUE(sk));
278 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
279 struct srej_list *l, *tmp;
281 del_timer(&l2cap_pi(sk)->retrans_timer);
282 del_timer(&l2cap_pi(sk)->monitor_timer);
283 del_timer(&l2cap_pi(sk)->ack_timer);
285 skb_queue_purge(SREJ_QUEUE(sk));
286 skb_queue_purge(BUSY_QUEUE(sk));
/* free every pending SREJ list entry (kfree in elided lines) */
288 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
295 /* Service level security */
/* Map the channel's PSM and requested sec_level to an HCI auth type,
 * then ask the HCI layer to enforce it.  PSM 0x0001 (SDP) never
 * requires bonding. */
296 static inline int l2cap_check_security(struct sock *sk)
298 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
301 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
302 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
303 auth_type = HCI_AT_NO_BONDING_MITM;
305 auth_type = HCI_AT_NO_BONDING;
307 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
308 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
310 switch (l2cap_pi(sk)->sec_level) {
311 case BT_SECURITY_HIGH:
312 auth_type = HCI_AT_GENERAL_BONDING_MITM;
314 case BT_SECURITY_MEDIUM:
315 auth_type = HCI_AT_GENERAL_BONDING;
/* default: no bonding required */
318 auth_type = HCI_AT_NO_BONDING;
323 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved 1..128 range under conn->lock. */
327 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
331 /* Get next available identificator.
332 * 1 - 128 are used by kernel.
333 * 333 - 129 - 199 are reserved.
334 * 200 - 254 are used by utilities like l2ping, etc.
337 spin_lock_bh(&conn->lock);
339 if (++conn->tx_ident > 128)
344 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command skb and push it out over the ACL link. */
349 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
351 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
353 BT_DBG("code 0x%2.2x", code);
358 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame carrying `control`.  Appends the
 * pending Final/Poll bits, and a CRC16 FCS when the channel negotiated
 * it.  Silently does nothing unless the socket is BT_CONNECTED. */
361 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
364 struct l2cap_hdr *lh;
365 struct l2cap_conn *conn = pi->conn;
366 struct sock *sk = (struct sock *)pi;
367 int count, hlen = L2CAP_HDR_SIZE + 2;
369 if (sk->sk_state != BT_CONNECTED)
/* FCS adds 2 more bytes to the header length (hlen += 2 elided here) */
372 if (pi->fcs == L2CAP_FCS_CRC16)
375 BT_DBG("pi %p, control 0x%2.2x", pi, control);
377 count = min_t(unsigned int, conn->mtu, hlen);
378 control |= L2CAP_CTRL_FRAME_TYPE;
380 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
381 control |= L2CAP_CTRL_FINAL;
382 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
385 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
386 control |= L2CAP_CTRL_POLL;
387 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
390 skb = bt_skb_alloc(count, GFP_ATOMIC);
394 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
395 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
396 lh->cid = cpu_to_le16(pi->dcid);
397 put_unaligned_le16(control, skb_put(skb, 2));
399 if (pi->fcs == L2CAP_FCS_CRC16) {
/* FCS covers header + control, i.e. everything before the FCS field */
400 u16 fcs = crc16(0, (u8 *)lh, count - 2);
401 put_unaligned_le16(fcs, skb_put(skb, 2));
404 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send a Receiver-Ready, or Receiver-Not-Ready when we are locally
 * busy, acknowledging up to buffer_seq. */
407 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
409 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
410 control |= L2CAP_SUPER_RCV_NOT_READY;
411 pi->conn_state |= L2CAP_CONN_RNR_SENT;
413 control |= L2CAP_SUPER_RCV_READY;
415 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
417 l2cap_send_sframe(pi, control);
/* True when no L2CAP Connect Request is currently outstanding. */
420 static inline int __l2cap_no_conn_pending(struct sock *sk)
422 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: if the remote feature mask is already
 * known, send a Connect Request (security permitting); otherwise first
 * issue an Information Request for the feature mask and start the info
 * timer. */
425 static void l2cap_do_start(struct sock *sk)
427 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
429 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* feature exchange still in flight — wait for it to finish */
430 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
433 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
434 struct l2cap_conn_req req;
435 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
436 req.psm = l2cap_pi(sk)->psm;
438 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
439 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
441 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
442 L2CAP_CONN_REQ, sizeof(req), &req);
445 struct l2cap_info_req req;
446 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
448 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
449 conn->info_ident = l2cap_get_ident(conn);
451 mod_timer(&conn->info_timer, jiffies +
452 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
454 l2cap_send_cmd(conn, conn->info_ident,
455 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send a Disconnect Request for this channel, first flushing the TX
 * queue and stopping the ERTM timers, then move to BT_DISCONN. */
459 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
461 struct l2cap_disconn_req req;
466 skb_queue_purge(TX_QUEUE(sk));
468 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
469 del_timer(&l2cap_pi(sk)->retrans_timer);
470 del_timer(&l2cap_pi(sk)->monitor_timer);
471 del_timer(&l2cap_pi(sk)->ack_timer);
474 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
475 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
476 l2cap_send_cmd(conn, l2cap_get_ident(conn),
477 L2CAP_DISCONN_REQ, sizeof(req), &req);
479 sk->sk_state = BT_DISCONN;
483 /* ---- L2CAP connections ---- */
/* After the feature exchange completes, walk every channel on the
 * connection: issue Connect Requests for channels in BT_CONNECT, and
 * answer pending incoming requests (BT_CONNECT2) with success, pending
 * (deferred setup / authentication) as appropriate. */
484 static void l2cap_conn_start(struct l2cap_conn *conn)
486 struct l2cap_chan_list *l = &conn->chan_list;
489 BT_DBG("conn %p", conn);
493 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* only connection-oriented sockets take part in setup */
496 if (sk->sk_type != SOCK_SEQPACKET &&
497 sk->sk_type != SOCK_STREAM) {
502 if (sk->sk_state == BT_CONNECT) {
503 if (l2cap_check_security(sk) &&
504 __l2cap_no_conn_pending(sk)) {
505 struct l2cap_conn_req req;
506 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
507 req.psm = l2cap_pi(sk)->psm;
509 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
510 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
512 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
513 L2CAP_CONN_REQ, sizeof(req), &req);
515 } else if (sk->sk_state == BT_CONNECT2) {
516 struct l2cap_conn_rsp rsp;
517 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
518 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
520 if (l2cap_check_security(sk)) {
521 if (bt_sk(sk)->defer_setup) {
522 struct sock *parent = bt_sk(sk)->parent;
523 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
524 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
/* wake the listener so userspace can accept() and decide */
525 parent->sk_data_ready(parent, 0);
528 sk->sk_state = BT_CONFIG;
529 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
530 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
533 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
534 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
537 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
538 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
544 read_unlock(&l->lock);
/* ACL link is up: mark raw/dgram sockets connected immediately and
 * start channel setup for connection-oriented ones. */
547 static void l2cap_conn_ready(struct l2cap_conn *conn)
549 struct l2cap_chan_list *l = &conn->chan_list;
552 BT_DBG("conn %p", conn);
556 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
559 if (sk->sk_type != SOCK_SEQPACKET &&
560 sk->sk_type != SOCK_STREAM) {
561 l2cap_sock_clear_timer(sk);
562 sk->sk_state = BT_CONNECTED;
563 sk->sk_state_change(sk);
564 } else if (sk->sk_state == BT_CONNECT)
570 read_unlock(&l->lock);
573 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate `err` to every channel that asked for force_reliable. */
574 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
576 struct l2cap_chan_list *l = &conn->chan_list;
579 BT_DBG("conn %p", conn);
583 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
584 if (l2cap_pi(sk)->force_reliable)
588 read_unlock(&l->lock);
/* Info-request timer expired: give up on the feature exchange, mark it
 * done and proceed with channel setup anyway. */
591 static void l2cap_info_timeout(unsigned long arg)
593 struct l2cap_conn *conn = (void *) arg;
595 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
596 conn->info_ident = 0;
598 l2cap_conn_start(conn);
/* Get-or-create the l2cap_conn attached to an ACL link.  Allocated
 * atomically since this can run in softirq context. */
601 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
603 struct l2cap_conn *conn = hcon->l2cap_data;
608 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
612 hcon->l2cap_data = conn;
615 BT_DBG("hcon %p conn %p", hcon, conn);
617 conn->mtu = hcon->hdev->acl_mtu;
618 conn->src = &hcon->hdev->bdaddr;
619 conn->dst = &hcon->dst;
623 spin_lock_init(&conn->lock);
624 rwlock_init(&conn->chan_list.lock);
626 setup_timer(&conn->info_timer, l2cap_info_timeout,
627 (unsigned long) conn);
/* 0x13 = "remote user terminated connection" default disconnect reason */
629 conn->disc_reason = 0x13;
/* Tear down an l2cap_conn: free any partial RX skb, close every
 * channel with `err`, stop the info timer, and detach from the hcon. */
634 static void l2cap_conn_del(struct hci_conn *hcon, int err)
636 struct l2cap_conn *conn = hcon->l2cap_data;
642 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
644 kfree_skb(conn->rx_skb);
647 while ((sk = conn->chan_list.head)) {
649 l2cap_chan_del(sk, err);
654 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
655 del_timer_sync(&conn->info_timer);
657 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
661 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
663 struct l2cap_chan_list *l = &conn->chan_list;
664 write_lock_bh(&l->lock);
665 __l2cap_chan_add(conn, sk, parent);
666 write_unlock_bh(&l->lock);
669 /* ---- Socket interface ---- */
/* Exact-match lookup of a bound socket by (sport, source bdaddr).
 * Caller holds l2cap_sk_list.lock. */
670 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
673 struct hlist_node *node;
674 sk_for_each(sk, node, &l2cap_sk_list.head)
675 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
682 /* Find socket with psm and source bdaddr.
683 * Returns closest match.
/* An exact source-address match wins; a BDADDR_ANY wildcard bind is
 * remembered in sk1 as fallback.  Caller holds l2cap_sk_list.lock. */
685 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
687 struct sock *sk = NULL, *sk1 = NULL;
688 struct hlist_node *node;
690 sk_for_each(sk, node, &l2cap_sk_list.head) {
691 if (state && sk->sk_state != state)
694 if (l2cap_pi(sk)->psm == psm) {
696 if (!bacmp(&bt_sk(sk)->src, src))
700 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop broke on an exact match */
704 return node ? sk : sk1;
707 /* Find socket with given address (psm, src).
708 * Returns locked socket */
/* Locking wrapper; the bh_lock_sock() on the result is in elided lines. */
709 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
712 read_lock(&l2cap_sk_list.lock);
713 s = __l2cap_get_sock_by_psm(state, psm, src);
716 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any queued receive/transmit skbs. */
720 static void l2cap_sock_destruct(struct sock *sk)
724 skb_queue_purge(&sk->sk_receive_queue);
725 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then zap
 * the parent itself. */
728 static void l2cap_sock_cleanup_listen(struct sock *parent)
732 BT_DBG("parent %p", parent);
734 /* Close not yet accepted channels */
735 while ((sk = bt_accept_dequeue(parent, NULL)))
736 l2cap_sock_close(sk);
738 parent->sk_state = BT_CLOSED;
739 sock_set_flag(parent, SOCK_ZAPPED);
742 /* Kill socket (only if zapped and orphan)
743 * Must be called on unlocked socket.
745 static void l2cap_sock_kill(struct sock *sk)
/* bail out unless the socket is zapped and has no owning struct socket */
747 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
750 BT_DBG("sk %p state %d", sk, sk->sk_state);
752 /* Kill poor orphan */
753 bt_sock_unlink(&l2cap_sk_list, sk);
754 sock_set_flag(sk, SOCK_DEAD);
/* State-machine driven close: listeners clean their accept queue,
 * connected/config channels send a Disconnect Request, half-open
 * incoming channels (BT_CONNECT2) are refused with a Connect Response,
 * everything else is simply deleted or zapped.
 * NOTE(review): the switch case labels themselves are in lines elided
 * from this extract. */
758 static void __l2cap_sock_close(struct sock *sk, int reason)
760 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
762 switch (sk->sk_state) {
764 l2cap_sock_cleanup_listen(sk);
769 if (sk->sk_type == SOCK_SEQPACKET ||
770 sk->sk_type == SOCK_STREAM) {
771 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
773 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
774 l2cap_send_disconn_req(conn, sk, reason);
776 l2cap_chan_del(sk, reason);
780 if (sk->sk_type == SOCK_SEQPACKET ||
781 sk->sk_type == SOCK_STREAM) {
782 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
783 struct l2cap_conn_rsp rsp;
786 if (bt_sk(sk)->defer_setup)
787 result = L2CAP_CR_SEC_BLOCK;
789 result = L2CAP_CR_BAD_PSM;
791 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
792 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
793 rsp.result = cpu_to_le16(result);
794 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
795 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
796 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
798 l2cap_chan_del(sk, reason);
803 l2cap_chan_del(sk, reason);
807 sock_set_flag(sk, SOCK_ZAPPED);
812 /* Must be called on unlocked socket. */
/* Public close: stop the timer and close with ECONNRESET (socket
 * locking/kill around the call is in elided lines). */
813 static void l2cap_sock_close(struct sock *sk)
815 l2cap_sock_clear_timer(sk);
817 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a fresh L2CAP socket.  When `parent` is set (incoming
 * accept), inherit its type and all channel options; otherwise apply
 * the defaults, selecting ERTM for SOCK_STREAM when the module's
 * enable_ertm flag is set. */
822 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
824 struct l2cap_pinfo *pi = l2cap_pi(sk);
829 sk->sk_type = parent->sk_type;
830 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
832 pi->imtu = l2cap_pi(parent)->imtu;
833 pi->omtu = l2cap_pi(parent)->omtu;
834 pi->conf_state = l2cap_pi(parent)->conf_state;
835 pi->mode = l2cap_pi(parent)->mode;
836 pi->fcs = l2cap_pi(parent)->fcs;
837 pi->max_tx = l2cap_pi(parent)->max_tx;
838 pi->tx_win = l2cap_pi(parent)->tx_win;
839 pi->sec_level = l2cap_pi(parent)->sec_level;
840 pi->role_switch = l2cap_pi(parent)->role_switch;
841 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* no parent: defaults */
843 pi->imtu = L2CAP_DEFAULT_MTU;
845 if (enable_ertm && sk->sk_type == SOCK_STREAM) {
846 pi->mode = L2CAP_MODE_ERTM;
847 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
849 pi->mode = L2CAP_MODE_BASIC;
851 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
852 pi->fcs = L2CAP_FCS_CRC16;
853 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
854 pi->sec_level = BT_SECURITY_LOW;
856 pi->force_reliable = 0;
859 /* Default config options */
861 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* per-socket ERTM queue/list heads must always be initialized */
862 skb_queue_head_init(TX_QUEUE(sk));
863 skb_queue_head_init(SREJ_QUEUE(sk));
864 skb_queue_head_init(BUSY_QUEUE(sk));
865 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sk_alloc() sizes sockets as l2cap_pinfo. */
868 static struct proto l2cap_proto = {
870 .owner = THIS_MODULE,
871 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP sock: destructor, connect
 * timeout, state BT_OPEN, sk_timer, and registration on the global list. */
874 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
878 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
882 sock_init_data(sock, sk);
883 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
885 sk->sk_destruct = l2cap_sock_destruct;
886 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
888 sock_reset_flag(sk, SOCK_ZAPPED);
890 sk->sk_protocol = proto;
891 sk->sk_state = BT_OPEN;
893 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
895 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type, require CAP_NET_RAW for
 * userspace raw sockets, then allocate and initialize the sock. */
899 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
904 BT_DBG("sock %p", sock);
906 sock->state = SS_UNCONNECTED;
908 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
909 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
910 return -ESOCKTNOSUPPORT;
912 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
915 sock->ops = &l2cap_sock_ops;
917 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
921 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, enforce
 * CAP_NET_BIND_SERVICE for privileged PSMs (< 0x1001), reject duplicate
 * (psm, bdaddr) bindings, record the source address, and bump SDP/RFCOMM
 * well-known PSMs to BT_SECURITY_SDP. */
925 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
927 struct sock *sk = sock->sk;
928 struct sockaddr_l2 la;
933 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* tolerate short sockaddrs from old userspace: copy what fits */
936 memset(&la, 0, sizeof(la));
937 len = min_t(unsigned int, sizeof(la), alen);
938 memcpy(&la, addr, len);
945 if (sk->sk_state != BT_OPEN) {
950 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
951 !capable(CAP_NET_BIND_SERVICE)) {
956 write_lock_bh(&l2cap_sk_list.lock);
958 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
961 /* Save source address */
962 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
963 l2cap_pi(sk)->psm = la.l2_psm;
964 l2cap_pi(sk)->sport = la.l2_psm;
965 sk->sk_state = BT_BOUND;
/* 0x0001 = SDP, 0x0003 = RFCOMM: never require full security */
967 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
968 __le16_to_cpu(la.l2_psm) == 0x0003)
969 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
972 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to `dst`, choose the HCI authentication type from
 * socket type / PSM / sec_level, create (or reuse) the ACL link and its
 * l2cap_conn, attach this channel, and — if the link is already up for a
 * raw/dgram socket — complete the connect immediately. */
979 static int l2cap_do_connect(struct sock *sk)
981 bdaddr_t *src = &bt_sk(sk)->src;
982 bdaddr_t *dst = &bt_sk(sk)->dst;
983 struct l2cap_conn *conn;
984 struct hci_conn *hcon;
985 struct hci_dev *hdev;
989 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
992 hdev = hci_get_route(dst, src);
994 return -EHOSTUNREACH;
996 hci_dev_lock_bh(hdev);
/* raw sockets: dedicated bonding per sec_level */
1000 if (sk->sk_type == SOCK_RAW) {
1001 switch (l2cap_pi(sk)->sec_level) {
1002 case BT_SECURITY_HIGH:
1003 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1005 case BT_SECURITY_MEDIUM:
1006 auth_type = HCI_AT_DEDICATED_BONDING;
1009 auth_type = HCI_AT_NO_BONDING;
/* PSM 0x0001 (SDP): no bonding needed */
1012 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1013 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1014 auth_type = HCI_AT_NO_BONDING_MITM;
1016 auth_type = HCI_AT_NO_BONDING;
1018 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1019 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
/* everything else: general bonding per sec_level */
1021 switch (l2cap_pi(sk)->sec_level) {
1022 case BT_SECURITY_HIGH:
1023 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1025 case BT_SECURITY_MEDIUM:
1026 auth_type = HCI_AT_GENERAL_BONDING;
1029 auth_type = HCI_AT_NO_BONDING;
1034 hcon = hci_connect(hdev, ACL_LINK, dst,
1035 l2cap_pi(sk)->sec_level, auth_type);
1039 conn = l2cap_conn_add(hcon, 0);
1047 /* Update source addr of the socket */
1048 bacpy(src, conn->src);
1050 l2cap_chan_add(conn, sk, NULL);
1052 sk->sk_state = BT_CONNECT;
1053 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1055 if (hcon->state == BT_CONNECTED) {
1056 if (sk->sk_type != SOCK_SEQPACKET &&
1057 sk->sk_type != SOCK_STREAM) {
1058 l2cap_sock_clear_timer(sk);
1059 sk->sk_state = BT_CONNECTED;
1065 hci_dev_unlock_bh(hdev);
/* connect(2): validate address and channel mode (ERTM/streaming only
 * when enabled), check the state machine, store the destination, start
 * the connect, then wait for BT_CONNECTED honoring O_NONBLOCK. */
1070 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1072 struct sock *sk = sock->sk;
1073 struct sockaddr_l2 la;
1076 BT_DBG("sk %p", sk);
1078 if (!addr || alen < sizeof(addr->sa_family) ||
1079 addr->sa_family != AF_BLUETOOTH)
1082 memset(&la, 0, sizeof(la));
1083 len = min_t(unsigned int, sizeof(la), alen);
1084 memcpy(&la, addr, len);
/* connection-oriented sockets require a PSM */
1091 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1097 switch (l2cap_pi(sk)->mode) {
1098 case L2CAP_MODE_BASIC:
1100 case L2CAP_MODE_ERTM:
1101 case L2CAP_MODE_STREAMING:
1110 switch (sk->sk_state) {
1114 /* Already connecting */
1118 /* Already connected */
1131 /* Set destination address and psm */
1132 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1133 l2cap_pi(sk)->psm = la.l2_psm;
1135 err = l2cap_do_connect(sk);
1140 err = bt_sock_wait_state(sk, BT_CONNECTED,
1141 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET/STREAM sockets may listen; validate
 * the channel mode; auto-assign an odd dynamic PSM (0x1001..0x10ff) if
 * the socket was bound without one; then enter BT_LISTEN. */
1147 static int l2cap_sock_listen(struct socket *sock, int backlog)
1149 struct sock *sk = sock->sk;
1152 BT_DBG("sk %p backlog %d", sk, backlog);
1156 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1157 || sk->sk_state != BT_BOUND) {
1162 switch (l2cap_pi(sk)->mode) {
1163 case L2CAP_MODE_BASIC:
1165 case L2CAP_MODE_ERTM:
1166 case L2CAP_MODE_STREAMING:
1175 if (!l2cap_pi(sk)->psm) {
1176 bdaddr_t *src = &bt_sk(sk)->src;
1181 write_lock_bh(&l2cap_sk_list.lock);
/* dynamic PSMs must be odd, hence the += 2 stride */
1183 for (psm = 0x1001; psm < 0x1100; psm += 2)
1184 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1185 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1186 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1191 write_unlock_bh(&l2cap_sk_list.lock);
1197 sk->sk_max_ack_backlog = backlog;
1198 sk->sk_ack_backlog = 0;
1199 sk->sk_state = BT_LISTEN;
/* accept(2): block (wake-one, interruptible) on the listener's accept
 * queue until a child socket arrives, the timeout elapses, or a signal
 * is pending; re-check the LISTEN state after every wakeup since the
 * lock is dropped across schedule_timeout(). */
1206 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1208 DECLARE_WAITQUEUE(wait, current);
1209 struct sock *sk = sock->sk, *nsk;
1213 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1215 if (sk->sk_state != BT_LISTEN) {
1220 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1222 BT_DBG("sk %p timeo %ld", sk, timeo);
1224 /* Wait for an incoming connection. (wake-one). */
1225 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1226 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1227 set_current_state(TASK_INTERRUPTIBLE);
1234 timeo = schedule_timeout(timeo);
1235 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1237 if (sk->sk_state != BT_LISTEN) {
1242 if (signal_pending(current)) {
1243 err = sock_intr_errno(timeo);
1247 set_current_state(TASK_RUNNING);
1248 remove_wait_queue(sk_sleep(sk), &wait);
1253 newsock->state = SS_CONNECTED;
1255 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 from either the peer
 * (dst/dcid) or the local side (src/scid) depending on `peer`. */
1262 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1264 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1265 struct sock *sk = sock->sk;
1267 BT_DBG("sock %p, sk %p", sock, sk);
1269 addr->sa_family = AF_BLUETOOTH;
1270 *len = sizeof(struct sockaddr_l2);
/* peer branch (the `if (peer)` test is in elided lines) */
1273 la->l2_psm = l2cap_pi(sk)->psm;
1274 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1275 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1277 la->l2_psm = l2cap_pi(sk)->sport;
1278 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1279 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Block (interruptibly) until all outstanding ERTM I-frames are acked
 * or the channel loses its connection; also bails on socket error or a
 * pending signal. */
1285 static int __l2cap_wait_ack(struct sock *sk)
1287 DECLARE_WAITQUEUE(wait, current);
1291 add_wait_queue(sk_sleep(sk), &wait);
1292 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1293 set_current_state(TASK_INTERRUPTIBLE);
1298 if (signal_pending(current)) {
1299 err = sock_intr_errno(timeo);
1304 timeo = schedule_timeout(timeo);
1307 err = sock_error(sk);
1311 set_current_state(TASK_RUNNING);
1312 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: after remote_max_tx unanswered polls, abort the
 * channel; otherwise poll the peer again (RR/RNR with P-bit) and re-arm. */
1316 static void l2cap_monitor_timeout(unsigned long arg)
1318 struct sock *sk = (void *) arg;
1320 BT_DBG("sk %p", sk);
1323 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1324 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1329 l2cap_pi(sk)->retry_count++;
1330 __mod_monitor_timer();
1332 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: switch to the monitor timer, enter the
 * WAIT_F state and poll the peer with the P-bit set. */
1336 static void l2cap_retrans_timeout(unsigned long arg)
1338 struct sock *sk = (void *) arg;
1340 BT_DBG("sk %p", sk);
1343 l2cap_pi(sk)->retry_count = 1;
1344 __mod_monitor_timer();
1346 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1348 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Drop acknowledged I-frames off the head of the TX queue, stopping at
 * expected_ack_seq; stop the retransmission timer once nothing is
 * outstanding. */
1352 static void l2cap_drop_acked_frames(struct sock *sk)
1354 struct sk_buff *skb;
1356 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1357 l2cap_pi(sk)->unacked_frames) {
1358 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1361 skb = skb_dequeue(TX_QUEUE(sk));
1364 l2cap_pi(sk)->unacked_frames--;
1367 if (!l2cap_pi(sk)->unacked_frames)
1368 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand a fully built frame to the HCI layer for transmission. */
1371 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1373 struct l2cap_pinfo *pi = l2cap_pi(sk);
1375 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1377 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode TX: clone each queued frame, stamp TxSeq (mod 64) and
 * optional FCS into the clone, send it, and free the original — no
 * retransmission bookkeeping in this mode. */
1380 static int l2cap_streaming_send(struct sock *sk)
1382 struct sk_buff *skb, *tx_skb;
1383 struct l2cap_pinfo *pi = l2cap_pi(sk);
1386 while ((skb = sk->sk_send_head)) {
1387 tx_skb = skb_clone(skb, GFP_ATOMIC);
1389 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1390 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1391 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1393 if (pi->fcs == L2CAP_FCS_CRC16) {
/* FCS over everything but the trailing 2-byte FCS field itself */
1394 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1395 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1398 l2cap_do_send(sk, tx_skb);
1400 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1402 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1403 sk->sk_send_head = NULL;
1405 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1407 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the single I-frame with the given TxSeq (SREJ handling):
 * locate it in the TX queue, abort the channel if it already hit
 * remote_max_tx retries, otherwise clone it with refreshed ReqSeq,
 * F-bit and FCS, and resend. */
1413 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1415 struct l2cap_pinfo *pi = l2cap_pi(sk);
1416 struct sk_buff *skb, *tx_skb;
1419 skb = skb_peek(TX_QUEUE(sk));
1424 if (bt_cb(skb)->tx_seq == tx_seq)
1427 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1430 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1432 if (pi->remote_max_tx &&
1433 bt_cb(skb)->retries == pi->remote_max_tx) {
1434 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1438 tx_skb = skb_clone(skb, GFP_ATOMIC);
1439 bt_cb(skb)->retries++;
1440 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1442 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1443 control |= L2CAP_CTRL_FINAL;
1444 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1447 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1448 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1450 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1452 if (pi->fcs == L2CAP_FCS_CRC16) {
1453 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1454 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1457 l2cap_do_send(sk, tx_skb);
/* ERTM TX path: while the TX window has room, clone the next frame,
 * stamp ReqSeq/TxSeq/F-bit (and FCS), send the clone, start the
 * retransmission timer and track it as unacked. */
1460 static int l2cap_ertm_send(struct sock *sk)
1462 struct sk_buff *skb, *tx_skb;
1463 struct l2cap_pinfo *pi = l2cap_pi(sk);
1467 if (sk->sk_state != BT_CONNECTED)
1470 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1472 if (pi->remote_max_tx &&
1473 bt_cb(skb)->retries == pi->remote_max_tx) {
1474 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1478 tx_skb = skb_clone(skb, GFP_ATOMIC);
1480 bt_cb(skb)->retries++;
1482 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1483 control &= L2CAP_CTRL_SAR;
1485 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1486 control |= L2CAP_CTRL_FINAL;
1487 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1489 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1490 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1491 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1494 if (pi->fcs == L2CAP_FCS_CRC16) {
/* NOTE(review): FCS is computed from and written to skb->data while
 * the frame actually transmitted is the clone tx_skb (see
 * l2cap_do_send(sk, tx_skb) below).  l2cap_streaming_send and
 * l2cap_retransmit_one_frame both use tx_skb->data here — this looks
 * like a bug; confirm against the upstream fix before changing. */
1495 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1496 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1499 l2cap_do_send(sk, tx_skb);
1501 __mod_retrans_timer();
1503 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1504 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1506 pi->unacked_frames++;
1509 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1510 sk->sk_send_head = NULL;
1512 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Go-Back-N retransmission: rewind sk_send_head to the front of the TX
 * queue, reset next_tx_seq to the last acked sequence number and resend
 * everything, all under the send lock. */
1520 static int l2cap_retransmit_frames(struct sock *sk)
1522 struct l2cap_pinfo *pi = l2cap_pi(sk);
1525 spin_lock_bh(&pi->send_lock);
1527 if (!skb_queue_empty(TX_QUEUE(sk)))
1528 sk->sk_send_head = TX_QUEUE(sk)->next;
1530 pi->next_tx_seq = pi->expected_ack_seq;
1531 ret = l2cap_ertm_send(sk);
1533 spin_unlock_bh(&pi->send_lock);
/* Acknowledge received I-frames: if locally busy, send RNR; otherwise
 * try to piggy-back the ack on outgoing I-frames via l2cap_ertm_send(),
 * and fall back to an explicit RR if nothing was sent. */
1538 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1540 struct sock *sk = (struct sock *)pi;
1544 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1546 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1547 control |= L2CAP_SUPER_RCV_NOT_READY;
1548 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1549 l2cap_send_sframe(pi, control);
1553 spin_lock_bh(&pi->send_lock);
1554 nframes = l2cap_ertm_send(sk);
1555 spin_unlock_bh(&pi->send_lock);
/* no I-frames went out, so ack explicitly */
1560 control |= L2CAP_SUPER_RCV_READY;
1561 l2cap_send_sframe(pi, control);
/* Send a final SREJ S-frame for the last entry on the SREJ list. */
1564 static void l2cap_send_srejtail(struct sock *sk)
1566 struct srej_list *tail;
1569 control = L2CAP_SUPER_SELECT_REJECT;
1570 control |= L2CAP_CTRL_FINAL;
1572 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1573 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1575 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy 'len' bytes of user iovec data into 'skb': the first 'count' bytes go
 * into the skb's linear area, the remainder into frag_list continuation skbs
 * of at most conn->mtu each.  (Sampled listing; error paths elided.) */
1578 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1580 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1581 struct sk_buff **frag;
1584 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1590 /* Continuation fragments (no L2CAP header) */
1591 frag = &skb_shinfo(skb)->frag_list;
1593 count = min_t(unsigned int, conn->mtu, len);
1595 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1598 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1604 frag = &(*frag)->next;
/* Build a connectionless (SOCK_DGRAM) PDU: L2CAP header + 2-byte PSM,
 * payload filled from the user iovec.  Returns the skb or ERR_PTR. */
1610 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1612 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1613 struct sk_buff *skb;
/* hlen = basic L2CAP header + 2 bytes for the PSM field. */
1614 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1615 struct l2cap_hdr *lh;
1617 BT_DBG("sk %p len %d", sk, (int)len);
/* Linear part limited by the ACL MTU; rest goes to frag_list. */
1619 count = min_t(unsigned int, (conn->mtu - hlen), len);
1620 skb = bt_skb_send_alloc(sk, count + hlen,
1621 msg->msg_flags & MSG_DONTWAIT, &err);
1623 return ERR_PTR(-ENOMEM);
1625 /* Create L2CAP header */
1626 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1627 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1628 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1630 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1631 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1632 if (unlikely(err < 0)) {
1634 return ERR_PTR(err);
/* Build a Basic-mode PDU: plain L2CAP header, payload from the user iovec.
 * Returns the skb or ERR_PTR. */
1639 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1641 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1642 struct sk_buff *skb;
1643 int err, count, hlen = L2CAP_HDR_SIZE;
1644 struct l2cap_hdr *lh;
1646 BT_DBG("sk %p len %d", sk, (int)len);
1648 count = min_t(unsigned int, (conn->mtu - hlen), len);
1649 skb = bt_skb_send_alloc(sk, count + hlen,
1650 msg->msg_flags & MSG_DONTWAIT, &err);
1652 return ERR_PTR(-ENOMEM);
1654 /* Create L2CAP header */
1655 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1656 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1657 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1659 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1660 if (unlikely(err < 0)) {
1662 return ERR_PTR(err);
/* Build an ERTM/Streaming I-frame: L2CAP header + 16-bit control field,
 * optional 16-bit SDU length (SAR start frames), payload, and room for a
 * CRC16 FCS placeholder (filled in at transmit time).  Returns skb or
 * ERR_PTR.  (Sampled listing; some length bookkeeping lines elided.) */
1667 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1669 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1670 struct sk_buff *skb;
/* hlen = L2CAP header + 2-byte control field; grows below for sdulen/FCS. */
1671 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1672 struct l2cap_hdr *lh;
1674 BT_DBG("sk %p len %d", sk, (int)len);
1677 return ERR_PTR(-ENOTCONN);
1682 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1685 count = min_t(unsigned int, (conn->mtu - hlen), len);
1686 skb = bt_skb_send_alloc(sk, count + hlen,
1687 msg->msg_flags & MSG_DONTWAIT, &err);
1689 return ERR_PTR(-ENOMEM);
1691 /* Create L2CAP header */
1692 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1693 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1694 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1695 put_unaligned_le16(control, skb_put(skb, 2));
/* SDU length field is only present on SAR "start" frames. */
1697 put_unaligned_le16(sdulen, skb_put(skb, 2));
1699 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1700 if (unlikely(err < 0)) {
1702 return ERR_PTR(err);
/* FCS placeholder; real CRC is computed when the frame is sent. */
1705 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1706 put_unaligned_le16(0, skb_put(skb, 2));
1708 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START / CONTINUE* / END
 * sequence of I-frames, build them on a local queue, then splice the whole
 * queue onto TX_QUEUE atomically.  (Sampled listing; loop framing elided.) */
1712 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1714 struct l2cap_pinfo *pi = l2cap_pi(sk);
1715 struct sk_buff *skb;
1716 struct sk_buff_head sar_queue;
1720 skb_queue_head_init(&sar_queue);
/* First segment carries the total SDU length (passed as sdulen). */
1721 control = L2CAP_SDU_START;
1722 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1724 return PTR_ERR(skb);
1726 __skb_queue_tail(&sar_queue, skb);
1727 len -= pi->remote_mps;
1728 size += pi->remote_mps;
1733 if (len > pi->remote_mps) {
1734 control = L2CAP_SDU_CONTINUE;
1735 buflen = pi->remote_mps;
1737 control = L2CAP_SDU_END;
1741 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* On failure drop all segments built so far. */
1743 skb_queue_purge(&sar_queue);
1744 return PTR_ERR(skb);
1747 __skb_queue_tail(&sar_queue, skb);
/* Publish the whole SDU at once so partial SDUs never hit TX_QUEUE. */
1751 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1752 spin_lock_bh(&pi->send_lock);
1753 if (sk->sk_send_head == NULL)
1754 sk->sk_send_head = sar_queue.next;
1755 spin_unlock_bh(&pi->send_lock);
/* sendmsg() entry point: dispatch on socket type / channel mode.
 * SOCK_DGRAM -> connectionless PDU; BASIC -> single PDU bounded by omtu;
 * ERTM/STREAMING -> single I-frame or SAR segmentation, then kick the
 * transmit engine.  (Sampled listing; some lines are elided.) */
1760 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1762 struct sock *sk = sock->sk;
1763 struct l2cap_pinfo *pi = l2cap_pi(sk);
1764 struct sk_buff *skb;
1768 BT_DBG("sock %p, sk %p", sock, sk);
1770 err = sock_error(sk);
1774 if (msg->msg_flags & MSG_OOB)
1779 if (sk->sk_state != BT_CONNECTED) {
1784 /* Connectionless channel */
1785 if (sk->sk_type == SOCK_DGRAM) {
1786 skb = l2cap_create_connless_pdu(sk, msg, len);
1790 l2cap_do_send(sk, skb);
1797 case L2CAP_MODE_BASIC:
1798 /* Check outgoing MTU */
1799 if (len > pi->omtu) {
1804 /* Create a basic PDU */
1805 skb = l2cap_create_basic_pdu(sk, msg, len);
1811 l2cap_do_send(sk, skb);
1815 case L2CAP_MODE_ERTM:
1816 case L2CAP_MODE_STREAMING:
1817 /* Entire SDU fits into one PDU */
1818 if (len <= pi->remote_mps) {
1819 control = L2CAP_SDU_UNSEGMENTED;
1820 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1825 __skb_queue_tail(TX_QUEUE(sk), skb);
/* send_lock only guards ERTM's shared tx state; streaming has none. */
1827 if (pi->mode == L2CAP_MODE_ERTM)
1828 spin_lock_bh(&pi->send_lock);
1830 if (sk->sk_send_head == NULL)
1831 sk->sk_send_head = skb;
1833 if (pi->mode == L2CAP_MODE_ERTM)
1834 spin_unlock_bh(&pi->send_lock);
1836 /* Segment SDU into multiples PDUs */
1837 err = l2cap_sar_segment_sdu(sk, msg, len);
1842 if (pi->mode == L2CAP_MODE_STREAMING) {
1843 err = l2cap_streaming_send(sk);
/* FIX: was "pi->conn_state && L2CAP_CONN_WAIT_F" — logical-and with a
 * nonzero flag constant is always true; test the flag bit instead. */
1845 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1846 pi->conn_state & L2CAP_CONN_WAIT_F) {
1850 spin_lock_bh(&pi->send_lock);
1851 err = l2cap_ertm_send(sk);
1852 spin_unlock_bh(&pi->send_lock);
1860 BT_DBG("bad state %1.1x", pi->mode);
/* recvmsg() entry point.  For a deferred-setup connection the first read
 * completes the connect handshake by sending the pending CONN_RSP, then the
 * generic Bluetooth recvmsg does the actual data delivery. */
1869 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1871 struct sock *sk = sock->sk;
1875 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1876 struct l2cap_conn_rsp rsp;
1878 sk->sk_state = BT_CONFIG;
1880 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1881 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1882 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1883 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1884 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1885 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1893 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt: L2CAP_OPTIONS (channel parameters) and the
 * link-mode bitmask.  Defaults are pre-loaded from the current pinfo so a
 * short copy_from_user leaves unspecified fields unchanged.
 * (Sampled listing; switch framing and error paths elided.) */
1896 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1898 struct sock *sk = sock->sk;
1899 struct l2cap_options opts;
1903 BT_DBG("sk %p", sk);
1909 opts.imtu = l2cap_pi(sk)->imtu;
1910 opts.omtu = l2cap_pi(sk)->omtu;
1911 opts.flush_to = l2cap_pi(sk)->flush_to;
1912 opts.mode = l2cap_pi(sk)->mode;
1913 opts.fcs = l2cap_pi(sk)->fcs;
1914 opts.max_tx = l2cap_pi(sk)->max_tx;
1915 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1917 len = min_t(unsigned int, sizeof(opts), optlen);
1918 if (copy_from_user((char *) &opts, optval, len)) {
1923 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1928 l2cap_pi(sk)->mode = opts.mode;
1929 switch (l2cap_pi(sk)->mode) {
1930 case L2CAP_MODE_BASIC:
1931 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1933 case L2CAP_MODE_ERTM:
1934 case L2CAP_MODE_STREAMING:
1943 l2cap_pi(sk)->imtu = opts.imtu;
1944 l2cap_pi(sk)->omtu = opts.omtu;
1945 l2cap_pi(sk)->fcs = opts.fcs;
1946 l2cap_pi(sk)->max_tx = opts.max_tx;
1947 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1951 if (get_user(opt, (u32 __user *) optval)) {
/* LM flags map onto sec_level; last matching flag wins (SECURE > ENCRYPT
 * > AUTH by order of the assignments below). */
1956 if (opt & L2CAP_LM_AUTH)
1957 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1958 if (opt & L2CAP_LM_ENCRYPT)
1959 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1960 if (opt & L2CAP_LM_SECURE)
1961 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1963 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1964 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* setsockopt() entry point: SOL_L2CAP is routed to the legacy handler;
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP. */
1976 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1978 struct sock *sk = sock->sk;
1979 struct bt_security sec;
1983 BT_DBG("sk %p", sk);
1985 if (level == SOL_L2CAP)
1986 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1988 if (level != SOL_BLUETOOTH)
1989 return -ENOPROTOOPT;
1995 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1996 && sk->sk_type != SOCK_RAW) {
2001 sec.level = BT_SECURITY_LOW;
2003 len = min_t(unsigned int, sizeof(sec), optlen);
2004 if (copy_from_user((char *) &sec, optval, len)) {
2009 if (sec.level < BT_SECURITY_LOW ||
2010 sec.level > BT_SECURITY_HIGH) {
2015 l2cap_pi(sk)->sec_level = sec.level;
/* Defer-setup may only be toggled before the channel is connected. */
2018 case BT_DEFER_SETUP:
2019 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2024 if (get_user(opt, (u32 __user *) optval)) {
2029 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt: L2CAP_OPTIONS, L2CAP_LM (reconstructed from
 * sec_level + role flags) and L2CAP_CONNINFO.
 * (Sampled listing; switch framing and error paths elided.) */
2041 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2043 struct sock *sk = sock->sk;
2044 struct l2cap_options opts;
2045 struct l2cap_conninfo cinfo;
2049 BT_DBG("sk %p", sk);
2051 if (get_user(len, optlen))
2058 opts.imtu = l2cap_pi(sk)->imtu;
2059 opts.omtu = l2cap_pi(sk)->omtu;
2060 opts.flush_to = l2cap_pi(sk)->flush_to;
2061 opts.mode = l2cap_pi(sk)->mode;
2062 opts.fcs = l2cap_pi(sk)->fcs;
2063 opts.max_tx = l2cap_pi(sk)->max_tx;
2064 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2066 len = min_t(unsigned int, len, sizeof(opts));
2067 if (copy_to_user(optval, (char *) &opts, len))
/* Map the stored sec_level back to the legacy LM flag bitmask. */
2073 switch (l2cap_pi(sk)->sec_level) {
2074 case BT_SECURITY_LOW:
2075 opt = L2CAP_LM_AUTH;
2077 case BT_SECURITY_MEDIUM:
2078 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2080 case BT_SECURITY_HIGH:
2081 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2089 if (l2cap_pi(sk)->role_switch)
2090 opt |= L2CAP_LM_MASTER;
2092 if (l2cap_pi(sk)->force_reliable)
2093 opt |= L2CAP_LM_RELIABLE;
2095 if (put_user(opt, (u32 __user *) optval))
/* CONNINFO is valid once connected (or connect-pending with defer_setup). */
2099 case L2CAP_CONNINFO:
2100 if (sk->sk_state != BT_CONNECTED &&
2101 !(sk->sk_state == BT_CONNECT2 &&
2102 bt_sk(sk)->defer_setup)) {
2107 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2108 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2110 len = min_t(unsigned int, len, sizeof(cinfo));
2111 if (copy_to_user(optval, (char *) &cinfo, len))
/* getsockopt() entry point: SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP. */
2125 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2127 struct sock *sk = sock->sk;
2128 struct bt_security sec;
2131 BT_DBG("sk %p", sk);
2133 if (level == SOL_L2CAP)
2134 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2136 if (level != SOL_BLUETOOTH)
2137 return -ENOPROTOOPT;
2139 if (get_user(len, optlen))
2146 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2147 && sk->sk_type != SOCK_RAW) {
2152 sec.level = l2cap_pi(sk)->sec_level;
2154 len = min_t(unsigned int, len, sizeof(sec));
2155 if (copy_to_user(optval, (char *) &sec, len))
2160 case BT_DEFER_SETUP:
2161 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2166 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* shutdown() entry point: in ERTM wait for outstanding acks first, then
 * close the channel; honor SO_LINGER by waiting for BT_CLOSED. */
2180 static int l2cap_sock_shutdown(struct socket *sock, int how)
2182 struct sock *sk = sock->sk;
2185 BT_DBG("sock %p, sk %p", sock, sk);
2191 if (!sk->sk_shutdown) {
/* Drain unacked ERTM frames before tearing the channel down. */
2192 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2193 err = __l2cap_wait_ack(sk);
2195 sk->sk_shutdown = SHUTDOWN_MASK;
2196 l2cap_sock_clear_timer(sk);
2197 __l2cap_sock_close(sk, 0);
2199 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2200 err = bt_sock_wait_state(sk, BT_CLOSED,
2204 if (!err && sk->sk_err)
/* release() entry point: shut the channel down, then kill the socket. */
2211 static int l2cap_sock_release(struct socket *sock)
2213 struct sock *sk = sock->sk;
2216 BT_DBG("sock %p, sk %p", sock, sk);
2221 err = l2cap_sock_shutdown(sock, 2);
2224 l2cap_sock_kill(sk);
/* Configuration finished: mark the channel connected and wake whoever is
 * waiting — the connecting thread (outgoing) or the accepting parent
 * (incoming). */
2228 static void l2cap_chan_ready(struct sock *sk)
2230 struct sock *parent = bt_sk(sk)->parent;
2232 BT_DBG("sk %p, parent %p", sk, parent);
2234 l2cap_pi(sk)->conf_state = 0;
2235 l2cap_sock_clear_timer(sk);
2238 /* Outgoing channel.
2239 * Wake up socket sleeping on connect.
2241 sk->sk_state = BT_CONNECTED;
2242 sk->sk_state_change(sk);
2244 /* Incoming channel.
2245 * Wake up socket sleeping on accept.
2247 parent->sk_data_ready(parent, 0);
2251 /* Copy frame to all raw sockets on that connection */
2252 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2254 struct l2cap_chan_list *l = &conn->chan_list;
2255 struct sk_buff *nskb;
2258 BT_DBG("conn %p", conn);
2260 read_lock(&l->lock);
2261 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2262 if (sk->sk_type != SOCK_RAW)
2265 /* Don't send frame to the socket it came from */
/* Each raw socket gets its own clone; the original skb is untouched. */
2268 nskb = skb_clone(skb, GFP_ATOMIC);
2272 if (sock_queue_rcv_skb(sk, nskb))
2275 read_unlock(&l->lock);
2278 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel PDU: L2CAP header (CID_SIGNALING) + command
 * header + dlen bytes of payload, fragmented over frag_list when it exceeds
 * the ACL MTU.  (Sampled listing; allocation-failure paths elided.) */
2279 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2280 u8 code, u8 ident, u16 dlen, void *data)
2282 struct sk_buff *skb, **frag;
2283 struct l2cap_cmd_hdr *cmd;
2284 struct l2cap_hdr *lh;
2287 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2288 conn, code, ident, dlen);
2290 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2291 count = min_t(unsigned int, conn->mtu, len);
2293 skb = bt_skb_alloc(count, GFP_ATOMIC);
2297 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2298 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2299 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2301 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
/* cmd->len is stored little-endian on the wire. */
2304 cmd->len = cpu_to_le16(dlen);
2307 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2308 memcpy(skb_put(skb, count), data, count);
2314 /* Continuation fragments (no L2CAP header) */
2315 frag = &skb_shinfo(skb)->frag_list;
2317 count = min_t(unsigned int, conn->mtu, len);
2319 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2323 memcpy(skb_put(*frag, count), data, count);
2328 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: fill type/olen/val (1/2/4-byte
 * values converted from little-endian; larger options returned as a pointer)
 * and return the total option length consumed. */
2338 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2340 struct l2cap_conf_opt *opt = *ptr;
2343 len = L2CAP_CONF_OPT_SIZE + opt->len;
2351 *val = *((u8 *) opt->val);
/* NOTE(review): reads assume opt->val is sufficiently aligned for a
 * direct __le16/__le32 load — confirm against the skb layout. */
2355 *val = __le16_to_cpu(*((__le16 *) opt->val));
2359 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* Variable-length option: hand back a pointer instead of a value. */
2363 *val = (unsigned long) opt->val;
2367 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option at *ptr (values stored little-endian;
 * longer payloads memcpy'd from the pointer passed via 'val') and advance
 * *ptr past it. */
2371 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2373 struct l2cap_conf_opt *opt = *ptr;
2375 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2382 *((u8 *) opt->val) = val;
2386 *((__le16 *) opt->val) = cpu_to_le16(val);
2390 *((__le32 *) opt->val) = cpu_to_le32(val);
2394 memcpy(opt->val, (void *) val, len);
2398 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* Ack timer callback: the piggyback window expired, send an explicit ack. */
2401 static void l2cap_ack_timeout(unsigned long arg)
2403 struct sock *sk = (void *) arg;
2406 l2cap_send_ack(l2cap_pi(sk));
/* Initialise per-channel ERTM state: sequence counters, the retransmission/
 * monitor/ack timers, the SREJ and busy queues, the tx lock and the
 * local-busy work item. */
2410 static inline void l2cap_ertm_init(struct sock *sk)
2412 l2cap_pi(sk)->expected_ack_seq = 0;
2413 l2cap_pi(sk)->unacked_frames = 0;
2414 l2cap_pi(sk)->buffer_seq = 0;
2415 l2cap_pi(sk)->num_acked = 0;
2416 l2cap_pi(sk)->frames_sent = 0;
2418 setup_timer(&l2cap_pi(sk)->retrans_timer,
2419 l2cap_retrans_timeout, (unsigned long) sk);
2420 setup_timer(&l2cap_pi(sk)->monitor_timer,
2421 l2cap_monitor_timeout, (unsigned long) sk);
2422 setup_timer(&l2cap_pi(sk)->ack_timer,
2423 l2cap_ack_timeout, (unsigned long) sk);
2425 __skb_queue_head_init(SREJ_QUEUE(sk));
2426 __skb_queue_head_init(BUSY_QUEUE(sk));
2427 spin_lock_init(&l2cap_pi(sk)->send_lock);
2429 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* Return nonzero when 'mode' is advertised both locally and in the remote
 * feature mask; ERTM/Streaming are only locally enabled via enable_ertm. */
2432 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2434 u32 local_feat_mask = l2cap_feat_mask;
2436 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2439 case L2CAP_MODE_ERTM:
2440 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2441 case L2CAP_MODE_STREAMING:
2442 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Keep the requested ERTM/Streaming mode if both sides support it,
 * otherwise fall back to Basic mode. */
2448 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2451 case L2CAP_MODE_STREAMING:
2452 case L2CAP_MODE_ERTM:
2453 if (l2cap_mode_supported(mode, remote_feat_mask))
2457 return L2CAP_MODE_BASIC;
/* Build our outgoing Configure Request in 'data': pick the channel mode
 * (falling back via l2cap_select_mode on the first request), then emit MTU,
 * RFC and, when allowed, FCS options.  Returns the request length.
 * (Sampled listing; switch framing and the return are elided.) */
2461 static int l2cap_build_conf_req(struct sock *sk, void *data)
2463 struct l2cap_pinfo *pi = l2cap_pi(sk);
2464 struct l2cap_conf_req *req = data;
2465 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2466 void *ptr = req->data;
2468 BT_DBG("sk %p", sk);
/* Mode negotiation only happens on the very first exchange. */
2470 if (pi->num_conf_req || pi->num_conf_rsp)
2474 case L2CAP_MODE_STREAMING:
2475 case L2CAP_MODE_ERTM:
2476 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2477 pi->mode = l2cap_select_mode(rfc.mode,
2478 pi->conn->feat_mask);
2482 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2483 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
2486 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2492 case L2CAP_MODE_BASIC:
/* Only advertise MTU when it differs from the spec default. */
2493 if (pi->imtu != L2CAP_DEFAULT_MTU)
2494 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2497 case L2CAP_MODE_ERTM:
2498 rfc.mode = L2CAP_MODE_ERTM;
2499 rfc.txwin_size = pi->tx_win;
2500 rfc.max_transmit = pi->max_tx;
/* Timeouts are set by the acceptor, so we send zero here. */
2501 rfc.retrans_timeout = 0;
2502 rfc.monitor_timeout = 0;
2503 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp MPS so header + payload + FCS fits within the ACL MTU. */
2504 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2505 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2507 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2508 sizeof(rfc), (unsigned long) &rfc);
2510 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2513 if (pi->fcs == L2CAP_FCS_NONE ||
2514 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2515 pi->fcs = L2CAP_FCS_NONE;
2516 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2520 case L2CAP_MODE_STREAMING:
2521 rfc.mode = L2CAP_MODE_STREAMING;
2523 rfc.max_transmit = 0;
2524 rfc.retrans_timeout = 0;
2525 rfc.monitor_timeout = 0;
2526 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2527 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2528 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2530 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2531 sizeof(rfc), (unsigned long) &rfc);
2533 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2536 if (pi->fcs == L2CAP_FCS_NONE ||
2537 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2538 pi->fcs = L2CAP_FCS_NONE;
2539 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2544 /* FIXME: Need actual value of the flush timeout */
2545 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2546 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2548 req->dcid = cpu_to_le16(pi->dcid);
2549 req->flags = cpu_to_le16(0);
/* Parse the peer's accumulated Configure Request (pi->conf_req) and build
 * our Configure Response in 'data': walk the options, negotiate the mode,
 * accept/clamp MTU and RFC parameters, and echo adjusted options back.
 * Returns the response length or -ECONNREFUSED on an unresolvable mode
 * mismatch.  (Sampled listing; some lines are elided.) */
2554 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2556 struct l2cap_pinfo *pi = l2cap_pi(sk);
2557 struct l2cap_conf_rsp *rsp = data;
2558 void *ptr = rsp->data;
2559 void *req = pi->conf_req;
2560 int len = pi->conf_len;
2561 int type, hint, olen;
2563 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2564 u16 mtu = L2CAP_DEFAULT_MTU;
2565 u16 result = L2CAP_CONF_SUCCESS;
2567 BT_DBG("sk %p", sk);
2569 while (len >= L2CAP_CONF_OPT_SIZE) {
2570 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be silently ignored; non-hints we don't know
 * about force a CONF_UNKNOWN response. */
2572 hint = type & L2CAP_CONF_HINT;
2573 type &= L2CAP_CONF_MASK;
2576 case L2CAP_CONF_MTU:
2580 case L2CAP_CONF_FLUSH_TO:
2584 case L2CAP_CONF_QOS:
2587 case L2CAP_CONF_RFC:
2588 if (olen == sizeof(rfc))
2589 memcpy(&rfc, (void *) val, olen);
2592 case L2CAP_CONF_FCS:
2593 if (val == L2CAP_FCS_NONE)
2594 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2602 result = L2CAP_CONF_UNKNOWN;
2603 *((u8 *) ptr++) = type;
2608 if (pi->num_conf_rsp || pi->num_conf_req)
2612 case L2CAP_MODE_STREAMING:
2613 case L2CAP_MODE_ERTM:
2614 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2615 pi->mode = l2cap_select_mode(rfc.mode,
2616 pi->conn->feat_mask);
2620 if (pi->mode != rfc.mode)
2621 return -ECONNREFUSED;
2625 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2630 if (pi->mode != rfc.mode) {
2631 result = L2CAP_CONF_UNACCEPT;
2632 rfc.mode = pi->mode;
/* Second unaccepted response: give up on the channel. */
2634 if (pi->num_conf_rsp == 1)
2635 return -ECONNREFUSED;
2637 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2638 sizeof(rfc), (unsigned long) &rfc);
2642 if (result == L2CAP_CONF_SUCCESS) {
2643 /* Configure output options and let the other side know
2644 * which ones we don't like. */
2646 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2647 result = L2CAP_CONF_UNACCEPT;
2650 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2652 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2655 case L2CAP_MODE_BASIC:
2656 pi->fcs = L2CAP_FCS_NONE;
2657 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2660 case L2CAP_MODE_ERTM:
2661 pi->remote_tx_win = rfc.txwin_size;
2662 pi->remote_max_tx = rfc.max_transmit;
/* FIX: wire fields are little-endian; values we WRITE into the
 * echoed RFC option must use cpu_to_le16(), not le16_to_cpu()
 * (the two only coincide on little-endian hosts). */
2663 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2664 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2666 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2668 rfc.retrans_timeout =
2669 cpu_to_le16(L2CAP_DEFAULT_RETRANS_TO);
2670 rfc.monitor_timeout =
2671 cpu_to_le16(L2CAP_DEFAULT_MONITOR_TO);
2673 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2675 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2676 sizeof(rfc), (unsigned long) &rfc);
2680 case L2CAP_MODE_STREAMING:
/* FIX: same endianness correction as the ERTM branch above. */
2681 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2682 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2684 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2686 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2688 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2689 sizeof(rfc), (unsigned long) &rfc);
2694 result = L2CAP_CONF_UNACCEPT;
2696 memset(&rfc, 0, sizeof(rfc));
2697 rfc.mode = pi->mode;
2700 if (result == L2CAP_CONF_SUCCESS)
2701 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2703 rsp->scid = cpu_to_le16(pi->dcid);
2704 rsp->result = cpu_to_le16(result);
2705 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's Configure Response and build our adjusted follow-up
 * request in 'data'.  On success, latch the negotiated ERTM/Streaming
 * parameters into pinfo.  Returns the new request length or -ECONNREFUSED
 * when a STATE2 device sees a mode change.  (Sampled listing.) */
2710 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2712 struct l2cap_pinfo *pi = l2cap_pi(sk);
2713 struct l2cap_conf_req *req = data;
2714 void *ptr = req->data;
2717 struct l2cap_conf_rfc rfc;
2719 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2721 while (len >= L2CAP_CONF_OPT_SIZE) {
2722 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2725 case L2CAP_CONF_MTU:
/* Peer's MTU below the minimum: reject but propose the minimum. */
2726 if (val < L2CAP_DEFAULT_MIN_MTU) {
2727 *result = L2CAP_CONF_UNACCEPT;
2728 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2731 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2734 case L2CAP_CONF_FLUSH_TO:
2736 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2740 case L2CAP_CONF_RFC:
2741 if (olen == sizeof(rfc))
2742 memcpy(&rfc, (void *)val, olen);
2744 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2745 rfc.mode != pi->mode)
2746 return -ECONNREFUSED;
2748 pi->mode = rfc.mode;
2751 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2752 sizeof(rfc), (unsigned long) &rfc);
/* Response accepted: record the negotiated channel parameters. */
2757 if (*result == L2CAP_CONF_SUCCESS) {
2759 case L2CAP_MODE_ERTM:
2760 pi->remote_tx_win = rfc.txwin_size;
2761 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2762 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2763 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2765 case L2CAP_MODE_STREAMING:
2766 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2770 req->dcid = cpu_to_le16(pi->dcid);
2771 req->flags = cpu_to_le16(0x0000);
/* Build a minimal Configure Response header (scid/result/flags, no options)
 * in 'data'; returns its length. */
2776 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2778 struct l2cap_conf_rsp *rsp = data;
2779 void *ptr = rsp->data;
2781 BT_DBG("sk %p", sk);
2783 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2784 rsp->result = cpu_to_le16(result);
2785 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from a successful Configure Response and latch the
 * negotiated ERTM/Streaming parameters; no-op for Basic mode. */
2790 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2792 struct l2cap_pinfo *pi = l2cap_pi(sk);
2795 struct l2cap_conf_rfc rfc;
2797 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2799 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2802 while (len >= L2CAP_CONF_OPT_SIZE) {
2803 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2806 case L2CAP_CONF_RFC:
2807 if (olen == sizeof(rfc))
2808 memcpy(&rfc, (void *)val, olen);
2815 case L2CAP_MODE_ERTM:
2816 pi->remote_tx_win = rfc.txwin_size;
2817 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2818 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2819 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2821 case L2CAP_MODE_STREAMING:
2822 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject: if it answers our pending Information Request,
 * treat the feature-mask exchange as done and start pending connections. */
2826 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2828 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 == "command not understood"; anything else is ignored here. */
2830 if (rej->reason != 0x0000)
2833 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2834 cmd->ident == conn->info_ident) {
2835 del_timer(&conn->info_timer);
2837 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2838 conn->info_ident = 0;
2840 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: find a listener on the PSM, check
 * link security and backlog, allocate and register a child socket, then
 * reply with success / pending / rejection and, if needed, kick off the
 * feature-mask Information exchange.  (Sampled listing; labels elided.) */
2846 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2848 struct l2cap_chan_list *list = &conn->chan_list;
2849 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2850 struct l2cap_conn_rsp rsp;
2851 struct sock *sk, *parent;
2852 int result, status = L2CAP_CS_NO_INFO;
2854 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2855 __le16 psm = req->psm;
2857 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2859 /* Check if we have socket listening on psm */
2860 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2862 result = L2CAP_CR_BAD_PSM;
2866 /* Check if the ACL is secure enough (if not SDP) */
2867 if (psm != cpu_to_le16(0x0001) &&
2868 !hci_conn_check_link_mode(conn->hcon)) {
2869 conn->disc_reason = 0x05;
2870 result = L2CAP_CR_SEC_BLOCK;
2874 result = L2CAP_CR_NO_MEM;
2876 /* Check for backlog size */
2877 if (sk_acceptq_is_full(parent)) {
2878 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2882 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2886 write_lock_bh(&list->lock);
2888 /* Check if we already have channel with that dcid */
2889 if (__l2cap_get_chan_by_dcid(list, scid)) {
2890 write_unlock_bh(&list->lock);
2891 sock_set_flag(sk, SOCK_ZAPPED);
2892 l2cap_sock_kill(sk);
2896 hci_conn_hold(conn->hcon);
2898 l2cap_sock_init(sk, parent);
2899 bacpy(&bt_sk(sk)->src, conn->src);
2900 bacpy(&bt_sk(sk)->dst, conn->dst);
2901 l2cap_pi(sk)->psm = psm;
2902 l2cap_pi(sk)->dcid = scid;
2904 __l2cap_chan_add(conn, sk, parent);
2905 dcid = l2cap_pi(sk)->scid;
2907 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2909 l2cap_pi(sk)->ident = cmd->ident;
/* Only decide now if the feature exchange already completed;
 * otherwise answer "pending" and trigger it below. */
2911 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2912 if (l2cap_check_security(sk)) {
2913 if (bt_sk(sk)->defer_setup) {
2914 sk->sk_state = BT_CONNECT2;
2915 result = L2CAP_CR_PEND;
2916 status = L2CAP_CS_AUTHOR_PEND;
2917 parent->sk_data_ready(parent, 0);
2919 sk->sk_state = BT_CONFIG;
2920 result = L2CAP_CR_SUCCESS;
2921 status = L2CAP_CS_NO_INFO;
2924 sk->sk_state = BT_CONNECT2;
2925 result = L2CAP_CR_PEND;
2926 status = L2CAP_CS_AUTHEN_PEND;
2929 sk->sk_state = BT_CONNECT2;
2930 result = L2CAP_CR_PEND;
2931 status = L2CAP_CS_NO_INFO;
2934 write_unlock_bh(&list->lock);
2937 bh_unlock_sock(parent);
2940 rsp.scid = cpu_to_le16(scid);
2941 rsp.dcid = cpu_to_le16(dcid);
2942 rsp.result = cpu_to_le16(result);
2943 rsp.status = cpu_to_le16(status);
2944 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2946 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2947 struct l2cap_info_req info;
2948 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2950 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2951 conn->info_ident = l2cap_get_ident(conn);
2953 mod_timer(&conn->info_timer, jiffies +
2954 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2956 l2cap_send_cmd(conn, conn->info_ident,
2957 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response: look the channel up by scid (or by ident
 * for a pending result), then either move to BT_CONFIG and send our first
 * Configure Request, stay pending, or tear the channel down. */
2963 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2965 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2966 u16 scid, dcid, result, status;
2970 scid = __le16_to_cpu(rsp->scid);
2971 dcid = __le16_to_cpu(rsp->dcid);
2972 result = __le16_to_cpu(rsp->result);
2973 status = __le16_to_cpu(rsp->status);
2975 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* A pending response carries no scid yet, so fall back to ident. */
2978 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2982 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2988 case L2CAP_CR_SUCCESS:
2989 sk->sk_state = BT_CONFIG;
2990 l2cap_pi(sk)->ident = 0;
2991 l2cap_pi(sk)->dcid = dcid;
2992 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2993 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2995 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2996 l2cap_build_conf_req(sk, req), req);
2997 l2cap_pi(sk)->num_conf_req++;
3001 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
3005 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a Configure Request: accumulate option data across continuation
 * packets in pi->conf_req, then on the final packet parse it, send our
 * response, and — once both directions are done — bring the channel up.
 * (Sampled listing; labels and unlock paths elided.) */
3013 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3015 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3021 dcid = __le16_to_cpu(req->dcid);
3022 flags = __le16_to_cpu(req->flags);
3024 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3026 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3030 if (sk->sk_state == BT_DISCONN)
3033 /* Reject if config buffer is too small. */
3034 len = cmd_len - sizeof(*req);
3035 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3036 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3037 l2cap_build_conf_rsp(sk, rsp,
3038 L2CAP_CONF_REJECT, flags), rsp);
3043 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3044 l2cap_pi(sk)->conf_len += len;
/* Continuation flag set: more option data follows in another packet. */
3046 if (flags & 0x0001) {
3047 /* Incomplete config. Send empty response. */
3048 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3049 l2cap_build_conf_rsp(sk, rsp,
3050 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3054 /* Complete config. */
3055 len = l2cap_parse_conf_req(sk, rsp);
3057 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3061 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3062 l2cap_pi(sk)->num_conf_rsp++;
3064 /* Reset config buffer. */
3065 l2cap_pi(sk)->conf_len = 0;
3067 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* Both directions configured: default to CRC16 FCS unless the peer
 * explicitly asked for none, then mark connected. */
3070 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
3071 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3072 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3073 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3075 sk->sk_state = BT_CONNECTED;
3077 l2cap_pi(sk)->next_tx_seq = 0;
3078 l2cap_pi(sk)->expected_tx_seq = 0;
3079 __skb_queue_head_init(TX_QUEUE(sk));
3080 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3081 l2cap_ertm_init(sk);
3083 l2cap_chan_ready(sk);
3087 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3089 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3090 l2cap_build_conf_req(sk, buf), buf);
3091 l2cap_pi(sk)->num_conf_req++;
/* Handle a Configure Response: on success record the RFC parameters; on
 * "unaccept" re-negotiate with an adjusted request (bounded retries);
 * otherwise disconnect.  When both directions are configured, bring the
 * channel up.  (Sampled listing; some lines are elided.) */
3099 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3101 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3102 u16 scid, flags, result;
/* FIX: cmd->len is a little-endian wire field (written with cpu_to_le16
 * in l2cap_build_cmd); it must be converted before arithmetic, matching
 * the __le16_to_cpu conversions on the rsp fields below. */
3104 int len = __le16_to_cpu(cmd->len) - sizeof(*rsp);
3106 scid = __le16_to_cpu(rsp->scid);
3107 flags = __le16_to_cpu(rsp->flags);
3108 result = __le16_to_cpu(rsp->result);
3110 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3111 scid, flags, result);
3113 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3118 case L2CAP_CONF_SUCCESS:
3119 l2cap_conf_rfc_get(sk, rsp->data, len);
3122 case L2CAP_CONF_UNACCEPT:
/* Retry with adjusted options, but only a bounded number of times. */
3123 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
3126 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3127 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3131 /* throw out any old stored conf requests */
3132 result = L2CAP_CONF_SUCCESS;
3133 len = l2cap_parse_conf_rsp(sk, rsp->data,
3136 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3140 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3141 L2CAP_CONF_REQ, len, req);
3142 l2cap_pi(sk)->num_conf_req++;
3143 if (result != L2CAP_CONF_SUCCESS)
3149 sk->sk_err = ECONNRESET;
3150 l2cap_sock_set_timer(sk, HZ * 5);
3151 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3158 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
/* Both directions configured: finalize FCS choice and go connected. */
3160 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3161 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3162 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3163 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3165 sk->sk_state = BT_CONNECTED;
3166 l2cap_pi(sk)->next_tx_seq = 0;
3167 l2cap_pi(sk)->expected_tx_seq = 0;
3168 __skb_queue_head_init(TX_QUEUE(sk));
3169 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3170 l2cap_ertm_init(sk);
3172 l2cap_chan_ready(sk);
/* Handle an incoming Disconnection Request: acknowledge it, shut the
 * socket down and delete the channel.
 * NOTE(review): elided listing — locals (sk, scid, dcid), lock/unlock
 * and early-return paths are not visible here. */
3180 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3182 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3183 struct l2cap_disconn_rsp rsp;
3187 scid = __le16_to_cpu(req->scid);
3188 dcid = __le16_to_cpu(req->dcid);
3190 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid — look the channel up by it. */
3192 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
/* Echo the CIDs back (swapped to our perspective) in the response. */
3196 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3197 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3198 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3200 sk->sk_shutdown = SHUTDOWN_MASK;
3202 l2cap_chan_del(sk, ECONNRESET);
3205 l2cap_sock_kill(sk);
/* Handle an incoming Disconnection Response: the peer confirmed our
 * disconnect, so delete the channel with no error. */
3209 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3211 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3215 scid = __le16_to_cpu(rsp->scid);
3216 dcid = __le16_to_cpu(rsp->dcid);
3218 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3220 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3224 l2cap_chan_del(sk, 0);
3227 l2cap_sock_kill(sk);
/* Handle an Information Request: answer feature-mask and fixed-channel
 * queries; reject anything else as not supported. */
3231 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3233 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3236 type = __le16_to_cpu(req->type);
3238 BT_DBG("type 0x%4.4x", type);
3240 if (type == L2CAP_IT_FEAT_MASK) {
3242 u32 feat_mask = l2cap_feat_mask;
3243 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3244 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3245 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Advertise ERTM/streaming support (presumably gated on enable_ertm
 * in the elided lines — TODO confirm against full source). */
3247 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3249 put_unaligned_le32(feat_mask, rsp->data);
3250 l2cap_send_cmd(conn, cmd->ident,
3251 L2CAP_INFO_RSP, sizeof(buf), buf);
3252 } else if (type == L2CAP_IT_FIXED_CHAN) {
3254 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3255 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3256 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* Fixed-channel bitmap follows the 4-byte response header. */
3257 memcpy(buf + 4, l2cap_fixed_chan, 8);
3258 l2cap_send_cmd(conn, cmd->ident,
3259 L2CAP_INFO_RSP, sizeof(buf), buf);
3261 struct l2cap_info_rsp rsp;
3262 rsp.type = cpu_to_le16(type);
3263 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3264 l2cap_send_cmd(conn, cmd->ident,
3265 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/* Handle an Information Response: record the peer's feature mask,
 * chain a fixed-channel query if supported, then start pending
 * channels on the connection. */
3271 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3273 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3276 type = __le16_to_cpu(rsp->type);
3277 result = __le16_to_cpu(rsp->result);
3279 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* Response arrived: cancel the info-request timeout. */
3281 del_timer(&conn->info_timer);
3283 if (type == L2CAP_IT_FEAT_MASK) {
3284 conn->feat_mask = get_unaligned_le32(rsp->data);
3286 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3287 struct l2cap_info_req req;
3288 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3290 conn->info_ident = l2cap_get_ident(conn);
3292 l2cap_send_cmd(conn, conn->info_ident,
3293 L2CAP_INFO_REQ, sizeof(req), &req);
3295 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3296 conn->info_ident = 0;
3298 l2cap_conn_start(conn);
3300 } else if (type == L2CAP_IT_FIXED_CHAN) {
3301 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3302 conn->info_ident = 0;
3304 l2cap_conn_start(conn);
/* Demultiplex the L2CAP signaling channel (CID 0x0001): walk each
 * command in the skb and dispatch to the matching handler; on error,
 * send a Command Reject.
 * NOTE(review): elided listing — `len`, `err`, the switch statement
 * and the data/len advance at loop bottom are not fully visible. */
3310 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3312 u8 *data = skb->data;
3314 struct l2cap_cmd_hdr cmd;
/* Give raw sockets a copy of the signaling traffic first. */
3317 l2cap_raw_recv(conn, skb);
3319 while (len >= L2CAP_CMD_HDR_SIZE) {
3321 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3322 data += L2CAP_CMD_HDR_SIZE;
3323 len -= L2CAP_CMD_HDR_SIZE;
3325 cmd_len = le16_to_cpu(cmd.len);
3327 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Length must fit the remaining payload and ident must be non-zero. */
3329 if (cmd_len > len || !cmd.ident) {
3330 BT_DBG("corrupted command");
3335 case L2CAP_COMMAND_REJ:
3336 l2cap_command_rej(conn, &cmd, data);
3339 case L2CAP_CONN_REQ:
3340 err = l2cap_connect_req(conn, &cmd, data);
3343 case L2CAP_CONN_RSP:
3344 err = l2cap_connect_rsp(conn, &cmd, data);
3347 case L2CAP_CONF_REQ:
3348 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3351 case L2CAP_CONF_RSP:
3352 err = l2cap_config_rsp(conn, &cmd, data);
3355 case L2CAP_DISCONN_REQ:
3356 err = l2cap_disconnect_req(conn, &cmd, data);
3359 case L2CAP_DISCONN_RSP:
3360 err = l2cap_disconnect_rsp(conn, &cmd, data);
/* Echo Request is answered in place with the same payload. */
3363 case L2CAP_ECHO_REQ:
3364 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3367 case L2CAP_ECHO_RSP:
3370 case L2CAP_INFO_REQ:
3371 err = l2cap_information_req(conn, &cmd, data);
3374 case L2CAP_INFO_RSP:
3375 err = l2cap_information_rsp(conn, &cmd, data);
3379 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3385 struct l2cap_cmd_rej rej;
3386 BT_DBG("error %d", err);
3388 /* FIXME: Map err to a valid reason */
3389 rej.reason = cpu_to_le16(0);
3390 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/* Verify and strip the 2-byte CRC16 FCS trailer of an ERTM/streaming
 * frame; no-op when the channel negotiated no FCS. */
3400 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3402 u16 our_fcs, rcv_fcs;
/* FCS covers the L2CAP header plus the 2-byte control field that
 * were already pulled off this skb. */
3403 int hdr_size = L2CAP_HDR_SIZE + 2;
3405 if (pi->fcs == L2CAP_FCS_CRC16) {
/* Trim the FCS off, then read it from just past the new tail. */
3406 skb_trim(skb, skb->len - 2);
3407 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3408 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3410 if (our_fcs != rcv_fcs)
/* Reply to a poll (P=1) from the peer: send pending I-frames if any,
 * otherwise an RR; send RNR instead when we are locally busy.
 * NOTE(review): elided listing — `control` declaration and the F-bit
 * setup lines are not visible here. */
3416 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3418 struct l2cap_pinfo *pi = l2cap_pi(sk);
3421 pi->frames_sent = 0;
3423 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3425 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3426 control |= L2CAP_SUPER_RCV_NOT_READY;
3427 l2cap_send_sframe(pi, control);
3428 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3431 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3432 l2cap_retransmit_frames(sk);
3434 spin_lock_bh(&pi->send_lock);
3435 l2cap_ertm_send(sk);
3436 spin_unlock_bh(&pi->send_lock);
/* Nothing was (re)transmitted and we are not busy: answer with RR. */
3438 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3439 pi->frames_sent == 0) {
3440 control |= L2CAP_SUPER_RCV_READY;
3441 l2cap_send_sframe(pi, control);
/* Insert an out-of-order I-frame into the SREJ queue, keeping the
 * queue sorted by tx_seq offset from buffer_seq (modulo-64 sequence
 * space). Returns non-zero on duplicate (per the visible compare). */
3445 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3447 struct sk_buff *next_skb;
3448 struct l2cap_pinfo *pi = l2cap_pi(sk);
3449 int tx_seq_offset, next_tx_seq_offset;
3451 bt_cb(skb)->tx_seq = tx_seq;
3452 bt_cb(skb)->sar = sar;
3454 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: just append. */
3456 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Normalize the sequence distance into [0, 63]. */
3460 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3461 if (tx_seq_offset < 0)
3462 tx_seq_offset += 64;
/* Duplicate tx_seq already queued. */
3465 if (bt_cb(next_skb)->tx_seq == tx_seq)
3468 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3469 pi->buffer_seq) % 64;
3470 if (next_tx_seq_offset < 0)
3471 next_tx_seq_offset += 64;
/* Found the first element that sorts after us: insert before it. */
3473 if (next_tx_seq_offset > tx_seq_offset) {
3474 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3478 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3481 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3483 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble an ERTM SDU from segmented I-frames according to the SAR
 * bits, delivering complete SDUs to the socket receive queue.
 * On protocol violation the channel is disconnected (line 3591). */
3488 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3490 struct l2cap_pinfo *pi = l2cap_pi(sk);
3491 struct sk_buff *_skb;
3494 switch (control & L2CAP_CTRL_SAR) {
3495 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while mid-SDU is a protocol error. */
3496 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3499 err = sock_queue_rcv_skb(sk, skb);
3505 case L2CAP_SDU_START:
3506 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
/* First two payload bytes of a START frame carry the SDU length. */
3509 pi->sdu_len = get_unaligned_le16(skb->data);
3511 if (pi->sdu_len > pi->imtu)
3514 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3518 /* pull sdu_len bytes only after alloc, because of Local Busy
3519 * condition we have to be sure that this will be executed
3520 * only once, i.e., when alloc does not fail */
3523 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3525 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3526 pi->partial_sdu_len = skb->len;
3529 case L2CAP_SDU_CONTINUE:
3530 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3536 pi->partial_sdu_len += skb->len;
/* Accumulated more than the announced SDU length: error. */
3537 if (pi->partial_sdu_len > pi->sdu_len)
3540 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* END segment (case label elided): finish the SDU and deliver it. */
3545 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* SAR_RETRY is set when a previous delivery attempt failed (e.g.
 * receive buffer full) so the length bookkeeping is not redone. */
3551 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3552 pi->partial_sdu_len += skb->len;
3554 if (pi->partial_sdu_len > pi->imtu)
3557 if (pi->partial_sdu_len != pi->sdu_len)
3560 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* NOTE(review): clone so the assembled SDU survives a failed
 * sock_queue_rcv_skb; retried later under SAR_RETRY. */
3563 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3565 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3569 err = sock_queue_rcv_skb(sk, _skb);
3572 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3576 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3577 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
/* drop/fail path (labels elided): tear the channel down. */
3591 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* Workqueue handler for the local-busy condition: wait (interruptibly,
 * in HZ/5 slices) for room in the receive queue, drain BUSY_QUEUE back
 * through reassembly, then clear local busy and poll the peer.
 * NOTE(review): elided listing — lock_sock/release_sock, `control`
 * declaration and several exit labels are not visible. */
3596 static void l2cap_busy_work(struct work_struct *work)
3598 DECLARE_WAITQUEUE(wait, current);
3599 struct l2cap_pinfo *pi =
3600 container_of(work, struct l2cap_pinfo, busy_work);
/* bt_sk/l2cap_pi embed the sock first, so this cast is the kernel's
 * usual pinfo -> sock conversion. */
3601 struct sock *sk = (struct sock *)pi;
3602 int n_tries = 0, timeo = HZ/5, err;
3603 struct sk_buff *skb;
3608 add_wait_queue(sk_sleep(sk), &wait);
3609 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3610 set_current_state(TASK_INTERRUPTIBLE);
/* Give up after a bounded number of retries and disconnect. */
3612 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3614 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3621 if (signal_pending(current)) {
3622 err = sock_intr_errno(timeo);
3627 timeo = schedule_timeout(timeo);
3630 err = sock_error(sk);
/* Replay the queued frames through SDU reassembly; on failure the
 * frame is pushed back and we wait again. */
3634 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3635 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3636 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3638 skb_queue_head(BUSY_QUEUE(sk), skb);
3642 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3649 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* We sent an RNR earlier: poll the peer (RR with P=1) and arm the
 * monitor timer to wait for the F-bit response. */
3652 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3653 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3654 l2cap_send_sframe(pi, control);
3655 l2cap_pi(sk)->retry_count = 1;
3657 del_timer(&pi->retrans_timer);
3658 __mod_monitor_timer();
3660 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3663 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3664 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3666 BT_DBG("sk %p, Exit local busy", sk);
3668 set_current_state(TASK_RUNNING);
3669 remove_wait_queue(sk_sleep(sk), &wait);
/* Push a received I-frame toward the socket. If delivery fails with
 * no room we enter local busy: queue the frame, send RNR, and kick
 * the busy workqueue to drain later. */
3674 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3676 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* Already busy: just buffer the frame. */
3679 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3680 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3681 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3685 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3687 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3691 /* Busy Condition */
3692 BT_DBG("sk %p, Enter local busy", sk);
3694 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3695 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3696 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3698 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3699 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3700 l2cap_send_sframe(pi, sctrl);
3702 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3704 del_timer(&pi->ack_timer);
3706 queue_work(_busy_wq, &pi->busy_work);
/* Streaming-mode SDU reassembly. Unlike ERTM, errors here drop the
 * in-progress SDU (best effort) rather than disconnecting. */
3711 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3713 struct l2cap_pinfo *pi = l2cap_pi(sk);
3714 struct sk_buff *_skb;
3718 * TODO: We have to notify the userland if some data is lost with the
3722 switch (control & L2CAP_CTRL_SAR) {
3723 case L2CAP_SDU_UNSEGMENTED:
3724 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3729 err = sock_queue_rcv_skb(sk, skb);
3735 case L2CAP_SDU_START:
3736 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes of a START frame carry the SDU length. */
3741 pi->sdu_len = get_unaligned_le16(skb->data);
3744 if (pi->sdu_len > pi->imtu) {
3749 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3755 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3757 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3758 pi->partial_sdu_len = skb->len;
3762 case L2CAP_SDU_CONTINUE:
3763 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3766 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3768 pi->partial_sdu_len += skb->len;
3769 if (pi->partial_sdu_len > pi->sdu_len)
/* END segment (case label elided): deliver if lengths match. */
3777 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3780 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3782 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3783 pi->partial_sdu_len += skb->len;
3785 if (pi->partial_sdu_len > pi->imtu)
3788 if (pi->partial_sdu_len == pi->sdu_len) {
/* NOTE(review): skb_clone result is queued without a visible NULL
 * check — confirm against the full source. */
3789 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3790 err = sock_queue_rcv_skb(sk, _skb);
/* After a selectively-rejected frame arrives, flush the run of
 * now-in-order frames from SREJ_QUEUE through reassembly, advancing
 * buffer_seq_srej in modulo-64 sequence space. */
3805 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3807 struct sk_buff *skb;
3810 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* Stop at the first gap: the head is not the next expected seq. */
3811 if (bt_cb(skb)->tx_seq != tx_seq)
3814 skb = skb_dequeue(SREJ_QUEUE(sk));
3815 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3816 l2cap_ertm_reassembly_sdu(sk, skb, control);
3817 l2cap_pi(sk)->buffer_seq_srej =
3818 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3819 tx_seq = (tx_seq + 1) % 64;
/* Re-send the SREJ S-frame for tx_seq and rotate its entry to the
 * tail of SREJ_LIST (the entries before it are dropped in the elided
 * lines — TODO confirm against full source). */
3823 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3825 struct l2cap_pinfo *pi = l2cap_pi(sk);
3826 struct srej_list *l, *tmp;
3829 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3830 if (l->tx_seq == tx_seq) {
3835 control = L2CAP_SUPER_SELECT_REJECT;
3836 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3837 l2cap_send_sframe(pi, control);
3839 list_add_tail(&l->list, SREJ_LIST(sk));
/* Send SREJ S-frames for every missing sequence number between
 * expected_tx_seq and the received tx_seq, recording each in
 * SREJ_LIST so the retransmissions can be matched later. */
3843 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3845 struct l2cap_pinfo *pi = l2cap_pi(sk);
3846 struct srej_list *new;
3849 while (tx_seq != pi->expected_tx_seq) {
3850 control = L2CAP_SUPER_SELECT_REJECT;
3851 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3852 l2cap_send_sframe(pi, control);
/* NOTE(review): GFP_ATOMIC kzalloc is dereferenced with no visible
 * NULL check — known weakness in this era of the code. */
3854 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3855 new->tx_seq = pi->expected_tx_seq;
3856 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3857 list_add_tail(&new->list, SREJ_LIST(sk));
/* Skip past the frame that actually arrived. */
3859 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/* Core ERTM I-frame receive path: validate tx_seq against the receive
 * window, manage the SREJ (selective reject) recovery state machine,
 * and hand in-sequence frames to l2cap_push_rx_skb.
 * NOTE(review): elided listing — goto labels and several closing
 * braces are missing from view. */
3862 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3864 struct l2cap_pinfo *pi = l2cap_pi(sk);
3865 u8 tx_seq = __get_txseq(rx_control);
3866 u8 req_seq = __get_reqseq(rx_control);
3867 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3868 int tx_seq_offset, expected_tx_seq_offset;
/* Ack roughly every tx_win/6 frames. */
3869 int num_to_ack = (pi->tx_win/6) + 1;
3872 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
/* F-bit answers our earlier poll: stop the monitor timer. */
3875 if (L2CAP_CTRL_FINAL & rx_control &&
3876 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3877 del_timer(&pi->monitor_timer);
3878 if (pi->unacked_frames > 0)
3879 __mod_retrans_timer();
3880 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* Piggybacked ReqSeq acknowledges our transmitted frames. */
3883 pi->expected_ack_seq = req_seq;
3884 l2cap_drop_acked_frames(sk);
3886 if (tx_seq == pi->expected_tx_seq)
3889 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3890 if (tx_seq_offset < 0)
3891 tx_seq_offset += 64;
3893 /* invalid tx_seq */
3894 if (tx_seq_offset >= pi->tx_win) {
3895 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* NOTE(review): `==` compares the whole conn_state word against one
 * flag instead of testing the bit (`&`) — looks like a bug; later
 * upstream code uses a bitmask test here. Verify before changing. */
3899 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3902 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3903 struct srej_list *first;
3905 first = list_first_entry(SREJ_LIST(sk),
3906 struct srej_list, list);
/* The frame we SREJ'd first has arrived: queue it and flush any
 * contiguous run that follows. */
3907 if (tx_seq == first->tx_seq) {
3908 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3909 l2cap_check_srej_gap(sk, tx_seq);
3911 list_del(&first->list);
/* All outstanding SREJs satisfied: leave recovery. */
3914 if (list_empty(SREJ_LIST(sk))) {
3915 pi->buffer_seq = pi->buffer_seq_srej;
3916 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3918 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3921 struct srej_list *l;
3923 /* duplicated tx_seq */
3924 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3927 list_for_each_entry(l, SREJ_LIST(sk), list) {
3928 if (l->tx_seq == tx_seq) {
3929 l2cap_resend_srejframe(sk, tx_seq);
3933 l2cap_send_srejframe(sk, tx_seq);
3936 expected_tx_seq_offset =
3937 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3938 if (expected_tx_seq_offset < 0)
3939 expected_tx_seq_offset += 64;
3941 /* duplicated tx_seq */
3942 if (tx_seq_offset < expected_tx_seq_offset)
/* First out-of-sequence frame: enter SREJ recovery. */
3945 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3947 BT_DBG("sk %p, Enter SREJ", sk);
3949 INIT_LIST_HEAD(SREJ_LIST(sk));
3950 pi->buffer_seq_srej = pi->buffer_seq;
3952 __skb_queue_head_init(SREJ_QUEUE(sk));
3953 __skb_queue_head_init(BUSY_QUEUE(sk));
3954 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3956 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3958 l2cap_send_srejframe(sk, tx_seq);
3960 del_timer(&pi->ack_timer);
/* expected_tx_seq path (label elided): in-sequence frame. */
3965 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3967 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3968 bt_cb(skb)->tx_seq = tx_seq;
3969 bt_cb(skb)->sar = sar;
3970 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3974 err = l2cap_push_rx_skb(sk, skb, rx_control);
3978 if (rx_control & L2CAP_CTRL_FINAL) {
3979 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3980 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3982 l2cap_retransmit_frames(sk);
3987 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3988 if (pi->num_acked == num_to_ack - 1)
/* Handle a Receiver Ready (RR) S-frame: ack our frames; with P=1
 * answer the poll, with F=1 complete a retransmission exchange,
 * otherwise resume normal transmission. */
3998 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
4000 struct l2cap_pinfo *pi = l2cap_pi(sk);
4002 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
4005 pi->expected_ack_seq = __get_reqseq(rx_control);
4006 l2cap_drop_acked_frames(sk);
4008 if (rx_control & L2CAP_CTRL_POLL) {
4009 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4010 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4011 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4012 (pi->unacked_frames > 0))
4013 __mod_retrans_timer();
4015 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4016 l2cap_send_srejtail(sk);
4018 l2cap_send_i_or_rr_or_rnr(sk);
4021 } else if (rx_control & L2CAP_CTRL_FINAL) {
4022 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4024 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4025 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4027 l2cap_retransmit_frames(sk);
4030 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4031 (pi->unacked_frames > 0))
4032 __mod_retrans_timer();
4034 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4035 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4038 spin_lock_bh(&pi->send_lock);
4039 l2cap_ertm_send(sk);
4040 spin_unlock_bh(&pi->send_lock);
/* Handle a Reject (REJ) S-frame: the peer asks for a go-back-N style
 * retransmission starting at ReqSeq. */
4045 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4047 struct l2cap_pinfo *pi = l2cap_pi(sk);
4048 u8 tx_seq = __get_reqseq(rx_control);
4050 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4052 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4054 pi->expected_ack_seq = tx_seq;
4055 l2cap_drop_acked_frames(sk);
4057 if (rx_control & L2CAP_CTRL_FINAL) {
/* REJ_ACT suppresses a second retransmit for the same REJ. */
4058 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4059 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4061 l2cap_retransmit_frames(sk);
4063 l2cap_retransmit_frames(sk);
4065 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4066 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/* Handle a Selective Reject (SREJ) S-frame: retransmit exactly the
 * requested frame; P/F bits drive the SREJ_ACT bookkeeping. */
4069 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4071 struct l2cap_pinfo *pi = l2cap_pi(sk);
4072 u8 tx_seq = __get_reqseq(rx_control);
4074 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4076 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4078 if (rx_control & L2CAP_CTRL_POLL) {
4079 pi->expected_ack_seq = tx_seq;
4080 l2cap_drop_acked_frames(sk);
4082 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4083 l2cap_retransmit_one_frame(sk, tx_seq);
4085 spin_lock_bh(&pi->send_lock);
4086 l2cap_ertm_send(sk);
4087 spin_unlock_bh(&pi->send_lock);
4089 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4090 pi->srej_save_reqseq = tx_seq;
4091 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4093 } else if (rx_control & L2CAP_CTRL_FINAL) {
/* F=1 matching a saved SREJ: the exchange is complete. */
4094 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4095 pi->srej_save_reqseq == tx_seq)
4096 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4098 l2cap_retransmit_one_frame(sk, tx_seq);
4100 l2cap_retransmit_one_frame(sk, tx_seq);
4101 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4102 pi->srej_save_reqseq = tx_seq;
4103 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/* Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy,
 * stop retransmissions, and answer any poll. */
4108 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4110 struct l2cap_pinfo *pi = l2cap_pi(sk);
4111 u8 tx_seq = __get_reqseq(rx_control);
4113 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4115 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4116 pi->expected_ack_seq = tx_seq;
4117 l2cap_drop_acked_frames(sk);
4119 if (rx_control & L2CAP_CTRL_POLL)
4120 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4122 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4123 del_timer(&pi->retrans_timer);
4124 if (rx_control & L2CAP_CTRL_POLL)
4125 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4129 if (rx_control & L2CAP_CTRL_POLL)
4130 l2cap_send_srejtail(sk);
4132 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/* Dispatch an ERTM supervisory (S) frame to the handler matching its
 * supervise bits; also services a pending WAIT_F on the F-bit. */
4135 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4137 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4139 if (L2CAP_CTRL_FINAL & rx_control &&
4140 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4141 del_timer(&l2cap_pi(sk)->monitor_timer);
4142 if (l2cap_pi(sk)->unacked_frames > 0)
4143 __mod_retrans_timer();
4144 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4147 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4148 case L2CAP_SUPER_RCV_READY:
4149 l2cap_data_channel_rrframe(sk, rx_control);
4152 case L2CAP_SUPER_REJECT:
4153 l2cap_data_channel_rejframe(sk, rx_control);
4156 case L2CAP_SUPER_SELECT_REJECT:
4157 l2cap_data_channel_srejframe(sk, rx_control);
4160 case L2CAP_SUPER_RCV_NOT_READY:
4161 l2cap_data_channel_rnrframe(sk, rx_control);
/* Receive path for a connection-oriented data channel: look the socket
 * up by CID, then process the payload per the channel mode (basic,
 * ERTM, or streaming).
 * NOTE(review): elided listing — skb_pull of the control field, len
 * computation, goto labels and drop paths are not visible. */
4169 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4172 struct l2cap_pinfo *pi;
4175 int len, next_tx_seq_offset, req_seq_offset;
4177 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4179 BT_DBG("unknown cid 0x%4.4x", cid);
4185 BT_DBG("sk %p, len %d", sk, skb->len);
4187 if (sk->sk_state != BT_CONNECTED)
4191 case L2CAP_MODE_BASIC:
4192 /* If socket recv buffers overflows we drop data here
4193 * which is *bad* because L2CAP has to be reliable.
4194 * But we don't have any other choice. L2CAP doesn't
4195 * provide flow control mechanism. */
4197 if (pi->imtu < skb->len)
4200 if (!sock_queue_rcv_skb(sk, skb))
4204 case L2CAP_MODE_ERTM:
/* First two payload bytes are the ERTM control field. */
4205 control = get_unaligned_le16(skb->data);
4210 * We can just drop the corrupted I-frame here.
4211 * Receiver will miss it and start proper recovery
4212 * procedures and ask retransmission.
4214 if (l2cap_check_fcs(pi, skb))
4217 if (__is_sar_start(control) && __is_iframe(control))
4220 if (pi->fcs == L2CAP_FCS_CRC16)
/* Payload must not exceed the negotiated MPS. */
4223 if (len > pi->mps) {
4224 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* ReqSeq sanity: it may only ack frames we actually sent. */
4228 req_seq = __get_reqseq(control);
4229 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4230 if (req_seq_offset < 0)
4231 req_seq_offset += 64;
4233 next_tx_seq_offset =
4234 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4235 if (next_tx_seq_offset < 0)
4236 next_tx_seq_offset += 64;
4238 /* check for invalid req-seq */
4239 if (req_seq_offset > next_tx_seq_offset) {
4240 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4244 if (__is_iframe(control)) {
4246 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4250 l2cap_data_channel_iframe(sk, control, skb);
4253 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4257 l2cap_data_channel_sframe(sk, control, skb);
4262 case L2CAP_MODE_STREAMING:
4263 control = get_unaligned_le16(skb->data);
4267 if (l2cap_check_fcs(pi, skb))
4270 if (__is_sar_start(control))
4273 if (pi->fcs == L2CAP_FCS_CRC16)
4276 if (len > pi->mps || len < 0 || __is_sframe(control))
4279 tx_seq = __get_txseq(control);
/* Streaming mode never retransmits: just resync expected_tx_seq. */
4281 if (pi->expected_tx_seq == tx_seq)
4282 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4284 pi->expected_tx_seq = (tx_seq + 1) % 64;
4286 l2cap_streaming_reassembly_sdu(sk, skb, control);
4291 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/* Receive path for the connectionless channel (CID 0x0002): deliver
 * to the raw/bound socket matching the PSM, respecting its MTU. */
4305 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4309 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4313 BT_DBG("sk %p, len %d", sk, skb->len);
4315 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4318 if (l2cap_pi(sk)->imtu < skb->len)
4321 if (!sock_queue_rcv_skb(sk, skb))
/* Top-level L2CAP frame demux: strip the basic header and route by
 * CID to the signaling, connectionless, or data channel handlers. */
4333 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4335 struct l2cap_hdr *lh = (void *) skb->data;
4339 skb_pull(skb, L2CAP_HDR_SIZE);
4340 cid = __le16_to_cpu(lh->cid);
4341 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload length. */
4343 if (len != skb->len) {
4348 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4351 case L2CAP_CID_SIGNALING:
4352 l2cap_sig_channel(conn, skb);
4355 case L2CAP_CID_CONN_LESS:
/* Connectionless payload is prefixed by a 2-byte PSM. */
4356 psm = get_unaligned_le16(skb->data);
4358 l2cap_conless_channel(conn, psm, skb);
4362 l2cap_data_channel(conn, cid, skb);
4367 /* ---- L2CAP interface with lower layer (HCI) ---- */
/* HCI callback: an ACL connection is being set up — scan listening
 * L2CAP sockets to decide whether to accept and whether to request the
 * master role. Exact-address matches (lm1) take precedence over
 * wildcard BDADDR_ANY matches (lm2).
 * NOTE(review): elided listing — the `exact++` bookkeeping lines are
 * not visible. */
4369 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4371 int exact = 0, lm1 = 0, lm2 = 0;
4372 register struct sock *sk;
4373 struct hlist_node *node;
4375 if (type != ACL_LINK)
4378 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4380 /* Find listening sockets and check their link_mode */
4381 read_lock(&l2cap_sk_list.lock);
4382 sk_for_each(sk, node, &l2cap_sk_list.head) {
4383 if (sk->sk_state != BT_LISTEN)
4386 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4387 lm1 |= HCI_LM_ACCEPT;
4388 if (l2cap_pi(sk)->role_switch)
4389 lm1 |= HCI_LM_MASTER;
4391 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4392 lm2 |= HCI_LM_ACCEPT;
4393 if (l2cap_pi(sk)->role_switch)
4394 lm2 |= HCI_LM_MASTER;
4397 read_unlock(&l2cap_sk_list.lock);
4399 return exact ? lm1 : lm2;
/* HCI callback: ACL connection attempt finished — create the L2CAP
 * connection object on success, or tear state down on failure. */
4402 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4404 struct l2cap_conn *conn;
4406 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4408 if (hcon->type != ACL_LINK)
4412 conn = l2cap_conn_add(hcon, status);
4414 l2cap_conn_ready(conn);
4416 l2cap_conn_del(hcon, bt_err(status));
/* HCI callback: report the disconnect reason negotiated for this
 * connection, or reject non-ACL / unknown connections (elided path). */
4421 static int l2cap_disconn_ind(struct hci_conn *hcon)
4423 struct l2cap_conn *conn = hcon->l2cap_data;
4425 BT_DBG("hcon %p", hcon);
4427 if (hcon->type != ACL_LINK || !conn)
4430 return conn->disc_reason;
/* HCI callback: the ACL link went down — destroy the L2CAP connection
 * and all of its channels. */
4433 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4435 BT_DBG("hcon %p reason %d", hcon, reason);
4437 if (hcon->type != ACL_LINK)
4440 l2cap_conn_del(hcon, bt_err(reason));
/* React to an encryption change on a connected channel: losing
 * encryption on a MEDIUM-security channel starts a 5s grace timer;
 * on a HIGH-security channel the socket is closed immediately. */
4445 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4447 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4450 if (encrypt == 0x00) {
4451 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4452 l2cap_sock_clear_timer(sk);
4453 l2cap_sock_set_timer(sk, HZ * 5);
4454 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4455 __l2cap_sock_close(sk, ECONNREFUSED);
4457 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4458 l2cap_sock_clear_timer(sk);
/* HCI callback: authentication/encryption procedure completed — for
 * each channel on the connection, either continue a pending connect
 * (BT_CONNECT) or answer a held-back incoming connect (BT_CONNECT2). */
4462 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4464 struct l2cap_chan_list *l;
4465 struct l2cap_conn *conn = hcon->l2cap_data;
4471 l = &conn->chan_list;
4473 BT_DBG("conn %p", conn);
4475 read_lock(&l->lock);
4477 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
4480 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4485 if (!status && (sk->sk_state == BT_CONNECTED ||
4486 sk->sk_state == BT_CONFIG)) {
4487 l2cap_check_encryption(sk, encrypt);
4492 if (sk->sk_state == BT_CONNECT) {
4494 struct l2cap_conn_req req;
4495 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4496 req.psm = l2cap_pi(sk)->psm;
4498 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4499 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4501 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4502 L2CAP_CONN_REQ, sizeof(req), &req);
4504 l2cap_sock_clear_timer(sk);
4505 l2cap_sock_set_timer(sk, HZ / 10);
4507 } else if (sk->sk_state == BT_CONNECT2) {
4508 struct l2cap_conn_rsp rsp;
/* Security passed: proceed to CONFIG; otherwise block the
 * connection with a security error. */
4512 sk->sk_state = BT_CONFIG;
4513 result = L2CAP_CR_SUCCESS;
4515 sk->sk_state = BT_DISCONN;
4516 l2cap_sock_set_timer(sk, HZ / 10);
4517 result = L2CAP_CR_SEC_BLOCK;
4520 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4521 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4522 rsp.result = cpu_to_le16(result);
4523 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4524 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4525 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4531 read_unlock(&l->lock);
/* HCI callback: reassemble L2CAP frames from ACL fragments. A START
 * fragment may carry a complete frame (fast path) or open a partial
 * buffer in conn->rx_skb; continuation fragments append to it until
 * rx_len reaches zero.
 * NOTE(review): elided listing — drop labels, kfree_skb of the input,
 * and the allocation-failure branch are not visible. */
4536 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4538 struct l2cap_conn *conn = hcon->l2cap_data;
4540 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4543 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4545 if (flags & ACL_START) {
4546 struct l2cap_hdr *hdr;
/* A new start while a partial frame is pending means the previous
 * frame was truncated: discard it and mark the link unreliable. */
4550 BT_ERR("Unexpected start frame (len %d)", skb->len);
4551 kfree_skb(conn->rx_skb);
4552 conn->rx_skb = NULL;
4554 l2cap_conn_unreliable(conn, ECOMM);
4558 BT_ERR("Frame is too short (len %d)", skb->len);
4559 l2cap_conn_unreliable(conn, ECOMM);
4563 hdr = (struct l2cap_hdr *) skb->data;
4564 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4566 if (len == skb->len) {
4567 /* Complete frame received */
4568 l2cap_recv_frame(conn, skb);
4572 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4574 if (skb->len > len) {
4575 BT_ERR("Frame is too long (len %d, expected len %d)",
4577 l2cap_conn_unreliable(conn, ECOMM);
4581 /* Allocate skb for the complete frame (with header) */
4582 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4586 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
/* Remaining bytes still expected for this frame. */
4588 conn->rx_len = len - skb->len;
4590 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4592 if (!conn->rx_len) {
4593 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4594 l2cap_conn_unreliable(conn, ECOMM);
4598 if (skb->len > conn->rx_len) {
4599 BT_ERR("Fragment is too long (len %d, expected %d)",
4600 skb->len, conn->rx_len);
4601 kfree_skb(conn->rx_skb);
4602 conn->rx_skb = NULL;
4604 l2cap_conn_unreliable(conn, ECOMM);
4608 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4610 conn->rx_len -= skb->len;
4612 if (!conn->rx_len) {
4613 /* Complete frame received */
4614 l2cap_recv_frame(conn, conn->rx_skb);
4615 conn->rx_skb = NULL;
/* debugfs seq_file: dump one line per L2CAP socket — addresses, state,
 * PSM, CIDs (elided columns), MTUs and security level. */
4624 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4627 struct hlist_node *node;
4629 read_lock_bh(&l2cap_sk_list.lock);
4631 sk_for_each(sk, node, &l2cap_sk_list.head) {
4632 struct l2cap_pinfo *pi = l2cap_pi(sk);
4634 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4635 batostr(&bt_sk(sk)->src),
4636 batostr(&bt_sk(sk)->dst),
4637 sk->sk_state, __le16_to_cpu(pi->psm),
4639 pi->imtu, pi->omtu, pi->sec_level);
4642 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open: bind the seq_file show routine above. */
4647 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4649 return single_open(file, l2cap_debugfs_show, inode->i_private);
4652 static const struct file_operations l2cap_debugfs_fops = {
4653 .open = l2cap_debugfs_open,
4655 .llseek = seq_lseek,
4656 .release = single_release,
4659 static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH/L2CAP sockets. */
4661 static const struct proto_ops l2cap_sock_ops = {
4662 .family = PF_BLUETOOTH,
4663 .owner = THIS_MODULE,
4664 .release = l2cap_sock_release,
4665 .bind = l2cap_sock_bind,
4666 .connect = l2cap_sock_connect,
4667 .listen = l2cap_sock_listen,
4668 .accept = l2cap_sock_accept,
4669 .getname = l2cap_sock_getname,
4670 .sendmsg = l2cap_sock_sendmsg,
4671 .recvmsg = l2cap_sock_recvmsg,
4672 .poll = bt_sock_poll,
4673 .ioctl = bt_sock_ioctl,
4674 .mmap = sock_no_mmap,
4675 .socketpair = sock_no_socketpair,
4676 .shutdown = l2cap_sock_shutdown,
4677 .setsockopt = l2cap_sock_setsockopt,
4678 .getsockopt = l2cap_sock_getsockopt
4681 static const struct net_proto_family l2cap_sock_family_ops = {
4682 .family = PF_BLUETOOTH,
4683 .owner = THIS_MODULE,
4684 .create = l2cap_sock_create,
/* HCI protocol registration: callbacks the HCI core invokes for
 * connection lifecycle, security and inbound ACL data. */
4687 static struct hci_proto l2cap_hci_proto = {
4689 .id = HCI_PROTO_L2CAP,
4690 .connect_ind = l2cap_connect_ind,
4691 .connect_cfm = l2cap_connect_cfm,
4692 .disconn_ind = l2cap_disconn_ind,
4693 .disconn_cfm = l2cap_disconn_cfm,
4694 .security_cfm = l2cap_security_cfm,
4695 .recv_acldata = l2cap_recv_acldata
4698 static int __init l2cap_init(void)
4702 err = proto_register(&l2cap_proto, 0);
4706 _busy_wq = create_singlethread_workqueue("l2cap");
4710 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4712 BT_ERR("L2CAP socket registration failed");
4716 err = hci_register_proto(&l2cap_hci_proto);
4718 BT_ERR("L2CAP protocol registration failed");
4719 bt_sock_unregister(BTPROTO_L2CAP);
4724 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4725 bt_debugfs, NULL, &l2cap_debugfs_fops);
4727 BT_ERR("Failed to create L2CAP debug file");
4730 BT_INFO("L2CAP ver %s", VERSION);
4731 BT_INFO("L2CAP socket layer initialized");
4736 proto_unregister(&l2cap_proto);
4740 static void __exit l2cap_exit(void)
4742 debugfs_remove(l2cap_debugfs);
4744 flush_workqueue(_busy_wq);
4745 destroy_workqueue(_busy_wq);
4747 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4748 BT_ERR("L2CAP socket unregistration failed");
4750 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4751 BT_ERR("L2CAP protocol unregistration failed");
4753 proto_unregister(&l2cap_proto);
/*
 * Dummy function to trigger automatic L2CAP module loading by other
 * modules that use L2CAP sockets but don't use any other symbols
 * from it.  Intentionally empty.
 */
void l2cap_load(void)
{
}
4762 EXPORT_SYMBOL(l2cap_load);
4764 module_init(l2cap_init);
4765 module_exit(l2cap_exit);
4767 module_param(enable_ertm, bool, 0644);
4768 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4770 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4771 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4772 MODULE_VERSION(VERSION);
4773 MODULE_LICENSE("GPL");
4774 MODULE_ALIAS("bt-proto-0");