2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
/* Module version string reported to userspace. */
56 #define VERSION "2.14"
/* ERTM (Enhanced Retransmission Mode) support is disabled by default. */
58 static int enable_ertm = 0;
/* Advertised L2CAP feature mask: only the fixed-channels feature here. */
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
/* Fixed channel bitmap; 0x02 = L2CAP signalling channel supported. */
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
/* Workqueue used by the local-busy handling (l2cap_busy_work). */
65 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, protected by its rwlock. */
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for routines defined later in this file. */
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 /* ---- L2CAP timers ---- */
/* sk_timer callback: the socket-level timeout fired. Maps the current
 * socket state to an error reason and closes the channel.
 * NOTE(review): extraction dropped lines here (default reason, socket
 * locking/unlocking, sock_put) — do not assume this is the full body. */
81 static void l2cap_sock_timeout(unsigned long arg)
83 struct sock *sk = (struct sock *) arg;
86 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* A timeout while connected/configuring or while an authenticated
 * connect is pending is reported as ECONNREFUSED. */
90 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
91 reason = ECONNREFUSED;
92 else if (sk->sk_state == BT_CONNECT &&
93 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
94 reason = ECONNREFUSED;
98 __l2cap_sock_close(sk, reason);
/* (Re)arm the per-socket timer 'timeout' jiffies from now. */
106 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
108 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
109 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending per-socket timer, if any. */
112 static void l2cap_sock_clear_timer(struct sock *sk)
114 BT_DBG("sock %p state %d", sk, sk->sk_state);
115 sk_stop_timer(sk, &sk->sk_timer);
118 /* ---- L2CAP channels ---- */
/* Walk the per-connection channel list looking for a destination CID.
 * Caller must hold the list lock ("__" prefix convention in this file). */
119 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
122 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
123 if (l2cap_pi(s)->dcid == cid)
/* Same walk, keyed on the source CID. Caller holds the list lock. */
129 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
132 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
133 if (l2cap_pi(s)->scid == cid)
139 /* Find channel with given SCID.
140 * Returns locked socket */
141 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
145 s = __l2cap_get_chan_by_scid(l, cid);
/* NOTE(review): the matching read_lock()/bh_lock_sock() lines were lost
 * in extraction; only the unlock is visible here. */
148 read_unlock(&l->lock);
/* Lookup by the signalling-command identifier of an outstanding request.
 * Caller holds the list lock. */
152 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
155 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
156 if (l2cap_pi(s)->ident == ident)
/* Locking wrapper around __l2cap_get_chan_by_ident(). */
162 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
166 s = __l2cap_get_chan_by_ident(l, ident);
169 read_unlock(&l->lock);
/* Allocate the first free dynamic source CID on this connection by
 * scanning [L2CAP_CID_DYN_START, L2CAP_CID_DYN_END). */
173 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
175 u16 cid = L2CAP_CID_DYN_START;
177 for (; cid < L2CAP_CID_DYN_END; cid++) {
178 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push 'sk' onto the head of the doubly-linked channel list.
 * Caller holds the list write lock (see l2cap_chan_add()). */
185 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
190 l2cap_pi(l->head)->prev_c = sk;
192 l2cap_pi(sk)->next_c = l->head;
193 l2cap_pi(sk)->prev_c = NULL;
/* Remove 'sk' from the channel list, taking the write lock itself. */
197 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
199 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
201 write_lock_bh(&l->lock);
/* Splice neighbours around sk; the head-update branch was lost in
 * extraction — NOTE(review): verify against the original file. */
206 l2cap_pi(next)->prev_c = prev;
208 l2cap_pi(prev)->next_c = next;
209 write_unlock_bh(&l->lock);
/* Attach socket 'sk' to connection 'conn' and assign its CIDs based on
 * socket type. If 'parent' is set, queue sk on the parent's accept queue.
 * Caller holds the channel-list write lock (see l2cap_chan_add()). */
214 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
216 struct l2cap_chan_list *l = &conn->chan_list;
218 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
219 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13: default disconnect reason "remote user terminated connection". */
221 conn->disc_reason = 0x13;
223 l2cap_pi(sk)->conn = conn;
225 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
226 /* Alloc CID for connection-oriented socket */
227 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
228 } else if (sk->sk_type == SOCK_DGRAM) {
229 /* Connectionless socket */
230 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
231 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
232 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
234 /* Raw socket can send/recv signalling messages only */
235 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
236 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
237 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
240 __l2cap_chan_link(l, sk);
242 bt_accept_enqueue(parent, sk);
/* Tear down a channel: unlink it from its connection, mark the socket
 * closed/zapped, wake any accepting parent, and purge ERTM state. */
247 * Must be called on the locked socket. */
248 static void l2cap_chan_del(struct sock *sk, int err)
250 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
251 struct sock *parent = bt_sk(sk)->parent;
253 l2cap_sock_clear_timer(sk);
255 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
258 /* Unlink from channel list */
259 l2cap_chan_unlink(&conn->chan_list, sk);
260 l2cap_pi(sk)->conn = NULL;
/* Drop the ACL reference taken when the channel was added. */
261 hci_conn_put(conn->hcon);
264 sk->sk_state = BT_CLOSED;
265 sock_set_flag(sk, SOCK_ZAPPED);
/* If still on a listening parent's accept queue, remove and notify. */
271 bt_accept_unlink(sk);
272 parent->sk_data_ready(parent, 0);
274 sk->sk_state_change(sk);
276 skb_queue_purge(TX_QUEUE(sk));
/* ERTM bookkeeping: stop retransmission timers and free SREJ state. */
278 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
279 struct srej_list *l, *tmp;
281 del_timer(&l2cap_pi(sk)->retrans_timer);
282 del_timer(&l2cap_pi(sk)->monitor_timer);
283 del_timer(&l2cap_pi(sk)->ack_timer);
285 skb_queue_purge(SREJ_QUEUE(sk));
286 skb_queue_purge(BUSY_QUEUE(sk));
288 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
295 /* Service level security */
/* Derive the HCI authentication requirement from the channel's PSM and
 * security level, then ask the HCI layer to enforce it.
 * PSM 0x0001 (SDP) never requires bonding. */
296 static inline int l2cap_check_security(struct sock *sk)
298 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
301 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
302 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
303 auth_type = HCI_AT_NO_BONDING_MITM;
305 auth_type = HCI_AT_NO_BONDING;
/* SDP traffic is downgraded from LOW to the dedicated SDP level. */
307 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
308 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
/* Non-SDP PSMs use general bonding, with MITM for HIGH security. */
310 switch (l2cap_pi(sk)->sec_level) {
311 case BT_SECURITY_HIGH:
312 auth_type = HCI_AT_GENERAL_BONDING_MITM;
314 case BT_SECURITY_MEDIUM:
315 auth_type = HCI_AT_GENERAL_BONDING;
318 auth_type = HCI_AT_NO_BONDING;
323 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel-reserved range (1..128). Protected by the
 * connection spinlock. */
327 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
331 /* Get next available identificator.
332 * 1 - 128 are used by kernel.
333 * 133 - 199 are reserved.
334 * 200 - 254 are used by utilities like l2ping, etc.
337 spin_lock_bh(&conn->lock);
339 if (++conn->tx_ident > 128)
344 spin_unlock_bh(&conn->lock);
/* Build a signalling command PDU and hand it to the ACL layer. */
349 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
351 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
353 BT_DBG("code 0x%2.2x", code);
358 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame (supervisory frame) carrying the
 * given control field. Appends a CRC16 FCS when the channel uses one,
 * and consumes any pending Final/Poll bit requests. */
361 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
364 struct l2cap_hdr *lh;
365 struct l2cap_conn *conn = pi->conn;
366 struct sock *sk = (struct sock *)pi;
/* Header + 2 bytes of control field; FCS adds 2 more below. */
367 int count, hlen = L2CAP_HDR_SIZE + 2;
369 if (sk->sk_state != BT_CONNECTED)
372 if (pi->fcs == L2CAP_FCS_CRC16)
375 BT_DBG("pi %p, control 0x%2.2x", pi, control);
377 count = min_t(unsigned int, conn->mtu, hlen);
378 control |= L2CAP_CTRL_FRAME_TYPE;
/* Pending F-bit (answer to a Poll) is sent once and cleared. */
380 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
381 control |= L2CAP_CTRL_FINAL;
382 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Pending P-bit (we are polling the peer) likewise. */
385 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
386 control |= L2CAP_CTRL_POLL;
387 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
390 skb = bt_skb_alloc(count, GFP_ATOMIC);
394 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
395 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
396 lh->cid = cpu_to_le16(pi->dcid);
397 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the FCS field itself. */
399 if (pi->fcs == L2CAP_FCS_CRC16) {
400 u16 fcs = crc16(0, (u8 *)lh, count - 2);
401 put_unaligned_le16(fcs, skb_put(skb, 2));
404 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RR when we can receive, RNR when locally busy; piggy-backs the
 * current buffer_seq as the ReqSeq acknowledgement. */
407 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
409 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
410 control |= L2CAP_SUPER_RCV_NOT_READY;
411 pi->conn_state |= L2CAP_CONN_RNR_SENT;
413 control |= L2CAP_SUPER_RCV_READY;
415 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
417 l2cap_send_sframe(pi, control);
/* True while no Connect Request from this socket is outstanding. */
420 static inline int __l2cap_no_conn_pending(struct sock *sk)
422 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: if the remote feature mask is already
 * known, send a Connect Request (subject to security); otherwise first
 * issue an Information Request for the feature mask. */
425 static void l2cap_do_start(struct sock *sk)
427 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
429 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange still in flight — wait for its response. */
430 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
433 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
434 struct l2cap_conn_req req;
435 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
436 req.psm = l2cap_pi(sk)->psm;
438 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
439 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
441 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
442 L2CAP_CONN_REQ, sizeof(req), &req);
/* First user of the connection: query the remote feature mask. */
445 struct l2cap_info_req req;
446 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
448 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
449 conn->info_ident = l2cap_get_ident(conn);
451 mod_timer(&conn->info_timer, jiffies +
452 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
454 l2cap_send_cmd(conn, conn->info_ident,
455 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send an L2CAP Disconnect Request for this channel, dropping any
 * queued outbound traffic and stopping ERTM timers first; the socket
 * moves to BT_DISCONN until the response arrives. */
459 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
461 struct l2cap_disconn_req req;
466 skb_queue_purge(TX_QUEUE(sk));
468 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
469 del_timer(&l2cap_pi(sk)->retrans_timer);
470 del_timer(&l2cap_pi(sk)->monitor_timer);
471 del_timer(&l2cap_pi(sk)->ack_timer);
474 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
475 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
476 l2cap_send_cmd(conn, l2cap_get_ident(conn),
477 L2CAP_DISCONN_REQ, sizeof(req), &req);
479 sk->sk_state = BT_DISCONN;
/* NOTE(review): 'err' is received but its use (sk->sk_err?) is not
 * visible in this extraction — confirm against the original. */
483 /* ---- L2CAP connections ---- */
/* Walk every channel on the connection and advance its state machine:
 * BT_CONNECT channels send Connect Requests, BT_CONNECT2 channels
 * answer a pending incoming Connect Request. Called once the feature
 * mask exchange has completed (or timed out). */
484 static void l2cap_conn_start(struct l2cap_conn *conn)
486 struct l2cap_chan_list *l = &conn->chan_list;
489 BT_DBG("conn %p", conn);
493 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Connectionless/raw sockets have no connect handshake. */
496 if (sk->sk_type != SOCK_SEQPACKET &&
497 sk->sk_type != SOCK_STREAM) {
502 if (sk->sk_state == BT_CONNECT) {
503 if (l2cap_check_security(sk) &&
504 __l2cap_no_conn_pending(sk)) {
505 struct l2cap_conn_req req;
506 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
507 req.psm = l2cap_pi(sk)->psm;
509 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
510 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
512 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
513 L2CAP_CONN_REQ, sizeof(req), &req);
515 } else if (sk->sk_state == BT_CONNECT2) {
516 struct l2cap_conn_rsp rsp;
517 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
518 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
520 if (l2cap_check_security(sk)) {
/* Deferred setup: report "authorization pending" and let the
 * listening owner decide via the accept path. */
521 if (bt_sk(sk)->defer_setup) {
522 struct sock *parent = bt_sk(sk)->parent;
523 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
524 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
525 parent->sk_data_ready(parent, 0);
528 sk->sk_state = BT_CONFIG;
529 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
530 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security check still running: tell the peer we are pending. */
533 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
534 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
537 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
538 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
544 read_unlock(&l->lock);
/* The underlying ACL link came up: mark raw/dgram channels connected
 * immediately and start the connect handshake for the others. */
547 static void l2cap_conn_ready(struct l2cap_conn *conn)
549 struct l2cap_chan_list *l = &conn->chan_list;
552 BT_DBG("conn %p", conn);
556 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
559 if (sk->sk_type != SOCK_SEQPACKET &&
560 sk->sk_type != SOCK_STREAM) {
561 l2cap_sock_clear_timer(sk);
562 sk->sk_state = BT_CONNECTED;
563 sk->sk_state_change(sk);
564 } else if (sk->sk_state == BT_CONNECT)
570 read_unlock(&l->lock);
573 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate 'err' to every channel that requested reliability. */
574 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
576 struct l2cap_chan_list *l = &conn->chan_list;
579 BT_DBG("conn %p", conn);
583 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
584 if (l2cap_pi(sk)->force_reliable)
588 read_unlock(&l->lock);
/* Info-request timer expired: give up on the feature exchange and
 * proceed with whatever channels are waiting. */
591 static void l2cap_info_timeout(unsigned long arg)
593 struct l2cap_conn *conn = (void *) arg;
595 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
596 conn->info_ident = 0;
598 l2cap_conn_start(conn);
/* Create (or return the existing) L2CAP connection object for an ACL
 * link. Allocated atomically because this runs from HCI event context. */
601 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
603 struct l2cap_conn *conn = hcon->l2cap_data;
608 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
612 hcon->l2cap_data = conn;
615 BT_DBG("hcon %p conn %p", hcon, conn);
617 conn->mtu = hcon->hdev->acl_mtu;
618 conn->src = &hcon->hdev->bdaddr;
619 conn->dst = &hcon->dst;
623 spin_lock_init(&conn->lock);
624 rwlock_init(&conn->chan_list.lock);
626 setup_timer(&conn->info_timer, l2cap_info_timeout,
627 (unsigned long) conn);
/* 0x13: "remote user terminated" default disconnect reason. */
629 conn->disc_reason = 0x13;
/* ACL link went down: tear down every channel, free partial reassembly
 * data and detach the connection from the hci_conn. */
634 static void l2cap_conn_del(struct hci_conn *hcon, int err)
636 struct l2cap_conn *conn = hcon->l2cap_data;
642 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
644 kfree_skb(conn->rx_skb);
647 while ((sk = conn->chan_list.head)) {
649 l2cap_chan_del(sk, err);
654 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
655 del_timer_sync(&conn->info_timer);
657 hcon->l2cap_data = NULL;
/* Locked wrapper around __l2cap_chan_add(). */
661 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
663 struct l2cap_chan_list *l = &conn->chan_list;
664 write_lock_bh(&l->lock);
665 __l2cap_chan_add(conn, sk, parent);
666 write_unlock_bh(&l->lock);
669 /* ---- Socket interface ---- */
/* Exact-match lookup on (source PSM, source bdaddr) over all L2CAP
 * sockets. Caller holds l2cap_sk_list.lock. */
670 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
673 struct hlist_node *node;
674 sk_for_each(sk, node, &l2cap_sk_list.head)
675 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
682 /* Find socket with psm and source bdaddr.
683 * Returns closest match.
/* Prefers an exact source-address match; falls back to a BDADDR_ANY
 * wildcard listener (tracked in sk1). */
685 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
687 struct sock *sk = NULL, *sk1 = NULL;
688 struct hlist_node *node;
690 sk_for_each(sk, node, &l2cap_sk_list.head) {
691 if (state && sk->sk_state != state)
694 if (l2cap_pi(sk)->psm == psm) {
696 if (!bacmp(&bt_sk(sk)->src, src))
700 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node != NULL means the loop exited early on an exact match. */
704 return node ? sk : sk1;
707 /* Find socket with given address (psm, src).
708 * Returns locked socket */
709 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
712 read_lock(&l2cap_sk_list.lock);
713 s = __l2cap_get_sock_by_psm(state, psm, src);
716 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: drop any skbs still queued on the socket. */
720 static void l2cap_sock_destruct(struct sock *sk)
724 skb_queue_purge(&sk->sk_receive_queue);
725 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent itself closed and zapped. */
728 static void l2cap_sock_cleanup_listen(struct sock *parent)
732 BT_DBG("parent %p", parent);
734 /* Close not yet accepted channels */
735 while ((sk = bt_accept_dequeue(parent, NULL)))
736 l2cap_sock_close(sk);
738 parent->sk_state = BT_CLOSED;
739 sock_set_flag(parent, SOCK_ZAPPED);
742 /* Kill socket (only if zapped and orphan)
743 * Must be called on unlocked socket.
745 static void l2cap_sock_kill(struct sock *sk)
/* Only sockets that are zapped AND no longer attached to a struct
 * socket (orphans) may be destroyed here. */
747 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
750 BT_DBG("sk %p state %d", sk, sk->sk_state);
752 /* Kill poor orphan */
753 bt_sock_unlink(&l2cap_sk_list, sk);
754 sock_set_flag(sk, SOCK_DEAD);
/* State-dependent close: listening sockets reap their accept queue,
 * connected channels send a Disconnect Request, half-open incoming
 * channels (BT_CONNECT2) answer the pending Connect Request with a
 * refusal before being deleted. Caller holds the socket lock. */
758 static void __l2cap_sock_close(struct sock *sk, int reason)
760 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
762 switch (sk->sk_state) {
764 l2cap_sock_cleanup_listen(sk);
/* Connected/configuring connection-oriented channel: orderly
 * disconnect with a timeout guarding the response. */
769 if (sk->sk_type == SOCK_SEQPACKET ||
770 sk->sk_type == SOCK_STREAM) {
771 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
773 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
774 l2cap_send_disconn_req(conn, sk, reason);
776 l2cap_chan_del(sk, reason);
/* BT_CONNECT2: we still owe the peer a Connect Response. */
780 if (sk->sk_type == SOCK_SEQPACKET ||
781 sk->sk_type == SOCK_STREAM) {
782 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
783 struct l2cap_conn_rsp rsp;
786 if (bt_sk(sk)->defer_setup)
787 result = L2CAP_CR_SEC_BLOCK;
789 result = L2CAP_CR_BAD_PSM;
791 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
792 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
793 rsp.result = cpu_to_le16(result);
794 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
795 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
796 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
798 l2cap_chan_del(sk, reason);
/* Other connect states: just remove the channel. */
803 l2cap_chan_del(sk, reason);
/* Default: nothing to tear down, only mark the socket dead. */
807 sock_set_flag(sk, SOCK_ZAPPED);
812 /* Must be called on unlocked socket. */
813 static void l2cap_sock_close(struct sock *sk)
815 l2cap_sock_clear_timer(sk);
/* NOTE(review): lock_sock()/release_sock() around this call were lost
 * in extraction — the comment above implies they exist. */
817 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize a fresh L2CAP socket. A child accepted from 'parent'
 * inherits the parent's configuration; a brand-new socket gets the
 * module defaults (ERTM only for SOCK_STREAM when enable_ertm is set). */
822 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
824 struct l2cap_pinfo *pi = l2cap_pi(sk);
829 sk->sk_type = parent->sk_type;
830 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
832 pi->imtu = l2cap_pi(parent)->imtu;
833 pi->omtu = l2cap_pi(parent)->omtu;
834 pi->conf_state = l2cap_pi(parent)->conf_state;
835 pi->mode = l2cap_pi(parent)->mode;
836 pi->fcs = l2cap_pi(parent)->fcs;
837 pi->max_tx = l2cap_pi(parent)->max_tx;
838 pi->tx_win = l2cap_pi(parent)->tx_win;
839 pi->sec_level = l2cap_pi(parent)->sec_level;
840 pi->role_switch = l2cap_pi(parent)->role_switch;
841 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* No parent: module defaults. */
843 pi->imtu = L2CAP_DEFAULT_MTU;
845 if (enable_ertm && sk->sk_type == SOCK_STREAM) {
846 pi->mode = L2CAP_MODE_ERTM;
/* STATE2_DEVICE: this side insists on its chosen mode during config. */
847 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
849 pi->mode = L2CAP_MODE_BASIC;
851 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
852 pi->fcs = L2CAP_FCS_CRC16;
853 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
854 pi->sec_level = BT_SECURITY_LOW;
856 pi->force_reliable = 0;
859 /* Default config options */
861 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* ERTM queues and SREJ bookkeeping start empty. */
862 skb_queue_head_init(TX_QUEUE(sk));
863 skb_queue_head_init(SREJ_QUEUE(sk));
864 skb_queue_head_init(BUSY_QUEUE(sk));
865 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes sock allocations for struct l2cap_pinfo. */
868 static struct proto l2cap_proto = {
870 .owner = THIS_MODULE,
871 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialize an L2CAP socket, link it into the
 * global socket list, and arm its timeout handler. */
874 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
878 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
882 sock_init_data(sock, sk);
883 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
885 sk->sk_destruct = l2cap_sock_destruct;
886 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
888 sock_reset_flag(sk, SOCK_ZAPPED);
890 sk->sk_protocol = proto;
891 sk->sk_state = BT_OPEN;
893 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
895 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) entry point: validate the socket type, require
 * CAP_NET_RAW for user-created raw sockets, then allocate. */
899 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
904 BT_DBG("sock %p", sock);
906 sock->state = SS_UNCONNECTED;
908 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
909 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
910 return -ESOCKTNOSUPPORT;
912 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
915 sock->ops = &l2cap_sock_ops;
917 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
921 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the sockaddr, enforce the privileged-PSM rule,
 * reject duplicate (psm, bdaddr) bindings and record the source
 * address. PSMs below 0x1001 require CAP_NET_BIND_SERVICE. */
925 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
927 struct sock *sk = sock->sk;
928 struct sockaddr_l2 la;
933 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Tolerate short sockaddrs from userspace: zero-fill then copy. */
936 memset(&la, 0, sizeof(la));
937 len = min_t(unsigned int, sizeof(la), alen);
938 memcpy(&la, addr, len);
945 if (sk->sk_state != BT_OPEN) {
950 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
951 !capable(CAP_NET_BIND_SERVICE)) {
956 write_lock_bh(&l2cap_sk_list.lock);
958 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
961 /* Save source address */
962 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
963 l2cap_pi(sk)->psm = la.l2_psm;
964 l2cap_pi(sk)->sport = la.l2_psm;
965 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) get the relaxed SDP level. */
967 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
968 __le16_to_cpu(la.l2_psm) == 0x0003)
969 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
972 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to the destination, establish (or reuse) the ACL
 * link with the appropriate authentication type, attach this channel to
 * the resulting l2cap_conn and start the connect handshake. Returns 0
 * or a negative errno (-EHOSTUNREACH when no local adapter can reach
 * the destination). */
979 static int l2cap_do_connect(struct sock *sk)
981 bdaddr_t *src = &bt_sk(sk)->src;
982 bdaddr_t *dst = &bt_sk(sk)->dst;
983 struct l2cap_conn *conn;
984 struct hci_conn *hcon;
985 struct hci_dev *hdev;
989 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
992 hdev = hci_get_route(dst, src);
994 return -EHOSTUNREACH;
996 hci_dev_lock_bh(hdev);
/* Raw sockets authenticate with dedicated bonding. */
1000 if (sk->sk_type == SOCK_RAW) {
1001 switch (l2cap_pi(sk)->sec_level) {
1002 case BT_SECURITY_HIGH:
1003 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1005 case BT_SECURITY_MEDIUM:
1006 auth_type = HCI_AT_DEDICATED_BONDING;
1009 auth_type = HCI_AT_NO_BONDING;
/* SDP PSM: no bonding; mirrors l2cap_check_security(). */
1012 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1013 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1014 auth_type = HCI_AT_NO_BONDING_MITM;
1016 auth_type = HCI_AT_NO_BONDING;
1018 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1019 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
/* Everything else: general bonding. */
1021 switch (l2cap_pi(sk)->sec_level) {
1022 case BT_SECURITY_HIGH:
1023 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1025 case BT_SECURITY_MEDIUM:
1026 auth_type = HCI_AT_GENERAL_BONDING;
1029 auth_type = HCI_AT_NO_BONDING;
1034 hcon = hci_connect(hdev, ACL_LINK, dst,
1035 l2cap_pi(sk)->sec_level, auth_type);
1039 conn = l2cap_conn_add(hcon, 0);
1047 /* Update source addr of the socket */
1048 bacpy(src, conn->src);
1050 l2cap_chan_add(conn, sk, NULL);
1052 sk->sk_state = BT_CONNECT;
1053 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* ACL already up: raw/dgram sockets are immediately connected;
 * connection-oriented ones proceed via l2cap_do_start (not visible
 * here — lines dropped in extraction). */
1055 if (hcon->state == BT_CONNECTED) {
1056 if (sk->sk_type != SOCK_SEQPACKET &&
1057 sk->sk_type != SOCK_STREAM) {
1058 l2cap_sock_clear_timer(sk);
1059 sk->sk_state = BT_CONNECTED;
1065 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address and the channel mode (ERTM and
 * streaming are only permitted when enable_ertm is set — dropped lines
 * presumably enforce this), record the destination, initiate the
 * connection and optionally wait for BT_CONNECTED. */
1070 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1072 struct sock *sk = sock->sk;
1073 struct sockaddr_l2 la;
1076 BT_DBG("sk %p", sk);
1078 if (!addr || alen < sizeof(addr->sa_family) ||
1079 addr->sa_family != AF_BLUETOOTH)
1082 memset(&la, 0, sizeof(la));
1083 len = min_t(unsigned int, sizeof(la), alen);
1084 memcpy(&la, addr, len);
/* Connection-oriented sockets need a PSM (condition truncated). */
1091 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1097 switch (l2cap_pi(sk)->mode) {
1098 case L2CAP_MODE_BASIC:
1100 case L2CAP_MODE_ERTM:
1101 case L2CAP_MODE_STREAMING:
1110 switch (sk->sk_state) {
1114 /* Already connecting */
1118 /* Already connected */
1131 /* Set destination address and psm */
1132 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1133 l2cap_pi(sk)->psm = la.l2_psm;
1135 err = l2cap_do_connect(sk);
/* Block (unless O_NONBLOCK) until connected or timeout. */
1140 err = bt_sock_wait_state(sk, BT_CONNECTED,
1141 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET/STREAM sockets may listen; if no PSM
 * was bound, auto-assign the first free odd dynamic PSM in
 * [0x1001, 0x1100) for this source address. */
1147 static int l2cap_sock_listen(struct socket *sock, int backlog)
1149 struct sock *sk = sock->sk;
1152 BT_DBG("sk %p backlog %d", sk, backlog);
1156 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1157 || sk->sk_state != BT_BOUND) {
/* Same mode gating as connect(): ERTM/streaming need enable_ertm
 * (enforcement lines dropped in extraction). */
1162 switch (l2cap_pi(sk)->mode) {
1163 case L2CAP_MODE_BASIC:
1165 case L2CAP_MODE_ERTM:
1166 case L2CAP_MODE_STREAMING:
1175 if (!l2cap_pi(sk)->psm) {
1176 bdaddr_t *src = &bt_sk(sk)->src;
1181 write_lock_bh(&l2cap_sk_list.lock);
/* Valid PSMs are odd; step by 2. */
1183 for (psm = 0x1001; psm < 0x1100; psm += 2)
1184 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1185 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1186 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1191 write_unlock_bh(&l2cap_sk_list.lock);
1197 sk->sk_max_ack_backlog = backlog;
1198 sk->sk_ack_backlog = 0;
1199 sk->sk_state = BT_LISTEN;
/* accept(2): sleep (interruptibly, honoring the receive timeout) on the
 * listening socket's wait queue until bt_accept_dequeue() yields a
 * connected child, then hand it to 'newsock'. */
1206 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1208 DECLARE_WAITQUEUE(wait, current);
1209 struct sock *sk = sock->sk, *nsk;
/* Nested locking class: child sockets lock under the parent. */
1213 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1215 if (sk->sk_state != BT_LISTEN) {
1220 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1222 BT_DBG("sk %p timeo %ld", sk, timeo);
1224 /* Wait for an incoming connection. (wake-one). */
1225 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1226 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1227 set_current_state(TASK_INTERRUPTIBLE);
/* Socket is released while sleeping (release_sock line dropped in
 * extraction), then re-locked after the timeout. */
1234 timeo = schedule_timeout(timeo);
1235 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1237 if (sk->sk_state != BT_LISTEN) {
1242 if (signal_pending(current)) {
1243 err = sock_intr_errno(timeo);
1247 set_current_state(TASK_RUNNING);
1248 remove_wait_queue(sk_sleep(sk), &wait);
1253 newsock->state = SS_CONNECTED;
1255 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer's
 * (psm, bdaddr, dcid) or our own (sport, bdaddr, scid). */
1262 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1264 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1265 struct sock *sk = sock->sk;
1267 BT_DBG("sock %p, sk %p", sock, sk);
1269 addr->sa_family = AF_BLUETOOTH;
1270 *len = sizeof(struct sockaddr_l2);
/* peer branch (if-line dropped in extraction). */
1273 la->l2_psm = l2cap_pi(sk)->psm;
1274 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1275 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1277 la->l2_psm = l2cap_pi(sk)->sport;
1278 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1279 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Block until every outstanding ERTM I-frame has been acknowledged, or
 * a signal/socket error interrupts the wait. */
1285 static int __l2cap_wait_ack(struct sock *sk)
1287 DECLARE_WAITQUEUE(wait, current);
1291 add_wait_queue(sk_sleep(sk), &wait);
1292 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1293 set_current_state(TASK_INTERRUPTIBLE);
1298 if (signal_pending(current)) {
1299 err = sock_intr_errno(timeo);
1304 timeo = schedule_timeout(timeo);
1307 err = sock_error(sk);
1311 set_current_state(TASK_RUNNING);
1312 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: the peer did not answer our Poll. Give up and
 * disconnect after remote_max_tx retries, otherwise poll again. */
1316 static void l2cap_monitor_timeout(unsigned long arg)
1318 struct sock *sk = (void *) arg;
1320 BT_DBG("sk %p", sk);
1323 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1324 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1329 l2cap_pi(sk)->retry_count++;
1330 __mod_monitor_timer();
1332 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: an I-frame went unacknowledged. Start the
 * poll/monitor sequence (WAIT_F: expect an F-bit in the reply). */
1336 static void l2cap_retrans_timeout(unsigned long arg)
1338 struct sock *sk = (void *) arg;
1340 BT_DBG("sk %p", sk);
1343 l2cap_pi(sk)->retry_count = 1;
1344 __mod_monitor_timer();
1346 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1348 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Pop frames off the head of the TX queue up to (not including) the
 * expected_ack_seq; stop the retransmission timer once nothing is
 * outstanding. */
1352 static void l2cap_drop_acked_frames(struct sock *sk)
1354 struct sk_buff *skb;
1356 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1357 l2cap_pi(sk)->unacked_frames) {
1358 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1361 skb = skb_dequeue(TX_QUEUE(sk));
1364 l2cap_pi(sk)->unacked_frames--;
1367 if (!l2cap_pi(sk)->unacked_frames)
1368 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one skb to the ACL transport for this channel. */
1371 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1373 struct l2cap_pinfo *pi = l2cap_pi(sk);
1375 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1377 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode TX: clone each queued frame, stamp the TxSeq into its
 * control field (and recompute the FCS over it), send the clone, then
 * drop the original — streaming mode never retransmits. */
1380 static int l2cap_streaming_send(struct sock *sk)
1382 struct sk_buff *skb, *tx_skb;
1383 struct l2cap_pinfo *pi = l2cap_pi(sk);
1386 while ((skb = sk->sk_send_head)) {
1387 tx_skb = skb_clone(skb, GFP_ATOMIC);
1389 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1390 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1391 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1393 if (pi->fcs == L2CAP_FCS_CRC16) {
1394 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1395 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1398 l2cap_do_send(sk, tx_skb);
/* TxSeq is modulo-64 in ERTM/streaming control fields. */
1400 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1402 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1403 sk->sk_send_head = NULL;
1405 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1407 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the single I-frame whose TxSeq matches 'tx_seq' (SREJ
 * recovery): locate it in the TX queue, refresh its ReqSeq/F-bit and
 * FCS in a clone, and send the clone. Disconnects when the frame has
 * already been retried remote_max_tx times. */
1413 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1415 struct l2cap_pinfo *pi = l2cap_pi(sk);
1416 struct sk_buff *skb, *tx_skb;
1419 skb = skb_peek(TX_QUEUE(sk));
1424 if (bt_cb(skb)->tx_seq == tx_seq)
1427 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1430 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1432 if (pi->remote_max_tx &&
1433 bt_cb(skb)->retries == pi->remote_max_tx) {
1434 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1438 tx_skb = skb_clone(skb, GFP_ATOMIC);
1439 bt_cb(skb)->retries++;
1440 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1442 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1443 control |= L2CAP_CTRL_FINAL;
1444 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Re-stamp acknowledgement (ReqSeq) and the original TxSeq. */
1447 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1448 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1450 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1452 if (pi->fcs == L2CAP_FCS_CRC16) {
1453 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1454 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1457 l2cap_do_send(sk, tx_skb);
/* ERTM TX path: while the transmit window has room, clone the next
 * frame from the TX queue, stamp ReqSeq/TxSeq (preserving the SAR
 * bits), recompute the FCS, send it and arm the retransmission timer.
 * Frames stay queued until acknowledged. */
1460 static int l2cap_ertm_send(struct sock *sk)
1462 struct sk_buff *skb, *tx_skb;
1463 struct l2cap_pinfo *pi = l2cap_pi(sk);
1467 if (sk->sk_state != BT_CONNECTED)
1470 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1472 if (pi->remote_max_tx &&
1473 bt_cb(skb)->retries == pi->remote_max_tx) {
1474 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1478 tx_skb = skb_clone(skb, GFP_ATOMIC);
1480 bt_cb(skb)->retries++;
/* Keep only the SAR bits of the queued control field. */
1482 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1483 control &= L2CAP_CTRL_SAR;
1485 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1486 control |= L2CAP_CTRL_FINAL;
1487 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1489 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1490 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1491 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed over and written via 'skb' (the queued
 * original) while 'tx_skb' (the clone) is transmitted. skb_clone()
 * shares the data buffer so the bytes are likely the same, but this is
 * inconsistent with the tx_skb references above and with the sibling
 * functions; upstream later normalized both to tx_skb->data — verify. */
1494 if (pi->fcs == L2CAP_FCS_CRC16) {
1495 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1496 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1499 l2cap_do_send(sk, tx_skb);
1501 __mod_retrans_timer();
1503 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1504 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1506 pi->unacked_frames++;
1509 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1510 sk->sk_send_head = NULL;
1512 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* REJ recovery: rewind sk_send_head to the front of the TX queue and
 * next_tx_seq to the last acknowledged sequence, then resend under the
 * per-channel send lock. */
1520 static int l2cap_retransmit_frames(struct sock *sk)
1522 struct l2cap_pinfo *pi = l2cap_pi(sk);
1525 spin_lock_bh(&pi->send_lock);
1527 if (!skb_queue_empty(TX_QUEUE(sk)))
1528 sk->sk_send_head = TX_QUEUE(sk)->next;
1530 pi->next_tx_seq = pi->expected_ack_seq;
1531 ret = l2cap_ertm_send(sk);
1533 spin_unlock_bh(&pi->send_lock);
/* Acknowledge received I-frames: send RNR when locally busy, otherwise
 * try to piggy-back the ack on pending data; fall back to a plain RR
 * S-frame when nothing was sent. */
1538 static void l2cap_send_ack(struct l2cap_pinfo *pi)
1540 struct sock *sk = (struct sock *)pi;
1544 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1546 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1547 control |= L2CAP_SUPER_RCV_NOT_READY;
1548 pi->conn_state |= L2CAP_CONN_RNR_SENT;
1549 l2cap_send_sframe(pi, control);
1553 spin_lock_bh(&pi->send_lock);
1554 nframes = l2cap_ertm_send(sk);
1555 spin_unlock_bh(&pi->send_lock);
/* nframes > 0 means the ack went out on an I-frame already. */
1560 control |= L2CAP_SUPER_RCV_READY;
1561 l2cap_send_sframe(pi, control);
/* Send a SREJ S-frame (with F-bit) for the newest entry on the SREJ
 * list — the tail holds the most recently detected gap. */
1564 static void l2cap_send_srejtail(struct sock *sk)
1566 struct srej_list *tail;
1569 control = L2CAP_SUPER_SELECT_REJECT;
1570 control |= L2CAP_CTRL_FINAL;
1572 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1573 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1575 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy 'len' bytes of user data from msg into skb: 'count' bytes go in
 * the main buffer, the remainder is chained as frag_list fragments each
 * at most conn->mtu bytes.  Returns 0 on success, negative errno
 * otherwise (error paths are in the missing original lines). */
1578 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1580 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1581 struct sk_buff **frag;
/* first chunk lands directly in the already-allocated skb */
1584 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1590 /* Continuation fragments (no L2CAP header) */
1591 frag = &skb_shinfo(skb)->frag_list;
/* each fragment is capped at the ACL link MTU */
1593 count = min_t(unsigned int, conn->mtu, len);
1595 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1598 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
/* advance to the next link of the fragment chain */
1604 frag = &(*frag)->next;
/* Build a connectionless (SOCK_DGRAM) PDU: basic L2CAP header plus a
 * 2-byte PSM prefix, then the user payload copied from msg.
 * Returns the skb or an ERR_PTR on failure. */
1610 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1612 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1613 struct sk_buff *skb;
/* hlen = L2CAP header + 2 bytes for the PSM */
1614 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1615 struct l2cap_hdr *lh;
1617 BT_DBG("sk %p len %d", sk, (int)len);
/* only the first fragment carries the header; rest via frag_list */
1619 count = min_t(unsigned int, (conn->mtu - hlen), len);
1620 skb = bt_skb_send_alloc(sk, count + hlen,
1621 msg->msg_flags & MSG_DONTWAIT, &err);
1623 return ERR_PTR(-ENOMEM);
1625 /* Create L2CAP header */
1626 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1627 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
/* header 'len' covers payload plus the PSM field */
1628 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1630 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1631 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1632 if (unlikely(err < 0)) {
1634 return ERR_PTR(err);
/* Build a basic-mode PDU: plain L2CAP header followed by the user
 * payload copied from msg.  Returns the skb or an ERR_PTR. */
1639 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1641 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1642 struct sk_buff *skb;
1643 int err, count, hlen = L2CAP_HDR_SIZE;
1644 struct l2cap_hdr *lh;
1646 BT_DBG("sk %p len %d", sk, (int)len);
/* first fragment limited by ACL MTU; remainder chained as fragments */
1648 count = min_t(unsigned int, (conn->mtu - hlen), len);
1649 skb = bt_skb_send_alloc(sk, count + hlen,
1650 msg->msg_flags & MSG_DONTWAIT, &err);
1652 return ERR_PTR(-ENOMEM);
1654 /* Create L2CAP header */
1655 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1656 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1657 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1659 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1660 if (unlikely(err < 0)) {
1662 return ERR_PTR(err);
/* Build an ERTM/streaming I-frame PDU: L2CAP header, 16-bit control
 * field, optional 2-byte SDU length (for SAR start frames), payload,
 * and a placeholder FCS when CRC16 is in use.  Returns skb or ERR_PTR. */
1667 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1669 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1670 struct sk_buff *skb;
/* hlen = L2CAP header + 2-byte control field (sdulen/FCS added below) */
1671 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1672 struct l2cap_hdr *lh;
1674 BT_DBG("sk %p len %d", sk, (int)len);
1677 return ERR_PTR(-ENOTCONN);
/* reserve 2 extra header bytes for the FCS when CRC16 is enabled */
1682 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1685 count = min_t(unsigned int, (conn->mtu - hlen), len);
1686 skb = bt_skb_send_alloc(sk, count + hlen,
1687 msg->msg_flags & MSG_DONTWAIT, &err);
1689 return ERR_PTR(-ENOMEM);
1691 /* Create L2CAP header */
1692 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1693 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1694 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1695 put_unaligned_le16(control, skb_put(skb, 2));
/* SAR 'start' frames carry the total SDU length after the control field */
1697 put_unaligned_le16(sdulen, skb_put(skb, 2));
1699 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1700 if (unlikely(err < 0)) {
1702 return ERR_PTR(err);
/* FCS is zeroed here; the real CRC is filled in on transmission */
1705 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1706 put_unaligned_le16(0, skb_put(skb, 2));
/* fresh frame: retransmission counter starts at zero */
1708 bt_cb(skb)->retries = 0;
/* Segment an SDU larger than the remote MPS into a START / CONTINUE* /
 * END sequence of I-frames, building them on a private queue and only
 * splicing onto the socket tx queue once all segments were created
 * (so a mid-stream allocation failure leaves the tx queue untouched). */
1712 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1714 struct l2cap_pinfo *pi = l2cap_pi(sk);
1715 struct sk_buff *skb;
1716 struct sk_buff_head sar_queue;
1720 skb_queue_head_init(&sar_queue);
/* first segment carries the total SDU length ('len') as sdulen */
1721 control = L2CAP_SDU_START;
1722 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1724 return PTR_ERR(skb);
1726 __skb_queue_tail(&sar_queue, skb);
1727 len -= pi->remote_mps;
1728 size += pi->remote_mps;
/* middle segments are full-MPS CONTINUE frames; the last is END */
1733 if (len > pi->remote_mps) {
1734 control = L2CAP_SDU_CONTINUE;
1735 buflen = pi->remote_mps;
1737 control = L2CAP_SDU_END;
1741 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* drop all partially built segments on failure */
1743 skb_queue_purge(&sar_queue);
1744 return PTR_ERR(skb);
1747 __skb_queue_tail(&sar_queue, skb);
/* atomically publish the whole SDU onto the tx queue */
1751 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1752 spin_lock_bh(&pi->send_lock);
1753 if (sk->sk_send_head == NULL)
1754 sk->sk_send_head = sar_queue.next;
1755 spin_unlock_bh(&pi->send_lock);
1760 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1762 struct sock *sk = sock->sk;
1763 struct l2cap_pinfo *pi = l2cap_pi(sk);
1764 struct sk_buff *skb;
1768 BT_DBG("sock %p, sk %p", sock, sk);
1770 err = sock_error(sk);
1774 if (msg->msg_flags & MSG_OOB)
1779 if (sk->sk_state != BT_CONNECTED) {
1784 /* Connectionless channel */
1785 if (sk->sk_type == SOCK_DGRAM) {
1786 skb = l2cap_create_connless_pdu(sk, msg, len);
1790 l2cap_do_send(sk, skb);
1797 case L2CAP_MODE_BASIC:
1798 /* Check outgoing MTU */
1799 if (len > pi->omtu) {
1804 /* Create a basic PDU */
1805 skb = l2cap_create_basic_pdu(sk, msg, len);
1811 l2cap_do_send(sk, skb);
1815 case L2CAP_MODE_ERTM:
1816 case L2CAP_MODE_STREAMING:
1817 /* Entire SDU fits into one PDU */
1818 if (len <= pi->remote_mps) {
1819 control = L2CAP_SDU_UNSEGMENTED;
1820 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1825 __skb_queue_tail(TX_QUEUE(sk), skb);
1827 if (pi->mode == L2CAP_MODE_ERTM)
1828 spin_lock_bh(&pi->send_lock);
1830 if (sk->sk_send_head == NULL)
1831 sk->sk_send_head = skb;
1833 if (pi->mode == L2CAP_MODE_ERTM)
1834 spin_unlock_bh(&pi->send_lock);
1836 /* Segment SDU into multiples PDUs */
1837 err = l2cap_sar_segment_sdu(sk, msg, len);
1842 if (pi->mode == L2CAP_MODE_STREAMING) {
1843 err = l2cap_streaming_send(sk);
1845 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1846 pi->conn_state && L2CAP_CONN_WAIT_F) {
1850 spin_lock_bh(&pi->send_lock);
1851 err = l2cap_ertm_send(sk);
1852 spin_unlock_bh(&pi->send_lock);
1860 BT_DBG("bad state %1.1x", pi->mode);
/* Receive path: if the channel was accepted with defer_setup, the first
 * recvmsg() completes connection setup by sending the pending
 * connect response; then delegate to the generic bt_sock_recvmsg(). */
1869 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1871 struct sock *sk = sock->sk;
1875 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1876 struct l2cap_conn_rsp rsp;
1878 sk->sk_state = BT_CONFIG;
/* response scid/dcid are from the peer's perspective, hence swapped */
1880 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1881 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1882 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1883 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* reply with the ident saved from the original connect request */
1884 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1885 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1893 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/* Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS, L2CAP_LM, ...).
 * NOTE(review): fragmentary extract — the switch statement, error
 * labels and remaining cases are in the missing original lines. */
1896 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1898 struct sock *sk = sock->sk;
1899 struct l2cap_options opts;
1903 BT_DBG("sk %p", sk);
/* preload current values so a short copy keeps the rest unchanged */
1909 opts.imtu = l2cap_pi(sk)->imtu;
1910 opts.omtu = l2cap_pi(sk)->omtu;
1911 opts.flush_to = l2cap_pi(sk)->flush_to;
1912 opts.mode = l2cap_pi(sk)->mode;
1913 opts.fcs = l2cap_pi(sk)->fcs;
1914 opts.max_tx = l2cap_pi(sk)->max_tx;
1915 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1917 len = min_t(unsigned int, sizeof(opts), optlen);
1918 if (copy_from_user((char *) &opts, optval, len)) {
/* ERTM tx window is capped at the spec default window size */
1923 if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {
1928 l2cap_pi(sk)->mode = opts.mode;
1929 switch (l2cap_pi(sk)->mode) {
1930 case L2CAP_MODE_BASIC:
1931 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
1933 case L2CAP_MODE_ERTM:
1934 case L2CAP_MODE_STREAMING:
1943 l2cap_pi(sk)->imtu = opts.imtu;
1944 l2cap_pi(sk)->omtu = opts.omtu;
1945 l2cap_pi(sk)->fcs = opts.fcs;
1946 l2cap_pi(sk)->max_tx = opts.max_tx;
1947 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
/* L2CAP_LM case: map link-mode bits onto a security level */
1951 if (get_user(opt, (u32 __user *) optval)) {
/* later flags override earlier ones: SECURE > ENCRYPT > AUTH */
1956 if (opt & L2CAP_LM_AUTH)
1957 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1958 if (opt & L2CAP_LM_ENCRYPT)
1959 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1960 if (opt & L2CAP_LM_SECURE)
1961 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1963 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1964 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/* New-style setsockopt: SOL_L2CAP is routed to the legacy handler,
 * SOL_BLUETOOTH handles BT_SECURITY and BT_DEFER_SETUP. */
1976 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1978 struct sock *sk = sock->sk;
1979 struct bt_security sec;
1983 BT_DBG("sk %p", sk);
1985 if (level == SOL_L2CAP)
1986 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1988 if (level != SOL_BLUETOOTH)
1989 return -ENOPROTOOPT;
/* BT_SECURITY only applies to seqpacket/stream/raw L2CAP sockets */
1995 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1996 && sk->sk_type != SOCK_RAW) {
2001 sec.level = BT_SECURITY_LOW;
2003 len = min_t(unsigned int, sizeof(sec), optlen);
2004 if (copy_from_user((char *) &sec, optval, len)) {
2009 if (sec.level < BT_SECURITY_LOW ||
2010 sec.level > BT_SECURITY_HIGH) {
2015 l2cap_pi(sk)->sec_level = sec.level;
2018 case BT_DEFER_SETUP:
/* deferred accept only makes sense before the channel is live */
2019 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2024 if (get_user(opt, (u32 __user *) optval)) {
2029 bt_sk(sk)->defer_setup = opt;
/* Legacy SOL_L2CAP getsockopt handler (L2CAP_OPTIONS, L2CAP_LM,
 * L2CAP_CONNINFO).  Copies at most the user-supplied length. */
2041 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
2043 struct sock *sk = sock->sk;
2044 struct l2cap_options opts;
2045 struct l2cap_conninfo cinfo;
2049 BT_DBG("sk %p", sk);
2051 if (get_user(len, optlen))
2058 opts.imtu = l2cap_pi(sk)->imtu;
2059 opts.omtu = l2cap_pi(sk)->omtu;
2060 opts.flush_to = l2cap_pi(sk)->flush_to;
2061 opts.mode = l2cap_pi(sk)->mode;
2062 opts.fcs = l2cap_pi(sk)->fcs;
2063 opts.max_tx = l2cap_pi(sk)->max_tx;
2064 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
2066 len = min_t(unsigned int, len, sizeof(opts));
2067 if (copy_to_user(optval, (char *) &opts, len))
/* L2CAP_LM: translate the stored security level back to LM flags */
2073 switch (l2cap_pi(sk)->sec_level) {
2074 case BT_SECURITY_LOW:
2075 opt = L2CAP_LM_AUTH;
2077 case BT_SECURITY_MEDIUM:
2078 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
2080 case BT_SECURITY_HIGH:
2081 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
2089 if (l2cap_pi(sk)->role_switch)
2090 opt |= L2CAP_LM_MASTER;
2092 if (l2cap_pi(sk)->force_reliable)
2093 opt |= L2CAP_LM_RELIABLE;
2095 if (put_user(opt, (u32 __user *) optval))
2099 case L2CAP_CONNINFO:
/* conn info is valid once connected, or during a deferred accept */
2100 if (sk->sk_state != BT_CONNECTED &&
2101 !(sk->sk_state == BT_CONNECT2 &&
2102 bt_sk(sk)->defer_setup)) {
2107 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
2108 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
2110 len = min_t(unsigned int, len, sizeof(cinfo));
2111 if (copy_to_user(optval, (char *) &cinfo, len))
/* New-style getsockopt: SOL_L2CAP goes to the legacy handler;
 * SOL_BLUETOOTH serves BT_SECURITY and BT_DEFER_SETUP. */
2125 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2127 struct sock *sk = sock->sk;
2128 struct bt_security sec;
2131 BT_DBG("sk %p", sk);
2133 if (level == SOL_L2CAP)
2134 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2136 if (level != SOL_BLUETOOTH)
2137 return -ENOPROTOOPT;
2139 if (get_user(len, optlen))
2146 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2147 && sk->sk_type != SOCK_RAW) {
2152 sec.level = l2cap_pi(sk)->sec_level;
2154 len = min_t(unsigned int, len, sizeof(sec));
2155 if (copy_to_user(optval, (char *) &sec, len))
2160 case BT_DEFER_SETUP:
2161 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2166 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/* Shut the socket down: in ERTM mode first wait for outstanding frames
 * to be acked, then close the channel and optionally linger until the
 * socket reaches BT_CLOSED. */
2180 static int l2cap_sock_shutdown(struct socket *sock, int how)
2182 struct sock *sk = sock->sk;
2185 BT_DBG("sock %p, sk %p", sock, sk);
2191 if (!sk->sk_shutdown) {
/* drain the ERTM tx window before tearing the channel down */
2192 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2193 err = __l2cap_wait_ack(sk);
2195 sk->sk_shutdown = SHUTDOWN_MASK;
2196 l2cap_sock_clear_timer(sk);
2197 __l2cap_sock_close(sk, 0);
/* SO_LINGER: block until fully closed or the linger time expires */
2199 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2200 err = bt_sock_wait_state(sk, BT_CLOSED,
2204 if (!err && sk->sk_err)
/* Release the socket: perform a full shutdown, then kill the sock. */
2211 static int l2cap_sock_release(struct socket *sock)
2213 struct sock *sk = sock->sk;
2216 BT_DBG("sock %p, sk %p", sock, sk);
2221 err = l2cap_sock_shutdown(sock, 2);
2224 l2cap_sock_kill(sk);
/* Mark a channel as fully configured: clear config state and wake up
 * whoever is waiting — the connecting thread (no parent) or the parent
 * socket sleeping in accept() (incoming channel). */
2228 static void l2cap_chan_ready(struct sock *sk)
2230 struct sock *parent = bt_sk(sk)->parent;
2232 BT_DBG("sk %p, parent %p", sk, parent);
2234 l2cap_pi(sk)->conf_state = 0;
2235 l2cap_sock_clear_timer(sk);
2238 /* Outgoing channel.
2239 * Wake up socket sleeping on connect.
2241 sk->sk_state = BT_CONNECTED;
2242 sk->sk_state_change(sk);
2244 /* Incoming channel.
2245 * Wake up socket sleeping on accept.
2247 parent->sk_data_ready(parent, 0);
1007 /* Copy frame to all raw sockets on that connection */
2252 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2254 struct l2cap_chan_list *l = &conn->chan_list;
2255 struct sk_buff *nskb;
2258 BT_DBG("conn %p", conn);
/* walk the per-connection channel list under its read lock */
2260 read_lock(&l->lock);
2261 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2262 if (sk->sk_type != SOCK_RAW)
2265 /* Don't send frame to the socket it came from */
/* clone so each raw socket gets its own refcounted copy */
2268 nskb = skb_clone(skb, GFP_ATOMIC);
/* on queue failure the clone is dropped (free in missing lines) */
2272 if (sock_queue_rcv_skb(sk, nskb))
2275 read_unlock(&l->lock);
2278 /* ---- L2CAP signalling commands ---- */
/* Build a signalling-channel command skb: L2CAP header on the fixed
 * signalling CID, command header, then 'dlen' bytes of payload, with
 * continuation data chained via frag_list when it exceeds the MTU. */
2279 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2280 u8 code, u8 ident, u16 dlen, void *data)
2282 struct sk_buff *skb, **frag;
2283 struct l2cap_cmd_hdr *cmd;
2284 struct l2cap_hdr *lh;
2287 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2288 conn, code, ident, dlen);
2290 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2291 count = min_t(unsigned int, conn->mtu, len);
2293 skb = bt_skb_alloc(count, GFP_ATOMIC);
2297 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2298 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2299 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2301 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2304 cmd->len = cpu_to_le16(dlen);
/* first chunk of the payload fits in the head skb */
2307 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2308 memcpy(skb_put(skb, count), data, count);
2314 /* Continuation fragments (no L2CAP header) */
2315 frag = &skb_shinfo(skb)->frag_list;
2317 count = min_t(unsigned int, conn->mtu, len);
2319 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2323 memcpy(skb_put(*frag, count), data, count);
2328 frag = &(*frag)->next;
/* Decode one configuration option at *ptr: return its total encoded
 * length and pass back type/olen/val.  1/2/4-byte values are converted
 * from little-endian; larger values are returned as a pointer. */
2338 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2340 struct l2cap_conf_opt *opt = *ptr;
2343 len = L2CAP_CONF_OPT_SIZE + opt->len;
2351 *val = *((u8 *) opt->val);
2355 *val = __le16_to_cpu(*((__le16 *) opt->val));
2359 *val = __le32_to_cpu(*((__le32 *) opt->val));
/* variable-length option: hand back a pointer into the buffer */
2363 *val = (unsigned long) opt->val;
2367 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/* Append one configuration option (type/len/val) at *ptr and advance
 * the cursor.  1/2/4-byte values are stored little-endian; anything
 * else is memcpy'd from the address passed in 'val'. */
2371 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2373 struct l2cap_conf_opt *opt = *ptr;
2375 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2382 *((u8 *) opt->val) = val;
2386 *((__le16 *) opt->val) = cpu_to_le16(val);
2390 *((__le32 *) opt->val) = cpu_to_le32(val);
/* larger payloads: 'val' is actually a pointer to the data */
2394 memcpy(opt->val, (void *) val, len);
2398 *ptr += L2CAP_CONF_OPT_SIZE + len;
/* ERTM ack-timer callback: delayed acknowledgement of received frames. */
2401 static void l2cap_ack_timeout(unsigned long arg)
2403 struct sock *sk = (void *) arg;
2406 l2cap_send_ack(l2cap_pi(sk));
/* Initialize per-channel ERTM state: sequence counters, the three ERTM
 * timers (retransmission, monitor, ack), the SREJ/busy receive queues,
 * the send lock and the local-busy work item. */
2410 static inline void l2cap_ertm_init(struct sock *sk)
2412 l2cap_pi(sk)->expected_ack_seq = 0;
2413 l2cap_pi(sk)->unacked_frames = 0;
2414 l2cap_pi(sk)->buffer_seq = 0;
2415 l2cap_pi(sk)->num_acked = 0;
2416 l2cap_pi(sk)->frames_sent = 0;
2418 setup_timer(&l2cap_pi(sk)->retrans_timer,
2419 l2cap_retrans_timeout, (unsigned long) sk);
2420 setup_timer(&l2cap_pi(sk)->monitor_timer,
2421 l2cap_monitor_timeout, (unsigned long) sk);
2422 setup_timer(&l2cap_pi(sk)->ack_timer,
2423 l2cap_ack_timeout, (unsigned long) sk);
2425 __skb_queue_head_init(SREJ_QUEUE(sk));
2426 __skb_queue_head_init(BUSY_QUEUE(sk));
2427 spin_lock_init(&l2cap_pi(sk)->send_lock);
2429 INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);
/* Check whether 'mode' is supported by both the remote feature mask and
 * our local one (ERTM/streaming bits are only set locally when the
 * enable_ertm module parameter is on — guard is in a missing line). */
2432 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2434 u32 local_feat_mask = l2cap_feat_mask;
2436 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2439 case L2CAP_MODE_ERTM:
2440 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2441 case L2CAP_MODE_STREAMING:
2442 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Pick an operating mode: keep the requested ERTM/streaming mode when
 * the remote supports it, otherwise fall back to basic mode. */
2448 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2451 case L2CAP_MODE_STREAMING:
2452 case L2CAP_MODE_ERTM:
2453 if (l2cap_mode_supported(mode, remote_feat_mask))
2457 return L2CAP_MODE_BASIC;
/* Build our outgoing configuration request into 'data': pick the mode
 * (possibly downgrading based on the remote feature mask), then emit
 * MTU / RFC / FCS options as appropriate for that mode.
 * Returns the encoded length (return is in a missing line). */
2461 static int l2cap_build_conf_req(struct sock *sk, void *data)
2463 struct l2cap_pinfo *pi = l2cap_pi(sk);
2464 struct l2cap_conf_req *req = data;
2465 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2466 void *ptr = req->data;
2468 BT_DBG("sk %p", sk);
/* only negotiate the mode on the very first request/response */
2470 if (pi->num_conf_req || pi->num_conf_rsp)
2474 case L2CAP_MODE_STREAMING:
2475 case L2CAP_MODE_ERTM:
2476 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2477 pi->mode = l2cap_select_mode(rfc.mode,
2478 pi->conn->feat_mask);
/* STATE2_DEVICE: mode was pinned by the user, refuse to downgrade */
2482 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2483 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
2486 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2492 case L2CAP_MODE_BASIC:
/* only send an MTU option when it differs from the default */
2493 if (pi->imtu != L2CAP_DEFAULT_MTU)
2494 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2497 case L2CAP_MODE_ERTM:
2498 rfc.mode = L2CAP_MODE_ERTM;
2499 rfc.txwin_size = pi->tx_win;
2500 rfc.max_transmit = pi->max_tx;
/* timeouts are 0 in the request; the responder fills in real values */
2501 rfc.retrans_timeout = 0;
2502 rfc.monitor_timeout = 0;
/* clamp MPS so a PDU plus ERTM overhead fits the ACL MTU */
2503 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2504 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2505 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2507 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2508 sizeof(rfc), (unsigned long) &rfc);
2510 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
/* explicitly request "no FCS" when we or the peer disabled it */
2513 if (pi->fcs == L2CAP_FCS_NONE ||
2514 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2515 pi->fcs = L2CAP_FCS_NONE;
2516 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2520 case L2CAP_MODE_STREAMING:
2521 rfc.mode = L2CAP_MODE_STREAMING;
/* streaming mode: no retransmission, so these fields stay zero */
2523 rfc.max_transmit = 0;
2524 rfc.retrans_timeout = 0;
2525 rfc.monitor_timeout = 0;
2526 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2527 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2528 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2530 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2531 sizeof(rfc), (unsigned long) &rfc);
2533 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2536 if (pi->fcs == L2CAP_FCS_NONE ||
2537 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2538 pi->fcs = L2CAP_FCS_NONE;
2539 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2544 /* FIXME: Need actual value of the flush timeout */
2545 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2546 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2548 req->dcid = cpu_to_le16(pi->dcid);
2549 req->flags = cpu_to_le16(0)
2554 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2556 struct l2cap_pinfo *pi = l2cap_pi(sk);
2557 struct l2cap_conf_rsp *rsp = data;
2558 void *ptr = rsp->data;
2559 void *req = pi->conf_req;
2560 int len = pi->conf_len;
2561 int type, hint, olen;
2563 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2564 u16 mtu = L2CAP_DEFAULT_MTU;
2565 u16 result = L2CAP_CONF_SUCCESS;
2567 BT_DBG("sk %p", sk);
2569 while (len >= L2CAP_CONF_OPT_SIZE) {
2570 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2572 hint = type & L2CAP_CONF_HINT;
2573 type &= L2CAP_CONF_MASK;
2576 case L2CAP_CONF_MTU:
2580 case L2CAP_CONF_FLUSH_TO:
2584 case L2CAP_CONF_QOS:
2587 case L2CAP_CONF_RFC:
2588 if (olen == sizeof(rfc))
2589 memcpy(&rfc, (void *) val, olen);
2592 case L2CAP_CONF_FCS:
2593 if (val == L2CAP_FCS_NONE)
2594 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2602 result = L2CAP_CONF_UNKNOWN;
2603 *((u8 *) ptr++) = type;
2608 if (pi->num_conf_rsp || pi->num_conf_req)
2612 case L2CAP_MODE_STREAMING:
2613 case L2CAP_MODE_ERTM:
2614 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2615 pi->mode = l2cap_select_mode(rfc.mode,
2616 pi->conn->feat_mask);
2620 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2621 return -ECONNREFUSED;
2624 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2629 if (pi->mode != rfc.mode) {
2630 result = L2CAP_CONF_UNACCEPT;
2631 rfc.mode = pi->mode;
2633 if (pi->num_conf_rsp == 1)
2634 return -ECONNREFUSED;
2636 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2637 sizeof(rfc), (unsigned long) &rfc);
2641 if (result == L2CAP_CONF_SUCCESS) {
2642 /* Configure output options and let the other side know
2643 * which ones we don't like. */
2645 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2646 result = L2CAP_CONF_UNACCEPT;
2649 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2651 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2654 case L2CAP_MODE_BASIC:
2655 pi->fcs = L2CAP_FCS_NONE;
2656 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2659 case L2CAP_MODE_ERTM:
2660 pi->remote_tx_win = rfc.txwin_size;
2661 pi->remote_max_tx = rfc.max_transmit;
2662 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2663 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2665 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2667 rfc.retrans_timeout =
2668 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2669 rfc.monitor_timeout =
2670 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2672 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2674 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2675 sizeof(rfc), (unsigned long) &rfc);
2679 case L2CAP_MODE_STREAMING:
2680 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2681 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2683 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2685 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2687 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2688 sizeof(rfc), (unsigned long) &rfc);
2693 result = L2CAP_CONF_UNACCEPT;
2695 memset(&rfc, 0, sizeof(rfc));
2696 rfc.mode = pi->mode;
2699 if (result == L2CAP_CONF_SUCCESS)
2700 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2702 rsp->scid = cpu_to_le16(pi->dcid);
2703 rsp->result = cpu_to_le16(result);
2704 rsp->flags = cpu_to_le16(0x0000);
/* Parse the peer's configuration response and build a follow-up request
 * in 'data', adjusting any options the peer rejected.  On success the
 * negotiated ERTM/streaming parameters are stored in pi. */
2709 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2711 struct l2cap_pinfo *pi = l2cap_pi(sk);
2712 struct l2cap_conf_req *req = data;
2713 void *ptr = req->data;
2716 struct l2cap_conf_rfc rfc;
2718 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2720 while (len >= L2CAP_CONF_OPT_SIZE) {
2721 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2724 case L2CAP_CONF_MTU:
/* peer proposed a too-small MTU: counter with our minimum */
2725 if (val < L2CAP_DEFAULT_MIN_MTU) {
2726 *result = L2CAP_CONF_UNACCEPT;
2727 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2730 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2733 case L2CAP_CONF_FLUSH_TO:
2735 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2739 case L2CAP_CONF_RFC:
2740 if (olen == sizeof(rfc))
2741 memcpy(&rfc, (void *)val, olen);
/* a user-pinned mode must not be renegotiated by the peer */
2743 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2744 rfc.mode != pi->mode)
2745 return -ECONNREFUSED;
2747 pi->mode = rfc.mode;
2750 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2751 sizeof(rfc), (unsigned long) &rfc);
/* on success, latch the negotiated ERTM/streaming parameters */
2756 if (*result == L2CAP_CONF_SUCCESS) {
2758 case L2CAP_MODE_ERTM:
2759 pi->remote_tx_win = rfc.txwin_size;
2760 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2761 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2762 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2764 case L2CAP_MODE_STREAMING:
2765 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2769 req->dcid = cpu_to_le16(pi->dcid);
2770 req->flags = cpu_to_le16(0x0000);
/* Build a minimal configuration response (scid/result/flags only);
 * returns its length (return statement is in a missing line). */
2775 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2777 struct l2cap_conf_rsp *rsp = data;
2778 void *ptr = rsp->data;
2780 BT_DBG("sk %p", sk);
2782 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2783 rsp->result = cpu_to_le16(result);
2784 rsp->flags = cpu_to_le16(flags);
/* Extract the RFC option from a successful configuration response and
 * store the final ERTM/streaming parameters; no-op for basic mode. */
2789 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2791 struct l2cap_pinfo *pi = l2cap_pi(sk);
2794 struct l2cap_conf_rfc rfc;
2796 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2798 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2801 while (len >= L2CAP_CONF_OPT_SIZE) {
2802 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2805 case L2CAP_CONF_RFC:
2806 if (olen == sizeof(rfc))
2807 memcpy(&rfc, (void *)val, olen);
2814 case L2CAP_MODE_ERTM:
2815 pi->remote_tx_win = rfc.txwin_size;
2816 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2817 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2818 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2820 case L2CAP_MODE_STREAMING:
2821 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/* Handle a Command Reject: if it answers our outstanding information
 * request, stop the timer and proceed with connection setup anyway. */
2825 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2827 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
/* 0x0000 = "command not understood"; other reasons are ignored here */
2829 if (rej->reason != 0x0000)
2832 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2833 cmd->ident == conn->info_ident) {
2834 del_timer(&conn->info_timer);
2836 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2837 conn->info_ident = 0;
2839 l2cap_conn_start(conn);
/* Handle an incoming Connection Request: locate the listening socket
 * for the PSM, enforce link security, allocate and register a child
 * channel, send the Connection Response, and kick off an information
 * request if the feature exchange has not happened yet. */
2845 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2847 struct l2cap_chan_list *list = &conn->chan_list;
2848 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2849 struct l2cap_conn_rsp rsp;
2850 struct sock *sk, *parent;
2851 int result, status = L2CAP_CS_NO_INFO;
2853 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2854 __le16 psm = req->psm;
2856 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2858 /* Check if we have socket listening on psm */
2859 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2861 result = L2CAP_CR_BAD_PSM;
2865 /* Check if the ACL is secure enough (if not SDP) */
/* PSM 0x0001 is SDP, which is allowed on an unauthenticated link */
2866 if (psm != cpu_to_le16(0x0001) &&
2867 !hci_conn_check_link_mode(conn->hcon)) {
2868 conn->disc_reason = 0x05;
2869 result = L2CAP_CR_SEC_BLOCK;
2873 result = L2CAP_CR_NO_MEM;
2875 /* Check for backlog size */
2876 if (sk_acceptq_is_full(parent)) {
2877 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2881 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2885 write_lock_bh(&list->lock);
2887 /* Check if we already have channel with that dcid */
2888 if (__l2cap_get_chan_by_dcid(list, scid)) {
2889 write_unlock_bh(&list->lock);
2890 sock_set_flag(sk, SOCK_ZAPPED);
2891 l2cap_sock_kill(sk);
2895 hci_conn_hold(conn->hcon);
2897 l2cap_sock_init(sk, parent);
2898 bacpy(&bt_sk(sk)->src, conn->src);
2899 bacpy(&bt_sk(sk)->dst, conn->dst);
2900 l2cap_pi(sk)->psm = psm;
/* the peer's source CID becomes our destination CID */
2901 l2cap_pi(sk)->dcid = scid;
2903 __l2cap_chan_add(conn, sk, parent);
2904 dcid = l2cap_pi(sk)->scid;
2906 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* remember the ident so a deferred accept can answer later */
2908 l2cap_pi(sk)->ident = cmd->ident;
2910 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2911 if (l2cap_check_security(sk)) {
2912 if (bt_sk(sk)->defer_setup) {
2913 sk->sk_state = BT_CONNECT2;
2914 result = L2CAP_CR_PEND;
2915 status = L2CAP_CS_AUTHOR_PEND;
2916 parent->sk_data_ready(parent, 0);
2918 sk->sk_state = BT_CONFIG;
2919 result = L2CAP_CR_SUCCESS;
2920 status = L2CAP_CS_NO_INFO;
2923 sk->sk_state = BT_CONNECT2;
2924 result = L2CAP_CR_PEND;
2925 status = L2CAP_CS_AUTHEN_PEND;
/* feature exchange not done yet: answer "pending" for now */
2928 sk->sk_state = BT_CONNECT2;
2929 result = L2CAP_CR_PEND;
2930 status = L2CAP_CS_NO_INFO;
2933 write_unlock_bh(&list->lock);
2936 bh_unlock_sock(parent);
2939 rsp.scid = cpu_to_le16(scid);
2940 rsp.dcid = cpu_to_le16(dcid);
2941 rsp.result = cpu_to_le16(result);
2942 rsp.status = cpu_to_le16(status);
2943 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2945 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2946 struct l2cap_info_req info;
2947 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2949 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2950 conn->info_ident = l2cap_get_ident(conn);
2952 mod_timer(&conn->info_timer, jiffies +
2953 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2955 l2cap_send_cmd(conn, conn->info_ident,
2956 L2CAP_INFO_REQ, sizeof(info), &info);
/* Handle a Connection Response: on success move to BT_CONFIG and send
 * our configuration request; on pending just mark the channel; on any
 * other result tear the channel down. */
2962 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2964 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2965 u16 scid, dcid, result, status;
2969 scid = __le16_to_cpu(rsp->scid);
2970 dcid = __le16_to_cpu(rsp->dcid);
2971 result = __le16_to_cpu(rsp->result);
2972 status = __le16_to_cpu(rsp->status);
2974 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
/* scid is 0 while the request is still pending; fall back to ident */
2977 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2981 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2987 case L2CAP_CR_SUCCESS:
2988 sk->sk_state = BT_CONFIG;
2989 l2cap_pi(sk)->ident = 0;
2990 l2cap_pi(sk)->dcid = dcid;
2991 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2992 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2994 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2995 l2cap_build_conf_req(sk, req), req);
2996 l2cap_pi(sk)->num_conf_req++;
/* L2CAP_CR_PEND case: keep waiting for the final response */
3000 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
/* any other result refuses the connection */
3004 l2cap_chan_del(sk, ECONNREFUSED);
/* Handle a Configuration Request: accumulate option data across
 * continuation packets, then parse the complete request, send our
 * response, and bring the channel up once both directions are done. */
3012 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
3014 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
3020 dcid = __le16_to_cpu(req->dcid);
3021 flags = __le16_to_cpu(req->flags);
3023 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
3025 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3029 if (sk->sk_state == BT_DISCONN)
3032 /* Reject if config buffer is too small. */
3033 len = cmd_len - sizeof(*req);
3034 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
3035 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3036 l2cap_build_conf_rsp(sk, rsp,
3037 L2CAP_CONF_REJECT, flags), rsp);
/* append this packet's options to the accumulated buffer */
3042 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
3043 l2cap_pi(sk)->conf_len += len;
/* flag bit 0 = continuation; more option data will follow */
3045 if (flags & 0x0001) {
3046 /* Incomplete config. Send empty response. */
3047 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
3048 l2cap_build_conf_rsp(sk, rsp,
3049 L2CAP_CONF_SUCCESS, 0x0001), rsp);
3053 /* Complete config. */
3054 len = l2cap_parse_conf_req(sk, rsp);
/* negative return means no acceptable mode: disconnect */
3056 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3060 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
3061 l2cap_pi(sk)->num_conf_rsp++;
3063 /* Reset config buffer. */
3064 l2cap_pi(sk)->conf_len = 0;
3066 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
/* both directions configured: finalize FCS choice and go live */
3069 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
3070 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3071 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3072 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3074 sk->sk_state = BT_CONNECTED;
3076 l2cap_pi(sk)->next_tx_seq = 0;
3077 l2cap_pi(sk)->expected_tx_seq = 0;
3078 __skb_queue_head_init(TX_QUEUE(sk));
3079 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3080 l2cap_ertm_init(sk);
3082 l2cap_chan_ready(sk);
/* we responded first: now send our own config request */
3086 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
3088 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
3089 l2cap_build_conf_req(sk, buf), buf);
3090 l2cap_pi(sk)->num_conf_req++;
/* Handle a Configuration Response: on success record the negotiated
 * RFC values; on unaccept, re-negotiate with an adjusted request up to
 * the retry limit; otherwise disconnect.  Brings the channel up once
 * both config directions are complete. */
3098 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3100 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3101 u16 scid, flags, result;
/* NOTE(review): cmd->len looks like a __le16 wire field used directly
 * in host arithmetic here — verify an le16_to_cpu() isn't needed. */
3103 int len = cmd->len - sizeof(*rsp);
3105 scid = __le16_to_cpu(rsp->scid);
3106 flags = __le16_to_cpu(rsp->flags);
3107 result = __le16_to_cpu(rsp->result);
3109 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3110 scid, flags, result);
3112 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3117 case L2CAP_CONF_SUCCESS:
3118 l2cap_conf_rfc_get(sk, rsp->data, len);
3121 case L2CAP_CONF_UNACCEPT:
3122 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* rejected options must fit in a fresh request buffer */
3125 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3126 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3130 /* throw out any old stored conf requests */
3131 result = L2CAP_CONF_SUCCESS;
3132 len = l2cap_parse_conf_rsp(sk, rsp->data,
3135 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3139 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3140 L2CAP_CONF_REQ, len, req);
3141 l2cap_pi(sk)->num_conf_req++;
3142 if (result != L2CAP_CONF_SUCCESS)
/* rejection or retry limit hit: give the peer 5s then disconnect */
3148 sk->sk_err = ECONNRESET;
3149 l2cap_sock_set_timer(sk, HZ * 5);
3150 l2cap_send_disconn_req(conn, sk, ECONNRESET);
/* continuation responses (flag bit 0) carry no final result yet */
3157 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3159 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3160 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3161 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3162 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3164 sk->sk_state = BT_CONNECTED;
3165 l2cap_pi(sk)->next_tx_seq = 0;
3166 l2cap_pi(sk)->expected_tx_seq = 0;
3167 __skb_queue_head_init(TX_QUEUE(sk));
3168 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3169 l2cap_ertm_init(sk);
3171 l2cap_chan_ready(sk);
/*
 * Handle an incoming L2CAP Disconnection Request: look the channel up by
 * our destination CID, echo a Disconnection Response with both CIDs, then
 * shut down and kill the socket.
 */
3179 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3181 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3182 struct l2cap_disconn_rsp rsp;
3186 scid = __le16_to_cpu(req->scid);
3187 dcid = __le16_to_cpu(req->dcid);
3189 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* the peer's dcid is our scid, so look up by dcid */
3191 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3195 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3196 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3197 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3199 sk->sk_shutdown = SHUTDOWN_MASK;
3201 l2cap_chan_del(sk, ECONNRESET);
3204 l2cap_sock_kill(sk);
/*
 * Handle an incoming L2CAP Disconnection Response: the peer confirmed our
 * disconnect, so remove the channel (error 0 = clean close) and kill the
 * socket.
 */
3208 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3210 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3214 scid = __le16_to_cpu(rsp->scid);
3215 dcid = __le16_to_cpu(rsp->dcid);
3217 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3219 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3223 l2cap_chan_del(sk, 0);
3226 l2cap_sock_kill(sk);
/*
 * Handle an incoming L2CAP Information Request.  Supported query types are
 * the extended feature mask and the fixed-channel map; anything else gets
 * an L2CAP_IR_NOTSUPP response.
 */
3230 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3232 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3235 type = __le16_to_cpu(req->type);
3237 BT_DBG("type 0x%4.4x", type);
3239 if (type == L2CAP_IT_FEAT_MASK) {
3241 u32 feat_mask = l2cap_feat_mask;
3242 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3243 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3244 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* advertise ERTM/streaming support on top of the static mask */
3246 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3248 put_unaligned_le32(feat_mask, rsp->data);
3249 l2cap_send_cmd(conn, cmd->ident,
3250 L2CAP_INFO_RSP, sizeof(buf), buf);
3251 } else if (type == L2CAP_IT_FIXED_CHAN) {
3253 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3254 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3255 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed channel bitmap follows the 4-byte response header */
3256 memcpy(buf + 4, l2cap_fixed_chan, 8);
3257 l2cap_send_cmd(conn, cmd->ident,
3258 L2CAP_INFO_RSP, sizeof(buf), buf);
3260 struct l2cap_info_rsp rsp;
3261 rsp.type = cpu_to_le16(type);
3262 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3263 l2cap_send_cmd(conn, cmd->ident,
3264 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Handle an incoming L2CAP Information Response.  A feature-mask answer may
 * trigger a follow-up fixed-channel query; otherwise the information
 * exchange is marked done and pending channels are started.
 */
3270 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3272 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3275 type = __le16_to_cpu(rsp->type);
3276 result = __le16_to_cpu(rsp->result);
3278 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* reply arrived, so the info-request timeout is no longer needed */
3280 del_timer(&conn->info_timer);
3282 if (type == L2CAP_IT_FEAT_MASK) {
3283 conn->feat_mask = get_unaligned_le32(rsp->data);
3285 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
/* peer supports fixed channels: ask which ones */
3286 struct l2cap_info_req req;
3287 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3289 conn->info_ident = l2cap_get_ident(conn);
3291 l2cap_send_cmd(conn, conn->info_ident,
3292 L2CAP_INFO_REQ, sizeof(req), &req);
3294 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3295 conn->info_ident = 0;
3297 l2cap_conn_start(conn);
3299 } else if (type == L2CAP_IT_FIXED_CHAN) {
3300 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3301 conn->info_ident = 0;
3303 l2cap_conn_start(conn);
/*
 * Demultiplex the L2CAP signalling channel (CID 0x0001).  The skb may carry
 * several concatenated commands; each is parsed, validated, and dispatched
 * to its handler.  A handler failure is answered with a Command Reject.
 */
3309 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3311 u8 *data = skb->data;
3313 struct l2cap_cmd_hdr cmd;
/* give raw sockets a copy of the signalling traffic first */
3316 l2cap_raw_recv(conn, skb);
3318 while (len >= L2CAP_CMD_HDR_SIZE) {
3320 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3321 data += L2CAP_CMD_HDR_SIZE;
3322 len -= L2CAP_CMD_HDR_SIZE;
3324 cmd_len = le16_to_cpu(cmd.len);
3326 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* a command body longer than the remaining data, or ident 0, is bogus */
3328 if (cmd_len > len || !cmd.ident) {
3329 BT_DBG("corrupted command");
3334 case L2CAP_COMMAND_REJ:
3335 l2cap_command_rej(conn, &cmd, data);
3338 case L2CAP_CONN_REQ:
3339 err = l2cap_connect_req(conn, &cmd, data);
3342 case L2CAP_CONN_RSP:
3343 err = l2cap_connect_rsp(conn, &cmd, data);
3346 case L2CAP_CONF_REQ:
3347 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3350 case L2CAP_CONF_RSP:
3351 err = l2cap_config_rsp(conn, &cmd, data);
3354 case L2CAP_DISCONN_REQ:
3355 err = l2cap_disconnect_req(conn, &cmd, data);
3358 case L2CAP_DISCONN_RSP:
3359 err = l2cap_disconnect_rsp(conn, &cmd, data);
3362 case L2CAP_ECHO_REQ:
/* echo the request payload straight back */
3363 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3366 case L2CAP_ECHO_RSP:
3369 case L2CAP_INFO_REQ:
3370 err = l2cap_information_req(conn, &cmd, data);
3373 case L2CAP_INFO_RSP:
3374 err = l2cap_information_rsp(conn, &cmd, data);
3378 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3384 struct l2cap_cmd_rej rej;
3385 BT_DBG("error %d", err);
3387 /* FIXME: Map err to a valid reason */
3388 rej.reason = cpu_to_le16(0);
3389 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Verify the frame check sequence on a received ERTM/streaming frame.
 * When CRC16 is in use the last two bytes of the skb are the transmitted
 * FCS; the skb is trimmed first so skb->data + skb->len then points at
 * them, and the CRC is recomputed over header + remaining payload.
 */
3399 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3401 u16 our_fcs, rcv_fcs;
/* CRC covers the basic L2CAP header plus the 2-byte control field */
3402 int hdr_size = L2CAP_HDR_SIZE + 2;
3404 if (pi->fcs == L2CAP_FCS_CRC16) {
3405 skb_trim(skb, skb->len - 2);
3406 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3407 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3409 if (our_fcs != rcv_fcs)
/*
 * Answer a poll: send pending I-frames if possible, an RNR if we are
 * locally busy, or an RR if nothing else was transmitted, so the peer
 * always gets a frame carrying our current receive state.
 */
3415 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3417 struct l2cap_pinfo *pi = l2cap_pi(sk);
3420 pi->frames_sent = 0;
3422 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3424 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* locally busy: tell the peer to stop sending */
3425 control |= L2CAP_SUPER_RCV_NOT_READY;
3426 l2cap_send_sframe(pi, control);
3427 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3430 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3431 l2cap_retransmit_frames(sk);
3433 spin_lock_bh(&pi->send_lock);
3434 l2cap_ertm_send(sk);
3435 spin_unlock_bh(&pi->send_lock);
/* nothing went out above, so acknowledge with an RR instead */
3437 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3438 pi->frames_sent == 0) {
3439 control |= L2CAP_SUPER_RCV_READY;
3440 l2cap_send_sframe(pi, control);
/*
 * Insert an out-of-order I-frame into the SREJ queue, keeping it sorted by
 * tx_seq modulo 64 relative to buffer_seq.  Offsets are computed modulo 64
 * and normalised to be non-negative so sequence-number wraparound sorts
 * correctly.  A frame with a tx_seq already queued is treated as a
 * duplicate.
 */
3444 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3446 struct sk_buff *next_skb;
3447 struct l2cap_pinfo *pi = l2cap_pi(sk);
3448 int tx_seq_offset, next_tx_seq_offset;
3450 bt_cb(skb)->tx_seq = tx_seq;
3451 bt_cb(skb)->sar = sar;
3453 next_skb = skb_peek(SREJ_QUEUE(sk));
/* empty queue: append and we are done */
3455 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3459 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3460 if (tx_seq_offset < 0)
3461 tx_seq_offset += 64;
/* duplicate of an already-queued frame */
3464 if (bt_cb(next_skb)->tx_seq == tx_seq)
3467 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3468 pi->buffer_seq) % 64;
3469 if (next_tx_seq_offset < 0)
3470 next_tx_seq_offset += 64;
3472 if (next_tx_seq_offset > tx_seq_offset) {
/* found the first queued frame with a larger offset: insert before it */
3473 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3477 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3480 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3482 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/*
 * Reassemble an SDU from ERTM I-frames according to the SAR bits in the
 * control field: UNSEGMENTED frames are delivered directly, START
 * allocates the reassembly buffer (pi->sdu), CONTINUE appends, and END
 * validates the total length, clones the finished SDU and queues it to
 * the socket.  SAR_RETRY marks a delivery that failed (e.g. receive
 * buffer full) so the END segment is not appended twice on retry.
 * Protocol violations disconnect the channel.
 */
3487 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3489 struct l2cap_pinfo *pi = l2cap_pi(sk);
3490 struct sk_buff *_skb;
3493 switch (control & L2CAP_CTRL_SAR) {
3494 case L2CAP_SDU_UNSEGMENTED:
/* an unsegmented frame in the middle of reassembly is a violation */
3495 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3498 err = sock_queue_rcv_skb(sk, skb);
3504 case L2CAP_SDU_START:
3505 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
/* START segment begins with a 16-bit total SDU length */
3508 pi->sdu_len = get_unaligned_le16(skb->data);
3510 if (pi->sdu_len > pi->imtu)
3513 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3517 /* pull sdu_len bytes only after alloc, because of Local Busy
3518 * condition we have to be sure that this will be executed
3519 * only once, i.e., when alloc does not fail */
3522 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3524 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3525 pi->partial_sdu_len = skb->len;
3528 case L2CAP_SDU_CONTINUE:
3529 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3535 pi->partial_sdu_len += skb->len;
3536 if (pi->partial_sdu_len > pi->sdu_len)
3539 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* SDU_END: must arrive inside an in-progress reassembly */
3544 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
/* only append the END payload on the first attempt, not on a retry */
3550 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3551 pi->partial_sdu_len += skb->len;
3553 if (pi->partial_sdu_len > pi->imtu)
3556 if (pi->partial_sdu_len != pi->sdu_len)
3559 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3562 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
/* clone failed: remember to retry delivery later */
3564 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3568 err = sock_queue_rcv_skb(sk, _skb);
3571 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3575 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3576 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3590 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/*
 * Workqueue handler that drains the local-busy backlog.  It repeatedly
 * tries to push queued frames up to the socket, sleeping between attempts,
 * and gives up (disconnecting with EBUSY) after L2CAP_LOCAL_BUSY_TRIES.
 * Once the backlog is empty it clears the local-busy state and, if an RNR
 * had been sent, polls the peer with RR+P to resume the flow.
 */
3595 static void l2cap_busy_work(struct work_struct *work)
3597 DECLARE_WAITQUEUE(wait, current);
3598 struct l2cap_pinfo *pi =
3599 container_of(work, struct l2cap_pinfo, busy_work);
/* struct sock is the first member of l2cap_pinfo, so this cast is valid */
3600 struct sock *sk = (struct sock *)pi;
3601 int n_tries = 0, timeo = HZ/5, err;
3602 struct sk_buff *skb;
3607 add_wait_queue(sk_sleep(sk), &wait);
3608 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3609 set_current_state(TASK_INTERRUPTIBLE);
/* too many failed attempts: give up and drop the connection */
3611 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3613 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3620 if (signal_pending(current)) {
3621 err = sock_intr_errno(timeo);
3626 timeo = schedule_timeout(timeo);
3629 err = sock_error(sk);
/* retry delivery of everything that piled up while busy */
3633 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3634 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3635 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* still failing: requeue at the head and go back to waiting */
3637 skb_queue_head(BUSY_QUEUE(sk), skb);
3641 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3648 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* we told the peer "not ready" earlier: poll it to resume */
3651 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3652 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3653 l2cap_send_sframe(pi, control);
3654 l2cap_pi(sk)->retry_count = 1;
3656 del_timer(&pi->retrans_timer);
3657 __mod_monitor_timer();
3659 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3662 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3663 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3665 BT_DBG("sk %p, Exit local busy", sk);
3667 set_current_state(TASK_RUNNING);
3668 remove_wait_queue(sk_sleep(sk), &wait);
/*
 * Push a received I-frame towards the socket.  If we are already in the
 * local-busy state the frame is just queued; otherwise reassembly is
 * attempted, and a delivery failure enters local-busy: the frame is
 * backlogged, an RNR S-frame is sent, and the busy workqueue is kicked to
 * drain the backlog later.
 */
3673 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3675 struct l2cap_pinfo *pi = l2cap_pi(sk);
3678 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
/* already busy: stash the frame with its SAR bits for later */
3679 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3680 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3684 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3686 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3690 /* Busy Condition */
3691 BT_DBG("sk %p, Enter local busy", sk);
3693 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3694 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3695 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3697 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3698 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3699 l2cap_send_sframe(pi, sctrl);
3701 pi->conn_state |= L2CAP_CONN_RNR_SENT;
/* the RNR already acknowledges; no separate ack needed */
3703 del_timer(&pi->ack_timer);
3705 queue_work(_busy_wq, &pi->busy_work);
/*
 * Reassemble an SDU in streaming mode.  Same SAR state machine as the
 * ERTM variant, but streaming mode tolerates loss: an inconsistent SAR
 * state abandons the partial SDU instead of disconnecting.
 */
3710 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3712 struct l2cap_pinfo *pi = l2cap_pi(sk);
3713 struct sk_buff *_skb;
3717 * TODO: We have to notify the userland if some data is lost with the
3721 switch (control & L2CAP_CTRL_SAR) {
3722 case L2CAP_SDU_UNSEGMENTED:
3723 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3728 err = sock_queue_rcv_skb(sk, skb);
3734 case L2CAP_SDU_START:
3735 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* START segment begins with a 16-bit total SDU length */
3740 pi->sdu_len = get_unaligned_le16(skb->data);
3743 if (pi->sdu_len > pi->imtu) {
3748 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3754 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3756 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3757 pi->partial_sdu_len = skb->len;
3761 case L2CAP_SDU_CONTINUE:
3762 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3765 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3767 pi->partial_sdu_len += skb->len;
3768 if (pi->partial_sdu_len > pi->sdu_len)
/* SDU_END: finalize and deliver if the length adds up */
3776 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3779 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3781 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3782 pi->partial_sdu_len += skb->len;
3784 if (pi->partial_sdu_len > pi->imtu)
3787 if (pi->partial_sdu_len == pi->sdu_len) {
3788 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3789 err = sock_queue_rcv_skb(sk, _skb);
/*
 * After a missing frame arrived, deliver the run of consecutive frames now
 * available at the head of the SREJ queue, advancing buffer_seq_srej and
 * the expected tx_seq (both modulo 64) until the next gap.
 */
3804 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3806 struct sk_buff *skb;
3809 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
/* stop at the first gap in the sequence */
3810 if (bt_cb(skb)->tx_seq != tx_seq)
3813 skb = skb_dequeue(SREJ_QUEUE(sk));
3814 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3815 l2cap_ertm_reassembly_sdu(sk, skb, control);
3816 l2cap_pi(sk)->buffer_seq_srej =
3817 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3818 tx_seq = (tx_seq + 1) % 64;
/*
 * Re-send SREJ S-frames for the entries still outstanding in the SREJ
 * list up to (and removing) the entry matching tx_seq; each re-sent entry
 * is moved to the tail of the list.
 */
3822 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3824 struct l2cap_pinfo *pi = l2cap_pi(sk);
3825 struct srej_list *l, *tmp;
3828 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
/* the frame we were waiting for arrived: drop its list entry */
3829 if (l->tx_seq == tx_seq) {
3834 control = L2CAP_SUPER_SELECT_REJECT;
3835 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3836 l2cap_send_sframe(pi, control);
3838 list_add_tail(&l->list, SREJ_LIST(sk));
/*
 * Send one SREJ S-frame for every sequence number between expected_tx_seq
 * and the received tx_seq (exclusive), recording each requested frame in
 * the SREJ list, then step expected_tx_seq past tx_seq.
 */
3842 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3844 struct l2cap_pinfo *pi = l2cap_pi(sk);
3845 struct srej_list *new;
3848 while (tx_seq != pi->expected_tx_seq) {
3849 control = L2CAP_SUPER_SELECT_REJECT;
3850 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3851 l2cap_send_sframe(pi, control);
/* NOTE(review): kzalloc return is dereferenced without a NULL check —
 * under GFP_ATOMIC this can oops on allocation failure; confirm against
 * later upstream fixes before relying on this path. */
3853 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3854 new->tx_seq = pi->expected_tx_seq;
3855 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3856 list_add_tail(&new->list, SREJ_LIST(sk));
3858 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/*
 * ERTM receive path for a numbered I-frame.  Handles F-bit bookkeeping,
 * acked-frame cleanup, tx_seq window validation, the SREJ (selective
 * reject) recovery state machine for out-of-order frames, in-order
 * delivery, and periodic acknowledgement after num_to_ack frames.
 */
3862 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3863 struct l2cap_pinfo *pi = l2cap_pi(sk);
3864 u8 tx_seq = __get_txseq(rx_control);
3865 u8 req_seq = __get_reqseq(rx_control);
3866 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3867 int tx_seq_offset, expected_tx_seq_offset;
/* ack threshold derived from the transmit window */
3868 int num_to_ack = (pi->tx_win/6) + 1;
3871 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
/* an F-bit answers our poll: stop the monitor timer */
3874 if (L2CAP_CTRL_FINAL & rx_control &&
3875 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3876 del_timer(&pi->monitor_timer);
3877 if (pi->unacked_frames > 0)
3878 __mod_retrans_timer();
3879 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3882 pi->expected_ack_seq = req_seq;
3883 l2cap_drop_acked_frames(sk);
3885 if (tx_seq == pi->expected_tx_seq)
3888 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3889 if (tx_seq_offset < 0)
3890 tx_seq_offset += 64;
3892 /* invalid tx_seq */
3893 if (tx_seq_offset >= pi->tx_win) {
3894 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* NOTE(review): '==' compares the whole conn_state word against a single
 * flag; elsewhere the flag is tested with '&' — verify intent. */
3898 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3901 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3902 struct srej_list *first;
3904 first = list_first_entry(SREJ_LIST(sk),
3905 struct srej_list, list);
3906 if (tx_seq == first->tx_seq) {
/* the oldest missing frame arrived: fill the gap */
3907 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3908 l2cap_check_srej_gap(sk, tx_seq);
3910 list_del(&first->list);
3913 if (list_empty(SREJ_LIST(sk))) {
/* all requested frames recovered: leave SREJ_SENT state */
3914 pi->buffer_seq = pi->buffer_seq_srej;
3915 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3917 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3920 struct srej_list *l;
3922 /* duplicated tx_seq */
3923 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3926 list_for_each_entry(l, SREJ_LIST(sk), list) {
3927 if (l->tx_seq == tx_seq) {
3928 l2cap_resend_srejframe(sk, tx_seq);
3932 l2cap_send_srejframe(sk, tx_seq);
3935 expected_tx_seq_offset =
3936 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3937 if (expected_tx_seq_offset < 0)
3938 expected_tx_seq_offset += 64;
3940 /* duplicated tx_seq */
3941 if (tx_seq_offset < expected_tx_seq_offset)
/* first out-of-order frame: enter SREJ recovery */
3944 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3946 BT_DBG("sk %p, Enter SREJ", sk);
3948 INIT_LIST_HEAD(SREJ_LIST(sk));
3949 pi->buffer_seq_srej = pi->buffer_seq;
3951 __skb_queue_head_init(SREJ_QUEUE(sk));
3952 __skb_queue_head_init(BUSY_QUEUE(sk));
3953 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3955 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3957 l2cap_send_srejframe(sk, tx_seq);
3959 del_timer(&pi->ack_timer);
/* in-order frame: deliver and advance the expected sequence */
3964 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3966 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3967 bt_cb(skb)->tx_seq = tx_seq;
3968 bt_cb(skb)->sar = sar;
3969 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3973 err = l2cap_push_rx_skb(sk, skb, rx_control);
3977 if (rx_control & L2CAP_CTRL_FINAL) {
3978 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3979 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3981 l2cap_retransmit_frames(sk);
/* send an ack once enough frames have accumulated */
3986 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3987 if (pi->num_acked == num_to_ack - 1)
/*
 * Handle a received RR (Receiver Ready) S-frame: drop acknowledged frames,
 * then react to the P/F bits — a poll demands an immediate response, an
 * F-bit may complete a REJ recovery, and otherwise pending I-frames are
 * (re)transmitted.
 */
3997 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3999 struct l2cap_pinfo *pi = l2cap_pi(sk);
4001 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
4004 pi->expected_ack_seq = __get_reqseq(rx_control);
4005 l2cap_drop_acked_frames(sk);
4007 if (rx_control & L2CAP_CTRL_POLL) {
/* peer polled us: next frame we send must carry the F bit */
4008 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4009 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4010 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4011 (pi->unacked_frames > 0))
4012 __mod_retrans_timer();
4014 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4015 l2cap_send_srejtail(sk);
4017 l2cap_send_i_or_rr_or_rnr(sk);
4020 } else if (rx_control & L2CAP_CTRL_FINAL) {
4021 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
/* F bit closes an outstanding REJ exchange, else retransmit */
4023 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4024 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4026 l2cap_retransmit_frames(sk);
4029 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4030 (pi->unacked_frames > 0))
4031 __mod_retrans_timer();
4033 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4034 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4037 spin_lock_bh(&pi->send_lock);
4038 l2cap_ertm_send(sk);
4039 spin_unlock_bh(&pi->send_lock);
/*
 * Handle a received REJ S-frame: the peer asks for retransmission starting
 * at req_seq.  Acked frames are dropped, everything outstanding is
 * retransmitted, and REJ_ACT is latched while we are waiting for an F bit.
 */
4044 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4046 struct l2cap_pinfo *pi = l2cap_pi(sk);
4047 u8 tx_seq = __get_reqseq(rx_control);
4049 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4051 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4053 pi->expected_ack_seq = tx_seq;
4054 l2cap_drop_acked_frames(sk);
4056 if (rx_control & L2CAP_CTRL_FINAL) {
4057 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4058 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4060 l2cap_retransmit_frames(sk);
4062 l2cap_retransmit_frames(sk);
/* remember the REJ so a later F bit is not treated as a new request */
4064 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4065 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/*
 * Handle a received SREJ S-frame: retransmit exactly the requested frame.
 * The P and F bits select between ack-processing plus full send (poll),
 * closing a pending SREJ_ACT (final), or a plain single retransmission.
 */
4068 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4070 struct l2cap_pinfo *pi = l2cap_pi(sk);
4071 u8 tx_seq = __get_reqseq(rx_control);
4073 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4075 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4077 if (rx_control & L2CAP_CTRL_POLL) {
4078 pi->expected_ack_seq = tx_seq;
4079 l2cap_drop_acked_frames(sk);
4081 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4082 l2cap_retransmit_one_frame(sk, tx_seq);
4084 spin_lock_bh(&pi->send_lock);
4085 l2cap_ertm_send(sk);
4086 spin_unlock_bh(&pi->send_lock);
/* remember which frame was SREJed so a later F bit can match it */
4088 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4089 pi->srej_save_reqseq = tx_seq;
4090 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4092 } else if (rx_control & L2CAP_CTRL_FINAL) {
4093 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4094 pi->srej_save_reqseq == tx_seq)
4095 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4097 l2cap_retransmit_one_frame(sk, tx_seq);
4099 l2cap_retransmit_one_frame(sk, tx_seq);
4100 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4101 pi->srej_save_reqseq = tx_seq;
4102 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/*
 * Handle a received RNR (Receiver Not Ready) S-frame: mark the peer as
 * busy, drop acknowledged frames, and answer a poll with either RR/RNR
 * (normal case) or the tail of our SREJ list plus an RR when in SREJ
 * recovery.
 */
4107 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4109 struct l2cap_pinfo *pi = l2cap_pi(sk);
4110 u8 tx_seq = __get_reqseq(rx_control);
4112 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4114 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4115 pi->expected_ack_seq = tx_seq;
4116 l2cap_drop_acked_frames(sk);
4118 if (rx_control & L2CAP_CTRL_POLL)
4119 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4121 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
/* peer is busy: no point retransmitting until it recovers */
4122 del_timer(&pi->retrans_timer);
4123 if (rx_control & L2CAP_CTRL_POLL)
4124 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4128 if (rx_control & L2CAP_CTRL_POLL)
4129 l2cap_send_srejtail(sk);
4131 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/*
 * Dispatch a received supervisory (S) frame by its SUPERVISE bits to the
 * RR/REJ/SREJ/RNR handlers, after common F-bit handling that stops the
 * monitor timer when the frame answers our poll.
 */
4134 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4136 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4138 if (L2CAP_CTRL_FINAL & rx_control &&
4139 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4140 del_timer(&l2cap_pi(sk)->monitor_timer);
4141 if (l2cap_pi(sk)->unacked_frames > 0)
4142 __mod_retrans_timer();
4143 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4146 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4147 case L2CAP_SUPER_RCV_READY:
4148 l2cap_data_channel_rrframe(sk, rx_control);
4151 case L2CAP_SUPER_REJECT:
4152 l2cap_data_channel_rejframe(sk, rx_control);
4155 case L2CAP_SUPER_SELECT_REJECT:
4156 l2cap_data_channel_srejframe(sk, rx_control);
4159 case L2CAP_SUPER_RCV_NOT_READY:
4160 l2cap_data_channel_rnrframe(sk, rx_control);
/*
 * Receive path for a connection-oriented data channel.  Looks the channel
 * up by CID and processes the frame according to the channel mode: basic
 * (queue straight to the socket), ERTM (control-field parsing, FCS check,
 * req_seq validation, I/S-frame dispatch) or streaming (FCS check plus
 * lossy in-order reassembly).
 */
4168 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4171 struct l2cap_pinfo *pi;
4174 int len, next_tx_seq_offset, req_seq_offset;
4176 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4178 BT_DBG("unknown cid 0x%4.4x", cid);
4184 BT_DBG("sk %p, len %d", sk, skb->len);
4186 if (sk->sk_state != BT_CONNECTED)
4190 case L2CAP_MODE_BASIC:
4191 /* If socket recv buffers overflows we drop data here
4192 * which is *bad* because L2CAP has to be reliable.
4193 * But we don't have any other choice. L2CAP doesn't
4194 * provide flow control mechanism. */
4196 if (pi->imtu < skb->len)
4199 if (!sock_queue_rcv_skb(sk, skb))
4203 case L2CAP_MODE_ERTM:
4204 control = get_unaligned_le16(skb->data);
4209 * We can just drop the corrupted I-frame here.
4210 * Receiver will miss it and start proper recovery
4211 * procedures and ask retransmission.
4213 if (l2cap_check_fcs(pi, skb))
4216 if (__is_sar_start(control) && __is_iframe(control))
4219 if (pi->fcs == L2CAP_FCS_CRC16)
/* payload must not exceed the negotiated MPS */
4222 if (len > pi->mps) {
4223 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* req_seq must acknowledge only frames we have actually sent */
4227 req_seq = __get_reqseq(control);
4228 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4229 if (req_seq_offset < 0)
4230 req_seq_offset += 64;
4232 next_tx_seq_offset =
4233 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4234 if (next_tx_seq_offset < 0)
4235 next_tx_seq_offset += 64;
4237 /* check for invalid req-seq */
4238 if (req_seq_offset > next_tx_seq_offset) {
4239 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4243 if (__is_iframe(control)) {
4245 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4249 l2cap_data_channel_iframe(sk, control, skb);
4252 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4256 l2cap_data_channel_sframe(sk, control, skb);
4261 case L2CAP_MODE_STREAMING:
4262 control = get_unaligned_le16(skb->data);
4266 if (l2cap_check_fcs(pi, skb))
4269 if (__is_sar_start(control))
4272 if (pi->fcs == L2CAP_FCS_CRC16)
4275 if (len > pi->mps || len < 0 || __is_sframe(control))
4278 tx_seq = __get_txseq(control);
/* streaming tolerates loss: just resync the expected sequence */
4280 if (pi->expected_tx_seq == tx_seq)
4281 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4283 pi->expected_tx_seq = (tx_seq + 1) % 64;
4285 l2cap_streaming_reassembly_sdu(sk, skb, control);
4290 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/*
 * Receive path for the connectionless channel (CID 0x0002): find a socket
 * bound to the PSM, check state and MTU, and queue the payload to it.
 */
4304 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4308 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4312 BT_DBG("sk %p, len %d", sk, skb->len);
4314 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4317 if (l2cap_pi(sk)->imtu < skb->len)
4320 if (!sock_queue_rcv_skb(sk, skb))
/*
 * Top-level demultiplexer for a complete L2CAP frame: strip the basic
 * header, validate the length, and route by CID to the signalling,
 * connectionless, or connection-oriented receive path.
 */
4332 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4334 struct l2cap_hdr *lh = (void *) skb->data;
4338 skb_pull(skb, L2CAP_HDR_SIZE);
4339 cid = __le16_to_cpu(lh->cid);
4340 len = __le16_to_cpu(lh->len);
/* header length must match the actual payload length */
4342 if (len != skb->len) {
4347 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4350 case L2CAP_CID_SIGNALING:
4351 l2cap_sig_channel(conn, skb);
4354 case L2CAP_CID_CONN_LESS:
/* connectionless payload starts with a 2-byte PSM */
4355 psm = get_unaligned_le16(skb->data);
4357 l2cap_conless_channel(conn, psm, skb);
4361 l2cap_data_channel(conn, cid, skb);
4366 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: an incoming ACL connection is being indicated.  Scan the
 * listening L2CAP sockets and build the accept/role-switch link-mode mask;
 * a socket bound to the exact local address takes precedence over
 * wildcard (BDADDR_ANY) listeners.
 */
4368 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4370 int exact = 0, lm1 = 0, lm2 = 0;
4371 register struct sock *sk;
4372 struct hlist_node *node;
/* L2CAP only runs over ACL links */
4374 if (type != ACL_LINK)
4377 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4379 /* Find listening sockets and check their link_mode */
4380 read_lock(&l2cap_sk_list.lock);
4381 sk_for_each(sk, node, &l2cap_sk_list.head) {
4382 if (sk->sk_state != BT_LISTEN)
4385 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4386 lm1 |= HCI_LM_ACCEPT;
4387 if (l2cap_pi(sk)->role_switch)
4388 lm1 |= HCI_LM_MASTER;
4390 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4391 lm2 |= HCI_LM_ACCEPT;
4392 if (l2cap_pi(sk)->role_switch)
4393 lm2 |= HCI_LM_MASTER;
4396 read_unlock(&l2cap_sk_list.lock);
4398 return exact ? lm1 : lm2;
/*
 * HCI callback: an outgoing/incoming ACL connection completed.  On success
 * attach an l2cap_conn and mark it ready; on failure tear down any state
 * with the mapped Bluetooth error.
 */
4401 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4403 struct l2cap_conn *conn;
4405 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4407 if (hcon->type != ACL_LINK)
4411 conn = l2cap_conn_add(hcon, status);
4413 l2cap_conn_ready(conn);
4415 l2cap_conn_del(hcon, bt_err(status));
/*
 * HCI callback: report the disconnect reason recorded on the l2cap_conn
 * when the lower layer asks whether/why to disconnect.
 */
4420 static int l2cap_disconn_ind(struct hci_conn *hcon)
4422 struct l2cap_conn *conn = hcon->l2cap_data;
4424 BT_DBG("hcon %p", hcon);
4426 if (hcon->type != ACL_LINK || !conn)
4429 return conn->disc_reason;
/*
 * HCI callback: the ACL link went down — tear down the whole l2cap_conn
 * with the mapped Bluetooth error code.
 */
4432 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4434 BT_DBG("hcon %p reason %d", hcon, reason);
4436 if (hcon->type != ACL_LINK)
4439 l2cap_conn_del(hcon, bt_err(reason));
/*
 * React to an encryption change on a connected channel.  Losing encryption
 * gives a MEDIUM-security channel a 5 s grace timer and closes a
 * HIGH-security channel immediately; regaining it clears the grace timer.
 * Only SEQPACKET/STREAM (connection-oriented) sockets are affected.
 */
4444 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4446 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4449 if (encrypt == 0x00) {
4450 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4451 l2cap_sock_clear_timer(sk);
4452 l2cap_sock_set_timer(sk, HZ * 5);
4453 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4454 __l2cap_sock_close(sk, ECONNREFUSED);
4456 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4457 l2cap_sock_clear_timer(sk);
/*
 * HCI callback: authentication/encryption completed for the link.  Walk
 * every channel on the connection: established channels get an encryption
 * check, channels in BT_CONNECT fire their deferred Connection Request,
 * and channels in BT_CONNECT2 answer the pending request with success or
 * a security-block rejection depending on status.
 */
4461 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4463 struct l2cap_chan_list *l;
4464 struct l2cap_conn *conn = hcon->l2cap_data;
4470 l = &conn->chan_list;
4472 BT_DBG("conn %p", conn);
4474 read_lock(&l->lock);
4476 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* only channels that were waiting on this security procedure */
4479 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4484 if (!status && (sk->sk_state == BT_CONNECTED ||
4485 sk->sk_state == BT_CONFIG)) {
4486 l2cap_check_encryption(sk, encrypt);
4491 if (sk->sk_state == BT_CONNECT) {
/* security done: now send the deferred Connection Request */
4493 struct l2cap_conn_req req;
4494 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4495 req.psm = l2cap_pi(sk)->psm;
4497 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4498 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4500 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4501 L2CAP_CONN_REQ, sizeof(req), &req);
4503 l2cap_sock_clear_timer(sk);
4504 l2cap_sock_set_timer(sk, HZ / 10);
4506 } else if (sk->sk_state == BT_CONNECT2) {
4507 struct l2cap_conn_rsp rsp;
4511 sk->sk_state = BT_CONFIG;
4512 result = L2CAP_CR_SUCCESS;
4514 sk->sk_state = BT_DISCONN;
4515 l2cap_sock_set_timer(sk, HZ / 10);
/* security failed: reject the pending incoming connection */
4516 result = L2CAP_CR_SEC_BLOCK;
4519 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4520 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4521 rsp.result = cpu_to_le16(result);
4522 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4523 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4524 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4530 read_unlock(&l->lock);
/*
 * HCI callback: ACL data arrived for this connection.  Reassembles L2CAP
 * frames from ACL fragments: an ACL_START fragment carries the L2CAP
 * header and determines the total length (complete frames are delivered
 * immediately, partial ones start conn->rx_skb), continuation fragments
 * are appended until rx_len reaches zero.  Any framing violation marks
 * the connection unreliable (ECOMM).
 */
4535 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4537 struct l2cap_conn *conn = hcon->l2cap_data;
4539 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4542 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4544 if (flags & ACL_START) {
4545 struct l2cap_hdr *hdr;
/* a start fragment while reassembly is pending: drop the old frame */
4549 BT_ERR("Unexpected start frame (len %d)", skb->len);
4550 kfree_skb(conn->rx_skb);
4551 conn->rx_skb = NULL;
4553 l2cap_conn_unreliable(conn, ECOMM);
4557 BT_ERR("Frame is too short (len %d)", skb->len);
4558 l2cap_conn_unreliable(conn, ECOMM);
4562 hdr = (struct l2cap_hdr *) skb->data;
4563 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4565 if (len == skb->len) {
4566 /* Complete frame received */
4567 l2cap_recv_frame(conn, skb);
4571 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4573 if (skb->len > len) {
4574 BT_ERR("Frame is too long (len %d, expected len %d)",
4576 l2cap_conn_unreliable(conn, ECOMM);
4580 /* Allocate skb for the complete frame (with header) */
4581 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4585 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4587 conn->rx_len = len - skb->len;
4589 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
/* a continuation with nothing pending is a protocol violation */
4591 if (!conn->rx_len) {
4592 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4593 l2cap_conn_unreliable(conn, ECOMM);
4597 if (skb->len > conn->rx_len) {
4598 BT_ERR("Fragment is too long (len %d, expected %d)",
4599 skb->len, conn->rx_len);
4600 kfree_skb(conn->rx_skb);
4601 conn->rx_skb = NULL;
4603 l2cap_conn_unreliable(conn, ECOMM);
4607 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4609 conn->rx_len -= skb->len;
4611 if (!conn->rx_len) {
4612 /* Complete frame received */
4613 l2cap_recv_frame(conn, conn->rx_skb);
4614 conn->rx_skb = NULL;
/*
 * debugfs seq_file show handler: dump one line per L2CAP socket with
 * addresses, state, PSM, CIDs, MTUs and security level.
 */
4623 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4626 struct hlist_node *node;
4628 read_lock_bh(&l2cap_sk_list.lock);
4630 sk_for_each(sk, node, &l2cap_sk_list.head) {
4631 struct l2cap_pinfo *pi = l2cap_pi(sk);
4633 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4634 batostr(&bt_sk(sk)->src),
4635 batostr(&bt_sk(sk)->dst),
4636 sk->sk_state, __le16_to_cpu(pi->psm),
4638 pi->imtu, pi->omtu, pi->sec_level);
4641 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open handler: bind the seq_file single-show callback. */
4646 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4648 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry (seq_file based). */
4651 static const struct file_operations l2cap_debugfs_fops = {
4652 .open = l2cap_debugfs_open,
4654 .llseek = seq_lseek,
4655 .release = single_release,
/* dentry of the debugfs file, created in l2cap_init() */
4658 static struct dentry *l2cap_debugfs;
/* Socket-layer operations for PF_BLUETOOTH/BTPROTO_L2CAP sockets. */
4660 static const struct proto_ops l2cap_sock_ops = {
4661 .family = PF_BLUETOOTH,
4662 .owner = THIS_MODULE,
4663 .release = l2cap_sock_release,
4664 .bind = l2cap_sock_bind,
4665 .connect = l2cap_sock_connect,
4666 .listen = l2cap_sock_listen,
4667 .accept = l2cap_sock_accept,
4668 .getname = l2cap_sock_getname,
4669 .sendmsg = l2cap_sock_sendmsg,
4670 .recvmsg = l2cap_sock_recvmsg,
4671 .poll = bt_sock_poll,
4672 .ioctl = bt_sock_ioctl,
4673 .mmap = sock_no_mmap,
4674 .socketpair = sock_no_socketpair,
4675 .shutdown = l2cap_sock_shutdown,
4676 .setsockopt = l2cap_sock_setsockopt,
4677 .getsockopt = l2cap_sock_getsockopt
/* Protocol family hooks: socket() creation for BTPROTO_L2CAP. */
4680 static const struct net_proto_family l2cap_sock_family_ops = {
4681 .family = PF_BLUETOOTH,
4682 .owner = THIS_MODULE,
4683 .create = l2cap_sock_create,
4686 static struct hci_proto l2cap_hci_proto = {
4688 .id = HCI_PROTO_L2CAP,
4689 .connect_ind = l2cap_connect_ind,
4690 .connect_cfm = l2cap_connect_cfm,
4691 .disconn_ind = l2cap_disconn_ind,
4692 .disconn_cfm = l2cap_disconn_cfm,
4693 .security_cfm = l2cap_security_cfm,
4694 .recv_acldata = l2cap_recv_acldata
4697 static int __init l2cap_init(void)
4701 err = proto_register(&l2cap_proto, 0);
4705 _busy_wq = create_singlethread_workqueue("l2cap");
4709 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4711 BT_ERR("L2CAP socket registration failed");
4715 err = hci_register_proto(&l2cap_hci_proto);
4717 BT_ERR("L2CAP protocol registration failed");
4718 bt_sock_unregister(BTPROTO_L2CAP);
4723 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4724 bt_debugfs, NULL, &l2cap_debugfs_fops);
4726 BT_ERR("Failed to create L2CAP debug file");
4729 BT_INFO("L2CAP ver %s", VERSION);
4730 BT_INFO("L2CAP socket layer initialized");
4735 proto_unregister(&l2cap_proto);
4739 static void __exit l2cap_exit(void)
4741 debugfs_remove(l2cap_debugfs);
4743 flush_workqueue(_busy_wq);
4744 destroy_workqueue(_busy_wq);
4746 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4747 BT_ERR("L2CAP socket unregistration failed");
4749 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4750 BT_ERR("L2CAP protocol unregistration failed");
4752 proto_unregister(&l2cap_proto);
/*
 * Dummy exported symbol: modules that only use L2CAP sockets (and no
 * other symbol from this module) reference it to trigger automatic
 * loading of the L2CAP module.
 */
void l2cap_load(void)
{
	/* intentionally empty */
}
EXPORT_SYMBOL(l2cap_load);
module_init(l2cap_init);
module_exit(l2cap_exit);
/* Writable module parameter (0644): toggles Enhanced Retransmission Mode. */
module_param(enable_ertm, bool, 0644);
MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
MODULE_VERSION(VERSION);
MODULE_LICENSE("GPL");
/* Lets the Bluetooth core auto-load this module via request_module()
 * for protocol number 0 (BTPROTO_L2CAP). */
MODULE_ALIAS("bt-proto-0");