2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
56 #define VERSION "2.14"
58 #ifdef CONFIG_BT_L2CAP_EXT_FEATURES
59 static int enable_ertm = 1;
61 static int enable_ertm = 0;
63 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
64 static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
66 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
67 static u8 l2cap_fixed_chan[8] = { 0x02, };
69 static const struct proto_ops l2cap_sock_ops;
71 static struct bt_sock_list l2cap_sk_list = {
72 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
75 static void __l2cap_sock_close(struct sock *sk, int reason);
76 static void l2cap_sock_close(struct sock *sk);
77 static void l2cap_sock_kill(struct sock *sk);
79 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
80 u8 code, u8 ident, u16 dlen, void *data);
82 /* ---- L2CAP timers ---- */
/* Socket timer callback: the connection attempt or teardown took too long.
 * Runs in timer (BH) context, hence bh_lock_sock. Closes the channel with
 * ECONNREFUSED while a connection is still being set up, ETIMEDOUT
 * otherwise, then kills the orphaned socket and drops the timer's ref. */
static void l2cap_sock_timeout(unsigned long arg)
{
	struct sock *sk = (struct sock *) arg;
	int reason;

	BT_DBG("sock %p state %d", sk, sk->sk_state);

	bh_lock_sock(sk);

	if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
		reason = ECONNREFUSED;
	else if (sk->sk_state == BT_CONNECT &&
			l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
		reason = ECONNREFUSED;
	else
		reason = ETIMEDOUT;

	__l2cap_sock_close(sk, reason);

	bh_unlock_sock(sk);

	l2cap_sock_kill(sk);
	sock_put(sk);
}
108 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
110 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
111 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
114 static void l2cap_sock_clear_timer(struct sock *sk)
116 BT_DBG("sock %p state %d", sk, sk->sk_state);
117 sk_stop_timer(sk, &sk->sk_timer);
120 /* ---- L2CAP channels ---- */
121 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
124 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
125 if (l2cap_pi(s)->dcid == cid)
131 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
134 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
135 if (l2cap_pi(s)->scid == cid)
141 /* Find channel with given SCID.
142 * Returns locked socket */
143 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
147 s = __l2cap_get_chan_by_scid(l, cid);
150 read_unlock(&l->lock);
154 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
157 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
158 if (l2cap_pi(s)->ident == ident)
164 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
168 s = __l2cap_get_chan_by_ident(l, ident);
171 read_unlock(&l->lock);
175 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
177 u16 cid = L2CAP_CID_DYN_START;
179 for (; cid < L2CAP_CID_DYN_END; cid++) {
180 if (!__l2cap_get_chan_by_scid(l, cid))
187 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
192 l2cap_pi(l->head)->prev_c = sk;
194 l2cap_pi(sk)->next_c = l->head;
195 l2cap_pi(sk)->prev_c = NULL;
199 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
201 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
203 write_lock_bh(&l->lock);
208 l2cap_pi(next)->prev_c = prev;
210 l2cap_pi(prev)->next_c = next;
211 write_unlock_bh(&l->lock);
216 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
218 struct l2cap_chan_list *l = &conn->chan_list;
220 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
221 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
223 conn->disc_reason = 0x13;
225 l2cap_pi(sk)->conn = conn;
227 if (sk->sk_type == SOCK_SEQPACKET) {
228 /* Alloc CID for connection-oriented socket */
229 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
230 } else if (sk->sk_type == SOCK_DGRAM) {
231 /* Connectionless socket */
232 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
233 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
236 /* Raw socket can send/recv signalling messages only */
237 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
238 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
239 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
242 __l2cap_chan_link(l, sk);
245 bt_accept_enqueue(parent, sk);
/* Delete channel.
 * Must be called on the locked socket. */
250 static void l2cap_chan_del(struct sock *sk, int err)
252 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
253 struct sock *parent = bt_sk(sk)->parent;
255 l2cap_sock_clear_timer(sk);
257 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
260 /* Unlink from channel list */
261 l2cap_chan_unlink(&conn->chan_list, sk);
262 l2cap_pi(sk)->conn = NULL;
263 hci_conn_put(conn->hcon);
266 sk->sk_state = BT_CLOSED;
267 sock_set_flag(sk, SOCK_ZAPPED);
273 bt_accept_unlink(sk);
274 parent->sk_data_ready(parent, 0);
276 sk->sk_state_change(sk);
279 /* Service level security */
280 static inline int l2cap_check_security(struct sock *sk)
282 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
285 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
286 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
287 auth_type = HCI_AT_NO_BONDING_MITM;
289 auth_type = HCI_AT_NO_BONDING;
291 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
292 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
294 switch (l2cap_pi(sk)->sec_level) {
295 case BT_SECURITY_HIGH:
296 auth_type = HCI_AT_GENERAL_BONDING_MITM;
298 case BT_SECURITY_MEDIUM:
299 auth_type = HCI_AT_GENERAL_BONDING;
302 auth_type = HCI_AT_NO_BONDING;
307 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
311 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
315 /* Get next available identificator.
316 * 1 - 128 are used by kernel.
317 * 129 - 199 are reserved.
318 * 200 - 254 are used by utilities like l2ping, etc.
321 spin_lock_bh(&conn->lock);
323 if (++conn->tx_ident > 128)
328 spin_unlock_bh(&conn->lock);
333 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
335 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
337 BT_DBG("code 0x%2.2x", code);
342 return hci_send_acl(conn->hcon, skb, 0);
345 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
348 struct l2cap_hdr *lh;
349 struct l2cap_conn *conn = pi->conn;
350 int count, hlen = L2CAP_HDR_SIZE + 2;
352 if (pi->fcs == L2CAP_FCS_CRC16)
355 BT_DBG("pi %p, control 0x%2.2x", pi, control);
357 count = min_t(unsigned int, conn->mtu, hlen);
358 control |= L2CAP_CTRL_FRAME_TYPE;
360 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
361 control |= L2CAP_CTRL_FINAL;
362 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
365 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
366 control |= L2CAP_CTRL_POLL;
367 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
370 skb = bt_skb_alloc(count, GFP_ATOMIC);
374 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
375 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
376 lh->cid = cpu_to_le16(pi->dcid);
377 put_unaligned_le16(control, skb_put(skb, 2));
379 if (pi->fcs == L2CAP_FCS_CRC16) {
380 u16 fcs = crc16(0, (u8 *)lh, count - 2);
381 put_unaligned_le16(fcs, skb_put(skb, 2));
384 return hci_send_acl(pi->conn->hcon, skb, 0);
387 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
389 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
390 control |= L2CAP_SUPER_RCV_NOT_READY;
392 control |= L2CAP_SUPER_RCV_READY;
394 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
396 return l2cap_send_sframe(pi, control);
399 static void l2cap_do_start(struct sock *sk)
401 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
403 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
404 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
407 if (l2cap_check_security(sk)) {
408 struct l2cap_conn_req req;
409 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
410 req.psm = l2cap_pi(sk)->psm;
412 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
414 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
415 L2CAP_CONN_REQ, sizeof(req), &req);
418 struct l2cap_info_req req;
419 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
421 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
422 conn->info_ident = l2cap_get_ident(conn);
424 mod_timer(&conn->info_timer, jiffies +
425 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
427 l2cap_send_cmd(conn, conn->info_ident,
428 L2CAP_INFO_REQ, sizeof(req), &req);
432 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
434 struct l2cap_disconn_req req;
436 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
437 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
438 l2cap_send_cmd(conn, l2cap_get_ident(conn),
439 L2CAP_DISCONN_REQ, sizeof(req), &req);
442 /* ---- L2CAP connections ---- */
443 static void l2cap_conn_start(struct l2cap_conn *conn)
445 struct l2cap_chan_list *l = &conn->chan_list;
448 BT_DBG("conn %p", conn);
452 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
455 if (sk->sk_type != SOCK_SEQPACKET) {
460 if (sk->sk_state == BT_CONNECT) {
461 if (l2cap_check_security(sk)) {
462 struct l2cap_conn_req req;
463 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
464 req.psm = l2cap_pi(sk)->psm;
466 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
468 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
469 L2CAP_CONN_REQ, sizeof(req), &req);
471 } else if (sk->sk_state == BT_CONNECT2) {
472 struct l2cap_conn_rsp rsp;
473 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
474 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
476 if (l2cap_check_security(sk)) {
477 if (bt_sk(sk)->defer_setup) {
478 struct sock *parent = bt_sk(sk)->parent;
479 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
480 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
481 parent->sk_data_ready(parent, 0);
484 sk->sk_state = BT_CONFIG;
485 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
486 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
489 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
490 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
493 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
494 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
500 read_unlock(&l->lock);
503 static void l2cap_conn_ready(struct l2cap_conn *conn)
505 struct l2cap_chan_list *l = &conn->chan_list;
508 BT_DBG("conn %p", conn);
512 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
515 if (sk->sk_type != SOCK_SEQPACKET) {
516 l2cap_sock_clear_timer(sk);
517 sk->sk_state = BT_CONNECTED;
518 sk->sk_state_change(sk);
519 } else if (sk->sk_state == BT_CONNECT)
525 read_unlock(&l->lock);
/* Notify sockets that we cannot guarantee reliability anymore */
529 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
531 struct l2cap_chan_list *l = &conn->chan_list;
534 BT_DBG("conn %p", conn);
538 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
539 if (l2cap_pi(sk)->force_reliable)
543 read_unlock(&l->lock);
546 static void l2cap_info_timeout(unsigned long arg)
548 struct l2cap_conn *conn = (void *) arg;
550 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
551 conn->info_ident = 0;
553 l2cap_conn_start(conn);
556 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
558 struct l2cap_conn *conn = hcon->l2cap_data;
563 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
567 hcon->l2cap_data = conn;
570 BT_DBG("hcon %p conn %p", hcon, conn);
572 conn->mtu = hcon->hdev->acl_mtu;
573 conn->src = &hcon->hdev->bdaddr;
574 conn->dst = &hcon->dst;
578 spin_lock_init(&conn->lock);
579 rwlock_init(&conn->chan_list.lock);
581 setup_timer(&conn->info_timer, l2cap_info_timeout,
582 (unsigned long) conn);
584 conn->disc_reason = 0x13;
589 static void l2cap_conn_del(struct hci_conn *hcon, int err)
591 struct l2cap_conn *conn = hcon->l2cap_data;
597 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
599 kfree_skb(conn->rx_skb);
602 while ((sk = conn->chan_list.head)) {
604 l2cap_chan_del(sk, err);
609 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
610 del_timer_sync(&conn->info_timer);
612 hcon->l2cap_data = NULL;
616 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
618 struct l2cap_chan_list *l = &conn->chan_list;
619 write_lock_bh(&l->lock);
620 __l2cap_chan_add(conn, sk, parent);
621 write_unlock_bh(&l->lock);
624 /* ---- Socket interface ---- */
625 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
628 struct hlist_node *node;
629 sk_for_each(sk, node, &l2cap_sk_list.head)
630 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
/* Find socket with psm and source bdaddr.
 * Returns closest match.
 */
640 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
642 struct sock *sk = NULL, *sk1 = NULL;
643 struct hlist_node *node;
645 sk_for_each(sk, node, &l2cap_sk_list.head) {
646 if (state && sk->sk_state != state)
649 if (l2cap_pi(sk)->psm == psm) {
651 if (!bacmp(&bt_sk(sk)->src, src))
655 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
659 return node ? sk : sk1;
662 /* Find socket with given address (psm, src).
663 * Returns locked socket */
664 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
667 read_lock(&l2cap_sk_list.lock);
668 s = __l2cap_get_sock_by_psm(state, psm, src);
671 read_unlock(&l2cap_sk_list.lock);
675 static void l2cap_sock_destruct(struct sock *sk)
679 skb_queue_purge(&sk->sk_receive_queue);
680 skb_queue_purge(&sk->sk_write_queue);
683 static void l2cap_sock_cleanup_listen(struct sock *parent)
687 BT_DBG("parent %p", parent);
689 /* Close not yet accepted channels */
690 while ((sk = bt_accept_dequeue(parent, NULL)))
691 l2cap_sock_close(sk);
693 parent->sk_state = BT_CLOSED;
694 sock_set_flag(parent, SOCK_ZAPPED);
/* Kill socket (only if zapped and orphan)
 * Must be called on unlocked socket.
 */
700 static void l2cap_sock_kill(struct sock *sk)
702 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
705 BT_DBG("sk %p state %d", sk, sk->sk_state);
707 /* Kill poor orphan */
708 bt_sock_unlink(&l2cap_sk_list, sk);
709 sock_set_flag(sk, SOCK_DEAD);
713 static void __l2cap_sock_close(struct sock *sk, int reason)
715 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
717 switch (sk->sk_state) {
719 l2cap_sock_cleanup_listen(sk);
724 if (sk->sk_type == SOCK_SEQPACKET) {
725 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
727 sk->sk_state = BT_DISCONN;
728 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
729 l2cap_send_disconn_req(conn, sk);
731 l2cap_chan_del(sk, reason);
735 if (sk->sk_type == SOCK_SEQPACKET) {
736 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
737 struct l2cap_conn_rsp rsp;
740 if (bt_sk(sk)->defer_setup)
741 result = L2CAP_CR_SEC_BLOCK;
743 result = L2CAP_CR_BAD_PSM;
745 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
746 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
747 rsp.result = cpu_to_le16(result);
748 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
749 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
750 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
752 l2cap_chan_del(sk, reason);
757 l2cap_chan_del(sk, reason);
761 sock_set_flag(sk, SOCK_ZAPPED);
766 /* Must be called on unlocked socket. */
767 static void l2cap_sock_close(struct sock *sk)
769 l2cap_sock_clear_timer(sk);
771 __l2cap_sock_close(sk, ECONNRESET);
776 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
778 struct l2cap_pinfo *pi = l2cap_pi(sk);
783 sk->sk_type = parent->sk_type;
784 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
786 pi->imtu = l2cap_pi(parent)->imtu;
787 pi->omtu = l2cap_pi(parent)->omtu;
788 pi->mode = l2cap_pi(parent)->mode;
789 pi->fcs = l2cap_pi(parent)->fcs;
790 pi->max_tx = l2cap_pi(parent)->max_tx;
791 pi->tx_win = l2cap_pi(parent)->tx_win;
792 pi->sec_level = l2cap_pi(parent)->sec_level;
793 pi->role_switch = l2cap_pi(parent)->role_switch;
794 pi->force_reliable = l2cap_pi(parent)->force_reliable;
796 pi->imtu = L2CAP_DEFAULT_MTU;
798 pi->mode = L2CAP_MODE_BASIC;
799 pi->max_tx = max_transmit;
800 pi->fcs = L2CAP_FCS_CRC16;
801 pi->tx_win = tx_window;
802 pi->sec_level = BT_SECURITY_LOW;
804 pi->force_reliable = 0;
807 /* Default config options */
809 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
810 skb_queue_head_init(TX_QUEUE(sk));
811 skb_queue_head_init(SREJ_QUEUE(sk));
812 INIT_LIST_HEAD(SREJ_LIST(sk));
815 static struct proto l2cap_proto = {
817 .owner = THIS_MODULE,
818 .obj_size = sizeof(struct l2cap_pinfo)
821 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
825 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
829 sock_init_data(sock, sk);
830 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
832 sk->sk_destruct = l2cap_sock_destruct;
833 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
835 sock_reset_flag(sk, SOCK_ZAPPED);
837 sk->sk_protocol = proto;
838 sk->sk_state = BT_OPEN;
840 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
842 bt_sock_link(&l2cap_sk_list, sk);
846 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
851 BT_DBG("sock %p", sock);
853 sock->state = SS_UNCONNECTED;
855 if (sock->type != SOCK_SEQPACKET &&
856 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
857 return -ESOCKTNOSUPPORT;
859 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
862 sock->ops = &l2cap_sock_ops;
864 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
868 l2cap_sock_init(sk, NULL);
872 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
874 struct sock *sk = sock->sk;
875 struct sockaddr_l2 la;
880 if (!addr || addr->sa_family != AF_BLUETOOTH)
883 memset(&la, 0, sizeof(la));
884 len = min_t(unsigned int, sizeof(la), alen);
885 memcpy(&la, addr, len);
892 if (sk->sk_state != BT_OPEN) {
897 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
898 !capable(CAP_NET_BIND_SERVICE)) {
903 write_lock_bh(&l2cap_sk_list.lock);
905 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
908 /* Save source address */
909 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
910 l2cap_pi(sk)->psm = la.l2_psm;
911 l2cap_pi(sk)->sport = la.l2_psm;
912 sk->sk_state = BT_BOUND;
914 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
915 __le16_to_cpu(la.l2_psm) == 0x0003)
916 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
919 write_unlock_bh(&l2cap_sk_list.lock);
926 static int l2cap_do_connect(struct sock *sk)
928 bdaddr_t *src = &bt_sk(sk)->src;
929 bdaddr_t *dst = &bt_sk(sk)->dst;
930 struct l2cap_conn *conn;
931 struct hci_conn *hcon;
932 struct hci_dev *hdev;
936 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
939 hdev = hci_get_route(dst, src);
941 return -EHOSTUNREACH;
943 hci_dev_lock_bh(hdev);
947 if (sk->sk_type == SOCK_RAW) {
948 switch (l2cap_pi(sk)->sec_level) {
949 case BT_SECURITY_HIGH:
950 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
952 case BT_SECURITY_MEDIUM:
953 auth_type = HCI_AT_DEDICATED_BONDING;
956 auth_type = HCI_AT_NO_BONDING;
959 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
960 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
961 auth_type = HCI_AT_NO_BONDING_MITM;
963 auth_type = HCI_AT_NO_BONDING;
965 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
966 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
968 switch (l2cap_pi(sk)->sec_level) {
969 case BT_SECURITY_HIGH:
970 auth_type = HCI_AT_GENERAL_BONDING_MITM;
972 case BT_SECURITY_MEDIUM:
973 auth_type = HCI_AT_GENERAL_BONDING;
976 auth_type = HCI_AT_NO_BONDING;
981 hcon = hci_connect(hdev, ACL_LINK, dst,
982 l2cap_pi(sk)->sec_level, auth_type);
986 conn = l2cap_conn_add(hcon, 0);
994 /* Update source addr of the socket */
995 bacpy(src, conn->src);
997 l2cap_chan_add(conn, sk, NULL);
999 sk->sk_state = BT_CONNECT;
1000 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1002 if (hcon->state == BT_CONNECTED) {
1003 if (sk->sk_type != SOCK_SEQPACKET) {
1004 l2cap_sock_clear_timer(sk);
1005 sk->sk_state = BT_CONNECTED;
1011 hci_dev_unlock_bh(hdev);
1016 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1018 struct sock *sk = sock->sk;
1019 struct sockaddr_l2 la;
1022 BT_DBG("sk %p", sk);
1024 if (!addr || alen < sizeof(addr->sa_family) ||
1025 addr->sa_family != AF_BLUETOOTH)
1028 memset(&la, 0, sizeof(la));
1029 len = min_t(unsigned int, sizeof(la), alen);
1030 memcpy(&la, addr, len);
1037 if (sk->sk_type == SOCK_SEQPACKET && !la.l2_psm) {
1042 switch (l2cap_pi(sk)->mode) {
1043 case L2CAP_MODE_BASIC:
1045 case L2CAP_MODE_ERTM:
1046 case L2CAP_MODE_STREAMING:
1055 switch (sk->sk_state) {
1059 /* Already connecting */
1063 /* Already connected */
1076 /* Set destination address and psm */
1077 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1078 l2cap_pi(sk)->psm = la.l2_psm;
1080 err = l2cap_do_connect(sk);
1085 err = bt_sock_wait_state(sk, BT_CONNECTED,
1086 sock_sndtimeo(sk, flags & O_NONBLOCK));
1092 static int l2cap_sock_listen(struct socket *sock, int backlog)
1094 struct sock *sk = sock->sk;
1097 BT_DBG("sk %p backlog %d", sk, backlog);
1101 if (sk->sk_state != BT_BOUND || sock->type != SOCK_SEQPACKET) {
1106 switch (l2cap_pi(sk)->mode) {
1107 case L2CAP_MODE_BASIC:
1109 case L2CAP_MODE_ERTM:
1110 case L2CAP_MODE_STREAMING:
1119 if (!l2cap_pi(sk)->psm) {
1120 bdaddr_t *src = &bt_sk(sk)->src;
1125 write_lock_bh(&l2cap_sk_list.lock);
1127 for (psm = 0x1001; psm < 0x1100; psm += 2)
1128 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1129 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1130 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1135 write_unlock_bh(&l2cap_sk_list.lock);
1141 sk->sk_max_ack_backlog = backlog;
1142 sk->sk_ack_backlog = 0;
1143 sk->sk_state = BT_LISTEN;
1150 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1152 DECLARE_WAITQUEUE(wait, current);
1153 struct sock *sk = sock->sk, *nsk;
1157 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1159 if (sk->sk_state != BT_LISTEN) {
1164 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1166 BT_DBG("sk %p timeo %ld", sk, timeo);
1168 /* Wait for an incoming connection. (wake-one). */
1169 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1170 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1171 set_current_state(TASK_INTERRUPTIBLE);
1178 timeo = schedule_timeout(timeo);
1179 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1181 if (sk->sk_state != BT_LISTEN) {
1186 if (signal_pending(current)) {
1187 err = sock_intr_errno(timeo);
1191 set_current_state(TASK_RUNNING);
1192 remove_wait_queue(sk_sleep(sk), &wait);
1197 newsock->state = SS_CONNECTED;
1199 BT_DBG("new socket %p", nsk);
1206 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1208 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1209 struct sock *sk = sock->sk;
1211 BT_DBG("sock %p, sk %p", sock, sk);
1213 addr->sa_family = AF_BLUETOOTH;
1214 *len = sizeof(struct sockaddr_l2);
1217 la->l2_psm = l2cap_pi(sk)->psm;
1218 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1219 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1221 la->l2_psm = l2cap_pi(sk)->sport;
1222 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1223 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
1229 static void l2cap_monitor_timeout(unsigned long arg)
1231 struct sock *sk = (void *) arg;
1235 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1236 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1241 l2cap_pi(sk)->retry_count++;
1242 __mod_monitor_timer();
1244 control = L2CAP_CTRL_POLL;
1245 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1249 static void l2cap_retrans_timeout(unsigned long arg)
1251 struct sock *sk = (void *) arg;
1255 l2cap_pi(sk)->retry_count = 1;
1256 __mod_monitor_timer();
1258 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1260 control = L2CAP_CTRL_POLL;
1261 l2cap_send_rr_or_rnr(l2cap_pi(sk), control);
1265 static void l2cap_drop_acked_frames(struct sock *sk)
1267 struct sk_buff *skb;
1269 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1270 l2cap_pi(sk)->unacked_frames) {
1271 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1274 skb = skb_dequeue(TX_QUEUE(sk));
1277 l2cap_pi(sk)->unacked_frames--;
1280 if (!l2cap_pi(sk)->unacked_frames)
1281 del_timer(&l2cap_pi(sk)->retrans_timer);
1286 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1288 struct l2cap_pinfo *pi = l2cap_pi(sk);
1291 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1293 err = hci_send_acl(pi->conn->hcon, skb, 0);
1300 static int l2cap_streaming_send(struct sock *sk)
1302 struct sk_buff *skb, *tx_skb;
1303 struct l2cap_pinfo *pi = l2cap_pi(sk);
1307 while ((skb = sk->sk_send_head)) {
1308 tx_skb = skb_clone(skb, GFP_ATOMIC);
1310 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1311 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1312 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1314 if (pi->fcs == L2CAP_FCS_CRC16) {
1315 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1316 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1319 err = l2cap_do_send(sk, tx_skb);
1321 l2cap_send_disconn_req(pi->conn, sk);
1325 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1327 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1328 sk->sk_send_head = NULL;
1330 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1332 skb = skb_dequeue(TX_QUEUE(sk));
1338 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1340 struct l2cap_pinfo *pi = l2cap_pi(sk);
1341 struct sk_buff *skb, *tx_skb;
1345 skb = skb_peek(TX_QUEUE(sk));
1347 if (bt_cb(skb)->tx_seq != tx_seq) {
1348 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1350 skb = skb_queue_next(TX_QUEUE(sk), skb);
1354 if (pi->remote_max_tx &&
1355 bt_cb(skb)->retries == pi->remote_max_tx) {
1356 l2cap_send_disconn_req(pi->conn, sk);
1360 tx_skb = skb_clone(skb, GFP_ATOMIC);
1361 bt_cb(skb)->retries++;
1362 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1363 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1364 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1365 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1367 if (pi->fcs == L2CAP_FCS_CRC16) {
1368 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1369 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1372 err = l2cap_do_send(sk, tx_skb);
1374 l2cap_send_disconn_req(pi->conn, sk);
1382 static int l2cap_ertm_send(struct sock *sk)
1384 struct sk_buff *skb, *tx_skb;
1385 struct l2cap_pinfo *pi = l2cap_pi(sk);
1389 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1392 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1393 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1395 if (pi->remote_max_tx &&
1396 bt_cb(skb)->retries == pi->remote_max_tx) {
1397 l2cap_send_disconn_req(pi->conn, sk);
1401 tx_skb = skb_clone(skb, GFP_ATOMIC);
1403 bt_cb(skb)->retries++;
1405 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1406 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1407 control |= L2CAP_CTRL_FINAL;
1408 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1410 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1411 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1412 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1415 if (pi->fcs == L2CAP_FCS_CRC16) {
1416 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1417 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1420 err = l2cap_do_send(sk, tx_skb);
1422 l2cap_send_disconn_req(pi->conn, sk);
1425 __mod_retrans_timer();
1427 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1428 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1430 pi->unacked_frames++;
1433 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1434 sk->sk_send_head = NULL;
1436 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1444 static int l2cap_send_ack(struct l2cap_pinfo *pi)
1446 struct sock *sk = (struct sock *)pi;
1449 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1451 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1452 control |= L2CAP_SUPER_RCV_NOT_READY;
1453 return l2cap_send_sframe(pi, control);
1454 } else if (l2cap_ertm_send(sk) == 0) {
1455 control |= L2CAP_SUPER_RCV_READY;
1456 return l2cap_send_sframe(pi, control);
1461 static int l2cap_send_srejtail(struct sock *sk)
1463 struct srej_list *tail;
1466 control = L2CAP_SUPER_SELECT_REJECT;
1467 control |= L2CAP_CTRL_FINAL;
1469 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1470 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1472 l2cap_send_sframe(l2cap_pi(sk), control);
1477 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1479 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1480 struct sk_buff **frag;
1483 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count)) {
1490 /* Continuation fragments (no L2CAP header) */
1491 frag = &skb_shinfo(skb)->frag_list;
1493 count = min_t(unsigned int, conn->mtu, len);
1495 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1498 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1504 frag = &(*frag)->next;
1510 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1512 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1513 struct sk_buff *skb;
1514 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1515 struct l2cap_hdr *lh;
1517 BT_DBG("sk %p len %d", sk, (int)len);
1519 count = min_t(unsigned int, (conn->mtu - hlen), len);
1520 skb = bt_skb_send_alloc(sk, count + hlen,
1521 msg->msg_flags & MSG_DONTWAIT, &err);
1523 return ERR_PTR(-ENOMEM);
1525 /* Create L2CAP header */
1526 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1527 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1528 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1529 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1531 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1532 if (unlikely(err < 0)) {
1534 return ERR_PTR(err);
1539 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1541 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1542 struct sk_buff *skb;
1543 int err, count, hlen = L2CAP_HDR_SIZE;
1544 struct l2cap_hdr *lh;
1546 BT_DBG("sk %p len %d", sk, (int)len);
1548 count = min_t(unsigned int, (conn->mtu - hlen), len);
1549 skb = bt_skb_send_alloc(sk, count + hlen,
1550 msg->msg_flags & MSG_DONTWAIT, &err);
1552 return ERR_PTR(-ENOMEM);
1554 /* Create L2CAP header */
1555 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1556 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1557 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1559 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1560 if (unlikely(err < 0)) {
1562 return ERR_PTR(err);
/*
 * Build one ERTM/streaming I-frame: L2CAP header, 16-bit control field,
 * optional 16-bit SDU-length field (SAR start frames, sdulen != 0), the
 * payload, and a 2-byte FCS placeholder when CRC16 is configured.  The
 * real FCS value is filled in at transmit time; here it is zeroed.
 * Returns the skb or an ERR_PTR.
 * NOTE(review): lossy extract -- the BT_CONNECTED state check guarding
 * the -ENOTCONN return, hlen adjustments for sdulen/FCS, and the final
 * "return skb" are missing from the visible text.
 */
1567 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1569 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1570 struct sk_buff *skb;
1571 int err, count, hlen = L2CAP_HDR_SIZE + 2; /* +2 for the control field */
1572 struct l2cap_hdr *lh;
1574 BT_DBG("sk %p len %d", sk, (int)len);
1577 return ERR_PTR(-ENOTCONN);
/* CRC16 FCS adds two more header-accounted bytes to each I-frame. */
1582 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1585 count = min_t(unsigned int, (conn->mtu - hlen), len);
1586 skb = bt_skb_send_alloc(sk, count + hlen,
1587 msg->msg_flags & MSG_DONTWAIT, &err);
1589 return ERR_PTR(-ENOMEM);
1591 /* Create L2CAP header */
1592 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1593 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1594 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1595 put_unaligned_le16(control, skb_put(skb, 2));
/* Only SAR "start" frames carry the total SDU length. */
1597 put_unaligned_le16(sdulen, skb_put(skb, 2));
1599 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1600 if (unlikely(err < 0)) {
1602 return ERR_PTR(err);
1605 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1606 put_unaligned_le16(0, skb_put(skb, 2)); /* FCS placeholder, computed later */
1608 bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the remote MPS into a START frame followed
 * by CONTINUE frames and a final END frame, building them on a local
 * queue first so a mid-stream allocation failure can discard the whole
 * SDU atomically.  On success the queue is spliced onto the socket's TX
 * queue and sk_send_head is primed if idle.
 * NOTE(review): lossy extract -- the loop construct around the
 * CONTINUE/END frames and several error checks are not visible.
 */
1612 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1614 struct l2cap_pinfo *pi = l2cap_pi(sk);
1615 struct sk_buff *skb;
1616 struct sk_buff_head sar_queue;
1620 __skb_queue_head_init(&sar_queue);
1621 control = L2CAP_SDU_START;
/* START frame carries the full SDU length (len) in its sdulen field. */
1622 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1624 return PTR_ERR(skb);
1626 __skb_queue_tail(&sar_queue, skb);
1627 len -= pi->remote_mps;
1628 size += pi->remote_mps;
1634 if (len > pi->remote_mps) {
1635 control |= L2CAP_SDU_CONTINUE;
1636 buflen = pi->remote_mps;
1638 control |= L2CAP_SDU_END;
1642 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
/* Allocation failure mid-SDU: drop every frame queued so far. */
1644 skb_queue_purge(&sar_queue);
1645 return PTR_ERR(skb);
1648 __skb_queue_tail(&sar_queue, skb);
1653 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1654 if (sk->sk_send_head == NULL)
1655 sk->sk_send_head = sar_queue.next;
/*
 * sendmsg() entry point for L2CAP sockets.  Rejects MSG_OOB and
 * non-connected sockets, then dispatches on socket type / channel mode:
 *  - SOCK_DGRAM: one connectionless PDU, sent immediately.
 *  - BASIC mode: single PDU after an outgoing-MTU check.
 *  - ERTM/STREAMING: queue one unsegmented I-frame or SAR-segment the
 *    SDU, then kick the mode-specific transmit routine.
 * NOTE(review): lossy extract -- lock_sock/release_sock bracketing,
 * IS_ERR(skb) checks and several error paths are not visible here.
 */
1660 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1662 struct sock *sk = sock->sk;
1663 struct l2cap_pinfo *pi = l2cap_pi(sk);
1664 struct sk_buff *skb;
1668 BT_DBG("sock %p, sk %p", sock, sk);
1670 err = sock_error(sk);
1674 if (msg->msg_flags & MSG_OOB)
1679 if (sk->sk_state != BT_CONNECTED) {
1684 /* Connectionless channel */
1685 if (sk->sk_type == SOCK_DGRAM) {
1686 skb = l2cap_create_connless_pdu(sk, msg, len);
1690 err = l2cap_do_send(sk, skb);
1695 case L2CAP_MODE_BASIC:
1696 /* Check outgoing MTU */
1697 if (len > pi->omtu) {
1702 /* Create a basic PDU */
1703 skb = l2cap_create_basic_pdu(sk, msg, len);
1709 err = l2cap_do_send(sk, skb);
1714 case L2CAP_MODE_ERTM:
1715 case L2CAP_MODE_STREAMING:
1716 /* Entire SDU fits into one PDU */
1717 if (len <= pi->remote_mps) {
1718 control = L2CAP_SDU_UNSEGMENTED;
1719 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1724 __skb_queue_tail(TX_QUEUE(sk), skb);
/* Prime sk_send_head so the transmit routine starts from this frame. */
1725 if (sk->sk_send_head == NULL)
1726 sk->sk_send_head = skb;
1728 /* Segment SDU into multiples PDUs */
1729 err = l2cap_sar_segment_sdu(sk, msg, len);
1734 if (pi->mode == L2CAP_MODE_STREAMING)
1735 err = l2cap_streaming_send(sk);
1737 err = l2cap_ertm_send(sk);
1744 BT_DBG("bad state %1.1x", pi->mode);
/*
 * recvmsg() entry point.  If the socket is an incoming connection held
 * in BT_CONNECT2 by the defer_setup option, the first read completes the
 * deferred accept: send the pending L2CAP_CONN_RSP (success) and move to
 * BT_CONFIG.  Normal data reception is delegated to bt_sock_recvmsg().
 * Note rsp.scid/rsp.dcid are swapped relative to our local naming: the
 * response's scid field is the remote's source CID, which we store as dcid.
 */
1753 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1755 struct sock *sk = sock->sk;
1759 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1760 struct l2cap_conn_rsp rsp;
1762 sk->sk_state = BT_CONFIG;
1764 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1765 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1766 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1767 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1768 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1769 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1777 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt handler (L2CAP_OPTIONS / L2CAP_LM).
 * For L2CAP_OPTIONS the current values pre-fill the struct so a short
 * copy_from_user leaves unmentioned fields unchanged.  For L2CAP_LM the
 * link-mode bits map onto sec_level/role_switch/force_reliable.
 * NOTE(review): the copied opts.mode/opts.fcs are stored without range
 * validation -- an invalid mode value propagates into configuration.
 * Later upstream versions validate mode here; confirm before relying on it.
 * NOTE(review): lossy extract -- switch(optname), lock_sock bracketing
 * and the default/-ENOPROTOOPT arm are not visible.
 */
1780 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1782 struct sock *sk = sock->sk;
1783 struct l2cap_options opts;
1787 BT_DBG("sk %p", sk);
1793 opts.imtu = l2cap_pi(sk)->imtu;
1794 opts.omtu = l2cap_pi(sk)->omtu;
1795 opts.flush_to = l2cap_pi(sk)->flush_to;
1796 opts.mode = l2cap_pi(sk)->mode;
1797 opts.fcs = l2cap_pi(sk)->fcs;
1798 opts.max_tx = l2cap_pi(sk)->max_tx;
1799 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1801 len = min_t(unsigned int, sizeof(opts), optlen);
1802 if (copy_from_user((char *) &opts, optval, len)) {
1807 l2cap_pi(sk)->imtu = opts.imtu;
1808 l2cap_pi(sk)->omtu = opts.omtu;
1809 l2cap_pi(sk)->mode = opts.mode;
1810 l2cap_pi(sk)->fcs = opts.fcs;
1811 l2cap_pi(sk)->max_tx = opts.max_tx;
1812 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1816 if (get_user(opt, (u32 __user *) optval)) {
/* Highest requested link-mode bit wins: SECURE > ENCRYPT > AUTH. */
1821 if (opt & L2CAP_LM_AUTH)
1822 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1823 if (opt & L2CAP_LM_ENCRYPT)
1824 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1825 if (opt & L2CAP_LM_SECURE)
1826 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1828 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1829 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/*
 * SOL_BLUETOOTH setsockopt handler.  SOL_L2CAP is routed to the legacy
 * handler for binary compatibility; anything else non-Bluetooth is
 * -ENOPROTOOPT.  Handles BT_SECURITY (seqpacket/raw only, level range
 * checked) and BT_DEFER_SETUP (only before the socket is connected,
 * i.e. in BT_BOUND or BT_LISTEN).
 * NOTE(review): lossy extract -- switch(optname), lock_sock bracketing
 * and error-path returns are not visible.
 */
1841 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1843 struct sock *sk = sock->sk;
1844 struct bt_security sec;
1848 BT_DBG("sk %p", sk);
1850 if (level == SOL_L2CAP)
1851 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1853 if (level != SOL_BLUETOOTH)
1854 return -ENOPROTOOPT;
1860 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
/* Default to LOW so a short user copy still yields a valid level. */
1865 sec.level = BT_SECURITY_LOW;
1867 len = min_t(unsigned int, sizeof(sec), optlen);
1868 if (copy_from_user((char *) &sec, optval, len)) {
1873 if (sec.level < BT_SECURITY_LOW ||
1874 sec.level > BT_SECURITY_HIGH) {
1879 l2cap_pi(sk)->sec_level = sec.level;
1882 case BT_DEFER_SETUP:
1883 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1888 if (get_user(opt, (u32 __user *) optval)) {
1893 bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt handler: L2CAP_OPTIONS (channel options
 * snapshot), L2CAP_LM (sec_level mapped back to cumulative link-mode
 * bits via switch fall-through), and L2CAP_CONNINFO (HCI handle + device
 * class, valid only once connected or in a deferred-setup CONNECT2).
 * NOTE(review): lossy extract -- switch(optname), lock_sock bracketing,
 * break statements and the -EFAULT returns after copy_to_user are not
 * visible here.
 */
1905 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1907 struct sock *sk = sock->sk;
1908 struct l2cap_options opts;
1909 struct l2cap_conninfo cinfo;
1913 BT_DBG("sk %p", sk);
1915 if (get_user(len, optlen))
1922 opts.imtu = l2cap_pi(sk)->imtu;
1923 opts.omtu = l2cap_pi(sk)->omtu;
1924 opts.flush_to = l2cap_pi(sk)->flush_to;
1925 opts.mode = l2cap_pi(sk)->mode;
1926 opts.fcs = l2cap_pi(sk)->fcs;
1927 opts.max_tx = l2cap_pi(sk)->max_tx;
1928 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
/* Never copy more than the caller asked for, nor more than we have. */
1930 len = min_t(unsigned int, len, sizeof(opts));
1931 if (copy_to_user(optval, (char *) &opts, len))
1937 switch (l2cap_pi(sk)->sec_level) {
1938 case BT_SECURITY_LOW:
1939 opt = L2CAP_LM_AUTH;
1941 case BT_SECURITY_MEDIUM:
1942 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1944 case BT_SECURITY_HIGH:
1945 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1953 if (l2cap_pi(sk)->role_switch)
1954 opt |= L2CAP_LM_MASTER;
1956 if (l2cap_pi(sk)->force_reliable)
1957 opt |= L2CAP_LM_RELIABLE;
1959 if (put_user(opt, (u32 __user *) optval))
1963 case L2CAP_CONNINFO:
1964 if (sk->sk_state != BT_CONNECTED &&
1965 !(sk->sk_state == BT_CONNECT2 &&
1966 bt_sk(sk)->defer_setup)) {
1971 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1972 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1974 len = min_t(unsigned int, len, sizeof(cinfo));
1975 if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * SOL_BLUETOOTH getsockopt handler, mirror of the setsockopt above:
 * legacy SOL_L2CAP is delegated, BT_SECURITY returns the current
 * security level (seqpacket/raw only), BT_DEFER_SETUP returns the flag
 * only while the socket is still in BT_BOUND/BT_LISTEN.
 * NOTE(review): lossy extract -- switch(optname), lock_sock bracketing
 * and error returns are not visible.
 */
1989 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
1991 struct sock *sk = sock->sk;
1992 struct bt_security sec;
1995 BT_DBG("sk %p", sk);
1997 if (level == SOL_L2CAP)
1998 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2000 if (level != SOL_BLUETOOTH)
2001 return -ENOPROTOOPT;
2003 if (get_user(len, optlen))
2010 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_RAW) {
2015 sec.level = l2cap_pi(sk)->sec_level;
2017 len = min_t(unsigned int, len, sizeof(sec));
2018 if (copy_to_user(optval, (char *) &sec, len))
2023 case BT_DEFER_SETUP:
2024 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2029 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * shutdown() for L2CAP sockets.  Idempotent: only the first call marks
 * SHUTDOWN_MASK, cancels the socket timer and starts the L2CAP close
 * handshake.  With SO_LINGER set, blocks until BT_CLOSED (bounded by
 * sk_lingertime).  The 'how' argument is ignored -- L2CAP always shuts
 * down both directions.
 */
2043 static int l2cap_sock_shutdown(struct socket *sock, int how)
2045 struct sock *sk = sock->sk;
2048 BT_DBG("sock %p, sk %p", sock, sk);
2054 if (!sk->sk_shutdown) {
2055 sk->sk_shutdown = SHUTDOWN_MASK;
2056 l2cap_sock_clear_timer(sk);
2057 __l2cap_sock_close(sk, 0);
2059 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2060 err = bt_sock_wait_state(sk, BT_CLOSED,
/*
 * release() for L2CAP sockets: full shutdown (both directions) followed
 * by l2cap_sock_kill(), which frees the sock once it is zapped and
 * unowned.  Returns the shutdown result.
 */
2067 static int l2cap_sock_release(struct socket *sock)
2069 struct sock *sk = sock->sk;
2072 BT_DBG("sock %p, sk %p", sock, sk);
2077 err = l2cap_sock_shutdown(sock, 2);
2080 l2cap_sock_kill(sk);
/*
 * Called when channel configuration completes.  Clears config state and
 * the setup timer, then wakes whichever side is waiting: an outgoing
 * channel's connect()er via sk_state_change, or the listening parent's
 * accept()er via sk_data_ready.
 * NOTE(review): lossy extract -- the "if (!parent)"/"else" branching
 * between the two wakeup paths is not visible here.
 */
2084 static void l2cap_chan_ready(struct sock *sk)
2086 struct sock *parent = bt_sk(sk)->parent;
2088 BT_DBG("sk %p, parent %p", sk, parent);
2090 l2cap_pi(sk)->conf_state = 0;
2091 l2cap_sock_clear_timer(sk);
2094 /* Outgoing channel.
2095 * Wake up socket sleeping on connect.
2097 sk->sk_state = BT_CONNECTED;
2098 sk->sk_state_change(sk);
2100 /* Incoming channel.
2101 * Wake up socket sleeping on accept.
2103 parent->sk_data_ready(parent, 0);
2107 /* Copy frame to all raw sockets on that connection */
/*
 * Clone the skb to every SOCK_RAW socket on this connection (used so
 * monitors see signalling traffic).  GFP_ATOMIC because this runs in
 * the receive path under the channel-list read lock; clone or queue
 * failures for one socket simply skip that socket.
 */
2108 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2110 struct l2cap_chan_list *l = &conn->chan_list;
2111 struct sk_buff *nskb;
2114 BT_DBG("conn %p", conn);
2116 read_lock(&l->lock);
2117 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2118 if (sk->sk_type != SOCK_RAW)
2121 /* Don't send frame to the socket it came from */
2124 nskb = skb_clone(skb, GFP_ATOMIC);
2128 if (sock_queue_rcv_skb(sk, nskb))
2131 read_unlock(&l->lock);
2134 /* ---- L2CAP signalling commands ---- */
/*
 * Allocate and fill a signalling-channel skb: L2CAP header addressed to
 * CID 0x0001, command header (code/ident/len), then the payload, spilled
 * into frag_list fragments when it exceeds the HCI MTU.  Returns NULL on
 * allocation failure.
 * NOTE(review): lossy extract -- NULL checks after the allocations, the
 * fragment loop construct, the kfree_skb on partial failure and the
 * final "return skb" are not visible here.
 */
2135 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2136 u8 code, u8 ident, u16 dlen, void *data)
2138 struct sk_buff *skb, **frag;
2139 struct l2cap_cmd_hdr *cmd;
2140 struct l2cap_hdr *lh;
2143 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2144 conn, code, ident, dlen);
2146 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2147 count = min_t(unsigned int, conn->mtu, len);
2149 skb = bt_skb_alloc(count, GFP_ATOMIC);
2153 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2154 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2155 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2157 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2160 cmd->len = cpu_to_le16(dlen);
/* First fragment: whatever payload fits after the two headers. */
2163 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2164 memcpy(skb_put(skb, count), data, count);
2170 /* Continuation fragments (no L2CAP header) */
2171 frag = &skb_shinfo(skb)->frag_list;
2173 count = min_t(unsigned int, conn->mtu, len);
2175 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2179 memcpy(skb_put(*frag, count), data, count);
2184 frag = &(*frag)->next;
/*
 * Decode one configuration option at *ptr.  Outputs the option type and
 * length; 1/2/4-byte values are converted from little-endian into *val,
 * any other length returns a pointer to the raw option bytes cast into
 * *val (caller memcpy's it out, e.g. RFC options).  Returns the total
 * number of bytes consumed so the caller can walk the option list.
 * NOTE(review): opt->len comes off the wire and is not validated against
 * the remaining buffer here -- callers bound the walk with "len >=
 * L2CAP_CONF_OPT_SIZE" only; confirm upstream hardening if reused.
 */
2194 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2196 struct l2cap_conf_opt *opt = *ptr;
2199 len = L2CAP_CONF_OPT_SIZE + opt->len;
2207 *val = *((u8 *) opt->val);
2211 *val = __le16_to_cpu(*((__le16 *) opt->val));
2215 *val = __le32_to_cpu(*((__le32 *) opt->val));
2219 *val = (unsigned long) opt->val;
2223 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one configuration option at *ptr (the inverse of
 * l2cap_get_conf_opt): 1/2/4-byte values are stored little-endian,
 * anything else treats 'val' as a pointer and copies 'len' raw bytes.
 * Advances *ptr past the option written.
 */
2227 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2229 struct l2cap_conf_opt *opt = *ptr;
2231 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2238 *((u8 *) opt->val) = val;
2242 *((__le16 *) opt->val) = cpu_to_le16(val);
2246 *((__le32 *) opt->val) = cpu_to_le32(val);
2250 memcpy(opt->val, (void *) val, len);
2254 *ptr += L2CAP_CONF_OPT_SIZE + len;
/*
 * ERTM acknowledgement timer callback: when it fires, send any pending
 * acknowledgement for received I-frames.  'arg' is the socket pointer
 * stashed at setup_timer() time in l2cap_ertm_init().
 */
2257 static void l2cap_ack_timeout(unsigned long arg)
2259 struct sock *sk = (void *) arg;
2262 l2cap_send_ack(l2cap_pi(sk));
/*
 * Initialize per-channel ERTM state when the channel enters connected
 * state in ERTM mode: zero the sequence/ack bookkeeping, arm the three
 * ERTM timers (retransmission, monitor, ack) with this socket as their
 * argument, and init the selective-reject queue.
 */
2266 static inline void l2cap_ertm_init(struct sock *sk)
2268 l2cap_pi(sk)->expected_ack_seq = 0;
2269 l2cap_pi(sk)->unacked_frames = 0;
2270 l2cap_pi(sk)->buffer_seq = 0;
2271 l2cap_pi(sk)->num_acked = 0;
2272 l2cap_pi(sk)->frames_sent = 0;
2274 setup_timer(&l2cap_pi(sk)->retrans_timer,
2275 l2cap_retrans_timeout, (unsigned long) sk);
2276 setup_timer(&l2cap_pi(sk)->monitor_timer,
2277 l2cap_monitor_timeout, (unsigned long) sk);
2278 setup_timer(&l2cap_pi(sk)->ack_timer,
2279 l2cap_ack_timeout, (unsigned long) sk);
2281 __skb_queue_head_init(SREJ_QUEUE(sk));
/*
 * Return non-zero when 'mode' is usable: the feature bit must be set in
 * both the remote feature mask and our local mask.  ERTM/STREAMING bits
 * are ORed into the local mask -- presumably gated on the enable_ertm
 * module parameter (the guarding "if" is missing from this extract;
 * confirm against the full source).  BASIC mode falls through to the
 * (unseen) default return.
 */
2284 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2286 u32 local_feat_mask = l2cap_feat_mask;
2288 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2291 case L2CAP_MODE_ERTM:
2292 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2293 case L2CAP_MODE_STREAMING:
2294 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/*
 * Pick the channel mode to offer: keep the requested ERTM/STREAMING mode
 * when both sides support it, otherwise fall back to basic mode (the
 * "return mode" for the supported case is outside this extract).
 */
2300 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2303 case L2CAP_MODE_STREAMING:
2304 case L2CAP_MODE_ERTM:
2305 if (l2cap_mode_supported(mode, remote_feat_mask))
2309 return L2CAP_MODE_BASIC;
/*
 * Build an outgoing L2CAP_CONF_REQ for this channel into 'data'.
 * On the first request (no prior req/rsp exchanged) an ERTM/STREAMING
 * channel either locks in its mode (STATE2_DEVICE) or disconnects when
 * unsupported; otherwise the mode is renegotiated via
 * l2cap_select_mode().  Per mode it appends MTU and/or RFC options, and
 * offers FCS_NONE when both sides allow dropping the checksum.
 * Returns the total request length (tail "return ptr - data" is outside
 * this extract).
 */
2313 static int l2cap_build_conf_req(struct sock *sk, void *data)
2315 struct l2cap_pinfo *pi = l2cap_pi(sk);
2316 struct l2cap_conf_req *req = data;
2317 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2318 void *ptr = req->data;
2320 BT_DBG("sk %p", sk);
2322 if (pi->num_conf_req || pi->num_conf_rsp)
2326 case L2CAP_MODE_STREAMING:
2327 case L2CAP_MODE_ERTM:
2328 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2329 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2330 l2cap_send_disconn_req(pi->conn, sk);
2333 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2339 case L2CAP_MODE_BASIC:
/* Only mention MTU when it differs from the spec default. */
2340 if (pi->imtu != L2CAP_DEFAULT_MTU)
2341 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2344 case L2CAP_MODE_ERTM:
2345 rfc.mode = L2CAP_MODE_ERTM;
2346 rfc.txwin_size = pi->tx_win;
2347 rfc.max_transmit = pi->max_tx;
/* Timeouts are set by the acceptor; zero means "use yours". */
2348 rfc.retrans_timeout = 0;
2349 rfc.monitor_timeout = 0;
2350 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
/* Clamp MPS so PDU + worst-case overhead (10) fits the HCI MTU. */
2351 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2352 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2354 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2355 sizeof(rfc), (unsigned long) &rfc);
2357 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2360 if (pi->fcs == L2CAP_FCS_NONE ||
2361 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2362 pi->fcs = L2CAP_FCS_NONE;
2363 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2367 case L2CAP_MODE_STREAMING:
2368 rfc.mode = L2CAP_MODE_STREAMING;
2370 rfc.max_transmit = 0;
2371 rfc.retrans_timeout = 0;
2372 rfc.monitor_timeout = 0;
2373 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2374 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2375 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2377 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2378 sizeof(rfc), (unsigned long) &rfc);
2380 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2383 if (pi->fcs == L2CAP_FCS_NONE ||
2384 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2385 pi->fcs = L2CAP_FCS_NONE;
2386 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2391 /* FIXME: Need actual value of the flush timeout */
2392 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2393 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2395 req->dcid = cpu_to_le16(pi->dcid);
2396 req->flags = cpu_to_le16(0);
/*
 * Parse the peer's accumulated CONF_REQ options (pi->conf_req/conf_len)
 * and build our CONF_RSP into 'data'.  Walks the option list, records
 * MTU/RFC/FCS preferences, rejects unknown non-hint options with
 * CONF_UNKNOWN, negotiates the mode (refusing after the second failed
 * round), then fills in mode-specific output options.  Returns the
 * response length (tail "return ptr - data" is outside this extract).
 */
2401 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2403 struct l2cap_pinfo *pi = l2cap_pi(sk);
2404 struct l2cap_conf_rsp *rsp = data;
2405 void *ptr = rsp->data;
2406 void *req = pi->conf_req;
2407 int len = pi->conf_len;
2408 int type, hint, olen;
2410 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2411 u16 mtu = L2CAP_DEFAULT_MTU;
2412 u16 result = L2CAP_CONF_SUCCESS;
2414 BT_DBG("sk %p", sk);
2416 while (len >= L2CAP_CONF_OPT_SIZE) {
2417 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
/* Hint options may be ignored; only non-hints can trigger CONF_UNKNOWN. */
2419 hint = type & L2CAP_CONF_HINT;
2420 type &= L2CAP_CONF_MASK;
2423 case L2CAP_CONF_MTU:
2427 case L2CAP_CONF_FLUSH_TO:
2431 case L2CAP_CONF_QOS:
2434 case L2CAP_CONF_RFC:
2435 if (olen == sizeof(rfc))
2436 memcpy(&rfc, (void *) val, olen);
2439 case L2CAP_CONF_FCS:
2440 if (val == L2CAP_FCS_NONE)
2441 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2449 result = L2CAP_CONF_UNKNOWN;
2450 *((u8 *) ptr++) = type;
2455 if (pi->num_conf_rsp || pi->num_conf_req)
2459 case L2CAP_MODE_STREAMING:
2460 case L2CAP_MODE_ERTM:
2461 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2462 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2463 return -ECONNREFUSED;
2466 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2471 if (pi->mode != rfc.mode) {
2472 result = L2CAP_CONF_UNACCEPT;
2473 rfc.mode = pi->mode;
/* Second disagreement on mode: give up rather than loop forever. */
2475 if (pi->num_conf_rsp == 1)
2476 return -ECONNREFUSED;
2478 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2479 sizeof(rfc), (unsigned long) &rfc);
2483 if (result == L2CAP_CONF_SUCCESS) {
2484 /* Configure output options and let the other side know
2485 * which ones we don't like. */
2487 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2488 result = L2CAP_CONF_UNACCEPT;
2491 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2493 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2496 case L2CAP_MODE_BASIC:
2497 pi->fcs = L2CAP_FCS_NONE;
2498 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2501 case L2CAP_MODE_ERTM:
2502 pi->remote_tx_win = rfc.txwin_size;
2503 pi->remote_max_tx = rfc.max_transmit;
/*
 * NOTE(review): endianness bugs in the clamp below. rfc.max_pdu_size is
 * a little-endian wire field, yet it is compared against the host-order
 * value (conn->mtu - 10) without le16_to_cpu, and the host-order clamp
 * is stored back with le16_to_cpu where cpu_to_le16 is required.  Works
 * on little-endian machines only; fixed in later upstream revisions.
 */
2504 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2505 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2507 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * NOTE(review): the default timeouts are host-order constants being
 * stored into le16 wire fields -- these should be cpu_to_le16, not
 * le16_to_cpu.  Same little-endian-only bug as above.
 */
2509 rfc.retrans_timeout =
2510 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2511 rfc.monitor_timeout =
2512 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2514 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2516 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2517 sizeof(rfc), (unsigned long) &rfc);
2521 case L2CAP_MODE_STREAMING:
/* NOTE(review): same le16/host-order mixing as the ERTM branch above. */
2522 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2523 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2525 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2527 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2529 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2530 sizeof(rfc), (unsigned long) &rfc);
2535 result = L2CAP_CONF_UNACCEPT;
2537 memset(&rfc, 0, sizeof(rfc));
2538 rfc.mode = pi->mode;
2541 if (result == L2CAP_CONF_SUCCESS)
2542 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2544 rsp->scid = cpu_to_le16(pi->dcid);
2545 rsp->result = cpu_to_le16(result);
2546 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's CONF_RSP ('rsp', 'len' bytes of options) and build a
 * follow-up CONF_REQ into 'data' echoing the adjusted options.  An
 * unacceptable MTU is bumped to the minimum; an RFC option that changes
 * the mode after we locked it (STATE2_DEVICE) refuses the connection.
 * When the overall result is success, ERTM/STREAMING parameters from the
 * RFC option are committed to the channel.  Returns the new request
 * length (tail return is outside this extract).
 * NOTE(review): 'rfc' is only populated when the peer included an RFC
 * option; the success path reads rfc.* unconditionally -- upstream later
 * hardened this.  Confirm before reuse.
 */
2551 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2553 struct l2cap_pinfo *pi = l2cap_pi(sk);
2554 struct l2cap_conf_req *req = data;
2555 void *ptr = req->data;
2558 struct l2cap_conf_rfc rfc;
2560 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2562 while (len >= L2CAP_CONF_OPT_SIZE) {
2563 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2566 case L2CAP_CONF_MTU:
2567 if (val < L2CAP_DEFAULT_MIN_MTU) {
2568 *result = L2CAP_CONF_UNACCEPT;
2569 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2572 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2575 case L2CAP_CONF_FLUSH_TO:
2577 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2581 case L2CAP_CONF_RFC:
2582 if (olen == sizeof(rfc))
2583 memcpy(&rfc, (void *)val, olen);
2585 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2586 rfc.mode != pi->mode)
2587 return -ECONNREFUSED;
2589 pi->mode = rfc.mode;
2592 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2593 sizeof(rfc), (unsigned long) &rfc);
2598 if (*result == L2CAP_CONF_SUCCESS) {
2600 case L2CAP_MODE_ERTM:
2601 pi->remote_tx_win = rfc.txwin_size;
2602 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2603 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2604 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2606 case L2CAP_MODE_STREAMING:
2607 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2611 req->dcid = cpu_to_le16(pi->dcid);
2612 req->flags = cpu_to_le16(0x0000);
/*
 * Build a minimal CONF_RSP (no options) with the given result and
 * continuation flags; used for empty/continuation and reject responses.
 * Returns the response length (tail return is outside this extract).
 */
2617 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2619 struct l2cap_conf_rsp *rsp = data;
2620 void *ptr = rsp->data;
2622 BT_DBG("sk %p", sk);
2624 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2625 rsp->result = cpu_to_le16(result);
2626 rsp->flags = cpu_to_le16(flags);
/*
 * On a successful CONF_RSP, extract the RFC option (if any) from the
 * response and commit the negotiated ERTM/STREAMING parameters to the
 * channel.  No-op for basic-mode channels.
 * NOTE(review): like l2cap_parse_conf_rsp, 'rfc' is read after the loop
 * even when the peer sent no RFC option; later upstream code initializes
 * rfc with sane defaults -- confirm against the full source.
 */
2631 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2633 struct l2cap_pinfo *pi = l2cap_pi(sk);
2636 struct l2cap_conf_rfc rfc;
2638 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2640 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2643 while (len >= L2CAP_CONF_OPT_SIZE) {
2644 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2647 case L2CAP_CONF_RFC:
2648 if (olen == sizeof(rfc))
2649 memcpy(&rfc, (void *)val, olen);
2656 case L2CAP_MODE_ERTM:
2657 pi->remote_tx_win = rfc.txwin_size;
2658 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2659 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2660 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2662 case L2CAP_MODE_STREAMING:
2663 pi->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Handle an incoming L2CAP_COMMAND_REJ.  Only "command not understood"
 * (reason 0x0000) matching our outstanding info request is interesting:
 * treat it as "feature mask query unsupported", cancel the info timer,
 * mark feature discovery done and start any channels that were waiting
 * on it.
 */
2667 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2669 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2671 if (rej->reason != 0x0000)
2674 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2675 cmd->ident == conn->info_ident) {
2676 del_timer(&conn->info_timer);
2678 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2679 conn->info_ident = 0;
2681 l2cap_conn_start(conn);
/*
 * Handle an incoming L2CAP_CONN_REQ: find a listener on the PSM, enforce
 * link security (except for SDP, PSM 0x0001), check the accept backlog,
 * allocate a child socket, register it in the channel list (rejecting a
 * duplicate remote CID), and answer with success/pending/refused
 * depending on security state and defer_setup.  If the peer's features
 * are still unknown, a feature-mask INFO_REQ is kicked off and the
 * response stays "pending, no info".
 * NOTE(review): lossy extract -- bh_lock_sock calls, goto labels
 * (sendresp/response) and several closing braces are not visible here.
 */
2687 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2689 struct l2cap_chan_list *list = &conn->chan_list;
2690 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2691 struct l2cap_conn_rsp rsp;
2692 struct sock *sk, *parent;
2693 int result, status = L2CAP_CS_NO_INFO;
2695 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2696 __le16 psm = req->psm;
2698 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2700 /* Check if we have socket listening on psm */
2701 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2703 result = L2CAP_CR_BAD_PSM;
2707 /* Check if the ACL is secure enough (if not SDP) */
2708 if (psm != cpu_to_le16(0x0001) &&
2709 !hci_conn_check_link_mode(conn->hcon)) {
2710 conn->disc_reason = 0x05; /* HCI reason: authentication failure */
2711 result = L2CAP_CR_SEC_BLOCK;
2715 result = L2CAP_CR_NO_MEM;
2717 /* Check for backlog size */
2718 if (sk_acceptq_is_full(parent)) {
2719 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2723 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2727 write_lock_bh(&list->lock);
2729 /* Check if we already have channel with that dcid */
2730 if (__l2cap_get_chan_by_dcid(list, scid)) {
2731 write_unlock_bh(&list->lock);
2732 sock_set_flag(sk, SOCK_ZAPPED);
2733 l2cap_sock_kill(sk);
2737 hci_conn_hold(conn->hcon);
2739 l2cap_sock_init(sk, parent);
2740 bacpy(&bt_sk(sk)->src, conn->src);
2741 bacpy(&bt_sk(sk)->dst, conn->dst);
2742 l2cap_pi(sk)->psm = psm;
/* The peer's source CID is our destination CID. */
2743 l2cap_pi(sk)->dcid = scid;
2745 __l2cap_chan_add(conn, sk, parent);
2746 dcid = l2cap_pi(sk)->scid;
2748 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2750 l2cap_pi(sk)->ident = cmd->ident;
2752 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2753 if (l2cap_check_security(sk)) {
2754 if (bt_sk(sk)->defer_setup) {
2755 sk->sk_state = BT_CONNECT2;
2756 result = L2CAP_CR_PEND;
2757 status = L2CAP_CS_AUTHOR_PEND;
2758 parent->sk_data_ready(parent, 0);
2760 sk->sk_state = BT_CONFIG;
2761 result = L2CAP_CR_SUCCESS;
2762 status = L2CAP_CS_NO_INFO;
2765 sk->sk_state = BT_CONNECT2;
2766 result = L2CAP_CR_PEND;
2767 status = L2CAP_CS_AUTHEN_PEND;
2770 sk->sk_state = BT_CONNECT2;
2771 result = L2CAP_CR_PEND;
2772 status = L2CAP_CS_NO_INFO;
2775 write_unlock_bh(&list->lock);
2778 bh_unlock_sock(parent);
2781 rsp.scid = cpu_to_le16(scid);
2782 rsp.dcid = cpu_to_le16(dcid);
2783 rsp.result = cpu_to_le16(result);
2784 rsp.status = cpu_to_le16(status);
2785 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2787 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2788 struct l2cap_info_req info;
2789 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2791 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2792 conn->info_ident = l2cap_get_ident(conn);
2794 mod_timer(&conn->info_timer, jiffies +
2795 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2797 l2cap_send_cmd(conn, conn->info_ident,
2798 L2CAP_INFO_REQ, sizeof(info), &info);
/*
 * Handle an incoming L2CAP_CONN_RSP to our connect request.  The channel
 * is looked up by our source CID, or -- if the peer replied before
 * assigning one (pending) -- by the request ident.  Success moves to
 * BT_CONFIG, records the peer CID and immediately sends the first
 * CONF_REQ; pending just marks CONNECT_PEND; anything else tears the
 * channel down with ECONNREFUSED.
 * NOTE(review): lossy extract -- the L2CAP_CR_PEND case label, bh_unlock
 * and the char buf[] holding the conf request are not visible.
 */
2804 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2806 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2807 u16 scid, dcid, result, status;
2811 scid = __le16_to_cpu(rsp->scid);
2812 dcid = __le16_to_cpu(rsp->dcid);
2813 result = __le16_to_cpu(rsp->result);
2814 status = __le16_to_cpu(rsp->status);
2816 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2819 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2823 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2829 case L2CAP_CR_SUCCESS:
2830 sk->sk_state = BT_CONFIG;
2831 l2cap_pi(sk)->ident = 0;
2832 l2cap_pi(sk)->dcid = dcid;
2833 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2835 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2837 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2838 l2cap_build_conf_req(sk, req), req);
2839 l2cap_pi(sk)->num_conf_req++;
2843 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2847 l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Handle an incoming L2CAP_CONF_REQ.  Request fragments (flag bit 0 set)
 * are accumulated into pi->conf_req -- oversized accumulations are
 * rejected -- and answered with an empty continuation response.  On the
 * final fragment the full request is parsed, the response sent, and if
 * both config directions are done the channel goes BT_CONNECTED (with
 * FCS resolution and ERTM init).  If we have not yet sent our own
 * CONF_REQ, send it now.
 * NOTE(review): lossy extract -- bh_lock/unlock, several gotos and the
 * rsp/buf declarations are not visible here.
 */
2855 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2857 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2863 dcid = __le16_to_cpu(req->dcid);
2864 flags = __le16_to_cpu(req->flags);
2866 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2868 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2872 if (sk->sk_state == BT_DISCONN)
2875 /* Reject if config buffer is too small. */
2876 len = cmd_len - sizeof(*req);
2877 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2878 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2879 l2cap_build_conf_rsp(sk, rsp,
2880 L2CAP_CONF_REJECT, flags), rsp);
2885 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2886 l2cap_pi(sk)->conf_len += len;
2888 if (flags & 0x0001) {
2889 /* Incomplete config. Send empty response. */
2890 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2891 l2cap_build_conf_rsp(sk, rsp,
2892 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2896 /* Complete config. */
2897 len = l2cap_parse_conf_req(sk, rsp);
2899 l2cap_send_disconn_req(conn, sk);
2903 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2904 l2cap_pi(sk)->num_conf_rsp++;
2906 /* Reset config buffer. */
2907 l2cap_pi(sk)->conf_len = 0;
2909 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2912 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
/* Default to CRC16 unless both sides agreed to drop the FCS. */
2913 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2914 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2915 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2917 sk->sk_state = BT_CONNECTED;
2919 l2cap_pi(sk)->next_tx_seq = 0;
2920 l2cap_pi(sk)->expected_tx_seq = 0;
2921 __skb_queue_head_init(TX_QUEUE(sk));
2922 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2923 l2cap_ertm_init(sk);
2925 l2cap_chan_ready(sk);
2929 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2931 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2932 l2cap_build_conf_req(sk, buf), buf);
2933 l2cap_pi(sk)->num_conf_req++;
/*
 * Handle an incoming L2CAP_CONF_RSP to our config request.  SUCCESS
 * commits the peer's RFC parameters; UNACCEPT triggers a renegotiated
 * CONF_REQ (bounded by L2CAP_CONF_MAX_CONF_RSP rounds and the req buffer
 * size); anything else disconnects.  When both directions are done the
 * channel goes BT_CONNECTED, with FCS resolution and ERTM init, exactly
 * mirroring l2cap_config_req.
 * NOTE(review): "int len = cmd->len - sizeof(*rsp)" does arithmetic on
 * the __le16 wire field without le16_to_cpu -- wrong on big-endian;
 * later upstream code converts first.  Also cmd_len is not passed in
 * here, unlike l2cap_config_req.
 * NOTE(review): lossy extract -- bh_lock/unlock, the char req[64]
 * declaration and goto done/unlock labels are not visible.
 */
2941 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2943 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2944 u16 scid, flags, result;
2946 int len = cmd->len - sizeof(*rsp);
2948 scid = __le16_to_cpu(rsp->scid);
2949 flags = __le16_to_cpu(rsp->flags);
2950 result = __le16_to_cpu(rsp->result);
2952 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2953 scid, flags, result);
2955 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2960 case L2CAP_CONF_SUCCESS:
2961 l2cap_conf_rfc_get(sk, rsp->data, len);
2964 case L2CAP_CONF_UNACCEPT:
2965 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2968 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2969 l2cap_send_disconn_req(conn, sk);
2973 /* throw out any old stored conf requests */
2974 result = L2CAP_CONF_SUCCESS;
2975 len = l2cap_parse_conf_rsp(sk, rsp->data,
2978 l2cap_send_disconn_req(conn, sk);
2982 l2cap_send_cmd(conn, l2cap_get_ident(conn),
2983 L2CAP_CONF_REQ, len, req);
2984 l2cap_pi(sk)->num_conf_req++;
2985 if (result != L2CAP_CONF_SUCCESS)
2991 sk->sk_state = BT_DISCONN;
2992 sk->sk_err = ECONNRESET;
/* Give the disconnect handshake 5 seconds before forcing teardown. */
2993 l2cap_sock_set_timer(sk, HZ * 5);
2994 l2cap_send_disconn_req(conn, sk);
3001 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3003 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3004 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3005 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3006 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3008 sk->sk_state = BT_CONNECTED;
3009 l2cap_pi(sk)->next_tx_seq = 0;
3010 l2cap_pi(sk)->expected_tx_seq = 0;
3011 __skb_queue_head_init(TX_QUEUE(sk));
3012 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3013 l2cap_ertm_init(sk);
3015 l2cap_chan_ready(sk);
/*
 * Handle an incoming L2CAP_DISCONN_REQ: acknowledge with a DISCONN_RSP,
 * mark the socket fully shut down, flush the TX queue (plus SREJ queue
 * and the three ERTM timers when in ERTM mode), remove the channel with
 * ECONNRESET and kill the socket if it is orphaned.
 */
3023 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3025 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3026 struct l2cap_disconn_rsp rsp;
3030 scid = __le16_to_cpu(req->scid);
3031 dcid = __le16_to_cpu(req->dcid);
3033 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* The peer's dcid is our scid -- look the channel up by it. */
3035 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3039 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3040 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3041 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3043 sk->sk_shutdown = SHUTDOWN_MASK;
3045 skb_queue_purge(TX_QUEUE(sk));
3047 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3048 skb_queue_purge(SREJ_QUEUE(sk));
3049 del_timer(&l2cap_pi(sk)->retrans_timer);
3050 del_timer(&l2cap_pi(sk)->monitor_timer);
3051 del_timer(&l2cap_pi(sk)->ack_timer);
3054 l2cap_chan_del(sk, ECONNRESET);
3057 l2cap_sock_kill(sk);
/*
 * Handle the peer's L2CAP_DISCONN_RSP to our disconnect request: same
 * teardown as the request path (purge queues, stop ERTM timers) but the
 * channel is removed with error 0 since the close was locally initiated.
 */
3061 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3063 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3067 scid = __le16_to_cpu(rsp->scid);
3068 dcid = __le16_to_cpu(rsp->dcid);
3070 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3072 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3076 skb_queue_purge(TX_QUEUE(sk));
3078 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3079 skb_queue_purge(SREJ_QUEUE(sk));
3080 del_timer(&l2cap_pi(sk)->retrans_timer);
3081 del_timer(&l2cap_pi(sk)->monitor_timer);
3082 del_timer(&l2cap_pi(sk)->ack_timer);
3085 l2cap_chan_del(sk, 0);
3088 l2cap_sock_kill(sk);
/*
 * Handle an incoming L2CAP_INFO_REQ.  Three cases: feature mask (our
 * static mask, optionally extended with ERTM/STREAMING/FCS bits),
 * fixed-channel map (copied from l2cap_fixed_chan), or NOTSUPP for any
 * other info type.
 * NOTE(review): lossy extract -- the u8 buf[] declarations sized for
 * header+payload, and the enable_ertm guard around the feature-bit OR,
 * are not visible here.
 */
3092 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3094 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3097 type = __le16_to_cpu(req->type);
3099 BT_DBG("type 0x%4.4x", type);
3101 if (type == L2CAP_IT_FEAT_MASK) {
3103 u32 feat_mask = l2cap_feat_mask;
3104 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3105 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3106 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3108 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3110 put_unaligned_le32(feat_mask, rsp->data);
3111 l2cap_send_cmd(conn, cmd->ident,
3112 L2CAP_INFO_RSP, sizeof(buf), buf);
3113 } else if (type == L2CAP_IT_FIXED_CHAN) {
3115 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3116 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3117 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 4-byte rsp header, then the 8-byte fixed-channel bitmap. */
3118 memcpy(buf + 4, l2cap_fixed_chan, 8);
3119 l2cap_send_cmd(conn, cmd->ident,
3120 L2CAP_INFO_RSP, sizeof(buf), buf);
3122 struct l2cap_info_rsp rsp;
3123 rsp.type = cpu_to_le16(type);
3124 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3125 l2cap_send_cmd(conn, cmd->ident,
3126 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3132 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3134 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3137 type = __le16_to_cpu(rsp->type);
3138 result = __le16_to_cpu(rsp->result);
3140 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3142 del_timer(&conn->info_timer);
3144 if (type == L2CAP_IT_FEAT_MASK) {
3145 conn->feat_mask = get_unaligned_le32(rsp->data);
3147 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3148 struct l2cap_info_req req;
3149 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3151 conn->info_ident = l2cap_get_ident(conn);
3153 l2cap_send_cmd(conn, conn->info_ident,
3154 L2CAP_INFO_REQ, sizeof(req), &req);
3156 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3157 conn->info_ident = 0;
3159 l2cap_conn_start(conn);
3161 } else if (type == L2CAP_IT_FIXED_CHAN) {
3162 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3163 conn->info_ident = 0;
3165 l2cap_conn_start(conn);
/* Parse and dispatch every signalling command (CID 0x0001) contained in one
 * incoming C-frame.  Each command is an l2cap_cmd_hdr followed by cmd_len
 * payload bytes; several commands may be packed into a single skb.
 * NOTE(review): this listing is elided (source line numbers jump), so the
 * switch scaffolding, break statements and cleanup lines are not visible.
 */
3171 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3173 u8 *data = skb->data;
3175 struct l2cap_cmd_hdr cmd;
/* Hand raw (sniffer) sockets a copy of the signalling traffic first. */
3178 l2cap_raw_recv(conn, skb);
3180 while (len >= L2CAP_CMD_HDR_SIZE) {
3182 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3183 data += L2CAP_CMD_HDR_SIZE;
3184 len -= L2CAP_CMD_HDR_SIZE;
3186 cmd_len = le16_to_cpu(cmd.len);
3188 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* A command claiming more payload than remains, or carrying the reserved
 * ident 0, is malformed — stop parsing this frame. */
3190 if (cmd_len > len || !cmd.ident) {
3191 BT_DBG("corrupted command");
3196 case L2CAP_COMMAND_REJ:
3197 l2cap_command_rej(conn, &cmd, data);
3200 case L2CAP_CONN_REQ:
3201 err = l2cap_connect_req(conn, &cmd, data);
3204 case L2CAP_CONN_RSP:
3205 err = l2cap_connect_rsp(conn, &cmd, data);
3208 case L2CAP_CONF_REQ:
3209 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3212 case L2CAP_CONF_RSP:
3213 err = l2cap_config_rsp(conn, &cmd, data);
3216 case L2CAP_DISCONN_REQ:
3217 err = l2cap_disconnect_req(conn, &cmd, data);
3220 case L2CAP_DISCONN_RSP:
3221 err = l2cap_disconnect_rsp(conn, &cmd, data);
/* Echo request: mirror the payload straight back to the peer. */
3224 case L2CAP_ECHO_REQ:
3225 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3228 case L2CAP_ECHO_RSP:
3231 case L2CAP_INFO_REQ:
3232 err = l2cap_information_req(conn, &cmd, data);
3235 case L2CAP_INFO_RSP:
3236 err = l2cap_information_rsp(conn, &cmd, data);
3240 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
/* Any handler error is answered with a Command Reject. */
3246 struct l2cap_cmd_rej rej;
3247 BT_DBG("error %d", err);
3249 /* FIXME: Map err to a valid reason */
3250 rej.reason = cpu_to_le16(0);
3251 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3261 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3263 u16 our_fcs, rcv_fcs;
3264 int hdr_size = L2CAP_HDR_SIZE + 2;
3266 if (pi->fcs == L2CAP_FCS_CRC16) {
3267 skb_trim(skb, skb->len - 2);
3268 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3269 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3271 if (our_fcs != rcv_fcs)
3277 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3279 struct l2cap_pinfo *pi = l2cap_pi(sk);
3282 pi->frames_sent = 0;
3283 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3285 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3287 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3288 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3289 l2cap_send_sframe(pi, control);
3290 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3293 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3294 __mod_retrans_timer();
3296 l2cap_ertm_send(sk);
3298 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3299 pi->frames_sent == 0) {
3300 control |= L2CAP_SUPER_RCV_READY;
3301 l2cap_send_sframe(pi, control);
3305 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3307 struct sk_buff *next_skb;
3309 bt_cb(skb)->tx_seq = tx_seq;
3310 bt_cb(skb)->sar = sar;
3312 next_skb = skb_peek(SREJ_QUEUE(sk));
3314 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3319 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3320 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3324 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3327 } while((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3329 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Reassemble a segmented SDU from incoming I-frame payloads according to
 * the SAR bits in the control field, delivering complete SDUs to the
 * socket receive queue.
 * NOTE(review): this listing is elided (error paths, kfree_skb calls,
 * break/return statements and the L2CAP_SDU_END case label are missing).
 * The results of bt_skb_alloc() and skb_clone() are used without visible
 * NULL checks — verify against the full file.
 */
3332 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3334 struct l2cap_pinfo *pi = l2cap_pi(sk);
3335 struct sk_buff *_skb;
3338 switch (control & L2CAP_CTRL_SAR) {
/* Unsegmented SDU: must not arrive in the middle of a reassembly. */
3339 case L2CAP_SDU_UNSEGMENTED:
3340 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3345 err = sock_queue_rcv_skb(sk, skb);
/* Start of a segmented SDU: the first two payload bytes carry the
 * total SDU length. */
3351 case L2CAP_SDU_START:
3352 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3357 pi->sdu_len = get_unaligned_le16(skb->data);
/* An SDU larger than the negotiated incoming MTU is a protocol error. */
3360 if (pi->sdu_len > pi->imtu) {
3365 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3371 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3373 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3374 pi->partial_sdu_len = skb->len;
/* Continuation segment: only valid while a reassembly is in progress. */
3378 case L2CAP_SDU_CONTINUE:
3379 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3382 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3384 pi->partial_sdu_len += skb->len;
/* More data than the announced SDU length is a protocol error. */
3385 if (pi->partial_sdu_len > pi->sdu_len)
/* (Elided L2CAP_SDU_END case follows.) */
3393 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3396 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3398 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3399 pi->partial_sdu_len += skb->len;
3401 if (pi->partial_sdu_len > pi->imtu)
/* Deliver only when the accumulated length matches the announced one. */
3404 if (pi->partial_sdu_len == pi->sdu_len) {
3405 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3406 err = sock_queue_rcv_skb(sk, _skb);
3421 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3423 struct sk_buff *skb;
3426 while((skb = skb_peek(SREJ_QUEUE(sk)))) {
3427 if (bt_cb(skb)->tx_seq != tx_seq)
3430 skb = skb_dequeue(SREJ_QUEUE(sk));
3431 control |= bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3432 l2cap_sar_reassembly_sdu(sk, skb, control);
3433 l2cap_pi(sk)->buffer_seq_srej =
3434 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3439 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3441 struct l2cap_pinfo *pi = l2cap_pi(sk);
3442 struct srej_list *l, *tmp;
3445 list_for_each_entry_safe(l,tmp, SREJ_LIST(sk), list) {
3446 if (l->tx_seq == tx_seq) {
3451 control = L2CAP_SUPER_SELECT_REJECT;
3452 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3453 l2cap_send_sframe(pi, control);
3455 list_add_tail(&l->list, SREJ_LIST(sk));
3459 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3461 struct l2cap_pinfo *pi = l2cap_pi(sk);
3462 struct srej_list *new;
3465 while (tx_seq != pi->expected_tx_seq) {
3466 control = L2CAP_SUPER_SELECT_REJECT;
3467 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3468 l2cap_send_sframe(pi, control);
3470 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3471 new->tx_seq = pi->expected_tx_seq++;
3472 list_add_tail(&new->list, SREJ_LIST(sk));
3474 pi->expected_tx_seq++;
/* Process one received ERTM I-frame: acknowledge outstanding frames via
 * its ReqSeq, handle the in-sequence / out-of-sequence (SREJ) cases, pass
 * payload to SAR reassembly, and schedule acknowledgements.
 * NOTE(review): this listing is elided (labels, braces, goto targets and
 * several statements are missing), so control flow annotated here is
 * partially inferred — verify against the full file.
 */
3477 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3479 struct l2cap_pinfo *pi = l2cap_pi(sk);
3480 u8 tx_seq = __get_txseq(rx_control);
3481 u8 req_seq = __get_reqseq(rx_control);
3482 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
/* Ack threshold derived from the negotiated transmit window. */
3483 int num_to_ack = (pi->tx_win/6) + 1;
3486 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
/* F-bit: the peer answered our poll; leave the WAIT_F state. */
3488 if (L2CAP_CTRL_FINAL & rx_control) {
3489 del_timer(&pi->monitor_timer);
3490 if (pi->unacked_frames > 0)
3491 __mod_retrans_timer();
3492 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
/* ReqSeq acknowledges our transmitted frames up to req_seq. */
3495 pi->expected_ack_seq = req_seq;
3496 l2cap_drop_acked_frames(sk);
3498 if (tx_seq == pi->expected_tx_seq)
/* Out-of-sequence frame while SREJ recovery is already active. */
3501 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3502 struct srej_list *first;
3504 first = list_first_entry(SREJ_LIST(sk),
3505 struct srej_list, list);
3506 if (tx_seq == first->tx_seq) {
3507 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3508 l2cap_check_srej_gap(sk, tx_seq);
3510 list_del(&first->list);
/* All requested retransmissions received: leave SREJ recovery. */
3513 if (list_empty(SREJ_LIST(sk))) {
3514 pi->buffer_seq = pi->buffer_seq_srej;
3515 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3519 struct srej_list *l;
3520 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
/* Duplicate of an already-requested frame: re-issue SREJs. */
3522 list_for_each_entry(l, SREJ_LIST(sk), list) {
3523 if (l->tx_seq == tx_seq) {
3524 l2cap_resend_srejframe(sk, tx_seq);
3528 l2cap_send_srejframe(sk, tx_seq);
/* First gap detected: enter SREJ recovery. */
3531 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3533 INIT_LIST_HEAD(SREJ_LIST(sk));
3534 pi->buffer_seq_srej = pi->buffer_seq;
3536 __skb_queue_head_init(SREJ_QUEUE(sk));
3537 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3539 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3541 l2cap_send_srejframe(sk, tx_seq);
/* In-sequence frame: advance the expected sequence number (mod 64). */
3546 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3548 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3549 bt_cb(skb)->tx_seq = tx_seq;
3550 bt_cb(skb)->sar = sar;
3551 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* F-bit on an in-sequence frame: retransmit if no REJ was pending. */
3555 if (rx_control & L2CAP_CTRL_FINAL) {
3556 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3557 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3559 if (!skb_queue_empty(TX_QUEUE(sk)))
3560 sk->sk_send_head = TX_QUEUE(sk)->next;
3561 pi->next_tx_seq = pi->expected_ack_seq;
3562 l2cap_ertm_send(sk);
3566 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3568 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
/* Send an acknowledgement every num_to_ack frames. */
3574 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3575 if (pi->num_acked == num_to_ack - 1)
3581 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3583 struct l2cap_pinfo *pi = l2cap_pi(sk);
3585 pi->expected_ack_seq = __get_reqseq(rx_control);
3586 l2cap_drop_acked_frames(sk);
3588 if (rx_control & L2CAP_CTRL_POLL) {
3589 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3590 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3591 (pi->unacked_frames > 0))
3592 __mod_retrans_timer();
3594 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3595 l2cap_send_srejtail(sk);
3597 l2cap_send_i_or_rr_or_rnr(sk);
3598 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3601 } else if (rx_control & L2CAP_CTRL_FINAL) {
3602 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3604 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3605 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3607 if (!skb_queue_empty(TX_QUEUE(sk)))
3608 sk->sk_send_head = TX_QUEUE(sk)->next;
3609 pi->next_tx_seq = pi->expected_ack_seq;
3610 l2cap_ertm_send(sk);
3614 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3615 (pi->unacked_frames > 0))
3616 __mod_retrans_timer();
3618 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3619 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3622 l2cap_ertm_send(sk);
3626 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3628 struct l2cap_pinfo *pi = l2cap_pi(sk);
3629 u8 tx_seq = __get_reqseq(rx_control);
3631 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3633 pi->expected_ack_seq = tx_seq;
3634 l2cap_drop_acked_frames(sk);
3636 if (rx_control & L2CAP_CTRL_FINAL) {
3637 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3638 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3640 if (!skb_queue_empty(TX_QUEUE(sk)))
3641 sk->sk_send_head = TX_QUEUE(sk)->next;
3642 pi->next_tx_seq = pi->expected_ack_seq;
3643 l2cap_ertm_send(sk);
3646 if (!skb_queue_empty(TX_QUEUE(sk)))
3647 sk->sk_send_head = TX_QUEUE(sk)->next;
3648 pi->next_tx_seq = pi->expected_ack_seq;
3649 l2cap_ertm_send(sk);
3651 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3652 pi->srej_save_reqseq = tx_seq;
3653 pi->conn_state |= L2CAP_CONN_REJ_ACT;
3657 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3659 struct l2cap_pinfo *pi = l2cap_pi(sk);
3660 u8 tx_seq = __get_reqseq(rx_control);
3662 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3664 if (rx_control & L2CAP_CTRL_POLL) {
3665 pi->expected_ack_seq = tx_seq;
3666 l2cap_drop_acked_frames(sk);
3667 l2cap_retransmit_frame(sk, tx_seq);
3668 l2cap_ertm_send(sk);
3669 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3670 pi->srej_save_reqseq = tx_seq;
3671 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3673 } else if (rx_control & L2CAP_CTRL_FINAL) {
3674 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3675 pi->srej_save_reqseq == tx_seq)
3676 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3678 l2cap_retransmit_frame(sk, tx_seq);
3680 l2cap_retransmit_frame(sk, tx_seq);
3681 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3682 pi->srej_save_reqseq = tx_seq;
3683 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3688 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3690 struct l2cap_pinfo *pi = l2cap_pi(sk);
3691 u8 tx_seq = __get_reqseq(rx_control);
3693 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3694 pi->expected_ack_seq = tx_seq;
3695 l2cap_drop_acked_frames(sk);
3697 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
3698 del_timer(&pi->retrans_timer);
3699 if (rx_control & L2CAP_CTRL_POLL) {
3700 u16 control = L2CAP_CTRL_FINAL;
3701 l2cap_send_rr_or_rnr(pi, control);
3706 if (rx_control & L2CAP_CTRL_POLL)
3707 l2cap_send_srejtail(sk);
3709 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
3712 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3714 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3716 if (L2CAP_CTRL_FINAL & rx_control) {
3717 del_timer(&l2cap_pi(sk)->monitor_timer);
3718 if (l2cap_pi(sk)->unacked_frames > 0)
3719 __mod_retrans_timer();
3720 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3723 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3724 case L2CAP_SUPER_RCV_READY:
3725 l2cap_data_channel_rrframe(sk, rx_control);
3728 case L2CAP_SUPER_REJECT:
3729 l2cap_data_channel_rejframe(sk, rx_control);
3732 case L2CAP_SUPER_SELECT_REJECT:
3733 l2cap_data_channel_srejframe(sk, rx_control);
3736 case L2CAP_SUPER_RCV_NOT_READY:
3737 l2cap_data_channel_rnrframe(sk, rx_control);
/* Deliver an incoming data frame to the channel identified by cid,
 * handling it according to the channel mode (basic, ERTM, streaming).
 * NOTE(review): this listing is elided — the goto drop/done labels,
 * lock handling, len computation and return statements are not visible;
 * annotations on those paths are inferred and should be verified.
 */
3745 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3748 struct l2cap_pinfo *pi;
3752 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3754 BT_DBG("unknown cid 0x%4.4x", cid);
3760 BT_DBG("sk %p, len %d", sk, skb->len);
3762 if (sk->sk_state != BT_CONNECTED)
3766 case L2CAP_MODE_BASIC:
3767 /* If socket recv buffers overflows we drop data here
3768 * which is *bad* because L2CAP has to be reliable.
3769 * But we don't have any other choice. L2CAP doesn't
3770 * provide flow control mechanism. */
3772 if (pi->imtu < skb->len)
3775 if (!sock_queue_rcv_skb(sk, skb))
/* ERTM: strip the 16-bit control field, optional SAR length, and FCS
 * before validating and dispatching as I-frame or S-frame. */
3779 case L2CAP_MODE_ERTM:
3780 control = get_unaligned_le16(skb->data);
3784 if (__is_sar_start(control))
3787 if (pi->fcs == L2CAP_FCS_CRC16)
3791 * We can just drop the corrupted I-frame here.
3792 * Receiver will miss it and start proper recovery
3793 * procedures and ask retransmission.
3798 if (l2cap_check_fcs(pi, skb))
3801 if (__is_iframe(control)) {
3805 l2cap_data_channel_iframe(sk, control, skb);
3810 l2cap_data_channel_sframe(sk, control, skb);
/* Streaming mode: no retransmission — out-of-sequence frames simply
 * resynchronise the expected sequence number. */
3815 case L2CAP_MODE_STREAMING:
3816 control = get_unaligned_le16(skb->data);
3820 if (__is_sar_start(control))
3823 if (pi->fcs == L2CAP_FCS_CRC16)
3826 if (len > pi->mps || len < 4 || __is_sframe(control))
3829 if (l2cap_check_fcs(pi, skb))
3832 tx_seq = __get_txseq(control);
3834 if (pi->expected_tx_seq == tx_seq)
3835 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3837 pi->expected_tx_seq = (tx_seq + 1) % 64;
3839 l2cap_sar_reassembly_sdu(sk, skb, control);
3844 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
3858 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3862 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3866 BT_DBG("sk %p, len %d", sk, skb->len);
3868 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3871 if (l2cap_pi(sk)->imtu < skb->len)
3874 if (!sock_queue_rcv_skb(sk, skb))
3886 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3888 struct l2cap_hdr *lh = (void *) skb->data;
3892 skb_pull(skb, L2CAP_HDR_SIZE);
3893 cid = __le16_to_cpu(lh->cid);
3894 len = __le16_to_cpu(lh->len);
3896 if (len != skb->len) {
3901 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3904 case L2CAP_CID_SIGNALING:
3905 l2cap_sig_channel(conn, skb);
3908 case L2CAP_CID_CONN_LESS:
3909 psm = get_unaligned_le16(skb->data);
3911 l2cap_conless_channel(conn, psm, skb);
3915 l2cap_data_channel(conn, cid, skb);
3920 /* ---- L2CAP interface with lower layer (HCI) ---- */
3922 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3924 int exact = 0, lm1 = 0, lm2 = 0;
3925 register struct sock *sk;
3926 struct hlist_node *node;
3928 if (type != ACL_LINK)
3931 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3933 /* Find listening sockets and check their link_mode */
3934 read_lock(&l2cap_sk_list.lock);
3935 sk_for_each(sk, node, &l2cap_sk_list.head) {
3936 if (sk->sk_state != BT_LISTEN)
3939 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3940 lm1 |= HCI_LM_ACCEPT;
3941 if (l2cap_pi(sk)->role_switch)
3942 lm1 |= HCI_LM_MASTER;
3944 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3945 lm2 |= HCI_LM_ACCEPT;
3946 if (l2cap_pi(sk)->role_switch)
3947 lm2 |= HCI_LM_MASTER;
3950 read_unlock(&l2cap_sk_list.lock);
3952 return exact ? lm1 : lm2;
3955 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3957 struct l2cap_conn *conn;
3959 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3961 if (hcon->type != ACL_LINK)
3965 conn = l2cap_conn_add(hcon, status);
3967 l2cap_conn_ready(conn);
3969 l2cap_conn_del(hcon, bt_err(status));
3974 static int l2cap_disconn_ind(struct hci_conn *hcon)
3976 struct l2cap_conn *conn = hcon->l2cap_data;
3978 BT_DBG("hcon %p", hcon);
3980 if (hcon->type != ACL_LINK || !conn)
3983 return conn->disc_reason;
3986 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
3988 BT_DBG("hcon %p reason %d", hcon, reason);
3990 if (hcon->type != ACL_LINK)
3993 l2cap_conn_del(hcon, bt_err(reason));
3998 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4000 if (sk->sk_type != SOCK_SEQPACKET)
4003 if (encrypt == 0x00) {
4004 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4005 l2cap_sock_clear_timer(sk);
4006 l2cap_sock_set_timer(sk, HZ * 5);
4007 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4008 __l2cap_sock_close(sk, ECONNREFUSED);
4010 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4011 l2cap_sock_clear_timer(sk);
/* HCI callback: the security (authentication/encryption) procedure on this
 * link finished.  Walk every channel on the connection and advance sockets
 * that were waiting on security: established sockets get an encryption
 * check, BT_CONNECT sockets send their Connection Request, and BT_CONNECT2
 * sockets answer the peer's pending request.
 * NOTE(review): this listing is elided — per-socket locking, the
 * CONNECT_PEND handling body and the success/failure test that selects
 * BT_CONFIG vs BT_DISCONN are not visible; verify against the full file.
 */
4015 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4017 struct l2cap_chan_list *l;
4018 struct l2cap_conn *conn = hcon->l2cap_data;
4024 l = &conn->chan_list;
4026 BT_DBG("conn %p", conn);
4028 read_lock(&l->lock);
4030 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
4033 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
/* Established or configuring channel: only re-check encryption. */
4038 if (!status && (sk->sk_state == BT_CONNECTED ||
4039 sk->sk_state == BT_CONFIG)) {
4040 l2cap_check_encryption(sk, encrypt);
/* Outgoing channel waiting on security: now send the request. */
4045 if (sk->sk_state == BT_CONNECT) {
4047 struct l2cap_conn_req req;
4048 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4049 req.psm = l2cap_pi(sk)->psm;
4051 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4053 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4054 L2CAP_CONN_REQ, sizeof(req), &req);
4056 l2cap_sock_clear_timer(sk);
4057 l2cap_sock_set_timer(sk, HZ / 10);
/* Incoming channel: answer the peer's pending Connection Request. */
4059 } else if (sk->sk_state == BT_CONNECT2) {
4060 struct l2cap_conn_rsp rsp;
4064 sk->sk_state = BT_CONFIG;
4065 result = L2CAP_CR_SUCCESS;
4067 sk->sk_state = BT_DISCONN;
4068 l2cap_sock_set_timer(sk, HZ / 10);
4069 result = L2CAP_CR_SEC_BLOCK;
4072 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4073 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4074 rsp.result = cpu_to_le16(result);
4075 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4076 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4077 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4083 read_unlock(&l->lock);
/* HCI callback: receive one ACL data packet and reassemble fragmented
 * L2CAP frames.  ACL_START fragments carry the L2CAP header (giving the
 * expected total length); continuation fragments are appended to
 * conn->rx_skb until rx_len reaches zero, at which point the complete
 * frame goes to l2cap_recv_frame().
 * NOTE(review): this listing is elided — goto targets, `drop:` label,
 * kfree_skb/return statements and some length checks are not visible.
 */
4088 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4090 struct l2cap_conn *conn = hcon->l2cap_data;
4092 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4095 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4097 if (flags & ACL_START) {
4098 struct l2cap_hdr *hdr;
/* A start fragment while a reassembly is pending means the previous
 * frame was truncated: discard the partial frame. */
4102 BT_ERR("Unexpected start frame (len %d)", skb->len);
4103 kfree_skb(conn->rx_skb);
4104 conn->rx_skb = NULL;
4106 l2cap_conn_unreliable(conn, ECOMM);
/* A start fragment must at least contain the L2CAP header. */
4110 BT_ERR("Frame is too short (len %d)", skb->len);
4111 l2cap_conn_unreliable(conn, ECOMM);
4115 hdr = (struct l2cap_hdr *) skb->data;
4116 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4118 if (len == skb->len) {
4119 /* Complete frame received */
4120 l2cap_recv_frame(conn, skb);
4124 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4126 if (skb->len > len) {
4127 BT_ERR("Frame is too long (len %d, expected len %d)",
4129 l2cap_conn_unreliable(conn, ECOMM);
4133 /* Allocate skb for the complete frame (with header) */
4134 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4138 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4140 conn->rx_len = len - skb->len;
/* Continuation fragment path. */
4142 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4144 if (!conn->rx_len) {
4145 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4146 l2cap_conn_unreliable(conn, ECOMM);
4150 if (skb->len > conn->rx_len) {
4151 BT_ERR("Fragment is too long (len %d, expected %d)",
4152 skb->len, conn->rx_len);
4153 kfree_skb(conn->rx_skb);
4154 conn->rx_skb = NULL;
4156 l2cap_conn_unreliable(conn, ECOMM);
4160 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4162 conn->rx_len -= skb->len;
4164 if (!conn->rx_len) {
4165 /* Complete frame received */
4166 l2cap_recv_frame(conn, conn->rx_skb);
4167 conn->rx_skb = NULL;
4176 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4179 struct hlist_node *node;
4181 read_lock_bh(&l2cap_sk_list.lock);
4183 sk_for_each(sk, node, &l2cap_sk_list.head) {
4184 struct l2cap_pinfo *pi = l2cap_pi(sk);
4186 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4187 batostr(&bt_sk(sk)->src),
4188 batostr(&bt_sk(sk)->dst),
4189 sk->sk_state, __le16_to_cpu(pi->psm),
4191 pi->imtu, pi->omtu, pi->sec_level);
4194 read_unlock_bh(&l2cap_sk_list.lock);
4199 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4201 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the "l2cap" debugfs entry; backed by seq_file. */
4204 static const struct file_operations l2cap_debugfs_fops = {
4205 .open = l2cap_debugfs_open,
4207 .llseek = seq_lseek,
4208 .release = single_release,
/* Dentry of the "l2cap" debugfs file; created in l2cap_init(),
 * removed in l2cap_exit(). */
4211 static struct dentry *l2cap_debugfs;
/* Socket-layer operations exposed to userspace for PF_BLUETOOTH/L2CAP
 * sockets; unsupported operations map to the sock_no_* stubs. */
4213 static const struct proto_ops l2cap_sock_ops = {
4214 .family = PF_BLUETOOTH,
4215 .owner = THIS_MODULE,
4216 .release = l2cap_sock_release,
4217 .bind = l2cap_sock_bind,
4218 .connect = l2cap_sock_connect,
4219 .listen = l2cap_sock_listen,
4220 .accept = l2cap_sock_accept,
4221 .getname = l2cap_sock_getname,
4222 .sendmsg = l2cap_sock_sendmsg,
4223 .recvmsg = l2cap_sock_recvmsg,
4224 .poll = bt_sock_poll,
4225 .ioctl = bt_sock_ioctl,
4226 .mmap = sock_no_mmap,
4227 .socketpair = sock_no_socketpair,
4228 .shutdown = l2cap_sock_shutdown,
4229 .setsockopt = l2cap_sock_setsockopt,
4230 .getsockopt = l2cap_sock_getsockopt
/* Registration hook so socket(PF_BLUETOOTH, ..., BTPROTO_L2CAP)
 * creates L2CAP sockets. */
4233 static const struct net_proto_family l2cap_sock_family_ops = {
4234 .family = PF_BLUETOOTH,
4235 .owner = THIS_MODULE,
4236 .create = l2cap_sock_create,
/* HCI protocol descriptor: callbacks invoked by the HCI core for
 * connection lifecycle, security results and incoming ACL data. */
4239 static struct hci_proto l2cap_hci_proto = {
4241 .id = HCI_PROTO_L2CAP,
4242 .connect_ind = l2cap_connect_ind,
4243 .connect_cfm = l2cap_connect_cfm,
4244 .disconn_ind = l2cap_disconn_ind,
4245 .disconn_cfm = l2cap_disconn_cfm,
4246 .security_cfm = l2cap_security_cfm,
4247 .recv_acldata = l2cap_recv_acldata
/* Module init: register the proto, the Bluetooth socket family entry and
 * the HCI protocol hooks, then create the debugfs file.  Registration is
 * unwound in reverse order on failure.
 * NOTE(review): this listing is elided — the error-label scaffolding and
 * return statements are not visible. */
4250 static int __init l2cap_init(void)
4254 err = proto_register(&l2cap_proto, 0);
4258 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4260 BT_ERR("L2CAP socket registration failed");
4264 err = hci_register_proto(&l2cap_hci_proto);
4266 BT_ERR("L2CAP protocol registration failed");
4267 bt_sock_unregister(BTPROTO_L2CAP);
/* debugfs failure is non-fatal: the protocol still works without it. */
4272 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4273 bt_debugfs, NULL, &l2cap_debugfs_fops);
4275 BT_ERR("Failed to create L2CAP debug file");
4278 BT_INFO("L2CAP ver %s", VERSION);
4279 BT_INFO("L2CAP socket layer initialized");
/* Error path: undo proto_register(). */
4284 proto_unregister(&l2cap_proto);
4288 static void __exit l2cap_exit(void)
4290 debugfs_remove(l2cap_debugfs);
4292 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4293 BT_ERR("L2CAP socket unregistration failed");
4295 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4296 BT_ERR("L2CAP protocol unregistration failed");
4298 proto_unregister(&l2cap_proto);
void l2cap_load(void)
{
	/* Dummy function to trigger automatic L2CAP module loading by
	 * other modules that use L2CAP sockets but don't use any other
	 * symbols from it. */
	return;
}
EXPORT_SYMBOL(l2cap_load);
4310 module_init(l2cap_init);
4311 module_exit(l2cap_exit);
/* Runtime-tunable module parameters (also settable via sysfs, 0644). */
4313 module_param(enable_ertm, bool, 0644);
4314 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4316 module_param(max_transmit, uint, 0644);
4317 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4319 module_param(tx_window, uint, 0644);
4320 MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");
4322 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4323 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4324 MODULE_VERSION(VERSION);
4325 MODULE_LICENSE("GPL");
/* Allows the Bluetooth core to auto-load this module by protocol id 0. */
4326 MODULE_ALIAS("bt-proto-0");