2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
/* Driver version string reported to userspace. */
56 #define VERSION "2.14"
/* ERTM (Enhanced Retransmission Mode) defaults on only when the extended
 * features config option is set.
 * NOTE(review): the #else/#endif bracketing these two definitions is not
 * visible in this excerpt -- confirm against the full file. */
58 #ifdef CONFIG_BT_L2CAP_EXT_FEATURES
59 static int enable_ertm = 1;
61 static int enable_ertm = 0;
/* Default ERTM retransmission limit and transmit window (see l2cap_sock_init). */
63 static int max_transmit = L2CAP_DEFAULT_MAX_TX;
64 static int tx_window = L2CAP_DEFAULT_TX_WINDOW;
/* Local feature mask and fixed-channel bitmap advertised in INFO responses. */
66 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
67 static u8 l2cap_fixed_chan[8] = { 0x02, };
69 static const struct proto_ops l2cap_sock_ops;
/* Global list of all L2CAP sockets, guarded by its rwlock. */
71 static struct bt_sock_list l2cap_sk_list = {
72 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for the socket teardown path and command builder. */
75 static void __l2cap_sock_close(struct sock *sk, int reason);
76 static void l2cap_sock_close(struct sock *sk);
77 static void l2cap_sock_kill(struct sock *sk);
79 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
80 u8 code, u8 ident, u16 dlen, void *data);
82 /* ---- L2CAP timers ---- */
/* sk_timer callback: the connection attempt or shutdown timed out.
 * Picks an errno based on the socket state and closes the channel.
 * NOTE(review): lines setting the default 'reason' (likely ETIMEDOUT) and the
 * bh_lock/unlock around the close are missing from this excerpt. */
83 static void l2cap_sock_timeout(unsigned long arg)
85 struct sock *sk = (struct sock *) arg;
88 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* A timeout while connected/configuring, or while connecting past the SDP
 * security stage, is reported to the user as ECONNREFUSED. */
92 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
93 reason = ECONNREFUSED;
94 else if (sk->sk_state == BT_CONNECT &&
95 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
96 reason = ECONNREFUSED;
100 __l2cap_sock_close(sk, reason);
/* Arm (or re-arm) the socket timer to fire 'timeout' jiffies from now. */
108 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
110 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
111 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if any. */
114 static void l2cap_sock_clear_timer(struct sock *sk)
116 BT_DBG("sock %p state %d", sk, sk->sk_state);
117 sk_stop_timer(sk, &sk->sk_timer);
120 /* ---- L2CAP channels ---- */
/* Linear scan of the connection's channel list for a matching destination CID.
 * Lock-free variant: callers are expected to hold l->lock (cf. the locked
 * wrappers below). */
121 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
124 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
125 if (l2cap_pi(s)->dcid == cid)
/* Linear scan for a matching source CID; caller holds l->lock. */
131 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
134 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
135 if (l2cap_pi(s)->scid == cid)
141 /* Find channel with given SCID.
142 * Returns locked socket */
/* Locked SCID lookup. NOTE(review): the read_lock() and the bh_lock_sock()
 * on the found socket are not visible in this excerpt -- the comment above
 * ("Returns locked socket") implies the full body locks 's' before return. */
143 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
147 s = __l2cap_get_chan_by_scid(l, cid);
150 read_unlock(&l->lock);
/* Find the channel whose pending signalling command used 'ident';
 * caller holds l->lock. */
154 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
157 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
158 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident (read_lock not visible
 * in this excerpt; mirrors l2cap_get_chan_by_scid). */
164 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
168 s = __l2cap_get_chan_by_ident(l, ident);
171 read_unlock(&l->lock);
/* Allocate the first free dynamic source CID in [DYN_START, DYN_END).
 * Caller must hold the channel-list lock so the scan stays consistent. */
175 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
177 u16 cid = L2CAP_CID_DYN_START;
179 for (; cid < L2CAP_CID_DYN_END; cid++) {
180 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the connection's doubly linked channel list.
 * Caller holds the list write lock (see l2cap_chan_add). */
187 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
192 l2cap_pi(l->head)->prev_c = sk;
194 l2cap_pi(sk)->next_c = l->head;
195 l2cap_pi(sk)->prev_c = NULL;
/* Remove sk from the connection's channel list, taking the write lock
 * itself (unlike __l2cap_chan_link). Fixes up neighbour links and -- in
 * lines not visible here -- presumably the list head when sk was first. */
199 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
201 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
203 write_lock_bh(&l->lock);
208 l2cap_pi(next)->prev_c = prev;
210 l2cap_pi(prev)->next_c = next;
211 write_unlock_bh(&l->lock);
/* Attach sk to conn: assign CIDs by socket type, link it into the channel
 * list and, for an incoming channel, queue it on the listening parent.
 * Caller holds the channel-list write lock (see l2cap_chan_add). */
216 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
218 struct l2cap_chan_list *l = &conn->chan_list;
220 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
221 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 = remote user terminated connection: default HCI disconnect reason. */
223 conn->disc_reason = 0x13;
225 l2cap_pi(sk)->conn = conn;
227 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
228 /* Alloc CID for connection-oriented socket */
229 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
230 } else if (sk->sk_type == SOCK_DGRAM) {
231 /* Connectionless socket */
232 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
233 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
236 /* Raw socket can send/recv signalling messages only */
237 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
238 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
239 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
242 __l2cap_chan_link(l, sk);
/* Incoming connection: make it visible to accept() on the parent. */
245 bt_accept_enqueue(parent, sk);
249 * Must be called on the locked socket. */
/* Detach sk from its connection and mark it closed with 'err'.
 * Must be called on the locked socket (per the comment above).  Unlinks
 * from the channel list, drops the hci_conn reference, and wakes either
 * the accepting parent or the socket's own waiters. */
250 static void l2cap_chan_del(struct sock *sk, int err)
252 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
253 struct sock *parent = bt_sk(sk)->parent;
255 l2cap_sock_clear_timer(sk);
257 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
260 /* Unlink from channel list */
261 l2cap_chan_unlink(&conn->chan_list, sk);
262 l2cap_pi(sk)->conn = NULL;
263 hci_conn_put(conn->hcon);
266 sk->sk_state = BT_CLOSED;
267 sock_set_flag(sk, SOCK_ZAPPED);
/* Never-accepted child: remove from parent's queue and notify it. */
273 bt_accept_unlink(sk);
274 parent->sk_data_ready(parent, 0);
276 sk->sk_state_change(sk);
279 /* Service level security */
/* Map the channel's PSM and requested security level to an HCI
 * authentication type and ask the ACL link to enforce it.
 * Returns nonzero when the link already satisfies the requirement
 * (per hci_conn_security semantics). */
280 static inline int l2cap_check_security(struct sock *sk)
282 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* PSM 0x0001 is SDP: never triggers bonding, and anything below
 * BT_SECURITY_SDP is raised to it. */
285 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
286 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
287 auth_type = HCI_AT_NO_BONDING_MITM;
289 auth_type = HCI_AT_NO_BONDING;
291 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
292 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
294 switch (l2cap_pi(sk)->sec_level) {
295 case BT_SECURITY_HIGH:
296 auth_type = HCI_AT_GENERAL_BONDING_MITM;
298 case BT_SECURITY_MEDIUM:
299 auth_type = HCI_AT_GENERAL_BONDING;
302 auth_type = HCI_AT_NO_BONDING;
307 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel's 1-128 range (see comment below); the
 * spinlock serializes concurrent allocators. */
311 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
315 /* Get next available identificator.
316 * 1 - 128 are used by kernel.
317 * 129 - 199 are reserved.
318 * 200 - 254 are used by utilities like l2ping, etc.
321 spin_lock_bh(&conn->lock);
323 if (++conn->tx_ident > 128)
328 spin_unlock_bh(&conn->lock);
/* Build an L2CAP signalling command skb and queue it on the ACL link. */
333 static inline int l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
335 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
337 BT_DBG("code 0x%2.2x", code);
342 return hci_send_acl(conn->hcon, skb, 0);
/* Build and send an ERTM supervisory frame (S-frame) carrying 'control'.
 * Consumes any pending F-bit / P-bit state, and appends an FCS when the
 * channel negotiated CRC16. */
345 static inline int l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
348 struct l2cap_hdr *lh;
349 struct l2cap_conn *conn = pi->conn;
/* Header + 2-byte control field; FCS adds 2 more below. */
350 int count, hlen = L2CAP_HDR_SIZE + 2;
352 if (pi->fcs == L2CAP_FCS_CRC16)
355 BT_DBG("pi %p, control 0x%2.2x", pi, control);
357 count = min_t(unsigned int, conn->mtu, hlen);
358 control |= L2CAP_CTRL_FRAME_TYPE;
/* One-shot flags: final/poll bits are cleared once consumed. */
360 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
361 control |= L2CAP_CTRL_FINAL;
362 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
365 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
366 control |= L2CAP_CTRL_POLL;
367 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
370 skb = bt_skb_alloc(count, GFP_ATOMIC);
374 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
375 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
376 lh->cid = cpu_to_le16(pi->dcid);
377 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before its own 2 bytes. */
379 if (pi->fcs == L2CAP_FCS_CRC16) {
380 u16 fcs = crc16(0, (u8 *)lh, count - 2);
381 put_unaligned_le16(fcs, skb_put(skb, 2));
384 return hci_send_acl(pi->conn->hcon, skb, 0);
/* Send RNR when we are locally busy, otherwise RR, acknowledging up to
 * buffer_seq via the ReqSeq field. */
387 static inline int l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
389 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY)
390 control |= L2CAP_SUPER_RCV_NOT_READY;
392 control |= L2CAP_SUPER_RCV_READY;
394 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
396 return l2cap_send_sframe(pi, control);
/* Kick off channel establishment: if the remote's feature mask is already
 * known, send CONN_REQ (once security passes); otherwise send INFO_REQ
 * first and let the info response/timeout restart the sequence. */
399 static void l2cap_do_start(struct sock *sk)
401 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
403 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature exchange in flight but not finished: wait for it. */
404 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
407 if (l2cap_check_security(sk)) {
408 struct l2cap_conn_req req;
409 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
410 req.psm = l2cap_pi(sk)->psm;
412 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
414 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
415 L2CAP_CONN_REQ, sizeof(req), &req);
418 struct l2cap_info_req req;
419 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
421 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
422 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the info response. */
424 mod_timer(&conn->info_timer, jiffies +
425 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
427 l2cap_send_cmd(conn, conn->info_ident,
428 L2CAP_INFO_REQ, sizeof(req), &req);
/* Send an L2CAP disconnect request for this channel's CID pair. */
432 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk)
434 struct l2cap_disconn_req req;
436 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
437 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
438 l2cap_send_cmd(conn, l2cap_get_ident(conn),
439 L2CAP_DISCONN_REQ, sizeof(req), &req);
442 /* ---- L2CAP connections ---- */
/* Walk every connection-oriented channel on 'conn' and advance its
 * establishment: send CONN_REQ for outgoing channels (BT_CONNECT) and
 * the appropriate CONN_RSP for incoming ones (BT_CONNECT2), depending on
 * security and defer_setup.  Runs under the channel-list read lock
 * (read_lock not visible in this excerpt; read_unlock is at the end). */
443 static void l2cap_conn_start(struct l2cap_conn *conn)
445 struct l2cap_chan_list *l = &conn->chan_list;
448 BT_DBG("conn %p", conn);
452 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
/* Skip raw and datagram channels: nothing to negotiate. */
455 if (sk->sk_type != SOCK_SEQPACKET &&
456 sk->sk_type != SOCK_STREAM) {
461 if (sk->sk_state == BT_CONNECT) {
462 if (l2cap_check_security(sk)) {
463 struct l2cap_conn_req req;
464 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
465 req.psm = l2cap_pi(sk)->psm;
467 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
469 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
470 L2CAP_CONN_REQ, sizeof(req), &req);
472 } else if (sk->sk_state == BT_CONNECT2) {
473 struct l2cap_conn_rsp rsp;
/* Response is from our perspective: their scid is our dcid. */
474 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
475 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
477 if (l2cap_check_security(sk)) {
478 if (bt_sk(sk)->defer_setup) {
479 struct sock *parent = bt_sk(sk)->parent;
/* Deferred accept: report pending and wake the listener. */
480 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
481 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
482 parent->sk_data_ready(parent, 0);
485 sk->sk_state = BT_CONFIG;
486 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
487 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
/* Security not yet satisfied: answer pending/authentication. */
490 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
491 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
494 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
495 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
501 read_unlock(&l->lock);
/* ACL link came up: raw/dgram channels become connected immediately;
 * connection-oriented ones in BT_CONNECT continue via l2cap_do_start
 * (call site truncated in this excerpt). */
504 static void l2cap_conn_ready(struct l2cap_conn *conn)
506 struct l2cap_chan_list *l = &conn->chan_list;
509 BT_DBG("conn %p", conn);
513 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
516 if (sk->sk_type != SOCK_SEQPACKET &&
517 sk->sk_type != SOCK_STREAM) {
518 l2cap_sock_clear_timer(sk);
519 sk->sk_state = BT_CONNECTED;
520 sk->sk_state_change(sk);
521 } else if (sk->sk_state == BT_CONNECT)
527 read_unlock(&l->lock);
530 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate a link-level reliability error to channels that asked for
 * force_reliable (the sk_err assignment is truncated from this excerpt). */
531 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
533 struct l2cap_chan_list *l = &conn->chan_list;
536 BT_DBG("conn %p", conn);
540 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
541 if (l2cap_pi(sk)->force_reliable)
545 read_unlock(&l->lock);
/* Info-request timer expired: treat the feature exchange as done (with
 * whatever we have) and resume channel establishment. */
548 static void l2cap_info_timeout(unsigned long arg)
550 struct l2cap_conn *conn = (void *) arg;
552 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
553 conn->info_ident = 0;
555 l2cap_conn_start(conn);
/* Get-or-create the L2CAP connection object hanging off an hci_conn.
 * Returns the existing conn when hcon->l2cap_data is already set
 * (early-return lines truncated in this excerpt). */
558 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
560 struct l2cap_conn *conn = hcon->l2cap_data;
565 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
569 hcon->l2cap_data = conn;
572 BT_DBG("hcon %p conn %p", hcon, conn);
574 conn->mtu = hcon->hdev->acl_mtu;
575 conn->src = &hcon->hdev->bdaddr;
576 conn->dst = &hcon->dst;
580 spin_lock_init(&conn->lock);
581 rwlock_init(&conn->chan_list.lock);
583 setup_timer(&conn->info_timer, l2cap_info_timeout,
584 (unsigned long) conn);
/* 0x13 = remote user terminated connection (default disconnect reason). */
586 conn->disc_reason = 0x13;
/* Tear down the L2CAP connection: free any partial reassembly skb, close
 * every remaining channel with 'err', stop the info timer, and detach
 * from the hci_conn (kfree(conn) line truncated in this excerpt). */
591 static void l2cap_conn_del(struct hci_conn *hcon, int err)
593 struct l2cap_conn *conn = hcon->l2cap_data;
599 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
601 kfree_skb(conn->rx_skb);
604 while ((sk = conn->chan_list.head)) {
606 l2cap_chan_del(sk, err);
611 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
612 del_timer_sync(&conn->info_timer);
614 hcon->l2cap_data = NULL;
/* Locked wrapper: add sk to conn's channel list under the write lock. */
618 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
620 struct l2cap_chan_list *l = &conn->chan_list;
621 write_lock_bh(&l->lock);
622 __l2cap_chan_add(conn, sk, parent);
623 write_unlock_bh(&l->lock);
626 /* ---- Socket interface ---- */
/* Exact-match lookup (bound source PSM + source bdaddr) over the global
 * socket list; caller holds l2cap_sk_list.lock. */
627 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
630 struct hlist_node *node;
631 sk_for_each(sk, node, &l2cap_sk_list.head)
632 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
639 /* Find socket with psm and source bdaddr.
640 * Returns closest match.
/* Closest-match lookup for an incoming connection: prefer a socket bound
 * to the exact source address, fall back to one bound to BDADDR_ANY (sk1).
 * 'node' being non-NULL distinguishes an exact hit from loop exhaustion. */
642 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
644 struct sock *sk = NULL, *sk1 = NULL;
645 struct hlist_node *node;
647 sk_for_each(sk, node, &l2cap_sk_list.head) {
648 if (state && sk->sk_state != state)
651 if (l2cap_pi(sk)->psm == psm) {
653 if (!bacmp(&bt_sk(sk)->src, src))
657 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
661 return node ? sk : sk1;
664 /* Find socket with given address (psm, src).
665 * Returns locked socket */
/* Locked wrapper around __l2cap_get_sock_by_psm; per the comment above it
 * returns the socket locked (bh_lock_sock truncated from this excerpt). */
666 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
669 read_lock(&l2cap_sk_list.lock);
670 s = __l2cap_get_sock_by_psm(state, psm, src);
673 read_unlock(&l2cap_sk_list.lock);
/* Socket destructor: drop any queued receive/transmit skbs. */
677 static void l2cap_sock_destruct(struct sock *sk)
681 skb_queue_purge(&sk->sk_receive_queue);
682 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark the
 * parent itself closed and zapped. */
685 static void l2cap_sock_cleanup_listen(struct sock *parent)
689 BT_DBG("parent %p", parent);
691 /* Close not yet accepted channels */
692 while ((sk = bt_accept_dequeue(parent, NULL)))
693 l2cap_sock_close(sk);
695 parent->sk_state = BT_CLOSED;
696 sock_set_flag(parent, SOCK_ZAPPED);
699 /* Kill socket (only if zapped and orphan)
700 * Must be called on unlocked socket.
/* Final destruction: only proceeds for a zapped orphan (no sk_socket).
 * Unlinks from the global list and releases the last reference
 * (sock_put truncated from this excerpt). */
702 static void l2cap_sock_kill(struct sock *sk)
704 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
707 BT_DBG("sk %p state %d", sk, sk->sk_state);
709 /* Kill poor orphan */
710 bt_sock_unlink(&l2cap_sk_list, sk);
711 sock_set_flag(sk, SOCK_DEAD);
/* State-machine close, called with the socket locked.  Listening sockets
 * shut down their accept queue; established connection-oriented channels
 * send DISCONN_REQ and wait in BT_DISCONN; half-open incoming channels
 * (BT_CONNECT2) answer the pending CONN_REQ with a failure result before
 * being deleted.  (Case labels are truncated from this excerpt.) */
715 static void __l2cap_sock_close(struct sock *sk, int reason)
717 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
719 switch (sk->sk_state) {
721 l2cap_sock_cleanup_listen(sk);
726 if (sk->sk_type == SOCK_SEQPACKET ||
727 sk->sk_type == SOCK_STREAM) {
728 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Graceful disconnect: bounded by sk_sndtimeo. */
730 sk->sk_state = BT_DISCONN;
731 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
732 l2cap_send_disconn_req(conn, sk);
734 l2cap_chan_del(sk, reason);
738 if (sk->sk_type == SOCK_SEQPACKET ||
739 sk->sk_type == SOCK_STREAM) {
740 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
741 struct l2cap_conn_rsp rsp;
/* Closing a deferred-setup channel reads as a security block to the
 * remote; otherwise report bad PSM. */
744 if (bt_sk(sk)->defer_setup)
745 result = L2CAP_CR_SEC_BLOCK;
747 result = L2CAP_CR_BAD_PSM;
749 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
750 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
751 rsp.result = cpu_to_le16(result);
752 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
753 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
754 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
756 l2cap_chan_del(sk, reason);
761 l2cap_chan_del(sk, reason);
765 sock_set_flag(sk, SOCK_ZAPPED);
770 /* Must be called on unlocked socket. */
/* Close from an unlocked context: stop the timer, then close with
 * ECONNRESET (lock_sock/release_sock truncated from this excerpt). */
771 static void l2cap_sock_close(struct sock *sk)
773 l2cap_sock_clear_timer(sk);
775 __l2cap_sock_close(sk, ECONNRESET);
/* Initialize per-socket L2CAP state.  A child inherits its parent's
 * configuration; a fresh socket gets module defaults (ERTM only for
 * SOCK_STREAM when enable_ertm is set). */
780 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
782 struct l2cap_pinfo *pi = l2cap_pi(sk);
787 sk->sk_type = parent->sk_type;
788 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
790 pi->imtu = l2cap_pi(parent)->imtu;
791 pi->omtu = l2cap_pi(parent)->omtu;
792 pi->mode = l2cap_pi(parent)->mode;
793 pi->fcs = l2cap_pi(parent)->fcs;
794 pi->max_tx = l2cap_pi(parent)->max_tx;
795 pi->tx_win = l2cap_pi(parent)->tx_win;
796 pi->sec_level = l2cap_pi(parent)->sec_level;
797 pi->role_switch = l2cap_pi(parent)->role_switch;
798 pi->force_reliable = l2cap_pi(parent)->force_reliable;
800 pi->imtu = L2CAP_DEFAULT_MTU;
802 if (enable_ertm && sk->sk_type == SOCK_STREAM)
803 pi->mode = L2CAP_MODE_ERTM;
805 pi->mode = L2CAP_MODE_BASIC;
806 pi->max_tx = max_transmit;
807 pi->fcs = L2CAP_FCS_CRC16;
808 pi->tx_win = tx_window;
809 pi->sec_level = BT_SECURITY_LOW;
811 pi->force_reliable = 0;
814 /* Default config options */
816 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
/* ERTM bookkeeping: transmit queue, SREJ queue and SREJ list. */
817 skb_queue_head_init(TX_QUEUE(sk));
818 skb_queue_head_init(SREJ_QUEUE(sk));
819 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor: sizes sk_alloc for struct l2cap_pinfo. */
822 static struct proto l2cap_proto = {
824 .owner = THIS_MODULE,
825 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and initialize a bare L2CAP socket: destructor, connect
 * timeout, state BT_OPEN, sk_timer wired to l2cap_sock_timeout, and a
 * slot in the global socket list. */
828 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
832 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
836 sock_init_data(sock, sk);
837 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
839 sk->sk_destruct = l2cap_sock_destruct;
840 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
842 sock_reset_flag(sk, SOCK_ZAPPED);
844 sk->sk_protocol = proto;
845 sk->sk_state = BT_OPEN;
847 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
849 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) entry point: validate the type, gate raw sockets behind
 * CAP_NET_RAW (unless kernel-internal), allocate and initialize. */
853 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
858 BT_DBG("sock %p", sock);
860 sock->state = SS_UNCONNECTED;
862 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
863 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
864 return -ESOCKTNOSUPPORT;
866 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
869 sock->ops = &l2cap_sock_ops;
871 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
875 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, require BT_OPEN,
 * gate privileged PSMs (< 0x1001) behind CAP_NET_BIND_SERVICE, reject a
 * duplicate (psm, src) binding, then record the source address/PSM. */
879 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
881 struct sock *sk = sock->sk;
882 struct sockaddr_l2 la;
887 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Tolerate short sockaddrs: zero-fill then copy what the caller gave. */
890 memset(&la, 0, sizeof(la));
891 len = min_t(unsigned int, sizeof(la), alen);
892 memcpy(&la, addr, len);
899 if (sk->sk_state != BT_OPEN) {
904 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
905 !capable(CAP_NET_BIND_SERVICE)) {
910 write_lock_bh(&l2cap_sk_list.lock);
912 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
915 /* Save source address */
916 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
917 l2cap_pi(sk)->psm = la.l2_psm;
918 l2cap_pi(sk)->sport = la.l2_psm;
919 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) never require higher security. */
921 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
922 __le16_to_cpu(la.l2_psm) == 0x0003)
923 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
926 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to 'dst', derive the HCI authentication type from the
 * socket type / PSM / security level, create (or reuse) the ACL link and
 * attach this channel to it.  If the link is already up, short-circuits
 * raw/dgram sockets straight to BT_CONNECTED. */
933 static int l2cap_do_connect(struct sock *sk)
935 bdaddr_t *src = &bt_sk(sk)->src;
936 bdaddr_t *dst = &bt_sk(sk)->dst;
937 struct l2cap_conn *conn;
938 struct hci_conn *hcon;
939 struct hci_dev *hdev;
943 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
946 hdev = hci_get_route(dst, src);
948 return -EHOSTUNREACH;
950 hci_dev_lock_bh(hdev);
/* Raw sockets use dedicated bonding; PSM 0x0001 (SDP) avoids bonding
 * entirely; everything else maps sec_level to general bonding. */
954 if (sk->sk_type == SOCK_RAW) {
955 switch (l2cap_pi(sk)->sec_level) {
956 case BT_SECURITY_HIGH:
957 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
959 case BT_SECURITY_MEDIUM:
960 auth_type = HCI_AT_DEDICATED_BONDING;
963 auth_type = HCI_AT_NO_BONDING;
966 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
967 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
968 auth_type = HCI_AT_NO_BONDING_MITM;
970 auth_type = HCI_AT_NO_BONDING;
972 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
973 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
975 switch (l2cap_pi(sk)->sec_level) {
976 case BT_SECURITY_HIGH:
977 auth_type = HCI_AT_GENERAL_BONDING_MITM;
979 case BT_SECURITY_MEDIUM:
980 auth_type = HCI_AT_GENERAL_BONDING;
983 auth_type = HCI_AT_NO_BONDING;
988 hcon = hci_connect(hdev, ACL_LINK, dst,
989 l2cap_pi(sk)->sec_level, auth_type);
993 conn = l2cap_conn_add(hcon, 0);
1001 /* Update source addr of the socket */
1002 bacpy(src, conn->src);
1004 l2cap_chan_add(conn, sk, NULL);
1006 sk->sk_state = BT_CONNECT;
1007 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
1009 if (hcon->state == BT_CONNECTED) {
/* Raw/dgram channels need no L2CAP signalling: connected now. */
1010 if (sk->sk_type != SOCK_SEQPACKET &&
1011 sk->sk_type != SOCK_STREAM) {
1012 l2cap_sock_clear_timer(sk);
1013 sk->sk_state = BT_CONNECTED;
1019 hci_dev_unlock_bh(hdev);
/* connect(2): validate address and mode (only BASIC/ERTM/STREAMING are
 * accepted for connection-oriented sockets), check current state, store
 * (dst, psm), initiate the connection and wait for BT_CONNECTED. */
1024 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1026 struct sock *sk = sock->sk;
1027 struct sockaddr_l2 la;
1030 BT_DBG("sk %p", sk);
1032 if (!addr || alen < sizeof(addr->sa_family) ||
1033 addr->sa_family != AF_BLUETOOTH)
1036 memset(&la, 0, sizeof(la));
1037 len = min_t(unsigned int, sizeof(la), alen);
1038 memcpy(&la, addr, len);
1045 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
1051 switch (l2cap_pi(sk)->mode) {
1052 case L2CAP_MODE_BASIC:
1054 case L2CAP_MODE_ERTM:
1055 case L2CAP_MODE_STREAMING:
1064 switch (sk->sk_state) {
1068 /* Already connecting */
1072 /* Already connected */
1085 /* Set destination address and psm */
1086 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1087 l2cap_pi(sk)->psm = la.l2_psm;
1089 err = l2cap_do_connect(sk);
/* Block (unless O_NONBLOCK) until the channel reaches BT_CONNECTED. */
1094 err = bt_sock_wait_state(sk, BT_CONNECTED,
1095 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET/STREAM sockets in a supported mode may
 * listen.  A socket bound without a PSM gets one auto-assigned from the
 * odd dynamic range 0x1001..0x10ff. */
1101 static int l2cap_sock_listen(struct socket *sock, int backlog)
1103 struct sock *sk = sock->sk;
1106 BT_DBG("sk %p backlog %d", sk, backlog);
1110 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1111 || sk->sk_state != BT_BOUND) {
1116 switch (l2cap_pi(sk)->mode) {
1117 case L2CAP_MODE_BASIC:
1119 case L2CAP_MODE_ERTM:
1120 case L2CAP_MODE_STREAMING:
1129 if (!l2cap_pi(sk)->psm) {
1130 bdaddr_t *src = &bt_sk(sk)->src;
1135 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd; step by 2. */
1137 for (psm = 0x1001; psm < 0x1100; psm += 2)
1138 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1139 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1140 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1145 write_unlock_bh(&l2cap_sk_list.lock);
1151 sk->sk_max_ack_backlog = backlog;
1152 sk->sk_ack_backlog = 0;
1153 sk->sk_state = BT_LISTEN;
/* accept(2): classic wait-loop -- sleep on the parent's wait queue
 * (wake-one via the exclusive queue) until a child appears, a signal
 * arrives, the timeout runs out, or the socket stops listening.
 * The release_sock before schedule_timeout is truncated in this excerpt. */
1160 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1162 DECLARE_WAITQUEUE(wait, current);
1163 struct sock *sk = sock->sk, *nsk;
1167 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1169 if (sk->sk_state != BT_LISTEN) {
1174 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1176 BT_DBG("sk %p timeo %ld", sk, timeo);
1178 /* Wait for an incoming connection. (wake-one). */
1179 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1180 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1181 set_current_state(TASK_INTERRUPTIBLE);
1188 timeo = schedule_timeout(timeo);
1189 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1191 if (sk->sk_state != BT_LISTEN) {
1196 if (signal_pending(current)) {
1197 err = sock_intr_errno(timeo);
1201 set_current_state(TASK_RUNNING);
1202 remove_wait_queue(sk_sleep(sk), &wait);
1207 newsock->state = SS_CONNECTED;
1209 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer's
 * (psm, dst, dcid) or our own (sport, src, scid). */
1216 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1218 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1219 struct sock *sk = sock->sk;
1221 BT_DBG("sock %p, sk %p", sock, sk);
1223 addr->sa_family = AF_BLUETOOTH;
1224 *len = sizeof(struct sockaddr_l2);
1227 la->l2_psm = l2cap_pi(sk)->psm;
1228 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1229 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1231 la->l2_psm = l2cap_pi(sk)->sport;
1232 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1233 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* ERTM monitor timer: the peer has not answered our poll.  Give up and
 * disconnect once retry_count reaches the remote's MaxTransmit, otherwise
 * re-arm and poll again with an RR/RNR carrying the P bit. */
1239 static void l2cap_monitor_timeout(unsigned long arg)
1241 struct sock *sk = (void *) arg;
1244 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1245 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk);
1250 l2cap_pi(sk)->retry_count++;
1251 __mod_monitor_timer();
1253 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: an I-frame went unacknowledged.  Start the
 * monitor sequence (retry_count = 1), enter the WAIT_F state and poll the
 * peer with the P bit set. */
1257 static void l2cap_retrans_timeout(unsigned long arg)
1259 struct sock *sk = (void *) arg;
1262 l2cap_pi(sk)->retry_count = 1;
1263 __mod_monitor_timer();
1265 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1267 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Free transmitted I-frames from the head of TX_QUEUE that the peer has
 * acknowledged, stopping at expected_ack_seq.  When nothing remains
 * unacked the retransmission timer is cancelled. */
1271 static void l2cap_drop_acked_frames(struct sock *sk)
1273 struct sk_buff *skb;
1275 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1276 l2cap_pi(sk)->unacked_frames) {
1277 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1280 skb = skb_dequeue(TX_QUEUE(sk));
1283 l2cap_pi(sk)->unacked_frames--;
1286 if (!l2cap_pi(sk)->unacked_frames)
1287 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand one skb to the ACL layer for this channel's connection. */
1292 static inline int l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1294 struct l2cap_pinfo *pi = l2cap_pi(sk);
1297 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1299 err = hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: clone each queued frame, stamp TxSeq into the
 * control field, recompute the FCS over the updated bytes, and send.
 * Frames are dropped from the queue immediately -- streaming mode never
 * retransmits.  TxSeq wraps modulo 64. */
1306 static int l2cap_streaming_send(struct sock *sk)
1308 struct sk_buff *skb, *tx_skb;
1309 struct l2cap_pinfo *pi = l2cap_pi(sk);
1313 while ((skb = sk->sk_send_head)) {
1314 tx_skb = skb_clone(skb, GFP_ATOMIC);
1316 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1317 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1318 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1320 if (pi->fcs == L2CAP_FCS_CRC16) {
1321 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1322 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1325 err = l2cap_do_send(sk, tx_skb);
1327 l2cap_send_disconn_req(pi->conn, sk);
1331 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
/* Advance sk_send_head, then discard the original frame. */
1333 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1334 sk->sk_send_head = NULL;
1336 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
1338 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the I-frame with the given TxSeq: walk TX_QUEUE to find it,
 * enforce the remote MaxTransmit limit, then clone it with a refreshed
 * ReqSeq/TxSeq control field and FCS and resend. */
1344 static int l2cap_retransmit_frame(struct sock *sk, u8 tx_seq)
1346 struct l2cap_pinfo *pi = l2cap_pi(sk);
1347 struct sk_buff *skb, *tx_skb;
1351 skb = skb_peek(TX_QUEUE(sk));
1353 if (bt_cb(skb)->tx_seq != tx_seq) {
1354 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1356 skb = skb_queue_next(TX_QUEUE(sk), skb);
/* remote_max_tx == 0 means unlimited retransmissions. */
1360 if (pi->remote_max_tx &&
1361 bt_cb(skb)->retries == pi->remote_max_tx) {
1362 l2cap_send_disconn_req(pi->conn, sk);
1366 tx_skb = skb_clone(skb, GFP_ATOMIC);
1367 bt_cb(skb)->retries++;
1368 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1369 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1370 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1371 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1373 if (pi->fcs == L2CAP_FCS_CRC16) {
1374 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1375 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1378 err = l2cap_do_send(sk, tx_skb);
1380 l2cap_send_disconn_req(pi->conn, sk);
/* ERTM transmit path: send queued I-frames while the tx window has room
 * and the remote is not busy.  Each frame is cloned (the original stays
 * queued for retransmission), stamped with ReqSeq/TxSeq and an optional
 * F bit, FCS'd, and the retransmission timer is armed. */
1388 static int l2cap_ertm_send(struct sock *sk)
1390 struct sk_buff *skb, *tx_skb;
1391 struct l2cap_pinfo *pi = l2cap_pi(sk);
/* While waiting for an F-bit response, transmission is frozen. */
1395 if (pi->conn_state & L2CAP_CONN_WAIT_F)
1398 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk)) &&
1399 !(pi->conn_state & L2CAP_CONN_REMOTE_BUSY)) {
1401 if (pi->remote_max_tx &&
1402 bt_cb(skb)->retries == pi->remote_max_tx) {
1403 l2cap_send_disconn_req(pi->conn, sk);
1407 tx_skb = skb_clone(skb, GFP_ATOMIC);
1409 bt_cb(skb)->retries++;
1411 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1412 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1413 control |= L2CAP_CTRL_FINAL;
1414 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1416 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1417 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1418 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* NOTE(review): FCS is computed/written via skb->data here, unlike the
 * streaming/retransmit paths which use tx_skb->data.  Harmless only
 * because skb_clone() shares the data buffer, but worth unifying. */
1421 if (pi->fcs == L2CAP_FCS_CRC16) {
1422 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1423 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1426 err = l2cap_do_send(sk, tx_skb);
1428 l2cap_send_disconn_req(pi->conn, sk);
1431 __mod_retrans_timer();
1433 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1434 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1436 pi->unacked_frames++;
1439 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1440 sk->sk_send_head = NULL;
1442 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Acknowledge received I-frames: send RNR when locally busy; otherwise
 * try to piggyback the ack on pending I-frames via l2cap_ertm_send and
 * fall back to an explicit RR if nothing was sent. */
1450 static int l2cap_send_ack(struct l2cap_pinfo *pi)
1452 struct sock *sk = (struct sock *)pi;
1455 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1457 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
1458 control |= L2CAP_SUPER_RCV_NOT_READY;
1459 return l2cap_send_sframe(pi, control);
1460 } else if (l2cap_ertm_send(sk) == 0) {
1461 control |= L2CAP_SUPER_RCV_READY;
1462 return l2cap_send_sframe(pi, control);
/* Send an SREJ S-frame with the F bit, requesting the sequence number at
 * the tail of the SREJ list. */
1467 static int l2cap_send_srejtail(struct sock *sk)
1469 struct srej_list *tail;
1472 control = L2CAP_SUPER_SELECT_REJECT;
1473 control |= L2CAP_CTRL_FINAL;
1475 tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
1476 control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
1478 l2cap_send_sframe(l2cap_pi(sk), control);
/* Copy user iovec data into skb: first 'count' bytes go into the main
 * skb, the remainder is split into MTU-sized fragments chained on
 * frag_list (continuation fragments carry no L2CAP header). */
1483 static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
1485 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1486 struct sk_buff **frag;
1489 if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))
1495 /* Continuation fragments (no L2CAP header) */
1496 frag = &skb_shinfo(skb)->frag_list;
1498 count = min_t(unsigned int, conn->mtu, len);
1500 *frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);
1503 if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))
1509 frag = &(*frag)->next;
/* Build a connectionless (G-frame) PDU: L2CAP header + 2-byte PSM,
 * followed by user data copied from the iovec.  Returns an skb or an
 * ERR_PTR on allocation/copy failure. */
1515 static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1517 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1518 struct sk_buff *skb;
/* hlen = L2CAP header + 2 bytes for the PSM field. */
1519 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1520 struct l2cap_hdr *lh;
1522 BT_DBG("sk %p len %d", sk, (int)len);
1524 count = min_t(unsigned int, (conn->mtu - hlen), len);
1525 skb = bt_skb_send_alloc(sk, count + hlen,
1526 msg->msg_flags & MSG_DONTWAIT, &err);
1528 return ERR_PTR(-ENOMEM);
1530 /* Create L2CAP header */
1531 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1532 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1533 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1534 put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));
1536 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1537 if (unlikely(err < 0)) {
1539 return ERR_PTR(err);
/* Build a basic-mode (B-frame) PDU: plain L2CAP header followed by user
 * data from the iovec.  Returns an skb or an ERR_PTR. */
1544 static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
1546 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1547 struct sk_buff *skb;
1548 int err, count, hlen = L2CAP_HDR_SIZE;
1549 struct l2cap_hdr *lh;
1551 BT_DBG("sk %p len %d", sk, (int)len);
1553 count = min_t(unsigned int, (conn->mtu - hlen), len);
1554 skb = bt_skb_send_alloc(sk, count + hlen,
1555 msg->msg_flags & MSG_DONTWAIT, &err);
1557 return ERR_PTR(-ENOMEM);
1559 /* Create L2CAP header */
1560 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1561 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1562 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1564 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1565 if (unlikely(err < 0)) {
1567 return ERR_PTR(err);
1572 static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
1574 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
1575 struct sk_buff *skb;
1576 int err, count, hlen = L2CAP_HDR_SIZE + 2;
1577 struct l2cap_hdr *lh;
1579 BT_DBG("sk %p len %d", sk, (int)len);
1582 return ERR_PTR(-ENOTCONN);
1587 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1590 count = min_t(unsigned int, (conn->mtu - hlen), len);
1591 skb = bt_skb_send_alloc(sk, count + hlen,
1592 msg->msg_flags & MSG_DONTWAIT, &err);
1594 return ERR_PTR(-ENOMEM);
1596 /* Create L2CAP header */
1597 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
1598 lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1599 lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
1600 put_unaligned_le16(control, skb_put(skb, 2));
1602 put_unaligned_le16(sdulen, skb_put(skb, 2));
1604 err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
1605 if (unlikely(err < 0)) {
1607 return ERR_PTR(err);
1610 if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
1611 put_unaligned_le16(0, skb_put(skb, 2));
1613 bt_cb(skb)->retries = 0;
1617 static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
1619 struct l2cap_pinfo *pi = l2cap_pi(sk);
1620 struct sk_buff *skb;
1621 struct sk_buff_head sar_queue;
1625 __skb_queue_head_init(&sar_queue);
1626 control = L2CAP_SDU_START;
1627 skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
1629 return PTR_ERR(skb);
1631 __skb_queue_tail(&sar_queue, skb);
1632 len -= pi->remote_mps;
1633 size += pi->remote_mps;
1638 if (len > pi->remote_mps) {
1639 control = L2CAP_SDU_CONTINUE;
1640 buflen = pi->remote_mps;
1642 control = L2CAP_SDU_END;
1646 skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
1648 skb_queue_purge(&sar_queue);
1649 return PTR_ERR(skb);
1652 __skb_queue_tail(&sar_queue, skb);
1656 skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
1657 if (sk->sk_send_head == NULL)
1658 sk->sk_send_head = sar_queue.next;
1663 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1665 struct sock *sk = sock->sk;
1666 struct l2cap_pinfo *pi = l2cap_pi(sk);
1667 struct sk_buff *skb;
1671 BT_DBG("sock %p, sk %p", sock, sk);
1673 err = sock_error(sk);
1677 if (msg->msg_flags & MSG_OOB)
1682 if (sk->sk_state != BT_CONNECTED) {
1687 /* Connectionless channel */
1688 if (sk->sk_type == SOCK_DGRAM) {
1689 skb = l2cap_create_connless_pdu(sk, msg, len);
1693 err = l2cap_do_send(sk, skb);
1698 case L2CAP_MODE_BASIC:
1699 /* Check outgoing MTU */
1700 if (len > pi->omtu) {
1705 /* Create a basic PDU */
1706 skb = l2cap_create_basic_pdu(sk, msg, len);
1712 err = l2cap_do_send(sk, skb);
1717 case L2CAP_MODE_ERTM:
1718 case L2CAP_MODE_STREAMING:
1719 /* Entire SDU fits into one PDU */
1720 if (len <= pi->remote_mps) {
1721 control = L2CAP_SDU_UNSEGMENTED;
1722 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1727 __skb_queue_tail(TX_QUEUE(sk), skb);
1728 if (sk->sk_send_head == NULL)
1729 sk->sk_send_head = skb;
1731 /* Segment SDU into multiples PDUs */
1732 err = l2cap_sar_segment_sdu(sk, msg, len);
1737 if (pi->mode == L2CAP_MODE_STREAMING)
1738 err = l2cap_streaming_send(sk);
1740 err = l2cap_ertm_send(sk);
1747 BT_DBG("bad state %1.1x", pi->mode);
1756 static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
1758 struct sock *sk = sock->sk;
1762 if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
1763 struct l2cap_conn_rsp rsp;
1765 sk->sk_state = BT_CONFIG;
1767 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
1768 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
1769 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
1770 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
1771 l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
1772 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
1780 return bt_sock_recvmsg(iocb, sock, msg, len, flags);
1783 static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
1785 struct sock *sk = sock->sk;
1786 struct l2cap_options opts;
1790 BT_DBG("sk %p", sk);
1796 opts.imtu = l2cap_pi(sk)->imtu;
1797 opts.omtu = l2cap_pi(sk)->omtu;
1798 opts.flush_to = l2cap_pi(sk)->flush_to;
1799 opts.mode = l2cap_pi(sk)->mode;
1800 opts.fcs = l2cap_pi(sk)->fcs;
1801 opts.max_tx = l2cap_pi(sk)->max_tx;
1802 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1804 len = min_t(unsigned int, sizeof(opts), optlen);
1805 if (copy_from_user((char *) &opts, optval, len)) {
1810 l2cap_pi(sk)->mode = opts.mode;
1811 switch (l2cap_pi(sk)->mode) {
1812 case L2CAP_MODE_BASIC:
1814 case L2CAP_MODE_ERTM:
1815 case L2CAP_MODE_STREAMING:
1824 l2cap_pi(sk)->imtu = opts.imtu;
1825 l2cap_pi(sk)->omtu = opts.omtu;
1826 l2cap_pi(sk)->fcs = opts.fcs;
1827 l2cap_pi(sk)->max_tx = opts.max_tx;
1828 l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;
1832 if (get_user(opt, (u32 __user *) optval)) {
1837 if (opt & L2CAP_LM_AUTH)
1838 l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
1839 if (opt & L2CAP_LM_ENCRYPT)
1840 l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
1841 if (opt & L2CAP_LM_SECURE)
1842 l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;
1844 l2cap_pi(sk)->role_switch = (opt & L2CAP_LM_MASTER);
1845 l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
1857 static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
1859 struct sock *sk = sock->sk;
1860 struct bt_security sec;
1864 BT_DBG("sk %p", sk);
1866 if (level == SOL_L2CAP)
1867 return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);
1869 if (level != SOL_BLUETOOTH)
1870 return -ENOPROTOOPT;
1876 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
1877 && sk->sk_type != SOCK_RAW) {
1882 sec.level = BT_SECURITY_LOW;
1884 len = min_t(unsigned int, sizeof(sec), optlen);
1885 if (copy_from_user((char *) &sec, optval, len)) {
1890 if (sec.level < BT_SECURITY_LOW ||
1891 sec.level > BT_SECURITY_HIGH) {
1896 l2cap_pi(sk)->sec_level = sec.level;
1899 case BT_DEFER_SETUP:
1900 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
1905 if (get_user(opt, (u32 __user *) optval)) {
1910 bt_sk(sk)->defer_setup = opt;
1922 static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
1924 struct sock *sk = sock->sk;
1925 struct l2cap_options opts;
1926 struct l2cap_conninfo cinfo;
1930 BT_DBG("sk %p", sk);
1932 if (get_user(len, optlen))
1939 opts.imtu = l2cap_pi(sk)->imtu;
1940 opts.omtu = l2cap_pi(sk)->omtu;
1941 opts.flush_to = l2cap_pi(sk)->flush_to;
1942 opts.mode = l2cap_pi(sk)->mode;
1943 opts.fcs = l2cap_pi(sk)->fcs;
1944 opts.max_tx = l2cap_pi(sk)->max_tx;
1945 opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;
1947 len = min_t(unsigned int, len, sizeof(opts));
1948 if (copy_to_user(optval, (char *) &opts, len))
1954 switch (l2cap_pi(sk)->sec_level) {
1955 case BT_SECURITY_LOW:
1956 opt = L2CAP_LM_AUTH;
1958 case BT_SECURITY_MEDIUM:
1959 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
1961 case BT_SECURITY_HIGH:
1962 opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |
1970 if (l2cap_pi(sk)->role_switch)
1971 opt |= L2CAP_LM_MASTER;
1973 if (l2cap_pi(sk)->force_reliable)
1974 opt |= L2CAP_LM_RELIABLE;
1976 if (put_user(opt, (u32 __user *) optval))
1980 case L2CAP_CONNINFO:
1981 if (sk->sk_state != BT_CONNECTED &&
1982 !(sk->sk_state == BT_CONNECT2 &&
1983 bt_sk(sk)->defer_setup)) {
1988 cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
1989 memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);
1991 len = min_t(unsigned int, len, sizeof(cinfo));
1992 if (copy_to_user(optval, (char *) &cinfo, len))
2006 static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
2008 struct sock *sk = sock->sk;
2009 struct bt_security sec;
2012 BT_DBG("sk %p", sk);
2014 if (level == SOL_L2CAP)
2015 return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);
2017 if (level != SOL_BLUETOOTH)
2018 return -ENOPROTOOPT;
2020 if (get_user(len, optlen))
2027 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
2028 && sk->sk_type != SOCK_RAW) {
2033 sec.level = l2cap_pi(sk)->sec_level;
2035 len = min_t(unsigned int, len, sizeof(sec));
2036 if (copy_to_user(optval, (char *) &sec, len))
2041 case BT_DEFER_SETUP:
2042 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {
2047 if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
2061 static int l2cap_sock_shutdown(struct socket *sock, int how)
2063 struct sock *sk = sock->sk;
2066 BT_DBG("sock %p, sk %p", sock, sk);
2072 if (!sk->sk_shutdown) {
2073 sk->sk_shutdown = SHUTDOWN_MASK;
2074 l2cap_sock_clear_timer(sk);
2075 __l2cap_sock_close(sk, 0);
2077 if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
2078 err = bt_sock_wait_state(sk, BT_CLOSED,
2085 static int l2cap_sock_release(struct socket *sock)
2087 struct sock *sk = sock->sk;
2090 BT_DBG("sock %p, sk %p", sock, sk);
2095 err = l2cap_sock_shutdown(sock, 2);
2098 l2cap_sock_kill(sk);
2102 static void l2cap_chan_ready(struct sock *sk)
2104 struct sock *parent = bt_sk(sk)->parent;
2106 BT_DBG("sk %p, parent %p", sk, parent);
2108 l2cap_pi(sk)->conf_state = 0;
2109 l2cap_sock_clear_timer(sk);
2112 /* Outgoing channel.
2113 * Wake up socket sleeping on connect.
2115 sk->sk_state = BT_CONNECTED;
2116 sk->sk_state_change(sk);
2118 /* Incoming channel.
2119 * Wake up socket sleeping on accept.
2121 parent->sk_data_ready(parent, 0);
2125 /* Copy frame to all raw sockets on that connection */
2126 static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
2128 struct l2cap_chan_list *l = &conn->chan_list;
2129 struct sk_buff *nskb;
2132 BT_DBG("conn %p", conn);
2134 read_lock(&l->lock);
2135 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
2136 if (sk->sk_type != SOCK_RAW)
2139 /* Don't send frame to the socket it came from */
2142 nskb = skb_clone(skb, GFP_ATOMIC);
2146 if (sock_queue_rcv_skb(sk, nskb))
2149 read_unlock(&l->lock);
2152 /* ---- L2CAP signalling commands ---- */
2153 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
2154 u8 code, u8 ident, u16 dlen, void *data)
2156 struct sk_buff *skb, **frag;
2157 struct l2cap_cmd_hdr *cmd;
2158 struct l2cap_hdr *lh;
2161 BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
2162 conn, code, ident, dlen);
2164 len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
2165 count = min_t(unsigned int, conn->mtu, len);
2167 skb = bt_skb_alloc(count, GFP_ATOMIC);
2171 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
2172 lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
2173 lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);
2175 cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
2178 cmd->len = cpu_to_le16(dlen);
2181 count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
2182 memcpy(skb_put(skb, count), data, count);
2188 /* Continuation fragments (no L2CAP header) */
2189 frag = &skb_shinfo(skb)->frag_list;
2191 count = min_t(unsigned int, conn->mtu, len);
2193 *frag = bt_skb_alloc(count, GFP_ATOMIC);
2197 memcpy(skb_put(*frag, count), data, count);
2202 frag = &(*frag)->next;
2212 static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
2214 struct l2cap_conf_opt *opt = *ptr;
2217 len = L2CAP_CONF_OPT_SIZE + opt->len;
2225 *val = *((u8 *) opt->val);
2229 *val = __le16_to_cpu(*((__le16 *) opt->val));
2233 *val = __le32_to_cpu(*((__le32 *) opt->val));
2237 *val = (unsigned long) opt->val;
2241 BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
2245 static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
2247 struct l2cap_conf_opt *opt = *ptr;
2249 BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);
2256 *((u8 *) opt->val) = val;
2260 *((__le16 *) opt->val) = cpu_to_le16(val);
2264 *((__le32 *) opt->val) = cpu_to_le32(val);
2268 memcpy(opt->val, (void *) val, len);
2272 *ptr += L2CAP_CONF_OPT_SIZE + len;
2275 static void l2cap_ack_timeout(unsigned long arg)
2277 struct sock *sk = (void *) arg;
2280 l2cap_send_ack(l2cap_pi(sk));
2284 static inline void l2cap_ertm_init(struct sock *sk)
2286 l2cap_pi(sk)->expected_ack_seq = 0;
2287 l2cap_pi(sk)->unacked_frames = 0;
2288 l2cap_pi(sk)->buffer_seq = 0;
2289 l2cap_pi(sk)->num_acked = 0;
2290 l2cap_pi(sk)->frames_sent = 0;
2292 setup_timer(&l2cap_pi(sk)->retrans_timer,
2293 l2cap_retrans_timeout, (unsigned long) sk);
2294 setup_timer(&l2cap_pi(sk)->monitor_timer,
2295 l2cap_monitor_timeout, (unsigned long) sk);
2296 setup_timer(&l2cap_pi(sk)->ack_timer,
2297 l2cap_ack_timeout, (unsigned long) sk);
2299 __skb_queue_head_init(SREJ_QUEUE(sk));
2302 static int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
2304 u32 local_feat_mask = l2cap_feat_mask;
2306 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
2309 case L2CAP_MODE_ERTM:
2310 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
2311 case L2CAP_MODE_STREAMING:
2312 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
2318 static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
2321 case L2CAP_MODE_STREAMING:
2322 case L2CAP_MODE_ERTM:
2323 if (l2cap_mode_supported(mode, remote_feat_mask))
2327 return L2CAP_MODE_BASIC;
2331 static int l2cap_build_conf_req(struct sock *sk, void *data)
2333 struct l2cap_pinfo *pi = l2cap_pi(sk);
2334 struct l2cap_conf_req *req = data;
2335 struct l2cap_conf_rfc rfc = { .mode = pi->mode };
2336 void *ptr = req->data;
2338 BT_DBG("sk %p", sk);
2340 if (pi->num_conf_req || pi->num_conf_rsp)
2344 case L2CAP_MODE_STREAMING:
2345 case L2CAP_MODE_ERTM:
2346 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2347 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2348 l2cap_send_disconn_req(pi->conn, sk);
2351 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2357 case L2CAP_MODE_BASIC:
2358 if (pi->imtu != L2CAP_DEFAULT_MTU)
2359 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);
2362 case L2CAP_MODE_ERTM:
2363 rfc.mode = L2CAP_MODE_ERTM;
2364 rfc.txwin_size = pi->tx_win;
2365 rfc.max_transmit = pi->max_tx;
2366 rfc.retrans_timeout = 0;
2367 rfc.monitor_timeout = 0;
2368 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2369 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2370 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2372 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2373 sizeof(rfc), (unsigned long) &rfc);
2375 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2378 if (pi->fcs == L2CAP_FCS_NONE ||
2379 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2380 pi->fcs = L2CAP_FCS_NONE;
2381 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2385 case L2CAP_MODE_STREAMING:
2386 rfc.mode = L2CAP_MODE_STREAMING;
2388 rfc.max_transmit = 0;
2389 rfc.retrans_timeout = 0;
2390 rfc.monitor_timeout = 0;
2391 rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
2392 if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
2393 rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);
2395 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2396 sizeof(rfc), (unsigned long) &rfc);
2398 if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))
2401 if (pi->fcs == L2CAP_FCS_NONE ||
2402 pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
2403 pi->fcs = L2CAP_FCS_NONE;
2404 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);
2409 /* FIXME: Need actual value of the flush timeout */
2410 //if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
2411 // l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);
2413 req->dcid = cpu_to_le16(pi->dcid);
2414 req->flags = cpu_to_le16(0);
2419 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2421 struct l2cap_pinfo *pi = l2cap_pi(sk);
2422 struct l2cap_conf_rsp *rsp = data;
2423 void *ptr = rsp->data;
2424 void *req = pi->conf_req;
2425 int len = pi->conf_len;
2426 int type, hint, olen;
2428 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2429 u16 mtu = L2CAP_DEFAULT_MTU;
2430 u16 result = L2CAP_CONF_SUCCESS;
2432 BT_DBG("sk %p", sk);
2434 while (len >= L2CAP_CONF_OPT_SIZE) {
2435 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2437 hint = type & L2CAP_CONF_HINT;
2438 type &= L2CAP_CONF_MASK;
2441 case L2CAP_CONF_MTU:
2445 case L2CAP_CONF_FLUSH_TO:
2449 case L2CAP_CONF_QOS:
2452 case L2CAP_CONF_RFC:
2453 if (olen == sizeof(rfc))
2454 memcpy(&rfc, (void *) val, olen);
2457 case L2CAP_CONF_FCS:
2458 if (val == L2CAP_FCS_NONE)
2459 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2467 result = L2CAP_CONF_UNKNOWN;
2468 *((u8 *) ptr++) = type;
2473 if (pi->num_conf_rsp || pi->num_conf_req)
2477 case L2CAP_MODE_STREAMING:
2478 case L2CAP_MODE_ERTM:
2479 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
2480 if (!l2cap_mode_supported(pi->mode, pi->conn->feat_mask))
2481 return -ECONNREFUSED;
2484 pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);
2489 if (pi->mode != rfc.mode) {
2490 result = L2CAP_CONF_UNACCEPT;
2491 rfc.mode = pi->mode;
2493 if (pi->num_conf_rsp == 1)
2494 return -ECONNREFUSED;
2496 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2497 sizeof(rfc), (unsigned long) &rfc);
2501 if (result == L2CAP_CONF_SUCCESS) {
2502 /* Configure output options and let the other side know
2503 * which ones we don't like. */
2505 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2506 result = L2CAP_CONF_UNACCEPT;
2509 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2511 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2514 case L2CAP_MODE_BASIC:
2515 pi->fcs = L2CAP_FCS_NONE;
2516 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2519 case L2CAP_MODE_ERTM:
2520 pi->remote_tx_win = rfc.txwin_size;
2521 pi->remote_max_tx = rfc.max_transmit;
2522 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2523 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2525 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2527 rfc.retrans_timeout =
2528 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2529 rfc.monitor_timeout =
2530 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2532 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2534 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2535 sizeof(rfc), (unsigned long) &rfc);
2539 case L2CAP_MODE_STREAMING:
2540 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2541 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2543 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2545 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2547 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2548 sizeof(rfc), (unsigned long) &rfc);
2553 result = L2CAP_CONF_UNACCEPT;
2555 memset(&rfc, 0, sizeof(rfc));
2556 rfc.mode = pi->mode;
2559 if (result == L2CAP_CONF_SUCCESS)
2560 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2562 rsp->scid = cpu_to_le16(pi->dcid);
2563 rsp->result = cpu_to_le16(result);
2564 rsp->flags = cpu_to_le16(0x0000);
2569 static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
2571 struct l2cap_pinfo *pi = l2cap_pi(sk);
2572 struct l2cap_conf_req *req = data;
2573 void *ptr = req->data;
2576 struct l2cap_conf_rfc rfc;
2578 BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);
2580 while (len >= L2CAP_CONF_OPT_SIZE) {
2581 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2584 case L2CAP_CONF_MTU:
2585 if (val < L2CAP_DEFAULT_MIN_MTU) {
2586 *result = L2CAP_CONF_UNACCEPT;
2587 pi->omtu = L2CAP_DEFAULT_MIN_MTU;
2590 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2593 case L2CAP_CONF_FLUSH_TO:
2595 l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,
2599 case L2CAP_CONF_RFC:
2600 if (olen == sizeof(rfc))
2601 memcpy(&rfc, (void *)val, olen);
2603 if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
2604 rfc.mode != pi->mode)
2605 return -ECONNREFUSED;
2607 pi->mode = rfc.mode;
2610 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2611 sizeof(rfc), (unsigned long) &rfc);
2616 if (*result == L2CAP_CONF_SUCCESS) {
2618 case L2CAP_MODE_ERTM:
2619 pi->remote_tx_win = rfc.txwin_size;
2620 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2621 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2622 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2624 case L2CAP_MODE_STREAMING:
2625 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2629 req->dcid = cpu_to_le16(pi->dcid);
2630 req->flags = cpu_to_le16(0x0000);
2635 static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
2637 struct l2cap_conf_rsp *rsp = data;
2638 void *ptr = rsp->data;
2640 BT_DBG("sk %p", sk);
2642 rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
2643 rsp->result = cpu_to_le16(result);
2644 rsp->flags = cpu_to_le16(flags);
2649 static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
2651 struct l2cap_pinfo *pi = l2cap_pi(sk);
2654 struct l2cap_conf_rfc rfc;
2656 BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);
2658 if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))
2661 while (len >= L2CAP_CONF_OPT_SIZE) {
2662 len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);
2665 case L2CAP_CONF_RFC:
2666 if (olen == sizeof(rfc))
2667 memcpy(&rfc, (void *)val, olen);
2674 case L2CAP_MODE_ERTM:
2675 pi->remote_tx_win = rfc.txwin_size;
2676 pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
2677 pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
2678 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2680 case L2CAP_MODE_STREAMING:
2681 pi->mps = le16_to_cpu(rfc.max_pdu_size);
2685 static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2687 struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;
2689 if (rej->reason != 0x0000)
2692 if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
2693 cmd->ident == conn->info_ident) {
2694 del_timer(&conn->info_timer);
2696 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
2697 conn->info_ident = 0;
2699 l2cap_conn_start(conn);
2705 static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2707 struct l2cap_chan_list *list = &conn->chan_list;
2708 struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
2709 struct l2cap_conn_rsp rsp;
2710 struct sock *sk, *parent;
2711 int result, status = L2CAP_CS_NO_INFO;
2713 u16 dcid = 0, scid = __le16_to_cpu(req->scid);
2714 __le16 psm = req->psm;
2716 BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);
2718 /* Check if we have socket listening on psm */
2719 parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
2721 result = L2CAP_CR_BAD_PSM;
2725 /* Check if the ACL is secure enough (if not SDP) */
2726 if (psm != cpu_to_le16(0x0001) &&
2727 !hci_conn_check_link_mode(conn->hcon)) {
2728 conn->disc_reason = 0x05;
2729 result = L2CAP_CR_SEC_BLOCK;
2733 result = L2CAP_CR_NO_MEM;
2735 /* Check for backlog size */
2736 if (sk_acceptq_is_full(parent)) {
2737 BT_DBG("backlog full %d", parent->sk_ack_backlog);
2741 sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);
2745 write_lock_bh(&list->lock);
2747 /* Check if we already have channel with that dcid */
2748 if (__l2cap_get_chan_by_dcid(list, scid)) {
2749 write_unlock_bh(&list->lock);
2750 sock_set_flag(sk, SOCK_ZAPPED);
2751 l2cap_sock_kill(sk);
2755 hci_conn_hold(conn->hcon);
2757 l2cap_sock_init(sk, parent);
2758 bacpy(&bt_sk(sk)->src, conn->src);
2759 bacpy(&bt_sk(sk)->dst, conn->dst);
2760 l2cap_pi(sk)->psm = psm;
2761 l2cap_pi(sk)->dcid = scid;
2763 __l2cap_chan_add(conn, sk, parent);
2764 dcid = l2cap_pi(sk)->scid;
2766 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
2768 l2cap_pi(sk)->ident = cmd->ident;
2770 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
2771 if (l2cap_check_security(sk)) {
2772 if (bt_sk(sk)->defer_setup) {
2773 sk->sk_state = BT_CONNECT2;
2774 result = L2CAP_CR_PEND;
2775 status = L2CAP_CS_AUTHOR_PEND;
2776 parent->sk_data_ready(parent, 0);
2778 sk->sk_state = BT_CONFIG;
2779 result = L2CAP_CR_SUCCESS;
2780 status = L2CAP_CS_NO_INFO;
2783 sk->sk_state = BT_CONNECT2;
2784 result = L2CAP_CR_PEND;
2785 status = L2CAP_CS_AUTHEN_PEND;
2788 sk->sk_state = BT_CONNECT2;
2789 result = L2CAP_CR_PEND;
2790 status = L2CAP_CS_NO_INFO;
2793 write_unlock_bh(&list->lock);
2796 bh_unlock_sock(parent);
2799 rsp.scid = cpu_to_le16(scid);
2800 rsp.dcid = cpu_to_le16(dcid);
2801 rsp.result = cpu_to_le16(result);
2802 rsp.status = cpu_to_le16(status);
2803 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);
2805 if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
2806 struct l2cap_info_req info;
2807 info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
2809 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
2810 conn->info_ident = l2cap_get_ident(conn);
2812 mod_timer(&conn->info_timer, jiffies +
2813 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
2815 l2cap_send_cmd(conn, conn->info_ident,
2816 L2CAP_INFO_REQ, sizeof(info), &info);
2822 static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2824 struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
2825 u16 scid, dcid, result, status;
2829 scid = __le16_to_cpu(rsp->scid);
2830 dcid = __le16_to_cpu(rsp->dcid);
2831 result = __le16_to_cpu(rsp->result);
2832 status = __le16_to_cpu(rsp->status);
2834 BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);
2837 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2841 sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);
2847 case L2CAP_CR_SUCCESS:
2848 sk->sk_state = BT_CONFIG;
2849 l2cap_pi(sk)->ident = 0;
2850 l2cap_pi(sk)->dcid = dcid;
2851 l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
2853 l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;
2855 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2856 l2cap_build_conf_req(sk, req), req);
2857 l2cap_pi(sk)->num_conf_req++;
2861 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
2865 l2cap_chan_del(sk, ECONNREFUSED);
2873 static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
2875 struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;
2881 dcid = __le16_to_cpu(req->dcid);
2882 flags = __le16_to_cpu(req->flags);
2884 BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);
2886 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
2890 if (sk->sk_state == BT_DISCONN)
2893 /* Reject if config buffer is too small. */
2894 len = cmd_len - sizeof(*req);
2895 if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
2896 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2897 l2cap_build_conf_rsp(sk, rsp,
2898 L2CAP_CONF_REJECT, flags), rsp);
2903 memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
2904 l2cap_pi(sk)->conf_len += len;
2906 if (flags & 0x0001) {
2907 /* Incomplete config. Send empty response. */
2908 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
2909 l2cap_build_conf_rsp(sk, rsp,
2910 L2CAP_CONF_SUCCESS, 0x0001), rsp);
2914 /* Complete config. */
2915 len = l2cap_parse_conf_req(sk, rsp);
2917 l2cap_send_disconn_req(conn, sk);
2921 l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
2922 l2cap_pi(sk)->num_conf_rsp++;
2924 /* Reset config buffer. */
2925 l2cap_pi(sk)->conf_len = 0;
2927 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))
2930 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
2931 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
2932 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
2933 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
2935 sk->sk_state = BT_CONNECTED;
2937 l2cap_pi(sk)->next_tx_seq = 0;
2938 l2cap_pi(sk)->expected_tx_seq = 0;
2939 __skb_queue_head_init(TX_QUEUE(sk));
2940 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
2941 l2cap_ertm_init(sk);
2943 l2cap_chan_ready(sk);
2947 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {
2949 l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
2950 l2cap_build_conf_req(sk, buf), buf);
2951 l2cap_pi(sk)->num_conf_req++;
2959 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
2961 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
2962 u16 scid, flags, result;
2964 int len = cmd->len - sizeof(*rsp);
2966 scid = __le16_to_cpu(rsp->scid);
2967 flags = __le16_to_cpu(rsp->flags);
2968 result = __le16_to_cpu(rsp->result);
2970 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
2971 scid, flags, result);
2973 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
2978 case L2CAP_CONF_SUCCESS:
2979 l2cap_conf_rfc_get(sk, rsp->data, len);
2982 case L2CAP_CONF_UNACCEPT:
2983 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
2986 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
2987 l2cap_send_disconn_req(conn, sk);
2991 /* throw out any old stored conf requests */
2992 result = L2CAP_CONF_SUCCESS;
2993 len = l2cap_parse_conf_rsp(sk, rsp->data,
2996 l2cap_send_disconn_req(conn, sk);
3000 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3001 L2CAP_CONF_REQ, len, req);
3002 l2cap_pi(sk)->num_conf_req++;
3003 if (result != L2CAP_CONF_SUCCESS)
3009 sk->sk_state = BT_DISCONN;
3010 sk->sk_err = ECONNRESET;
3011 l2cap_sock_set_timer(sk, HZ * 5);
3012 l2cap_send_disconn_req(conn, sk);
3019 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3021 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
3022 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3023 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3024 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3026 sk->sk_state = BT_CONNECTED;
3027 l2cap_pi(sk)->next_tx_seq = 0;
3028 l2cap_pi(sk)->expected_tx_seq = 0;
3029 __skb_queue_head_init(TX_QUEUE(sk));
3030 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3031 l2cap_ertm_init(sk);
3033 l2cap_chan_ready(sk);
3041 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3043 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3044 struct l2cap_disconn_rsp rsp;
3048 scid = __le16_to_cpu(req->scid);
3049 dcid = __le16_to_cpu(req->dcid);
3051 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
3053 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3057 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3058 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3059 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3061 sk->sk_shutdown = SHUTDOWN_MASK;
3063 skb_queue_purge(TX_QUEUE(sk));
3065 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3066 skb_queue_purge(SREJ_QUEUE(sk));
3067 del_timer(&l2cap_pi(sk)->retrans_timer);
3068 del_timer(&l2cap_pi(sk)->monitor_timer);
3069 del_timer(&l2cap_pi(sk)->ack_timer);
3072 l2cap_chan_del(sk, ECONNRESET);
3075 l2cap_sock_kill(sk);
3079 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3081 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3085 scid = __le16_to_cpu(rsp->scid);
3086 dcid = __le16_to_cpu(rsp->dcid);
3088 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3090 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3094 skb_queue_purge(TX_QUEUE(sk));
3096 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
3097 skb_queue_purge(SREJ_QUEUE(sk));
3098 del_timer(&l2cap_pi(sk)->retrans_timer);
3099 del_timer(&l2cap_pi(sk)->monitor_timer);
3100 del_timer(&l2cap_pi(sk)->ack_timer);
3103 l2cap_chan_del(sk, 0);
3106 l2cap_sock_kill(sk);
3110 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3112 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3115 type = __le16_to_cpu(req->type);
3117 BT_DBG("type 0x%4.4x", type);
3119 if (type == L2CAP_IT_FEAT_MASK) {
3121 u32 feat_mask = l2cap_feat_mask;
3122 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3123 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3124 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3126 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
3128 put_unaligned_le32(feat_mask, rsp->data);
3129 l2cap_send_cmd(conn, cmd->ident,
3130 L2CAP_INFO_RSP, sizeof(buf), buf);
3131 } else if (type == L2CAP_IT_FIXED_CHAN) {
3133 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3134 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3135 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3136 memcpy(buf + 4, l2cap_fixed_chan, 8);
3137 l2cap_send_cmd(conn, cmd->ident,
3138 L2CAP_INFO_RSP, sizeof(buf), buf);
3140 struct l2cap_info_rsp rsp;
3141 rsp.type = cpu_to_le16(type);
3142 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3143 l2cap_send_cmd(conn, cmd->ident,
3144 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
3150 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3152 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3155 type = __le16_to_cpu(rsp->type);
3156 result = __le16_to_cpu(rsp->result);
3158 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
3160 del_timer(&conn->info_timer);
3162 if (type == L2CAP_IT_FEAT_MASK) {
3163 conn->feat_mask = get_unaligned_le32(rsp->data);
3165 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3166 struct l2cap_info_req req;
3167 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3169 conn->info_ident = l2cap_get_ident(conn);
3171 l2cap_send_cmd(conn, conn->info_ident,
3172 L2CAP_INFO_REQ, sizeof(req), &req);
3174 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3175 conn->info_ident = 0;
3177 l2cap_conn_start(conn);
3179 } else if (type == L2CAP_IT_FIXED_CHAN) {
3180 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3181 conn->info_ident = 0;
3183 l2cap_conn_start(conn);
3189 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3191 u8 *data = skb->data;
3193 struct l2cap_cmd_hdr cmd;
3196 l2cap_raw_recv(conn, skb);
3198 while (len >= L2CAP_CMD_HDR_SIZE) {
3200 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3201 data += L2CAP_CMD_HDR_SIZE;
3202 len -= L2CAP_CMD_HDR_SIZE;
3204 cmd_len = le16_to_cpu(cmd.len);
3206 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
3208 if (cmd_len > len || !cmd.ident) {
3209 BT_DBG("corrupted command");
3214 case L2CAP_COMMAND_REJ:
3215 l2cap_command_rej(conn, &cmd, data);
3218 case L2CAP_CONN_REQ:
3219 err = l2cap_connect_req(conn, &cmd, data);
3222 case L2CAP_CONN_RSP:
3223 err = l2cap_connect_rsp(conn, &cmd, data);
3226 case L2CAP_CONF_REQ:
3227 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3230 case L2CAP_CONF_RSP:
3231 err = l2cap_config_rsp(conn, &cmd, data);
3234 case L2CAP_DISCONN_REQ:
3235 err = l2cap_disconnect_req(conn, &cmd, data);
3238 case L2CAP_DISCONN_RSP:
3239 err = l2cap_disconnect_rsp(conn, &cmd, data);
3242 case L2CAP_ECHO_REQ:
3243 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3246 case L2CAP_ECHO_RSP:
3249 case L2CAP_INFO_REQ:
3250 err = l2cap_information_req(conn, &cmd, data);
3253 case L2CAP_INFO_RSP:
3254 err = l2cap_information_rsp(conn, &cmd, data);
3258 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3264 struct l2cap_cmd_rej rej;
3265 BT_DBG("error %d", err);
3267 /* FIXME: Map err to a valid reason */
3268 rej.reason = cpu_to_le16(0);
3269 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
3279 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3281 u16 our_fcs, rcv_fcs;
3282 int hdr_size = L2CAP_HDR_SIZE + 2;
3284 if (pi->fcs == L2CAP_FCS_CRC16) {
3285 skb_trim(skb, skb->len - 2);
3286 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3287 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3289 if (our_fcs != rcv_fcs)
3295 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3297 struct l2cap_pinfo *pi = l2cap_pi(sk);
3300 pi->frames_sent = 0;
3301 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
3303 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3305 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3306 control |= L2CAP_SUPER_RCV_NOT_READY | L2CAP_CTRL_FINAL;
3307 l2cap_send_sframe(pi, control);
3308 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
3311 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY && pi->unacked_frames > 0)
3312 __mod_retrans_timer();
3314 l2cap_ertm_send(sk);
3316 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3317 pi->frames_sent == 0) {
3318 control |= L2CAP_SUPER_RCV_READY;
3319 l2cap_send_sframe(pi, control);
3323 static void l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3325 struct sk_buff *next_skb;
3327 bt_cb(skb)->tx_seq = tx_seq;
3328 bt_cb(skb)->sar = sar;
3330 next_skb = skb_peek(SREJ_QUEUE(sk));
3332 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3337 if (bt_cb(next_skb)->tx_seq > tx_seq) {
3338 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3342 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3345 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3347 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3350 static int l2cap_sar_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3352 struct l2cap_pinfo *pi = l2cap_pi(sk);
3353 struct sk_buff *_skb;
3356 switch (control & L2CAP_CTRL_SAR) {
3357 case L2CAP_SDU_UNSEGMENTED:
3358 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3363 err = sock_queue_rcv_skb(sk, skb);
3369 case L2CAP_SDU_START:
3370 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3375 pi->sdu_len = get_unaligned_le16(skb->data);
3378 if (pi->sdu_len > pi->imtu) {
3383 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3389 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3391 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3392 pi->partial_sdu_len = skb->len;
3396 case L2CAP_SDU_CONTINUE:
3397 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3400 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3402 pi->partial_sdu_len += skb->len;
3403 if (pi->partial_sdu_len > pi->sdu_len)
3411 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3414 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3416 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3417 pi->partial_sdu_len += skb->len;
3419 if (pi->partial_sdu_len > pi->imtu)
3422 if (pi->partial_sdu_len == pi->sdu_len) {
3423 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3424 err = sock_queue_rcv_skb(sk, _skb);
3439 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3441 struct sk_buff *skb;
3444 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3445 if (bt_cb(skb)->tx_seq != tx_seq)
3448 skb = skb_dequeue(SREJ_QUEUE(sk));
3449 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3450 l2cap_sar_reassembly_sdu(sk, skb, control);
3451 l2cap_pi(sk)->buffer_seq_srej =
3452 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3457 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3459 struct l2cap_pinfo *pi = l2cap_pi(sk);
3460 struct srej_list *l, *tmp;
3463 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3464 if (l->tx_seq == tx_seq) {
3469 control = L2CAP_SUPER_SELECT_REJECT;
3470 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3471 l2cap_send_sframe(pi, control);
3473 list_add_tail(&l->list, SREJ_LIST(sk));
3477 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3479 struct l2cap_pinfo *pi = l2cap_pi(sk);
3480 struct srej_list *new;
3483 while (tx_seq != pi->expected_tx_seq) {
3484 control = L2CAP_SUPER_SELECT_REJECT;
3485 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3486 l2cap_send_sframe(pi, control);
3488 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3489 new->tx_seq = pi->expected_tx_seq++;
3490 list_add_tail(&new->list, SREJ_LIST(sk));
3492 pi->expected_tx_seq++;
3495 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3497 struct l2cap_pinfo *pi = l2cap_pi(sk);
3498 u8 tx_seq = __get_txseq(rx_control);
3499 u8 req_seq = __get_reqseq(rx_control);
3500 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3501 int num_to_ack = (pi->tx_win/6) + 1;
3504 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3506 if (L2CAP_CTRL_FINAL & rx_control) {
3507 del_timer(&pi->monitor_timer);
3508 if (pi->unacked_frames > 0)
3509 __mod_retrans_timer();
3510 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3513 pi->expected_ack_seq = req_seq;
3514 l2cap_drop_acked_frames(sk);
3516 if (tx_seq == pi->expected_tx_seq)
3519 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3520 struct srej_list *first;
3522 first = list_first_entry(SREJ_LIST(sk),
3523 struct srej_list, list);
3524 if (tx_seq == first->tx_seq) {
3525 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3526 l2cap_check_srej_gap(sk, tx_seq);
3528 list_del(&first->list);
3531 if (list_empty(SREJ_LIST(sk))) {
3532 pi->buffer_seq = pi->buffer_seq_srej;
3533 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3537 struct srej_list *l;
3538 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3540 list_for_each_entry(l, SREJ_LIST(sk), list) {
3541 if (l->tx_seq == tx_seq) {
3542 l2cap_resend_srejframe(sk, tx_seq);
3546 l2cap_send_srejframe(sk, tx_seq);
3549 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3551 INIT_LIST_HEAD(SREJ_LIST(sk));
3552 pi->buffer_seq_srej = pi->buffer_seq;
3554 __skb_queue_head_init(SREJ_QUEUE(sk));
3555 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3557 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3559 l2cap_send_srejframe(sk, tx_seq);
3564 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3566 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3567 bt_cb(skb)->tx_seq = tx_seq;
3568 bt_cb(skb)->sar = sar;
3569 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3573 if (rx_control & L2CAP_CTRL_FINAL) {
3574 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3575 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3577 if (!skb_queue_empty(TX_QUEUE(sk)))
3578 sk->sk_send_head = TX_QUEUE(sk)->next;
3579 pi->next_tx_seq = pi->expected_ack_seq;
3580 l2cap_ertm_send(sk);
3584 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3586 err = l2cap_sar_reassembly_sdu(sk, skb, rx_control);
3592 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
3593 if (pi->num_acked == num_to_ack - 1)
3599 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
3601 struct l2cap_pinfo *pi = l2cap_pi(sk);
3603 pi->expected_ack_seq = __get_reqseq(rx_control);
3604 l2cap_drop_acked_frames(sk);
3606 if (rx_control & L2CAP_CTRL_POLL) {
3607 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3608 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3609 (pi->unacked_frames > 0))
3610 __mod_retrans_timer();
3612 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3613 l2cap_send_srejtail(sk);
3615 l2cap_send_i_or_rr_or_rnr(sk);
3616 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3619 } else if (rx_control & L2CAP_CTRL_FINAL) {
3620 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3622 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3623 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3625 if (!skb_queue_empty(TX_QUEUE(sk)))
3626 sk->sk_send_head = TX_QUEUE(sk)->next;
3627 pi->next_tx_seq = pi->expected_ack_seq;
3628 l2cap_ertm_send(sk);
3632 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
3633 (pi->unacked_frames > 0))
3634 __mod_retrans_timer();
3636 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3637 if (pi->conn_state & L2CAP_CONN_SREJ_SENT)
3640 l2cap_ertm_send(sk);
3644 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
3646 struct l2cap_pinfo *pi = l2cap_pi(sk);
3647 u8 tx_seq = __get_reqseq(rx_control);
3649 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3651 pi->expected_ack_seq = tx_seq;
3652 l2cap_drop_acked_frames(sk);
3654 if (rx_control & L2CAP_CTRL_FINAL) {
3655 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3656 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3658 if (!skb_queue_empty(TX_QUEUE(sk)))
3659 sk->sk_send_head = TX_QUEUE(sk)->next;
3660 pi->next_tx_seq = pi->expected_ack_seq;
3661 l2cap_ertm_send(sk);
3664 if (!skb_queue_empty(TX_QUEUE(sk)))
3665 sk->sk_send_head = TX_QUEUE(sk)->next;
3666 pi->next_tx_seq = pi->expected_ack_seq;
3667 l2cap_ertm_send(sk);
3669 if (pi->conn_state & L2CAP_CONN_WAIT_F)
3670 pi->conn_state |= L2CAP_CONN_REJ_ACT;
3673 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
3675 struct l2cap_pinfo *pi = l2cap_pi(sk);
3676 u8 tx_seq = __get_reqseq(rx_control);
3678 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
3680 if (rx_control & L2CAP_CTRL_POLL) {
3681 pi->expected_ack_seq = tx_seq;
3682 l2cap_drop_acked_frames(sk);
3683 l2cap_retransmit_frame(sk, tx_seq);
3684 l2cap_ertm_send(sk);
3685 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3686 pi->srej_save_reqseq = tx_seq;
3687 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3689 } else if (rx_control & L2CAP_CTRL_FINAL) {
3690 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
3691 pi->srej_save_reqseq == tx_seq)
3692 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
3694 l2cap_retransmit_frame(sk, tx_seq);
3696 l2cap_retransmit_frame(sk, tx_seq);
3697 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
3698 pi->srej_save_reqseq = tx_seq;
3699 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
3704 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
3706 struct l2cap_pinfo *pi = l2cap_pi(sk);
3707 u8 tx_seq = __get_reqseq(rx_control);
3709 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
3710 pi->expected_ack_seq = tx_seq;
3711 l2cap_drop_acked_frames(sk);
3713 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
3714 del_timer(&pi->retrans_timer);
3715 if (rx_control & L2CAP_CTRL_POLL)
3716 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
3720 if (rx_control & L2CAP_CTRL_POLL)
3721 l2cap_send_srejtail(sk);
3723 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
3726 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3728 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
3730 if (L2CAP_CTRL_FINAL & rx_control) {
3731 del_timer(&l2cap_pi(sk)->monitor_timer);
3732 if (l2cap_pi(sk)->unacked_frames > 0)
3733 __mod_retrans_timer();
3734 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
3737 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
3738 case L2CAP_SUPER_RCV_READY:
3739 l2cap_data_channel_rrframe(sk, rx_control);
3742 case L2CAP_SUPER_REJECT:
3743 l2cap_data_channel_rejframe(sk, rx_control);
3746 case L2CAP_SUPER_SELECT_REJECT:
3747 l2cap_data_channel_srejframe(sk, rx_control);
3750 case L2CAP_SUPER_RCV_NOT_READY:
3751 l2cap_data_channel_rnrframe(sk, rx_control);
3759 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
3762 struct l2cap_pinfo *pi;
3764 u8 tx_seq, req_seq, next_tx_seq_offset, req_seq_offset;
3766 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
3768 BT_DBG("unknown cid 0x%4.4x", cid);
3774 BT_DBG("sk %p, len %d", sk, skb->len);
3776 if (sk->sk_state != BT_CONNECTED)
3780 case L2CAP_MODE_BASIC:
3781 /* If socket recv buffers overflows we drop data here
3782 * which is *bad* because L2CAP has to be reliable.
3783 * But we don't have any other choice. L2CAP doesn't
3784 * provide flow control mechanism. */
3786 if (pi->imtu < skb->len)
3789 if (!sock_queue_rcv_skb(sk, skb))
3793 case L2CAP_MODE_ERTM:
3794 control = get_unaligned_le16(skb->data);
3798 if (__is_sar_start(control))
3801 if (pi->fcs == L2CAP_FCS_CRC16)
3805 * We can just drop the corrupted I-frame here.
3806 * Receiver will miss it and start proper recovery
3807 * procedures and ask retransmission.
3812 if (l2cap_check_fcs(pi, skb))
3815 req_seq = __get_reqseq(control);
3816 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
3817 if (req_seq_offset < 0)
3818 req_seq_offset += 64;
3820 next_tx_seq_offset =
3821 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
3822 if (next_tx_seq_offset < 0)
3823 next_tx_seq_offset += 64;
3825 /* check for invalid req-seq */
3826 if (req_seq_offset > next_tx_seq_offset) {
3827 l2cap_send_disconn_req(pi->conn, sk);
3831 if (__is_iframe(control)) {
3835 l2cap_data_channel_iframe(sk, control, skb);
3840 l2cap_data_channel_sframe(sk, control, skb);
3845 case L2CAP_MODE_STREAMING:
3846 control = get_unaligned_le16(skb->data);
3850 if (__is_sar_start(control))
3853 if (pi->fcs == L2CAP_FCS_CRC16)
3856 if (len > pi->mps || len < 4 || __is_sframe(control))
3859 if (l2cap_check_fcs(pi, skb))
3862 tx_seq = __get_txseq(control);
3864 if (pi->expected_tx_seq == tx_seq)
3865 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3867 pi->expected_tx_seq = (tx_seq + 1) % 64;
3869 l2cap_sar_reassembly_sdu(sk, skb, control);
3874 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
3888 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
3892 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
3896 BT_DBG("sk %p, len %d", sk, skb->len);
3898 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
3901 if (l2cap_pi(sk)->imtu < skb->len)
3904 if (!sock_queue_rcv_skb(sk, skb))
3916 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
3918 struct l2cap_hdr *lh = (void *) skb->data;
3922 skb_pull(skb, L2CAP_HDR_SIZE);
3923 cid = __le16_to_cpu(lh->cid);
3924 len = __le16_to_cpu(lh->len);
3926 if (len != skb->len) {
3931 BT_DBG("len %d, cid 0x%4.4x", len, cid);
3934 case L2CAP_CID_SIGNALING:
3935 l2cap_sig_channel(conn, skb);
3938 case L2CAP_CID_CONN_LESS:
3939 psm = get_unaligned_le16(skb->data);
3941 l2cap_conless_channel(conn, psm, skb);
3945 l2cap_data_channel(conn, cid, skb);
3950 /* ---- L2CAP interface with lower layer (HCI) ---- */
3952 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
3954 int exact = 0, lm1 = 0, lm2 = 0;
3955 register struct sock *sk;
3956 struct hlist_node *node;
3958 if (type != ACL_LINK)
3961 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
3963 /* Find listening sockets and check their link_mode */
3964 read_lock(&l2cap_sk_list.lock);
3965 sk_for_each(sk, node, &l2cap_sk_list.head) {
3966 if (sk->sk_state != BT_LISTEN)
3969 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
3970 lm1 |= HCI_LM_ACCEPT;
3971 if (l2cap_pi(sk)->role_switch)
3972 lm1 |= HCI_LM_MASTER;
3974 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
3975 lm2 |= HCI_LM_ACCEPT;
3976 if (l2cap_pi(sk)->role_switch)
3977 lm2 |= HCI_LM_MASTER;
3980 read_unlock(&l2cap_sk_list.lock);
3982 return exact ? lm1 : lm2;
3985 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
3987 struct l2cap_conn *conn;
3989 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
3991 if (hcon->type != ACL_LINK)
3995 conn = l2cap_conn_add(hcon, status);
3997 l2cap_conn_ready(conn);
3999 l2cap_conn_del(hcon, bt_err(status));
4004 static int l2cap_disconn_ind(struct hci_conn *hcon)
4006 struct l2cap_conn *conn = hcon->l2cap_data;
4008 BT_DBG("hcon %p", hcon);
4010 if (hcon->type != ACL_LINK || !conn)
4013 return conn->disc_reason;
4016 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4018 BT_DBG("hcon %p reason %d", hcon, reason);
4020 if (hcon->type != ACL_LINK)
4023 l2cap_conn_del(hcon, bt_err(reason));
4028 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4030 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4033 if (encrypt == 0x00) {
4034 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4035 l2cap_sock_clear_timer(sk);
4036 l2cap_sock_set_timer(sk, HZ * 5);
4037 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4038 __l2cap_sock_close(sk, ECONNREFUSED);
4040 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4041 l2cap_sock_clear_timer(sk);
4045 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4047 struct l2cap_chan_list *l;
4048 struct l2cap_conn *conn = hcon->l2cap_data;
4054 l = &conn->chan_list;
4056 BT_DBG("conn %p", conn);
4058 read_lock(&l->lock);
4060 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
4063 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4068 if (!status && (sk->sk_state == BT_CONNECTED ||
4069 sk->sk_state == BT_CONFIG)) {
4070 l2cap_check_encryption(sk, encrypt);
4075 if (sk->sk_state == BT_CONNECT) {
4077 struct l2cap_conn_req req;
4078 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4079 req.psm = l2cap_pi(sk)->psm;
4081 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4083 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4084 L2CAP_CONN_REQ, sizeof(req), &req);
4086 l2cap_sock_clear_timer(sk);
4087 l2cap_sock_set_timer(sk, HZ / 10);
4089 } else if (sk->sk_state == BT_CONNECT2) {
4090 struct l2cap_conn_rsp rsp;
4094 sk->sk_state = BT_CONFIG;
4095 result = L2CAP_CR_SUCCESS;
4097 sk->sk_state = BT_DISCONN;
4098 l2cap_sock_set_timer(sk, HZ / 10);
4099 result = L2CAP_CR_SEC_BLOCK;
4102 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4103 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4104 rsp.result = cpu_to_le16(result);
4105 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4106 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4107 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4113 read_unlock(&l->lock);
4118 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4120 struct l2cap_conn *conn = hcon->l2cap_data;
4122 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4125 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4127 if (flags & ACL_START) {
4128 struct l2cap_hdr *hdr;
4132 BT_ERR("Unexpected start frame (len %d)", skb->len);
4133 kfree_skb(conn->rx_skb);
4134 conn->rx_skb = NULL;
4136 l2cap_conn_unreliable(conn, ECOMM);
4140 BT_ERR("Frame is too short (len %d)", skb->len);
4141 l2cap_conn_unreliable(conn, ECOMM);
4145 hdr = (struct l2cap_hdr *) skb->data;
4146 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4148 if (len == skb->len) {
4149 /* Complete frame received */
4150 l2cap_recv_frame(conn, skb);
4154 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4156 if (skb->len > len) {
4157 BT_ERR("Frame is too long (len %d, expected len %d)",
4159 l2cap_conn_unreliable(conn, ECOMM);
4163 /* Allocate skb for the complete frame (with header) */
4164 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4168 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4170 conn->rx_len = len - skb->len;
4172 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4174 if (!conn->rx_len) {
4175 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4176 l2cap_conn_unreliable(conn, ECOMM);
4180 if (skb->len > conn->rx_len) {
4181 BT_ERR("Fragment is too long (len %d, expected %d)",
4182 skb->len, conn->rx_len);
4183 kfree_skb(conn->rx_skb);
4184 conn->rx_skb = NULL;
4186 l2cap_conn_unreliable(conn, ECOMM);
4190 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4192 conn->rx_len -= skb->len;
4194 if (!conn->rx_len) {
4195 /* Complete frame received */
4196 l2cap_recv_frame(conn, conn->rx_skb);
4197 conn->rx_skb = NULL;
4206 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4209 struct hlist_node *node;
4211 read_lock_bh(&l2cap_sk_list.lock);
4213 sk_for_each(sk, node, &l2cap_sk_list.head) {
4214 struct l2cap_pinfo *pi = l2cap_pi(sk);
4216 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4217 batostr(&bt_sk(sk)->src),
4218 batostr(&bt_sk(sk)->dst),
4219 sk->sk_state, __le16_to_cpu(pi->psm),
4221 pi->imtu, pi->omtu, pi->sec_level);
4224 read_unlock_bh(&l2cap_sk_list.lock);
4229 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4231 return single_open(file, l2cap_debugfs_show, inode->i_private);
4234 static const struct file_operations l2cap_debugfs_fops = {
4235 .open = l2cap_debugfs_open,
4237 .llseek = seq_lseek,
4238 .release = single_release,
4241 static struct dentry *l2cap_debugfs;
4243 static const struct proto_ops l2cap_sock_ops = {
4244 .family = PF_BLUETOOTH,
4245 .owner = THIS_MODULE,
4246 .release = l2cap_sock_release,
4247 .bind = l2cap_sock_bind,
4248 .connect = l2cap_sock_connect,
4249 .listen = l2cap_sock_listen,
4250 .accept = l2cap_sock_accept,
4251 .getname = l2cap_sock_getname,
4252 .sendmsg = l2cap_sock_sendmsg,
4253 .recvmsg = l2cap_sock_recvmsg,
4254 .poll = bt_sock_poll,
4255 .ioctl = bt_sock_ioctl,
4256 .mmap = sock_no_mmap,
4257 .socketpair = sock_no_socketpair,
4258 .shutdown = l2cap_sock_shutdown,
4259 .setsockopt = l2cap_sock_setsockopt,
4260 .getsockopt = l2cap_sock_getsockopt
4263 static const struct net_proto_family l2cap_sock_family_ops = {
4264 .family = PF_BLUETOOTH,
4265 .owner = THIS_MODULE,
4266 .create = l2cap_sock_create,
4269 static struct hci_proto l2cap_hci_proto = {
4271 .id = HCI_PROTO_L2CAP,
4272 .connect_ind = l2cap_connect_ind,
4273 .connect_cfm = l2cap_connect_cfm,
4274 .disconn_ind = l2cap_disconn_ind,
4275 .disconn_cfm = l2cap_disconn_cfm,
4276 .security_cfm = l2cap_security_cfm,
4277 .recv_acldata = l2cap_recv_acldata
4280 static int __init l2cap_init(void)
4284 err = proto_register(&l2cap_proto, 0);
4288 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4290 BT_ERR("L2CAP socket registration failed");
4294 err = hci_register_proto(&l2cap_hci_proto);
4296 BT_ERR("L2CAP protocol registration failed");
4297 bt_sock_unregister(BTPROTO_L2CAP);
4302 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4303 bt_debugfs, NULL, &l2cap_debugfs_fops);
4305 BT_ERR("Failed to create L2CAP debug file");
4308 BT_INFO("L2CAP ver %s", VERSION);
4309 BT_INFO("L2CAP socket layer initialized");
4314 proto_unregister(&l2cap_proto);
4318 static void __exit l2cap_exit(void)
4320 debugfs_remove(l2cap_debugfs);
4322 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4323 BT_ERR("L2CAP socket unregistration failed");
4325 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4326 BT_ERR("L2CAP protocol unregistration failed");
4328 proto_unregister(&l2cap_proto);
4331 void l2cap_load(void)
4333 /* Dummy function to trigger automatic L2CAP module loading by
4334 * other modules that use L2CAP sockets but don't use any other
4335 * symbols from it. */
4338 EXPORT_SYMBOL(l2cap_load);
4340 module_init(l2cap_init);
4341 module_exit(l2cap_exit);
4343 module_param(enable_ertm, bool, 0644);
4344 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4346 module_param(max_transmit, uint, 0644);
4347 MODULE_PARM_DESC(max_transmit, "Max transmit value (default = 3)");
4349 module_param(tx_window, uint, 0644);
4350 MODULE_PARM_DESC(tx_window, "Transmission window size value (default = 63)");
4352 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4353 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4354 MODULE_VERSION(VERSION);
4355 MODULE_LICENSE("GPL");
4356 MODULE_ALIAS("bt-proto-0");