2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth L2CAP core and sockets. */
27 #include <linux/module.h>
29 #include <linux/types.h>
30 #include <linux/capability.h>
31 #include <linux/errno.h>
32 #include <linux/kernel.h>
33 #include <linux/sched.h>
34 #include <linux/slab.h>
35 #include <linux/poll.h>
36 #include <linux/fcntl.h>
37 #include <linux/init.h>
38 #include <linux/interrupt.h>
39 #include <linux/socket.h>
40 #include <linux/skbuff.h>
41 #include <linux/list.h>
42 #include <linux/device.h>
43 #include <linux/debugfs.h>
44 #include <linux/seq_file.h>
45 #include <linux/uaccess.h>
46 #include <linux/crc16.h>
49 #include <asm/system.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54 #include <net/bluetooth/l2cap.h>
/* Module-wide state and forward declarations. */
56 #define VERSION "2.14"
/* ERTM is disabled by default; checked in l2cap_sock_init(). */
58 static int enable_ertm = 0;
60 static u32 l2cap_feat_mask = L2CAP_FEAT_FIXED_CHAN;
61 static u8 l2cap_fixed_chan[8] = { 0x02, };
63 static const struct proto_ops l2cap_sock_ops;
/* Workqueue; presumably services l2cap_busy_work() — TODO confirm against its definition. */
65 static struct workqueue_struct *_busy_wq;
/* Global list of all L2CAP sockets, protected by its rwlock. */
67 static struct bt_sock_list l2cap_sk_list = {
68 .lock = __RW_LOCK_UNLOCKED(l2cap_sk_list.lock)
/* Forward declarations for routines defined later in this file. */
71 static void l2cap_busy_work(struct work_struct *work);
73 static void __l2cap_sock_close(struct sock *sk, int reason);
74 static void l2cap_sock_close(struct sock *sk);
75 static void l2cap_sock_kill(struct sock *sk);
77 static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
78 u8 code, u8 ident, u16 dlen, void *data);
80 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb);
82 /* ---- L2CAP timers ---- */
/* sk_timer callback: pick an errno from the socket state and close
 * the socket with it via __l2cap_sock_close(). */
83 static void l2cap_sock_timeout(unsigned long arg)
85 struct sock *sk = (struct sock *) arg;
88 BT_DBG("sock %p state %d", sk, sk->sk_state);
/* A timeout while connected/configuring, or while connecting above the
 * SDP security level, is surfaced as ECONNREFUSED; 'reason' otherwise
 * keeps its default set on a line not visible here — TODO confirm. */
92 if (sk->sk_state == BT_CONNECTED || sk->sk_state == BT_CONFIG)
93 reason = ECONNREFUSED;
94 else if (sk->sk_state == BT_CONNECT &&
95 l2cap_pi(sk)->sec_level != BT_SECURITY_SDP)
96 reason = ECONNREFUSED;
100 __l2cap_sock_close(sk, reason);
/* Arm the socket timer to fire 'timeout' jiffies from now. */
108 static void l2cap_sock_set_timer(struct sock *sk, long timeout)
110 BT_DBG("sk %p state %d timeout %ld", sk, sk->sk_state, timeout);
111 sk_reset_timer(sk, &sk->sk_timer, jiffies + timeout);
/* Cancel a pending socket timer, if armed. */
114 static void l2cap_sock_clear_timer(struct sock *sk)
116 BT_DBG("sock %p state %d", sk, sk->sk_state);
117 sk_stop_timer(sk, &sk->sk_timer);
120 /* ---- L2CAP channels ---- */
/* Linear scan of the connection's channel list for a matching
 * destination CID. Caller must hold the list lock. */
121 static struct sock *__l2cap_get_chan_by_dcid(struct l2cap_chan_list *l, u16 cid)
124 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
125 if (l2cap_pi(s)->dcid == cid)
/* Linear scan of the connection's channel list for a matching source
 * CID. Caller must hold the list lock. */
131 static struct sock *__l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
134 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
135 if (l2cap_pi(s)->scid == cid)
141 /* Find channel with given SCID.
142 * Returns locked socket */
/* Takes the list read lock around the unlocked lookup; the found
 * socket is bh-locked before the read lock is dropped (locking line
 * not visible in this view). */
143 static inline struct sock *l2cap_get_chan_by_scid(struct l2cap_chan_list *l, u16 cid)
147 s = __l2cap_get_chan_by_scid(l, cid);
150 read_unlock(&l->lock);
/* Find the channel waiting on a signalling command with this ident.
 * Caller must hold the list lock. */
154 static struct sock *__l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
157 for (s = l->head; s; s = l2cap_pi(s)->next_c) {
158 if (l2cap_pi(s)->ident == ident)
/* Locked wrapper around __l2cap_get_chan_by_ident(). */
164 static inline struct sock *l2cap_get_chan_by_ident(struct l2cap_chan_list *l, u8 ident)
168 s = __l2cap_get_chan_by_ident(l, ident);
171 read_unlock(&l->lock);
/* Allocate the first free dynamic CID in [L2CAP_CID_DYN_START,
 * L2CAP_CID_DYN_END). Caller must hold the list lock. */
175 static u16 l2cap_alloc_cid(struct l2cap_chan_list *l)
177 u16 cid = L2CAP_CID_DYN_START;
179 for (; cid < L2CAP_CID_DYN_END; cid++) {
180 if (!__l2cap_get_chan_by_scid(l, cid))
/* Push sk onto the head of the doubly-linked channel list.
 * Caller must hold the list write lock. */
187 static inline void __l2cap_chan_link(struct l2cap_chan_list *l, struct sock *sk)
192 l2cap_pi(l->head)->prev_c = sk;
194 l2cap_pi(sk)->next_c = l->head;
195 l2cap_pi(sk)->prev_c = NULL;
/* Unlink sk from the channel list, fixing up neighbours (and the list
 * head, handled on lines not visible here) under the write lock. */
199 static inline void l2cap_chan_unlink(struct l2cap_chan_list *l, struct sock *sk)
201 struct sock *next = l2cap_pi(sk)->next_c, *prev = l2cap_pi(sk)->prev_c;
203 write_lock_bh(&l->lock);
208 l2cap_pi(next)->prev_c = prev;
210 l2cap_pi(prev)->next_c = next;
211 write_unlock_bh(&l->lock);
/* Attach sk to a connection: assign source/destination CIDs according
 * to socket type and link it into the channel list. Caller holds the
 * channel-list write lock (see l2cap_chan_add). */
216 static void __l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
218 struct l2cap_chan_list *l = &conn->chan_list;
220 BT_DBG("conn %p, psm 0x%2.2x, dcid 0x%4.4x", conn,
221 l2cap_pi(sk)->psm, l2cap_pi(sk)->dcid);
/* 0x13 — presumably HCI "remote user terminated connection"; same
 * default is set in l2cap_conn_add(). */
223 conn->disc_reason = 0x13;
225 l2cap_pi(sk)->conn = conn;
227 if (sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM) {
228 /* Alloc CID for connection-oriented socket */
229 l2cap_pi(sk)->scid = l2cap_alloc_cid(l);
230 } else if (sk->sk_type == SOCK_DGRAM) {
231 /* Connectionless socket */
232 l2cap_pi(sk)->scid = L2CAP_CID_CONN_LESS;
233 l2cap_pi(sk)->dcid = L2CAP_CID_CONN_LESS;
234 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
236 /* Raw socket can send/recv signalling messages only */
237 l2cap_pi(sk)->scid = L2CAP_CID_SIGNALING;
238 l2cap_pi(sk)->dcid = L2CAP_CID_SIGNALING;
239 l2cap_pi(sk)->omtu = L2CAP_DEFAULT_MTU;
242 __l2cap_chan_link(l, sk);
/* If spawned from a listening parent, queue on its accept queue. */
245 bt_accept_enqueue(parent, sk);
/* Tear a channel down: unlink from the connection, mark the socket
 * closed/zapped, flush every ERTM queue and timer, and wake the
 * accepting parent if there is one. */
249 * Must be called on the locked socket. */
250 static void l2cap_chan_del(struct sock *sk, int err)
252 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
253 struct sock *parent = bt_sk(sk)->parent;
255 l2cap_sock_clear_timer(sk);
257 BT_DBG("sk %p, conn %p, err %d", sk, conn, err);
260 /* Unlink from channel list */
261 l2cap_chan_unlink(&conn->chan_list, sk);
262 l2cap_pi(sk)->conn = NULL;
/* Drop the hci_conn reference taken when the channel was added. */
263 hci_conn_put(conn->hcon);
266 sk->sk_state = BT_CLOSED;
267 sock_set_flag(sk, SOCK_ZAPPED);
273 bt_accept_unlink(sk);
274 parent->sk_data_ready(parent, 0);
276 sk->sk_state_change(sk);
278 skb_queue_purge(TX_QUEUE(sk));
/* ERTM keeps extra state: kill its timers, purge SREJ/busy queues and
 * free the pending srej_list entries. */
280 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
281 struct srej_list *l, *tmp;
283 del_timer(&l2cap_pi(sk)->retrans_timer);
284 del_timer(&l2cap_pi(sk)->monitor_timer);
285 del_timer(&l2cap_pi(sk)->ack_timer);
287 skb_queue_purge(SREJ_QUEUE(sk));
288 skb_queue_purge(BUSY_QUEUE(sk));
290 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
297 /* Service level security */
/* Map the channel's PSM and security level to an HCI authentication
 * requirement and ask the HCI layer to enforce it. PSM 0x0001 (SDP)
 * never requires bonding. */
298 static inline int l2cap_check_security(struct sock *sk)
300 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
303 if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
304 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
305 auth_type = HCI_AT_NO_BONDING_MITM;
307 auth_type = HCI_AT_NO_BONDING;
/* SDP traffic is capped at the dedicated SDP security level. */
309 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
310 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
312 switch (l2cap_pi(sk)->sec_level) {
313 case BT_SECURITY_HIGH:
314 auth_type = HCI_AT_GENERAL_BONDING_MITM;
316 case BT_SECURITY_MEDIUM:
317 auth_type = HCI_AT_GENERAL_BONDING;
320 auth_type = HCI_AT_NO_BONDING;
325 return hci_conn_security(conn->hcon, l2cap_pi(sk)->sec_level,
/* Allocate the next signalling-command identifier for this connection,
 * wrapping within the kernel's 1-128 range, under conn->lock. */
329 static inline u8 l2cap_get_ident(struct l2cap_conn *conn)
333 /* Get next available identificator.
334 * 1 - 128 are used by kernel.
335 * 129 - 199 are reserved.
336 * 200 - 254 are used by utilities like l2ping, etc.
339 spin_lock_bh(&conn->lock);
341 if (++conn->tx_ident > 128)
346 spin_unlock_bh(&conn->lock);
/* Build a signalling command PDU and push it out over ACL. */
351 static inline void l2cap_send_cmd(struct l2cap_conn *conn, u8 ident, u8 code, u16 len, void *data)
353 struct sk_buff *skb = l2cap_build_cmd(conn, code, ident, len, data);
355 BT_DBG("code 0x%2.2x", code);
360 hci_send_acl(conn->hcon, skb, 0);
/* Build and transmit an ERTM S-frame carrying 'control', folding in
 * any pending Final/Poll bit and appending an FCS when configured.
 * No-op unless the socket is connected. */
363 static inline void l2cap_send_sframe(struct l2cap_pinfo *pi, u16 control)
366 struct l2cap_hdr *lh;
367 struct l2cap_conn *conn = pi->conn;
368 struct sock *sk = (struct sock *)pi;
/* hlen = L2CAP header + 2-byte control field (FCS adjustment happens
 * on a line not visible in this view — TODO confirm). */
369 int count, hlen = L2CAP_HDR_SIZE + 2;
371 if (sk->sk_state != BT_CONNECTED)
374 if (pi->fcs == L2CAP_FCS_CRC16)
377 BT_DBG("pi %p, control 0x%2.2x", pi, control);
379 count = min_t(unsigned int, conn->mtu, hlen);
380 control |= L2CAP_CTRL_FRAME_TYPE;
/* Consume a pending F-bit: it is sent exactly once. */
382 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
383 control |= L2CAP_CTRL_FINAL;
384 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
/* Likewise the pending P-bit. */
387 if (pi->conn_state & L2CAP_CONN_SEND_PBIT) {
388 control |= L2CAP_CTRL_POLL;
389 pi->conn_state &= ~L2CAP_CONN_SEND_PBIT;
392 skb = bt_skb_alloc(count, GFP_ATOMIC);
396 lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
397 lh->len = cpu_to_le16(hlen - L2CAP_HDR_SIZE);
398 lh->cid = cpu_to_le16(pi->dcid);
399 put_unaligned_le16(control, skb_put(skb, 2));
/* FCS covers everything before the 2-byte FCS field itself. */
401 if (pi->fcs == L2CAP_FCS_CRC16) {
402 u16 fcs = crc16(0, (u8 *)lh, count - 2);
403 put_unaligned_le16(fcs, skb_put(skb, 2));
406 hci_send_acl(pi->conn->hcon, skb, 0);
/* Send an RR, or an RNR when we are locally busy, acknowledging up to
 * buffer_seq. Sets RNR_SENT so the busy condition is cleared later. */
409 static inline void l2cap_send_rr_or_rnr(struct l2cap_pinfo *pi, u16 control)
411 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
412 control |= L2CAP_SUPER_RCV_NOT_READY;
413 pi->conn_state |= L2CAP_CONN_RNR_SENT;
415 control |= L2CAP_SUPER_RCV_READY;
417 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
419 l2cap_send_sframe(pi, control);
/* True when no Connect Request is outstanding for this channel. */
422 static inline int __l2cap_no_conn_pending(struct sock *sk)
424 return !(l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND);
/* Kick off channel establishment: if the remote feature mask is known
 * send a Connect Request (security permitting); otherwise send an
 * Information Request first and wait for l2cap_info_timeout/response. */
427 static void l2cap_do_start(struct sock *sk)
429 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
431 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) {
/* Feature-mask exchange still in flight: establishment resumes from
 * l2cap_conn_start() once it completes. */
432 if (!(conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE))
435 if (l2cap_check_security(sk) && __l2cap_no_conn_pending(sk)) {
436 struct l2cap_conn_req req;
437 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
438 req.psm = l2cap_pi(sk)->psm;
440 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
441 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
443 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
444 L2CAP_CONN_REQ, sizeof(req), &req);
447 struct l2cap_info_req req;
448 req.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
450 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
451 conn->info_ident = l2cap_get_ident(conn);
/* Bound the wait for the Information Response. */
453 mod_timer(&conn->info_timer, jiffies +
454 msecs_to_jiffies(L2CAP_INFO_TIMEOUT));
456 l2cap_send_cmd(conn, conn->info_ident,
457 L2CAP_INFO_REQ, sizeof(req), &req);
/* Non-zero when 'mode' is advertised both locally and by the peer
 * ('feat_mask'). Local ERTM/streaming support is gated elsewhere
 * (the enable_ertm check is on a line not visible here). */
461 static inline int l2cap_mode_supported(__u8 mode, __u32 feat_mask)
463 u32 local_feat_mask = l2cap_feat_mask;
465 local_feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING;
468 case L2CAP_MODE_ERTM:
469 return L2CAP_FEAT_ERTM & feat_mask & local_feat_mask;
470 case L2CAP_MODE_STREAMING:
471 return L2CAP_FEAT_STREAMING & feat_mask & local_feat_mask;
/* Abort the channel: drop queued TX data and ERTM timers, send a
 * Disconnect Request, and move the socket to BT_DISCONN with 'err'. */
477 static void l2cap_send_disconn_req(struct l2cap_conn *conn, struct sock *sk, int err)
479 struct l2cap_disconn_req req;
484 skb_queue_purge(TX_QUEUE(sk));
486 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM) {
487 del_timer(&l2cap_pi(sk)->retrans_timer);
488 del_timer(&l2cap_pi(sk)->monitor_timer);
489 del_timer(&l2cap_pi(sk)->ack_timer);
492 req.dcid = cpu_to_le16(l2cap_pi(sk)->dcid);
493 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
494 l2cap_send_cmd(conn, l2cap_get_ident(conn),
495 L2CAP_DISCONN_REQ, sizeof(req), &req);
497 sk->sk_state = BT_DISCONN;
501 /* ---- L2CAP connections ---- */
/* Resume establishment for every channel on the connection once the
 * feature-mask exchange is done: send Connect Requests for BT_CONNECT
 * channels, Connect Responses for BT_CONNECT2 ones. Channels whose
 * mode the peer cannot support are deferred onto a local delete list
 * and closed after the channel-list lock is dropped. */
502 static void l2cap_conn_start(struct l2cap_conn *conn)
504 struct l2cap_chan_list *l = &conn->chan_list;
505 struct sock_del_list del, *tmp1, *tmp2;
508 BT_DBG("conn %p", conn);
510 INIT_LIST_HEAD(&del.list);
514 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
517 if (sk->sk_type != SOCK_SEQPACKET &&
518 sk->sk_type != SOCK_STREAM) {
523 if (sk->sk_state == BT_CONNECT) {
524 if (l2cap_check_security(sk) &&
525 __l2cap_no_conn_pending(sk)) {
526 struct l2cap_conn_req req;
528 if (!l2cap_mode_supported(l2cap_pi(sk)->mode,
530 && l2cap_pi(sk)->conf_state &
531 L2CAP_CONF_STATE2_DEVICE) {
/* NOTE(review): tmp1 is a struct sock_del_list * (line 505) but is
 * allocated with sizeof(struct srej_list) — wrong-size allocation;
 * should be sizeof(*tmp1) / sizeof(struct sock_del_list). */
532 tmp1 = kzalloc(sizeof(struct srej_list),
535 list_add_tail(&tmp1->list, &del.list);
540 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
541 req.psm = l2cap_pi(sk)->psm;
543 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
544 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
546 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
547 L2CAP_CONN_REQ, sizeof(req), &req);
549 } else if (sk->sk_state == BT_CONNECT2) {
550 struct l2cap_conn_rsp rsp;
551 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
552 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
554 if (l2cap_check_security(sk)) {
/* With deferred setup the response stays pending until userspace
 * accepts; wake the listening parent. */
555 if (bt_sk(sk)->defer_setup) {
556 struct sock *parent = bt_sk(sk)->parent;
557 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
558 rsp.status = cpu_to_le16(L2CAP_CS_AUTHOR_PEND);
559 parent->sk_data_ready(parent, 0);
562 sk->sk_state = BT_CONFIG;
563 rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
564 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
567 rsp.result = cpu_to_le16(L2CAP_CR_PEND);
568 rsp.status = cpu_to_le16(L2CAP_CS_AUTHEN_PEND);
571 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
572 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
578 read_unlock(&l->lock);
/* Now close the deferred sockets outside the list lock. */
580 list_for_each_entry_safe(tmp1, tmp2, &del.list, list) {
581 bh_lock_sock(tmp1->sk);
582 __l2cap_sock_close(tmp1->sk, ECONNRESET);
583 bh_unlock_sock(tmp1->sk);
584 list_del(&tmp1->list);
/* ACL link came up: mark non-connection-oriented sockets connected
 * immediately, and start establishment for connecting channels. */
589 static void l2cap_conn_ready(struct l2cap_conn *conn)
591 struct l2cap_chan_list *l = &conn->chan_list;
594 BT_DBG("conn %p", conn);
598 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
601 if (sk->sk_type != SOCK_SEQPACKET &&
602 sk->sk_type != SOCK_STREAM) {
603 l2cap_sock_clear_timer(sk);
604 sk->sk_state = BT_CONNECTED;
605 sk->sk_state_change(sk);
606 } else if (sk->sk_state == BT_CONNECT)
612 read_unlock(&l->lock);
615 /* Notify sockets that we cannot guaranty reliability anymore */
/* Propagate 'err' to every channel that asked for reliable delivery
 * (the error assignment is on a line not visible in this view). */
616 static void l2cap_conn_unreliable(struct l2cap_conn *conn, int err)
618 struct l2cap_chan_list *l = &conn->chan_list;
621 BT_DBG("conn %p", conn);
625 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
626 if (l2cap_pi(sk)->force_reliable)
630 read_unlock(&l->lock);
/* Information Request timed out: treat the feature exchange as done
 * (with no peer data) and resume channel establishment. */
633 static void l2cap_info_timeout(unsigned long arg)
635 struct l2cap_conn *conn = (void *) arg;
637 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
638 conn->info_ident = 0;
640 l2cap_conn_start(conn);
/* Return the L2CAP state for an ACL link, allocating and initialising
 * it on first use and caching it in hcon->l2cap_data. */
643 static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon, u8 status)
645 struct l2cap_conn *conn = hcon->l2cap_data;
650 conn = kzalloc(sizeof(struct l2cap_conn), GFP_ATOMIC);
654 hcon->l2cap_data = conn;
657 BT_DBG("hcon %p conn %p", hcon, conn);
659 conn->mtu = hcon->hdev->acl_mtu;
660 conn->src = &hcon->hdev->bdaddr;
661 conn->dst = &hcon->dst;
665 spin_lock_init(&conn->lock);
666 rwlock_init(&conn->chan_list.lock);
668 setup_timer(&conn->info_timer, l2cap_info_timeout,
669 (unsigned long) conn);
/* Same default disconnect reason as __l2cap_chan_add(). */
671 conn->disc_reason = 0x13;
/* Destroy the L2CAP state of a dying ACL link: free any partial
 * reassembly skb, tear down every channel with 'err', stop the info
 * timer, and detach from the hci_conn. */
676 static void l2cap_conn_del(struct hci_conn *hcon, int err)
678 struct l2cap_conn *conn = hcon->l2cap_data;
684 BT_DBG("hcon %p conn %p, err %d", hcon, conn, err);
686 kfree_skb(conn->rx_skb);
689 while ((sk = conn->chan_list.head)) {
691 l2cap_chan_del(sk, err);
696 if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT)
697 del_timer_sync(&conn->info_timer);
699 hcon->l2cap_data = NULL;
/* Write-locked wrapper around __l2cap_chan_add(). */
703 static inline void l2cap_chan_add(struct l2cap_conn *conn, struct sock *sk, struct sock *parent)
705 struct l2cap_chan_list *l = &conn->chan_list;
706 write_lock_bh(&l->lock);
707 __l2cap_chan_add(conn, sk, parent);
708 write_unlock_bh(&l->lock);
711 /* ---- Socket interface ---- */
/* Exact-match lookup by (bound PSM, source bdaddr) over the global
 * socket list. Caller must hold l2cap_sk_list.lock. */
712 static struct sock *__l2cap_get_sock_by_addr(__le16 psm, bdaddr_t *src)
715 struct hlist_node *node;
716 sk_for_each(sk, node, &l2cap_sk_list.head)
717 if (l2cap_pi(sk)->sport == psm && !bacmp(&bt_sk(sk)->src, src))
724 /* Find socket with psm and source bdaddr.
725 * Returns closest match.
/* Prefers an exact source-address match; falls back to a BDADDR_ANY
 * listener (sk1). Caller must hold l2cap_sk_list.lock. */
727 static struct sock *__l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
729 struct sock *sk = NULL, *sk1 = NULL;
730 struct hlist_node *node;
732 sk_for_each(sk, node, &l2cap_sk_list.head) {
733 if (state && sk->sk_state != state)
736 if (l2cap_pi(sk)->psm == psm) {
738 if (!bacmp(&bt_sk(sk)->src, src))
742 if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY))
/* node is non-NULL only when the loop broke on an exact match. */
746 return node ? sk : sk1;
749 /* Find socket with given address (psm, src).
750 * Returns locked socket */
/* Read-locked wrapper around __l2cap_get_sock_by_psm(). */
751 static inline struct sock *l2cap_get_sock_by_psm(int state, __le16 psm, bdaddr_t *src)
754 read_lock(&l2cap_sk_list.lock);
755 s = __l2cap_get_sock_by_psm(state, psm, src);
758 read_unlock(&l2cap_sk_list.lock);
/* sk_destruct callback: release any skbs still queued on the socket. */
762 static void l2cap_sock_destruct(struct sock *sk)
766 skb_queue_purge(&sk->sk_receive_queue);
767 skb_queue_purge(&sk->sk_write_queue);
/* Close every not-yet-accepted child of a listening socket, then mark
 * the parent closed and zapped. */
770 static void l2cap_sock_cleanup_listen(struct sock *parent)
774 BT_DBG("parent %p", parent);
776 /* Close not yet accepted channels */
777 while ((sk = bt_accept_dequeue(parent, NULL)))
778 l2cap_sock_close(sk);
780 parent->sk_state = BT_CLOSED;
781 sock_set_flag(parent, SOCK_ZAPPED);
784 /* Kill socket (only if zapped and orphan)
785 * Must be called on unlocked socket.
787 static void l2cap_sock_kill(struct sock *sk)
/* Bail out unless the socket is both zapped and detached from any
 * struct socket (i.e. truly orphaned). */
789 if (!sock_flag(sk, SOCK_ZAPPED) || sk->sk_socket)
792 BT_DBG("sk %p state %d", sk, sk->sk_state);
794 /* Kill poor orphan */
795 bt_sock_unlink(&l2cap_sk_list, sk);
796 sock_set_flag(sk, SOCK_DEAD);
/* State-machine close: listening sockets reap their accept queue;
 * connected/configuring channels send a Disconnect Request; CONNECT2
 * channels answer the pending Connect Request with a refusal; anything
 * else is torn down directly. (Switch labels sit on lines not visible
 * in this view.) Caller holds the socket lock. */
800 static void __l2cap_sock_close(struct sock *sk, int reason)
802 BT_DBG("sk %p state %d socket %p", sk, sk->sk_state, sk->sk_socket);
804 switch (sk->sk_state) {
806 l2cap_sock_cleanup_listen(sk);
811 if (sk->sk_type == SOCK_SEQPACKET ||
812 sk->sk_type == SOCK_STREAM) {
813 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
/* Bound the wait for the Disconnect Response. */
815 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
816 l2cap_send_disconn_req(conn, sk, reason);
818 l2cap_chan_del(sk, reason);
822 if (sk->sk_type == SOCK_SEQPACKET ||
823 sk->sk_type == SOCK_STREAM) {
824 struct l2cap_conn *conn = l2cap_pi(sk)->conn;
825 struct l2cap_conn_rsp rsp;
/* Refuse the still-pending incoming connection: security block when
 * setup was deferred, bad PSM otherwise. */
828 if (bt_sk(sk)->defer_setup)
829 result = L2CAP_CR_SEC_BLOCK;
831 result = L2CAP_CR_BAD_PSM;
833 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
834 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
835 rsp.result = cpu_to_le16(result);
836 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
837 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
838 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
840 l2cap_chan_del(sk, reason);
845 l2cap_chan_del(sk, reason);
849 sock_set_flag(sk, SOCK_ZAPPED);
854 /* Must be called on unlocked socket. */
/* Cancel the timer and close with ECONNRESET (socket locking around
 * the close happens on lines not visible in this view). */
855 static void l2cap_sock_close(struct sock *sk)
857 l2cap_sock_clear_timer(sk);
859 __l2cap_sock_close(sk, ECONNRESET);
/* Initialise a new L2CAP socket: inherit every channel parameter from
 * the listening parent when there is one, otherwise apply defaults
 * (the if (parent)/else lines are not visible in this view). */
864 static void l2cap_sock_init(struct sock *sk, struct sock *parent)
866 struct l2cap_pinfo *pi = l2cap_pi(sk);
871 sk->sk_type = parent->sk_type;
872 bt_sk(sk)->defer_setup = bt_sk(parent)->defer_setup;
874 pi->imtu = l2cap_pi(parent)->imtu;
875 pi->omtu = l2cap_pi(parent)->omtu;
876 pi->conf_state = l2cap_pi(parent)->conf_state;
877 pi->mode = l2cap_pi(parent)->mode;
878 pi->fcs = l2cap_pi(parent)->fcs;
879 pi->max_tx = l2cap_pi(parent)->max_tx;
880 pi->tx_win = l2cap_pi(parent)->tx_win;
881 pi->sec_level = l2cap_pi(parent)->sec_level;
882 pi->role_switch = l2cap_pi(parent)->role_switch;
883 pi->force_reliable = l2cap_pi(parent)->force_reliable;
/* Defaults for a parentless socket. */
885 pi->imtu = L2CAP_DEFAULT_MTU;
/* SOCK_STREAM defaults to ERTM when the module enables it; the
 * STATE2_DEVICE flag pins that mode during configuration. */
887 if (enable_ertm && sk->sk_type == SOCK_STREAM) {
888 pi->mode = L2CAP_MODE_ERTM;
889 pi->conf_state |= L2CAP_CONF_STATE2_DEVICE;
891 pi->mode = L2CAP_MODE_BASIC;
893 pi->max_tx = L2CAP_DEFAULT_MAX_TX;
894 pi->fcs = L2CAP_FCS_CRC16;
895 pi->tx_win = L2CAP_DEFAULT_TX_WINDOW;
896 pi->sec_level = BT_SECURITY_LOW;
898 pi->force_reliable = 0;
901 /* Default config options */
903 pi->flush_to = L2CAP_DEFAULT_FLUSH_TO;
904 skb_queue_head_init(TX_QUEUE(sk));
905 skb_queue_head_init(SREJ_QUEUE(sk));
906 skb_queue_head_init(BUSY_QUEUE(sk));
907 INIT_LIST_HEAD(SREJ_LIST(sk));
/* Protocol descriptor; obj_size makes sk_alloc() reserve room for the
 * full l2cap_pinfo. */
910 static struct proto l2cap_proto = {
912 .owner = THIS_MODULE,
913 .obj_size = sizeof(struct l2cap_pinfo)
/* Allocate and minimally initialise an L2CAP sock: destructor, send
 * timeout, state, sk_timer, and registration on the global list. */
916 static struct sock *l2cap_sock_alloc(struct net *net, struct socket *sock, int proto, gfp_t prio)
920 sk = sk_alloc(net, PF_BLUETOOTH, prio, &l2cap_proto);
924 sock_init_data(sock, sk);
925 INIT_LIST_HEAD(&bt_sk(sk)->accept_q);
927 sk->sk_destruct = l2cap_sock_destruct;
928 sk->sk_sndtimeo = msecs_to_jiffies(L2CAP_CONN_TIMEOUT);
930 sock_reset_flag(sk, SOCK_ZAPPED);
932 sk->sk_protocol = proto;
933 sk->sk_state = BT_OPEN;
935 setup_timer(&sk->sk_timer, l2cap_sock_timeout, (unsigned long) sk);
937 bt_sock_link(&l2cap_sk_list, sk);
/* socket(2) backend: validate the socket type (raw sockets need
 * CAP_NET_RAW unless kernel-internal), then allocate and init. */
941 static int l2cap_sock_create(struct net *net, struct socket *sock, int protocol,
946 BT_DBG("sock %p", sock);
948 sock->state = SS_UNCONNECTED;
950 if (sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM &&
951 sock->type != SOCK_DGRAM && sock->type != SOCK_RAW)
952 return -ESOCKTNOSUPPORT;
954 if (sock->type == SOCK_RAW && !kern && !capable(CAP_NET_RAW))
957 sock->ops = &l2cap_sock_ops;
959 sk = l2cap_sock_alloc(net, sock, protocol, GFP_ATOMIC);
963 l2cap_sock_init(sk, NULL);
/* bind(2): copy in the (possibly short) sockaddr_l2, enforce the
 * privileged-PSM rule, reject duplicate (psm, bdaddr) bindings, and
 * record the source address and PSM. */
967 static int l2cap_sock_bind(struct socket *sock, struct sockaddr *addr, int alen)
969 struct sock *sk = sock->sk;
970 struct sockaddr_l2 la;
975 if (!addr || addr->sa_family != AF_BLUETOOTH)
/* Tolerate callers passing a short sockaddr: zero-fill then copy. */
978 memset(&la, 0, sizeof(la));
979 len = min_t(unsigned int, sizeof(la), alen);
980 memcpy(&la, addr, len);
987 if (sk->sk_state != BT_OPEN) {
/* PSMs below 0x1001 are reserved; require CAP_NET_BIND_SERVICE. */
992 if (la.l2_psm && __le16_to_cpu(la.l2_psm) < 0x1001 &&
993 !capable(CAP_NET_BIND_SERVICE)) {
998 write_lock_bh(&l2cap_sk_list.lock);
1000 if (la.l2_psm && __l2cap_get_sock_by_addr(la.l2_psm, &la.l2_bdaddr)) {
1003 /* Save source address */
1004 bacpy(&bt_sk(sk)->src, &la.l2_bdaddr);
1005 l2cap_pi(sk)->psm = la.l2_psm;
1006 l2cap_pi(sk)->sport = la.l2_psm;
1007 sk->sk_state = BT_BOUND;
/* SDP (0x0001) and RFCOMM (0x0003) default to the SDP security level. */
1009 if (__le16_to_cpu(la.l2_psm) == 0x0001 ||
1010 __le16_to_cpu(la.l2_psm) == 0x0003)
1011 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1014 write_unlock_bh(&l2cap_sk_list.lock);
/* Resolve a route to the destination, derive the HCI authentication
 * requirement from socket type/PSM/security level, create (or reuse)
 * the ACL link, and attach this channel to it. Returns 0 or -errno
 * (error paths sit on lines not visible in this view). */
1021 static int l2cap_do_connect(struct sock *sk)
1023 bdaddr_t *src = &bt_sk(sk)->src;
1024 bdaddr_t *dst = &bt_sk(sk)->dst;
1025 struct l2cap_conn *conn;
1026 struct hci_conn *hcon;
1027 struct hci_dev *hdev;
1031 BT_DBG("%s -> %s psm 0x%2.2x", batostr(src), batostr(dst),
1034 hdev = hci_get_route(dst, src);
1036 return -EHOSTUNREACH;
1038 hci_dev_lock_bh(hdev);
/* Raw sockets use dedicated bonding; SDP never bonds; everything else
 * maps its security level to general bonding. Mirrors the logic in
 * l2cap_check_security(). */
1042 if (sk->sk_type == SOCK_RAW) {
1043 switch (l2cap_pi(sk)->sec_level) {
1044 case BT_SECURITY_HIGH:
1045 auth_type = HCI_AT_DEDICATED_BONDING_MITM;
1047 case BT_SECURITY_MEDIUM:
1048 auth_type = HCI_AT_DEDICATED_BONDING;
1051 auth_type = HCI_AT_NO_BONDING;
1054 } else if (l2cap_pi(sk)->psm == cpu_to_le16(0x0001)) {
1055 if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
1056 auth_type = HCI_AT_NO_BONDING_MITM;
1058 auth_type = HCI_AT_NO_BONDING;
1060 if (l2cap_pi(sk)->sec_level == BT_SECURITY_LOW)
1061 l2cap_pi(sk)->sec_level = BT_SECURITY_SDP;
1063 switch (l2cap_pi(sk)->sec_level) {
1064 case BT_SECURITY_HIGH:
1065 auth_type = HCI_AT_GENERAL_BONDING_MITM;
1067 case BT_SECURITY_MEDIUM:
1068 auth_type = HCI_AT_GENERAL_BONDING;
1071 auth_type = HCI_AT_NO_BONDING;
1076 hcon = hci_connect(hdev, ACL_LINK, dst,
1077 l2cap_pi(sk)->sec_level, auth_type);
1081 conn = l2cap_conn_add(hcon, 0);
1089 /* Update source addr of the socket */
1090 bacpy(src, conn->src);
1092 l2cap_chan_add(conn, sk, NULL);
1094 sk->sk_state = BT_CONNECT;
1095 l2cap_sock_set_timer(sk, sk->sk_sndtimeo);
/* If the ACL is already up, non-connection-oriented sockets are
 * connected immediately; others proceed via l2cap_do_start(). */
1097 if (hcon->state == BT_CONNECTED) {
1098 if (sk->sk_type != SOCK_SEQPACKET &&
1099 sk->sk_type != SOCK_STREAM) {
1100 l2cap_sock_clear_timer(sk);
1101 sk->sk_state = BT_CONNECTED;
1107 hci_dev_unlock_bh(hdev);
/* connect(2): validate the address and channel mode, record the
 * destination, start the connection, and optionally block until
 * BT_CONNECTED (switch/ error labels are on lines not visible here). */
1112 static int l2cap_sock_connect(struct socket *sock, struct sockaddr *addr, int alen, int flags)
1114 struct sock *sk = sock->sk;
1115 struct sockaddr_l2 la;
1118 BT_DBG("sk %p", sk);
1120 if (!addr || alen < sizeof(addr->sa_family) ||
1121 addr->sa_family != AF_BLUETOOTH)
1124 memset(&la, 0, sizeof(la));
1125 len = min_t(unsigned int, sizeof(la), alen);
1126 memcpy(&la, addr, len);
1133 if ((sk->sk_type == SOCK_SEQPACKET || sk->sk_type == SOCK_STREAM)
/* Only BASIC/ERTM/STREAMING modes may connect; others are rejected
 * (the rejection branch is on lines not visible here). */
1139 switch (l2cap_pi(sk)->mode) {
1140 case L2CAP_MODE_BASIC:
1142 case L2CAP_MODE_ERTM:
1143 case L2CAP_MODE_STREAMING:
1152 switch (sk->sk_state) {
1156 /* Already connecting */
1160 /* Already connected */
1173 /* Set destination address and psm */
1174 bacpy(&bt_sk(sk)->dst, &la.l2_bdaddr);
1175 l2cap_pi(sk)->psm = la.l2_psm;
1177 err = l2cap_do_connect(sk);
1182 err = bt_sock_wait_state(sk, BT_CONNECTED,
1183 sock_sndtimeo(sk, flags & O_NONBLOCK));
/* listen(2): only bound SEQPACKET/STREAM sockets in a supported mode
 * may listen; auto-assign an odd dynamic PSM in [0x1001, 0x1100) when
 * none was bound. */
1189 static int l2cap_sock_listen(struct socket *sock, int backlog)
1191 struct sock *sk = sock->sk;
1194 BT_DBG("sk %p backlog %d", sk, backlog);
1198 if ((sock->type != SOCK_SEQPACKET && sock->type != SOCK_STREAM)
1199 || sk->sk_state != BT_BOUND) {
1204 switch (l2cap_pi(sk)->mode) {
1205 case L2CAP_MODE_BASIC:
1207 case L2CAP_MODE_ERTM:
1208 case L2CAP_MODE_STREAMING:
1217 if (!l2cap_pi(sk)->psm) {
1218 bdaddr_t *src = &bt_sk(sk)->src;
1223 write_lock_bh(&l2cap_sk_list.lock);
/* Valid dynamic PSMs are odd (LSB of the low octet set). */
1225 for (psm = 0x1001; psm < 0x1100; psm += 2)
1226 if (!__l2cap_get_sock_by_addr(cpu_to_le16(psm), src)) {
1227 l2cap_pi(sk)->psm = cpu_to_le16(psm);
1228 l2cap_pi(sk)->sport = cpu_to_le16(psm);
1233 write_unlock_bh(&l2cap_sk_list.lock);
1239 sk->sk_max_ack_backlog = backlog;
1240 sk->sk_ack_backlog = 0;
1241 sk->sk_state = BT_LISTEN;
/* accept(2): classic wake-one sleep loop on the accept queue, bailing
 * out on timeout, signal, or the socket leaving BT_LISTEN. */
1248 static int l2cap_sock_accept(struct socket *sock, struct socket *newsock, int flags)
1250 DECLARE_WAITQUEUE(wait, current);
1251 struct sock *sk = sock->sk, *nsk;
1255 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1257 if (sk->sk_state != BT_LISTEN) {
1262 timeo = sock_rcvtimeo(sk, flags & O_NONBLOCK);
1264 BT_DBG("sk %p timeo %ld", sk, timeo);
1266 /* Wait for an incoming connection. (wake-one). */
1267 add_wait_queue_exclusive(sk_sleep(sk), &wait);
1268 while (!(nsk = bt_accept_dequeue(sk, newsock))) {
1269 set_current_state(TASK_INTERRUPTIBLE);
/* Drop the socket lock while sleeping (release is on a line not
 * visible here), reacquire before re-checking state. */
1276 timeo = schedule_timeout(timeo);
1277 lock_sock_nested(sk, SINGLE_DEPTH_NESTING);
1279 if (sk->sk_state != BT_LISTEN) {
1284 if (signal_pending(current)) {
1285 err = sock_intr_errno(timeo);
1289 set_current_state(TASK_RUNNING);
1290 remove_wait_queue(sk_sleep(sk), &wait);
1295 newsock->state = SS_CONNECTED;
1297 BT_DBG("new socket %p", nsk);
/* getsockname/getpeername: fill a sockaddr_l2 with either the peer
 * (psm/dst/dcid) or local (sport/src/scid) identity depending on
 * 'peer' (the branch line is not visible in this view). */
1304 static int l2cap_sock_getname(struct socket *sock, struct sockaddr *addr, int *len, int peer)
1306 struct sockaddr_l2 *la = (struct sockaddr_l2 *) addr;
1307 struct sock *sk = sock->sk;
1309 BT_DBG("sock %p, sk %p", sock, sk);
1311 addr->sa_family = AF_BLUETOOTH;
1312 *len = sizeof(struct sockaddr_l2);
1315 la->l2_psm = l2cap_pi(sk)->psm;
1316 bacpy(&la->l2_bdaddr, &bt_sk(sk)->dst);
1317 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->dcid);
1319 la->l2_psm = l2cap_pi(sk)->sport;
1320 bacpy(&la->l2_bdaddr, &bt_sk(sk)->src);
1321 la->l2_cid = cpu_to_le16(l2cap_pi(sk)->scid);
/* Block until every transmitted ERTM I-frame has been acknowledged
 * (unacked_frames drops to 0) or the connection goes away; interrupted
 * by signals, timeouts, or a socket error. */
1327 static int __l2cap_wait_ack(struct sock *sk)
1329 DECLARE_WAITQUEUE(wait, current);
1333 add_wait_queue(sk_sleep(sk), &wait);
1334 while ((l2cap_pi(sk)->unacked_frames > 0 && l2cap_pi(sk)->conn)) {
1335 set_current_state(TASK_INTERRUPTIBLE);
1340 if (signal_pending(current)) {
1341 err = sock_intr_errno(timeo);
1346 timeo = schedule_timeout(timeo);
1349 err = sock_error(sk);
1353 set_current_state(TASK_RUNNING);
1354 remove_wait_queue(sk_sleep(sk), &wait);
/* ERTM monitor timer: peer has not answered our poll. Give up and
 * disconnect after remote_max_tx retries, otherwise re-arm and poll
 * again with an RR/RNR carrying the P-bit. */
1358 static void l2cap_monitor_timeout(unsigned long arg)
1360 struct sock *sk = (void *) arg;
1362 BT_DBG("sk %p", sk);
1365 if (l2cap_pi(sk)->retry_count >= l2cap_pi(sk)->remote_max_tx) {
1366 l2cap_send_disconn_req(l2cap_pi(sk)->conn, sk, ECONNABORTED);
1371 l2cap_pi(sk)->retry_count++;
1372 __mod_monitor_timer();
1374 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* ERTM retransmission timer: an I-frame went unacknowledged. Start
 * the monitor cycle and poll the peer (P-bit), entering the WAIT_F
 * state until its F-bit response arrives. */
1378 static void l2cap_retrans_timeout(unsigned long arg)
1380 struct sock *sk = (void *) arg;
1382 BT_DBG("sk %p", sk);
1385 l2cap_pi(sk)->retry_count = 1;
1386 __mod_monitor_timer();
1388 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
1390 l2cap_send_rr_or_rnr(l2cap_pi(sk), L2CAP_CTRL_POLL);
/* Free queued I-frames from the head of the TX queue up to (but not
 * including) expected_ack_seq; stop the retransmission timer once
 * nothing is left unacknowledged. */
1394 static void l2cap_drop_acked_frames(struct sock *sk)
1396 struct sk_buff *skb;
1398 while ((skb = skb_peek(TX_QUEUE(sk))) &&
1399 l2cap_pi(sk)->unacked_frames) {
1400 if (bt_cb(skb)->tx_seq == l2cap_pi(sk)->expected_ack_seq)
1403 skb = skb_dequeue(TX_QUEUE(sk));
1406 l2cap_pi(sk)->unacked_frames--;
1409 if (!l2cap_pi(sk)->unacked_frames)
1410 del_timer(&l2cap_pi(sk)->retrans_timer);
/* Hand a fully built skb to the HCI layer on this channel's ACL. */
1413 static inline void l2cap_do_send(struct sock *sk, struct sk_buff *skb)
1415 struct l2cap_pinfo *pi = l2cap_pi(sk);
1417 BT_DBG("sk %p, skb %p len %d", sk, skb, skb->len);
1419 hci_send_acl(pi->conn->hcon, skb, 0);
/* Streaming mode transmit: send a clone of every queued frame with the
 * current TxSeq patched into the control field (and FCS recomputed),
 * then free the original — streaming never retransmits. */
1422 static int l2cap_streaming_send(struct sock *sk)
1424 struct sk_buff *skb, *tx_skb;
1425 struct l2cap_pinfo *pi = l2cap_pi(sk);
1428 while ((skb = sk->sk_send_head)) {
1429 tx_skb = skb_clone(skb, GFP_ATOMIC);
1431 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1432 control |= pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT;
1433 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* FCS goes over everything except its own trailing 2 bytes. */
1435 if (pi->fcs == L2CAP_FCS_CRC16) {
1436 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1437 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1440 l2cap_do_send(sk, tx_skb);
/* TxSeq is modulo-64 per the ERTM/streaming sequence space. */
1442 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1444 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1445 sk->sk_send_head = NULL;
1447 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Original is no longer needed: dequeue and free (kfree_skb on a line
 * not visible in this view). */
1449 skb = skb_dequeue(TX_QUEUE(sk));
/* Retransmit the single queued I-frame whose TxSeq matches 'tx_seq'
 * (e.g. in answer to an SREJ). Disconnects if the frame has already
 * been sent remote_max_tx times. */
1455 static void l2cap_retransmit_one_frame(struct sock *sk, u8 tx_seq)
1457 struct l2cap_pinfo *pi = l2cap_pi(sk);
1458 struct sk_buff *skb, *tx_skb;
1461 skb = skb_peek(TX_QUEUE(sk));
1466 if (bt_cb(skb)->tx_seq == tx_seq)
1469 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1472 } while ((skb = skb_queue_next(TX_QUEUE(sk), skb)));
1474 if (pi->remote_max_tx &&
1475 bt_cb(skb)->retries == pi->remote_max_tx) {
1476 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1480 tx_skb = skb_clone(skb, GFP_ATOMIC);
1481 bt_cb(skb)->retries++;
1482 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
/* Piggy-back a pending F-bit on the retransmission. */
1484 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1485 control |= L2CAP_CTRL_FINAL;
1486 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1489 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1490 | (tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1492 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
/* Recompute the FCS over the clone after the control rewrite. */
1494 if (pi->fcs == L2CAP_FCS_CRC16) {
1495 fcs = crc16(0, (u8 *)tx_skb->data, tx_skb->len - 2);
1496 put_unaligned_le16(fcs, tx_skb->data + tx_skb->len - 2);
1499 l2cap_do_send(sk, tx_skb);
1502 static int l2cap_ertm_send(struct sock *sk)
1504 struct sk_buff *skb, *tx_skb;
1505 struct l2cap_pinfo *pi = l2cap_pi(sk);
1509 if (sk->sk_state != BT_CONNECTED)
1512 while ((skb = sk->sk_send_head) && (!l2cap_tx_window_full(sk))) {
1514 if (pi->remote_max_tx &&
1515 bt_cb(skb)->retries == pi->remote_max_tx) {
1516 l2cap_send_disconn_req(pi->conn, sk, ECONNABORTED);
1520 tx_skb = skb_clone(skb, GFP_ATOMIC);
1522 bt_cb(skb)->retries++;
1524 control = get_unaligned_le16(tx_skb->data + L2CAP_HDR_SIZE);
1525 control &= L2CAP_CTRL_SAR;
1527 if (pi->conn_state & L2CAP_CONN_SEND_FBIT) {
1528 control |= L2CAP_CTRL_FINAL;
1529 pi->conn_state &= ~L2CAP_CONN_SEND_FBIT;
1531 control |= (pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT)
1532 | (pi->next_tx_seq << L2CAP_CTRL_TXSEQ_SHIFT);
1533 put_unaligned_le16(control, tx_skb->data + L2CAP_HDR_SIZE);
1536 if (pi->fcs == L2CAP_FCS_CRC16) {
1537 fcs = crc16(0, (u8 *)skb->data, tx_skb->len - 2);
1538 put_unaligned_le16(fcs, skb->data + tx_skb->len - 2);
1541 l2cap_do_send(sk, tx_skb);
1543 __mod_retrans_timer();
1545 bt_cb(skb)->tx_seq = pi->next_tx_seq;
1546 pi->next_tx_seq = (pi->next_tx_seq + 1) % 64;
1548 pi->unacked_frames++;
1551 if (skb_queue_is_last(TX_QUEUE(sk), skb))
1552 sk->sk_send_head = NULL;
1554 sk->sk_send_head = skb_queue_next(TX_QUEUE(sk), skb);
/* Rewind the send pointer to the oldest unacknowledged frame and
 * resend everything from there via l2cap_ertm_send(). */
1562 static int l2cap_retransmit_frames(struct sock *sk)
1564 struct l2cap_pinfo *pi = l2cap_pi(sk);
1567 if (!skb_queue_empty(TX_QUEUE(sk)))
1568 sk->sk_send_head = TX_QUEUE(sk)->next;
1570 pi->next_tx_seq = pi->expected_ack_seq;
1571 ret = l2cap_ertm_send(sk);
/*
 * Acknowledge received I-frames up to buffer_seq: send RNR when locally
 * busy, otherwise try to piggyback the ack on pending I-frames and fall
 * back to an RR S-frame when nothing was transmitted.
 */
static void l2cap_send_ack(struct l2cap_pinfo *pi)
	struct sock *sk = (struct sock *)pi;	/* pinfo is embedded in the sock */

	control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
		control |= L2CAP_SUPER_RCV_NOT_READY;
		pi->conn_state |= L2CAP_CONN_RNR_SENT;
		l2cap_send_sframe(pi, control);

	/* I-frames carry the ReqSeq acknowledgement implicitly */
	if (l2cap_ertm_send(sk) > 0)

	control |= L2CAP_SUPER_RCV_READY;
	l2cap_send_sframe(pi, control);
/*
 * Send a SREJ S-frame with the Final bit set, requesting retransmission
 * of the frame recorded at the tail of the SREJ list.
 */
static void l2cap_send_srejtail(struct sock *sk)
	struct srej_list *tail;

	control = L2CAP_SUPER_SELECT_REJECT;
	control |= L2CAP_CTRL_FINAL;

	/* Last (most recent) missing frame on the SREJ list */
	tail = list_entry(SREJ_LIST(sk)->prev, struct srej_list, list);
	control |= tail->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;

	l2cap_send_sframe(l2cap_pi(sk), control);
/*
 * Copy a user iovec into an skb: the first `count` bytes go into the
 * head skb, the remainder into ACL-MTU-sized fragments chained on
 * frag_list.  Returns 0 on success or a negative errno.
 */
static inline int l2cap_skbuff_fromiovec(struct sock *sk, struct msghdr *msg, int len, int count, struct sk_buff *skb)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff **frag;

	if (memcpy_fromiovec(skb_put(skb, count), msg->msg_iov, count))

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_send_alloc(sk, count, msg->msg_flags & MSG_DONTWAIT, &err);

		if (memcpy_fromiovec(skb_put(*frag, count), msg->msg_iov, count))

		frag = &(*frag)->next;
/*
 * Build a connectionless (SOCK_DGRAM) PDU: L2CAP header plus a 2-byte
 * PSM field, payload copied from the user iovec (fragments chained when
 * larger than the ACL MTU).  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_connless_pdu(struct sock *sk, struct msghdr *msg, size_t len)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for the PSM field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(l2cap_pi(sk)->psm, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/*
 * Build a basic-mode PDU: plain L2CAP header followed by the payload
 * copied from the user iovec.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_basic_pdu(struct sock *sk, struct msghdr *msg, size_t len)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE;
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);
/*
 * Build an ERTM/streaming I-frame: L2CAP header, 16-bit control word,
 * optional 2-byte SDU length (present only for SAR start frames, i.e.
 * sdulen != 0), payload, and a placeholder FCS that is filled in at
 * transmit time.  Returns the skb or an ERR_PTR.
 */
static struct sk_buff *l2cap_create_iframe_pdu(struct sock *sk, struct msghdr *msg, size_t len, u16 control, u16 sdulen)
	struct l2cap_conn *conn = l2cap_pi(sk)->conn;
	struct sk_buff *skb;
	int err, count, hlen = L2CAP_HDR_SIZE + 2;	/* +2 for the control field */
	struct l2cap_hdr *lh;

	BT_DBG("sk %p len %d", sk, (int)len);

	return ERR_PTR(-ENOTCONN);

	/* Reserve room for the trailing 2-byte FCS when CRC16 is in use */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)

	count = min_t(unsigned int, (conn->mtu - hlen), len);
	skb = bt_skb_send_alloc(sk, count + hlen,
			msg->msg_flags & MSG_DONTWAIT, &err);
		return ERR_PTR(-ENOMEM);

	/* Create L2CAP header */
	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->cid = cpu_to_le16(l2cap_pi(sk)->dcid);
	lh->len = cpu_to_le16(len + (hlen - L2CAP_HDR_SIZE));
	put_unaligned_le16(control, skb_put(skb, 2));
	put_unaligned_le16(sdulen, skb_put(skb, 2));

	err = l2cap_skbuff_fromiovec(sk, msg, len, count, skb);
	if (unlikely(err < 0)) {
		return ERR_PTR(err);

	/* Placeholder FCS; the real CRC is computed when the frame is sent */
	if (l2cap_pi(sk)->fcs == L2CAP_FCS_CRC16)
		put_unaligned_le16(0, skb_put(skb, 2));

	bt_cb(skb)->retries = 0;
/*
 * Segment an SDU larger than the remote MPS into a START / CONTINUE /
 * END sequence of I-frames on a temporary queue, then splice the whole
 * sequence onto the socket TX queue (all-or-nothing: any allocation
 * failure purges the partial queue).
 */
static inline int l2cap_sar_segment_sdu(struct sock *sk, struct msghdr *msg, size_t len)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct sk_buff *skb;
	struct sk_buff_head sar_queue;

	skb_queue_head_init(&sar_queue);
	control = L2CAP_SDU_START;
	/* Start frame carries the total SDU length as its sdulen field */
	skb = l2cap_create_iframe_pdu(sk, msg, pi->remote_mps, control, len);
		return PTR_ERR(skb);

	__skb_queue_tail(&sar_queue, skb);
	len -= pi->remote_mps;
	size += pi->remote_mps;

		if (len > pi->remote_mps) {
			control = L2CAP_SDU_CONTINUE;
			buflen = pi->remote_mps;
			control = L2CAP_SDU_END;

		skb = l2cap_create_iframe_pdu(sk, msg, buflen, control, 0);
			skb_queue_purge(&sar_queue);
			return PTR_ERR(skb);

		__skb_queue_tail(&sar_queue, skb);

	skb_queue_splice_tail(&sar_queue, TX_QUEUE(sk));
	if (sk->sk_send_head == NULL)
		sk->sk_send_head = sar_queue.next;
1790 static int l2cap_sock_sendmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len)
1792 struct sock *sk = sock->sk;
1793 struct l2cap_pinfo *pi = l2cap_pi(sk);
1794 struct sk_buff *skb;
1798 BT_DBG("sock %p, sk %p", sock, sk);
1800 err = sock_error(sk);
1804 if (msg->msg_flags & MSG_OOB)
1809 if (sk->sk_state != BT_CONNECTED) {
1814 /* Connectionless channel */
1815 if (sk->sk_type == SOCK_DGRAM) {
1816 skb = l2cap_create_connless_pdu(sk, msg, len);
1820 l2cap_do_send(sk, skb);
1827 case L2CAP_MODE_BASIC:
1828 /* Check outgoing MTU */
1829 if (len > pi->omtu) {
1834 /* Create a basic PDU */
1835 skb = l2cap_create_basic_pdu(sk, msg, len);
1841 l2cap_do_send(sk, skb);
1845 case L2CAP_MODE_ERTM:
1846 case L2CAP_MODE_STREAMING:
1847 /* Entire SDU fits into one PDU */
1848 if (len <= pi->remote_mps) {
1849 control = L2CAP_SDU_UNSEGMENTED;
1850 skb = l2cap_create_iframe_pdu(sk, msg, len, control, 0);
1855 __skb_queue_tail(TX_QUEUE(sk), skb);
1857 if (sk->sk_send_head == NULL)
1858 sk->sk_send_head = skb;
1861 /* Segment SDU into multiples PDUs */
1862 err = l2cap_sar_segment_sdu(sk, msg, len);
1867 if (pi->mode == L2CAP_MODE_STREAMING) {
1868 err = l2cap_streaming_send(sk);
1870 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY &&
1871 pi->conn_state && L2CAP_CONN_WAIT_F) {
1875 err = l2cap_ertm_send(sk);
1883 BT_DBG("bad state %1.1x", pi->mode);
/*
 * recvmsg: with deferred setup enabled, the first read on a pending
 * (BT_CONNECT2) socket is what accepts the channel — send the delayed
 * connect response, then hand off to the common Bluetooth recvmsg.
 */
static int l2cap_sock_recvmsg(struct kiocb *iocb, struct socket *sock, struct msghdr *msg, size_t len, int flags)
	struct sock *sk = sock->sk;

	if (sk->sk_state == BT_CONNECT2 && bt_sk(sk)->defer_setup) {
		struct l2cap_conn_rsp rsp;

		sk->sk_state = BT_CONFIG;

		/* Our dcid/scid are the peer's scid/dcid in the response */
		rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
		rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
		rsp.result = cpu_to_le16(L2CAP_CR_SUCCESS);
		rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
		l2cap_send_cmd(l2cap_pi(sk)->conn, l2cap_pi(sk)->ident,
					L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	return bt_sock_recvmsg(iocb, sock, msg, len, flags);
/*
 * Legacy SOL_L2CAP setsockopt: L2CAP_OPTIONS (MTUs, mode, FCS, ERTM
 * parameters) and L2CAP_LM link-mode flags mapped onto security levels.
 */
static int l2cap_sock_setsockopt_old(struct socket *sock, int optname, char __user *optval, unsigned int optlen)
	struct sock *sk = sock->sk;
	struct l2cap_options opts;

	BT_DBG("sk %p", sk);

	/* Seed with current values so a short copy keeps the rest intact */
	opts.imtu     = l2cap_pi(sk)->imtu;
	opts.omtu     = l2cap_pi(sk)->omtu;
	opts.flush_to = l2cap_pi(sk)->flush_to;
	opts.mode     = l2cap_pi(sk)->mode;
	opts.fcs      = l2cap_pi(sk)->fcs;
	opts.max_tx   = l2cap_pi(sk)->max_tx;
	opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

	len = min_t(unsigned int, sizeof(opts), optlen);
	if (copy_from_user((char *) &opts, optval, len)) {

	/* tx_win is stored in a __u8; reject anything over the spec max */
	if (opts.txwin_size > L2CAP_DEFAULT_TX_WINDOW) {

	l2cap_pi(sk)->mode = opts.mode;
	switch (l2cap_pi(sk)->mode) {
	case L2CAP_MODE_BASIC:
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_STATE2_DEVICE;
	case L2CAP_MODE_ERTM:
	case L2CAP_MODE_STREAMING:

	l2cap_pi(sk)->imtu = opts.imtu;
	l2cap_pi(sk)->omtu = opts.omtu;
	l2cap_pi(sk)->fcs  = opts.fcs;
	l2cap_pi(sk)->max_tx = opts.max_tx;
	l2cap_pi(sk)->tx_win = (__u8)opts.txwin_size;

	if (get_user(opt, (u32 __user *) optval)) {

	/* Map legacy link-mode bits onto BT security levels */
	if (opt & L2CAP_LM_AUTH)
		l2cap_pi(sk)->sec_level = BT_SECURITY_LOW;
	if (opt & L2CAP_LM_ENCRYPT)
		l2cap_pi(sk)->sec_level = BT_SECURITY_MEDIUM;
	if (opt & L2CAP_LM_SECURE)
		l2cap_pi(sk)->sec_level = BT_SECURITY_HIGH;

	l2cap_pi(sk)->role_switch    = (opt & L2CAP_LM_MASTER);
	l2cap_pi(sk)->force_reliable = (opt & L2CAP_LM_RELIABLE);
/*
 * SOL_BLUETOOTH setsockopt: BT_SECURITY (security level) and
 * BT_DEFER_SETUP; legacy SOL_L2CAP options are routed to the old
 * handler for backward compatibility.
 */
static int l2cap_sock_setsockopt(struct socket *sock, int level, int optname, char __user *optval, unsigned int optlen)
	struct sock *sk = sock->sk;
	struct bt_security sec;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_setsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	/* BT_SECURITY only makes sense on connection-oriented/raw sockets */
	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
			&& sk->sk_type != SOCK_RAW) {

	sec.level = BT_SECURITY_LOW;

	len = min_t(unsigned int, sizeof(sec), optlen);
	if (copy_from_user((char *) &sec, optval, len)) {

	if (sec.level < BT_SECURITY_LOW ||
			sec.level > BT_SECURITY_HIGH) {

	l2cap_pi(sk)->sec_level = sec.level;

	case BT_DEFER_SETUP:
		/* Only sensible before the channel is established */
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {

		if (get_user(opt, (u32 __user *) optval)) {

		bt_sk(sk)->defer_setup = opt;
/*
 * Legacy SOL_L2CAP getsockopt: L2CAP_OPTIONS, L2CAP_LM (security level
 * mapped back to link-mode bits) and L2CAP_CONNINFO (HCI handle/class).
 */
static int l2cap_sock_getsockopt_old(struct socket *sock, int optname, char __user *optval, int __user *optlen)
	struct sock *sk = sock->sk;
	struct l2cap_options opts;
	struct l2cap_conninfo cinfo;

	BT_DBG("sk %p", sk);

	if (get_user(len, optlen))

	opts.imtu     = l2cap_pi(sk)->imtu;
	opts.omtu     = l2cap_pi(sk)->omtu;
	opts.flush_to = l2cap_pi(sk)->flush_to;
	opts.mode     = l2cap_pi(sk)->mode;
	opts.fcs      = l2cap_pi(sk)->fcs;
	opts.max_tx   = l2cap_pi(sk)->max_tx;
	opts.txwin_size = (__u16)l2cap_pi(sk)->tx_win;

	len = min_t(unsigned int, len, sizeof(opts));
	if (copy_to_user(optval, (char *) &opts, len))

	/* Translate the stored security level back into legacy LM bits */
	switch (l2cap_pi(sk)->sec_level) {
	case BT_SECURITY_LOW:
		opt = L2CAP_LM_AUTH;
	case BT_SECURITY_MEDIUM:
		opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT;
	case BT_SECURITY_HIGH:
		opt = L2CAP_LM_AUTH | L2CAP_LM_ENCRYPT |

	if (l2cap_pi(sk)->role_switch)
		opt |= L2CAP_LM_MASTER;

	if (l2cap_pi(sk)->force_reliable)
		opt |= L2CAP_LM_RELIABLE;

	if (put_user(opt, (u32 __user *) optval))

	case L2CAP_CONNINFO:
		/* Valid once connected, or while a deferred accept is pending */
		if (sk->sk_state != BT_CONNECTED &&
					!(sk->sk_state == BT_CONNECT2 &&
						bt_sk(sk)->defer_setup)) {

		cinfo.hci_handle = l2cap_pi(sk)->conn->hcon->handle;
		memcpy(cinfo.dev_class, l2cap_pi(sk)->conn->hcon->dev_class, 3);

		len = min_t(unsigned int, len, sizeof(cinfo));
		if (copy_to_user(optval, (char *) &cinfo, len))
/*
 * SOL_BLUETOOTH getsockopt: BT_SECURITY and BT_DEFER_SETUP; legacy
 * SOL_L2CAP options are routed to the old handler.
 */
static int l2cap_sock_getsockopt(struct socket *sock, int level, int optname, char __user *optval, int __user *optlen)
	struct sock *sk = sock->sk;
	struct bt_security sec;

	BT_DBG("sk %p", sk);

	if (level == SOL_L2CAP)
		return l2cap_sock_getsockopt_old(sock, optname, optval, optlen);

	if (level != SOL_BLUETOOTH)
		return -ENOPROTOOPT;

	if (get_user(len, optlen))

	if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM
			&& sk->sk_type != SOCK_RAW) {

	sec.level = l2cap_pi(sk)->sec_level;

	len = min_t(unsigned int, len, sizeof(sec));
	if (copy_to_user(optval, (char *) &sec, len))

	case BT_DEFER_SETUP:
		if (sk->sk_state != BT_BOUND && sk->sk_state != BT_LISTEN) {

		if (put_user(bt_sk(sk)->defer_setup, (u32 __user *) optval))
/*
 * shutdown: in ERTM wait for outstanding frames to be acked, then close
 * the channel; with SO_LINGER set, also wait for BT_CLOSED.
 */
static int l2cap_sock_shutdown(struct socket *sock, int how)
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	if (!sk->sk_shutdown) {
		/* Drain the ERTM window before tearing the channel down */
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			err = __l2cap_wait_ack(sk);

		sk->sk_shutdown = SHUTDOWN_MASK;
		l2cap_sock_clear_timer(sk);
		__l2cap_sock_close(sk, 0);

		if (sock_flag(sk, SOCK_LINGER) && sk->sk_lingertime)
			err = bt_sock_wait_state(sk, BT_CLOSED,

	if (!err && sk->sk_err)
/* release: full shutdown (both directions) and then kill the socket. */
static int l2cap_sock_release(struct socket *sock)
	struct sock *sk = sock->sk;

	BT_DBG("sock %p, sk %p", sock, sk);

	err = l2cap_sock_shutdown(sock, 2);

	l2cap_sock_kill(sk);
/*
 * Channel is fully configured: clear config state and wake up whoever
 * is waiting — connect() for outgoing channels, accept() (via the
 * parent socket) for incoming ones.
 */
static void l2cap_chan_ready(struct sock *sk)
	struct sock *parent = bt_sk(sk)->parent;

	BT_DBG("sk %p, parent %p", sk, parent);

	l2cap_pi(sk)->conf_state = 0;
	l2cap_sock_clear_timer(sk);

	/* Outgoing channel.
	 * Wake up socket sleeping on connect.
	sk->sk_state = BT_CONNECTED;
	sk->sk_state_change(sk);

	/* Incoming channel.
	 * Wake up socket sleeping on accept.
	parent->sk_data_ready(parent, 0);
/* Copy frame to all raw sockets on that connection */
static void l2cap_raw_recv(struct l2cap_conn *conn, struct sk_buff *skb)
	struct l2cap_chan_list *l = &conn->chan_list;
	struct sk_buff *nskb;

	BT_DBG("conn %p", conn);

	read_lock(&l->lock);
	for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
		if (sk->sk_type != SOCK_RAW)

		/* Don't send frame to the socket it came from */

		nskb = skb_clone(skb, GFP_ATOMIC);

		/* Drop the clone if the receive queue refuses it */
		if (sock_queue_rcv_skb(sk, nskb))
	read_unlock(&l->lock);
2301 /* ---- L2CAP signalling commands ---- */
/*
 * Build a signalling-channel command skb: L2CAP header (CID 0x0001) +
 * command header + payload, with continuation data chained on frag_list
 * when the command exceeds the ACL MTU.
 */
static struct sk_buff *l2cap_build_cmd(struct l2cap_conn *conn,
					u8 code, u8 ident, u16 dlen, void *data)
	struct sk_buff *skb, **frag;
	struct l2cap_cmd_hdr *cmd;
	struct l2cap_hdr *lh;

	BT_DBG("conn %p, code 0x%2.2x, ident 0x%2.2x, len %d",
			conn, code, ident, dlen);

	len = L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE + dlen;
	count = min_t(unsigned int, conn->mtu, len);

	skb = bt_skb_alloc(count, GFP_ATOMIC);

	lh = (struct l2cap_hdr *) skb_put(skb, L2CAP_HDR_SIZE);
	lh->len = cpu_to_le16(L2CAP_CMD_HDR_SIZE + dlen);
	lh->cid = cpu_to_le16(L2CAP_CID_SIGNALING);

	cmd = (struct l2cap_cmd_hdr *) skb_put(skb, L2CAP_CMD_HDR_SIZE);
	cmd->len = cpu_to_le16(dlen);

	/* First chunk: whatever payload fits after both headers */
	count -= L2CAP_HDR_SIZE + L2CAP_CMD_HDR_SIZE;
	memcpy(skb_put(skb, count), data, count);

	/* Continuation fragments (no L2CAP header) */
	frag = &skb_shinfo(skb)->frag_list;
		count = min_t(unsigned int, conn->mtu, len);

		*frag = bt_skb_alloc(count, GFP_ATOMIC);

		memcpy(skb_put(*frag, count), data, count);

		frag = &(*frag)->next;
/*
 * Pull one option out of a configuration buffer: advances *ptr past the
 * option, returns its total length, and delivers the value host-endian
 * in *val (or as a pointer to the raw bytes for variable-length opts).
 */
static inline int l2cap_get_conf_opt(void **ptr, int *type, int *olen, unsigned long *val)
	struct l2cap_conf_opt *opt = *ptr;

	len = L2CAP_CONF_OPT_SIZE + opt->len;

		*val = *((u8 *) opt->val);

		*val = __le16_to_cpu(*((__le16 *) opt->val));

		*val = __le32_to_cpu(*((__le32 *) opt->val));

		/* Variable-length option: hand back a pointer to the bytes */
		*val = (unsigned long) opt->val;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", *type, opt->len, *val);
/*
 * Append one option to a configuration buffer, converting 2/4-byte
 * values to little-endian wire format; larger values are copied raw
 * from the pointer passed in val.  Advances *ptr past the option.
 */
static void l2cap_add_conf_opt(void **ptr, u8 type, u8 len, unsigned long val)
	struct l2cap_conf_opt *opt = *ptr;

	BT_DBG("type 0x%2.2x len %d val 0x%lx", type, len, val);

		*((u8 *) opt->val) = val;

		*((__le16 *) opt->val) = cpu_to_le16(val);

		*((__le32 *) opt->val) = cpu_to_le32(val);

		memcpy(opt->val, (void *) val, len);

	*ptr += L2CAP_CONF_OPT_SIZE + len;
/* Delayed-ack timer: acknowledge everything received when it fires. */
static void l2cap_ack_timeout(unsigned long arg)
	struct sock *sk = (void *) arg;

	l2cap_send_ack(l2cap_pi(sk));
/*
 * Initialise per-channel ERTM state: sequence counters, the retransmit,
 * monitor and delayed-ack timers, the SREJ/busy queues, and the busy
 * worker; backlog processing is redirected to the ERTM receive path.
 */
static inline void l2cap_ertm_init(struct sock *sk)
	l2cap_pi(sk)->expected_ack_seq = 0;
	l2cap_pi(sk)->unacked_frames = 0;
	l2cap_pi(sk)->buffer_seq = 0;
	l2cap_pi(sk)->num_acked = 0;
	l2cap_pi(sk)->frames_sent = 0;

	setup_timer(&l2cap_pi(sk)->retrans_timer,
			l2cap_retrans_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->monitor_timer,
			l2cap_monitor_timeout, (unsigned long) sk);
	setup_timer(&l2cap_pi(sk)->ack_timer,
			l2cap_ack_timeout, (unsigned long) sk);

	__skb_queue_head_init(SREJ_QUEUE(sk));
	__skb_queue_head_init(BUSY_QUEUE(sk));

	INIT_WORK(&l2cap_pi(sk)->busy_work, l2cap_busy_work);

	sk->sk_backlog_rcv = l2cap_ertm_data_rcv;
/*
 * Choose the mode actually used for the channel: keep ERTM/streaming
 * only when the remote advertised support, else fall back to basic.
 */
static inline __u8 l2cap_select_mode(__u8 mode, __u16 remote_feat_mask)
	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (l2cap_mode_supported(mode, remote_feat_mask))

	return L2CAP_MODE_BASIC;
/*
 * Build our outgoing configuration request: an MTU option plus, for
 * ERTM/streaming, an RFC option with the PDU size clamped to the ACL
 * MTU, and an FCS option offering to drop the checksum when allowed.
 */
static int l2cap_build_conf_req(struct sock *sk, void *data)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	struct l2cap_conf_rfc rfc = { .mode = pi->mode };
	void *ptr = req->data;

	BT_DBG("sk %p", sk);

	/* Only the very first exchange may still re-negotiate the mode */
	if (pi->num_conf_req || pi->num_conf_rsp)

	case L2CAP_MODE_STREAMING:
	case L2CAP_MODE_ERTM:
		if (pi->conf_state & L2CAP_CONF_STATE2_DEVICE)

		pi->mode = l2cap_select_mode(rfc.mode, pi->conn->feat_mask);

	case L2CAP_MODE_BASIC:
		if (pi->imtu != L2CAP_DEFAULT_MTU)
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->imtu);

		rfc.mode = L2CAP_MODE_BASIC;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = 0;

	case L2CAP_MODE_ERTM:
		rfc.mode = L2CAP_MODE_ERTM;
		rfc.txwin_size = pi->tx_win;
		rfc.max_transmit = pi->max_tx;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		/* Keep hdr+control+sdulen+fcs (10 bytes) within the ACL MTU */
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);

	case L2CAP_MODE_STREAMING:
		rfc.mode = L2CAP_MODE_STREAMING;
		rfc.max_transmit = 0;
		rfc.retrans_timeout = 0;
		rfc.monitor_timeout = 0;
		rfc.max_pdu_size = cpu_to_le16(L2CAP_DEFAULT_MAX_PDU_SIZE);
		if (L2CAP_DEFAULT_MAX_PDU_SIZE > pi->conn->mtu - 10)
			rfc.max_pdu_size = cpu_to_le16(pi->conn->mtu - 10);

		if (!(pi->conn->feat_mask & L2CAP_FEAT_FCS))

		if (pi->fcs == L2CAP_FCS_NONE ||
				pi->conf_state & L2CAP_CONF_NO_FCS_RECV) {
			pi->fcs = L2CAP_FCS_NONE;
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FCS, 1, pi->fcs);

	l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC, sizeof(rfc),
						(unsigned long) &rfc);

	/* FIXME: Need actual value of the flush timeout */
	//if (flush_to != L2CAP_DEFAULT_FLUSH_TO)
	//   l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO, 2, pi->flush_to);

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0);
2562 static int l2cap_parse_conf_req(struct sock *sk, void *data)
2564 struct l2cap_pinfo *pi = l2cap_pi(sk);
2565 struct l2cap_conf_rsp *rsp = data;
2566 void *ptr = rsp->data;
2567 void *req = pi->conf_req;
2568 int len = pi->conf_len;
2569 int type, hint, olen;
2571 struct l2cap_conf_rfc rfc = { .mode = L2CAP_MODE_BASIC };
2572 u16 mtu = L2CAP_DEFAULT_MTU;
2573 u16 result = L2CAP_CONF_SUCCESS;
2575 BT_DBG("sk %p", sk);
2577 while (len >= L2CAP_CONF_OPT_SIZE) {
2578 len -= l2cap_get_conf_opt(&req, &type, &olen, &val);
2580 hint = type & L2CAP_CONF_HINT;
2581 type &= L2CAP_CONF_MASK;
2584 case L2CAP_CONF_MTU:
2588 case L2CAP_CONF_FLUSH_TO:
2592 case L2CAP_CONF_QOS:
2595 case L2CAP_CONF_RFC:
2596 if (olen == sizeof(rfc))
2597 memcpy(&rfc, (void *) val, olen);
2600 case L2CAP_CONF_FCS:
2601 if (val == L2CAP_FCS_NONE)
2602 pi->conf_state |= L2CAP_CONF_NO_FCS_RECV;
2610 result = L2CAP_CONF_UNKNOWN;
2611 *((u8 *) ptr++) = type;
2616 if (pi->num_conf_rsp || pi->num_conf_req)
2620 case L2CAP_MODE_STREAMING:
2621 case L2CAP_MODE_ERTM:
2622 if (!(pi->conf_state & L2CAP_CONF_STATE2_DEVICE)) {
2623 pi->mode = l2cap_select_mode(rfc.mode,
2624 pi->conn->feat_mask);
2628 if (pi->mode != rfc.mode)
2629 return -ECONNREFUSED;
2635 if (pi->mode != rfc.mode) {
2636 result = L2CAP_CONF_UNACCEPT;
2637 rfc.mode = pi->mode;
2639 if (pi->num_conf_rsp == 1)
2640 return -ECONNREFUSED;
2642 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2643 sizeof(rfc), (unsigned long) &rfc);
2647 if (result == L2CAP_CONF_SUCCESS) {
2648 /* Configure output options and let the other side know
2649 * which ones we don't like. */
2651 if (mtu < L2CAP_DEFAULT_MIN_MTU)
2652 result = L2CAP_CONF_UNACCEPT;
2655 pi->conf_state |= L2CAP_CONF_MTU_DONE;
2657 l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);
2660 case L2CAP_MODE_BASIC:
2661 pi->fcs = L2CAP_FCS_NONE;
2662 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2665 case L2CAP_MODE_ERTM:
2666 pi->remote_tx_win = rfc.txwin_size;
2667 pi->remote_max_tx = rfc.max_transmit;
2668 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2669 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2671 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2673 rfc.retrans_timeout =
2674 le16_to_cpu(L2CAP_DEFAULT_RETRANS_TO);
2675 rfc.monitor_timeout =
2676 le16_to_cpu(L2CAP_DEFAULT_MONITOR_TO);
2678 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2680 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2681 sizeof(rfc), (unsigned long) &rfc);
2685 case L2CAP_MODE_STREAMING:
2686 if (rfc.max_pdu_size > pi->conn->mtu - 10)
2687 rfc.max_pdu_size = le16_to_cpu(pi->conn->mtu - 10);
2689 pi->remote_mps = le16_to_cpu(rfc.max_pdu_size);
2691 pi->conf_state |= L2CAP_CONF_MODE_DONE;
2693 l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
2694 sizeof(rfc), (unsigned long) &rfc);
2699 result = L2CAP_CONF_UNACCEPT;
2701 memset(&rfc, 0, sizeof(rfc));
2702 rfc.mode = pi->mode;
2705 if (result == L2CAP_CONF_SUCCESS)
2706 pi->conf_state |= L2CAP_CONF_OUTPUT_DONE;
2708 rsp->scid = cpu_to_le16(pi->dcid);
2709 rsp->result = cpu_to_le16(result);
2710 rsp->flags = cpu_to_le16(0x0000);
/*
 * Parse the peer's configuration response and build our follow-up
 * request into `data`, adjusting *result when an option is rejected.
 * Returns the length of the new request or -ECONNREFUSED.
 */
static int l2cap_parse_conf_rsp(struct sock *sk, void *rsp, int len, void *data, u16 *result)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_req *req = data;
	void *ptr = req->data;
	/* NOTE(review): rfc is only written when an RFC option is present,
	 * but rfc.mode is read unconditionally below — confirm every
	 * response on a non-basic channel carries an RFC option. */
	struct l2cap_conf_rfc rfc;

	BT_DBG("sk %p, rsp %p, len %d, req %p", sk, rsp, len, data);

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		case L2CAP_CONF_MTU:
			if (val < L2CAP_DEFAULT_MIN_MTU) {
				*result = L2CAP_CONF_UNACCEPT;
				pi->omtu = L2CAP_DEFAULT_MIN_MTU;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_MTU, 2, pi->omtu);

		case L2CAP_CONF_FLUSH_TO:
			l2cap_add_conf_opt(&ptr, L2CAP_CONF_FLUSH_TO,

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

			/* A forced mode that the peer changed is fatal */
			if ((pi->conf_state & L2CAP_CONF_STATE2_DEVICE) &&
					rfc.mode != pi->mode)
				return -ECONNREFUSED;

			l2cap_add_conf_opt(&ptr, L2CAP_CONF_RFC,
					sizeof(rfc), (unsigned long) &rfc);

	if (pi->mode == L2CAP_MODE_BASIC && pi->mode != rfc.mode)
		return -ECONNREFUSED;

	pi->mode = rfc.mode;

	if (*result == L2CAP_CONF_SUCCESS) {
		case L2CAP_MODE_ERTM:
			pi->remote_tx_win = rfc.txwin_size;
			pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
			pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
			pi->mps = le16_to_cpu(rfc.max_pdu_size);
		case L2CAP_MODE_STREAMING:
			pi->mps = le16_to_cpu(rfc.max_pdu_size);

	req->dcid = cpu_to_le16(pi->dcid);
	req->flags = cpu_to_le16(0x0000);
/* Fill in a minimal configuration response header (scid/result/flags). */
static int l2cap_build_conf_rsp(struct sock *sk, void *data, u16 result, u16 flags)
	struct l2cap_conf_rsp *rsp = data;
	void *ptr = rsp->data;

	BT_DBG("sk %p", sk);

	rsp->scid = cpu_to_le16(l2cap_pi(sk)->dcid);
	rsp->result = cpu_to_le16(result);
	rsp->flags = cpu_to_le16(flags);
/*
 * Extract the RFC option from a successful configuration response and
 * record the negotiated ERTM/streaming parameters (window, timeouts,
 * MPS).  No-op for basic-mode channels.
 */
static void l2cap_conf_rfc_get(struct sock *sk, void *rsp, int len)
	struct l2cap_pinfo *pi = l2cap_pi(sk);
	struct l2cap_conf_rfc rfc;

	BT_DBG("sk %p, rsp %p, len %d", sk, rsp, len);

	if ((pi->mode != L2CAP_MODE_ERTM) && (pi->mode != L2CAP_MODE_STREAMING))

	while (len >= L2CAP_CONF_OPT_SIZE) {
		len -= l2cap_get_conf_opt(&rsp, &type, &olen, &val);

		case L2CAP_CONF_RFC:
			if (olen == sizeof(rfc))
				memcpy(&rfc, (void *)val, olen);

	case L2CAP_MODE_ERTM:
		pi->remote_tx_win = rfc.txwin_size;
		pi->retrans_timeout = le16_to_cpu(rfc.retrans_timeout);
		pi->monitor_timeout = le16_to_cpu(rfc.monitor_timeout);
		pi->mps = le16_to_cpu(rfc.max_pdu_size);
	case L2CAP_MODE_STREAMING:
		pi->mps = le16_to_cpu(rfc.max_pdu_size);
/*
 * Command Reject handler: if it rejects our outstanding feature-mask
 * information request, treat the exchange as done and kick off any
 * channels that were waiting on it.
 */
static inline int l2cap_command_rej(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_cmd_rej *rej = (struct l2cap_cmd_rej *) data;

	if (rej->reason != 0x0000)

	if ((conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_SENT) &&
			cmd->ident == conn->info_ident) {
		del_timer(&conn->info_timer);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
		conn->info_ident = 0;

		l2cap_conn_start(conn);
/*
 * Connection Request handler: find a listener for the PSM, enforce link
 * security (except for SDP), allocate and register a child socket, and
 * answer with success / pending / reject.  A pending result with no
 * info triggers the feature-mask information exchange.
 */
static inline int l2cap_connect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_chan_list *list = &conn->chan_list;
	struct l2cap_conn_req *req = (struct l2cap_conn_req *) data;
	struct l2cap_conn_rsp rsp;
	struct sock *sk, *parent;
	int result, status = L2CAP_CS_NO_INFO;

	u16 dcid = 0, scid = __le16_to_cpu(req->scid);
	__le16 psm = req->psm;

	BT_DBG("psm 0x%2.2x scid 0x%4.4x", psm, scid);

	/* Check if we have socket listening on psm */
	parent = l2cap_get_sock_by_psm(BT_LISTEN, psm, conn->src);
		result = L2CAP_CR_BAD_PSM;

	/* Check if the ACL is secure enough (if not SDP) */
	if (psm != cpu_to_le16(0x0001) &&
				!hci_conn_check_link_mode(conn->hcon)) {
		conn->disc_reason = 0x05;
		result = L2CAP_CR_SEC_BLOCK;

	result = L2CAP_CR_NO_MEM;

	/* Check for backlog size */
	if (sk_acceptq_is_full(parent)) {
		BT_DBG("backlog full %d", parent->sk_ack_backlog);

	sk = l2cap_sock_alloc(sock_net(parent), NULL, BTPROTO_L2CAP, GFP_ATOMIC);

	write_lock_bh(&list->lock);

	/* Check if we already have channel with that dcid */
	if (__l2cap_get_chan_by_dcid(list, scid)) {
		write_unlock_bh(&list->lock);
		sock_set_flag(sk, SOCK_ZAPPED);
		l2cap_sock_kill(sk);

	hci_conn_hold(conn->hcon);

	l2cap_sock_init(sk, parent);
	bacpy(&bt_sk(sk)->src, conn->src);
	bacpy(&bt_sk(sk)->dst, conn->dst);
	l2cap_pi(sk)->psm = psm;
	l2cap_pi(sk)->dcid = scid;

	__l2cap_chan_add(conn, sk, parent);
	dcid = l2cap_pi(sk)->scid;

	l2cap_sock_set_timer(sk, sk->sk_sndtimeo);

	l2cap_pi(sk)->ident = cmd->ident;

	if (conn->info_state & L2CAP_INFO_FEAT_MASK_REQ_DONE) {
		if (l2cap_check_security(sk)) {
			if (bt_sk(sk)->defer_setup) {
				sk->sk_state = BT_CONNECT2;
				result = L2CAP_CR_PEND;
				status = L2CAP_CS_AUTHOR_PEND;
				parent->sk_data_ready(parent, 0);

				sk->sk_state = BT_CONFIG;
				result = L2CAP_CR_SUCCESS;
				status = L2CAP_CS_NO_INFO;

			sk->sk_state = BT_CONNECT2;
			result = L2CAP_CR_PEND;
			status = L2CAP_CS_AUTHEN_PEND;

		sk->sk_state = BT_CONNECT2;
		result = L2CAP_CR_PEND;
		status = L2CAP_CS_NO_INFO;

	write_unlock_bh(&list->lock);

	bh_unlock_sock(parent);

	rsp.scid = cpu_to_le16(scid);
	rsp.dcid = cpu_to_le16(dcid);
	rsp.result = cpu_to_le16(result);
	rsp.status = cpu_to_le16(status);
	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONN_RSP, sizeof(rsp), &rsp);

	/* Kick off the feature-mask exchange if not yet performed */
	if (result == L2CAP_CR_PEND && status == L2CAP_CS_NO_INFO) {
		struct l2cap_info_req info;
		info.type = cpu_to_le16(L2CAP_IT_FEAT_MASK);

		conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_SENT;
		conn->info_ident = l2cap_get_ident(conn);

		mod_timer(&conn->info_timer, jiffies +
					msecs_to_jiffies(L2CAP_INFO_TIMEOUT));

		l2cap_send_cmd(conn, conn->info_ident,
					L2CAP_INFO_REQ, sizeof(info), &info);
/*
 * Connection Response handler: locate the pending channel (by scid or,
 * while still unassigned, by command ident) and either move it into
 * configuration, keep it pending, or tear it down on refusal.
 */
static inline int l2cap_connect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
	struct l2cap_conn_rsp *rsp = (struct l2cap_conn_rsp *) data;
	u16 scid, dcid, result, status;

	scid = __le16_to_cpu(rsp->scid);
	dcid = __le16_to_cpu(rsp->dcid);
	result = __le16_to_cpu(rsp->result);
	status = __le16_to_cpu(rsp->status);

	BT_DBG("dcid 0x%4.4x scid 0x%4.4x result 0x%2.2x status 0x%2.2x", dcid, scid, result, status);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);

	sk = l2cap_get_chan_by_ident(&conn->chan_list, cmd->ident);

	case L2CAP_CR_SUCCESS:
		sk->sk_state = BT_CONFIG;
		l2cap_pi(sk)->ident = 0;
		l2cap_pi(sk)->dcid = dcid;
		l2cap_pi(sk)->conf_state |= L2CAP_CONF_REQ_SENT;
		l2cap_pi(sk)->conf_state &= ~L2CAP_CONF_CONNECT_PEND;

		/* Immediately start configuring the accepted channel */
		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, req), req);
		l2cap_pi(sk)->num_conf_req++;

		l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;

		l2cap_chan_del(sk, ECONNREFUSED);
/*
 * Configuration Request handler: accumulate (possibly multi-fragment)
 * config data in the channel's buffer, parse it when complete, answer,
 * and — once both directions are configured — bring the channel up.
 */
static inline int l2cap_config_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u16 cmd_len, u8 *data)
	struct l2cap_conf_req *req = (struct l2cap_conf_req *) data;

	dcid  = __le16_to_cpu(req->dcid);
	flags = __le16_to_cpu(req->flags);

	BT_DBG("dcid 0x%4.4x flags 0x%2.2x", dcid, flags);

	sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);

	/* Config only valid in BT_CONFIG; reject with "invalid CID" */
	if (sk->sk_state != BT_CONFIG) {
		struct l2cap_cmd_rej rej;

		rej.reason = cpu_to_le16(0x0002);
		l2cap_send_cmd(conn, cmd->ident, L2CAP_COMMAND_REJ,

	/* Reject if config buffer is too small. */
	len = cmd_len - sizeof(*req);
	if (l2cap_pi(sk)->conf_len + len > sizeof(l2cap_pi(sk)->conf_req)) {
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_REJECT, flags), rsp);

	memcpy(l2cap_pi(sk)->conf_req + l2cap_pi(sk)->conf_len, req->data, len);
	l2cap_pi(sk)->conf_len += len;

	if (flags & 0x0001) {
		/* Incomplete config. Send empty response. */
		l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP,
				l2cap_build_conf_rsp(sk, rsp,
					L2CAP_CONF_SUCCESS, 0x0001), rsp);

	/* Complete config. */
	len = l2cap_parse_conf_req(sk, rsp);
		l2cap_send_disconn_req(conn, sk, ECONNRESET);

	l2cap_send_cmd(conn, cmd->ident, L2CAP_CONF_RSP, len, rsp);
	l2cap_pi(sk)->num_conf_rsp++;

	/* Reset config buffer. */
	l2cap_pi(sk)->conf_len = 0;

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE))

	if (l2cap_pi(sk)->conf_state & L2CAP_CONF_INPUT_DONE) {
		/* Both directions configured: enable FCS unless both sides
		 * agreed to drop it, then bring the channel up */
		if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
				l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
			l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;

		sk->sk_state = BT_CONNECTED;

		l2cap_pi(sk)->next_tx_seq = 0;
		l2cap_pi(sk)->expected_tx_seq = 0;
		__skb_queue_head_init(TX_QUEUE(sk));
		if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
			l2cap_ertm_init(sk);

		l2cap_chan_ready(sk);

	if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_REQ_SENT)) {

		l2cap_send_cmd(conn, l2cap_get_ident(conn), L2CAP_CONF_REQ,
					l2cap_build_conf_req(sk, buf), buf);
		l2cap_pi(sk)->num_conf_req++;
/*
 * Handle an incoming L2CAP Configure Response on the signalling channel.
 * Looks up the channel by SCID and acts on the result code: SUCCESS caches
 * the peer's RFC option; UNACCEPT re-negotiates with a fresh CONF_REQ (the
 * retry budget is bounded by L2CAP_CONF_MAX_CONF_RSP); anything else tears
 * the channel down with ECONNRESET.  Once both config directions are done
 * (INPUT_DONE and OUTPUT_DONE) the channel goes BT_CONNECTED and the ERTM
 * sequence state is reset.
 * NOTE(review): this dump elides interior lines (gaps in the embedded
 * numbering), so braces/case structure between statements is not visible.
 */
3114 static inline int l2cap_config_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3116 struct l2cap_conf_rsp *rsp = (struct l2cap_conf_rsp *)data;
3117 u16 scid, flags, result;
3119 int len = cmd->len - sizeof(*rsp);
3121 scid = __le16_to_cpu(rsp->scid);
3122 flags = __le16_to_cpu(rsp->flags);
3123 result = __le16_to_cpu(rsp->result);
3125 BT_DBG("scid 0x%4.4x flags 0x%2.2x result 0x%2.2x",
3126 scid, flags, result);
3128 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3133 case L2CAP_CONF_SUCCESS:
3134 l2cap_conf_rfc_get(sk, rsp->data, len);
3137 case L2CAP_CONF_UNACCEPT:
3138 if (l2cap_pi(sk)->num_conf_rsp <= L2CAP_CONF_MAX_CONF_RSP) {
/* Guard: re-parsed options must still fit into the request buffer. */
3141 if (len > sizeof(req) - sizeof(struct l2cap_conf_req)) {
3142 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3146 /* throw out any old stored conf requests */
3147 result = L2CAP_CONF_SUCCESS;
3148 len = l2cap_parse_conf_rsp(sk, rsp->data,
3151 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3155 l2cap_send_cmd(conn, l2cap_get_ident(conn),
3156 L2CAP_CONF_REQ, len, req);
3157 l2cap_pi(sk)->num_conf_req++;
3158 if (result != L2CAP_CONF_SUCCESS)
/* Unrecoverable result: flag the error and schedule teardown. */
3164 sk->sk_err = ECONNRESET;
3165 l2cap_sock_set_timer(sk, HZ * 5);
3166 l2cap_send_disconn_req(conn, sk, ECONNRESET);
3173 l2cap_pi(sk)->conf_state |= L2CAP_CONF_INPUT_DONE;
3175 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_OUTPUT_DONE) {
/* FCS defaults to CRC16 unless both sides agreed to drop it. */
3176 if (!(l2cap_pi(sk)->conf_state & L2CAP_CONF_NO_FCS_RECV) ||
3177 l2cap_pi(sk)->fcs != L2CAP_FCS_NONE)
3178 l2cap_pi(sk)->fcs = L2CAP_FCS_CRC16;
3180 sk->sk_state = BT_CONNECTED;
3181 l2cap_pi(sk)->next_tx_seq = 0;
3182 l2cap_pi(sk)->expected_tx_seq = 0;
3183 __skb_queue_head_init(TX_QUEUE(sk));
3184 if (l2cap_pi(sk)->mode == L2CAP_MODE_ERTM)
3185 l2cap_ertm_init(sk);
3187 l2cap_chan_ready(sk);
/*
 * Handle a peer-initiated Disconnect Request: find the channel (the peer's
 * DCID is our SCID), echo a Disconnect Response with our CID pair, then
 * shut down and kill the socket.
 */
3195 static inline int l2cap_disconnect_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3197 struct l2cap_disconn_req *req = (struct l2cap_disconn_req *) data;
3198 struct l2cap_disconn_rsp rsp;
3202 scid = __le16_to_cpu(req->scid);
3203 dcid = __le16_to_cpu(req->dcid);
3205 BT_DBG("scid 0x%4.4x dcid 0x%4.4x", scid, dcid);
/* Lookup by dcid: the peer's destination CID is our source CID. */
3207 sk = l2cap_get_chan_by_scid(&conn->chan_list, dcid);
3211 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
3212 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
3213 l2cap_send_cmd(conn, cmd->ident, L2CAP_DISCONN_RSP, sizeof(rsp), &rsp);
3215 sk->sk_shutdown = SHUTDOWN_MASK;
3217 l2cap_chan_del(sk, ECONNRESET);
3220 l2cap_sock_kill(sk);
/*
 * Handle the Disconnect Response to a disconnect we initiated: look the
 * channel up by our SCID and remove it (error 0 — clean close).
 */
3224 static inline int l2cap_disconnect_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3226 struct l2cap_disconn_rsp *rsp = (struct l2cap_disconn_rsp *) data;
3230 scid = __le16_to_cpu(rsp->scid);
3231 dcid = __le16_to_cpu(rsp->dcid);
3233 BT_DBG("dcid 0x%4.4x scid 0x%4.4x", dcid, scid);
3235 sk = l2cap_get_chan_by_scid(&conn->chan_list, scid);
3239 l2cap_chan_del(sk, 0);
3242 l2cap_sock_kill(sk);
/*
 * Answer an Information Request from the peer.  Three cases: the extended
 * feature mask (ERTM/streaming bits are advertised conditionally — the
 * guard is elided in this dump, presumably on enable_ertm), the fixed
 * channel map (l2cap_fixed_chan), or NOTSUPP for any other info type.
 */
3246 static inline int l2cap_information_req(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3248 struct l2cap_info_req *req = (struct l2cap_info_req *) data;
3251 type = __le16_to_cpu(req->type);
3253 BT_DBG("type 0x%4.4x", type);
3255 if (type == L2CAP_IT_FEAT_MASK) {
3257 u32 feat_mask = l2cap_feat_mask;
3258 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3259 rsp->type = cpu_to_le16(L2CAP_IT_FEAT_MASK);
3260 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
3262 feat_mask |= L2CAP_FEAT_ERTM | L2CAP_FEAT_STREAMING
/* Feature mask is an unaligned LE32 payload after the fixed header. */
3264 put_unaligned_le32(feat_mask, rsp->data);
3265 l2cap_send_cmd(conn, cmd->ident,
3266 L2CAP_INFO_RSP, sizeof(buf), buf);
3267 } else if (type == L2CAP_IT_FIXED_CHAN) {
3269 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) buf;
3270 rsp->type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3271 rsp->result = cpu_to_le16(L2CAP_IR_SUCCESS);
/* 8-byte fixed-channel bitmap copied after the 4-byte rsp header. */
3272 memcpy(buf + 4, l2cap_fixed_chan, 8);
3273 l2cap_send_cmd(conn, cmd->ident,
3274 L2CAP_INFO_RSP, sizeof(buf), buf);
3276 struct l2cap_info_rsp rsp;
3277 rsp.type = cpu_to_le16(type);
3278 rsp.result = cpu_to_le16(L2CAP_IR_NOTSUPP);
3279 l2cap_send_cmd(conn, cmd->ident,
3280 L2CAP_INFO_RSP, sizeof(rsp), &rsp);
/*
 * Process an Information Response from the peer.  On a feature-mask reply,
 * cache the mask; if the peer claims fixed-channel support, chain a second
 * request for the fixed channel map, otherwise mark the info exchange done
 * and kick off any connections waiting on it (l2cap_conn_start).
 */
3286 static inline int l2cap_information_rsp(struct l2cap_conn *conn, struct l2cap_cmd_hdr *cmd, u8 *data)
3288 struct l2cap_info_rsp *rsp = (struct l2cap_info_rsp *) data;
3291 type = __le16_to_cpu(rsp->type);
3292 result = __le16_to_cpu(rsp->result);
3294 BT_DBG("type 0x%4.4x result 0x%2.2x", type, result);
/* A reply arrived in time — cancel the info request timeout. */
3296 del_timer(&conn->info_timer);
3298 if (type == L2CAP_IT_FEAT_MASK) {
3299 conn->feat_mask = get_unaligned_le32(rsp->data);
3301 if (conn->feat_mask & L2CAP_FEAT_FIXED_CHAN) {
3302 struct l2cap_info_req req;
3303 req.type = cpu_to_le16(L2CAP_IT_FIXED_CHAN);
3305 conn->info_ident = l2cap_get_ident(conn);
3307 l2cap_send_cmd(conn, conn->info_ident,
3308 L2CAP_INFO_REQ, sizeof(req), &req);
3310 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3311 conn->info_ident = 0;
3313 l2cap_conn_start(conn);
3315 } else if (type == L2CAP_IT_FIXED_CHAN) {
3316 conn->info_state |= L2CAP_INFO_FEAT_MASK_REQ_DONE;
3317 conn->info_ident = 0;
3319 l2cap_conn_start(conn);
/*
 * Demultiplex the L2CAP signalling channel (CID 0x0001).  A single PDU may
 * carry several commands; walk them header-by-header, dispatch each to its
 * handler, and send a Command Reject if a handler fails.  The whole skb is
 * first mirrored to any raw sockets via l2cap_raw_recv().
 */
3325 static inline void l2cap_sig_channel(struct l2cap_conn *conn, struct sk_buff *skb)
3327 u8 *data = skb->data;
3329 struct l2cap_cmd_hdr cmd;
3332 l2cap_raw_recv(conn, skb);
3334 while (len >= L2CAP_CMD_HDR_SIZE) {
3336 memcpy(&cmd, data, L2CAP_CMD_HDR_SIZE);
3337 data += L2CAP_CMD_HDR_SIZE;
3338 len -= L2CAP_CMD_HDR_SIZE;
3340 cmd_len = le16_to_cpu(cmd.len);
3342 BT_DBG("code 0x%2.2x len %d id 0x%2.2x", cmd.code, cmd_len, cmd.ident);
/* Bounds/sanity check: claimed length must fit, ident 0 is illegal. */
3344 if (cmd_len > len || !cmd.ident) {
3345 BT_DBG("corrupted command");
3350 case L2CAP_COMMAND_REJ:
3351 l2cap_command_rej(conn, &cmd, data);
3354 case L2CAP_CONN_REQ:
3355 err = l2cap_connect_req(conn, &cmd, data);
3358 case L2CAP_CONN_RSP:
3359 err = l2cap_connect_rsp(conn, &cmd, data);
3362 case L2CAP_CONF_REQ:
3363 err = l2cap_config_req(conn, &cmd, cmd_len, data);
3366 case L2CAP_CONF_RSP:
3367 err = l2cap_config_rsp(conn, &cmd, data);
3370 case L2CAP_DISCONN_REQ:
3371 err = l2cap_disconnect_req(conn, &cmd, data);
3374 case L2CAP_DISCONN_RSP:
3375 err = l2cap_disconnect_rsp(conn, &cmd, data);
3378 case L2CAP_ECHO_REQ:
/* Echo simply reflects the payload back with the same ident. */
3379 l2cap_send_cmd(conn, cmd.ident, L2CAP_ECHO_RSP, cmd_len, data);
3382 case L2CAP_ECHO_RSP:
3385 case L2CAP_INFO_REQ:
3386 err = l2cap_information_req(conn, &cmd, data);
3389 case L2CAP_INFO_RSP:
3390 err = l2cap_information_rsp(conn, &cmd, data);
3394 BT_ERR("Unknown signaling command 0x%2.2x", cmd.code);
3400 struct l2cap_cmd_rej rej;
3401 BT_DBG("error %d", err);
3403 /* FIXME: Map err to a valid reason */
3404 rej.reason = cpu_to_le16(0);
3405 l2cap_send_cmd(conn, cmd.ident, L2CAP_COMMAND_REJ, sizeof(rej), &rej);
/*
 * Verify the trailing 2-byte CRC16 FCS of an ERTM/streaming frame.
 * The skb is trimmed first, so skb->data + skb->len then points exactly at
 * the (still present in the buffer) FCS bytes; the CRC is computed over
 * the L2CAP header (hdr_size bytes before skb->data) plus the payload.
 * Returns 0 on match (or when FCS is disabled); non-zero path is elided.
 */
3415 static int l2cap_check_fcs(struct l2cap_pinfo *pi, struct sk_buff *skb)
3417 u16 our_fcs, rcv_fcs;
3418 int hdr_size = L2CAP_HDR_SIZE + 2;
3420 if (pi->fcs == L2CAP_FCS_CRC16) {
3421 skb_trim(skb, skb->len - 2);
3422 rcv_fcs = get_unaligned_le16(skb->data + skb->len);
3423 our_fcs = crc16(0, skb->data - hdr_size, skb->len + hdr_size);
3425 if (our_fcs != rcv_fcs)
/*
 * Poll-response helper: answer the peer with whichever frame is due —
 * an RNR when we are locally busy, retransmissions/I-frames when we have
 * pending data, or a plain RR if nothing was sent at all.
 */
3431 static inline void l2cap_send_i_or_rr_or_rnr(struct sock *sk)
3433 struct l2cap_pinfo *pi = l2cap_pi(sk);
3436 pi->frames_sent = 0;
3438 control |= pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3440 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3441 control |= L2CAP_SUPER_RCV_NOT_READY;
3442 l2cap_send_sframe(pi, control);
3443 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3446 if (pi->conn_state & L2CAP_CONN_REMOTE_BUSY)
3447 l2cap_retransmit_frames(sk);
3449 l2cap_ertm_send(sk);
/* Nothing went out and we're not busy: acknowledge with a bare RR. */
3451 if (!(pi->conn_state & L2CAP_CONN_LOCAL_BUSY) &&
3452 pi->frames_sent == 0) {
3453 control |= L2CAP_SUPER_RCV_READY;
3454 l2cap_send_sframe(pi, control);
/*
 * Insert an out-of-sequence I-frame into the SREJ queue, keeping the queue
 * ordered by tx_seq distance from buffer_seq (mod-64 sequence space).
 * Duplicates (same tx_seq already queued) are detected and not re-added.
 */
3458 static int l2cap_add_to_srej_queue(struct sock *sk, struct sk_buff *skb, u8 tx_seq, u8 sar)
3460 struct sk_buff *next_skb;
3461 struct l2cap_pinfo *pi = l2cap_pi(sk);
3462 int tx_seq_offset, next_tx_seq_offset;
3464 bt_cb(skb)->tx_seq = tx_seq;
3465 bt_cb(skb)->sar = sar;
3467 next_skb = skb_peek(SREJ_QUEUE(sk));
/* Empty queue: trivially append. */
3469 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/* Normalise mod-64 distance from buffer_seq into [0, 63]. */
3473 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3474 if (tx_seq_offset < 0)
3475 tx_seq_offset += 64;
3478 if (bt_cb(next_skb)->tx_seq == tx_seq)
3481 next_tx_seq_offset = (bt_cb(next_skb)->tx_seq -
3482 pi->buffer_seq) % 64;
3483 if (next_tx_seq_offset < 0)
3484 next_tx_seq_offset += 64;
3486 if (next_tx_seq_offset > tx_seq_offset) {
3487 __skb_queue_before(SREJ_QUEUE(sk), next_skb, skb);
3491 if (skb_queue_is_last(SREJ_QUEUE(sk), next_skb))
3494 } while ((next_skb = skb_queue_next(SREJ_QUEUE(sk), next_skb)));
3496 __skb_queue_tail(SREJ_QUEUE(sk), skb);
/*
 * Reassemble SAR-segmented SDUs in ERTM mode and deliver them to the
 * socket receive queue.  State machine over the SAR bits of the control
 * field: UNSEGMENTED delivers directly; START allocates pi->sdu (sized
 * from the embedded SDU length) and begins accumulation; CONTINUE appends
 * with overflow checks; the END arm clones the finished SDU and queues it,
 * using CONN_SAR_RETRY so a failed sock_queue_rcv_skb (e.g. rcvbuf full)
 * can be retried without re-appending the last fragment.
 * Error paths (elided here) disconnect with ECONNRESET.
 */
3501 static int l2cap_ertm_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3503 struct l2cap_pinfo *pi = l2cap_pi(sk);
3504 struct sk_buff *_skb;
3507 switch (control & L2CAP_CTRL_SAR) {
3508 case L2CAP_SDU_UNSEGMENTED:
/* An unsegmented frame while mid-reassembly is a protocol error. */
3509 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3512 err = sock_queue_rcv_skb(sk, skb);
3518 case L2CAP_SDU_START:
3519 if (pi->conn_state & L2CAP_CONN_SAR_SDU)
3522 pi->sdu_len = get_unaligned_le16(skb->data);
3524 if (pi->sdu_len > pi->imtu)
3527 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3531 /* pull sdu_len bytes only after alloc, because of Local Busy
3532 * condition we have to be sure that this will be executed
3533 * only once, i.e., when alloc does not fail */
3536 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3538 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3539 pi->partial_sdu_len = skb->len;
3542 case L2CAP_SDU_CONTINUE:
3543 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3549 pi->partial_sdu_len += skb->len;
3550 if (pi->partial_sdu_len > pi->sdu_len)
3553 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
/* SDU_END arm (case label elided in this dump). */
3558 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3564 if (!(pi->conn_state & L2CAP_CONN_SAR_RETRY)) {
3565 pi->partial_sdu_len += skb->len;
3567 if (pi->partial_sdu_len > pi->imtu)
3570 if (pi->partial_sdu_len != pi->sdu_len)
3573 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3576 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3578 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3582 err = sock_queue_rcv_skb(sk, _skb);
3585 pi->conn_state |= L2CAP_CONN_SAR_RETRY;
3589 pi->conn_state &= ~L2CAP_CONN_SAR_RETRY;
3590 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3604 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/*
 * Workqueue handler that drains the Local Busy queue.  Sleeps (interruptible,
 * HZ/5 at a time) until the busy queue empties or L2CAP_LOCAL_BUSY_TRIES is
 * exceeded (then the channel is disconnected with EBUSY).  After draining it
 * exits the local-busy condition: sends an RR with the Poll bit, restarts
 * the monitor timer and clears LOCAL_BUSY/RNR_SENT.
 * NOTE(review): runs in process context off _busy_wq, so sleeping is legal;
 * locking around the queue manipulation is elided in this dump — confirm
 * against the full source.
 */
3609 static void l2cap_busy_work(struct work_struct *work)
3611 DECLARE_WAITQUEUE(wait, current);
3612 struct l2cap_pinfo *pi =
3613 container_of(work, struct l2cap_pinfo, busy_work);
/* struct sock is the first member of l2cap_pinfo, so this cast is valid. */
3614 struct sock *sk = (struct sock *)pi;
3615 int n_tries = 0, timeo = HZ/5, err;
3616 struct sk_buff *skb;
3621 add_wait_queue(sk_sleep(sk), &wait);
3622 while ((skb = skb_peek(BUSY_QUEUE(sk)))) {
3623 set_current_state(TASK_INTERRUPTIBLE);
3625 if (n_tries++ > L2CAP_LOCAL_BUSY_TRIES) {
3627 l2cap_send_disconn_req(pi->conn, sk, EBUSY);
3634 if (signal_pending(current)) {
3635 err = sock_intr_errno(timeo);
3640 timeo = schedule_timeout(timeo);
3643 err = sock_error(sk);
3647 while ((skb = skb_dequeue(BUSY_QUEUE(sk)))) {
3648 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3649 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
/* Reassembly failed again: push the skb back and keep waiting. */
3651 skb_queue_head(BUSY_QUEUE(sk), skb);
3655 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3662 if (!(pi->conn_state & L2CAP_CONN_RNR_SENT))
/* Tell the peer we are ready again: RR with Poll, await Final. */
3665 control = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3666 control |= L2CAP_SUPER_RCV_READY | L2CAP_CTRL_POLL;
3667 l2cap_send_sframe(pi, control);
3668 l2cap_pi(sk)->retry_count = 1;
3670 del_timer(&pi->retrans_timer);
3671 __mod_monitor_timer();
3673 l2cap_pi(sk)->conn_state |= L2CAP_CONN_WAIT_F;
3676 pi->conn_state &= ~L2CAP_CONN_LOCAL_BUSY;
3677 pi->conn_state &= ~L2CAP_CONN_RNR_SENT;
3679 BT_DBG("sk %p, Exit local busy", sk);
3681 set_current_state(TASK_RUNNING);
3682 remove_wait_queue(sk_sleep(sk), &wait);
/*
 * Push a received I-frame toward the user.  If already in Local Busy, just
 * stash it on the busy queue.  Otherwise try reassembly; on failure (e.g.
 * receive buffer full) enter Local Busy: queue the skb, send RNR, stop the
 * ack timer and hand recovery to the busy workqueue.
 */
3687 static int l2cap_push_rx_skb(struct sock *sk, struct sk_buff *skb, u16 control)
3689 struct l2cap_pinfo *pi = l2cap_pi(sk);
3692 if (pi->conn_state & L2CAP_CONN_LOCAL_BUSY) {
3693 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3694 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3698 err = l2cap_ertm_reassembly_sdu(sk, skb, control);
3700 pi->buffer_seq = (pi->buffer_seq + 1) % 64;
3704 /* Busy Condition */
3705 BT_DBG("sk %p, Enter local busy", sk);
3707 pi->conn_state |= L2CAP_CONN_LOCAL_BUSY;
3708 bt_cb(skb)->sar = control >> L2CAP_CTRL_SAR_SHIFT;
3709 __skb_queue_tail(BUSY_QUEUE(sk), skb);
3711 sctrl = pi->buffer_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3712 sctrl |= L2CAP_SUPER_RCV_NOT_READY;
3713 l2cap_send_sframe(pi, sctrl);
3715 pi->conn_state |= L2CAP_CONN_RNR_SENT;
3717 del_timer(&pi->ack_timer);
3719 queue_work(_busy_wq, &pi->busy_work);
/*
 * SAR reassembly for Streaming mode.  Same SAR state machine as the ERTM
 * variant but lossy: a frame arriving in the wrong SAR state discards the
 * partial SDU instead of disconnecting (streaming tolerates loss).
 * NOTE(review): on the END arm, the skb_clone() result is passed to
 * sock_queue_rcv_skb() — a NULL-check is not visible in this dump; confirm
 * against the full source.
 */
3724 static int l2cap_streaming_reassembly_sdu(struct sock *sk, struct sk_buff *skb, u16 control)
3726 struct l2cap_pinfo *pi = l2cap_pi(sk);
3727 struct sk_buff *_skb;
3731 * TODO: We have to notify the userland if some data is lost with the
3735 switch (control & L2CAP_CTRL_SAR) {
3736 case L2CAP_SDU_UNSEGMENTED:
3737 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
3742 err = sock_queue_rcv_skb(sk, skb);
3748 case L2CAP_SDU_START:
3749 if (pi->conn_state & L2CAP_CONN_SAR_SDU) {
/* First two payload bytes carry the total SDU length. */
3754 pi->sdu_len = get_unaligned_le16(skb->data);
3757 if (pi->sdu_len > pi->imtu) {
3762 pi->sdu = bt_skb_alloc(pi->sdu_len, GFP_ATOMIC);
3768 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3770 pi->conn_state |= L2CAP_CONN_SAR_SDU;
3771 pi->partial_sdu_len = skb->len;
3775 case L2CAP_SDU_CONTINUE:
3776 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3779 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3781 pi->partial_sdu_len += skb->len;
3782 if (pi->partial_sdu_len > pi->sdu_len)
/* SDU_END arm (case label elided in this dump). */
3790 if (!(pi->conn_state & L2CAP_CONN_SAR_SDU))
3793 memcpy(skb_put(pi->sdu, skb->len), skb->data, skb->len);
3795 pi->conn_state &= ~L2CAP_CONN_SAR_SDU;
3796 pi->partial_sdu_len += skb->len;
3798 if (pi->partial_sdu_len > pi->imtu)
3801 if (pi->partial_sdu_len == pi->sdu_len) {
3802 _skb = skb_clone(pi->sdu, GFP_ATOMIC);
3803 err = sock_queue_rcv_skb(sk, _skb);
/*
 * After a missing frame has been recovered, drain the SREJ queue of every
 * frame that is now in sequence starting from tx_seq, reassembling each
 * and advancing buffer_seq_srej / tx_seq mod 64 until a gap remains.
 */
3818 static void l2cap_check_srej_gap(struct sock *sk, u8 tx_seq)
3820 struct sk_buff *skb;
3823 while ((skb = skb_peek(SREJ_QUEUE(sk)))) {
3824 if (bt_cb(skb)->tx_seq != tx_seq)
3827 skb = skb_dequeue(SREJ_QUEUE(sk));
3828 control = bt_cb(skb)->sar << L2CAP_CTRL_SAR_SHIFT;
3829 l2cap_ertm_reassembly_sdu(sk, skb, control);
3830 l2cap_pi(sk)->buffer_seq_srej =
3831 (l2cap_pi(sk)->buffer_seq_srej + 1) % 64;
3832 tx_seq = (tx_seq + 1) % 64;
/*
 * Walk the pending-SREJ list: entries up to and including tx_seq are
 * satisfied (freed — elided in this dump); every later entry gets its SREJ
 * S-frame re-sent and is moved to the tail of the list.
 */
3836 static void l2cap_resend_srejframe(struct sock *sk, u8 tx_seq)
3838 struct l2cap_pinfo *pi = l2cap_pi(sk);
3839 struct srej_list *l, *tmp;
3842 list_for_each_entry_safe(l, tmp, SREJ_LIST(sk), list) {
3843 if (l->tx_seq == tx_seq) {
3848 control = L2CAP_SUPER_SELECT_REJECT;
3849 control |= l->tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3850 l2cap_send_sframe(pi, control);
3852 list_add_tail(&l->list, SREJ_LIST(sk));
/*
 * Send one SREJ per missing sequence number between expected_tx_seq and
 * the received tx_seq, recording each requested seq on SREJ_LIST so the
 * retransmissions can be matched later.
 * NOTE(review): the kzalloc(GFP_ATOMIC) result is dereferenced without a
 * NULL check — a failed atomic allocation would oops here; upstream later
 * hardened this path.
 */
3856 static void l2cap_send_srejframe(struct sock *sk, u8 tx_seq)
3858 struct l2cap_pinfo *pi = l2cap_pi(sk);
3859 struct srej_list *new;
3862 while (tx_seq != pi->expected_tx_seq) {
3863 control = L2CAP_SUPER_SELECT_REJECT;
3864 control |= pi->expected_tx_seq << L2CAP_CTRL_REQSEQ_SHIFT;
3865 l2cap_send_sframe(pi, control);
3867 new = kzalloc(sizeof(struct srej_list), GFP_ATOMIC);
3868 new->tx_seq = pi->expected_tx_seq;
3869 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3870 list_add_tail(&new->list, SREJ_LIST(sk));
/* Skip past the frame that actually arrived. */
3872 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
/*
 * Core ERTM I-frame receive path.  Handles the F-bit handshake (cancel the
 * monitor timer when WAIT_F resolves), acks outgoing frames via req_seq,
 * validates tx_seq against the transmit window, and runs the SREJ recovery
 * machinery for out-of-sequence frames: frames matching the head of the
 * SREJ list close a gap; unexpected gaps open an SREJ state and queue the
 * frame.  In-sequence frames are pushed up via l2cap_push_rx_skb and an
 * acknowledgement is scheduled every num_to_ack (= tx_win/6 + 1) frames.
 */
3876 static inline int l2cap_data_channel_iframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
3877 struct l2cap_pinfo *pi = l2cap_pi(sk);
3878 u8 tx_seq = __get_txseq(rx_control);
3879 u8 req_seq = __get_reqseq(rx_control);
3880 u8 sar = rx_control >> L2CAP_CTRL_SAR_SHIFT;
3881 int tx_seq_offset, expected_tx_seq_offset;
3882 int num_to_ack = (pi->tx_win/6) + 1;
3885 BT_DBG("sk %p len %d tx_seq %d rx_control 0x%4.4x", sk, skb->len, tx_seq,
3888 if (L2CAP_CTRL_FINAL & rx_control &&
3889 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
3890 del_timer(&pi->monitor_timer);
3891 if (pi->unacked_frames > 0)
3892 __mod_retrans_timer();
3893 pi->conn_state &= ~L2CAP_CONN_WAIT_F;
3896 pi->expected_ack_seq = req_seq;
3897 l2cap_drop_acked_frames(sk);
3899 if (tx_seq == pi->expected_tx_seq)
3902 tx_seq_offset = (tx_seq - pi->buffer_seq) % 64;
3903 if (tx_seq_offset < 0)
3904 tx_seq_offset += 64;
3906 /* invalid tx_seq */
3907 if (tx_seq_offset >= pi->tx_win) {
3908 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
/* NOTE(review): '==' compares the whole conn_state word against a single
 * flag; testing the bit with '&' looks intended — confirm against the
 * full source (upstream later changed this). */
3912 if (pi->conn_state == L2CAP_CONN_LOCAL_BUSY)
3915 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3916 struct srej_list *first;
3918 first = list_first_entry(SREJ_LIST(sk),
3919 struct srej_list, list);
3920 if (tx_seq == first->tx_seq) {
3921 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3922 l2cap_check_srej_gap(sk, tx_seq);
3924 list_del(&first->list);
3927 if (list_empty(SREJ_LIST(sk))) {
3928 pi->buffer_seq = pi->buffer_seq_srej;
3929 pi->conn_state &= ~L2CAP_CONN_SREJ_SENT;
3931 BT_DBG("sk %p, Exit SREJ_SENT", sk);
3934 struct srej_list *l;
3936 /* duplicated tx_seq */
3937 if (l2cap_add_to_srej_queue(sk, skb, tx_seq, sar) < 0)
3940 list_for_each_entry(l, SREJ_LIST(sk), list) {
3941 if (l->tx_seq == tx_seq) {
3942 l2cap_resend_srejframe(sk, tx_seq);
3946 l2cap_send_srejframe(sk, tx_seq);
3949 expected_tx_seq_offset =
3950 (pi->expected_tx_seq - pi->buffer_seq) % 64;
3951 if (expected_tx_seq_offset < 0)
3952 expected_tx_seq_offset += 64;
3954 /* duplicated tx_seq */
3955 if (tx_seq_offset < expected_tx_seq_offset)
3958 pi->conn_state |= L2CAP_CONN_SREJ_SENT;
3960 BT_DBG("sk %p, Enter SREJ", sk);
3962 INIT_LIST_HEAD(SREJ_LIST(sk));
3963 pi->buffer_seq_srej = pi->buffer_seq;
3965 __skb_queue_head_init(SREJ_QUEUE(sk));
3966 __skb_queue_head_init(BUSY_QUEUE(sk));
3967 l2cap_add_to_srej_queue(sk, skb, tx_seq, sar);
3969 pi->conn_state |= L2CAP_CONN_SEND_PBIT;
3971 l2cap_send_srejframe(sk, tx_seq);
3973 del_timer(&pi->ack_timer);
/* expected_tx_seq path: deliver in-order frame (label elided). */
3978 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
3980 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
3981 bt_cb(skb)->tx_seq = tx_seq;
3982 bt_cb(skb)->sar = sar;
3983 __skb_queue_tail(SREJ_QUEUE(sk), skb);
3987 err = l2cap_push_rx_skb(sk, skb, rx_control);
3991 if (rx_control & L2CAP_CTRL_FINAL) {
3992 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
3993 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
3995 l2cap_retransmit_frames(sk);
4000 pi->num_acked = (pi->num_acked + 1) % num_to_ack;
4001 if (pi->num_acked == num_to_ack - 1)
/*
 * Handle a Receiver Ready (RR) S-frame.  Acks frames up to req_seq, then:
 * Poll set — respond with an F-bit frame (srejtail while in SREJ recovery,
 * otherwise I/RR/RNR as appropriate); Final set — clear remote-busy and
 * retransmit if a REJ was outstanding; plain RR — clear remote-busy,
 * restart the retransmission timer and resume sending.
 */
4011 static inline void l2cap_data_channel_rrframe(struct sock *sk, u16 rx_control)
4013 struct l2cap_pinfo *pi = l2cap_pi(sk);
4015 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, __get_reqseq(rx_control),
4018 pi->expected_ack_seq = __get_reqseq(rx_control);
4019 l2cap_drop_acked_frames(sk);
4021 if (rx_control & L2CAP_CTRL_POLL) {
4022 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4023 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4024 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4025 (pi->unacked_frames > 0))
4026 __mod_retrans_timer();
4028 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4029 l2cap_send_srejtail(sk);
4031 l2cap_send_i_or_rr_or_rnr(sk);
4034 } else if (rx_control & L2CAP_CTRL_FINAL) {
4035 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4037 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4038 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4040 l2cap_retransmit_frames(sk);
4043 if ((pi->conn_state & L2CAP_CONN_REMOTE_BUSY) &&
4044 (pi->unacked_frames > 0))
4045 __mod_retrans_timer();
4047 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4048 if (pi->conn_state & L2CAP_CONN_SREJ_SENT) {
4051 l2cap_ertm_send(sk);
4056 static inline void l2cap_data_channel_rejframe(struct sock *sk, u16 rx_control)
4058 struct l2cap_pinfo *pi = l2cap_pi(sk);
4059 u8 tx_seq = __get_reqseq(rx_control);
4061 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4063 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4065 pi->expected_ack_seq = tx_seq;
4066 l2cap_drop_acked_frames(sk);
4068 if (rx_control & L2CAP_CTRL_FINAL) {
4069 if (pi->conn_state & L2CAP_CONN_REJ_ACT)
4070 pi->conn_state &= ~L2CAP_CONN_REJ_ACT;
4072 l2cap_retransmit_frames(sk);
4074 l2cap_retransmit_frames(sk);
4076 if (pi->conn_state & L2CAP_CONN_WAIT_F)
4077 pi->conn_state |= L2CAP_CONN_REJ_ACT;
/*
 * Handle a Selective Reject (SREJ) S-frame: retransmit the single frame
 * the peer asked for.  With Poll set, also ack up to that seq, set the
 * F-bit for the retransmission and resume sending; with Final set, honour
 * it only if it doesn't match an SREJ we already acted on (SREJ_ACT /
 * srej_save_reqseq de-duplicate); otherwise record state while WAIT_F.
 */
4080 static inline void l2cap_data_channel_srejframe(struct sock *sk, u16 rx_control)
4082 struct l2cap_pinfo *pi = l2cap_pi(sk);
4083 u8 tx_seq = __get_reqseq(rx_control);
4085 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4087 pi->conn_state &= ~L2CAP_CONN_REMOTE_BUSY;
4089 if (rx_control & L2CAP_CTRL_POLL) {
4090 pi->expected_ack_seq = tx_seq;
4091 l2cap_drop_acked_frames(sk);
4093 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4094 l2cap_retransmit_one_frame(sk, tx_seq);
4096 l2cap_ertm_send(sk);
4098 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4099 pi->srej_save_reqseq = tx_seq;
4100 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
4102 } else if (rx_control & L2CAP_CTRL_FINAL) {
4103 if ((pi->conn_state & L2CAP_CONN_SREJ_ACT) &&
4104 pi->srej_save_reqseq == tx_seq)
4105 pi->conn_state &= ~L2CAP_CONN_SREJ_ACT;
4107 l2cap_retransmit_one_frame(sk, tx_seq);
4109 l2cap_retransmit_one_frame(sk, tx_seq);
4110 if (pi->conn_state & L2CAP_CONN_WAIT_F) {
4111 pi->srej_save_reqseq = tx_seq;
4112 pi->conn_state |= L2CAP_CONN_SREJ_ACT;
/*
 * Handle a Receiver Not Ready (RNR) S-frame: mark the peer busy, ack up to
 * req_seq, and answer a Poll with an F-bit RR/RNR (or the SREJ tail plus a
 * bare RR when SREJ recovery is in progress).  The retransmission timer is
 * stopped outside SREJ recovery since the peer cannot accept frames.
 */
4117 static inline void l2cap_data_channel_rnrframe(struct sock *sk, u16 rx_control)
4119 struct l2cap_pinfo *pi = l2cap_pi(sk);
4120 u8 tx_seq = __get_reqseq(rx_control);
4122 BT_DBG("sk %p, req_seq %d ctrl 0x%4.4x", sk, tx_seq, rx_control);
4124 pi->conn_state |= L2CAP_CONN_REMOTE_BUSY;
4125 pi->expected_ack_seq = tx_seq;
4126 l2cap_drop_acked_frames(sk);
4128 if (rx_control & L2CAP_CTRL_POLL)
4129 pi->conn_state |= L2CAP_CONN_SEND_FBIT;
4131 if (!(pi->conn_state & L2CAP_CONN_SREJ_SENT)) {
4132 del_timer(&pi->retrans_timer);
4133 if (rx_control & L2CAP_CTRL_POLL)
4134 l2cap_send_rr_or_rnr(pi, L2CAP_CTRL_FINAL);
4138 if (rx_control & L2CAP_CTRL_POLL)
4139 l2cap_send_srejtail(sk);
4141 l2cap_send_sframe(pi, L2CAP_SUPER_RCV_READY);
/*
 * Dispatch a received S-frame by its supervisory function (RR/REJ/SREJ/RNR).
 * Shared F-bit preamble: a Final bit while WAIT_F cancels the monitor timer
 * and re-arms the retransmission timer if unacked frames remain.
 */
4144 static inline int l2cap_data_channel_sframe(struct sock *sk, u16 rx_control, struct sk_buff *skb)
4146 BT_DBG("sk %p rx_control 0x%4.4x len %d", sk, rx_control, skb->len);
4148 if (L2CAP_CTRL_FINAL & rx_control &&
4149 l2cap_pi(sk)->conn_state & L2CAP_CONN_WAIT_F) {
4150 del_timer(&l2cap_pi(sk)->monitor_timer);
4151 if (l2cap_pi(sk)->unacked_frames > 0)
4152 __mod_retrans_timer();
4153 l2cap_pi(sk)->conn_state &= ~L2CAP_CONN_WAIT_F;
4156 switch (rx_control & L2CAP_CTRL_SUPERVISE) {
4157 case L2CAP_SUPER_RCV_READY:
4158 l2cap_data_channel_rrframe(sk, rx_control);
4161 case L2CAP_SUPER_REJECT:
4162 l2cap_data_channel_rejframe(sk, rx_control);
4165 case L2CAP_SUPER_SELECT_REJECT:
4166 l2cap_data_channel_srejframe(sk, rx_control);
4169 case L2CAP_SUPER_RCV_NOT_READY:
4170 l2cap_data_channel_rnrframe(sk, rx_control);
/*
 * ERTM receive entry point for one data frame.  Order of checks: FCS (a
 * bad CRC silently drops the frame — the peer's recovery machinery will
 * re-request it), payload length against the negotiated MPS, then req_seq
 * sanity (must not ack beyond next_tx_seq, mod-64), and finally dispatch
 * to the I-frame or S-frame handler.  Invalid frames disconnect the
 * channel with ECONNRESET.
 */
4178 static int l2cap_ertm_data_rcv(struct sock *sk, struct sk_buff *skb)
4180 struct l2cap_pinfo *pi = l2cap_pi(sk);
4183 int len, next_tx_seq_offset, req_seq_offset;
4185 control = get_unaligned_le16(skb->data);
4190 * We can just drop the corrupted I-frame here.
4191 * Receiver will miss it and start proper recovery
4192 * procedures and ask retransmission.
4194 if (l2cap_check_fcs(pi, skb))
/* SAR-start frames carry a 2-byte SDU length; account for it in len. */
4197 if (__is_sar_start(control) && __is_iframe(control))
4200 if (pi->fcs == L2CAP_FCS_CRC16)
4203 if (len > pi->mps) {
4204 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4208 req_seq = __get_reqseq(control);
4209 req_seq_offset = (req_seq - pi->expected_ack_seq) % 64;
4210 if (req_seq_offset < 0)
4211 req_seq_offset += 64;
4213 next_tx_seq_offset =
4214 (pi->next_tx_seq - pi->expected_ack_seq) % 64;
4215 if (next_tx_seq_offset < 0)
4216 next_tx_seq_offset += 64;
4218 /* check for invalid req-seq */
4219 if (req_seq_offset > next_tx_seq_offset) {
4220 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4224 if (__is_iframe(control)) {
4226 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4230 l2cap_data_channel_iframe(sk, control, skb);
4234 l2cap_send_disconn_req(pi->conn, sk, ECONNRESET);
4238 l2cap_data_channel_sframe(sk, control, skb);
/*
 * Deliver an inbound frame on a connection-oriented channel.  Looks the
 * channel up by CID and branches on the negotiated mode: BASIC queues the
 * skb directly (drops on MTU overflow — L2CAP has no flow control here);
 * ERTM goes through l2cap_ertm_data_rcv, deferring to the socket backlog
 * when the socket is owned by user context; STREAMING checks FCS/MPS and
 * reassembles, silently accepting sequence gaps (lossy by design).
 */
4248 static inline int l2cap_data_channel(struct l2cap_conn *conn, u16 cid, struct sk_buff *skb)
4251 struct l2cap_pinfo *pi;
4256 sk = l2cap_get_chan_by_scid(&conn->chan_list, cid);
4258 BT_DBG("unknown cid 0x%4.4x", cid);
4264 BT_DBG("sk %p, len %d", sk, skb->len);
4266 if (sk->sk_state != BT_CONNECTED)
4270 case L2CAP_MODE_BASIC:
4271 /* If socket recv buffers overflows we drop data here
4272 * which is *bad* because L2CAP has to be reliable.
4273 * But we don't have any other choice. L2CAP doesn't
4274 * provide flow control mechanism. */
4276 if (pi->imtu < skb->len)
4279 if (!sock_queue_rcv_skb(sk, skb))
4283 case L2CAP_MODE_ERTM:
4284 if (!sock_owned_by_user(sk)) {
4285 l2cap_ertm_data_rcv(sk, skb);
/* Socket busy in user context: park on the backlog instead. */
4287 if (sk_add_backlog(sk, skb))
4293 case L2CAP_MODE_STREAMING:
4294 control = get_unaligned_le16(skb->data);
4298 if (l2cap_check_fcs(pi, skb))
4301 if (__is_sar_start(control))
4304 if (pi->fcs == L2CAP_FCS_CRC16)
4307 if (len > pi->mps || len < 0 || __is_sframe(control))
4310 tx_seq = __get_txseq(control);
/* Streaming tolerates loss: just resync expected_tx_seq on a gap. */
4312 if (pi->expected_tx_seq == tx_seq)
4313 pi->expected_tx_seq = (pi->expected_tx_seq + 1) % 64;
4315 pi->expected_tx_seq = (tx_seq + 1) % 64;
4317 l2cap_streaming_reassembly_sdu(sk, skb, control);
4322 BT_DBG("sk %p: bad mode 0x%2.2x", sk, pi->mode);
/*
 * Deliver a connectionless (CID 0x0002) frame: find a socket bound to the
 * PSM on the source address and queue the skb if it fits the MTU.
 */
4336 static inline int l2cap_conless_channel(struct l2cap_conn *conn, __le16 psm, struct sk_buff *skb)
4340 sk = l2cap_get_sock_by_psm(0, psm, conn->src);
4344 BT_DBG("sk %p, len %d", sk, skb->len);
4346 if (sk->sk_state != BT_BOUND && sk->sk_state != BT_CONNECTED)
4349 if (l2cap_pi(sk)->imtu < skb->len)
4352 if (!sock_queue_rcv_skb(sk, skb))
/*
 * Top-level dispatch for a complete, reassembled L2CAP frame: validate the
 * basic-header length, then route by CID — signalling (0x0001),
 * connectionless (0x0002, PSM-prefixed), or a data channel.
 */
4364 static void l2cap_recv_frame(struct l2cap_conn *conn, struct sk_buff *skb)
4366 struct l2cap_hdr *lh = (void *) skb->data;
4370 skb_pull(skb, L2CAP_HDR_SIZE);
4371 cid = __le16_to_cpu(lh->cid);
4372 len = __le16_to_cpu(lh->len);
/* Header length must match the actual payload exactly. */
4374 if (len != skb->len) {
4379 BT_DBG("len %d, cid 0x%4.4x", len, cid);
4382 case L2CAP_CID_SIGNALING:
4383 l2cap_sig_channel(conn, skb);
4386 case L2CAP_CID_CONN_LESS:
4387 psm = get_unaligned_le16(skb->data);
4389 l2cap_conless_channel(conn, psm, skb);
4393 l2cap_data_channel(conn, cid, skb);
4398 /* ---- L2CAP interface with lower layer (HCI) ---- */
/*
 * HCI callback: should we accept an incoming ACL connection from bdaddr?
 * Scan listening L2CAP sockets; a socket bound to this adapter's own
 * address (exact match, lm1) outranks wildcard binds (lm2).  Returns the
 * accept/role-switch link-mode bits, 0 to reject non-ACL links.
 */
4400 static int l2cap_connect_ind(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
4402 int exact = 0, lm1 = 0, lm2 = 0;
4403 register struct sock *sk;
4404 struct hlist_node *node;
4406 if (type != ACL_LINK)
4409 BT_DBG("hdev %s, bdaddr %s", hdev->name, batostr(bdaddr));
4411 /* Find listening sockets and check their link_mode */
4412 read_lock(&l2cap_sk_list.lock);
4413 sk_for_each(sk, node, &l2cap_sk_list.head) {
4414 if (sk->sk_state != BT_LISTEN)
4417 if (!bacmp(&bt_sk(sk)->src, &hdev->bdaddr)) {
4418 lm1 |= HCI_LM_ACCEPT;
4419 if (l2cap_pi(sk)->role_switch)
4420 lm1 |= HCI_LM_MASTER;
4422 } else if (!bacmp(&bt_sk(sk)->src, BDADDR_ANY)) {
4423 lm2 |= HCI_LM_ACCEPT;
4424 if (l2cap_pi(sk)->role_switch)
4425 lm2 |= HCI_LM_MASTER;
4428 read_unlock(&l2cap_sk_list.lock);
4430 return exact ? lm1 : lm2;
/*
 * HCI callback on ACL connect completion: on success attach/ready an
 * L2CAP connection object; on failure tear it down with the mapped errno.
 */
4433 static int l2cap_connect_cfm(struct hci_conn *hcon, u8 status)
4435 struct l2cap_conn *conn;
4437 BT_DBG("hcon %p bdaddr %s status %d", hcon, batostr(&hcon->dst), status);
4439 if (hcon->type != ACL_LINK)
4443 conn = l2cap_conn_add(hcon, status);
4445 l2cap_conn_ready(conn);
4447 l2cap_conn_del(hcon, bt_err(status));
/*
 * HCI callback asking for a disconnect reason: report the reason L2CAP
 * recorded for this connection (non-ACL / missing conn handled by the
 * elided early return).
 */
4452 static int l2cap_disconn_ind(struct hci_conn *hcon)
4454 struct l2cap_conn *conn = hcon->l2cap_data;
4456 BT_DBG("hcon %p", hcon);
4458 if (hcon->type != ACL_LINK || !conn)
4461 return conn->disc_reason;
/*
 * HCI callback on ACL disconnect completion: drop the L2CAP connection,
 * propagating the HCI reason as an errno.
 */
4464 static int l2cap_disconn_cfm(struct hci_conn *hcon, u8 reason)
4466 BT_DBG("hcon %p reason %d", hcon, reason);
4468 if (hcon->type != ACL_LINK)
4471 l2cap_conn_del(hcon, bt_err(reason));
/*
 * React to an encryption change on a connected channel.  Encryption lost:
 * MEDIUM security gets a 5 s grace timer before teardown, HIGH is closed
 * immediately with ECONNREFUSED.  Encryption (re)established: cancel any
 * pending MEDIUM-security timer.  Raw sockets are exempt.
 */
4476 static inline void l2cap_check_encryption(struct sock *sk, u8 encrypt)
4478 if (sk->sk_type != SOCK_SEQPACKET && sk->sk_type != SOCK_STREAM)
4481 if (encrypt == 0x00) {
4482 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM) {
4483 l2cap_sock_clear_timer(sk);
4484 l2cap_sock_set_timer(sk, HZ * 5);
4485 } else if (l2cap_pi(sk)->sec_level == BT_SECURITY_HIGH)
4486 __l2cap_sock_close(sk, ECONNREFUSED);
4488 if (l2cap_pi(sk)->sec_level == BT_SECURITY_MEDIUM)
4489 l2cap_sock_clear_timer(sk);
/*
 * HCI callback delivering the outcome of an authentication/encryption
 * request.  Walks every channel on the connection: established channels
 * get l2cap_check_encryption(); channels in BT_CONNECT now send their
 * deferred Connect Request (on success); channels in BT_CONNECT2 answer
 * the peer's pending Connect Request with SUCCESS or SEC_BLOCK.
 */
4493 static int l2cap_security_cfm(struct hci_conn *hcon, u8 status, u8 encrypt)
4495 struct l2cap_chan_list *l;
4496 struct l2cap_conn *conn = hcon->l2cap_data;
4502 l = &conn->chan_list;
4504 BT_DBG("conn %p", conn);
4506 read_lock(&l->lock);
4508 for (sk = l->head; sk; sk = l2cap_pi(sk)->next_c) {
4511 if (l2cap_pi(sk)->conf_state & L2CAP_CONF_CONNECT_PEND) {
4516 if (!status && (sk->sk_state == BT_CONNECTED ||
4517 sk->sk_state == BT_CONFIG)) {
4518 l2cap_check_encryption(sk, encrypt);
4523 if (sk->sk_state == BT_CONNECT) {
4525 struct l2cap_conn_req req;
4526 req.scid = cpu_to_le16(l2cap_pi(sk)->scid);
4527 req.psm = l2cap_pi(sk)->psm;
4529 l2cap_pi(sk)->ident = l2cap_get_ident(conn);
4530 l2cap_pi(sk)->conf_state |= L2CAP_CONF_CONNECT_PEND;
4532 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4533 L2CAP_CONN_REQ, sizeof(req), &req);
/* Security failed: short HZ/10 timer to close the socket soon. */
4535 l2cap_sock_clear_timer(sk);
4536 l2cap_sock_set_timer(sk, HZ / 10);
4538 } else if (sk->sk_state == BT_CONNECT2) {
4539 struct l2cap_conn_rsp rsp;
4543 sk->sk_state = BT_CONFIG;
4544 result = L2CAP_CR_SUCCESS;
4546 sk->sk_state = BT_DISCONN;
4547 l2cap_sock_set_timer(sk, HZ / 10);
4548 result = L2CAP_CR_SEC_BLOCK;
4551 rsp.scid = cpu_to_le16(l2cap_pi(sk)->dcid);
4552 rsp.dcid = cpu_to_le16(l2cap_pi(sk)->scid);
4553 rsp.result = cpu_to_le16(result);
4554 rsp.status = cpu_to_le16(L2CAP_CS_NO_INFO);
4555 l2cap_send_cmd(conn, l2cap_pi(sk)->ident,
4556 L2CAP_CONN_RSP, sizeof(rsp), &rsp);
4562 read_unlock(&l->lock);
/*
 * HCI callback feeding raw ACL data into L2CAP.  Reassembles fragmented
 * L2CAP frames across ACL packets: an ACL_START fragment either carries a
 * complete frame (fast path) or allocates conn->rx_skb sized from the
 * L2CAP header and records rx_len; continuation fragments are appended
 * until rx_len reaches zero, then the complete frame is dispatched.  All
 * length-mismatch paths flag the connection unreliable with ECOMM.
 */
4567 static int l2cap_recv_acldata(struct hci_conn *hcon, struct sk_buff *skb, u16 flags)
4569 struct l2cap_conn *conn = hcon->l2cap_data;
4571 if (!conn && !(conn = l2cap_conn_add(hcon, 0)))
4574 BT_DBG("conn %p len %d flags 0x%x", conn, skb->len, flags);
4576 if (flags & ACL_START) {
4577 struct l2cap_hdr *hdr;
/* A new start while a reassembly was pending: drop the stale buffer. */
4581 BT_ERR("Unexpected start frame (len %d)", skb->len);
4582 kfree_skb(conn->rx_skb);
4583 conn->rx_skb = NULL;
4585 l2cap_conn_unreliable(conn, ECOMM);
4589 BT_ERR("Frame is too short (len %d)", skb->len);
4590 l2cap_conn_unreliable(conn, ECOMM);
4594 hdr = (struct l2cap_hdr *) skb->data;
4595 len = __le16_to_cpu(hdr->len) + L2CAP_HDR_SIZE;
4597 if (len == skb->len) {
4598 /* Complete frame received */
4599 l2cap_recv_frame(conn, skb);
4603 BT_DBG("Start: total len %d, frag len %d", len, skb->len);
4605 if (skb->len > len) {
4606 BT_ERR("Frame is too long (len %d, expected len %d)",
4608 l2cap_conn_unreliable(conn, ECOMM);
4612 /* Allocate skb for the complete frame (with header) */
4613 conn->rx_skb = bt_skb_alloc(len, GFP_ATOMIC);
4617 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4619 conn->rx_len = len - skb->len;
/* ACL continuation fragment path. */
4621 BT_DBG("Cont: frag len %d (expecting %d)", skb->len, conn->rx_len);
4623 if (!conn->rx_len) {
4624 BT_ERR("Unexpected continuation frame (len %d)", skb->len);
4625 l2cap_conn_unreliable(conn, ECOMM);
4629 if (skb->len > conn->rx_len) {
4630 BT_ERR("Fragment is too long (len %d, expected %d)",
4631 skb->len, conn->rx_len);
4632 kfree_skb(conn->rx_skb);
4633 conn->rx_skb = NULL;
4635 l2cap_conn_unreliable(conn, ECOMM);
4639 skb_copy_from_linear_data(skb, skb_put(conn->rx_skb, skb->len),
4641 conn->rx_len -= skb->len;
4643 if (!conn->rx_len) {
4644 /* Complete frame received */
4645 l2cap_recv_frame(conn, conn->rx_skb);
4646 conn->rx_skb = NULL;
/*
 * seq_file show callback for /sys/kernel/debug/bluetooth/l2cap: print one
 * line per L2CAP socket (addresses, state, PSM, CIDs, MTUs, security).
 */
4655 static int l2cap_debugfs_show(struct seq_file *f, void *p)
4658 struct hlist_node *node;
4660 read_lock_bh(&l2cap_sk_list.lock);
4662 sk_for_each(sk, node, &l2cap_sk_list.head) {
4663 struct l2cap_pinfo *pi = l2cap_pi(sk);
4665 seq_printf(f, "%s %s %d %d 0x%4.4x 0x%4.4x %d %d %d\n",
4666 batostr(&bt_sk(sk)->src),
4667 batostr(&bt_sk(sk)->dst),
4668 sk->sk_state, __le16_to_cpu(pi->psm),
4670 pi->imtu, pi->omtu, pi->sec_level);
4673 read_unlock_bh(&l2cap_sk_list.lock);
/* debugfs open hook: standard single_open wrapper around the show fn. */
4678 static int l2cap_debugfs_open(struct inode *inode, struct file *file)
4680 return single_open(file, l2cap_debugfs_show, inode->i_private);
/* File operations for the debugfs entry, plus its dentry handle. */
4683 static const struct file_operations l2cap_debugfs_fops = {
4684 .open = l2cap_debugfs_open,
4686 .llseek = seq_lseek,
4687 .release = single_release,
4690 static struct dentry *l2cap_debugfs;
/* proto_ops vtable mapping BSD socket calls onto the L2CAP socket layer. */
4692 static const struct proto_ops l2cap_sock_ops = {
4693 .family = PF_BLUETOOTH,
4694 .owner = THIS_MODULE,
4695 .release = l2cap_sock_release,
4696 .bind = l2cap_sock_bind,
4697 .connect = l2cap_sock_connect,
4698 .listen = l2cap_sock_listen,
4699 .accept = l2cap_sock_accept,
4700 .getname = l2cap_sock_getname,
4701 .sendmsg = l2cap_sock_sendmsg,
4702 .recvmsg = l2cap_sock_recvmsg,
4703 .poll = bt_sock_poll,
4704 .ioctl = bt_sock_ioctl,
4705 .mmap = sock_no_mmap,
4706 .socketpair = sock_no_socketpair,
4707 .shutdown = l2cap_sock_shutdown,
4708 .setsockopt = l2cap_sock_setsockopt,
4709 .getsockopt = l2cap_sock_getsockopt
4712 static const struct net_proto_family l2cap_sock_family_ops = {
4713 .family = PF_BLUETOOTH,
4714 .owner = THIS_MODULE,
4715 .create = l2cap_sock_create,
4718 static struct hci_proto l2cap_hci_proto = {
4720 .id = HCI_PROTO_L2CAP,
4721 .connect_ind = l2cap_connect_ind,
4722 .connect_cfm = l2cap_connect_cfm,
4723 .disconn_ind = l2cap_disconn_ind,
4724 .disconn_cfm = l2cap_disconn_cfm,
4725 .security_cfm = l2cap_security_cfm,
4726 .recv_acldata = l2cap_recv_acldata
4729 static int __init l2cap_init(void)
4733 err = proto_register(&l2cap_proto, 0);
4737 _busy_wq = create_singlethread_workqueue("l2cap");
4741 err = bt_sock_register(BTPROTO_L2CAP, &l2cap_sock_family_ops);
4743 BT_ERR("L2CAP socket registration failed");
4747 err = hci_register_proto(&l2cap_hci_proto);
4749 BT_ERR("L2CAP protocol registration failed");
4750 bt_sock_unregister(BTPROTO_L2CAP);
4755 l2cap_debugfs = debugfs_create_file("l2cap", 0444,
4756 bt_debugfs, NULL, &l2cap_debugfs_fops);
4758 BT_ERR("Failed to create L2CAP debug file");
4761 BT_INFO("L2CAP ver %s", VERSION);
4762 BT_INFO("L2CAP socket layer initialized");
4767 proto_unregister(&l2cap_proto);
4771 static void __exit l2cap_exit(void)
4773 debugfs_remove(l2cap_debugfs);
4775 flush_workqueue(_busy_wq);
4776 destroy_workqueue(_busy_wq);
4778 if (bt_sock_unregister(BTPROTO_L2CAP) < 0)
4779 BT_ERR("L2CAP socket unregistration failed");
4781 if (hci_unregister_proto(&l2cap_hci_proto) < 0)
4782 BT_ERR("L2CAP protocol unregistration failed");
4784 proto_unregister(&l2cap_proto);
/*
 * Dummy function to trigger automatic L2CAP module loading by
 * other modules that use L2CAP sockets but don't use any other
 * symbols from it.
 */
void l2cap_load(void)
{
}
EXPORT_SYMBOL(l2cap_load);
4795 module_init(l2cap_init);
4796 module_exit(l2cap_exit);
4798 module_param(enable_ertm, bool, 0644);
4799 MODULE_PARM_DESC(enable_ertm, "Enable enhanced retransmission mode");
4801 MODULE_AUTHOR("Marcel Holtmann <marcel@holtmann.org>");
4802 MODULE_DESCRIPTION("Bluetooth L2CAP ver " VERSION);
4803 MODULE_VERSION(VERSION);
4804 MODULE_LICENSE("GPL");
4805 MODULE_ALIAS("bt-proto-0");