/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
25 /* Bluetooth HCI core. */
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
/* Delay (ms) before an auto-powered-on controller is powered back off
 * if userspace has not claimed it. */
#define AUTO_OFF_TIMEOUT 2000

/* Tasklet handlers, defined later in this file. */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols (L2CAP, SCO) */
#define HCI_MAX_PROTO 2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */
80 int hci_register_notifier(struct notifier_block *nb)
82 return atomic_notifier_chain_register(&hci_notifier, nb);
85 int hci_unregister_notifier(struct notifier_block *nb)
87 return atomic_notifier_chain_unregister(&hci_notifier, nb);
90 static void hci_notify(struct hci_dev *hdev, int event)
92 atomic_notifier_call_chain(&hci_notifier, event, hdev);
95 /* ---- HCI requests ---- */
97 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
99 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
101 /* If this is the init phase check if the completed command matches
102 * the last init command, and if not just return.
104 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
107 if (hdev->req_status == HCI_REQ_PEND) {
108 hdev->req_result = result;
109 hdev->req_status = HCI_REQ_DONE;
110 wake_up_interruptible(&hdev->req_wait_q);
114 static void hci_req_cancel(struct hci_dev *hdev, int err)
116 BT_DBG("%s err 0x%2.2x", hdev->name, err);
118 if (hdev->req_status == HCI_REQ_PEND) {
119 hdev->req_result = err;
120 hdev->req_status = HCI_REQ_CANCELED;
121 wake_up_interruptible(&hdev->req_wait_q);
125 /* Execute request and wait for completion. */
126 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
127 unsigned long opt, __u32 timeout)
129 DECLARE_WAITQUEUE(wait, current);
132 BT_DBG("%s start", hdev->name);
134 hdev->req_status = HCI_REQ_PEND;
136 add_wait_queue(&hdev->req_wait_q, &wait);
137 set_current_state(TASK_INTERRUPTIBLE);
140 schedule_timeout(timeout);
142 remove_wait_queue(&hdev->req_wait_q, &wait);
144 if (signal_pending(current))
147 switch (hdev->req_status) {
149 err = -bt_to_errno(hdev->req_result);
152 case HCI_REQ_CANCELED:
153 err = -hdev->req_result;
161 hdev->req_status = hdev->req_result = 0;
163 BT_DBG("%s end: err %d", hdev->name, err);
168 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169 unsigned long opt, __u32 timeout)
173 if (!test_bit(HCI_UP, &hdev->flags))
176 /* Serialize all requests */
178 ret = __hci_request(hdev, req, opt, timeout);
179 hci_req_unlock(hdev);
184 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186 BT_DBG("%s %ld", hdev->name, opt);
189 set_bit(HCI_RESET, &hdev->flags);
190 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
193 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
195 struct hci_cp_delete_stored_link_key cp;
200 BT_DBG("%s %ld", hdev->name, opt);
202 /* Driver initialization */
204 /* Special commands */
205 while ((skb = skb_dequeue(&hdev->driver_init))) {
206 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
207 skb->dev = (void *) hdev;
209 skb_queue_tail(&hdev->cmd_q, skb);
210 tasklet_schedule(&hdev->cmd_task);
212 skb_queue_purge(&hdev->driver_init);
214 /* Mandatory initialization */
217 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
218 set_bit(HCI_RESET, &hdev->flags);
219 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
222 /* Read Local Supported Features */
223 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
225 /* Read Local Version */
226 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
228 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
229 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
232 /* Host buffer size */
234 struct hci_cp_host_buffer_size cp;
235 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
236 cp.sco_mtu = HCI_MAX_SCO_SIZE;
237 cp.acl_max_pkt = cpu_to_le16(0xffff);
238 cp.sco_max_pkt = cpu_to_le16(0xffff);
239 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
243 /* Read BD Address */
244 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
246 /* Read Class of Device */
247 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
249 /* Read Local Name */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
252 /* Read Voice Setting */
253 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
255 /* Optional initialization */
257 /* Clear Event Filters */
258 flt_type = HCI_FLT_CLEAR_ALL;
259 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
261 /* Connection accept timeout ~20 secs */
262 param = cpu_to_le16(0x7d00);
263 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
265 bacpy(&cp.bdaddr, BDADDR_ANY);
267 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
270 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
272 BT_DBG("%s", hdev->name);
274 /* Read LE buffer size */
275 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
278 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
282 BT_DBG("%s %x", hdev->name, scan);
284 /* Inquiry and Page scans */
285 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
288 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
292 BT_DBG("%s %x", hdev->name, auth);
295 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
298 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
302 BT_DBG("%s %x", hdev->name, encrypt);
305 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
308 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
310 __le16 policy = cpu_to_le16(opt);
312 BT_DBG("%s %x", hdev->name, policy);
314 /* Default link policy */
315 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
318 /* Get HCI device by index.
319 * Device is held on return. */
320 struct hci_dev *hci_dev_get(int index)
322 struct hci_dev *hdev = NULL;
330 read_lock(&hci_dev_list_lock);
331 list_for_each(p, &hci_dev_list) {
332 struct hci_dev *d = list_entry(p, struct hci_dev, list);
333 if (d->id == index) {
334 hdev = hci_dev_hold(d);
338 read_unlock(&hci_dev_list_lock);
342 /* ---- Inquiry support ---- */
343 static void inquiry_cache_flush(struct hci_dev *hdev)
345 struct inquiry_cache *cache = &hdev->inq_cache;
346 struct inquiry_entry *next = cache->list, *e;
348 BT_DBG("cache %p", cache);
357 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
359 struct inquiry_cache *cache = &hdev->inq_cache;
360 struct inquiry_entry *e;
362 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
364 for (e = cache->list; e; e = e->next)
365 if (!bacmp(&e->data.bdaddr, bdaddr))
370 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
372 struct inquiry_cache *cache = &hdev->inq_cache;
373 struct inquiry_entry *ie;
375 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
377 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
379 /* Entry not in the cache. Add new one. */
380 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
384 ie->next = cache->list;
388 memcpy(&ie->data, data, sizeof(*data));
389 ie->timestamp = jiffies;
390 cache->timestamp = jiffies;
393 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
395 struct inquiry_cache *cache = &hdev->inq_cache;
396 struct inquiry_info *info = (struct inquiry_info *) buf;
397 struct inquiry_entry *e;
400 for (e = cache->list; e && copied < num; e = e->next, copied++) {
401 struct inquiry_data *data = &e->data;
402 bacpy(&info->bdaddr, &data->bdaddr);
403 info->pscan_rep_mode = data->pscan_rep_mode;
404 info->pscan_period_mode = data->pscan_period_mode;
405 info->pscan_mode = data->pscan_mode;
406 memcpy(info->dev_class, data->dev_class, 3);
407 info->clock_offset = data->clock_offset;
411 BT_DBG("cache %p, copied %d", cache, copied);
415 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
417 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
418 struct hci_cp_inquiry cp;
420 BT_DBG("%s", hdev->name);
422 if (test_bit(HCI_INQUIRY, &hdev->flags))
426 memcpy(&cp.lap, &ir->lap, 3);
427 cp.length = ir->length;
428 cp.num_rsp = ir->num_rsp;
429 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
432 int hci_inquiry(void __user *arg)
434 __u8 __user *ptr = arg;
435 struct hci_inquiry_req ir;
436 struct hci_dev *hdev;
437 int err = 0, do_inquiry = 0, max_rsp;
441 if (copy_from_user(&ir, ptr, sizeof(ir)))
444 hdev = hci_dev_get(ir.dev_id);
448 hci_dev_lock_bh(hdev);
449 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
450 inquiry_cache_empty(hdev) ||
451 ir.flags & IREQ_CACHE_FLUSH) {
452 inquiry_cache_flush(hdev);
455 hci_dev_unlock_bh(hdev);
457 timeo = ir.length * msecs_to_jiffies(2000);
460 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
465 /* for unlimited number of responses we will use buffer with 255 entries */
466 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
468 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
469 * copy it to the user space.
471 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
477 hci_dev_lock_bh(hdev);
478 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
479 hci_dev_unlock_bh(hdev);
481 BT_DBG("num_rsp %d", ir.num_rsp);
483 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
485 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
498 /* ---- HCI ioctl helpers ---- */
500 int hci_dev_open(__u16 dev)
502 struct hci_dev *hdev;
505 hdev = hci_dev_get(dev);
509 BT_DBG("%s %p", hdev->name, hdev);
513 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
518 if (test_bit(HCI_UP, &hdev->flags)) {
523 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
524 set_bit(HCI_RAW, &hdev->flags);
526 /* Treat all non BR/EDR controllers as raw devices for now */
527 if (hdev->dev_type != HCI_BREDR)
528 set_bit(HCI_RAW, &hdev->flags);
530 if (hdev->open(hdev)) {
535 if (!test_bit(HCI_RAW, &hdev->flags)) {
536 atomic_set(&hdev->cmd_cnt, 1);
537 set_bit(HCI_INIT, &hdev->flags);
538 hdev->init_last_cmd = 0;
540 ret = __hci_request(hdev, hci_init_req, 0,
541 msecs_to_jiffies(HCI_INIT_TIMEOUT));
543 if (lmp_host_le_capable(hdev))
544 ret = __hci_request(hdev, hci_le_init_req, 0,
545 msecs_to_jiffies(HCI_INIT_TIMEOUT));
547 clear_bit(HCI_INIT, &hdev->flags);
552 set_bit(HCI_UP, &hdev->flags);
553 hci_notify(hdev, HCI_DEV_UP);
554 if (!test_bit(HCI_SETUP, &hdev->flags))
555 mgmt_powered(hdev->id, 1);
557 /* Init failed, cleanup */
558 tasklet_kill(&hdev->rx_task);
559 tasklet_kill(&hdev->tx_task);
560 tasklet_kill(&hdev->cmd_task);
562 skb_queue_purge(&hdev->cmd_q);
563 skb_queue_purge(&hdev->rx_q);
568 if (hdev->sent_cmd) {
569 kfree_skb(hdev->sent_cmd);
570 hdev->sent_cmd = NULL;
578 hci_req_unlock(hdev);
583 static int hci_dev_do_close(struct hci_dev *hdev)
585 BT_DBG("%s %p", hdev->name, hdev);
587 hci_req_cancel(hdev, ENODEV);
590 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
591 del_timer_sync(&hdev->cmd_timer);
592 hci_req_unlock(hdev);
596 /* Kill RX and TX tasks */
597 tasklet_kill(&hdev->rx_task);
598 tasklet_kill(&hdev->tx_task);
600 hci_dev_lock_bh(hdev);
601 inquiry_cache_flush(hdev);
602 hci_conn_hash_flush(hdev);
603 hci_dev_unlock_bh(hdev);
605 hci_notify(hdev, HCI_DEV_DOWN);
611 skb_queue_purge(&hdev->cmd_q);
612 atomic_set(&hdev->cmd_cnt, 1);
613 if (!test_bit(HCI_RAW, &hdev->flags)) {
614 set_bit(HCI_INIT, &hdev->flags);
615 __hci_request(hdev, hci_reset_req, 0,
616 msecs_to_jiffies(250));
617 clear_bit(HCI_INIT, &hdev->flags);
621 tasklet_kill(&hdev->cmd_task);
624 skb_queue_purge(&hdev->rx_q);
625 skb_queue_purge(&hdev->cmd_q);
626 skb_queue_purge(&hdev->raw_q);
628 /* Drop last sent command */
629 if (hdev->sent_cmd) {
630 del_timer_sync(&hdev->cmd_timer);
631 kfree_skb(hdev->sent_cmd);
632 hdev->sent_cmd = NULL;
635 /* After this point our queues are empty
636 * and no tasks are scheduled. */
639 mgmt_powered(hdev->id, 0);
644 hci_req_unlock(hdev);
650 int hci_dev_close(__u16 dev)
652 struct hci_dev *hdev;
655 hdev = hci_dev_get(dev);
658 err = hci_dev_do_close(hdev);
663 int hci_dev_reset(__u16 dev)
665 struct hci_dev *hdev;
668 hdev = hci_dev_get(dev);
673 tasklet_disable(&hdev->tx_task);
675 if (!test_bit(HCI_UP, &hdev->flags))
679 skb_queue_purge(&hdev->rx_q);
680 skb_queue_purge(&hdev->cmd_q);
682 hci_dev_lock_bh(hdev);
683 inquiry_cache_flush(hdev);
684 hci_conn_hash_flush(hdev);
685 hci_dev_unlock_bh(hdev);
690 atomic_set(&hdev->cmd_cnt, 1);
691 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
693 if (!test_bit(HCI_RAW, &hdev->flags))
694 ret = __hci_request(hdev, hci_reset_req, 0,
695 msecs_to_jiffies(HCI_INIT_TIMEOUT));
698 tasklet_enable(&hdev->tx_task);
699 hci_req_unlock(hdev);
704 int hci_dev_reset_stat(__u16 dev)
706 struct hci_dev *hdev;
709 hdev = hci_dev_get(dev);
713 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
720 int hci_dev_cmd(unsigned int cmd, void __user *arg)
722 struct hci_dev *hdev;
723 struct hci_dev_req dr;
726 if (copy_from_user(&dr, arg, sizeof(dr)))
729 hdev = hci_dev_get(dr.dev_id);
735 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
736 msecs_to_jiffies(HCI_INIT_TIMEOUT));
740 if (!lmp_encrypt_capable(hdev)) {
745 if (!test_bit(HCI_AUTH, &hdev->flags)) {
746 /* Auth must be enabled first */
747 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
748 msecs_to_jiffies(HCI_INIT_TIMEOUT));
753 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
754 msecs_to_jiffies(HCI_INIT_TIMEOUT));
758 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
759 msecs_to_jiffies(HCI_INIT_TIMEOUT));
763 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
764 msecs_to_jiffies(HCI_INIT_TIMEOUT));
768 hdev->link_mode = ((__u16) dr.dev_opt) &
769 (HCI_LM_MASTER | HCI_LM_ACCEPT);
773 hdev->pkt_type = (__u16) dr.dev_opt;
777 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
778 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
782 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
783 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
795 int hci_get_dev_list(void __user *arg)
797 struct hci_dev_list_req *dl;
798 struct hci_dev_req *dr;
800 int n = 0, size, err;
803 if (get_user(dev_num, (__u16 __user *) arg))
806 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
809 size = sizeof(*dl) + dev_num * sizeof(*dr);
811 dl = kzalloc(size, GFP_KERNEL);
817 read_lock_bh(&hci_dev_list_lock);
818 list_for_each(p, &hci_dev_list) {
819 struct hci_dev *hdev;
821 hdev = list_entry(p, struct hci_dev, list);
823 hci_del_off_timer(hdev);
825 if (!test_bit(HCI_MGMT, &hdev->flags))
826 set_bit(HCI_PAIRABLE, &hdev->flags);
828 (dr + n)->dev_id = hdev->id;
829 (dr + n)->dev_opt = hdev->flags;
834 read_unlock_bh(&hci_dev_list_lock);
837 size = sizeof(*dl) + n * sizeof(*dr);
839 err = copy_to_user(arg, dl, size);
842 return err ? -EFAULT : 0;
845 int hci_get_dev_info(void __user *arg)
847 struct hci_dev *hdev;
848 struct hci_dev_info di;
851 if (copy_from_user(&di, arg, sizeof(di)))
854 hdev = hci_dev_get(di.dev_id);
858 hci_del_off_timer(hdev);
860 if (!test_bit(HCI_MGMT, &hdev->flags))
861 set_bit(HCI_PAIRABLE, &hdev->flags);
863 strcpy(di.name, hdev->name);
864 di.bdaddr = hdev->bdaddr;
865 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
866 di.flags = hdev->flags;
867 di.pkt_type = hdev->pkt_type;
868 di.acl_mtu = hdev->acl_mtu;
869 di.acl_pkts = hdev->acl_pkts;
870 di.sco_mtu = hdev->sco_mtu;
871 di.sco_pkts = hdev->sco_pkts;
872 di.link_policy = hdev->link_policy;
873 di.link_mode = hdev->link_mode;
875 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
876 memcpy(&di.features, &hdev->features, sizeof(di.features));
878 if (copy_to_user(arg, &di, sizeof(di)))
886 /* ---- Interface to HCI drivers ---- */
888 static int hci_rfkill_set_block(void *data, bool blocked)
890 struct hci_dev *hdev = data;
892 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
897 hci_dev_do_close(hdev);
902 static const struct rfkill_ops hci_rfkill_ops = {
903 .set_block = hci_rfkill_set_block,
906 /* Alloc HCI device */
907 struct hci_dev *hci_alloc_dev(void)
909 struct hci_dev *hdev;
911 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
915 hci_init_sysfs(hdev);
916 skb_queue_head_init(&hdev->driver_init);
920 EXPORT_SYMBOL(hci_alloc_dev);
922 /* Free HCI device */
923 void hci_free_dev(struct hci_dev *hdev)
925 skb_queue_purge(&hdev->driver_init);
927 /* will free via device release */
928 put_device(&hdev->dev);
930 EXPORT_SYMBOL(hci_free_dev);
932 static void hci_power_on(struct work_struct *work)
934 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
936 BT_DBG("%s", hdev->name);
938 if (hci_dev_open(hdev->id) < 0)
941 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
942 mod_timer(&hdev->off_timer,
943 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
945 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
946 mgmt_index_added(hdev->id);
949 static void hci_power_off(struct work_struct *work)
951 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
953 BT_DBG("%s", hdev->name);
955 hci_dev_close(hdev->id);
958 static void hci_auto_off(unsigned long data)
960 struct hci_dev *hdev = (struct hci_dev *) data;
962 BT_DBG("%s", hdev->name);
964 clear_bit(HCI_AUTO_OFF, &hdev->flags);
966 queue_work(hdev->workqueue, &hdev->power_off);
969 void hci_del_off_timer(struct hci_dev *hdev)
971 BT_DBG("%s", hdev->name);
973 clear_bit(HCI_AUTO_OFF, &hdev->flags);
974 del_timer(&hdev->off_timer);
977 int hci_uuids_clear(struct hci_dev *hdev)
979 struct list_head *p, *n;
981 list_for_each_safe(p, n, &hdev->uuids) {
982 struct bt_uuid *uuid;
984 uuid = list_entry(p, struct bt_uuid, list);
993 int hci_link_keys_clear(struct hci_dev *hdev)
995 struct list_head *p, *n;
997 list_for_each_safe(p, n, &hdev->link_keys) {
998 struct link_key *key;
1000 key = list_entry(p, struct link_key, list);
1009 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1011 struct list_head *p;
1013 list_for_each(p, &hdev->link_keys) {
1016 k = list_entry(p, struct link_key, list);
1018 if (bacmp(bdaddr, &k->bdaddr) == 0)
1025 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1026 u8 key_type, u8 old_key_type)
1029 if (key_type < 0x03)
1032 /* Debug keys are insecure so don't store them persistently */
1033 if (key_type == HCI_LK_DEBUG_COMBINATION)
1036 /* Changed combination key and there's no previous one */
1037 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1040 /* Security mode 3 case */
1044 /* Neither local nor remote side had no-bonding as requirement */
1045 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1048 /* Local side had dedicated bonding as requirement */
1049 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1052 /* Remote side had dedicated bonding as requirement */
1053 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1056 /* If none of the above criteria match, then don't store the key
1061 struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1065 list_for_each_entry(k, &hdev->link_keys, list) {
1066 struct key_master_id *id;
1068 if (k->type != HCI_LK_SMP_LTK)
1071 if (k->dlen != sizeof(*id))
1074 id = (void *) &k->data;
1075 if (id->ediv == ediv &&
1076 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1082 EXPORT_SYMBOL(hci_find_ltk);
1084 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1085 bdaddr_t *bdaddr, u8 type)
1089 list_for_each_entry(k, &hdev->link_keys, list)
1090 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1095 EXPORT_SYMBOL(hci_find_link_key_type);
1097 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1098 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1100 struct link_key *key, *old_key;
1101 u8 old_key_type, persistent;
1103 old_key = hci_find_link_key(hdev, bdaddr);
1105 old_key_type = old_key->type;
1108 old_key_type = conn ? conn->key_type : 0xff;
1109 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1112 list_add(&key->list, &hdev->link_keys);
1115 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1117 /* Some buggy controller combinations generate a changed
1118 * combination key for legacy pairing even when there's no
1120 if (type == HCI_LK_CHANGED_COMBINATION &&
1121 (!conn || conn->remote_auth == 0xff) &&
1122 old_key_type == 0xff) {
1123 type = HCI_LK_COMBINATION;
1125 conn->key_type = type;
1128 bacpy(&key->bdaddr, bdaddr);
1129 memcpy(key->val, val, 16);
1130 key->pin_len = pin_len;
1132 if (type == HCI_LK_CHANGED_COMBINATION)
1133 key->type = old_key_type;
1140 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1142 mgmt_new_key(hdev->id, key, persistent);
1145 list_del(&key->list);
1152 int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1153 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
1155 struct link_key *key, *old_key;
1156 struct key_master_id *id;
1159 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1161 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1164 old_key_type = old_key->type;
1166 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1169 list_add(&key->list, &hdev->link_keys);
1170 old_key_type = 0xff;
1173 key->dlen = sizeof(*id);
1175 bacpy(&key->bdaddr, bdaddr);
1176 memcpy(key->val, ltk, sizeof(key->val));
1177 key->type = HCI_LK_SMP_LTK;
1178 key->pin_len = key_size;
1180 id = (void *) &key->data;
1182 memcpy(id->rand, rand, sizeof(id->rand));
1185 mgmt_new_key(hdev->id, key, old_key_type);
1190 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1192 struct link_key *key;
1194 key = hci_find_link_key(hdev, bdaddr);
1198 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1200 list_del(&key->list);
1206 /* HCI command timer function */
1207 static void hci_cmd_timer(unsigned long arg)
1209 struct hci_dev *hdev = (void *) arg;
1211 BT_ERR("%s command tx timeout", hdev->name);
1212 atomic_set(&hdev->cmd_cnt, 1);
1213 tasklet_schedule(&hdev->cmd_task);
1216 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1219 struct oob_data *data;
1221 list_for_each_entry(data, &hdev->remote_oob_data, list)
1222 if (bacmp(bdaddr, &data->bdaddr) == 0)
1228 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1230 struct oob_data *data;
1232 data = hci_find_remote_oob_data(hdev, bdaddr);
1236 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1238 list_del(&data->list);
1244 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1246 struct oob_data *data, *n;
1248 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1249 list_del(&data->list);
1256 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1259 struct oob_data *data;
1261 data = hci_find_remote_oob_data(hdev, bdaddr);
1264 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1268 bacpy(&data->bdaddr, bdaddr);
1269 list_add(&data->list, &hdev->remote_oob_data);
1272 memcpy(data->hash, hash, sizeof(data->hash));
1273 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1275 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1280 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1283 struct list_head *p;
1285 list_for_each(p, &hdev->blacklist) {
1286 struct bdaddr_list *b;
1288 b = list_entry(p, struct bdaddr_list, list);
1290 if (bacmp(bdaddr, &b->bdaddr) == 0)
1297 int hci_blacklist_clear(struct hci_dev *hdev)
1299 struct list_head *p, *n;
1301 list_for_each_safe(p, n, &hdev->blacklist) {
1302 struct bdaddr_list *b;
1304 b = list_entry(p, struct bdaddr_list, list);
1313 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1315 struct bdaddr_list *entry;
1317 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1320 if (hci_blacklist_lookup(hdev, bdaddr))
1323 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1327 bacpy(&entry->bdaddr, bdaddr);
1329 list_add(&entry->list, &hdev->blacklist);
1331 return mgmt_device_blocked(hdev->id, bdaddr);
1334 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1336 struct bdaddr_list *entry;
1338 if (bacmp(bdaddr, BDADDR_ANY) == 0) {
1339 return hci_blacklist_clear(hdev);
1342 entry = hci_blacklist_lookup(hdev, bdaddr);
1347 list_del(&entry->list);
1350 return mgmt_device_unblocked(hdev->id, bdaddr);
1353 static void hci_clear_adv_cache(unsigned long arg)
1355 struct hci_dev *hdev = (void *) arg;
1359 hci_adv_entries_clear(hdev);
1361 hci_dev_unlock(hdev);
1364 int hci_adv_entries_clear(struct hci_dev *hdev)
1366 struct adv_entry *entry, *tmp;
1368 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1369 list_del(&entry->list);
1373 BT_DBG("%s adv cache cleared", hdev->name);
1378 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1380 struct adv_entry *entry;
1382 list_for_each_entry(entry, &hdev->adv_entries, list)
1383 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1389 static inline int is_connectable_adv(u8 evt_type)
1391 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1397 int hci_add_adv_entry(struct hci_dev *hdev,
1398 struct hci_ev_le_advertising_info *ev)
1400 struct adv_entry *entry;
1402 if (!is_connectable_adv(ev->evt_type))
1405 /* Only new entries should be added to adv_entries. So, if
1406 * bdaddr was found, don't add it. */
1407 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1410 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1414 bacpy(&entry->bdaddr, &ev->bdaddr);
1415 entry->bdaddr_type = ev->bdaddr_type;
1417 list_add(&entry->list, &hdev->adv_entries);
1419 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1420 batostr(&entry->bdaddr), entry->bdaddr_type);
1425 /* Register HCI device */
1426 int hci_register_dev(struct hci_dev *hdev)
1428 struct list_head *head = &hci_dev_list, *p;
1429 int i, id = 0, error;
1431 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1432 hdev->bus, hdev->owner);
1434 if (!hdev->open || !hdev->close || !hdev->destruct)
1437 write_lock_bh(&hci_dev_list_lock);
1439 /* Find first available device id */
1440 list_for_each(p, &hci_dev_list) {
1441 if (list_entry(p, struct hci_dev, list)->id != id)
1446 sprintf(hdev->name, "hci%d", id);
1448 list_add(&hdev->list, head);
1450 atomic_set(&hdev->refcnt, 1);
1451 spin_lock_init(&hdev->lock);
1454 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1455 hdev->esco_type = (ESCO_HV1);
1456 hdev->link_mode = (HCI_LM_ACCEPT);
1457 hdev->io_capability = 0x03; /* No Input No Output */
1459 hdev->idle_timeout = 0;
1460 hdev->sniff_max_interval = 800;
1461 hdev->sniff_min_interval = 80;
1463 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
1464 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1465 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1467 skb_queue_head_init(&hdev->rx_q);
1468 skb_queue_head_init(&hdev->cmd_q);
1469 skb_queue_head_init(&hdev->raw_q);
1471 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1473 for (i = 0; i < NUM_REASSEMBLY; i++)
1474 hdev->reassembly[i] = NULL;
1476 init_waitqueue_head(&hdev->req_wait_q);
1477 mutex_init(&hdev->req_lock);
1479 inquiry_cache_init(hdev);
1481 hci_conn_hash_init(hdev);
1483 INIT_LIST_HEAD(&hdev->blacklist);
1485 INIT_LIST_HEAD(&hdev->uuids);
1487 INIT_LIST_HEAD(&hdev->link_keys);
1489 INIT_LIST_HEAD(&hdev->remote_oob_data);
1491 INIT_LIST_HEAD(&hdev->adv_entries);
1492 setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
1493 (unsigned long) hdev);
1495 INIT_WORK(&hdev->power_on, hci_power_on);
1496 INIT_WORK(&hdev->power_off, hci_power_off);
1497 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1499 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1501 atomic_set(&hdev->promisc, 0);
1503 write_unlock_bh(&hci_dev_list_lock);
1505 hdev->workqueue = create_singlethread_workqueue(hdev->name);
1506 if (!hdev->workqueue) {
1511 error = hci_add_sysfs(hdev);
1515 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1516 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1518 if (rfkill_register(hdev->rfkill) < 0) {
1519 rfkill_destroy(hdev->rfkill);
1520 hdev->rfkill = NULL;
1524 set_bit(HCI_AUTO_OFF, &hdev->flags);
1525 set_bit(HCI_SETUP, &hdev->flags);
1526 queue_work(hdev->workqueue, &hdev->power_on);
1528 hci_notify(hdev, HCI_DEV_REG);
1533 destroy_workqueue(hdev->workqueue);
1535 write_lock_bh(&hci_dev_list_lock);
1536 list_del(&hdev->list);
1537 write_unlock_bh(&hci_dev_list_lock);
1541 EXPORT_SYMBOL(hci_register_dev);
1543 /* Unregister HCI device */
1544 void hci_unregister_dev(struct hci_dev *hdev)
1548 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1550 write_lock_bh(&hci_dev_list_lock);
1551 list_del(&hdev->list);
1552 write_unlock_bh(&hci_dev_list_lock);
1554 hci_dev_do_close(hdev);
1556 for (i = 0; i < NUM_REASSEMBLY; i++)
1557 kfree_skb(hdev->reassembly[i]);
1559 if (!test_bit(HCI_INIT, &hdev->flags) &&
1560 !test_bit(HCI_SETUP, &hdev->flags))
1561 mgmt_index_removed(hdev->id);
1563 hci_notify(hdev, HCI_DEV_UNREG);
1566 rfkill_unregister(hdev->rfkill);
1567 rfkill_destroy(hdev->rfkill);
1570 hci_del_sysfs(hdev);
1572 hci_del_off_timer(hdev);
1573 del_timer(&hdev->adv_timer);
1575 destroy_workqueue(hdev->workqueue);
1577 hci_dev_lock_bh(hdev);
1578 hci_blacklist_clear(hdev);
1579 hci_uuids_clear(hdev);
1580 hci_link_keys_clear(hdev);
1581 hci_remote_oob_data_clear(hdev);
1582 hci_adv_entries_clear(hdev);
1583 hci_dev_unlock_bh(hdev);
1585 __hci_dev_put(hdev);
1587 EXPORT_SYMBOL(hci_unregister_dev);
1589 /* Suspend HCI device */
1590 int hci_suspend_dev(struct hci_dev *hdev)
1592 hci_notify(hdev, HCI_DEV_SUSPEND);
1595 EXPORT_SYMBOL(hci_suspend_dev);
1597 /* Resume HCI device */
1598 int hci_resume_dev(struct hci_dev *hdev)
1600 hci_notify(hdev, HCI_DEV_RESUME);
1603 EXPORT_SYMBOL(hci_resume_dev);
1605 /* Receive frame from HCI drivers */
/*
 * Entry point for transport drivers handing a complete HCI frame to the
 * core.  The skb's dev field carries the originating hci_dev.  Frames are
 * only accepted while the device is UP or still in INIT; otherwise the
 * (not fully visible here) error path rejects them.
 */
1606 int hci_recv_frame(struct sk_buff *skb)
1608 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1609 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1610 && !test_bit(HCI_INIT, &hdev->flags))) {
/* Mark direction and timestamp for socket monitors (hcidump etc.). */
1616 bt_cb(skb)->incoming = 1;
1619 __net_timestamp(skb);
1621 /* Queue frame for rx task */
1622 skb_queue_tail(&hdev->rx_q, skb);
1623 tasklet_schedule(&hdev->rx_task);
1627 EXPORT_SYMBOL(hci_recv_frame);
/*
 * Incrementally reassemble an HCI packet of the given type from driver
 * fragments.  Per-slot state lives in hdev->reassembly[index]; scb->expect
 * tracks how many more bytes are needed.  When expect reaches zero the
 * completed skb is handed to hci_recv_frame().
 * NOTE(review): excerpt is missing the allocation-failure path, the outer
 * while loop and the return statements — see full file.
 */
1629 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1630 int count, __u8 index)
1635 struct sk_buff *skb;
1636 struct bt_skb_cb *scb;
/* Reject unknown packet types and out-of-range reassembly slots. */
1638 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1639 index >= NUM_REASSEMBLY)
1642 skb = hdev->reassembly[index];
/* No skb in flight yet: pick max frame size and header length per type. */
1646 case HCI_ACLDATA_PKT:
1647 len = HCI_MAX_FRAME_SIZE;
1648 hlen = HCI_ACL_HDR_SIZE;
1651 len = HCI_MAX_EVENT_SIZE;
1652 hlen = HCI_EVENT_HDR_SIZE;
1654 case HCI_SCODATA_PKT:
1655 len = HCI_MAX_SCO_SIZE;
1656 hlen = HCI_SCO_HDR_SIZE;
1660 skb = bt_skb_alloc(len, GFP_ATOMIC);
1664 scb = (void *) skb->cb;
1666 scb->pkt_type = type;
1668 skb->dev = (void *) hdev;
1669 hdev->reassembly[index] = skb;
/* Copy as much of this fragment as the current expectation allows. */
1673 scb = (void *) skb->cb;
1674 len = min(scb->expect, (__u16)count);
1676 memcpy(skb_put(skb, len), data, len);
/* Once a full header has arrived, learn the payload length from it. */
1685 if (skb->len == HCI_EVENT_HDR_SIZE) {
1686 struct hci_event_hdr *h = hci_event_hdr(skb);
1687 scb->expect = h->plen;
/* Drop the packet if the advertised payload exceeds the buffer. */
1689 if (skb_tailroom(skb) < scb->expect) {
1691 hdev->reassembly[index] = NULL;
1697 case HCI_ACLDATA_PKT:
1698 if (skb->len == HCI_ACL_HDR_SIZE) {
1699 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1700 scb->expect = __le16_to_cpu(h->dlen);
1702 if (skb_tailroom(skb) < scb->expect) {
1704 hdev->reassembly[index] = NULL;
1710 case HCI_SCODATA_PKT:
1711 if (skb->len == HCI_SCO_HDR_SIZE) {
1712 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1713 scb->expect = h->dlen;
1715 if (skb_tailroom(skb) < scb->expect) {
1717 hdev->reassembly[index] = NULL;
1724 if (scb->expect == 0) {
1725 /* Complete frame */
1727 bt_cb(skb)->pkt_type = type;
1728 hci_recv_frame(skb);
/* Slot is free again for the next packet. */
1730 hdev->reassembly[index] = NULL;
/*
 * Feed a driver fragment of a typed HCI packet into the reassembler.
 * Reassembly slot is derived from the packet type (type - 1).  The loop
 * advancing over "data" until all bytes are consumed is partially elided
 * in this excerpt.
 */
1738 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1742 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1746 rem = hci_reassembly(hdev, type, data, count, type - 1);
/* Skip past the bytes hci_reassembly() consumed. */
1750 data += (count - rem);
1756 EXPORT_SYMBOL(hci_recv_fragment);
1758 #define STREAM_REASSEMBLY 0
/*
 * Reassemble HCI packets from an untyped byte stream (e.g. a UART
 * transport): the first byte of each frame is the packet-type indicator,
 * after which reassembly continues in the dedicated STREAM_REASSEMBLY
 * slot.  Parts of the frame-start handling are elided in this excerpt.
 */
1760 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1766 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1769 struct { char type; } *pkt;
1771 /* Start of the frame */
/* Mid-frame: reuse the type recorded when the frame started. */
1778 type = bt_cb(skb)->pkt_type;
1780 rem = hci_reassembly(hdev, type, data, count,
1785 data += (count - rem);
1791 EXPORT_SYMBOL(hci_recv_stream_fragment);
1793 /* ---- Interface to upper protocols ---- */
1795 /* Register/Unregister protocols.
1796 * hci_task_lock is used to ensure that no tasks are running. */
/*
 * Register an upper-layer protocol (L2CAP/SCO) in the hci_proto table.
 * Taking hci_task_lock for writing guarantees no RX/TX task is running
 * while the table changes.  Slot must be in range and currently empty.
 */
1797 int hci_register_proto(struct hci_proto *hp)
1801 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1803 if (hp->id >= HCI_MAX_PROTO)
1806 write_lock_bh(&hci_task_lock);
/* Only install if the slot is free (else the elided path reports busy). */
1808 if (!hci_proto[hp->id])
1809 hci_proto[hp->id] = hp;
1813 write_unlock_bh(&hci_task_lock);
1817 EXPORT_SYMBOL(hci_register_proto);
/*
 * Remove an upper-layer protocol from the hci_proto table, mirror of
 * hci_register_proto().  hci_task_lock excludes the RX/TX tasks during
 * the update.
 */
1819 int hci_unregister_proto(struct hci_proto *hp)
1823 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1825 if (hp->id >= HCI_MAX_PROTO)
1828 write_lock_bh(&hci_task_lock);
1830 if (hci_proto[hp->id])
1831 hci_proto[hp->id] = NULL;
1835 write_unlock_bh(&hci_task_lock);
1839 EXPORT_SYMBOL(hci_unregister_proto);
/* Add a HCI event callback structure to the global hci_cb_list. */
1841 int hci_register_cb(struct hci_cb *cb)
1843 BT_DBG("%p name %s", cb, cb->name);
1845 write_lock_bh(&hci_cb_list_lock);
1846 list_add(&cb->list, &hci_cb_list);
1847 write_unlock_bh(&hci_cb_list_lock);
1851 EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered callback from hci_cb_list. */
1853 int hci_unregister_cb(struct hci_cb *cb)
1855 BT_DBG("%p name %s", cb, cb->name);
1857 write_lock_bh(&hci_cb_list_lock);
1858 list_del(&cb->list);
1859 write_unlock_bh(&hci_cb_list_lock);
1863 EXPORT_SYMBOL(hci_unregister_cb);
/*
 * Hand an outgoing frame to the transport driver.  If any monitor socket
 * is in promiscuous mode, a timestamped copy is delivered to it first.
 * The skb's dev field carries the target hci_dev.
 */
1865 static int hci_send_frame(struct sk_buff *skb)
1867 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1874 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1876 if (atomic_read(&hdev->promisc)) {
/* Time stamp and mirror the frame to listening HCI sockets. */
1878 __net_timestamp(skb);
1880 hci_send_to_sock(hdev, skb, NULL);
1883 /* Get rid of skb owner, prior to sending to the driver. */
/* Driver callback actually transmits the frame. */
1886 return hdev->send(skb);
1889 /* Send HCI command */
/*
 * Build an HCI command packet (header + plen parameter bytes) and queue
 * it on cmd_q for the command tasklet, which serializes transmission.
 * NOTE(review): the allocation-failure return and hdr->plen assignment
 * are among the lines elided from this excerpt.
 */
1890 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1892 int len = HCI_COMMAND_HDR_SIZE + plen;
1893 struct hci_command_hdr *hdr;
1894 struct sk_buff *skb;
1896 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1898 skb = bt_skb_alloc(len, GFP_ATOMIC);
1900 BT_ERR("%s no memory for command", hdev->name);
/* Fill in the little-endian command header, then append parameters. */
1904 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1905 hdr->opcode = cpu_to_le16(opcode);
1909 memcpy(skb_put(skb, plen), param, plen);
1911 BT_DBG("skb len %d", skb->len);
1913 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1914 skb->dev = (void *) hdev;
/* During init, remember the last command issued for resend/tracking. */
1916 if (test_bit(HCI_INIT, &hdev->flags))
1917 hdev->init_last_cmd = opcode;
1919 skb_queue_tail(&hdev->cmd_q, skb);
1920 tasklet_schedule(&hdev->cmd_task);
1925 /* Get data from the previously sent command */
/*
 * Return a pointer to the parameter bytes of the last command sent,
 * but only if its opcode matches the one the caller expects; otherwise
 * the elided path returns NULL.
 */
1926 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1928 struct hci_command_hdr *hdr;
1930 if (!hdev->sent_cmd)
1933 hdr = (void *) hdev->sent_cmd->data;
/* Compare in wire (little-endian) byte order. */
1935 if (hdr->opcode != cpu_to_le16(opcode))
1938 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1940 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/*
 * Prepend an ACL data header to skb: handle+PB/BC flags packed into one
 * 16-bit field, followed by the data length, both little-endian.
 * NOTE(review): the local holding the pre-push skb->len ("len") is on an
 * elided line of this excerpt.
 */
1944 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1946 struct hci_acl_hdr *hdr;
1949 skb_push(skb, HCI_ACL_HDR_SIZE);
1950 skb_reset_transport_header(skb);
1951 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1952 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1953 hdr->dlen = cpu_to_le16(len);
/*
 * Queue ACL data on a connection for the TX tasklet.  A fragmented skb
 * (frag_list set) is split into individual queue entries: the first keeps
 * the caller's flags, continuations are queued with ACL_START cleared —
 * all added atomically under the queue lock so fragments stay contiguous.
 */
1956 void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1958 struct hci_dev *hdev = conn->hdev;
1959 struct sk_buff *list;
1961 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1963 skb->dev = (void *) hdev;
1964 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1965 hci_add_acl_hdr(skb, conn->handle, flags);
1967 list = skb_shinfo(skb)->frag_list;
1969 /* Non fragmented */
1970 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1972 skb_queue_tail(&conn->data_q, skb);
/* Fragmented path: detach the frag list and queue each piece. */
1975 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1977 skb_shinfo(skb)->frag_list = NULL;
1979 /* Queue all fragments atomically */
1980 spin_lock_bh(&conn->data_q.lock);
1982 __skb_queue_tail(&conn->data_q, skb);
/* Continuation fragments must not carry the ACL_START flag. */
1984 flags &= ~ACL_START;
1987 skb = list; list = list->next;
1989 skb->dev = (void *) hdev;
1990 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1991 hci_add_acl_hdr(skb, conn->handle, flags);
1993 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1995 __skb_queue_tail(&conn->data_q, skb);
1998 spin_unlock_bh(&conn->data_q.lock);
/* Kick the TX scheduler to drain the per-connection queue. */
2001 tasklet_schedule(&hdev->tx_task);
2003 EXPORT_SYMBOL(hci_send_acl);
/*
 * Queue SCO audio data on a connection: build the SCO header (handle in
 * little-endian, one-byte length) in a stack struct, copy it in front of
 * the payload, then queue for the TX tasklet.
 */
2006 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2008 struct hci_dev *hdev = conn->hdev;
2009 struct hci_sco_hdr hdr;
2011 BT_DBG("%s len %d", hdev->name, skb->len);
2013 hdr.handle = cpu_to_le16(conn->handle);
2014 hdr.dlen = skb->len;
2016 skb_push(skb, HCI_SCO_HDR_SIZE);
2017 skb_reset_transport_header(skb);
2018 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2020 skb->dev = (void *) hdev;
2021 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2023 skb_queue_tail(&conn->data_q, skb);
2024 tasklet_schedule(&hdev->tx_task);
2026 EXPORT_SYMBOL(hci_send_sco);
2028 /* ---- HCI TX task (outgoing data) ---- */
2030 /* HCI Connection scheduler */
/*
 * Pick the connection of the given link type with queued data and the
 * fewest in-flight ("sent") packets, and compute its fair-share quota
 * from the controller's remaining buffer credits.  Returns NULL when no
 * eligible connection exists.
 */
2031 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2033 struct hci_conn_hash *h = &hdev->conn_hash;
2034 struct hci_conn *conn = NULL;
2035 int num = 0, min = ~0;
2036 struct list_head *p;
2038 /* We don't have to lock device here. Connections are always
2039 * added and removed with TX task disabled. */
2040 list_for_each(p, &h->list) {
2042 c = list_entry(p, struct hci_conn, list);
/* Skip wrong-type or idle connections. */
2044 if (c->type != type || skb_queue_empty(&c->data_q))
2047 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the connection with the fewest unacked packets. */
2052 if (c->sent < min) {
/* Early exit once every connection of this type has been counted. */
2057 if (hci_conn_num(hdev, type) == num)
/* Select the credit pool matching the chosen link type. */
2064 switch (conn->type) {
2066 cnt = hdev->acl_cnt;
2070 cnt = hdev->sco_cnt;
/* LE shares the ACL pool when the controller has no separate LE buffers. */
2073 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2077 BT_ERR("Unknown link type");
2085 BT_DBG("conn %p quote %d", conn, *quote);
/*
 * TX timeout handler: the controller stopped returning buffer credits,
 * so disconnect every connection of this link type that still has
 * unacknowledged packets (reason 0x13 = remote user terminated).
 */
2089 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2091 struct hci_conn_hash *h = &hdev->conn_hash;
2092 struct list_head *p;
2095 BT_ERR("%s link tx timeout", hdev->name);
2097 /* Kill stalled connections */
2098 list_for_each(p, &h->list) {
2099 c = list_entry(p, struct hci_conn, list);
2100 if (c->type == type && c->sent) {
2101 BT_ERR("%s killing stalled connection %s",
2102 hdev->name, batostr(&c->dst));
2103 hci_acl_disconn(c, 0x13);
/*
 * Drain queued ACL data fairly across connections while controller
 * buffer credits (acl_cnt) remain.  Also detects a stalled controller:
 * no credits returned for 45 seconds triggers hci_link_tx_to().
 */
2108 static inline void hci_sched_acl(struct hci_dev *hdev)
2110 struct hci_conn *conn;
2111 struct sk_buff *skb;
2114 BT_DBG("%s", hdev->name);
2116 if (!hci_conn_num(hdev, ACL_LINK))
/* Raw (userspace-driven) devices manage flow control themselves. */
2119 if (!test_bit(HCI_RAW, &hdev->flags)) {
2120 /* ACL tx timeout must be longer than maximum
2121 * link supervision timeout (40.9 seconds) */
2122 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
2123 hci_link_tx_to(hdev, ACL_LINK);
/* Send up to "quote" packets from the least-busy connection each pass. */
2126 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
2127 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2128 BT_DBG("skb %p len %d", skb, skb->len);
/* Leave sniff mode before transmitting, if the frame demands it. */
2130 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2132 hci_send_frame(skb);
2133 hdev->acl_last_tx = jiffies;
/*
 * Drain queued SCO data while sco_cnt credits remain, picking the
 * least-busy SCO connection each round via hci_low_sent().
 */
2142 static inline void hci_sched_sco(struct hci_dev *hdev)
2144 struct hci_conn *conn;
2145 struct sk_buff *skb;
2148 BT_DBG("%s", hdev->name);
2150 if (!hci_conn_num(hdev, SCO_LINK))
2153 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2154 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2155 BT_DBG("skb %p len %d", skb, skb->len);
2156 hci_send_frame(skb);
/* Wrap-around guard for the per-connection sent counter. */
2159 if (conn->sent == ~0)
/*
 * eSCO counterpart of hci_sched_sco(): same credit pool (sco_cnt),
 * different link type.
 */
2165 static inline void hci_sched_esco(struct hci_dev *hdev)
2167 struct hci_conn *conn;
2168 struct sk_buff *skb;
2171 BT_DBG("%s", hdev->name);
2173 if (!hci_conn_num(hdev, ESCO_LINK))
2176 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2177 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2178 BT_DBG("skb %p len %d", skb, skb->len);
2179 hci_send_frame(skb);
/* Wrap-around guard for the per-connection sent counter. */
2182 if (conn->sent == ~0)
/*
 * Drain queued LE data.  Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) borrow ACL credits, in which case the consumed count is
 * written back to acl_cnt at the end.  Same 45 s stall detection as ACL.
 */
2188 static inline void hci_sched_le(struct hci_dev *hdev)
2190 struct hci_conn *conn;
2191 struct sk_buff *skb;
2194 BT_DBG("%s", hdev->name);
2196 if (!hci_conn_num(hdev, LE_LINK))
2199 if (!test_bit(HCI_RAW, &hdev->flags)) {
2200 /* LE tx timeout must be longer than maximum
2201 * link supervision timeout (40.9 seconds) */
2202 if (!hdev->le_cnt && hdev->le_pkts &&
2203 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2204 hci_link_tx_to(hdev, LE_LINK);
/* Choose the credit pool: dedicated LE credits or shared ACL credits. */
2207 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2208 while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
2209 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2210 BT_DBG("skb %p len %d", skb, skb->len);
2212 hci_send_frame(skb);
2213 hdev->le_last_tx = jiffies;
/* Shared-pool case: reflect consumed credits back into acl_cnt. */
2222 hdev->acl_cnt = cnt;
/*
 * TX tasklet body: runs all per-link-type schedulers under the task
 * read lock, then flushes any raw/untyped packets queued on raw_q
 * straight to the driver.
 */
2225 static void hci_tx_task(unsigned long arg)
2227 struct hci_dev *hdev = (struct hci_dev *) arg;
2228 struct sk_buff *skb;
/* Read lock: schedulers may run concurrently with other readers, but
 * never while a protocol is being (un)registered. */
2230 read_lock(&hci_task_lock);
2232 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2233 hdev->sco_cnt, hdev->le_cnt);
2235 /* Schedule queues and send stuff to HCI driver */
2237 hci_sched_acl(hdev);
2239 hci_sched_sco(hdev);
2241 hci_sched_esco(hdev);
2245 /* Send next queued raw (unknown type) packet */
2246 while ((skb = skb_dequeue(&hdev->raw_q)))
2247 hci_send_frame(skb);
2249 read_unlock(&hci_task_lock);
2252 /* ----- HCI RX task (incoming data processing) ----- */
2254 /* ACL data packet */
/*
 * Deliver an incoming ACL data packet: strip the ACL header, split the
 * wire handle field into connection handle + PB/BC flags, look up the
 * connection and pass the payload to the L2CAP protocol handler.
 * Unknown handles are logged and the skb dropped (drop path elided).
 */
2255 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2257 struct hci_acl_hdr *hdr = (void *) skb->data;
2258 struct hci_conn *conn;
2259 __u16 handle, flags;
2261 skb_pull(skb, HCI_ACL_HDR_SIZE);
2263 handle = __le16_to_cpu(hdr->handle);
2264 flags = hci_flags(handle);
2265 handle = hci_handle(handle);
2267 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2269 hdev->stat.acl_rx++;
/* Connection lookup under the device lock. */
2272 conn = hci_conn_hash_lookup_handle(hdev, handle);
2273 hci_dev_unlock(hdev);
2276 register struct hci_proto *hp;
/* Incoming data may require leaving sniff mode. */
2278 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2280 /* Send to upper protocol */
2281 hp = hci_proto[HCI_PROTO_L2CAP];
2282 if (hp && hp->recv_acldata) {
2283 hp->recv_acldata(conn, skb, flags);
2287 BT_ERR("%s ACL packet for unknown connection handle %d",
2288 hdev->name, handle);
2294 /* SCO data packet */
/*
 * Deliver an incoming SCO data packet: strip the SCO header, look up the
 * connection by handle and pass the payload to the SCO protocol handler.
 * Unknown handles are logged and the skb dropped (drop path elided).
 */
2295 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2297 struct hci_sco_hdr *hdr = (void *) skb->data;
2298 struct hci_conn *conn;
2301 skb_pull(skb, HCI_SCO_HDR_SIZE);
2303 handle = __le16_to_cpu(hdr->handle);
2305 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2307 hdev->stat.sco_rx++;
2310 conn = hci_conn_hash_lookup_handle(hdev, handle);
2311 hci_dev_unlock(hdev);
2314 register struct hci_proto *hp;
2316 /* Send to upper protocol */
2317 hp = hci_proto[HCI_PROTO_SCO];
2318 if (hp && hp->recv_scodata) {
2319 hp->recv_scodata(conn, skb);
2323 BT_ERR("%s SCO packet for unknown connection handle %d",
2324 hdev->name, handle);
/*
 * RX tasklet body: dequeue frames from rx_q and dispatch by packet type
 * (event / ACL / SCO).  Monitor sockets get a copy first; RAW devices
 * and data packets arriving during INIT are diverted (paths partially
 * elided in this excerpt).
 */
2330 static void hci_rx_task(unsigned long arg)
2332 struct hci_dev *hdev = (struct hci_dev *) arg;
2333 struct sk_buff *skb;
2335 BT_DBG("%s", hdev->name);
2337 read_lock(&hci_task_lock);
2339 while ((skb = skb_dequeue(&hdev->rx_q))) {
2340 if (atomic_read(&hdev->promisc)) {
2341 /* Send copy to the sockets */
2342 hci_send_to_sock(hdev, skb, NULL);
/* Raw device: userspace owns the frames, core does not process them. */
2345 if (test_bit(HCI_RAW, &hdev->flags)) {
2350 if (test_bit(HCI_INIT, &hdev->flags)) {
2351 /* Don't process data packets in this states. */
2352 switch (bt_cb(skb)->pkt_type) {
2353 case HCI_ACLDATA_PKT:
2354 case HCI_SCODATA_PKT:
/* Normal dispatch by packet type. */
2361 switch (bt_cb(skb)->pkt_type) {
2363 hci_event_packet(hdev, skb);
2366 case HCI_ACLDATA_PKT:
2367 BT_DBG("%s ACL data packet", hdev->name);
2368 hci_acldata_packet(hdev, skb);
2371 case HCI_SCODATA_PKT:
2372 BT_DBG("%s SCO data packet", hdev->name);
2373 hci_scodata_packet(hdev, skb);
2382 read_unlock(&hci_task_lock);
2385 static void hci_cmd_task(unsigned long arg)
2387 struct hci_dev *hdev = (struct hci_dev *) arg;
2388 struct sk_buff *skb;
2390 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2392 /* Send queued commands */
2393 if (atomic_read(&hdev->cmd_cnt)) {
2394 skb = skb_dequeue(&hdev->cmd_q);
2398 kfree_skb(hdev->sent_cmd);
2400 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2401 if (hdev->sent_cmd) {
2402 atomic_dec(&hdev->cmd_cnt);
2403 hci_send_frame(skb);
2404 if (test_bit(HCI_RESET, &hdev->flags))
2405 del_timer(&hdev->cmd_timer);
2407 mod_timer(&hdev->cmd_timer,
2408 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2410 skb_queue_head(&hdev->cmd_q, skb);
2411 tasklet_schedule(&hdev->cmd_task);