2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/notifier.h>
44 #include <linux/rfkill.h>
45 #include <linux/timer.h>
46 #include <linux/crypto.h>
49 #include <asm/system.h>
50 #include <linux/uaccess.h>
51 #include <asm/unaligned.h>
53 #include <net/bluetooth/bluetooth.h>
54 #include <net/bluetooth/hci_core.h>
56 #define AUTO_OFF_TIMEOUT 2000
60 static void hci_rx_work(struct work_struct *work);
61 static void hci_cmd_work(struct work_struct *work);
62 static void hci_tx_work(struct work_struct *work);
64 static DEFINE_MUTEX(hci_task_lock);
67 LIST_HEAD(hci_dev_list);
68 DEFINE_RWLOCK(hci_dev_list_lock);
70 /* HCI callback list */
71 LIST_HEAD(hci_cb_list);
72 DEFINE_RWLOCK(hci_cb_list_lock);
75 #define HCI_MAX_PROTO 2
76 struct hci_proto *hci_proto[HCI_MAX_PROTO];
78 /* HCI notifiers list */
79 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
81 /* ---- HCI notifications ---- */
/* NOTE(review): gaps in the embedded line numbering show this listing elides
 * lines (braces, returns, etc.); verify against the full source. */
/* Register a callback on the global atomic HCI notifier chain. */
83 int hci_register_notifier(struct notifier_block *nb)
85 	return atomic_notifier_chain_register(&hci_notifier, nb);
/* Remove a previously registered callback from the notifier chain. */
88 int hci_unregister_notifier(struct notifier_block *nb)
90 	return atomic_notifier_chain_unregister(&hci_notifier, nb);
/* Broadcast an HCI device event (e.g. HCI_DEV_UP) to all registered notifiers. */
93 static void hci_notify(struct hci_dev *hdev, int event)
95 	atomic_notifier_call_chain(&hci_notifier, event, hdev);
98 /* ---- HCI requests ---- */
/* Mark the pending synchronous request as done and wake its waiter.
 * During HCI_INIT only the last-issued init command completes the request. */
100 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
102 	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
104 	/* If this is the init phase check if the completed command matches
105 	 * the last init command, and if not just return.
107 	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
110 	if (hdev->req_status == HCI_REQ_PEND) {
111 		hdev->req_result = result;
112 		hdev->req_status = HCI_REQ_DONE;
113 		wake_up_interruptible(&hdev->req_wait_q);
/* Abort a pending synchronous request with a positive errno-style code;
 * the waiter in __hci_request() negates it. */
117 static void hci_req_cancel(struct hci_dev *hdev, int err)
119 	BT_DBG("%s err 0x%2.2x", hdev->name, err);
121 	if (hdev->req_status == HCI_REQ_PEND) {
122 		hdev->req_result = err;
123 		hdev->req_status = HCI_REQ_CANCELED;
124 		wake_up_interruptible(&hdev->req_wait_q);
128 /* Execute request and wait for completion. */
/* Run @req to queue HCI commands, then sleep interruptibly until
 * hci_req_complete()/hci_req_cancel() fires or @timeout (jiffies) expires.
 * Caller must already hold the request lock. Returns 0 or -errno. */
129 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
130 					unsigned long opt, __u32 timeout)
132 	DECLARE_WAITQUEUE(wait, current);
135 	BT_DBG("%s start", hdev->name);
137 	hdev->req_status = HCI_REQ_PEND;
139 	add_wait_queue(&hdev->req_wait_q, &wait);
140 	set_current_state(TASK_INTERRUPTIBLE);
143 	schedule_timeout(timeout);
145 	remove_wait_queue(&hdev->req_wait_q, &wait);
	/* A signal interrupts the wait; NOTE(review): the return on this path
	 * is elided in this listing (likely -EINTR). */
147 	if (signal_pending(current))
150 	switch (hdev->req_status) {
		/* HCI status byte mapped to a negative errno. */
152 		err = -bt_to_errno(hdev->req_result);
155 	case HCI_REQ_CANCELED:
156 		err = -hdev->req_result;
164 	hdev->req_status = hdev->req_result = 0;
166 	BT_DBG("%s end: err %d", hdev->name, err);
/* Public wrapper: refuses if the device is down, then serializes
 * all synchronous requests under the request lock. */
171 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
172 					unsigned long opt, __u32 timeout)
176 	if (!test_bit(HCI_UP, &hdev->flags))
179 	/* Serialize all requests */
181 	ret = __hci_request(hdev, req, opt, timeout);
182 	hci_req_unlock(hdev);
/* Request builder: queue a single HCI_Reset command. */
187 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
189 	BT_DBG("%s %ld", hdev->name, opt);
192 	set_bit(HCI_RESET, &hdev->flags);
193 	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
/* BR/EDR controller bring-up: queue the mandatory identity/buffer reads
 * and optional filter/timeout setup defined by the Core Specification. */
196 static void bredr_init(struct hci_dev *hdev)
198 	struct hci_cp_delete_stored_link_key cp;
202 	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
204 	/* Mandatory initialization */
	/* Reset unless the driver declared the quirk that forbids it. */
207 	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
208 		set_bit(HCI_RESET, &hdev->flags);
209 		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
212 	/* Read Local Supported Features */
213 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
215 	/* Read Local Version */
216 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
218 	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
219 	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
221 	/* Read BD Address */
222 	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
224 	/* Read Class of Device */
225 	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
227 	/* Read Local Name */
228 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
230 	/* Read Voice Setting */
231 	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
233 	/* Optional initialization */
235 	/* Clear Event Filters */
236 	flt_type = HCI_FLT_CLEAR_ALL;
237 	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
239 	/* Connection accept timeout ~20 secs */
240 	param = cpu_to_le16(0x7d00);
	/* FIX: the listing had mojibake "¶m" here — corrupted "&param". */
241 	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
243 	bacpy(&cp.bdaddr, BDADDR_ANY);
245 	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
/* AMP controller bring-up: block-based flow control, reset, version read. */
248 static void amp_init(struct hci_dev *hdev)
250 	hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
253 	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
255 	/* Read Local Version */
256 	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
/* Device init request: flush driver-supplied bootstrap commands into the
 * command queue, then run the type-specific init (BR/EDR vs AMP). */
259 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
263 	BT_DBG("%s %ld", hdev->name, opt);
265 	/* Driver initialization */
267 	/* Special commands */
268 	while ((skb = skb_dequeue(&hdev->driver_init))) {
269 		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
270 		skb->dev = (void *) hdev;
272 		skb_queue_tail(&hdev->cmd_q, skb);
273 		queue_work(hdev->workqueue, &hdev->cmd_work);
275 	skb_queue_purge(&hdev->driver_init);
	/* NOTE(review): the case arms dispatching to bredr_init()/amp_init()
	 * are elided in this listing. */
277 	switch (hdev->dev_type) {
287 		BT_ERR("Unknown device type %d", hdev->dev_type);
/* LE init request: query the LE ACL buffer capacity. */
293 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
295 	BT_DBG("%s", hdev->name);
297 	/* Read LE buffer size */
298 	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
/* Request builder: enable/disable inquiry and page scan (opt carries the mode). */
301 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
305 	BT_DBG("%s %x", hdev->name, scan);
307 	/* Inquiry and Page scans */
308 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request builder: toggle link-level authentication. */
311 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
315 	BT_DBG("%s %x", hdev->name, auth);
318 	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request builder: toggle link-level encryption mode. */
321 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
325 	BT_DBG("%s %x", hdev->name, encrypt);
328 	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request builder: set the default link policy (opt = policy bits). */
331 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
333 	__le16 policy = cpu_to_le16(opt);
335 	BT_DBG("%s %x", hdev->name, policy);
337 	/* Default link policy */
338 	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
341 /* Get HCI device by index.
342  * Device is held on return. */
/* Walks hci_dev_list under the read lock; on a match takes a reference
 * via hci_dev_hold() so the caller must later drop it. */
343 struct hci_dev *hci_dev_get(int index)
345 	struct hci_dev *hdev = NULL, *d;
352 	read_lock(&hci_dev_list_lock);
353 	list_for_each_entry(d, &hci_dev_list, list) {
354 		if (d->id == index) {
355 			hdev = hci_dev_hold(d);
359 	read_unlock(&hci_dev_list_lock);
363 /* ---- Inquiry support ---- */
/* Drop every entry from the per-device inquiry result cache.
 * NOTE(review): the freeing loop is elided in this listing. */
364 static void inquiry_cache_flush(struct hci_dev *hdev)
366 	struct inquiry_cache *cache = &hdev->inq_cache;
367 	struct inquiry_entry *next = cache->list, *e;
369 	BT_DBG("cache %p", cache);
/* Linear search of the inquiry cache by Bluetooth address; NULL if absent. */
378 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
380 	struct inquiry_cache *cache = &hdev->inq_cache;
381 	struct inquiry_entry *e;
383 	BT_DBG("cache %p, %s", cache, batostr(bdaddr));
385 	for (e = cache->list; e; e = e->next)
386 		if (!bacmp(&e->data.bdaddr, bdaddr))
/* Insert or refresh an inquiry result; new entries are prepended and
 * both entry and cache timestamps are bumped. */
391 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
393 	struct inquiry_cache *cache = &hdev->inq_cache;
394 	struct inquiry_entry *ie;
396 	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
398 	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
400 	/* Entry not in the cache. Add new one. */
	/* GFP_ATOMIC: may be called from non-sleeping (event) context. */
401 	ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
405 	ie->next = cache->list;
409 	memcpy(&ie->data, data, sizeof(*data));
410 	ie->timestamp = jiffies;
411 	cache->timestamp = jiffies;
/* Serialize up to @num cached entries into @buf as struct inquiry_info
 * records; returns the number copied. Must not sleep (caller holds lock). */
414 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
416 	struct inquiry_cache *cache = &hdev->inq_cache;
417 	struct inquiry_info *info = (struct inquiry_info *) buf;
418 	struct inquiry_entry *e;
421 	for (e = cache->list; e && copied < num; e = e->next, copied++) {
422 		struct inquiry_data *data = &e->data;
423 		bacpy(&info->bdaddr, &data->bdaddr);
424 		info->pscan_rep_mode = data->pscan_rep_mode;
425 		info->pscan_period_mode = data->pscan_period_mode;
426 		info->pscan_mode = data->pscan_mode;
427 		memcpy(info->dev_class, data->dev_class, 3);
428 		info->clock_offset = data->clock_offset;
432 	BT_DBG("cache %p, copied %d", cache, copied);
/* Request builder for hci_inquiry(): queues an HCI_Inquiry command built
 * from the user's hci_inquiry_req, unless an inquiry is already running. */
436 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
438 	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
439 	struct hci_cp_inquiry cp;
441 	BT_DBG("%s", hdev->name);
443 	if (test_bit(HCI_INQUIRY, &hdev->flags))
	/* LAP is a 3-byte inquiry access code. */
447 	memcpy(&cp.lap, &ir->lap, 3);
448 	cp.length = ir->length;
449 	cp.num_rsp = ir->num_rsp;
450 	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* HCIINQUIRY ioctl backend: optionally flush a stale cache, run a fresh
 * inquiry, then copy cached results (struct inquiry_info[]) to user space. */
453 int hci_inquiry(void __user *arg)
455 	__u8 __user *ptr = arg;
456 	struct hci_inquiry_req ir;
457 	struct hci_dev *hdev;
458 	int err = 0, do_inquiry = 0, max_rsp;
462 	if (copy_from_user(&ir, ptr, sizeof(ir)))
465 	hdev = hci_dev_get(ir.dev_id);
	/* A fresh inquiry is needed if the cache is old/empty or the caller
	 * explicitly asked for a flush. */
470 	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
471 				inquiry_cache_empty(hdev) ||
472 				ir.flags & IREQ_CACHE_FLUSH) {
473 		inquiry_cache_flush(hdev);
476 	hci_dev_unlock(hdev);
	/* ir.length is in 1.28s units; 2000ms per unit approximates that
	 * plus slack for the request round-trip. */
478 	timeo = ir.length * msecs_to_jiffies(2000);
481 		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
486 	/* for unlimited number of responses we will use buffer with 255 entries */
487 	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
489 	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
490 	 * copy it to the user space.
492 	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
499 	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
500 	hci_dev_unlock(hdev);
502 	BT_DBG("num_rsp %d", ir.num_rsp);
	/* First write back the updated request header, then the entries. */
504 	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
506 		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
519 /* ---- HCI ioctl helpers ---- */
/* Bring device @dev up: open the transport, run controller init (unless
 * the device is raw), set HCI_UP and notify. On init failure the work
 * items are flushed and queues purged. Returns 0 or -errno. */
521 int hci_dev_open(__u16 dev)
523 	struct hci_dev *hdev;
526 	hdev = hci_dev_get(dev);
530 	BT_DBG("%s %p", hdev->name, hdev);
	/* Respect a soft rfkill block before touching the hardware. */
534 	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
539 	if (test_bit(HCI_UP, &hdev->flags)) {
544 	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
545 		set_bit(HCI_RAW, &hdev->flags);
547 	/* Treat all non BR/EDR controllers as raw devices if
548 	   enable_hs is not set */
549 	if (hdev->dev_type != HCI_BREDR && !enable_hs)
550 		set_bit(HCI_RAW, &hdev->flags);
	/* Driver transport open; failure path is elided in this listing. */
552 	if (hdev->open(hdev)) {
557 	if (!test_bit(HCI_RAW, &hdev->flags)) {
558 		atomic_set(&hdev->cmd_cnt, 1);
559 		set_bit(HCI_INIT, &hdev->flags);
560 		hdev->init_last_cmd = 0;
562 		ret = __hci_request(hdev, hci_init_req, 0,
563 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
565 		if (lmp_host_le_capable(hdev))
566 			ret = __hci_request(hdev, hci_le_init_req, 0,
567 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
569 		clear_bit(HCI_INIT, &hdev->flags);
574 		set_bit(HCI_UP, &hdev->flags);
575 		hci_notify(hdev, HCI_DEV_UP);
576 		if (!test_bit(HCI_SETUP, &hdev->flags)) {
578 			mgmt_powered(hdev, 1);
579 			hci_dev_unlock(hdev);
582 		/* Init failed, cleanup */
583 		flush_work(&hdev->tx_work);
584 		flush_work(&hdev->cmd_work);
585 		flush_work(&hdev->rx_work);
587 		skb_queue_purge(&hdev->cmd_q);
588 		skb_queue_purge(&hdev->rx_q);
593 		if (hdev->sent_cmd) {
594 			kfree_skb(hdev->sent_cmd);
595 			hdev->sent_cmd = NULL;
603 	hci_req_unlock(hdev);
/* Tear the device down: cancel pending requests and delayed work, flush
 * connections and caches, optionally send a final reset, purge all queues
 * and close the driver transport. Safe to call on an already-down device. */
608 static int hci_dev_do_close(struct hci_dev *hdev)
610 	BT_DBG("%s %p", hdev->name, hdev);
612 	hci_req_cancel(hdev, ENODEV);
	/* Already down: just stop the command timer and bail out. */
615 	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
616 		del_timer_sync(&hdev->cmd_timer);
617 		hci_req_unlock(hdev);
621 	/* Flush RX and TX works */
622 	flush_work(&hdev->tx_work);
623 	flush_work(&hdev->rx_work);
625 	if (hdev->discov_timeout > 0) {
626 		cancel_delayed_work(&hdev->discov_off);
627 		hdev->discov_timeout = 0;
630 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
631 		cancel_delayed_work(&hdev->power_off);
633 	if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
634 		cancel_delayed_work(&hdev->service_cache);
637 	inquiry_cache_flush(hdev);
638 	hci_conn_hash_flush(hdev);
639 	hci_dev_unlock(hdev);
641 	hci_notify(hdev, HCI_DEV_DOWN);
	/* Reset the controller before close unless it is a raw device. */
647 	skb_queue_purge(&hdev->cmd_q);
648 	atomic_set(&hdev->cmd_cnt, 1);
649 	if (!test_bit(HCI_RAW, &hdev->flags)) {
650 		set_bit(HCI_INIT, &hdev->flags);
651 		__hci_request(hdev, hci_reset_req, 0,
652 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
653 		clear_bit(HCI_INIT, &hdev->flags);
657 	flush_work(&hdev->cmd_work);
660 	skb_queue_purge(&hdev->rx_q);
661 	skb_queue_purge(&hdev->cmd_q);
662 	skb_queue_purge(&hdev->raw_q);
664 	/* Drop last sent command */
665 	if (hdev->sent_cmd) {
666 		del_timer_sync(&hdev->cmd_timer);
667 		kfree_skb(hdev->sent_cmd);
668 		hdev->sent_cmd = NULL;
671 	/* After this point our queues are empty
672 	 * and no tasks are scheduled. */
676 	mgmt_powered(hdev, 0);
677 	hci_dev_unlock(hdev);
682 	hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl backend: resolve the index and close the device. */
688 int hci_dev_close(__u16 dev)
690 	struct hci_dev *hdev;
693 	hdev = hci_dev_get(dev);
696 	err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl backend: drop queued traffic, flush the inquiry cache
 * and connections, reset packet counters, and (for non-raw devices) issue
 * an HCI_Reset to the controller. */
701 int hci_dev_reset(__u16 dev)
703 	struct hci_dev *hdev;
706 	hdev = hci_dev_get(dev);
712 	if (!test_bit(HCI_UP, &hdev->flags))
716 	skb_queue_purge(&hdev->rx_q);
717 	skb_queue_purge(&hdev->cmd_q);
720 	inquiry_cache_flush(hdev);
721 	hci_conn_hash_flush(hdev);
722 	hci_dev_unlock(hdev);
	/* Restart flow control accounting from a clean slate. */
727 	atomic_set(&hdev->cmd_cnt, 1);
728 	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
730 	if (!test_bit(HCI_RAW, &hdev->flags))
731 		ret = __hci_request(hdev, hci_reset_req, 0,
732 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
735 	hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl backend: zero the device's byte/error statistics. */
740 int hci_dev_reset_stat(__u16 dev)
742 	struct hci_dev *hdev;
745 	hdev = hci_dev_get(dev);
749 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatch the HCISET* family of ioctls: each case either runs a
 * synchronous HCI request or directly updates a local hdev field.
 * NOTE(review): the case labels themselves are elided in this listing. */
756 int hci_dev_cmd(unsigned int cmd, void __user *arg)
758 	struct hci_dev *hdev;
759 	struct hci_dev_req dr;
762 	if (copy_from_user(&dr, arg, sizeof(dr)))
765 	hdev = hci_dev_get(dr.dev_id);
771 		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
772 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
776 		if (!lmp_encrypt_capable(hdev)) {
781 		if (!test_bit(HCI_AUTH, &hdev->flags)) {
782 			/* Auth must be enabled first */
783 			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
784 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
789 		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
790 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
794 		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
795 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
799 		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
800 					msecs_to_jiffies(HCI_INIT_TIMEOUT));
804 		hdev->link_mode = ((__u16) dr.dev_opt) &
805 					(HCI_LM_MASTER | HCI_LM_ACCEPT);
809 		hdev->pkt_type = (__u16) dr.dev_opt;
	/* dev_opt packs two 16-bit values: [0] = packet count, [1] = MTU. */
813 		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
814 		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
818 		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
819 		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl backend: snapshot id/flags of every registered
 * device into a kernel buffer, then copy it to user space. Also keeps
 * auto-off devices alive while userspace is enumerating them. */
831 int hci_get_dev_list(void __user *arg)
833 	struct hci_dev *hdev;
834 	struct hci_dev_list_req *dl;
835 	struct hci_dev_req *dr;
836 	int n = 0, size, err;
839 	if (get_user(dev_num, (__u16 __user *) arg))
	/* Cap the request so the allocation below stays bounded. */
842 	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
845 	size = sizeof(*dl) + dev_num * sizeof(*dr);
847 	dl = kzalloc(size, GFP_KERNEL);
853 	read_lock_bh(&hci_dev_list_lock);
854 	list_for_each_entry(hdev, &hci_dev_list, list) {
855 		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
856 			cancel_delayed_work(&hdev->power_off);
858 		if (!test_bit(HCI_MGMT, &hdev->flags))
859 			set_bit(HCI_PAIRABLE, &hdev->flags);
861 		(dr + n)->dev_id  = hdev->id;
862 		(dr + n)->dev_opt = hdev->flags;
867 	read_unlock_bh(&hci_dev_list_lock);
	/* Shrink the copy to the number of devices actually found. */
870 	size = sizeof(*dl) + n * sizeof(*dr);
872 	err = copy_to_user(arg, dl, size);
875 	return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info from the
 * device identified by di.dev_id and copy it back to user space. */
878 int hci_get_dev_info(void __user *arg)
880 	struct hci_dev *hdev;
881 	struct hci_dev_info di;
884 	if (copy_from_user(&di, arg, sizeof(di)))
887 	hdev = hci_dev_get(di.dev_id);
	/* Userspace touched the device: cancel any pending auto power-off. */
891 	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
892 		cancel_delayed_work_sync(&hdev->power_off);
894 	if (!test_bit(HCI_MGMT, &hdev->flags))
895 		set_bit(HCI_PAIRABLE, &hdev->flags);
897 	strcpy(di.name, hdev->name);
898 	di.bdaddr   = hdev->bdaddr;
899 	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
900 	di.flags    = hdev->flags;
901 	di.pkt_type = hdev->pkt_type;
902 	di.acl_mtu  = hdev->acl_mtu;
903 	di.acl_pkts = hdev->acl_pkts;
904 	di.sco_mtu  = hdev->sco_mtu;
905 	di.sco_pkts = hdev->sco_pkts;
906 	di.link_policy = hdev->link_policy;
907 	di.link_mode   = hdev->link_mode;
909 	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
910 	memcpy(&di.features, &hdev->features, sizeof(di.features));
912 	if (copy_to_user(arg, &di, sizeof(di)))
920 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: when the switch blocks radio, force the device down. */
922 static int hci_rfkill_set_block(void *data, bool blocked)
924 	struct hci_dev *hdev = data;
926 	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
931 	hci_dev_do_close(hdev);
936 static const struct rfkill_ops hci_rfkill_ops = {
937 	.set_block = hci_rfkill_set_block,
940 /* Alloc HCI device */
/* Allocate a zeroed hci_dev and set up sysfs plus the driver_init queue;
 * driver fills in callbacks before hci_register_dev(). */
941 struct hci_dev *hci_alloc_dev(void)
943 	struct hci_dev *hdev;
945 	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
949 	hci_init_sysfs(hdev);
950 	skb_queue_head_init(&hdev->driver_init);
954 EXPORT_SYMBOL(hci_alloc_dev);
956 /* Free HCI device */
/* Release a device allocated by hci_alloc_dev(); the actual kfree happens
 * in the embedded struct device's release hook via put_device(). */
957 void hci_free_dev(struct hci_dev *hdev)
959 	skb_queue_purge(&hdev->driver_init);
961 	/* will free via device release */
962 	put_device(&hdev->dev);
964 EXPORT_SYMBOL(hci_free_dev);
/* Deferred power-on worker: opens the device; if it is still in AUTO_OFF
 * it schedules the power-off timer, and clears SETUP once initialized. */
966 static void hci_power_on(struct work_struct *work)
968 	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
970 	BT_DBG("%s", hdev->name);
972 	if (hci_dev_open(hdev->id) < 0)
975 	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
976 		schedule_delayed_work(&hdev->power_off,
977 					msecs_to_jiffies(AUTO_OFF_TIMEOUT));
979 	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
980 		mgmt_index_added(hdev);
/* Deferred power-off worker fired by the AUTO_OFF timer. */
983 static void hci_power_off(struct work_struct *work)
985 	struct hci_dev *hdev = container_of(work, struct hci_dev,
988 	BT_DBG("%s", hdev->name);
990 	clear_bit(HCI_AUTO_OFF, &hdev->flags);
992 	hci_dev_close(hdev->id);
/* Delayed worker that ends the discoverable period by rewriting the scan
 * enable setting and zeroing the timeout. */
995 static void hci_discov_off(struct work_struct *work)
997 	struct hci_dev *hdev;
1000 	hdev = container_of(work, struct hci_dev, discov_off.work);
1002 	BT_DBG("%s", hdev->name);
1006 	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1008 	hdev->discov_timeout = 0;
1010 	hci_dev_unlock(hdev);
/* Free every registered service UUID on this device. */
1013 int hci_uuids_clear(struct hci_dev *hdev)
1015 	struct list_head *p, *n;
1017 	list_for_each_safe(p, n, &hdev->uuids) {
1018 		struct bt_uuid *uuid;
1020 		uuid = list_entry(p, struct bt_uuid, list);
/* Free every stored link key on this device. */
1029 int hci_link_keys_clear(struct hci_dev *hdev)
1031 	struct list_head *p, *n;
1033 	list_for_each_safe(p, n, &hdev->link_keys) {
1034 		struct link_key *key;
1036 		key = list_entry(p, struct link_key, list);
/* Look up a stored link key by remote address; NULL when absent. */
1045 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1049 	list_for_each_entry(k, &hdev->link_keys, list)
1050 		if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly delivered link key should be stored persistently,
 * based on key type and the bonding requirements both sides declared. */
1056 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1057 						u8 key_type, u8 old_key_type)
	/* Legacy (pre-SSP) key types below 0x03 are always stored. */
1060 	if (key_type < 0x03)
1063 	/* Debug keys are insecure so don't store them persistently */
1064 	if (key_type == HCI_LK_DEBUG_COMBINATION)
1067 	/* Changed combination key and there's no previous one */
1068 	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1071 	/* Security mode 3 case */
1075 	/* Neither local nor remote side had no-bonding as requirement */
1076 	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1079 	/* Local side had dedicated bonding as requirement */
1080 	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1083 	/* Remote side had dedicated bonding as requirement */
1084 	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1087 	/* If none of the above criteria match, then don't store the key
/* Find a stored SMP Long Term Key matching @ediv and the 8-byte @rand;
 * the LTK master id lives in the link_key's variable-length data area. */
1092 struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1096 	list_for_each_entry(k, &hdev->link_keys, list) {
1097 		struct key_master_id *id;
1099 		if (k->type != HCI_LK_SMP_LTK)
1102 		if (k->dlen != sizeof(*id))
1105 		id = (void *) &k->data;
1106 		if (id->ediv == ediv &&
1107 				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1113 EXPORT_SYMBOL(hci_find_ltk);
/* Look up a stored link key by remote address AND key type. */
1115 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1116 						bdaddr_t *bdaddr, u8 type)
1120 	list_for_each_entry(k, &hdev->link_keys, list)
1121 		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1126 EXPORT_SYMBOL(hci_find_link_key_type);
/* Store (or update) a link key delivered by the controller, work around
 * bogus changed-combination keys, and tell mgmt whether it is persistent.
 * Non-persistent keys are unlinked again after notification. */
1128 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1129 				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1131 	struct link_key *key, *old_key;
1132 	u8 old_key_type, persistent;
1134 	old_key = hci_find_link_key(hdev, bdaddr);
1136 		old_key_type = old_key->type;
	/* 0xff marks "no previous key of known type". */
1139 		old_key_type = conn ? conn->key_type : 0xff;
1140 		key = kzalloc(sizeof(*key), GFP_ATOMIC);
1143 		list_add(&key->list, &hdev->link_keys);
1146 	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1148 	/* Some buggy controller combinations generate a changed
1149 	 * combination key for legacy pairing even when there's no
1151 	if (type == HCI_LK_CHANGED_COMBINATION &&
1152 					(!conn || conn->remote_auth == 0xff) &&
1153 					old_key_type == 0xff) {
1154 		type = HCI_LK_COMBINATION;
1156 			conn->key_type = type;
1159 	bacpy(&key->bdaddr, bdaddr);
1160 	memcpy(key->val, val, 16);
1161 	key->pin_len = pin_len;
	/* A changed key keeps its previous type classification. */
1163 	if (type == HCI_LK_CHANGED_COMBINATION)
1164 		key->type = old_key_type;
1171 	persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1173 		mgmt_new_link_key(hdev, key, persistent);
1176 		list_del(&key->list);
/* Store an SMP Long Term Key, reusing the generic link_key storage with a
 * trailing key_master_id (ediv + rand) in the variable-length data area. */
1183 int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1184 			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
1186 	struct link_key *key, *old_key;
1187 	struct key_master_id *id;
1190 	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1192 	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1195 		old_key_type = old_key->type;
	/* No existing LTK: allocate key struct plus master-id payload. */
1197 		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1200 		list_add(&key->list, &hdev->link_keys);
1201 		old_key_type = 0xff;
1204 	key->dlen = sizeof(*id);
1206 	bacpy(&key->bdaddr, bdaddr);
1207 	memcpy(key->val, ltk, sizeof(key->val));
1208 	key->type = HCI_LK_SMP_LTK;
1209 	key->pin_len = key_size;
1211 	id = (void *) &key->data;
1213 	memcpy(id->rand, rand, sizeof(id->rand));
1216 		mgmt_new_link_key(hdev, key, old_key_type);
/* Remove and free the link key stored for @bdaddr, if any. */
1221 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1223 	struct link_key *key;
1225 	key = hci_find_link_key(hdev, bdaddr);
1229 	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1231 	list_del(&key->list);
1237 /* HCI command timer function */
/* Fires when the controller never answered the last command: log it,
 * restore the command credit and kick the command worker again. */
1238 static void hci_cmd_timer(unsigned long arg)
1240 	struct hci_dev *hdev = (void *) arg;
1242 	BT_ERR("%s command tx timeout", hdev->name);
1243 	atomic_set(&hdev->cmd_cnt, 1);
1244 	queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote out-of-band pairing data by address. */
1247 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1250 	struct oob_data *data;
1252 	list_for_each_entry(data, &hdev->remote_oob_data, list)
1253 		if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Remove and free the OOB data stored for @bdaddr, if any. */
1259 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1261 	struct oob_data *data;
1263 	data = hci_find_remote_oob_data(hdev, bdaddr);
1267 	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1269 	list_del(&data->list);
/* Free all stored remote OOB data entries. */
1275 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1277 	struct oob_data *data, *n;
1279 	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1280 		list_del(&data->list);
/* Store (or refresh) the SSP hash/randomizer received for @bdaddr. */
1287 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1290 	struct oob_data *data;
1292 	data = hci_find_remote_oob_data(hdev, bdaddr);
1295 		data = kmalloc(sizeof(*data), GFP_ATOMIC);
1299 		bacpy(&data->bdaddr, bdaddr);
1300 		list_add(&data->list, &hdev->remote_oob_data);
1303 	memcpy(data->hash, hash, sizeof(data->hash));
1304 	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1306 	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
/* Look up a blacklist entry by address; NULL when not blocked. */
1311 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1314 	struct bdaddr_list *b;
1316 	list_for_each_entry(b, &hdev->blacklist, list)
1317 		if (bacmp(bdaddr, &b->bdaddr) == 0)
/* Free every blacklist entry. */
1323 int hci_blacklist_clear(struct hci_dev *hdev)
1325 	struct list_head *p, *n;
1327 	list_for_each_safe(p, n, &hdev->blacklist) {
1328 		struct bdaddr_list *b;
1330 		b = list_entry(p, struct bdaddr_list, list);
/* Block @bdaddr: reject BDADDR_ANY and duplicates, store the entry, and
 * notify mgmt. Return value is mgmt's. */
1339 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1341 	struct bdaddr_list *entry;
1343 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1346 	if (hci_blacklist_lookup(hdev, bdaddr))
1349 	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1353 	bacpy(&entry->bdaddr, bdaddr);
1355 	list_add(&entry->list, &hdev->blacklist);
1357 	return mgmt_device_blocked(hdev, bdaddr);
/* Unblock @bdaddr; BDADDR_ANY clears the whole list. */
1360 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1362 	struct bdaddr_list *entry;
1364 	if (bacmp(bdaddr, BDADDR_ANY) == 0)
1365 		return hci_blacklist_clear(hdev);
1367 	entry = hci_blacklist_lookup(hdev, bdaddr);
1371 	list_del(&entry->list);
1374 	return mgmt_device_unblocked(hdev, bdaddr);
/* Delayed worker: clear the LE advertising cache under the device lock. */
1377 static void hci_clear_adv_cache(struct work_struct *work)
1379 	struct hci_dev *hdev = container_of(work, struct hci_dev,
1384 	hci_adv_entries_clear(hdev);
1386 	hci_dev_unlock(hdev);
/* Free every cached advertising entry. */
1389 int hci_adv_entries_clear(struct hci_dev *hdev)
1391 	struct adv_entry *entry, *tmp;
1393 	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1394 		list_del(&entry->list);
1398 	BT_DBG("%s adv cache cleared", hdev->name);
/* Look up a cached advertising entry by address. */
1403 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1405 	struct adv_entry *entry;
1407 	list_for_each_entry(entry, &hdev->adv_entries, list)
1408 		if (bacmp(bdaddr, &entry->bdaddr) == 0)
/* Only ADV_IND and ADV_DIRECT_IND advertisers accept connections. */
1414 static inline int is_connectable_adv(u8 evt_type)
1416 	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
/* Cache a connectable LE advertiser so a later connect can use its
 * address/type; duplicates and non-connectable reports are ignored. */
1422 int hci_add_adv_entry(struct hci_dev *hdev,
1423 					struct hci_ev_le_advertising_info *ev)
1425 	struct adv_entry *entry;
1427 	if (!is_connectable_adv(ev->evt_type))
1430 	/* Only new entries should be added to adv_entries. So, if
1431 	 * bdaddr was found, don't add it. */
1432 	if (hci_find_adv_entry(hdev, &ev->bdaddr))
1435 	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1439 	bacpy(&entry->bdaddr, &ev->bdaddr);
1440 	entry->bdaddr_type = ev->bdaddr_type;
1442 	list_add(&entry->list, &hdev->adv_entries);
1444 	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1445 					batostr(&entry->bdaddr), entry->bdaddr_type);
1450 /* Register HCI device */
/* Register a driver-prepared hci_dev: assign the first free id, initialize
 * all state (locks, queues, work items, lists, timers), create the
 * workqueue, sysfs and rfkill hooks, then schedule the power-on worker.
 * NOTE(review): error labels and early returns are elided in this listing. */
1451 int hci_register_dev(struct hci_dev *hdev)
1453 	struct list_head *head = &hci_dev_list, *p;
1456 	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1457 						hdev->bus, hdev->owner);
	/* Mandatory driver callbacks must be present. */
1459 	if (!hdev->open || !hdev->close || !hdev->destruct)
1462 	/* Do not allow HCI_AMP devices to register at index 0,
1463 	 * so the index can be used as the AMP controller ID.
1465 	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1467 	write_lock_bh(&hci_dev_list_lock);
1469 	/* Find first available device id */
1470 	list_for_each(p, &hci_dev_list) {
1471 		if (list_entry(p, struct hci_dev, list)->id != id)
1476 	sprintf(hdev->name, "hci%d", id);
1478 	list_add_tail(&hdev->list, head);
1480 	atomic_set(&hdev->refcnt, 1);
1481 	mutex_init(&hdev->lock);
1484 	hdev->dev_flags = 0;
1485 	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1486 	hdev->esco_type = (ESCO_HV1);
1487 	hdev->link_mode = (HCI_LM_ACCEPT);
1488 	hdev->io_capability = 0x03; /* No Input No Output */
1490 	hdev->idle_timeout = 0;
	/* Sniff intervals are in baseband slots (0.625 ms each). */
1491 	hdev->sniff_max_interval = 800;
1492 	hdev->sniff_min_interval = 80;
1494 	INIT_WORK(&hdev->rx_work, hci_rx_work);
1495 	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1496 	INIT_WORK(&hdev->tx_work, hci_tx_work);
1499 	skb_queue_head_init(&hdev->rx_q);
1500 	skb_queue_head_init(&hdev->cmd_q);
1501 	skb_queue_head_init(&hdev->raw_q);
1503 	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1505 	for (i = 0; i < NUM_REASSEMBLY; i++)
1506 		hdev->reassembly[i] = NULL;
1508 	init_waitqueue_head(&hdev->req_wait_q);
1509 	mutex_init(&hdev->req_lock);
1511 	inquiry_cache_init(hdev);
1513 	hci_conn_hash_init(hdev);
1515 	INIT_LIST_HEAD(&hdev->mgmt_pending);
1517 	INIT_LIST_HEAD(&hdev->blacklist);
1519 	INIT_LIST_HEAD(&hdev->uuids);
1521 	INIT_LIST_HEAD(&hdev->link_keys);
1523 	INIT_LIST_HEAD(&hdev->remote_oob_data);
1525 	INIT_LIST_HEAD(&hdev->adv_entries);
1527 	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1528 	INIT_WORK(&hdev->power_on, hci_power_on);
1529 	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1531 	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1533 	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1535 	atomic_set(&hdev->promisc, 0);
1537 	write_unlock_bh(&hci_dev_list_lock);
1539 	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1541 	if (!hdev->workqueue) {
1546 	error = hci_add_sysfs(hdev);
1550 	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1551 				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	/* rfkill registration failure is non-fatal: continue without it. */
1553 		if (rfkill_register(hdev->rfkill) < 0) {
1554 			rfkill_destroy(hdev->rfkill);
1555 			hdev->rfkill = NULL;
1559 	set_bit(HCI_AUTO_OFF, &hdev->flags);
1560 	set_bit(HCI_SETUP, &hdev->flags);
1561 	schedule_work(&hdev->power_on);
1563 	hci_notify(hdev, HCI_DEV_REG);
	/* Error path: undo workqueue creation and list insertion. */
1568 	destroy_workqueue(hdev->workqueue);
1570 	write_lock_bh(&hci_dev_list_lock);
1571 	list_del(&hdev->list);
1572 	write_unlock_bh(&hci_dev_list_lock);
1576 EXPORT_SYMBOL(hci_register_dev);
1578 /* Unregister HCI device */
/* Unregister: unlink from the device list, close the device, free all
 * per-device caches/keys/lists, tear down rfkill/sysfs/workqueue and drop
 * the final reference. */
1579 void hci_unregister_dev(struct hci_dev *hdev)
1583 	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1585 	write_lock_bh(&hci_dev_list_lock);
1586 	list_del(&hdev->list);
1587 	write_unlock_bh(&hci_dev_list_lock);
1589 	hci_dev_do_close(hdev);
1591 	for (i = 0; i < NUM_REASSEMBLY; i++)
1592 		kfree_skb(hdev->reassembly[i]);
	/* Only announce removal to mgmt for fully set-up devices. */
1594 	if (!test_bit(HCI_INIT, &hdev->flags) &&
1595 					!test_bit(HCI_SETUP, &hdev->flags)) {
1597 		mgmt_index_removed(hdev);
1598 		hci_dev_unlock(hdev);
1601 	/* mgmt_index_removed should take care of emptying the
1603 	BUG_ON(!list_empty(&hdev->mgmt_pending));
1605 	hci_notify(hdev, HCI_DEV_UNREG);
1608 		rfkill_unregister(hdev->rfkill);
1609 		rfkill_destroy(hdev->rfkill);
1612 	hci_del_sysfs(hdev);
1614 	cancel_delayed_work_sync(&hdev->adv_work);
1616 	destroy_workqueue(hdev->workqueue);
1619 	hci_blacklist_clear(hdev);
1620 	hci_uuids_clear(hdev);
1621 	hci_link_keys_clear(hdev);
1622 	hci_remote_oob_data_clear(hdev);
1623 	hci_adv_entries_clear(hdev);
1624 	hci_dev_unlock(hdev);
1626 	__hci_dev_put(hdev);
1628 EXPORT_SYMBOL(hci_unregister_dev);
1630 /* Suspend HCI device */
/* Broadcast a suspend notification for this device. */
1631 int hci_suspend_dev(struct hci_dev *hdev)
1633 	hci_notify(hdev, HCI_DEV_SUSPEND);
1636 EXPORT_SYMBOL(hci_suspend_dev);
1638 /* Resume HCI device */
/* Broadcast a resume notification for this device. */
1639 int hci_resume_dev(struct hci_dev *hdev)
1641 	hci_notify(hdev, HCI_DEV_RESUME);
1644 EXPORT_SYMBOL(hci_resume_dev);
1646 /* Receive frame from HCI drivers */
/* Driver RX entry point: reject frames unless the device is up or in
 * init, timestamp the skb and hand it to the RX worker. */
1647 int hci_recv_frame(struct sk_buff *skb)
1649 	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1650 	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1651 				&& !test_bit(HCI_INIT, &hdev->flags))) {
1657 	bt_cb(skb)->incoming = 1;
1660 	__net_timestamp(skb);
1662 	skb_queue_tail(&hdev->rx_q, skb);
1663 	queue_work(hdev->workqueue, &hdev->rx_work);
1667 EXPORT_SYMBOL(hci_recv_frame);
1669 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1670 int count, __u8 index)
1675 struct sk_buff *skb;
1676 struct bt_skb_cb *scb;
1678 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1679 index >= NUM_REASSEMBLY)
1682 skb = hdev->reassembly[index];
1686 case HCI_ACLDATA_PKT:
1687 len = HCI_MAX_FRAME_SIZE;
1688 hlen = HCI_ACL_HDR_SIZE;
1691 len = HCI_MAX_EVENT_SIZE;
1692 hlen = HCI_EVENT_HDR_SIZE;
1694 case HCI_SCODATA_PKT:
1695 len = HCI_MAX_SCO_SIZE;
1696 hlen = HCI_SCO_HDR_SIZE;
1700 skb = bt_skb_alloc(len, GFP_ATOMIC);
1704 scb = (void *) skb->cb;
1706 scb->pkt_type = type;
1708 skb->dev = (void *) hdev;
1709 hdev->reassembly[index] = skb;
1713 scb = (void *) skb->cb;
1714 len = min(scb->expect, (__u16)count);
1716 memcpy(skb_put(skb, len), data, len);
1725 if (skb->len == HCI_EVENT_HDR_SIZE) {
1726 struct hci_event_hdr *h = hci_event_hdr(skb);
1727 scb->expect = h->plen;
1729 if (skb_tailroom(skb) < scb->expect) {
1731 hdev->reassembly[index] = NULL;
1737 case HCI_ACLDATA_PKT:
1738 if (skb->len == HCI_ACL_HDR_SIZE) {
1739 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1740 scb->expect = __le16_to_cpu(h->dlen);
1742 if (skb_tailroom(skb) < scb->expect) {
1744 hdev->reassembly[index] = NULL;
1750 case HCI_SCODATA_PKT:
1751 if (skb->len == HCI_SCO_HDR_SIZE) {
1752 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1753 scb->expect = h->dlen;
1755 if (skb_tailroom(skb) < scb->expect) {
1757 hdev->reassembly[index] = NULL;
1764 if (scb->expect == 0) {
1765 /* Complete frame */
1767 bt_cb(skb)->pkt_type = type;
1768 hci_recv_frame(skb);
1770 hdev->reassembly[index] = NULL;
1778 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1782 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1786 rem = hci_reassembly(hdev, type, data, count, type - 1);
1790 data += (count - rem);
1796 EXPORT_SYMBOL(hci_recv_fragment);
/* Reassembly slot used for byte-stream (untyped) driver input */
#define STREAM_REASSEMBLY 0
1800 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1806 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1809 struct { char type; } *pkt;
1811 /* Start of the frame */
1818 type = bt_cb(skb)->pkt_type;
1820 rem = hci_reassembly(hdev, type, data, count,
1825 data += (count - rem);
1831 EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
1837 int hci_register_proto(struct hci_proto *hp)
1841 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1843 if (hp->id >= HCI_MAX_PROTO)
1846 mutex_lock(&hci_task_lock);
1848 if (!hci_proto[hp->id])
1849 hci_proto[hp->id] = hp;
1853 mutex_unlock(&hci_task_lock);
1857 EXPORT_SYMBOL(hci_register_proto);
1859 int hci_unregister_proto(struct hci_proto *hp)
1863 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1865 if (hp->id >= HCI_MAX_PROTO)
1868 mutex_lock(&hci_task_lock);
1870 if (hci_proto[hp->id])
1871 hci_proto[hp->id] = NULL;
1875 mutex_unlock(&hci_task_lock);
1879 EXPORT_SYMBOL(hci_unregister_proto);
1881 int hci_register_cb(struct hci_cb *cb)
1883 BT_DBG("%p name %s", cb, cb->name);
1885 write_lock_bh(&hci_cb_list_lock);
1886 list_add(&cb->list, &hci_cb_list);
1887 write_unlock_bh(&hci_cb_list_lock);
1891 EXPORT_SYMBOL(hci_register_cb);
1893 int hci_unregister_cb(struct hci_cb *cb)
1895 BT_DBG("%p name %s", cb, cb->name);
1897 write_lock_bh(&hci_cb_list_lock);
1898 list_del(&cb->list);
1899 write_unlock_bh(&hci_cb_list_lock);
1903 EXPORT_SYMBOL(hci_unregister_cb);
1905 static int hci_send_frame(struct sk_buff *skb)
1907 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1914 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1916 if (atomic_read(&hdev->promisc)) {
1918 __net_timestamp(skb);
1920 hci_send_to_sock(hdev, skb, NULL);
1923 /* Get rid of skb owner, prior to sending to the driver. */
1926 return hdev->send(skb);
1929 /* Send HCI command */
1930 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1932 int len = HCI_COMMAND_HDR_SIZE + plen;
1933 struct hci_command_hdr *hdr;
1934 struct sk_buff *skb;
1936 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1938 skb = bt_skb_alloc(len, GFP_ATOMIC);
1940 BT_ERR("%s no memory for command", hdev->name);
1944 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1945 hdr->opcode = cpu_to_le16(opcode);
1949 memcpy(skb_put(skb, plen), param, plen);
1951 BT_DBG("skb len %d", skb->len);
1953 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1954 skb->dev = (void *) hdev;
1956 if (test_bit(HCI_INIT, &hdev->flags))
1957 hdev->init_last_cmd = opcode;
1959 skb_queue_tail(&hdev->cmd_q, skb);
1960 queue_work(hdev->workqueue, &hdev->cmd_work);
1965 /* Get data from the previously sent command */
1966 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1968 struct hci_command_hdr *hdr;
1970 if (!hdev->sent_cmd)
1973 hdr = (void *) hdev->sent_cmd->data;
1975 if (hdr->opcode != cpu_to_le16(opcode))
1978 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1980 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1984 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1986 struct hci_acl_hdr *hdr;
1989 skb_push(skb, HCI_ACL_HDR_SIZE);
1990 skb_reset_transport_header(skb);
1991 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1992 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1993 hdr->dlen = cpu_to_le16(len);
1996 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1997 struct sk_buff *skb, __u16 flags)
1999 struct hci_dev *hdev = conn->hdev;
2000 struct sk_buff *list;
2002 list = skb_shinfo(skb)->frag_list;
2004 /* Non fragmented */
2005 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2007 skb_queue_tail(queue, skb);
2010 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2012 skb_shinfo(skb)->frag_list = NULL;
2014 /* Queue all fragments atomically */
2015 spin_lock_bh(&queue->lock);
2017 __skb_queue_tail(queue, skb);
2019 flags &= ~ACL_START;
2022 skb = list; list = list->next;
2024 skb->dev = (void *) hdev;
2025 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2026 hci_add_acl_hdr(skb, conn->handle, flags);
2028 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2030 __skb_queue_tail(queue, skb);
2033 spin_unlock_bh(&queue->lock);
2037 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2039 struct hci_conn *conn = chan->conn;
2040 struct hci_dev *hdev = conn->hdev;
2042 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2044 skb->dev = (void *) hdev;
2045 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2046 hci_add_acl_hdr(skb, conn->handle, flags);
2048 hci_queue_acl(conn, &chan->data_q, skb, flags);
2050 queue_work(hdev->workqueue, &hdev->tx_work);
2052 EXPORT_SYMBOL(hci_send_acl);
2055 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2057 struct hci_dev *hdev = conn->hdev;
2058 struct hci_sco_hdr hdr;
2060 BT_DBG("%s len %d", hdev->name, skb->len);
2062 hdr.handle = cpu_to_le16(conn->handle);
2063 hdr.dlen = skb->len;
2065 skb_push(skb, HCI_SCO_HDR_SIZE);
2066 skb_reset_transport_header(skb);
2067 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2069 skb->dev = (void *) hdev;
2070 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2072 skb_queue_tail(&conn->data_q, skb);
2073 queue_work(hdev->workqueue, &hdev->tx_work);
2075 EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
2080 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2082 struct hci_conn_hash *h = &hdev->conn_hash;
2083 struct hci_conn *conn = NULL, *c;
2084 int num = 0, min = ~0;
2086 /* We don't have to lock device here. Connections are always
2087 * added and removed with TX task disabled. */
2091 list_for_each_entry_rcu(c, &h->list, list) {
2092 if (c->type != type || skb_queue_empty(&c->data_q))
2095 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2100 if (c->sent < min) {
2105 if (hci_conn_num(hdev, type) == num)
2114 switch (conn->type) {
2116 cnt = hdev->acl_cnt;
2120 cnt = hdev->sco_cnt;
2123 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2127 BT_ERR("Unknown link type");
2135 BT_DBG("conn %p quote %d", conn, *quote);
2139 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2141 struct hci_conn_hash *h = &hdev->conn_hash;
2144 BT_ERR("%s link tx timeout", hdev->name);
2148 /* Kill stalled connections */
2149 list_for_each_entry_rcu(c, &h->list, list) {
2150 if (c->type == type && c->sent) {
2151 BT_ERR("%s killing stalled connection %s",
2152 hdev->name, batostr(&c->dst));
2153 hci_acl_disconn(c, 0x13);
2160 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2163 struct hci_conn_hash *h = &hdev->conn_hash;
2164 struct hci_chan *chan = NULL;
2165 int num = 0, min = ~0, cur_prio = 0;
2166 struct hci_conn *conn;
2167 int cnt, q, conn_num = 0;
2169 BT_DBG("%s", hdev->name);
2173 list_for_each_entry_rcu(conn, &h->list, list) {
2174 struct hci_chan *tmp;
2176 if (conn->type != type)
2179 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2184 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2185 struct sk_buff *skb;
2187 if (skb_queue_empty(&tmp->data_q))
2190 skb = skb_peek(&tmp->data_q);
2191 if (skb->priority < cur_prio)
2194 if (skb->priority > cur_prio) {
2197 cur_prio = skb->priority;
2202 if (conn->sent < min) {
2208 if (hci_conn_num(hdev, type) == conn_num)
2217 switch (chan->conn->type) {
2219 cnt = hdev->acl_cnt;
2223 cnt = hdev->sco_cnt;
2226 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2230 BT_ERR("Unknown link type");
2235 BT_DBG("chan %p quote %d", chan, *quote);
2239 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2241 struct hci_conn_hash *h = &hdev->conn_hash;
2242 struct hci_conn *conn;
2245 BT_DBG("%s", hdev->name);
2249 list_for_each_entry_rcu(conn, &h->list, list) {
2250 struct hci_chan *chan;
2252 if (conn->type != type)
2255 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2260 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2261 struct sk_buff *skb;
2268 if (skb_queue_empty(&chan->data_q))
2271 skb = skb_peek(&chan->data_q);
2272 if (skb->priority >= HCI_PRIO_MAX - 1)
2275 skb->priority = HCI_PRIO_MAX - 1;
2277 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2281 if (hci_conn_num(hdev, type) == num)
2289 static inline void hci_sched_acl(struct hci_dev *hdev)
2291 struct hci_chan *chan;
2292 struct sk_buff *skb;
2296 BT_DBG("%s", hdev->name);
2298 if (!hci_conn_num(hdev, ACL_LINK))
2301 if (!test_bit(HCI_RAW, &hdev->flags)) {
2302 /* ACL tx timeout must be longer than maximum
2303 * link supervision timeout (40.9 seconds) */
2304 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
2305 hci_link_tx_to(hdev, ACL_LINK);
2308 cnt = hdev->acl_cnt;
2310 while (hdev->acl_cnt &&
2311 (chan = hci_chan_sent(hdev, ACL_LINK, "e))) {
2312 u32 priority = (skb_peek(&chan->data_q))->priority;
2313 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2314 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2315 skb->len, skb->priority);
2317 /* Stop if priority has changed */
2318 if (skb->priority < priority)
2321 skb = skb_dequeue(&chan->data_q);
2323 hci_conn_enter_active_mode(chan->conn,
2324 bt_cb(skb)->force_active);
2326 hci_send_frame(skb);
2327 hdev->acl_last_tx = jiffies;
2335 if (cnt != hdev->acl_cnt)
2336 hci_prio_recalculate(hdev, ACL_LINK);
2340 static inline void hci_sched_sco(struct hci_dev *hdev)
2342 struct hci_conn *conn;
2343 struct sk_buff *skb;
2346 BT_DBG("%s", hdev->name);
2348 if (!hci_conn_num(hdev, SCO_LINK))
2351 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
2352 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2353 BT_DBG("skb %p len %d", skb, skb->len);
2354 hci_send_frame(skb);
2357 if (conn->sent == ~0)
2363 static inline void hci_sched_esco(struct hci_dev *hdev)
2365 struct hci_conn *conn;
2366 struct sk_buff *skb;
2369 BT_DBG("%s", hdev->name);
2371 if (!hci_conn_num(hdev, ESCO_LINK))
2374 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, "e))) {
2375 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2376 BT_DBG("skb %p len %d", skb, skb->len);
2377 hci_send_frame(skb);
2380 if (conn->sent == ~0)
2386 static inline void hci_sched_le(struct hci_dev *hdev)
2388 struct hci_chan *chan;
2389 struct sk_buff *skb;
2390 int quote, cnt, tmp;
2392 BT_DBG("%s", hdev->name);
2394 if (!hci_conn_num(hdev, LE_LINK))
2397 if (!test_bit(HCI_RAW, &hdev->flags)) {
2398 /* LE tx timeout must be longer than maximum
2399 * link supervision timeout (40.9 seconds) */
2400 if (!hdev->le_cnt && hdev->le_pkts &&
2401 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2402 hci_link_tx_to(hdev, LE_LINK);
2405 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2407 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, "e))) {
2408 u32 priority = (skb_peek(&chan->data_q))->priority;
2409 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2410 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2411 skb->len, skb->priority);
2413 /* Stop if priority has changed */
2414 if (skb->priority < priority)
2417 skb = skb_dequeue(&chan->data_q);
2419 hci_send_frame(skb);
2420 hdev->le_last_tx = jiffies;
2431 hdev->acl_cnt = cnt;
2434 hci_prio_recalculate(hdev, LE_LINK);
2437 static void hci_tx_work(struct work_struct *work)
2439 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2440 struct sk_buff *skb;
2442 mutex_lock(&hci_task_lock);
2444 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2445 hdev->sco_cnt, hdev->le_cnt);
2447 /* Schedule queues and send stuff to HCI driver */
2449 hci_sched_acl(hdev);
2451 hci_sched_sco(hdev);
2453 hci_sched_esco(hdev);
2457 /* Send next queued raw (unknown type) packet */
2458 while ((skb = skb_dequeue(&hdev->raw_q)))
2459 hci_send_frame(skb);
2461 mutex_unlock(&hci_task_lock);
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
2467 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2469 struct hci_acl_hdr *hdr = (void *) skb->data;
2470 struct hci_conn *conn;
2471 __u16 handle, flags;
2473 skb_pull(skb, HCI_ACL_HDR_SIZE);
2475 handle = __le16_to_cpu(hdr->handle);
2476 flags = hci_flags(handle);
2477 handle = hci_handle(handle);
2479 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2481 hdev->stat.acl_rx++;
2484 conn = hci_conn_hash_lookup_handle(hdev, handle);
2485 hci_dev_unlock(hdev);
2488 register struct hci_proto *hp;
2490 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2492 /* Send to upper protocol */
2493 hp = hci_proto[HCI_PROTO_L2CAP];
2494 if (hp && hp->recv_acldata) {
2495 hp->recv_acldata(conn, skb, flags);
2499 BT_ERR("%s ACL packet for unknown connection handle %d",
2500 hdev->name, handle);
/* SCO data packet */
2507 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2509 struct hci_sco_hdr *hdr = (void *) skb->data;
2510 struct hci_conn *conn;
2513 skb_pull(skb, HCI_SCO_HDR_SIZE);
2515 handle = __le16_to_cpu(hdr->handle);
2517 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2519 hdev->stat.sco_rx++;
2522 conn = hci_conn_hash_lookup_handle(hdev, handle);
2523 hci_dev_unlock(hdev);
2526 register struct hci_proto *hp;
2528 /* Send to upper protocol */
2529 hp = hci_proto[HCI_PROTO_SCO];
2530 if (hp && hp->recv_scodata) {
2531 hp->recv_scodata(conn, skb);
2535 BT_ERR("%s SCO packet for unknown connection handle %d",
2536 hdev->name, handle);
2542 static void hci_rx_work(struct work_struct *work)
2544 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2545 struct sk_buff *skb;
2547 BT_DBG("%s", hdev->name);
2549 mutex_lock(&hci_task_lock);
2551 while ((skb = skb_dequeue(&hdev->rx_q))) {
2552 if (atomic_read(&hdev->promisc)) {
2553 /* Send copy to the sockets */
2554 hci_send_to_sock(hdev, skb, NULL);
2557 if (test_bit(HCI_RAW, &hdev->flags)) {
2562 if (test_bit(HCI_INIT, &hdev->flags)) {
2563 /* Don't process data packets in this states. */
2564 switch (bt_cb(skb)->pkt_type) {
2565 case HCI_ACLDATA_PKT:
2566 case HCI_SCODATA_PKT:
2573 switch (bt_cb(skb)->pkt_type) {
2575 BT_DBG("%s Event packet", hdev->name);
2576 hci_event_packet(hdev, skb);
2579 case HCI_ACLDATA_PKT:
2580 BT_DBG("%s ACL data packet", hdev->name);
2581 hci_acldata_packet(hdev, skb);
2584 case HCI_SCODATA_PKT:
2585 BT_DBG("%s SCO data packet", hdev->name);
2586 hci_scodata_packet(hdev, skb);
2595 mutex_unlock(&hci_task_lock);
2598 static void hci_cmd_work(struct work_struct *work)
2600 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2601 struct sk_buff *skb;
2603 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2605 /* Send queued commands */
2606 if (atomic_read(&hdev->cmd_cnt)) {
2607 skb = skb_dequeue(&hdev->cmd_q);
2611 kfree_skb(hdev->sent_cmd);
2613 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2614 if (hdev->sent_cmd) {
2615 atomic_dec(&hdev->cmd_cnt);
2616 hci_send_frame(skb);
2617 if (test_bit(HCI_RESET, &hdev->flags))
2618 del_timer(&hdev->cmd_timer);
2620 mod_timer(&hdev->cmd_timer,
2621 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2623 skb_queue_head(&hdev->cmd_q, skb);
2624 queue_work(hdev->workqueue, &hdev->cmd_work);
2629 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2631 /* General inquiry access code (GIAC) */
2632 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2633 struct hci_cp_inquiry cp;
2635 BT_DBG("%s", hdev->name);
2637 if (test_bit(HCI_INQUIRY, &hdev->flags))
2638 return -EINPROGRESS;
2640 memset(&cp, 0, sizeof(cp));
2641 memcpy(&cp.lap, lap, sizeof(cp.lap));
2644 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2647 int hci_cancel_inquiry(struct hci_dev *hdev)
2649 BT_DBG("%s", hdev->name);
2651 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2654 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2657 module_param(enable_hs, bool, 0644);
2658 MODULE_PARM_DESC(enable_hs, "Enable High Speed");