2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
4 Copyright (C) 2011 ProFUSION Embedded Systems
6 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
8 This program is free software; you can redistribute it and/or modify
9 it under the terms of the GNU General Public License version 2 as
10 published by the Free Software Foundation;
12 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
21 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23 SOFTWARE IS DISCLAIMED.
26 /* Bluetooth HCI core. */
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
55 #define AUTO_OFF_TIMEOUT 2000
57 static void hci_rx_work(struct work_struct *work);
58 static void hci_cmd_work(struct work_struct *work);
59 static void hci_tx_work(struct work_struct *work);
62 LIST_HEAD(hci_dev_list);
63 DEFINE_RWLOCK(hci_dev_list_lock);
65 /* HCI callback list */
66 LIST_HEAD(hci_cb_list);
67 DEFINE_RWLOCK(hci_cb_list_lock);
69 /* ---- HCI notifications ---- */
/* Forward a HCI device event (up/down/register/...) to the HCI socket layer. */
71 static void hci_notify(struct hci_dev *hdev, int event)
73 hci_sock_dev_event(hdev, event);
76 /* ---- HCI requests ---- */
/* Complete the pending synchronous request on hdev with 'result' and wake
 * the waiter sleeping in __hci_request(). */
78 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
80 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
82 /* If this is the init phase check if the completed command matches
83 * the last init command, and if not just return.
85 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
88 if (hdev->req_status == HCI_REQ_PEND) {
89 hdev->req_result = result;
90 hdev->req_status = HCI_REQ_DONE;
91 wake_up_interruptible(&hdev->req_wait_q);
/* Abort the pending synchronous request with 'err' and wake the waiter. */
95 static void hci_req_cancel(struct hci_dev *hdev, int err)
97 BT_DBG("%s err 0x%2.2x", hdev->name, err);
99 if (hdev->req_status == HCI_REQ_PEND) {
100 hdev->req_result = err;
101 hdev->req_status = HCI_REQ_CANCELED;
102 wake_up_interruptible(&hdev->req_wait_q);
106 /* Execute request and wait for completion. */
/* Runs req(hdev, opt), then sleeps interruptibly on req_wait_q until
 * hci_req_complete()/hci_req_cancel() fires or 'timeout' jiffies elapse.
 * NOTE(review): the listing elides the req() invocation and some status
 * branches (e.g. HCI_REQ_DONE label, timeout path) — confirm against the
 * full source. Caller must hold the request lock. */
107 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
108 unsigned long opt, __u32 timeout)
110 DECLARE_WAITQUEUE(wait, current);
113 BT_DBG("%s start", hdev->name);
115 hdev->req_status = HCI_REQ_PEND;
117 add_wait_queue(&hdev->req_wait_q, &wait);
118 set_current_state(TASK_INTERRUPTIBLE);
121 schedule_timeout(timeout);
123 remove_wait_queue(&hdev->req_wait_q, &wait);
125 if (signal_pending(current))
128 switch (hdev->req_status) {
/* Completed normally: controller status mapped to a negative errno. */
130 err = -bt_to_errno(hdev->req_result);
133 case HCI_REQ_CANCELED:
134 err = -hdev->req_result;
142 hdev->req_status = hdev->req_result = 0;
144 BT_DBG("%s end: err %d", hdev->name, err);
/* Serialized wrapper around __hci_request(); fails unless the device is up. */
149 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
150 unsigned long opt, __u32 timeout)
154 if (!test_bit(HCI_UP, &hdev->flags))
157 /* Serialize all requests */
159 ret = __hci_request(hdev, req, opt, timeout);
160 hci_req_unlock(hdev);
/* Request handler: issue a HCI_Reset to the controller. */
165 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
167 BT_DBG("%s %ld", hdev->name, opt);
170 set_bit(HCI_RESET, &hdev->flags);
171 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
174 static void bredr_init(struct hci_dev *hdev)
176 struct hci_cp_delete_stored_link_key cp;
180 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
182 /* Mandatory initialization */
185 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
186 set_bit(HCI_RESET, &hdev->flags);
187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
190 /* Read Local Supported Features */
191 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
193 /* Read Local Version */
194 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
196 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
197 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
199 /* Read BD Address */
200 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
202 /* Read Class of Device */
203 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
205 /* Read Local Name */
206 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
208 /* Read Voice Setting */
209 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
211 /* Optional initialization */
213 /* Clear Event Filters */
214 flt_type = HCI_FLT_CLEAR_ALL;
215 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
217 /* Connection accept timeout ~20 secs */
218 param = cpu_to_le16(0x7d00);
219 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
221 bacpy(&cp.bdaddr, BDADDR_ANY);
223 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
/* AMP controller bring-up: block-based flow control, reset, version read. */
226 static void amp_init(struct hci_dev *hdev)
228 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
231 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
233 /* Read Local Version */
234 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
/* HCI_INIT request handler: first flush any driver-supplied init commands
 * into the command queue, then run the type-specific init (BR/EDR or AMP). */
237 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
241 BT_DBG("%s %ld", hdev->name, opt);
243 /* Driver initialization */
245 /* Special commands */
246 while ((skb = skb_dequeue(&hdev->driver_init))) {
247 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
248 skb->dev = (void *) hdev;
250 skb_queue_tail(&hdev->cmd_q, skb);
251 queue_work(hdev->workqueue, &hdev->cmd_work);
253 skb_queue_purge(&hdev->driver_init);
255 switch (hdev->dev_type) {
265 BT_ERR("Unknown device type %d", hdev->dev_type);
/* LE init request handler: query LE buffer sizes. */
271 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
273 BT_DBG("%s", hdev->name);
275 /* Read LE buffer size */
276 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
/* Request handler: write the inquiry/page scan enable setting from 'opt'. */
279 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
283 BT_DBG("%s %x", hdev->name, scan);
285 /* Inquiry and Page scans */
286 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request handler: write the authentication-enable setting from 'opt'. */
289 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
293 BT_DBG("%s %x", hdev->name, auth);
296 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request handler: write the encryption-mode setting from 'opt'. */
299 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
303 BT_DBG("%s %x", hdev->name, encrypt);
306 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request handler: write the default link policy (little-endian u16). */
309 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
311 __le16 policy = cpu_to_le16(opt);
313 BT_DBG("%s %x", hdev->name, policy);
315 /* Default link policy */
316 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
319 /* Get HCI device by index.
320 * Device is held on return. */
/* Walks hci_dev_list under the read lock; returns the matching device with
 * its reference count raised (caller must hci_dev_put), or NULL. */
321 struct hci_dev *hci_dev_get(int index)
323 struct hci_dev *hdev = NULL, *d;
330 read_lock(&hci_dev_list_lock);
331 list_for_each_entry(d, &hci_dev_list, list) {
332 if (d->id == index) {
333 hdev = hci_dev_hold(d);
337 read_unlock(&hci_dev_list_lock);
341 /* ---- Inquiry support ---- */
/* True while discovery is in the FINDING or RESOLVING state. */
343 bool hci_discovery_active(struct hci_dev *hdev)
345 struct discovery_state *discov = &hdev->discovery;
347 switch (discov->state) {
348 case DISCOVERY_FINDING:
349 case DISCOVERY_RESOLVING:
/* Transition the discovery state machine, emitting mgmt "discovering"
 * events on the STOPPED and FINDING edges. No-op if state is unchanged. */
357 void hci_discovery_set_state(struct hci_dev *hdev, int state)
359 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
361 if (hdev->discovery.state == state)
365 case DISCOVERY_STOPPED:
/* Suppress the event when stopping a discovery that never started. */
366 if (hdev->discovery.state != DISCOVERY_STARTING)
367 mgmt_discovering(hdev, 0);
368 hdev->discovery.type = 0;
370 case DISCOVERY_STARTING:
372 case DISCOVERY_FINDING:
373 mgmt_discovering(hdev, 1);
375 case DISCOVERY_RESOLVING:
377 case DISCOVERY_STOPPING:
381 hdev->discovery.state = state;
/* Free every inquiry cache entry and reset the discovery lists/state. */
384 static void inquiry_cache_flush(struct hci_dev *hdev)
386 struct discovery_state *cache = &hdev->discovery;
387 struct inquiry_entry *p, *n;
389 list_for_each_entry_safe(p, n, &cache->all, all) {
394 INIT_LIST_HEAD(&cache->unknown);
395 INIT_LIST_HEAD(&cache->resolve);
396 cache->state = DISCOVERY_STOPPED;
/* Find an inquiry cache entry by Bluetooth address; NULL if absent. */
399 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
401 struct discovery_state *cache = &hdev->discovery;
402 struct inquiry_entry *e;
404 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
406 list_for_each_entry(e, &cache->all, all) {
407 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry whose remote name is still unknown, by address. */
414 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
417 struct discovery_state *cache = &hdev->discovery;
418 struct inquiry_entry *e;
420 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
422 list_for_each_entry(e, &cache->unknown, list) {
423 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Find an entry on the name-resolve list. With BDADDR_ANY, match the first
 * entry in the given name_state; otherwise match by address. */
430 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
434 struct discovery_state *cache = &hdev->discovery;
435 struct inquiry_entry *e;
437 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
439 list_for_each_entry(e, &cache->resolve, list) {
440 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
442 if (!bacmp(&e->data.bdaddr, bdaddr))
/* (Re)insert 'ie' into the resolve list, keeping it ordered so that
 * stronger-RSSI (closer) devices get their names resolved first. */
449 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
450 struct inquiry_entry *ie)
452 struct discovery_state *cache = &hdev->discovery;
453 struct list_head *pos = &cache->resolve;
454 struct inquiry_entry *p;
458 list_for_each_entry(p, &cache->resolve, list) {
459 if (p->name_state != NAME_PENDING &&
460 abs(p->data.rssi) >= abs(ie->data.rssi))
465 list_add(&ie->list, pos);
/* Insert or refresh an inquiry result in the cache. Updates *ssp from the
 * result (and from a cached SSP-capable entry), allocates a new entry when
 * the address is unseen, and maintains name_state plus the unknown/resolve
 * lists. Return value presumably indicates whether a name request is still
 * needed — TODO confirm against the elided tail of the function. */
468 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
469 bool name_known, bool *ssp)
471 struct discovery_state *cache = &hdev->discovery;
472 struct inquiry_entry *ie;
474 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
477 *ssp = data->ssp_mode;
479 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
481 if (ie->data.ssp_mode && ssp)
/* Re-sort the resolve list when a pending entry's RSSI changed. */
484 if (ie->name_state == NAME_NEEDED &&
485 data->rssi != ie->data.rssi) {
486 ie->data.rssi = data->rssi;
487 hci_inquiry_cache_update_resolve(hdev, ie);
493 /* Entry not in the cache. Add new one. */
494 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
498 list_add(&ie->all, &cache->all);
501 ie->name_state = NAME_KNOWN;
503 ie->name_state = NAME_NOT_KNOWN;
504 list_add(&ie->list, &cache->unknown);
508 if (name_known && ie->name_state != NAME_KNOWN &&
509 ie->name_state != NAME_PENDING) {
510 ie->name_state = NAME_KNOWN;
514 memcpy(&ie->data, data, sizeof(*data));
515 ie->timestamp = jiffies;
516 cache->timestamp = jiffies;
518 if (ie->name_state == NAME_NOT_KNOWN)
/* Copy up to 'num' cached inquiry results into 'buf' as an array of
 * struct inquiry_info; returns the number copied (via 'copied'). */
524 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
526 struct discovery_state *cache = &hdev->discovery;
527 struct inquiry_info *info = (struct inquiry_info *) buf;
528 struct inquiry_entry *e;
531 list_for_each_entry(e, &cache->all, all) {
532 struct inquiry_data *data = &e->data;
537 bacpy(&info->bdaddr, &data->bdaddr);
538 info->pscan_rep_mode = data->pscan_rep_mode;
539 info->pscan_period_mode = data->pscan_period_mode;
540 info->pscan_mode = data->pscan_mode;
541 memcpy(info->dev_class, data->dev_class, 3);
542 info->clock_offset = data->clock_offset;
548 BT_DBG("cache %p, copied %d", cache, copied);
/* Request handler: start an inquiry with the LAP/length/num_rsp carried in
 * the hci_inquiry_req passed through 'opt'. Skips if one is in progress. */
552 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
554 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
555 struct hci_cp_inquiry cp;
557 BT_DBG("%s", hdev->name);
559 if (test_bit(HCI_INQUIRY, &hdev->flags))
563 memcpy(&cp.lap, &ir->lap, 3);
564 cp.length = ir->length;
565 cp.num_rsp = ir->num_rsp;
566 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* HCIINQUIRY ioctl backend: optionally flush a stale cache and run a fresh
 * inquiry, then dump cached results back to userspace (request struct first,
 * then the inquiry_info array). Returns 0 or a negative errno.
 * NOTE(review): the listing elides the error/cleanup paths (kfree, dev put). */
569 int hci_inquiry(void __user *arg)
571 __u8 __user *ptr = arg;
572 struct hci_inquiry_req ir;
573 struct hci_dev *hdev;
574 int err = 0, do_inquiry = 0, max_rsp;
578 if (copy_from_user(&ir, ptr, sizeof(ir)))
581 hdev = hci_dev_get(ir.dev_id);
/* Re-inquire when the cache is too old, empty, or a flush was requested. */
586 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
587 inquiry_cache_empty(hdev) ||
588 ir.flags & IREQ_CACHE_FLUSH) {
589 inquiry_cache_flush(hdev);
592 hci_dev_unlock(hdev);
594 timeo = ir.length * msecs_to_jiffies(2000);
597 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
602 /* for unlimited number of responses we will use buffer with 255 entries */
603 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
605 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
606 * copy it to the user space.
608 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
615 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
616 hci_dev_unlock(hdev);
618 BT_DBG("num_rsp %d", ir.num_rsp);
620 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
622 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
635 /* ---- HCI ioctl helpers ---- */
/* Bring an HCI device up: refuse if rfkill-blocked or already up, mark raw
 * devices, call the driver open(), run the HCI init sequence for non-raw
 * devices, then set HCI_UP and notify listeners. On init failure, flush the
 * work items and queues and undo the open. Returns 0 or a negative errno.
 * NOTE(review): the listing elides error labels and some cleanup lines. */
637 int hci_dev_open(__u16 dev)
639 struct hci_dev *hdev;
642 hdev = hci_dev_get(dev);
646 BT_DBG("%s %p", hdev->name, hdev);
650 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
655 if (test_bit(HCI_UP, &hdev->flags)) {
660 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
661 set_bit(HCI_RAW, &hdev->flags);
663 /* Treat all non BR/EDR controllers as raw devices if
664 enable_hs is not set */
665 if (hdev->dev_type != HCI_BREDR && !enable_hs)
666 set_bit(HCI_RAW, &hdev->flags);
668 if (hdev->open(hdev)) {
673 if (!test_bit(HCI_RAW, &hdev->flags)) {
674 atomic_set(&hdev->cmd_cnt, 1);
675 set_bit(HCI_INIT, &hdev->flags);
676 hdev->init_last_cmd = 0;
678 ret = __hci_request(hdev, hci_init_req, 0,
679 msecs_to_jiffies(HCI_INIT_TIMEOUT));
681 if (lmp_host_le_capable(hdev))
682 ret = __hci_request(hdev, hci_le_init_req, 0,
683 msecs_to_jiffies(HCI_INIT_TIMEOUT));
685 clear_bit(HCI_INIT, &hdev->flags);
690 set_bit(HCI_UP, &hdev->flags);
691 hci_notify(hdev, HCI_DEV_UP);
692 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
694 mgmt_powered(hdev, 1);
695 hci_dev_unlock(hdev);
698 /* Init failed, cleanup */
699 flush_work(&hdev->tx_work);
700 flush_work(&hdev->cmd_work);
701 flush_work(&hdev->rx_work);
703 skb_queue_purge(&hdev->cmd_q);
704 skb_queue_purge(&hdev->rx_q);
709 if (hdev->sent_cmd) {
710 kfree_skb(hdev->sent_cmd);
711 hdev->sent_cmd = NULL;
719 hci_req_unlock(hdev);
/* Tear an HCI device down: cancel pending work/requests, clear HCI_UP,
 * flush workers and queues, drop discoverability, flush connection and
 * inquiry state, optionally issue a final reset (for NO_RESET-quirk
 * controllers), emit the mgmt powered(0) event unless auto-off consumed it,
 * and clear EIR/class state. NOTE(review): the listing elides the driver
 * flush/close calls and the final return. */
724 static int hci_dev_do_close(struct hci_dev *hdev)
726 BT_DBG("%s %p", hdev->name, hdev);
728 cancel_work_sync(&hdev->le_scan);
730 hci_req_cancel(hdev, ENODEV);
/* Not up: nothing else to tear down beyond the command timer. */
733 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
734 del_timer_sync(&hdev->cmd_timer);
735 hci_req_unlock(hdev);
739 /* Flush RX and TX works */
740 flush_work(&hdev->tx_work);
741 flush_work(&hdev->rx_work);
743 if (hdev->discov_timeout > 0) {
744 cancel_delayed_work(&hdev->discov_off);
745 hdev->discov_timeout = 0;
746 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
749 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
750 cancel_delayed_work(&hdev->service_cache);
752 cancel_delayed_work_sync(&hdev->le_scan_disable);
755 inquiry_cache_flush(hdev);
756 hci_conn_hash_flush(hdev);
757 hci_dev_unlock(hdev);
759 hci_notify(hdev, HCI_DEV_DOWN);
765 skb_queue_purge(&hdev->cmd_q);
766 atomic_set(&hdev->cmd_cnt, 1);
/* Controllers that skip reset on init get an explicit reset on close. */
767 if (!test_bit(HCI_RAW, &hdev->flags) &&
768 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
769 set_bit(HCI_INIT, &hdev->flags);
770 __hci_request(hdev, hci_reset_req, 0,
771 msecs_to_jiffies(250));
772 clear_bit(HCI_INIT, &hdev->flags);
776 flush_work(&hdev->cmd_work);
779 skb_queue_purge(&hdev->rx_q);
780 skb_queue_purge(&hdev->cmd_q);
781 skb_queue_purge(&hdev->raw_q);
783 /* Drop last sent command */
784 if (hdev->sent_cmd) {
785 del_timer_sync(&hdev->cmd_timer);
786 kfree_skb(hdev->sent_cmd);
787 hdev->sent_cmd = NULL;
790 /* After this point our queues are empty
791 * and no tasks are scheduled. */
794 if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
796 mgmt_powered(hdev, 0);
797 hci_dev_unlock(hdev);
803 memset(hdev->eir, 0, sizeof(hdev->eir));
804 memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
806 hci_req_unlock(hdev);
/* HCIDEVDOWN ioctl backend: cancel a pending auto-power-off, then close. */
812 int hci_dev_close(__u16 dev)
814 struct hci_dev *hdev;
817 hdev = hci_dev_get(dev);
821 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
822 cancel_delayed_work(&hdev->power_off);
824 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl backend: drop queued traffic, flush inquiry/connection
 * state, reset counters, and issue HCI_Reset for non-raw devices. */
830 int hci_dev_reset(__u16 dev)
832 struct hci_dev *hdev;
835 hdev = hci_dev_get(dev);
841 if (!test_bit(HCI_UP, &hdev->flags))
845 skb_queue_purge(&hdev->rx_q);
846 skb_queue_purge(&hdev->cmd_q);
849 inquiry_cache_flush(hdev);
850 hci_conn_hash_flush(hdev);
851 hci_dev_unlock(hdev);
856 atomic_set(&hdev->cmd_cnt, 1);
857 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
859 if (!test_bit(HCI_RAW, &hdev->flags))
860 ret = __hci_request(hdev, hci_reset_req, 0,
861 msecs_to_jiffies(HCI_INIT_TIMEOUT));
864 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl backend: zero the device's traffic statistics. */
869 int hci_dev_reset_stat(__u16 dev)
871 struct hci_dev *hdev;
874 hdev = hci_dev_get(dev);
878 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatcher for the HCISET* device ioctls: runs the matching request
 * handler (auth/encrypt/scan/link policy) or directly updates link mode,
 * packet type, or ACL/SCO MTU settings from dr.dev_opt.
 * NOTE(review): the case labels and the final return are elided from this
 * listing — the pairs below belong to a switch (cmd). */
885 int hci_dev_cmd(unsigned int cmd, void __user *arg)
887 struct hci_dev *hdev;
888 struct hci_dev_req dr;
891 if (copy_from_user(&dr, arg, sizeof(dr)))
894 hdev = hci_dev_get(dr.dev_id);
900 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
901 msecs_to_jiffies(HCI_INIT_TIMEOUT));
905 if (!lmp_encrypt_capable(hdev)) {
910 if (!test_bit(HCI_AUTH, &hdev->flags)) {
911 /* Auth must be enabled first */
912 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
913 msecs_to_jiffies(HCI_INIT_TIMEOUT));
918 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
919 msecs_to_jiffies(HCI_INIT_TIMEOUT));
923 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
924 msecs_to_jiffies(HCI_INIT_TIMEOUT));
928 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
929 msecs_to_jiffies(HCI_INIT_TIMEOUT));
933 hdev->link_mode = ((__u16) dr.dev_opt) &
934 (HCI_LM_MASTER | HCI_LM_ACCEPT);
938 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs MTU in the high 16 bits and packet count in the low. */
942 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
943 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
947 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
948 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl backend: build an hci_dev_list_req of (id, flags)
 * pairs for up to dev_num registered devices and copy it to userspace. */
960 int hci_get_dev_list(void __user *arg)
962 struct hci_dev *hdev;
963 struct hci_dev_list_req *dl;
964 struct hci_dev_req *dr;
965 int n = 0, size, err;
968 if (get_user(dev_num, (__u16 __user *) arg))
/* Bound the allocation so userspace cannot request an absurd buffer. */
971 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
974 size = sizeof(*dl) + dev_num * sizeof(*dr);
976 dl = kzalloc(size, GFP_KERNEL);
982 read_lock(&hci_dev_list_lock);
983 list_for_each_entry(hdev, &hci_dev_list, list) {
/* Enumeration implies an interested user: keep auto-off from firing. */
984 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
985 cancel_delayed_work(&hdev->power_off);
987 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
988 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
990 (dr + n)->dev_id = hdev->id;
991 (dr + n)->dev_opt = hdev->flags;
996 read_unlock(&hci_dev_list_lock);
999 size = sizeof(*dl) + n * sizeof(*dr);
1001 err = copy_to_user(arg, dl, size);
1004 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info snapshot of the
 * named device and copy it back to userspace. */
1007 int hci_get_dev_info(void __user *arg)
1009 struct hci_dev *hdev;
1010 struct hci_dev_info di;
1013 if (copy_from_user(&di, arg, sizeof(di)))
1016 hdev = hci_dev_get(di.dev_id);
1020 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1021 cancel_delayed_work_sync(&hdev->power_off);
1023 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1024 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1026 strcpy(di.name, hdev->name);
1027 di.bdaddr = hdev->bdaddr;
1028 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1029 di.flags = hdev->flags;
1030 di.pkt_type = hdev->pkt_type;
1031 di.acl_mtu = hdev->acl_mtu;
1032 di.acl_pkts = hdev->acl_pkts;
1033 di.sco_mtu = hdev->sco_mtu;
1034 di.sco_pkts = hdev->sco_pkts;
1035 di.link_policy = hdev->link_policy;
1036 di.link_mode = hdev->link_mode;
1038 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1039 memcpy(&di.features, &hdev->features, sizeof(di.features));
1041 if (copy_to_user(arg, &di, sizeof(di)))
1049 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: close the device when the radio is soft/hard blocked. */
1051 static int hci_rfkill_set_block(void *data, bool blocked)
1053 struct hci_dev *hdev = data;
1055 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1060 hci_dev_do_close(hdev);
/* rfkill operations registered for each HCI device. */
1065 static const struct rfkill_ops hci_rfkill_ops = {
1066 .set_block = hci_rfkill_set_block,
1069 /* Alloc HCI device */
/* Allocate and minimally initialize a struct hci_dev for a driver;
 * returns NULL on allocation failure. Freed with hci_free_dev(). */
1070 struct hci_dev *hci_alloc_dev(void)
1072 struct hci_dev *hdev;
1074 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1078 hci_init_sysfs(hdev);
1079 skb_queue_head_init(&hdev->driver_init);
1083 EXPORT_SYMBOL(hci_alloc_dev);
1085 /* Free HCI device */
/* Drop the device reference; the actual kfree happens in the sysfs
 * device release callback. */
1086 void hci_free_dev(struct hci_dev *hdev)
1088 skb_queue_purge(&hdev->driver_init);
1090 /* will free via device release */
1091 put_device(&hdev->dev);
1093 EXPORT_SYMBOL(hci_free_dev);
/* Deferred power-on work: open the device; if it was auto-powered, arm the
 * auto-off timer; announce a newly set-up controller to mgmt. */
1095 static void hci_power_on(struct work_struct *work)
1097 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1099 BT_DBG("%s", hdev->name);
1101 if (hci_dev_open(hdev->id) < 0)
1104 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1105 schedule_delayed_work(&hdev->power_off,
1106 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1108 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1109 mgmt_index_added(hdev);
/* Deferred power-off work: close the device. */
1112 static void hci_power_off(struct work_struct *work)
1114 struct hci_dev *hdev = container_of(work, struct hci_dev,
1117 BT_DBG("%s", hdev->name);
1119 hci_dev_do_close(hdev);
/* Delayed work: end the discoverable window by restoring page-scan only. */
1122 static void hci_discov_off(struct work_struct *work)
1124 struct hci_dev *hdev;
1125 u8 scan = SCAN_PAGE;
1127 hdev = container_of(work, struct hci_dev, discov_off.work);
1129 BT_DBG("%s", hdev->name);
1133 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1135 hdev->discov_timeout = 0;
1137 hci_dev_unlock(hdev);
/* Free every registered service UUID on this device. */
1140 int hci_uuids_clear(struct hci_dev *hdev)
1142 struct list_head *p, *n;
1144 list_for_each_safe(p, n, &hdev->uuids) {
1145 struct bt_uuid *uuid;
1147 uuid = list_entry(p, struct bt_uuid, list);
/* Free every stored BR/EDR link key on this device. */
1156 int hci_link_keys_clear(struct hci_dev *hdev)
1158 struct list_head *p, *n;
1160 list_for_each_safe(p, n, &hdev->link_keys) {
1161 struct link_key *key;
1163 key = list_entry(p, struct link_key, list);
/* Free every stored SMP long-term key on this device. */
1172 int hci_smp_ltks_clear(struct hci_dev *hdev)
1174 struct smp_ltk *k, *tmp;
1176 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
/* Look up a stored BR/EDR link key by remote address; NULL if absent. */
1184 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1188 list_for_each_entry(k, &hdev->link_keys, list)
1189 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored persistently,
 * based on key type and both sides' authentication requirements. */
1195 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1196 u8 key_type, u8 old_key_type)
/* Legacy (pre-SSP) key types below 0x03 are always persistent. */
1199 if (key_type < 0x03)
1202 /* Debug keys are insecure so don't store them persistently */
1203 if (key_type == HCI_LK_DEBUG_COMBINATION)
1206 /* Changed combination key and there's no previous one */
1207 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1210 /* Security mode 3 case */
1214 /* Neither local nor remote side had no-bonding as requirement */
1215 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1218 /* Local side had dedicated bonding as requirement */
1219 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1222 /* Remote side had dedicated bonding as requirement */
1223 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1226 /* If none of the above criteria match, then don't store the key
/* Look up a long-term key by its EDIV and 8-byte Rand; NULL if absent. */
1231 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1235 list_for_each_entry(k, &hdev->long_term_keys, list) {
1236 if (k->ediv != ediv ||
1237 memcmp(rand, k->rand, sizeof(k->rand)))
1245 EXPORT_SYMBOL(hci_find_ltk);
/* Look up a long-term key by remote address and address type. */
1247 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1252 list_for_each_entry(k, &hdev->long_term_keys, list)
1253 if (addr_type == k->bdaddr_type &&
1254 bacmp(bdaddr, &k->bdaddr) == 0)
1259 EXPORT_SYMBOL(hci_find_ltk_by_addr);
/* Store (or update) a BR/EDR link key. Works around controllers that emit a
 * bogus "changed combination" key for legacy pairing, decides persistence
 * via hci_persistent_key(), and either reports the key to mgmt or drops it
 * again when it must not be stored. NOTE(review): some branches (new_key
 * handling, non-persistent removal path) are elided from this listing. */
1261 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1262 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1264 struct link_key *key, *old_key;
1265 u8 old_key_type, persistent;
1267 old_key = hci_find_link_key(hdev, bdaddr);
1269 old_key_type = old_key->type;
1272 old_key_type = conn ? conn->key_type : 0xff;
1273 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1276 list_add(&key->list, &hdev->link_keys);
1279 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1281 /* Some buggy controller combinations generate a changed
1282 * combination key for legacy pairing even when there's no
1284 if (type == HCI_LK_CHANGED_COMBINATION &&
1285 (!conn || conn->remote_auth == 0xff) &&
1286 old_key_type == 0xff) {
1287 type = HCI_LK_COMBINATION;
1289 conn->key_type = type;
1292 bacpy(&key->bdaddr, bdaddr);
1293 memcpy(key->val, val, 16);
1294 key->pin_len = pin_len;
1296 if (type == HCI_LK_CHANGED_COMBINATION)
1297 key->type = old_key_type;
1304 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1306 mgmt_new_link_key(hdev, key, persistent);
/* Non-persistent key: remove it again after notifying mgmt. */
1309 list_del(&key->list);
/* Store (or update) an SMP STK/LTK for the given address, and report new
 * LTKs to the mgmt interface. */
1316 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1317 int new_key, u8 authenticated, u8 tk[16],
1318 u8 enc_size, u16 ediv, u8 rand[8])
1320 struct smp_ltk *key, *old_key;
1322 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1325 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1329 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1332 list_add(&key->list, &hdev->long_term_keys);
1335 bacpy(&key->bdaddr, bdaddr);
1336 key->bdaddr_type = addr_type;
1337 memcpy(key->val, tk, sizeof(key->val));
1338 key->authenticated = authenticated;
1340 key->enc_size = enc_size;
1342 memcpy(key->rand, rand, sizeof(key->rand));
1347 if (type & HCI_SMP_LTK)
1348 mgmt_new_ltk(hdev, key, 1);
/* Delete the stored BR/EDR link key for 'bdaddr', if any. */
1353 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1355 struct link_key *key;
1357 key = hci_find_link_key(hdev, bdaddr);
1361 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1363 list_del(&key->list);
/* Delete every stored long-term key matching 'bdaddr'. */
1369 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1371 struct smp_ltk *k, *tmp;
1373 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1374 if (bacmp(bdaddr, &k->bdaddr))
1377 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1386 /* HCI command timer function */
/* Fires when a sent HCI command gets no completion in time: log it, free
 * the command credit, and kick the command work queue. */
1387 static void hci_cmd_timer(unsigned long arg)
1389 struct hci_dev *hdev = (void *) arg;
1391 BT_ERR("%s command tx timeout", hdev->name);
1392 atomic_set(&hdev->cmd_cnt, 1);
1393 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up cached remote OOB pairing data by address; NULL if absent. */
1396 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1399 struct oob_data *data;
1401 list_for_each_entry(data, &hdev->remote_oob_data, list)
1402 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Delete cached remote OOB data for 'bdaddr', if any. */
1408 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1410 struct oob_data *data;
1412 data = hci_find_remote_oob_data(hdev, bdaddr);
1416 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1418 list_del(&data->list);
/* Free every cached remote OOB data entry on this device. */
1424 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1426 struct oob_data *data, *n;
1428 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1429 list_del(&data->list);
/* Cache (or update) remote OOB hash/randomizer for 'bdaddr', allocating a
 * new entry when none exists. */
1436 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1439 struct oob_data *data;
1441 data = hci_find_remote_oob_data(hdev, bdaddr);
1444 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1448 bacpy(&data->bdaddr, bdaddr);
1449 list_add(&data->list, &hdev->remote_oob_data);
1452 memcpy(data->hash, hash, sizeof(data->hash));
1453 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1455 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
/* Find a blacklist entry by address; NULL if not blacklisted. */
1460 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1463 struct bdaddr_list *b;
1465 list_for_each_entry(b, &hdev->blacklist, list)
1466 if (bacmp(bdaddr, &b->bdaddr) == 0)
/* Remove and free every blacklist entry on this device. */
1472 int hci_blacklist_clear(struct hci_dev *hdev)
1474 struct list_head *p, *n;
1476 list_for_each_safe(p, n, &hdev->blacklist) {
1477 struct bdaddr_list *b;
1479 b = list_entry(p, struct bdaddr_list, list);
/* Add 'bdaddr' to the blacklist (rejecting BDADDR_ANY and duplicates) and
 * report the block to the mgmt interface. */
1488 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1490 struct bdaddr_list *entry;
1492 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1495 if (hci_blacklist_lookup(hdev, bdaddr))
1498 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1502 bacpy(&entry->bdaddr, bdaddr);
1504 list_add(&entry->list, &hdev->blacklist);
1506 return mgmt_device_blocked(hdev, bdaddr, type);
/* Remove 'bdaddr' from the blacklist (BDADDR_ANY clears the whole list)
 * and report the unblock to the mgmt interface. */
1509 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1511 struct bdaddr_list *entry;
1513 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1514 return hci_blacklist_clear(hdev);
1516 entry = hci_blacklist_lookup(hdev, bdaddr);
1520 list_del(&entry->list);
1523 return mgmt_device_unblocked(hdev, bdaddr, type);
/* Delayed work: drop all cached LE advertising entries. */
1526 static void hci_clear_adv_cache(struct work_struct *work)
1528 struct hci_dev *hdev = container_of(work, struct hci_dev,
1533 hci_adv_entries_clear(hdev);
1535 hci_dev_unlock(hdev);
/* Remove and free every cached LE advertising entry. */
1538 int hci_adv_entries_clear(struct hci_dev *hdev)
1540 struct adv_entry *entry, *tmp;
1542 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1543 list_del(&entry->list);
1547 BT_DBG("%s adv cache cleared", hdev->name);
/* Find a cached advertising entry by address; NULL if absent. */
1552 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1554 struct adv_entry *entry;
1556 list_for_each_entry(entry, &hdev->adv_entries, list)
1557 if (bacmp(bdaddr, &entry->bdaddr) == 0)
/* True for advertising types a central may connect to (ADV_IND/DIRECT_IND). */
1563 static inline int is_connectable_adv(u8 evt_type)
1565 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
/* Cache the address/type from a connectable LE advertising report.
 * Non-connectable reports and already-known addresses are ignored. */
1571 int hci_add_adv_entry(struct hci_dev *hdev,
1572 struct hci_ev_le_advertising_info *ev)
1574 struct adv_entry *entry;
1576 if (!is_connectable_adv(ev->evt_type))
1579 /* Only new entries should be added to adv_entries. So, if
1580 * bdaddr was found, don't add it. */
1581 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1584 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1588 bacpy(&entry->bdaddr, &ev->bdaddr);
1589 entry->bdaddr_type = ev->bdaddr_type;
1591 list_add(&entry->list, &hdev->adv_entries);
1593 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1594 batostr(&entry->bdaddr), entry->bdaddr_type);
/* Request handler: program LE scan type/interval/window from the
 * le_scan_params passed through 'opt'. */
1599 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1601 struct le_scan_params *param = (struct le_scan_params *) opt;
1602 struct hci_cp_le_set_scan_param cp;
1604 memset(&cp, 0, sizeof(cp));
1605 cp.type = param->type;
1606 cp.interval = cpu_to_le16(param->interval);
1607 cp.window = cpu_to_le16(param->window);
1609 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
/* Request handler: enable LE scanning (enable flag set in elided lines). */
1612 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1614 struct hci_cp_le_set_scan_enable cp;
1616 memset(&cp, 0, sizeof(cp));
1619 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1622 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1623 u16 window, int timeout)
1625 long timeo = msecs_to_jiffies(3000);
1626 struct le_scan_params param;
1629 BT_DBG("%s", hdev->name);
1631 if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1632 return -EINPROGRESS;
1635 param.interval = interval;
1636 param.window = window;
1640 err = __hci_request(hdev, le_scan_param_req, (unsigned long) ¶m,
1643 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1645 hci_req_unlock(hdev);
1650 schedule_delayed_work(&hdev->le_scan_disable,
1651 msecs_to_jiffies(timeout));
/* Delayed work: turn off LE scanning (the zeroed command disables it). */
1656 static void le_scan_disable_work(struct work_struct *work)
1658 struct hci_dev *hdev = container_of(work, struct hci_dev,
1659 le_scan_disable.work);
1660 struct hci_cp_le_set_scan_enable cp;
1662 BT_DBG("%s", hdev->name);
1664 memset(&cp, 0, sizeof(cp));
1666 hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
/* Workqueue handler queued by hci_le_scan(): performs the actual scan using
 * the parameters stashed in hdev->le_scan_params. */
1669 static void le_scan_work(struct work_struct *work)
1671 struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1672 struct le_scan_params *param = &hdev->le_scan_params;
1674 BT_DBG("%s", hdev->name);
1676 hci_do_le_scan(hdev, param->type, param->interval,
1677 param->window, param->timeout);
/* Public entry point to start an LE scan asynchronously: record the scan
 * parameters on the hdev and kick le_scan_work on the long-running system
 * workqueue.  Returns -EINPROGRESS if a scan request is already queued. */
1680 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1683 struct le_scan_params *param = &hdev->le_scan_params;
1685 BT_DBG("%s", hdev->name);
1687 if (work_busy(&hdev->le_scan))
1688 return -EINPROGRESS;
1691 param->interval = interval;
1692 param->window = window;
1693 param->timeout = timeout;
/* system_long_wq: the request path blocks, so keep it off system_wq. */
1695 queue_work(system_long_wq, &hdev->le_scan);
1700 /* Register HCI device */
/* Allocates the first free device id (AMP controllers start at 1 so id 0
 * can double as the AMP controller ID), links the device into hci_dev_list,
 * initializes all per-device state (locks, queues, work items, timers,
 * lists), then creates the workqueue, sysfs entries and rfkill switch and
 * schedules the initial power-on. */
1701 int hci_register_dev(struct hci_dev *hdev)
1703 struct list_head *head = &hci_dev_list, *p;
1706 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* A transport driver must supply at least open/close callbacks. */
1708 if (!hdev->open || !hdev->close)
1711 /* Do not allow HCI_AMP devices to register at index 0,
1712 * so the index can be used as the AMP controller ID.
1714 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1716 write_lock(&hci_dev_list_lock);
1718 /* Find first available device id */
1719 list_for_each(p, &hci_dev_list) {
1720 if (list_entry(p, struct hci_dev, list)->id != id)
1725 sprintf(hdev->name, "hci%d", id);
1727 list_add_tail(&hdev->list, head);
1729 mutex_init(&hdev->lock);
/* Conservative defaults until the controller reports its capabilities. */
1732 hdev->dev_flags = 0;
1733 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1734 hdev->esco_type = (ESCO_HV1);
1735 hdev->link_mode = (HCI_LM_ACCEPT);
1736 hdev->io_capability = 0x03; /* No Input No Output */
1738 hdev->idle_timeout = 0;
/* Sniff intervals in baseband slots (0.625 ms each). */
1739 hdev->sniff_max_interval = 800;
1740 hdev->sniff_min_interval = 80;
1742 INIT_WORK(&hdev->rx_work, hci_rx_work);
1743 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1744 INIT_WORK(&hdev->tx_work, hci_tx_work);
1747 skb_queue_head_init(&hdev->rx_q);
1748 skb_queue_head_init(&hdev->cmd_q);
1749 skb_queue_head_init(&hdev->raw_q);
1751 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1753 for (i = 0; i < NUM_REASSEMBLY; i++)
1754 hdev->reassembly[i] = NULL;
1756 init_waitqueue_head(&hdev->req_wait_q);
1757 mutex_init(&hdev->req_lock);
1759 discovery_init(hdev);
1761 hci_conn_hash_init(hdev);
1763 INIT_LIST_HEAD(&hdev->mgmt_pending);
1765 INIT_LIST_HEAD(&hdev->blacklist);
1767 INIT_LIST_HEAD(&hdev->uuids);
1769 INIT_LIST_HEAD(&hdev->link_keys);
1770 INIT_LIST_HEAD(&hdev->long_term_keys);
1772 INIT_LIST_HEAD(&hdev->remote_oob_data);
1774 INIT_LIST_HEAD(&hdev->adv_entries);
1776 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1777 INIT_WORK(&hdev->power_on, hci_power_on);
1778 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1780 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1782 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1784 atomic_set(&hdev->promisc, 0);
1786 INIT_WORK(&hdev->le_scan, le_scan_work);
1788 INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1790 write_unlock(&hci_dev_list_lock);
/* Per-device single-threaded workqueue for rx/tx/cmd processing. */
1792 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1794 if (!hdev->workqueue) {
1799 error = hci_add_sysfs(hdev);
1803 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1804 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
/* rfkill is optional: on registration failure, continue without it. */
1806 if (rfkill_register(hdev->rfkill) < 0) {
1807 rfkill_destroy(hdev->rfkill);
1808 hdev->rfkill = NULL;
/* Mark device as in setup and auto-off, then bring it up. */
1812 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1813 set_bit(HCI_SETUP, &hdev->dev_flags);
1814 schedule_work(&hdev->power_on);
1816 hci_notify(hdev, HCI_DEV_REG);
/* Error unwind: tear down the workqueue and unlink from the list. */
1822 destroy_workqueue(hdev->workqueue);
1824 write_lock(&hci_dev_list_lock);
1825 list_del(&hdev->list);
1826 write_unlock(&hci_dev_list_lock);
1830 EXPORT_SYMBOL(hci_register_dev);
1832 /* Unregister HCI device */
/* Reverse of hci_register_dev(): unlink from the global list, close the
 * device, notify mgmt (unless still in init/setup), release rfkill and
 * sysfs entries, destroy the workqueue and free all cached device data. */
1833 void hci_unregister_dev(struct hci_dev *hdev)
1837 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1839 write_lock(&hci_dev_list_lock);
1840 list_del(&hdev->list);
1841 write_unlock(&hci_dev_list_lock);
1843 hci_dev_do_close(hdev);
/* Drop any partially reassembled packets. */
1845 for (i = 0; i < NUM_REASSEMBLY; i++)
1846 kfree_skb(hdev->reassembly[i]);
1848 if (!test_bit(HCI_INIT, &hdev->flags) &&
1849 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1851 mgmt_index_removed(hdev);
1852 hci_dev_unlock(hdev);
1855 /* mgmt_index_removed should take care of emptying the
1857 BUG_ON(!list_empty(&hdev->mgmt_pending));
1859 hci_notify(hdev, HCI_DEV_UNREG);
1862 rfkill_unregister(hdev->rfkill);
1863 rfkill_destroy(hdev->rfkill);
1866 hci_del_sysfs(hdev);
1868 cancel_delayed_work_sync(&hdev->adv_work);
1870 destroy_workqueue(hdev->workqueue);
/* Free every per-device cache/list under the device lock. */
1873 hci_blacklist_clear(hdev);
1874 hci_uuids_clear(hdev);
1875 hci_link_keys_clear(hdev);
1876 hci_smp_ltks_clear(hdev);
1877 hci_remote_oob_data_clear(hdev);
1878 hci_adv_entries_clear(hdev);
1879 hci_dev_unlock(hdev);
1883 EXPORT_SYMBOL(hci_unregister_dev);
1885 /* Suspend HCI device */
/* Broadcast HCI_DEV_SUSPEND to registered notifiers; drivers call this
 * from their power-management suspend path. */
1886 int hci_suspend_dev(struct hci_dev *hdev)
1888 hci_notify(hdev, HCI_DEV_SUSPEND);
1891 EXPORT_SYMBOL(hci_suspend_dev);
1893 /* Resume HCI device */
/* Broadcast HCI_DEV_RESUME to registered notifiers; counterpart of
 * hci_suspend_dev(). */
1894 int hci_resume_dev(struct hci_dev *hdev)
1896 hci_notify(hdev, HCI_DEV_RESUME);
1899 EXPORT_SYMBOL(hci_resume_dev);
1901 /* Receive frame from HCI drivers */
/* Entry point for transport drivers handing a complete packet to the core:
 * validates that the device is up (or initializing), timestamps and marks
 * the skb as incoming, then queues it for hci_rx_work. */
1902 int hci_recv_frame(struct sk_buff *skb)
1904 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
/* Reject frames unless the device is UP or still in INIT. */
1905 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1906 && !test_bit(HCI_INIT, &hdev->flags))) {
1912 bt_cb(skb)->incoming = 1;
1915 __net_timestamp(skb);
1917 skb_queue_tail(&hdev->rx_q, skb);
1918 queue_work(hdev->workqueue, &hdev->rx_work);
1922 EXPORT_SYMBOL(hci_recv_frame);
/* Incrementally reassemble one HCI packet of @type from a byte stream.
 * @index selects the per-type reassembly slot in hdev->reassembly[]; a new
 * skb is allocated when the slot is empty, header bytes are consumed first
 * to learn the payload length (scb->expect), and a completed frame is
 * handed to hci_recv_frame().  Returns the number of unconsumed bytes
 * (callers loop until 0) or a negative error. */
1924 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1925 int count, __u8 index)
1930 struct sk_buff *skb;
1931 struct bt_skb_cb *scb;
/* NOTE(review): relies on HCI_ACLDATA_PKT..HCI_EVENT_PKT being a
 * contiguous range of valid packet types -- confirm in hci.h. */
1933 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1934 index >= NUM_REASSEMBLY)
1937 skb = hdev->reassembly[index];
/* Per-type maximum frame size and header length. */
1941 case HCI_ACLDATA_PKT:
1942 len = HCI_MAX_FRAME_SIZE;
1943 hlen = HCI_ACL_HDR_SIZE;
1946 len = HCI_MAX_EVENT_SIZE;
1947 hlen = HCI_EVENT_HDR_SIZE;
1949 case HCI_SCODATA_PKT:
1950 len = HCI_MAX_SCO_SIZE;
1951 hlen = HCI_SCO_HDR_SIZE;
/* Start of a new packet: allocate the reassembly skb. */
1955 skb = bt_skb_alloc(len, GFP_ATOMIC);
1959 scb = (void *) skb->cb;
1961 scb->pkt_type = type;
1963 skb->dev = (void *) hdev;
1964 hdev->reassembly[index] = skb;
/* Copy in as many bytes as are still expected. */
1968 scb = (void *) skb->cb;
1969 len = min_t(uint, scb->expect, count);
1971 memcpy(skb_put(skb, len), data, len);
/* Header complete: read the payload length from the packet header. */
1980 if (skb->len == HCI_EVENT_HDR_SIZE) {
1981 struct hci_event_hdr *h = hci_event_hdr(skb);
1982 scb->expect = h->plen;
/* Abort reassembly if the advertised payload cannot fit. */
1984 if (skb_tailroom(skb) < scb->expect) {
1986 hdev->reassembly[index] = NULL;
1992 case HCI_ACLDATA_PKT:
1993 if (skb->len == HCI_ACL_HDR_SIZE) {
1994 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1995 scb->expect = __le16_to_cpu(h->dlen);
1997 if (skb_tailroom(skb) < scb->expect) {
1999 hdev->reassembly[index] = NULL;
2005 case HCI_SCODATA_PKT:
2006 if (skb->len == HCI_SCO_HDR_SIZE) {
2007 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2008 scb->expect = h->dlen;
2010 if (skb_tailroom(skb) < scb->expect) {
2012 hdev->reassembly[index] = NULL;
2019 if (scb->expect == 0) {
2020 /* Complete frame */
2022 bt_cb(skb)->pkt_type = type;
2023 hci_recv_frame(skb);
/* Slot cleared: hci_recv_frame now owns the skb. */
2025 hdev->reassembly[index] = NULL;
/* Driver helper: feed a fragment of a typed HCI packet into the
 * reassembly machinery, looping until all @count bytes are consumed.
 * The reassembly slot is derived from the packet type (type - 1). */
2033 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2037 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2041 rem = hci_reassembly(hdev, type, data, count, type - 1);
/* Advance past the bytes hci_reassembly consumed. */
2045 data += (count - rem);
2051 EXPORT_SYMBOL(hci_recv_fragment);
/* Reassembly slot used for untyped byte-stream transports (e.g. UART):
 * the packet-type indicator byte is part of the stream itself. */
2053 #define STREAM_REASSEMBLY 0
/* Driver helper for stream transports: peel the packet-type byte off the
 * stream when starting a new frame, then reuse hci_reassembly() with the
 * dedicated STREAM_REASSEMBLY slot until @count bytes are consumed. */
2055 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2061 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2064 struct { char type; } *pkt;
2066 /* Start of the frame */
/* Continuation: the type was recorded in the skb control block. */
2073 type = bt_cb(skb)->pkt_type;
2075 rem = hci_reassembly(hdev, type, data, count,
2080 data += (count - rem);
2086 EXPORT_SYMBOL(hci_recv_stream_fragment);
2088 /* ---- Interface to upper protocols ---- */
/* Register an upper-protocol callback block (connect/disconnect/auth
 * notifications) on the global hci_cb_list. */
2090 int hci_register_cb(struct hci_cb *cb)
2092 BT_DBG("%p name %s", cb, cb->name);
2094 write_lock(&hci_cb_list_lock);
2095 list_add(&cb->list, &hci_cb_list);
2096 write_unlock(&hci_cb_list_lock);
2100 EXPORT_SYMBOL(hci_register_cb);
/* Remove a previously registered upper-protocol callback block from the
 * global hci_cb_list. */
2102 int hci_unregister_cb(struct hci_cb *cb)
2104 BT_DBG("%p name %s", cb, cb->name);
2106 write_lock(&hci_cb_list_lock);
2107 list_del(&cb->list);
2108 write_unlock(&hci_cb_list_lock);
2112 EXPORT_SYMBOL(hci_unregister_cb);
/* Hand one outgoing skb to the transport driver: timestamp it, mirror a
 * copy to the monitor channel (and to raw sockets when in promiscuous
 * mode), then invoke the driver's send callback. */
2114 static int hci_send_frame(struct sk_buff *skb)
2116 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2123 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2126 __net_timestamp(skb);
2128 /* Send copy to monitor */
2129 hci_send_to_monitor(hdev, skb);
2131 if (atomic_read(&hdev->promisc)) {
2132 /* Send copy to the sockets */
2133 hci_send_to_sock(hdev, skb);
2136 /* Get rid of skb owner, prior to sending to the driver. */
2139 return hdev->send(skb);
2142 /* Send HCI command */
/* Build an HCI command packet (header + @plen bytes of @param), record it
 * as the last init command when initializing, and queue it on cmd_q for
 * hci_cmd_work to transmit under flow control. */
2143 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2145 int len = HCI_COMMAND_HDR_SIZE + plen;
2146 struct hci_command_hdr *hdr;
2147 struct sk_buff *skb;
2149 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2151 skb = bt_skb_alloc(len, GFP_ATOMIC);
2153 BT_ERR("%s no memory for command", hdev->name);
2157 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
/* Opcode is little-endian on the wire. */
2158 hdr->opcode = cpu_to_le16(opcode);
2162 memcpy(skb_put(skb, plen), param, plen);
2164 BT_DBG("skb len %d", skb->len);
2166 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2167 skb->dev = (void *) hdev;
/* Track the last command issued during device initialization. */
2169 if (test_bit(HCI_INIT, &hdev->flags))
2170 hdev->init_last_cmd = opcode;
2172 skb_queue_tail(&hdev->cmd_q, skb);
2173 queue_work(hdev->workqueue, &hdev->cmd_work);
2178 /* Get data from the previously sent command */
/* Return a pointer to the parameter bytes of the most recently sent
 * command, or NULL if none was sent or its opcode does not match. */
2179 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2181 struct hci_command_hdr *hdr;
2183 if (!hdev->sent_cmd)
/* Compare in wire byte order to avoid converting the stored opcode. */
2186 hdr = (void *) hdev->sent_cmd->data;
2188 if (hdr->opcode != cpu_to_le16(opcode))
2191 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2193 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header to @skb, packing the connection @handle and
 * packet-boundary/broadcast @flags into the 16-bit handle field. */
2197 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2199 struct hci_acl_hdr *hdr;
2202 skb_push(skb, HCI_ACL_HDR_SIZE);
2203 skb_reset_transport_header(skb);
2204 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2205 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2206 hdr->dlen = cpu_to_le16(len);
/* Queue an (possibly fragmented) ACL skb on @queue.  A non-fragmented skb
 * is queued directly; a frag_list skb has each fragment given its own ACL
 * header (with ACL_START cleared on continuations) and all fragments are
 * queued atomically under the queue lock. */
2209 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2210 struct sk_buff *skb, __u16 flags)
2212 struct hci_dev *hdev = conn->hdev;
2213 struct sk_buff *list;
2215 list = skb_shinfo(skb)->frag_list;
2217 /* Non fragmented */
2218 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2220 skb_queue_tail(queue, skb);
2223 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain; fragments are queued individually. */
2225 skb_shinfo(skb)->frag_list = NULL;
2227 /* Queue all fragments atomically */
2228 spin_lock(&queue->lock);
2230 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the start-of-packet flag. */
2232 flags &= ~ACL_START;
2235 skb = list; list = list->next;
2237 skb->dev = (void *) hdev;
2238 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2239 hci_add_acl_hdr(skb, conn->handle, flags);
2241 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2243 __skb_queue_tail(queue, skb);
2246 spin_unlock(&queue->lock);
/* Upper-protocol entry point for sending ACL data on an HCI channel: tag
 * the skb, add the ACL header, queue it on the channel's data queue and
 * kick the TX worker. */
2250 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2252 struct hci_conn *conn = chan->conn;
2253 struct hci_dev *hdev = conn->hdev;
2255 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2257 skb->dev = (void *) hdev;
2258 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2259 hci_add_acl_hdr(skb, conn->handle, flags);
2261 hci_queue_acl(conn, &chan->data_q, skb, flags);
2263 queue_work(hdev->workqueue, &hdev->tx_work);
2265 EXPORT_SYMBOL(hci_send_acl);
/* Upper-protocol entry point for sending SCO data: prepend the SCO header
 * (handle + length), tag the skb, queue it on the connection's data queue
 * and kick the TX worker. */
2268 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2270 struct hci_dev *hdev = conn->hdev;
2271 struct hci_sco_hdr hdr;
2273 BT_DBG("%s len %d", hdev->name, skb->len);
2275 hdr.handle = cpu_to_le16(conn->handle);
2276 hdr.dlen = skb->len;
2278 skb_push(skb, HCI_SCO_HDR_SIZE);
2279 skb_reset_transport_header(skb);
2280 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2282 skb->dev = (void *) hdev;
2283 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2285 skb_queue_tail(&conn->data_q, skb);
2286 queue_work(hdev->workqueue, &hdev->tx_work);
2288 EXPORT_SYMBOL(hci_send_sco);
2290 /* ---- HCI TX task (outgoing data) ---- */
2292 /* HCI Connection scheduler */
/* Pick the connection of @type with queued data and the fewest in-flight
 * packets (fair round-robin), and compute *quote -- how many packets it may
 * send this round given the controller's free buffer count. */
2293 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2295 struct hci_conn_hash *h = &hdev->conn_hash;
2296 struct hci_conn *conn = NULL, *c;
2297 int num = 0, min = ~0;
2299 /* We don't have to lock device here. Connections are always
2300 * added and removed with TX task disabled. */
2304 list_for_each_entry_rcu(c, &h->list, list) {
2305 if (c->type != type || skb_queue_empty(&c->data_q))
2308 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the connection with the fewest unacked packets. */
2313 if (c->sent < min) {
2318 if (hci_conn_num(hdev, type) == num)
/* Select the buffer pool matching the link type. */
2327 switch (conn->type) {
2329 cnt = hdev->acl_cnt;
2333 cnt = hdev->sco_cnt;
/* LE falls back to the ACL pool when no dedicated LE buffers exist. */
2336 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2340 BT_ERR("Unknown link type");
2348 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handler: disconnect every connection of @type that still has
 * unacknowledged packets, on the assumption the link has stalled. */
2352 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2354 struct hci_conn_hash *h = &hdev->conn_hash;
2357 BT_ERR("%s link tx timeout", hdev->name);
2361 /* Kill stalled connections */
2362 list_for_each_entry_rcu(c, &h->list, list) {
2363 if (c->type == type && c->sent) {
2364 BT_ERR("%s killing stalled connection %s",
2365 hdev->name, batostr(&c->dst));
/* 0x13: remote user terminated connection (HCI reason code). */
2366 hci_acl_disconn(c, 0x13);
/* Channel scheduler: across all connections of @type, pick the channel
 * whose head skb has the highest priority, breaking ties toward the
 * connection with the fewest in-flight packets; compute *quote from the
 * matching controller buffer pool. */
2373 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2376 struct hci_conn_hash *h = &hdev->conn_hash;
2377 struct hci_chan *chan = NULL;
2378 int num = 0, min = ~0, cur_prio = 0;
2379 struct hci_conn *conn;
2380 int cnt, q, conn_num = 0;
2382 BT_DBG("%s", hdev->name);
2386 list_for_each_entry_rcu(conn, &h->list, list) {
2387 struct hci_chan *tmp;
2389 if (conn->type != type)
2392 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2397 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2398 struct sk_buff *skb;
2400 if (skb_queue_empty(&tmp->data_q))
/* Only the head skb's priority matters for scheduling. */
2403 skb = skb_peek(&tmp->data_q);
2404 if (skb->priority < cur_prio)
/* Higher priority found: restart fairness tracking at this level. */
2407 if (skb->priority > cur_prio) {
2410 cur_prio = skb->priority;
2415 if (conn->sent < min) {
2421 if (hci_conn_num(hdev, type) == conn_num)
2430 switch (chan->conn->type) {
2432 cnt = hdev->acl_cnt;
2436 cnt = hdev->sco_cnt;
/* LE shares the ACL pool when the controller has no LE buffers. */
2439 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2443 BT_ERR("Unknown link type");
2448 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass run after a scheduling round: promote the head skb
 * of waiting channels toward HCI_PRIO_MAX - 1 so low-priority traffic
 * eventually gets a turn. */
2452 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2454 struct hci_conn_hash *h = &hdev->conn_hash;
2455 struct hci_conn *conn;
2458 BT_DBG("%s", hdev->name);
2462 list_for_each_entry_rcu(conn, &h->list, list) {
2463 struct hci_chan *chan;
2465 if (conn->type != type)
2468 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2473 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2474 struct sk_buff *skb;
2481 if (skb_queue_empty(&chan->data_q))
2484 skb = skb_peek(&chan->data_q);
/* Already at the promotion ceiling: nothing to do. */
2485 if (skb->priority >= HCI_PRIO_MAX - 1)
2488 skb->priority = HCI_PRIO_MAX - 1;
2490 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2494 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks consumed by an ACL packet, for
 * block-based flow control (payload bytes divided by block_len, rounded
 * up). */
2502 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2504 /* Calculate count of blocks used by this packet */
2505 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/* If the controller has reported no free buffers (@cnt == 0) for longer
 * than the ACL TX timeout, assume a stalled link and kill stalled ACL
 * connections.  Skipped entirely for raw-mode devices. */
2508 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2510 if (!test_bit(HCI_RAW, &hdev->flags)) {
2511 /* ACL tx timeout must be longer than maximum
2512 * link supervision timeout (40.9 seconds) */
2513 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2514 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2515 hci_link_tx_to(hdev, ACL_LINK);
/* Packet-based ACL scheduler: while controller ACL buffers remain, pick
 * the best channel via hci_chan_sent() and drain up to its quote of
 * same-or-higher-priority packets; afterwards run the priority
 * recalculation if anything was sent. */
2519 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2521 unsigned int cnt = hdev->acl_cnt;
2522 struct hci_chan *chan;
2523 struct sk_buff *skb;
2526 __check_timeout(hdev, cnt);
2528 while (hdev->acl_cnt &&
/* Fixed: the third argument had been corrupted to the mojibake ""e"
 * (an HTML-entity mangling of "&quote"); restore "&quote" so the
 * scheduler receives its per-round packet quota. */
2529 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2530 u32 priority = (skb_peek(&chan->data_q))->priority;
2531 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2532 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2533 skb->len, skb->priority);
2535 /* Stop if priority has changed */
2536 if (skb->priority < priority)
2539 skb = skb_dequeue(&chan->data_q);
2541 hci_conn_enter_active_mode(chan->conn,
2542 bt_cb(skb)->force_active);
2544 hci_send_frame(skb);
2545 hdev->acl_last_tx = jiffies;
/* Something was sent: rebalance channel priorities. */
2553 if (cnt != hdev->acl_cnt)
2554 hci_prio_recalculate(hdev, ACL_LINK);
/* Block-based ACL scheduler (flow control in data blocks rather than
 * packets): like hci_sched_acl_pkt() but charges each packet the number of
 * controller blocks it occupies (__get_blocks) and stops when a packet
 * would not fit in the remaining block budget. */
2557 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2559 unsigned int cnt = hdev->block_cnt;
2560 struct hci_chan *chan;
2561 struct sk_buff *skb;
2564 __check_timeout(hdev, cnt);
2566 while (hdev->block_cnt > 0 &&
/* Fixed: ""e" mojibake (HTML-entity mangling of "&quote") restored. */
2567 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2568 u32 priority = (skb_peek(&chan->data_q))->priority;
2569 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2572 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2573 skb->len, skb->priority);
2575 /* Stop if priority has changed */
2576 if (skb->priority < priority)
2579 skb = skb_dequeue(&chan->data_q);
2581 blocks = __get_blocks(hdev, skb);
/* Packet larger than the remaining block budget: stop this round. */
2582 if (blocks > hdev->block_cnt)
2585 hci_conn_enter_active_mode(chan->conn,
2586 bt_cb(skb)->force_active);
2588 hci_send_frame(skb);
2589 hdev->acl_last_tx = jiffies;
2591 hdev->block_cnt -= blocks;
/* Account in blocks, not packets, for block-based flow control. */
2594 chan->sent += blocks;
2595 chan->conn->sent += blocks;
2599 if (cnt != hdev->block_cnt)
2600 hci_prio_recalculate(hdev, ACL_LINK);
/* Dispatch ACL scheduling to the packet- or block-based variant according
 * to the controller's reported flow-control mode.  No-op when there are no
 * ACL connections. */
2603 static inline void hci_sched_acl(struct hci_dev *hdev)
2605 BT_DBG("%s", hdev->name);
2607 if (!hci_conn_num(hdev, ACL_LINK))
2610 switch (hdev->flow_ctl_mode) {
2611 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2612 hci_sched_acl_pkt(hdev);
2615 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2616 hci_sched_acl_blk(hdev);
/* SCO scheduler: while SCO buffers remain, pick the least-loaded SCO
 * connection via hci_low_sent() and drain up to its quote of queued
 * frames. */
2622 static inline void hci_sched_sco(struct hci_dev *hdev)
2624 struct hci_conn *conn;
2625 struct sk_buff *skb;
2628 BT_DBG("%s", hdev->name);
2630 if (!hci_conn_num(hdev, SCO_LINK))
/* Fixed: ""e" mojibake (HTML-entity mangling of "&quote") restored. */
2633 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2634 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2635 BT_DBG("skb %p len %d", skb, skb->len);
2636 hci_send_frame(skb);
/* Guard the per-connection sent counter against wrap-around. */
2639 if (conn->sent == ~0)
/* eSCO scheduler: identical to hci_sched_sco() but iterates ESCO_LINK
 * connections; eSCO shares the SCO buffer count (sco_cnt). */
2645 static inline void hci_sched_esco(struct hci_dev *hdev)
2647 struct hci_conn *conn;
2648 struct sk_buff *skb;
2651 BT_DBG("%s", hdev->name);
2653 if (!hci_conn_num(hdev, ESCO_LINK))
/* Fixed: ""e" mojibake (HTML-entity mangling of "&quote") restored. */
2656 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2657 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2658 BT_DBG("skb %p len %d", skb, skb->len);
2659 hci_send_frame(skb);
/* Guard the per-connection sent counter against wrap-around. */
2662 if (conn->sent == ~0)
/* LE scheduler: like the ACL packet scheduler but drawing from the LE
 * buffer pool (falling back to the ACL pool on controllers without
 * dedicated LE buffers), with its own 45-second stall timeout. */
2668 static inline void hci_sched_le(struct hci_dev *hdev)
2670 struct hci_chan *chan;
2671 struct sk_buff *skb;
2672 int quote, cnt, tmp;
2674 BT_DBG("%s", hdev->name);
2676 if (!hci_conn_num(hdev, LE_LINK))
2679 if (!test_bit(HCI_RAW, &hdev->flags)) {
2680 /* LE tx timeout must be longer than maximum
2681 * link supervision timeout (40.9 seconds) */
2682 if (!hdev->le_cnt && hdev->le_pkts &&
2683 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2684 hci_link_tx_to(hdev, LE_LINK);
/* Shared ACL pool when the controller reports no LE buffers. */
2687 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
/* Fixed: ""e" mojibake (HTML-entity mangling of "&quote") restored. */
2689 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2690 u32 priority = (skb_peek(&chan->data_q))->priority;
2691 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2692 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2693 skb->len, skb->priority);
2695 /* Stop if priority has changed */
2696 if (skb->priority < priority)
2699 skb = skb_dequeue(&chan->data_q);
2701 hci_send_frame(skb);
2702 hdev->le_last_tx = jiffies;
/* When sharing the ACL pool, write the remaining count back. */
2713 hdev->acl_cnt = cnt;
2716 hci_prio_recalculate(hdev, LE_LINK);
/* TX worker: run each link-type scheduler in turn (ACL, SCO, eSCO, LE),
 * then flush any raw/unknown-type packets straight to the driver. */
2719 static void hci_tx_work(struct work_struct *work)
2721 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2722 struct sk_buff *skb;
2724 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2725 hdev->sco_cnt, hdev->le_cnt);
2727 /* Schedule queues and send stuff to HCI driver */
2729 hci_sched_acl(hdev);
2731 hci_sched_sco(hdev);
2733 hci_sched_esco(hdev);
2737 /* Send next queued raw (unknown type) packet */
2738 while ((skb = skb_dequeue(&hdev->raw_q)))
2739 hci_send_frame(skb);
2742 /* ----- HCI RX task (incoming data processing) ----- */
2744 /* ACL data packet */
/* Parse an incoming ACL packet: strip the header, split the 16-bit field
 * into handle and flags, look up the owning connection and pass the
 * payload up to L2CAP; unknown handles are logged and dropped. */
2745 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2747 struct hci_acl_hdr *hdr = (void *) skb->data;
2748 struct hci_conn *conn;
2749 __u16 handle, flags;
2751 skb_pull(skb, HCI_ACL_HDR_SIZE);
/* The wire field packs flags into the upper bits of the handle. */
2753 handle = __le16_to_cpu(hdr->handle);
2754 flags = hci_flags(handle);
2755 handle = hci_handle(handle);
2757 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2759 hdev->stat.acl_rx++;
2762 conn = hci_conn_hash_lookup_handle(hdev, handle);
2763 hci_dev_unlock(hdev);
/* Incoming traffic keeps the link out of sniff mode. */
2766 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2768 /* Send to upper protocol */
2769 l2cap_recv_acldata(conn, skb, flags);
2772 BT_ERR("%s ACL packet for unknown connection handle %d",
2773 hdev->name, handle);
2779 /* SCO data packet */
/* Parse an incoming SCO packet: strip the header, look up the connection
 * by handle and pass the payload up to the SCO layer; unknown handles are
 * logged and dropped. */
2780 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2782 struct hci_sco_hdr *hdr = (void *) skb->data;
2783 struct hci_conn *conn;
2786 skb_pull(skb, HCI_SCO_HDR_SIZE);
2788 handle = __le16_to_cpu(hdr->handle);
2790 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2792 hdev->stat.sco_rx++;
2795 conn = hci_conn_hash_lookup_handle(hdev, handle);
2796 hci_dev_unlock(hdev);
2799 /* Send to upper protocol */
2800 sco_recv_scodata(conn, skb);
2803 BT_ERR("%s SCO packet for unknown connection handle %d",
2804 hdev->name, handle);
/* RX worker: drain rx_q, mirroring each packet to the monitor (and raw
 * sockets in promiscuous mode), then dispatch by packet type to the event,
 * ACL or SCO handler.  Data packets are dropped while the device is in
 * INIT, and raw-mode devices bypass processing entirely. */
2810 static void hci_rx_work(struct work_struct *work)
2812 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2813 struct sk_buff *skb;
2815 BT_DBG("%s", hdev->name);
2817 while ((skb = skb_dequeue(&hdev->rx_q))) {
2818 /* Send copy to monitor */
2819 hci_send_to_monitor(hdev, skb);
2821 if (atomic_read(&hdev->promisc)) {
2822 /* Send copy to the sockets */
2823 hci_send_to_sock(hdev, skb);
2826 if (test_bit(HCI_RAW, &hdev->flags)) {
2831 if (test_bit(HCI_INIT, &hdev->flags)) {
2832 /* Don't process data packets in this states. */
2833 switch (bt_cb(skb)->pkt_type) {
2834 case HCI_ACLDATA_PKT:
2835 case HCI_SCODATA_PKT:
2842 switch (bt_cb(skb)->pkt_type) {
2844 BT_DBG("%s Event packet", hdev->name);
2845 hci_event_packet(hdev, skb);
2848 case HCI_ACLDATA_PKT:
2849 BT_DBG("%s ACL data packet", hdev->name);
2850 hci_acldata_packet(hdev, skb);
2853 case HCI_SCODATA_PKT:
2854 BT_DBG("%s SCO data packet", hdev->name);
2855 hci_scodata_packet(hdev, skb);
/* Command worker: when the controller has command credits (cmd_cnt),
 * dequeue the next command, keep a clone in sent_cmd for reply matching,
 * send it, and manage the command timeout timer.  If cloning fails the
 * command is requeued and the worker rescheduled. */
2865 static void hci_cmd_work(struct work_struct *work)
2867 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2868 struct sk_buff *skb;
2870 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2872 /* Send queued commands */
2873 if (atomic_read(&hdev->cmd_cnt)) {
2874 skb = skb_dequeue(&hdev->cmd_q);
/* Replace the previous sent_cmd clone, if any. */
2878 kfree_skb(hdev->sent_cmd);
2880 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2881 if (hdev->sent_cmd) {
2882 atomic_dec(&hdev->cmd_cnt);
2883 hci_send_frame(skb);
/* During reset no timeout is armed; otherwise (re)start it. */
2884 if (test_bit(HCI_RESET, &hdev->flags))
2885 del_timer(&hdev->cmd_timer);
2887 mod_timer(&hdev->cmd_timer,
2888 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
/* Clone failed: push the command back and try again later. */
2890 skb_queue_head(&hdev->cmd_q, skb);
2891 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Start a BR/EDR inquiry of @length (units of 1.28 s) using the General
 * Inquiry Access Code, after flushing the inquiry cache.  Returns
 * -EINPROGRESS if an inquiry is already running. */
2896 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2898 /* General inquiry access code (GIAC) */
2899 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2900 struct hci_cp_inquiry cp;
2902 BT_DBG("%s", hdev->name);
2904 if (test_bit(HCI_INQUIRY, &hdev->flags))
2905 return -EINPROGRESS;
/* Stale results must not leak into the new discovery session. */
2907 inquiry_cache_flush(hdev);
2909 memset(&cp, 0, sizeof(cp));
2910 memcpy(&cp.lap, lap, sizeof(cp.lap));
2913 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Cancel a running inquiry by sending HCI Inquiry Cancel; a no-op (beyond
 * the early return) when no inquiry is active. */
2916 int hci_cancel_inquiry(struct hci_dev *hdev)
2918 BT_DBG("%s", hdev->name);
2920 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2923 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);