/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
26 /* Bluetooth HCI core. */
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/notifier.h>
44 #include <linux/rfkill.h>
45 #include <linux/timer.h>
46 #include <linux/crypto.h>
49 #include <asm/system.h>
50 #include <linux/uaccess.h>
51 #include <asm/unaligned.h>
53 #include <net/bluetooth/bluetooth.h>
54 #include <net/bluetooth/hci_core.h>
/* Delay (ms) before an auto-powered-on controller is powered back off */
#define AUTO_OFF_TIMEOUT 2000

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
75 /* ---- HCI notifications ---- */
77 int hci_register_notifier(struct notifier_block *nb)
79 return atomic_notifier_chain_register(&hci_notifier, nb);
82 int hci_unregister_notifier(struct notifier_block *nb)
84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
87 static void hci_notify(struct hci_dev *hdev, int event)
89 atomic_notifier_call_chain(&hci_notifier, event, hdev);
92 /* ---- HCI requests ---- */
94 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
96 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
98 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return.
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
111 static void hci_req_cancel(struct hci_dev *hdev, int err)
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
122 /* Execute request and wait for completion. */
123 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
124 unsigned long opt, __u32 timeout)
126 DECLARE_WAITQUEUE(wait, current);
129 BT_DBG("%s start", hdev->name);
131 hdev->req_status = HCI_REQ_PEND;
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
137 schedule_timeout(timeout);
139 remove_wait_queue(&hdev->req_wait_q, &wait);
141 if (signal_pending(current))
144 switch (hdev->req_status) {
146 err = -bt_to_errno(hdev->req_result);
149 case HCI_REQ_CANCELED:
150 err = -hdev->req_result;
158 hdev->req_status = hdev->req_result = 0;
160 BT_DBG("%s end: err %d", hdev->name, err);
165 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
166 unsigned long opt, __u32 timeout)
170 if (!test_bit(HCI_UP, &hdev->flags))
173 /* Serialize all requests */
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
181 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
183 BT_DBG("%s %ld", hdev->name, opt);
186 set_bit(HCI_RESET, &hdev->flags);
187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
190 static void bredr_init(struct hci_dev *hdev)
192 struct hci_cp_delete_stored_link_key cp;
196 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
198 /* Mandatory initialization */
201 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
202 set_bit(HCI_RESET, &hdev->flags);
203 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
206 /* Read Local Supported Features */
207 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
209 /* Read Local Version */
210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
213 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
215 /* Read BD Address */
216 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
218 /* Read Class of Device */
219 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
221 /* Read Local Name */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
224 /* Read Voice Setting */
225 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
227 /* Optional initialization */
229 /* Clear Event Filters */
230 flt_type = HCI_FLT_CLEAR_ALL;
231 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
233 /* Connection accept timeout ~20 secs */
234 param = cpu_to_le16(0x7d00);
235 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
237 bacpy(&cp.bdaddr, BDADDR_ANY);
239 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
242 static void amp_init(struct hci_dev *hdev)
244 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
247 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
249 /* Read Local Version */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
253 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
257 BT_DBG("%s %ld", hdev->name, opt);
259 /* Driver initialization */
261 /* Special commands */
262 while ((skb = skb_dequeue(&hdev->driver_init))) {
263 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
264 skb->dev = (void *) hdev;
266 skb_queue_tail(&hdev->cmd_q, skb);
267 queue_work(hdev->workqueue, &hdev->cmd_work);
269 skb_queue_purge(&hdev->driver_init);
271 switch (hdev->dev_type) {
281 BT_ERR("Unknown device type %d", hdev->dev_type);
287 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
289 BT_DBG("%s", hdev->name);
291 /* Read LE buffer size */
292 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
295 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
299 BT_DBG("%s %x", hdev->name, scan);
301 /* Inquiry and Page scans */
302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
305 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
309 BT_DBG("%s %x", hdev->name, auth);
312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
315 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
319 BT_DBG("%s %x", hdev->name, encrypt);
322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
325 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
327 __le16 policy = cpu_to_le16(opt);
329 BT_DBG("%s %x", hdev->name, policy);
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
335 /* Get HCI device by index.
336 * Device is held on return. */
337 struct hci_dev *hci_dev_get(int index)
339 struct hci_dev *hdev = NULL, *d;
346 read_lock(&hci_dev_list_lock);
347 list_for_each_entry(d, &hci_dev_list, list) {
348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
353 read_unlock(&hci_dev_list_lock);
357 /* ---- Inquiry support ---- */
359 bool hci_discovery_active(struct hci_dev *hdev)
361 struct discovery_state *discov = &hdev->discovery;
363 if (discov->state == DISCOVERY_INQUIRY ||
364 discov->state == DISCOVERY_RESOLVING)
370 void hci_discovery_set_state(struct hci_dev *hdev, int state)
372 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
374 if (hdev->discovery.state == state)
378 case DISCOVERY_STOPPED:
379 mgmt_discovering(hdev, 0);
381 case DISCOVERY_STARTING:
383 case DISCOVERY_INQUIRY:
384 mgmt_discovering(hdev, 1);
386 case DISCOVERY_RESOLVING:
388 case DISCOVERY_STOPPING:
392 hdev->discovery.state = state;
395 static void inquiry_cache_flush(struct hci_dev *hdev)
397 struct discovery_state *cache = &hdev->discovery;
398 struct inquiry_entry *p, *n;
400 list_for_each_entry_safe(p, n, &cache->all, all) {
405 INIT_LIST_HEAD(&cache->unknown);
406 INIT_LIST_HEAD(&cache->resolve);
407 cache->state = DISCOVERY_STOPPED;
410 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
412 struct discovery_state *cache = &hdev->discovery;
413 struct inquiry_entry *e;
415 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
417 list_for_each_entry(e, &cache->all, all) {
418 if (!bacmp(&e->data.bdaddr, bdaddr))
425 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
428 struct discovery_state *cache = &hdev->discovery;
429 struct inquiry_entry *e;
431 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
433 list_for_each_entry(e, &cache->unknown, list) {
434 if (!bacmp(&e->data.bdaddr, bdaddr))
441 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
445 struct discovery_state *cache = &hdev->discovery;
446 struct inquiry_entry *e;
448 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
450 list_for_each_entry(e, &cache->resolve, list) {
451 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
453 if (!bacmp(&e->data.bdaddr, bdaddr))
460 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
461 struct inquiry_entry *ie)
463 struct discovery_state *cache = &hdev->discovery;
464 struct list_head *pos = &cache->resolve;
465 struct inquiry_entry *p;
469 list_for_each_entry(p, &cache->resolve, list) {
470 if (p->name_state != NAME_PENDING &&
471 abs(p->data.rssi) >= abs(ie->data.rssi))
476 list_add(&ie->list, pos);
479 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
482 struct discovery_state *cache = &hdev->discovery;
483 struct inquiry_entry *ie;
485 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
487 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
489 if (ie->name_state == NAME_NEEDED &&
490 data->rssi != ie->data.rssi) {
491 ie->data.rssi = data->rssi;
492 hci_inquiry_cache_update_resolve(hdev, ie);
498 /* Entry not in the cache. Add new one. */
499 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
503 list_add(&ie->all, &cache->all);
506 ie->name_state = NAME_KNOWN;
508 ie->name_state = NAME_NOT_KNOWN;
509 list_add(&ie->list, &cache->unknown);
513 if (name_known && ie->name_state != NAME_KNOWN &&
514 ie->name_state != NAME_PENDING) {
515 ie->name_state = NAME_KNOWN;
519 memcpy(&ie->data, data, sizeof(*data));
520 ie->timestamp = jiffies;
521 cache->timestamp = jiffies;
523 if (ie->name_state == NAME_NOT_KNOWN)
529 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
531 struct discovery_state *cache = &hdev->discovery;
532 struct inquiry_info *info = (struct inquiry_info *) buf;
533 struct inquiry_entry *e;
536 list_for_each_entry(e, &cache->all, all) {
537 struct inquiry_data *data = &e->data;
542 bacpy(&info->bdaddr, &data->bdaddr);
543 info->pscan_rep_mode = data->pscan_rep_mode;
544 info->pscan_period_mode = data->pscan_period_mode;
545 info->pscan_mode = data->pscan_mode;
546 memcpy(info->dev_class, data->dev_class, 3);
547 info->clock_offset = data->clock_offset;
553 BT_DBG("cache %p, copied %d", cache, copied);
557 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
559 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
560 struct hci_cp_inquiry cp;
562 BT_DBG("%s", hdev->name);
564 if (test_bit(HCI_INQUIRY, &hdev->flags))
568 memcpy(&cp.lap, &ir->lap, 3);
569 cp.length = ir->length;
570 cp.num_rsp = ir->num_rsp;
571 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
574 int hci_inquiry(void __user *arg)
576 __u8 __user *ptr = arg;
577 struct hci_inquiry_req ir;
578 struct hci_dev *hdev;
579 int err = 0, do_inquiry = 0, max_rsp;
583 if (copy_from_user(&ir, ptr, sizeof(ir)))
586 hdev = hci_dev_get(ir.dev_id);
591 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
592 inquiry_cache_empty(hdev) ||
593 ir.flags & IREQ_CACHE_FLUSH) {
594 inquiry_cache_flush(hdev);
597 hci_dev_unlock(hdev);
599 timeo = ir.length * msecs_to_jiffies(2000);
602 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
607 /* for unlimited number of responses we will use buffer with 255 entries */
608 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
610 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
611 * copy it to the user space.
613 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
620 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
621 hci_dev_unlock(hdev);
623 BT_DBG("num_rsp %d", ir.num_rsp);
625 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
627 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
640 /* ---- HCI ioctl helpers ---- */
642 int hci_dev_open(__u16 dev)
644 struct hci_dev *hdev;
647 hdev = hci_dev_get(dev);
651 BT_DBG("%s %p", hdev->name, hdev);
655 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
660 if (test_bit(HCI_UP, &hdev->flags)) {
665 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
666 set_bit(HCI_RAW, &hdev->flags);
668 /* Treat all non BR/EDR controllers as raw devices if
669 enable_hs is not set */
670 if (hdev->dev_type != HCI_BREDR && !enable_hs)
671 set_bit(HCI_RAW, &hdev->flags);
673 if (hdev->open(hdev)) {
678 if (!test_bit(HCI_RAW, &hdev->flags)) {
679 atomic_set(&hdev->cmd_cnt, 1);
680 set_bit(HCI_INIT, &hdev->flags);
681 hdev->init_last_cmd = 0;
683 ret = __hci_request(hdev, hci_init_req, 0,
684 msecs_to_jiffies(HCI_INIT_TIMEOUT));
686 if (lmp_host_le_capable(hdev))
687 ret = __hci_request(hdev, hci_le_init_req, 0,
688 msecs_to_jiffies(HCI_INIT_TIMEOUT));
690 clear_bit(HCI_INIT, &hdev->flags);
695 set_bit(HCI_UP, &hdev->flags);
696 hci_notify(hdev, HCI_DEV_UP);
697 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
699 mgmt_powered(hdev, 1);
700 hci_dev_unlock(hdev);
703 /* Init failed, cleanup */
704 flush_work(&hdev->tx_work);
705 flush_work(&hdev->cmd_work);
706 flush_work(&hdev->rx_work);
708 skb_queue_purge(&hdev->cmd_q);
709 skb_queue_purge(&hdev->rx_q);
714 if (hdev->sent_cmd) {
715 kfree_skb(hdev->sent_cmd);
716 hdev->sent_cmd = NULL;
724 hci_req_unlock(hdev);
729 static int hci_dev_do_close(struct hci_dev *hdev)
731 BT_DBG("%s %p", hdev->name, hdev);
733 hci_req_cancel(hdev, ENODEV);
736 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
737 del_timer_sync(&hdev->cmd_timer);
738 hci_req_unlock(hdev);
742 /* Flush RX and TX works */
743 flush_work(&hdev->tx_work);
744 flush_work(&hdev->rx_work);
746 if (hdev->discov_timeout > 0) {
747 cancel_delayed_work(&hdev->discov_off);
748 hdev->discov_timeout = 0;
751 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
752 cancel_delayed_work(&hdev->power_off);
754 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
755 cancel_delayed_work(&hdev->service_cache);
758 inquiry_cache_flush(hdev);
759 hci_conn_hash_flush(hdev);
760 hci_dev_unlock(hdev);
762 hci_notify(hdev, HCI_DEV_DOWN);
768 skb_queue_purge(&hdev->cmd_q);
769 atomic_set(&hdev->cmd_cnt, 1);
770 if (!test_bit(HCI_RAW, &hdev->flags)) {
771 set_bit(HCI_INIT, &hdev->flags);
772 __hci_request(hdev, hci_reset_req, 0,
773 msecs_to_jiffies(250));
774 clear_bit(HCI_INIT, &hdev->flags);
778 flush_work(&hdev->cmd_work);
781 skb_queue_purge(&hdev->rx_q);
782 skb_queue_purge(&hdev->cmd_q);
783 skb_queue_purge(&hdev->raw_q);
785 /* Drop last sent command */
786 if (hdev->sent_cmd) {
787 del_timer_sync(&hdev->cmd_timer);
788 kfree_skb(hdev->sent_cmd);
789 hdev->sent_cmd = NULL;
792 /* After this point our queues are empty
793 * and no tasks are scheduled. */
797 mgmt_powered(hdev, 0);
798 hci_dev_unlock(hdev);
803 hci_req_unlock(hdev);
809 int hci_dev_close(__u16 dev)
811 struct hci_dev *hdev;
814 hdev = hci_dev_get(dev);
817 err = hci_dev_do_close(hdev);
822 int hci_dev_reset(__u16 dev)
824 struct hci_dev *hdev;
827 hdev = hci_dev_get(dev);
833 if (!test_bit(HCI_UP, &hdev->flags))
837 skb_queue_purge(&hdev->rx_q);
838 skb_queue_purge(&hdev->cmd_q);
841 inquiry_cache_flush(hdev);
842 hci_conn_hash_flush(hdev);
843 hci_dev_unlock(hdev);
848 atomic_set(&hdev->cmd_cnt, 1);
849 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
851 if (!test_bit(HCI_RAW, &hdev->flags))
852 ret = __hci_request(hdev, hci_reset_req, 0,
853 msecs_to_jiffies(HCI_INIT_TIMEOUT));
856 hci_req_unlock(hdev);
861 int hci_dev_reset_stat(__u16 dev)
863 struct hci_dev *hdev;
866 hdev = hci_dev_get(dev);
870 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
877 int hci_dev_cmd(unsigned int cmd, void __user *arg)
879 struct hci_dev *hdev;
880 struct hci_dev_req dr;
883 if (copy_from_user(&dr, arg, sizeof(dr)))
886 hdev = hci_dev_get(dr.dev_id);
892 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
893 msecs_to_jiffies(HCI_INIT_TIMEOUT));
897 if (!lmp_encrypt_capable(hdev)) {
902 if (!test_bit(HCI_AUTH, &hdev->flags)) {
903 /* Auth must be enabled first */
904 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
905 msecs_to_jiffies(HCI_INIT_TIMEOUT));
910 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
911 msecs_to_jiffies(HCI_INIT_TIMEOUT));
915 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
916 msecs_to_jiffies(HCI_INIT_TIMEOUT));
920 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
921 msecs_to_jiffies(HCI_INIT_TIMEOUT));
925 hdev->link_mode = ((__u16) dr.dev_opt) &
926 (HCI_LM_MASTER | HCI_LM_ACCEPT);
930 hdev->pkt_type = (__u16) dr.dev_opt;
934 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
935 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
939 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
940 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
952 int hci_get_dev_list(void __user *arg)
954 struct hci_dev *hdev;
955 struct hci_dev_list_req *dl;
956 struct hci_dev_req *dr;
957 int n = 0, size, err;
960 if (get_user(dev_num, (__u16 __user *) arg))
963 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
966 size = sizeof(*dl) + dev_num * sizeof(*dr);
968 dl = kzalloc(size, GFP_KERNEL);
974 read_lock(&hci_dev_list_lock);
975 list_for_each_entry(hdev, &hci_dev_list, list) {
976 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
977 cancel_delayed_work(&hdev->power_off);
979 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
980 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
982 (dr + n)->dev_id = hdev->id;
983 (dr + n)->dev_opt = hdev->flags;
988 read_unlock(&hci_dev_list_lock);
991 size = sizeof(*dl) + n * sizeof(*dr);
993 err = copy_to_user(arg, dl, size);
996 return err ? -EFAULT : 0;
999 int hci_get_dev_info(void __user *arg)
1001 struct hci_dev *hdev;
1002 struct hci_dev_info di;
1005 if (copy_from_user(&di, arg, sizeof(di)))
1008 hdev = hci_dev_get(di.dev_id);
1012 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1013 cancel_delayed_work_sync(&hdev->power_off);
1015 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1016 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1018 strcpy(di.name, hdev->name);
1019 di.bdaddr = hdev->bdaddr;
1020 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1021 di.flags = hdev->flags;
1022 di.pkt_type = hdev->pkt_type;
1023 di.acl_mtu = hdev->acl_mtu;
1024 di.acl_pkts = hdev->acl_pkts;
1025 di.sco_mtu = hdev->sco_mtu;
1026 di.sco_pkts = hdev->sco_pkts;
1027 di.link_policy = hdev->link_policy;
1028 di.link_mode = hdev->link_mode;
1030 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1031 memcpy(&di.features, &hdev->features, sizeof(di.features));
1033 if (copy_to_user(arg, &di, sizeof(di)))
1041 /* ---- Interface to HCI drivers ---- */
1043 static int hci_rfkill_set_block(void *data, bool blocked)
1045 struct hci_dev *hdev = data;
1047 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1052 hci_dev_do_close(hdev);
1057 static const struct rfkill_ops hci_rfkill_ops = {
1058 .set_block = hci_rfkill_set_block,
1061 /* Alloc HCI device */
1062 struct hci_dev *hci_alloc_dev(void)
1064 struct hci_dev *hdev;
1066 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1070 hci_init_sysfs(hdev);
1071 skb_queue_head_init(&hdev->driver_init);
1075 EXPORT_SYMBOL(hci_alloc_dev);
1077 /* Free HCI device */
1078 void hci_free_dev(struct hci_dev *hdev)
1080 skb_queue_purge(&hdev->driver_init);
1082 /* will free via device release */
1083 put_device(&hdev->dev);
1085 EXPORT_SYMBOL(hci_free_dev);
1087 static void hci_power_on(struct work_struct *work)
1089 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1091 BT_DBG("%s", hdev->name);
1093 if (hci_dev_open(hdev->id) < 0)
1096 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1097 schedule_delayed_work(&hdev->power_off,
1098 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1100 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1101 mgmt_index_added(hdev);
1104 static void hci_power_off(struct work_struct *work)
1106 struct hci_dev *hdev = container_of(work, struct hci_dev,
1109 BT_DBG("%s", hdev->name);
1111 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1113 hci_dev_close(hdev->id);
1116 static void hci_discov_off(struct work_struct *work)
1118 struct hci_dev *hdev;
1119 u8 scan = SCAN_PAGE;
1121 hdev = container_of(work, struct hci_dev, discov_off.work);
1123 BT_DBG("%s", hdev->name);
1127 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1129 hdev->discov_timeout = 0;
1131 hci_dev_unlock(hdev);
1134 int hci_uuids_clear(struct hci_dev *hdev)
1136 struct list_head *p, *n;
1138 list_for_each_safe(p, n, &hdev->uuids) {
1139 struct bt_uuid *uuid;
1141 uuid = list_entry(p, struct bt_uuid, list);
1150 int hci_link_keys_clear(struct hci_dev *hdev)
1152 struct list_head *p, *n;
1154 list_for_each_safe(p, n, &hdev->link_keys) {
1155 struct link_key *key;
1157 key = list_entry(p, struct link_key, list);
1166 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1170 list_for_each_entry(k, &hdev->link_keys, list)
1171 if (bacmp(bdaddr, &k->bdaddr) == 0)
1177 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1178 u8 key_type, u8 old_key_type)
1181 if (key_type < 0x03)
1184 /* Debug keys are insecure so don't store them persistently */
1185 if (key_type == HCI_LK_DEBUG_COMBINATION)
1188 /* Changed combination key and there's no previous one */
1189 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1192 /* Security mode 3 case */
1196 /* Neither local nor remote side had no-bonding as requirement */
1197 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1200 /* Local side had dedicated bonding as requirement */
1201 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1204 /* Remote side had dedicated bonding as requirement */
1205 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1208 /* If none of the above criteria match, then don't store the key
1213 struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1217 list_for_each_entry(k, &hdev->link_keys, list) {
1218 struct key_master_id *id;
1220 if (k->type != HCI_LK_SMP_LTK)
1223 if (k->dlen != sizeof(*id))
1226 id = (void *) &k->data;
1227 if (id->ediv == ediv &&
1228 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1234 EXPORT_SYMBOL(hci_find_ltk);
1236 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1237 bdaddr_t *bdaddr, u8 type)
1241 list_for_each_entry(k, &hdev->link_keys, list)
1242 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1247 EXPORT_SYMBOL(hci_find_link_key_type);
1249 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1250 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1252 struct link_key *key, *old_key;
1253 u8 old_key_type, persistent;
1255 old_key = hci_find_link_key(hdev, bdaddr);
1257 old_key_type = old_key->type;
1260 old_key_type = conn ? conn->key_type : 0xff;
1261 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1264 list_add(&key->list, &hdev->link_keys);
1267 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1269 /* Some buggy controller combinations generate a changed
1270 * combination key for legacy pairing even when there's no
1272 if (type == HCI_LK_CHANGED_COMBINATION &&
1273 (!conn || conn->remote_auth == 0xff) &&
1274 old_key_type == 0xff) {
1275 type = HCI_LK_COMBINATION;
1277 conn->key_type = type;
1280 bacpy(&key->bdaddr, bdaddr);
1281 memcpy(key->val, val, 16);
1282 key->pin_len = pin_len;
1284 if (type == HCI_LK_CHANGED_COMBINATION)
1285 key->type = old_key_type;
1292 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1294 mgmt_new_link_key(hdev, key, persistent);
1297 list_del(&key->list);
1304 int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1305 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
1307 struct link_key *key, *old_key;
1308 struct key_master_id *id;
1311 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1313 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1316 old_key_type = old_key->type;
1318 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1321 list_add(&key->list, &hdev->link_keys);
1322 old_key_type = 0xff;
1325 key->dlen = sizeof(*id);
1327 bacpy(&key->bdaddr, bdaddr);
1328 memcpy(key->val, ltk, sizeof(key->val));
1329 key->type = HCI_LK_SMP_LTK;
1330 key->pin_len = key_size;
1332 id = (void *) &key->data;
1334 memcpy(id->rand, rand, sizeof(id->rand));
1337 mgmt_new_link_key(hdev, key, old_key_type);
1342 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1344 struct link_key *key;
1346 key = hci_find_link_key(hdev, bdaddr);
1350 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1352 list_del(&key->list);
1358 /* HCI command timer function */
1359 static void hci_cmd_timer(unsigned long arg)
1361 struct hci_dev *hdev = (void *) arg;
1363 BT_ERR("%s command tx timeout", hdev->name);
1364 atomic_set(&hdev->cmd_cnt, 1);
1365 queue_work(hdev->workqueue, &hdev->cmd_work);
1368 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1371 struct oob_data *data;
1373 list_for_each_entry(data, &hdev->remote_oob_data, list)
1374 if (bacmp(bdaddr, &data->bdaddr) == 0)
1380 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1382 struct oob_data *data;
1384 data = hci_find_remote_oob_data(hdev, bdaddr);
1388 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1390 list_del(&data->list);
1396 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1398 struct oob_data *data, *n;
1400 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1401 list_del(&data->list);
1408 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1411 struct oob_data *data;
1413 data = hci_find_remote_oob_data(hdev, bdaddr);
1416 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1420 bacpy(&data->bdaddr, bdaddr);
1421 list_add(&data->list, &hdev->remote_oob_data);
1424 memcpy(data->hash, hash, sizeof(data->hash));
1425 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1427 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1432 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1435 struct bdaddr_list *b;
1437 list_for_each_entry(b, &hdev->blacklist, list)
1438 if (bacmp(bdaddr, &b->bdaddr) == 0)
1444 int hci_blacklist_clear(struct hci_dev *hdev)
1446 struct list_head *p, *n;
1448 list_for_each_safe(p, n, &hdev->blacklist) {
1449 struct bdaddr_list *b;
1451 b = list_entry(p, struct bdaddr_list, list);
1460 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1462 struct bdaddr_list *entry;
1464 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1467 if (hci_blacklist_lookup(hdev, bdaddr))
1470 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1474 bacpy(&entry->bdaddr, bdaddr);
1476 list_add(&entry->list, &hdev->blacklist);
1478 return mgmt_device_blocked(hdev, bdaddr);
1481 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1483 struct bdaddr_list *entry;
1485 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1486 return hci_blacklist_clear(hdev);
1488 entry = hci_blacklist_lookup(hdev, bdaddr);
1492 list_del(&entry->list);
1495 return mgmt_device_unblocked(hdev, bdaddr);
1498 static void hci_clear_adv_cache(struct work_struct *work)
1500 struct hci_dev *hdev = container_of(work, struct hci_dev,
1505 hci_adv_entries_clear(hdev);
1507 hci_dev_unlock(hdev);
1510 int hci_adv_entries_clear(struct hci_dev *hdev)
1512 struct adv_entry *entry, *tmp;
1514 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1515 list_del(&entry->list);
1519 BT_DBG("%s adv cache cleared", hdev->name);
1524 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1526 struct adv_entry *entry;
1528 list_for_each_entry(entry, &hdev->adv_entries, list)
1529 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1535 static inline int is_connectable_adv(u8 evt_type)
1537 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1543 int hci_add_adv_entry(struct hci_dev *hdev,
1544 struct hci_ev_le_advertising_info *ev)
1546 struct adv_entry *entry;
1548 if (!is_connectable_adv(ev->evt_type))
1551 /* Only new entries should be added to adv_entries. So, if
1552 * bdaddr was found, don't add it. */
1553 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1556 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1560 bacpy(&entry->bdaddr, &ev->bdaddr);
1561 entry->bdaddr_type = ev->bdaddr_type;
1563 list_add(&entry->list, &hdev->adv_entries);
1565 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1566 batostr(&entry->bdaddr), entry->bdaddr_type);
1571 /* Register HCI device */
/*
 * Allocates the next free device id (AMP controllers start at 1 so index 0
 * stays reserved for the BR/EDR controller ID), initializes all per-device
 * state (locks, work items, queues, lists), creates the per-device
 * workqueue, registers sysfs and rfkill, and schedules the initial
 * power-on work.
 *
 * NOTE(review): this chunk is sampled — declarations, returns, error-label
 * lines and closing braces are not visible here; comments below describe
 * only what the visible statements do.
 */
1572 int hci_register_dev(struct hci_dev *hdev)
1574 struct list_head *head = &hci_dev_list, *p;
1577 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* A driver must supply both open() and close() callbacks. */
1579 if (!hdev->open || !hdev->close)
1582 /* Do not allow HCI_AMP devices to register at index 0,
1583 * so the index can be used as the AMP controller ID.
1585 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1587 write_lock(&hci_dev_list_lock);
1589 /* Find first available device id */
1590 list_for_each(p, &hci_dev_list) {
1591 if (list_entry(p, struct hci_dev, list)->id != id)
1596 sprintf(hdev->name, "hci%d", id);
1598 list_add_tail(&hdev->list, head);
1600 mutex_init(&hdev->lock);
/* Default controller parameters before the HCI init sequence runs. */
1603 hdev->dev_flags = 0;
1604 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1605 hdev->esco_type = (ESCO_HV1);
1606 hdev->link_mode = (HCI_LM_ACCEPT);
1607 hdev->io_capability = 0x03; /* No Input No Output */
1609 hdev->idle_timeout = 0;
1610 hdev->sniff_max_interval = 800;
1611 hdev->sniff_min_interval = 80;
/* RX, command and TX processing run from workqueue context. */
1613 INIT_WORK(&hdev->rx_work, hci_rx_work);
1614 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1615 INIT_WORK(&hdev->tx_work, hci_tx_work);
1618 skb_queue_head_init(&hdev->rx_q);
1619 skb_queue_head_init(&hdev->cmd_q);
1620 skb_queue_head_init(&hdev->raw_q);
1622 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1624 for (i = 0; i < NUM_REASSEMBLY; i++)
1625 hdev->reassembly[i] = NULL;
1627 init_waitqueue_head(&hdev->req_wait_q);
1628 mutex_init(&hdev->req_lock);
1630 discovery_init(hdev);
1632 hci_conn_hash_init(hdev);
1634 INIT_LIST_HEAD(&hdev->mgmt_pending);
1636 INIT_LIST_HEAD(&hdev->blacklist);
1638 INIT_LIST_HEAD(&hdev->uuids);
1640 INIT_LIST_HEAD(&hdev->link_keys);
1642 INIT_LIST_HEAD(&hdev->remote_oob_data);
1644 INIT_LIST_HEAD(&hdev->adv_entries);
1646 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1647 INIT_WORK(&hdev->power_on, hci_power_on);
1648 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1650 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1652 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1654 atomic_set(&hdev->promisc, 0);
1656 write_unlock(&hci_dev_list_lock);
/* Dedicated high-priority workqueue named after the device. */
1658 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1660 if (!hdev->workqueue) {
1665 error = hci_add_sysfs(hdev);
/* rfkill registration failure is non-fatal: drop the handle and go on. */
1669 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1670 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1672 if (rfkill_register(hdev->rfkill) < 0) {
1673 rfkill_destroy(hdev->rfkill);
1674 hdev->rfkill = NULL;
/* Device starts in SETUP with auto-off armed; power_on work runs next. */
1678 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1679 set_bit(HCI_SETUP, &hdev->dev_flags);
1680 schedule_work(&hdev->power_on);
1682 hci_notify(hdev, HCI_DEV_REG);
/* Error unwind: destroy workqueue, then unlink from the device list. */
1688 destroy_workqueue(hdev->workqueue);
1690 write_lock(&hci_dev_list_lock);
1691 list_del(&hdev->list);
1692 write_unlock(&hci_dev_list_lock);
1696 EXPORT_SYMBOL(hci_register_dev);
1698 /* Unregister HCI device */
/*
 * Tears down a registered device: unlink from the global list, close it,
 * free pending reassembly buffers, notify mgmt unless still in init/setup,
 * unregister rfkill and sysfs, cancel delayed work, destroy the workqueue,
 * and flush all per-device caches/lists.
 *
 * NOTE(review): sampled chunk — some statements and braces are missing
 * from view.
 */
1699 void hci_unregister_dev(struct hci_dev *hdev)
1703 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1705 write_lock(&hci_dev_list_lock);
1706 list_del(&hdev->list);
1707 write_unlock(&hci_dev_list_lock);
1709 hci_dev_do_close(hdev);
/* Drop any partially reassembled packets still held per slot. */
1711 for (i = 0; i < NUM_REASSEMBLY; i++)
1712 kfree_skb(hdev->reassembly[i]);
/* Only announce removal to mgmt once the device finished init/setup. */
1714 if (!test_bit(HCI_INIT, &hdev->flags) &&
1715 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1717 mgmt_index_removed(hdev);
1718 hci_dev_unlock(hdev);
1721 /* mgmt_index_removed should take care of emptying the
1723 BUG_ON(!list_empty(&hdev->mgmt_pending));
1725 hci_notify(hdev, HCI_DEV_UNREG);
1728 rfkill_unregister(hdev->rfkill);
1729 rfkill_destroy(hdev->rfkill);
1732 hci_del_sysfs(hdev);
1734 cancel_delayed_work_sync(&hdev->adv_work);
1736 destroy_workqueue(hdev->workqueue);
/* Purge all cached per-device state before the device goes away. */
1739 hci_blacklist_clear(hdev);
1740 hci_uuids_clear(hdev);
1741 hci_link_keys_clear(hdev);
1742 hci_remote_oob_data_clear(hdev);
1743 hci_adv_entries_clear(hdev);
1744 hci_dev_unlock(hdev);
1748 EXPORT_SYMBOL(hci_unregister_dev);
1750 /* Suspend HCI device */
/* Broadcast a suspend notification for this device to HCI notifier users. */
1751 int hci_suspend_dev(struct hci_dev *hdev)
1753 hci_notify(hdev, HCI_DEV_SUSPEND);
1756 EXPORT_SYMBOL(hci_suspend_dev);
1758 /* Resume HCI device */
/* Broadcast a resume notification for this device to HCI notifier users. */
1759 int hci_resume_dev(struct hci_dev *hdev)
1761 hci_notify(hdev, HCI_DEV_RESUME);
1764 EXPORT_SYMBOL(hci_resume_dev);
1766 /* Receive frame from HCI drivers */
/*
 * Entry point for transport drivers handing a complete HCI packet to the
 * core. The driver stashes the hci_dev pointer in skb->dev. Frames are
 * only accepted while the device is UP or in INIT; otherwise the visible
 * guard rejects them (rejection path not visible in this sampled chunk).
 */
1767 int hci_recv_frame(struct sk_buff *skb)
1769 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1770 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1771 && !test_bit(HCI_INIT, &hdev->flags))) {
/* Mark direction and timestamp before queueing for the RX worker. */
1777 bt_cb(skb)->incoming = 1;
1780 __net_timestamp(skb);
1782 skb_queue_tail(&hdev->rx_q, skb);
1783 queue_work(hdev->workqueue, &hdev->rx_work);
1787 EXPORT_SYMBOL(hci_recv_frame);
/*
 * Reassemble an HCI packet of the given type from a partial byte stream.
 * 'index' selects the per-device reassembly slot (one per packet type for
 * hci_recv_fragment, slot 0 for stream reassembly). Allocates a new skb
 * sized for the packet type's maximum when no partial packet exists,
 * copies up to scb->expect bytes, derives the remaining expected length
 * from the type-specific header once it is complete, and hands a finished
 * frame to hci_recv_frame().
 *
 * NOTE(review): sampled chunk — switch labels, break statements, the
 * return of the remaining count and several braces are not visible here.
 */
1789 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1790 int count, __u8 index)
1795 struct sk_buff *skb;
1796 struct bt_skb_cb *scb;
/* Validate packet type range and reassembly slot index. */
1798 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1799 index >= NUM_REASSEMBLY)
1802 skb = hdev->reassembly[index];
/* Pick max frame length and header size for this packet type. */
1806 case HCI_ACLDATA_PKT:
1807 len = HCI_MAX_FRAME_SIZE;
1808 hlen = HCI_ACL_HDR_SIZE;
1811 len = HCI_MAX_EVENT_SIZE;
1812 hlen = HCI_EVENT_HDR_SIZE;
1814 case HCI_SCODATA_PKT:
1815 len = HCI_MAX_SCO_SIZE;
1816 hlen = HCI_SCO_HDR_SIZE;
/* No partial packet in the slot yet: start a fresh one. */
1820 skb = bt_skb_alloc(len, GFP_ATOMIC);
1824 scb = (void *) skb->cb;
1826 scb->pkt_type = type;
1828 skb->dev = (void *) hdev;
1829 hdev->reassembly[index] = skb;
/* Append as many of the available bytes as are still expected. */
1833 scb = (void *) skb->cb;
1834 len = min(scb->expect, (__u16)count);
1836 memcpy(skb_put(skb, len), data, len);
/* Header complete for an event: payload length comes from plen. */
1845 if (skb->len == HCI_EVENT_HDR_SIZE) {
1846 struct hci_event_hdr *h = hci_event_hdr(skb);
1847 scb->expect = h->plen;
/* Advertised payload would overflow the buffer: drop the slot. */
1849 if (skb_tailroom(skb) < scb->expect) {
1851 hdev->reassembly[index] = NULL;
1857 case HCI_ACLDATA_PKT:
1858 if (skb->len == HCI_ACL_HDR_SIZE) {
1859 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1860 scb->expect = __le16_to_cpu(h->dlen);
1862 if (skb_tailroom(skb) < scb->expect) {
1864 hdev->reassembly[index] = NULL;
1870 case HCI_SCODATA_PKT:
1871 if (skb->len == HCI_SCO_HDR_SIZE) {
1872 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1873 scb->expect = h->dlen;
1875 if (skb_tailroom(skb) < scb->expect) {
1877 hdev->reassembly[index] = NULL;
1884 if (scb->expect == 0) {
1885 /* Complete frame */
1887 bt_cb(skb)->pkt_type = type;
1888 hci_recv_frame(skb);
1890 hdev->reassembly[index] = NULL;
/*
 * Feed driver-supplied fragments of a typed HCI packet into the
 * reassembler. Uses one reassembly slot per packet type (type - 1).
 * The visible loop advances 'data' by the number of bytes consumed;
 * loop framing and return are outside this sampled view.
 */
1898 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1902 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1906 rem = hci_reassembly(hdev, type, data, count, type - 1);
1910 data += (count - rem);
1916 EXPORT_SYMBOL(hci_recv_fragment);
/* Slot used when reassembling from an untyped byte stream (e.g. UART). */
1918 #define STREAM_REASSEMBLY 0
/*
 * Reassemble HCI packets from a raw byte stream where each packet is
 * preceded by a one-byte packet-type indicator. A new frame's type is
 * read from the stream head; continuation bytes reuse the type already
 * recorded in the partially assembled skb.
 *
 * NOTE(review): sampled chunk — the type extraction from *pkt and the
 * loop/return framing are not visible here.
 */
1920 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1926 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1929 struct { char type; } *pkt;
1931 /* Start of the frame */
/* Continuing an in-flight packet: type was stored when it started. */
1938 type = bt_cb(skb)->pkt_type;
1940 rem = hci_reassembly(hdev, type, data, count,
1945 data += (count - rem);
1951 EXPORT_SYMBOL(hci_recv_stream_fragment);
1953 /* ---- Interface to upper protocols ---- */
/* Add an upper-protocol callback set to the global hci_cb list. */
1955 int hci_register_cb(struct hci_cb *cb)
1957 BT_DBG("%p name %s", cb, cb->name);
1959 write_lock(&hci_cb_list_lock);
1960 list_add(&cb->list, &hci_cb_list);
1961 write_unlock(&hci_cb_list_lock);
1965 EXPORT_SYMBOL(hci_register_cb);
/* Remove an upper-protocol callback set from the global hci_cb list. */
1967 int hci_unregister_cb(struct hci_cb *cb)
1969 BT_DBG("%p name %s", cb, cb->name);
1971 write_lock(&hci_cb_list_lock);
1972 list_del(&cb->list);
1973 write_unlock(&hci_cb_list_lock);
1977 EXPORT_SYMBOL(hci_unregister_cb);
/*
 * Hand one fully built HCI frame to the transport driver. When any
 * raw/promiscuous listener is attached, a timestamped copy is delivered
 * to monitoring sockets first. The skb's owner is cleared before the
 * driver's send() takes over the buffer.
 */
1979 static int hci_send_frame(struct sk_buff *skb)
1981 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1988 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1990 if (atomic_read(&hdev->promisc)) {
/* Time stamp the copy delivered to sniffing sockets. */
1992 __net_timestamp(skb);
1994 hci_send_to_sock(hdev, skb, NULL);
1997 /* Get rid of skb owner, prior to sending to the driver. */
2000 return hdev->send(skb);
2003 /* Send HCI command */
/*
 * Build an HCI command packet (header + optional parameters) and queue it
 * on cmd_q for hci_cmd_work to transmit. During controller init the
 * opcode is remembered in init_last_cmd so the init state machine can
 * track progress. Returns an error when allocation fails (path partly
 * outside this sampled view).
 */
2004 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2006 int len = HCI_COMMAND_HDR_SIZE + plen;
2007 struct hci_command_hdr *hdr;
2008 struct sk_buff *skb;
2010 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2012 skb = bt_skb_alloc(len, GFP_ATOMIC);
2014 BT_ERR("%s no memory for command", hdev->name);
/* Command header: little-endian opcode; plen set outside visible lines. */
2018 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2019 hdr->opcode = cpu_to_le16(opcode);
2023 memcpy(skb_put(skb, plen), param, plen);
2025 BT_DBG("skb len %d", skb->len);
2027 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2028 skb->dev = (void *) hdev;
2030 if (test_bit(HCI_INIT, &hdev->flags))
2031 hdev->init_last_cmd = opcode;
2033 skb_queue_tail(&hdev->cmd_q, skb);
2034 queue_work(hdev->workqueue, &hdev->cmd_work);
2039 /* Get data from the previously sent command */
/*
 * Return a pointer to the parameter bytes of the last transmitted command
 * (hdev->sent_cmd), but only if its opcode matches the one requested;
 * otherwise the caller gets NULL (return paths partly outside this view).
 */
2040 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2042 struct hci_command_hdr *hdr;
2044 if (!hdev->sent_cmd)
2047 hdr = (void *) hdev->sent_cmd->data;
/* Compare in wire byte order rather than converting the stored opcode. */
2049 if (hdr->opcode != cpu_to_le16(opcode))
2052 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2054 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/*
 * Prepend an ACL data header to the skb: connection handle plus packet
 * boundary/broadcast flags packed into one 16-bit field, and the payload
 * length, both little-endian ('len' is computed outside the visible lines).
 */
2058 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2060 struct hci_acl_hdr *hdr;
2063 skb_push(skb, HCI_ACL_HDR_SIZE);
2064 skb_reset_transport_header(skb);
2065 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2066 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2067 hdr->dlen = cpu_to_le16(len);
/*
 * Queue an ACL skb (possibly carrying a frag_list of continuation
 * fragments) onto the given transmit queue. A non-fragmented skb is
 * appended directly; a fragmented one has its fragments detached, given
 * their own ACL headers with ACL_START cleared (continuation flag), and
 * the whole chain is appended under the queue lock so fragments stay
 * contiguous.
 */
2070 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2071 struct sk_buff *skb, __u16 flags)
2073 struct hci_dev *hdev = conn->hdev;
2074 struct sk_buff *list;
2076 list = skb_shinfo(skb)->frag_list;
2078 /* Non fragmented */
2079 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2081 skb_queue_tail(queue, skb);
2084 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain from the head skb before queueing. */
2086 skb_shinfo(skb)->frag_list = NULL;
2088 /* Queue all fragments atomically */
2089 spin_lock(&queue->lock);
2091 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the ACL_START boundary flag. */
2093 flags &= ~ACL_START;
2096 skb = list; list = list->next;
2098 skb->dev = (void *) hdev;
2099 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2100 hci_add_acl_hdr(skb, conn->handle, flags);
2102 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2104 __skb_queue_tail(queue, skb);
2107 spin_unlock(&queue->lock);
/*
 * Send ACL data on an L2CAP channel: tag the skb with its packet type,
 * prepend the ACL header for this connection, queue it on the channel's
 * data queue, and kick the TX worker.
 */
2111 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2113 struct hci_conn *conn = chan->conn;
2114 struct hci_dev *hdev = conn->hdev;
2116 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2118 skb->dev = (void *) hdev;
2119 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2120 hci_add_acl_hdr(skb, conn->handle, flags);
2122 hci_queue_acl(conn, &chan->data_q, skb, flags);
2124 queue_work(hdev->workqueue, &hdev->tx_work);
2126 EXPORT_SYMBOL(hci_send_acl);
/*
 * Send SCO data: build the SCO header (handle little-endian, dlen as the
 * current payload length), push it in front of the payload, queue the skb
 * on the connection's data queue, and kick the TX worker.
 */
2129 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2131 struct hci_dev *hdev = conn->hdev;
2132 struct hci_sco_hdr hdr;
2134 BT_DBG("%s len %d", hdev->name, skb->len);
2136 hdr.handle = cpu_to_le16(conn->handle);
2137 hdr.dlen = skb->len;
2139 skb_push(skb, HCI_SCO_HDR_SIZE);
2140 skb_reset_transport_header(skb);
2141 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2143 skb->dev = (void *) hdev;
2144 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2146 skb_queue_tail(&conn->data_q, skb);
2147 queue_work(hdev->workqueue, &hdev->tx_work);
2149 EXPORT_SYMBOL(hci_send_sco);
2151 /* ---- HCI TX task (outgoing data) ---- */
2153 /* HCI Connection scheduler */
/*
 * Pick the connection of the given link type with queued data and the
 * fewest in-flight ('sent') packets, and compute its transmit quota from
 * the controller's free buffer count for that link type, shared fairly
 * across the contending connections. *quote receives the quota.
 *
 * NOTE(review): sampled chunk — rcu read-lock framing, the min/num
 * bookkeeping and the quota division are partly outside this view.
 */
2154 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2156 struct hci_conn_hash *h = &hdev->conn_hash;
2157 struct hci_conn *conn = NULL, *c;
2158 int num = 0, min = ~0;
2160 /* We don't have to lock device here. Connections are always
2161 * added and removed with TX task disabled. */
2165 list_for_each_entry_rcu(c, &h->list, list) {
2166 if (c->type != type || skb_queue_empty(&c->data_q))
2169 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Least-loaded connection wins. */
2174 if (c->sent < min) {
/* Early exit once all connections of this type have been seen. */
2179 if (hci_conn_num(hdev, type) == num)
/* Free controller buffers for the winning connection's link type. */
2188 switch (conn->type) {
2190 cnt = hdev->acl_cnt;
2194 cnt = hdev->sco_cnt;
/* LE shares the ACL pool when the controller has no LE buffers. */
2197 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2201 BT_ERR("Unknown link type");
2209 BT_DBG("conn %p quote %d", conn, *quote);
/*
 * Handle a link-layer transmit timeout: disconnect every connection of
 * the given type that still has un-acknowledged packets outstanding
 * (reason 0x13 = remote user terminated connection).
 */
2213 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2215 struct hci_conn_hash *h = &hdev->conn_hash;
2218 BT_ERR("%s link tx timeout", hdev->name);
2222 /* Kill stalled connections */
2223 list_for_each_entry_rcu(c, &h->list, list) {
2224 if (c->type == type && c->sent) {
2225 BT_ERR("%s killing stalled connection %s",
2226 hdev->name, batostr(&c->dst));
2227 hci_acl_disconn(c, 0x13);
/*
 * Channel-level scheduler: across all connections of the given type,
 * pick the channel whose head-of-queue skb has the highest priority,
 * breaking ties by the connection with the fewest in-flight packets.
 * The quota in *quote is derived from the free buffer count for the
 * link type, weighted by priority (weighting outside this view).
 *
 * NOTE(review): sampled chunk — rcu framing, tie-break bookkeeping and
 * the final quota computation are partly missing here.
 */
2234 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2237 struct hci_conn_hash *h = &hdev->conn_hash;
2238 struct hci_chan *chan = NULL;
2239 int num = 0, min = ~0, cur_prio = 0;
2240 struct hci_conn *conn;
2241 int cnt, q, conn_num = 0;
2243 BT_DBG("%s", hdev->name);
2247 list_for_each_entry_rcu(conn, &h->list, list) {
2248 struct hci_chan *tmp;
2250 if (conn->type != type)
2253 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2258 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2259 struct sk_buff *skb;
2261 if (skb_queue_empty(&tmp->data_q))
/* Only the priority of the head skb matters for scheduling. */
2264 skb = skb_peek(&tmp->data_q);
2265 if (skb->priority < cur_prio)
/* Higher priority preempts the current candidate outright. */
2268 if (skb->priority > cur_prio) {
2271 cur_prio = skb->priority;
/* Same priority: prefer the least-loaded connection. */
2276 if (conn->sent < min) {
2282 if (hci_conn_num(hdev, type) == conn_num)
/* Free buffer count for the selected channel's link type. */
2291 switch (chan->conn->type) {
2293 cnt = hdev->acl_cnt;
2297 cnt = hdev->sco_cnt;
2300 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2304 BT_ERR("Unknown link type");
2309 BT_DBG("chan %p quote %d", chan, *quote);
/*
 * Anti-starvation pass: after a scheduling round, bump the priority of
 * waiting head-of-queue skbs toward HCI_PRIO_MAX - 1 so lower-priority
 * channels eventually get transmit quota (the incremental bump is on
 * lines outside this sampled view; only the cap is visible).
 */
2313 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2315 struct hci_conn_hash *h = &hdev->conn_hash;
2316 struct hci_conn *conn;
2319 BT_DBG("%s", hdev->name);
2323 list_for_each_entry_rcu(conn, &h->list, list) {
2324 struct hci_chan *chan;
2326 if (conn->type != type)
2329 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2334 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2335 struct sk_buff *skb;
2342 if (skb_queue_empty(&chan->data_q))
2345 skb = skb_peek(&chan->data_q);
/* Already at the promotion ceiling: leave it alone. */
2346 if (skb->priority >= HCI_PRIO_MAX - 1)
2349 skb->priority = HCI_PRIO_MAX - 1;
2351 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2355 if (hci_conn_num(hdev, type) == num)
/*
 * ACL scheduling round: detect TX stalls (unless the device is in raw
 * mode), then repeatedly pick the best channel via hci_chan_sent() and
 * drain its queue up to the granted quota, stopping early if a
 * lower-priority skb appears at the head. Finally recompute priorities
 * if anything was sent.
 */
2363 static inline void hci_sched_acl(struct hci_dev *hdev)
2365 struct hci_chan *chan;
2366 struct sk_buff *skb;
2370 BT_DBG("%s", hdev->name);
2372 if (!hci_conn_num(hdev, ACL_LINK))
2375 if (!test_bit(HCI_RAW, &hdev->flags)) {
2376 /* ACL tx timeout must be longer than maximum
2377 * link supervision timeout (40.9 seconds) */
2378 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx +
2379 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2380 hci_link_tx_to(hdev, ACL_LINK);
2383 cnt = hdev->acl_cnt;
2385 while (hdev->acl_cnt &&
2386 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2387 u32 priority = (skb_peek(&chan->data_q))->priority;
2388 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2389 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2390 skb->len, skb->priority);
2392 /* Stop if priority has changed */
2393 if (skb->priority < priority)
2396 skb = skb_dequeue(&chan->data_q);
/* Wake the connection from sniff mode if the skb requests it. */
2398 hci_conn_enter_active_mode(chan->conn,
2399 bt_cb(skb)->force_active);
2401 hci_send_frame(skb);
2402 hdev->acl_last_tx = jiffies;
/* Something was transmitted: promote starved channels. */
2410 if (cnt != hdev->acl_cnt)
2411 hci_prio_recalculate(hdev, ACL_LINK);
/*
 * SCO scheduling round: while SCO buffers are free, pick the
 * least-loaded SCO connection and drain its queue up to the quota.
 * The 'sent == ~0' check visibly guards a counter reset (reset
 * statement outside this sampled view).
 */
2415 static inline void hci_sched_sco(struct hci_dev *hdev)
2417 struct hci_conn *conn;
2418 struct sk_buff *skb;
2421 BT_DBG("%s", hdev->name);
2423 if (!hci_conn_num(hdev, SCO_LINK))
2426 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2427 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2428 BT_DBG("skb %p len %d", skb, skb->len);
2429 hci_send_frame(skb);
2432 if (conn->sent == ~0)
/*
 * eSCO scheduling round — mirrors hci_sched_sco() but iterates
 * ESCO_LINK connections; eSCO shares the SCO buffer pool (sco_cnt).
 */
2438 static inline void hci_sched_esco(struct hci_dev *hdev)
2440 struct hci_conn *conn;
2441 struct sk_buff *skb;
2444 BT_DBG("%s", hdev->name);
2446 if (!hci_conn_num(hdev, ESCO_LINK))
2449 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2450 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2451 BT_DBG("skb %p len %d", skb, skb->len);
2452 hci_send_frame(skb);
2455 if (conn->sent == ~0)
/*
 * LE scheduling round: like hci_sched_acl() but for LE links. When the
 * controller has a dedicated LE buffer pool (le_pkts) that pool is used;
 * otherwise LE traffic consumes ACL buffers, and the remaining count is
 * written back to acl_cnt at the end of the round.
 */
2461 static inline void hci_sched_le(struct hci_dev *hdev)
2463 struct hci_chan *chan;
2464 struct sk_buff *skb;
2465 int quote, cnt, tmp;
2467 BT_DBG("%s", hdev->name);
2469 if (!hci_conn_num(hdev, LE_LINK))
2472 if (!test_bit(HCI_RAW, &hdev->flags)) {
2473 /* LE tx timeout must be longer than maximum
2474 * link supervision timeout (40.9 seconds) */
2475 if (!hdev->le_cnt && hdev->le_pkts &&
2476 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2477 hci_link_tx_to(hdev, LE_LINK);
2480 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2482 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2483 u32 priority = (skb_peek(&chan->data_q))->priority;
2484 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2485 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2486 skb->len, skb->priority);
2488 /* Stop if priority has changed */
2489 if (skb->priority < priority)
2492 skb = skb_dequeue(&chan->data_q);
2494 hci_send_frame(skb);
2495 hdev->le_last_tx = jiffies;
/* Without a dedicated LE pool, LE sends consumed ACL buffers. */
2506 hdev->acl_cnt = cnt;
2509 hci_prio_recalculate(hdev, LE_LINK);
/*
 * TX worker: runs one scheduling round per link type (ACL, SCO, eSCO,
 * and — outside this sampled view — LE), then flushes any raw-mode
 * packets queued on raw_q straight to the driver.
 */
2512 static void hci_tx_work(struct work_struct *work)
2514 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2515 struct sk_buff *skb;
2517 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2518 hdev->sco_cnt, hdev->le_cnt);
2520 /* Schedule queues and send stuff to HCI driver */
2522 hci_sched_acl(hdev);
2524 hci_sched_sco(hdev);
2526 hci_sched_esco(hdev);
2530 /* Send next queued raw (unknown type) packet */
2531 while ((skb = skb_dequeue(&hdev->raw_q)))
2532 hci_send_frame(skb);
2535 /* ----- HCI RX task (incoming data processing) ----- */
2537 /* ACL data packet */
/*
 * Process an incoming ACL data packet: strip the ACL header, split the
 * 16-bit field into connection handle and flags, look up the connection,
 * kick it out of sniff mode, and pass the payload up to L2CAP. Unknown
 * handles are logged and the skb dropped (drop path outside this view).
 */
2538 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2540 struct hci_acl_hdr *hdr = (void *) skb->data;
2541 struct hci_conn *conn;
2542 __u16 handle, flags;
2544 skb_pull(skb, HCI_ACL_HDR_SIZE);
2546 handle = __le16_to_cpu(hdr->handle);
2547 flags = hci_flags(handle);
2548 handle = hci_handle(handle);
2550 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2552 hdev->stat.acl_rx++;
2555 conn = hci_conn_hash_lookup_handle(hdev, handle);
2556 hci_dev_unlock(hdev);
2559 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2561 /* Send to upper protocol */
2562 l2cap_recv_acldata(conn, skb, flags);
2565 BT_ERR("%s ACL packet for unknown connection handle %d",
2566 hdev->name, handle);
2572 /* SCO data packet */
/*
 * Process an incoming SCO data packet: strip the SCO header, look up the
 * connection by handle, and pass the payload up to the SCO layer.
 * Unknown handles are logged and the skb dropped (drop path outside this
 * sampled view).
 */
2573 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2575 struct hci_sco_hdr *hdr = (void *) skb->data;
2576 struct hci_conn *conn;
2579 skb_pull(skb, HCI_SCO_HDR_SIZE);
2581 handle = __le16_to_cpu(hdr->handle);
2583 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2585 hdev->stat.sco_rx++;
2588 conn = hci_conn_hash_lookup_handle(hdev, handle);
2589 hci_dev_unlock(hdev);
2592 /* Send to upper protocol */
2593 sco_recv_scodata(conn, skb);
2596 BT_ERR("%s SCO packet for unknown connection handle %d",
2597 hdev->name, handle);
/*
 * RX worker: drains rx_q. Each packet is copied to promiscuous sockets
 * if any listener is attached; raw-mode devices stop there. While the
 * device is initializing, data packets are filtered out so only events
 * reach the init state machine. Otherwise the packet is dispatched by
 * type to the event, ACL, or SCO handler.
 */
2603 static void hci_rx_work(struct work_struct *work)
2605 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2606 struct sk_buff *skb;
2608 BT_DBG("%s", hdev->name);
2610 while ((skb = skb_dequeue(&hdev->rx_q))) {
2611 if (atomic_read(&hdev->promisc)) {
2612 /* Send copy to the sockets */
2613 hci_send_to_sock(hdev, skb, NULL);
2616 if (test_bit(HCI_RAW, &hdev->flags)) {
2621 if (test_bit(HCI_INIT, &hdev->flags)) {
2622 /* Don't process data packets in this states. */
2623 switch (bt_cb(skb)->pkt_type) {
2624 case HCI_ACLDATA_PKT:
2625 case HCI_SCODATA_PKT:
/* Normal dispatch by packet type. */
2632 switch (bt_cb(skb)->pkt_type) {
2634 BT_DBG("%s Event packet", hdev->name);
2635 hci_event_packet(hdev, skb);
2638 case HCI_ACLDATA_PKT:
2639 BT_DBG("%s ACL data packet", hdev->name);
2640 hci_acldata_packet(hdev, skb);
2643 case HCI_SCODATA_PKT:
2644 BT_DBG("%s SCO data packet", hdev->name);
2645 hci_scodata_packet(hdev, skb);
/*
 * Command worker: when the controller's command credit (cmd_cnt) is
 * non-zero, dequeue the next command, keep a clone in sent_cmd for
 * hci_sent_cmd_data(), transmit it, and arm the command timeout timer
 * (cleared instead if a reset is in flight). If cloning fails, the
 * command is requeued at the head and the worker rescheduled.
 */
2655 static void hci_cmd_work(struct work_struct *work)
2657 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2658 struct sk_buff *skb;
2660 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2662 /* Send queued commands */
2663 if (atomic_read(&hdev->cmd_cnt)) {
2664 skb = skb_dequeue(&hdev->cmd_q);
/* Replace the previously saved command before storing the new clone. */
2668 kfree_skb(hdev->sent_cmd);
2670 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2671 if (hdev->sent_cmd) {
/* Consume one command credit; restored by command-complete events. */
2672 atomic_dec(&hdev->cmd_cnt);
2673 hci_send_frame(skb);
2674 if (test_bit(HCI_RESET, &hdev->flags))
2675 del_timer(&hdev->cmd_timer);
2677 mod_timer(&hdev->cmd_timer,
2678 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
/* Clone failed: put the command back and retry later. */
2680 skb_queue_head(&hdev->cmd_q, skb);
2681 queue_work(hdev->workqueue, &hdev->cmd_work);
/*
 * Start a general inquiry (GIAC) for 'length' * 1.28s, flushing the
 * inquiry cache first. Returns -EINPROGRESS if an inquiry is already
 * running; otherwise the result of sending HCI_OP_INQUIRY.
 */
2686 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2688 /* General inquiry access code (GIAC) */
2689 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2690 struct hci_cp_inquiry cp;
2692 BT_DBG("%s", hdev->name);
2694 if (test_bit(HCI_INQUIRY, &hdev->flags))
2695 return -EINPROGRESS;
2697 inquiry_cache_flush(hdev);
2699 memset(&cp, 0, sizeof(cp));
2700 memcpy(&cp.lap, lap, sizeof(cp.lap));
2703 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/*
 * Cancel a running inquiry by sending HCI_OP_INQUIRY_CANCEL; a no-op
 * (early return, not visible here) when no inquiry is active.
 */
2706 int hci_cancel_inquiry(struct hci_dev *hdev)
2708 BT_DBG("%s", hdev->name);
2710 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2713 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
/* Module parameter toggling High Speed (AMP) support at load time. */
2716 module_param(enable_hs, bool, 0644);
2717 MODULE_PARM_DESC(enable_hs, "Enable High Speed");