2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI core. */
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
/* NOTE(review): this listing has blank/brace lines elided (original line
 * numbers are non-contiguous); only comments are added below. */
/* Delay in milliseconds before an auto-powered-on controller is powered
 * back off again (used with the power_off delayed work). */
55 #define AUTO_OFF_TIMEOUT 2000
/* Work handlers for the RX, command and TX paths; defined later in file. */
59 static void hci_rx_work(struct work_struct *work);
60 static void hci_cmd_work(struct work_struct *work);
61 static void hci_tx_work(struct work_struct *work);
63 static DEFINE_MUTEX(hci_task_lock);
/* Global list of registered HCI devices, guarded by hci_dev_list_lock. */
66 LIST_HEAD(hci_dev_list);
67 DEFINE_RWLOCK(hci_dev_list_lock);
69 /* HCI callback list */
70 LIST_HEAD(hci_cb_list);
71 DEFINE_RWLOCK(hci_cb_list_lock);
/* Registered upper-layer protocols (e.g. L2CAP, SCO). */
74 #define HCI_MAX_PROTO 2
75 struct hci_proto *hci_proto[HCI_MAX_PROTO];
77 /* HCI notifiers list */
78 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
80 /* ---- HCI notifications ---- */
/* Thin wrappers around the atomic notifier chain used to broadcast
 * device events (register/up/down/unregister) to interested listeners. */
82 int hci_register_notifier(struct notifier_block *nb)
84 return atomic_notifier_chain_register(&hci_notifier, nb);
87 int hci_unregister_notifier(struct notifier_block *nb)
89 return atomic_notifier_chain_unregister(&hci_notifier, nb);
/* Fire an HCI_DEV_* event for hdev on the notifier chain. */
92 static void hci_notify(struct hci_dev *hdev, int event)
94 atomic_notifier_call_chain(&hci_notifier, event, hdev);
97 /* ---- HCI requests ---- */
/* Mark a pending synchronous HCI request as done and wake its waiter.
 * During init (HCI_INIT set) only the last queued init command counts. */
99 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
101 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
103 /* If this is the init phase check if the completed command matches
104 * the last init command, and if not just return.
106 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
109 if (hdev->req_status == HCI_REQ_PEND) {
110 hdev->req_result = result;
111 hdev->req_status = HCI_REQ_DONE;
112 wake_up_interruptible(&hdev->req_wait_q);
/* Abort a pending synchronous request with the given (positive) errno. */
116 static void hci_req_cancel(struct hci_dev *hdev, int err)
118 BT_DBG("%s err 0x%2.2x", hdev->name, err);
120 if (hdev->req_status == HCI_REQ_PEND) {
121 hdev->req_result = err;
122 hdev->req_status = HCI_REQ_CANCELED;
123 wake_up_interruptible(&hdev->req_wait_q);
127 /* Execute request and wait for completion. */
/* Run @req against the device and sleep (interruptibly, up to @timeout
 * jiffies) until hci_req_complete()/hci_req_cancel() wakes us.
 * Caller must hold the request lock; returns 0 or a negative errno. */
128 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
129 unsigned long opt, __u32 timeout)
131 DECLARE_WAITQUEUE(wait, current);
134 BT_DBG("%s start", hdev->name);
136 hdev->req_status = HCI_REQ_PEND;
138 add_wait_queue(&hdev->req_wait_q, &wait);
139 set_current_state(TASK_INTERRUPTIBLE);
142 schedule_timeout(timeout);
144 remove_wait_queue(&hdev->req_wait_q, &wait);
146 if (signal_pending(current))
149 switch (hdev->req_status) {
/* HCI status codes are translated to negative errnos on completion. */
151 err = -bt_to_errno(hdev->req_result);
154 case HCI_REQ_CANCELED:
155 err = -hdev->req_result;
/* Reset request state for the next caller. */
163 hdev->req_status = hdev->req_result = 0;
165 BT_DBG("%s end: err %d", hdev->name, err);
/* Locked wrapper: serializes requests and refuses them on a down device. */
170 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
171 unsigned long opt, __u32 timeout)
175 if (!test_bit(HCI_UP, &hdev->flags))
178 /* Serialize all requests */
180 ret = __hci_request(hdev, req, opt, timeout);
181 hci_req_unlock(hdev);
/* Request callback: issue an HCI Reset to the controller. */
186 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
188 BT_DBG("%s %ld", hdev->name, opt);
191 set_bit(HCI_RESET, &hdev->flags);
192 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
/* Request callback: bring a freshly opened controller to a usable state.
 * Flushes any driver-supplied init commands, optionally resets the
 * controller, then queues the mandatory read/configure command sequence.
 * Fix(review): "&param" on the WRITE_CA_TIMEOUT line had been corrupted
 * to the mojibake "¶m" (an HTML "&para;" collapse); restored. */
195 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
197 struct hci_cp_delete_stored_link_key cp;
202 BT_DBG("%s %ld", hdev->name, opt);
204 /* Driver initialization */
206 /* Special commands */
207 while ((skb = skb_dequeue(&hdev->driver_init))) {
208 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
209 skb->dev = (void *) hdev;
211 skb_queue_tail(&hdev->cmd_q, skb);
212 queue_work(hdev->workqueue, &hdev->cmd_work);
214 skb_queue_purge(&hdev->driver_init);
216 /* Mandatory initialization */
/* Skip the reset for controllers that advertise the NO_RESET quirk. */
219 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
220 set_bit(HCI_RESET, &hdev->flags);
221 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
224 /* Read Local Supported Features */
225 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
227 /* Read Local Version */
228 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
230 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
231 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
233 /* Read BD Address */
234 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
236 /* Read Class of Device */
237 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
239 /* Read Local Name */
240 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
242 /* Read Voice Setting */
243 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
245 /* Optional initialization */
247 /* Clear Event Filters */
248 flt_type = HCI_FLT_CLEAR_ALL;
249 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
251 /* Connection accept timeout ~20 secs */
252 param = cpu_to_le16(0x7d00);
253 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
255 bacpy(&cp.bdaddr, BDADDR_ANY);
257 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
/* Request callback: LE-specific init — query the LE ACL buffer size. */
260 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
262 BT_DBG("%s", hdev->name);
264 /* Read LE buffer size */
265 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
/* Request callback: set inquiry/page scan enable from @opt. */
268 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
272 BT_DBG("%s %x", hdev->name, scan);
274 /* Inquiry and Page scans */
275 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
/* Request callback: set authentication enable from @opt. */
278 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
282 BT_DBG("%s %x", hdev->name, auth);
285 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
/* Request callback: set encryption mode from @opt. */
288 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
292 BT_DBG("%s %x", hdev->name, encrypt);
295 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
/* Request callback: set the default link policy (little-endian u16). */
298 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
300 __le16 policy = cpu_to_le16(opt);
302 BT_DBG("%s %x", hdev->name, policy);
304 /* Default link policy */
305 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
308 /* Get HCI device by index.
309 * Device is held on return. */
/* Walks hci_dev_list under the read lock; returns a held reference to
 * the device with matching id, or (presumably) NULL if none — the
 * return path is elided in this listing. */
310 struct hci_dev *hci_dev_get(int index)
312 struct hci_dev *hdev = NULL, *d;
319 read_lock(&hci_dev_list_lock);
320 list_for_each_entry(d, &hci_dev_list, list) {
321 if (d->id == index) {
322 hdev = hci_dev_hold(d);
326 read_unlock(&hci_dev_list_lock);
330 /* ---- Inquiry support ---- */
/* Drop every entry from the device's inquiry result cache. */
331 static void inquiry_cache_flush(struct hci_dev *hdev)
333 struct inquiry_cache *cache = &hdev->inq_cache;
334 struct inquiry_entry *next = cache->list, *e;
336 BT_DBG("cache %p", cache);
/* Find a cached inquiry entry by Bluetooth address, or return NULL. */
345 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
347 struct inquiry_cache *cache = &hdev->inq_cache;
348 struct inquiry_entry *e;
350 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
352 for (e = cache->list; e; e = e->next)
353 if (!bacmp(&e->data.bdaddr, bdaddr))
/* Insert or refresh an inquiry result; new entries go at list head. */
358 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
360 struct inquiry_cache *cache = &hdev->inq_cache;
361 struct inquiry_entry *ie;
363 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
365 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
367 /* Entry not in the cache. Add new one. */
368 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
372 ie->next = cache->list;
376 memcpy(&ie->data, data, sizeof(*data));
377 ie->timestamp = jiffies;
378 cache->timestamp = jiffies;
/* Copy up to @num cached entries into @buf as struct inquiry_info;
 * returns the number copied. Must not sleep (called under dev lock). */
381 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
383 struct inquiry_cache *cache = &hdev->inq_cache;
384 struct inquiry_info *info = (struct inquiry_info *) buf;
385 struct inquiry_entry *e;
388 for (e = cache->list; e && copied < num; e = e->next, copied++) {
389 struct inquiry_data *data = &e->data;
390 bacpy(&info->bdaddr, &data->bdaddr);
391 info->pscan_rep_mode = data->pscan_rep_mode;
392 info->pscan_period_mode = data->pscan_period_mode;
393 info->pscan_mode = data->pscan_mode;
394 memcpy(info->dev_class, data->dev_class, 3);
395 info->clock_offset = data->clock_offset;
399 BT_DBG("cache %p, copied %d", cache, copied);
/* Request callback: start an HCI Inquiry unless one is already running. */
403 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
405 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
406 struct hci_cp_inquiry cp;
408 BT_DBG("%s", hdev->name);
410 if (test_bit(HCI_INQUIRY, &hdev->flags))
414 memcpy(&cp.lap, &ir->lap, 3);
415 cp.length = ir->length;
416 cp.num_rsp = ir->num_rsp;
417 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* HCIINQUIRY ioctl backend: optionally flush a stale cache, run the
 * inquiry request, then copy cached results back to user space. */
420 int hci_inquiry(void __user *arg)
422 __u8 __user *ptr = arg;
423 struct hci_inquiry_req ir;
424 struct hci_dev *hdev;
425 int err = 0, do_inquiry = 0, max_rsp;
429 if (copy_from_user(&ir, ptr, sizeof(ir)))
432 hdev = hci_dev_get(ir.dev_id);
/* Re-inquire when the cache is too old, empty, or a flush is forced. */
437 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
438 inquiry_cache_empty(hdev) ||
439 ir.flags & IREQ_CACHE_FLUSH) {
440 inquiry_cache_flush(hdev);
443 hci_dev_unlock(hdev);
/* Timeout scales with requested inquiry length (2000 ms per unit). */
445 timeo = ir.length * msecs_to_jiffies(2000);
448 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
453 /* for unlimited number of responses we will use buffer with 255 entries */
454 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
456 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
457 * copy it to the user space.
459 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
466 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
467 hci_dev_unlock(hdev);
469 BT_DBG("num_rsp %d", ir.num_rsp);
471 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
473 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
486 /* ---- HCI ioctl helpers ---- */
/* Power on HCI device @dev: refuse if rfkill-blocked or already up,
 * call the driver open(), run the init request sequence (unless the
 * device is raw), and announce HCI_DEV_UP. On init failure the work
 * queues are flushed and state torn down. */
488 int hci_dev_open(__u16 dev)
490 struct hci_dev *hdev;
493 hdev = hci_dev_get(dev);
497 BT_DBG("%s %p", hdev->name, hdev);
501 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
506 if (test_bit(HCI_UP, &hdev->flags)) {
511 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
512 set_bit(HCI_RAW, &hdev->flags);
514 /* Treat all non BR/EDR controllers as raw devices if
515 enable_hs is not set */
516 if (hdev->dev_type != HCI_BREDR && !enable_hs)
517 set_bit(HCI_RAW, &hdev->flags);
519 if (hdev->open(hdev)) {
524 if (!test_bit(HCI_RAW, &hdev->flags)) {
525 atomic_set(&hdev->cmd_cnt, 1);
526 set_bit(HCI_INIT, &hdev->flags);
527 hdev->init_last_cmd = 0;
529 ret = __hci_request(hdev, hci_init_req, 0,
530 msecs_to_jiffies(HCI_INIT_TIMEOUT));
/* Run the extra LE init sequence on LE-capable hosts. */
532 if (lmp_host_le_capable(hdev))
533 ret = __hci_request(hdev, hci_le_init_req, 0,
534 msecs_to_jiffies(HCI_INIT_TIMEOUT));
536 clear_bit(HCI_INIT, &hdev->flags);
541 set_bit(HCI_UP, &hdev->flags);
542 hci_notify(hdev, HCI_DEV_UP);
543 if (!test_bit(HCI_SETUP, &hdev->flags)) {
545 mgmt_powered(hdev, 1);
546 hci_dev_unlock(hdev);
549 /* Init failed, cleanup */
550 flush_work(&hdev->tx_work);
551 flush_work(&hdev->cmd_work);
552 flush_work(&hdev->rx_work);
554 skb_queue_purge(&hdev->cmd_q);
555 skb_queue_purge(&hdev->rx_q);
560 if (hdev->sent_cmd) {
561 kfree_skb(hdev->sent_cmd);
562 hdev->sent_cmd = NULL;
570 hci_req_unlock(hdev);
/* Power down @hdev: cancel pending requests, flush work and queues,
 * flush connection/inquiry state, optionally send a final HCI Reset,
 * drop the in-flight command and notify HCI_DEV_DOWN. */
575 static int hci_dev_do_close(struct hci_dev *hdev)
577 BT_DBG("%s %p", hdev->name, hdev);
579 hci_req_cancel(hdev, ENODEV);
/* Already down: just stop the command watchdog and bail out. */
582 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
583 del_timer_sync(&hdev->cmd_timer);
584 hci_req_unlock(hdev);
588 /* Flush RX and TX works */
589 flush_work(&hdev->tx_work);
590 flush_work(&hdev->rx_work);
592 if (hdev->discov_timeout > 0) {
593 cancel_delayed_work(&hdev->discov_off);
594 hdev->discov_timeout = 0;
597 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
598 cancel_delayed_work(&hdev->power_off);
601 inquiry_cache_flush(hdev);
602 hci_conn_hash_flush(hdev);
603 hci_dev_unlock(hdev);
605 hci_notify(hdev, HCI_DEV_DOWN);
/* Non-raw devices get a clean HCI Reset before the transport closes. */
611 skb_queue_purge(&hdev->cmd_q);
612 atomic_set(&hdev->cmd_cnt, 1);
613 if (!test_bit(HCI_RAW, &hdev->flags)) {
614 set_bit(HCI_INIT, &hdev->flags);
615 __hci_request(hdev, hci_reset_req, 0,
616 msecs_to_jiffies(HCI_INIT_TIMEOUT));
617 clear_bit(HCI_INIT, &hdev->flags);
621 flush_work(&hdev->cmd_work);
624 skb_queue_purge(&hdev->rx_q);
625 skb_queue_purge(&hdev->cmd_q);
626 skb_queue_purge(&hdev->raw_q);
628 /* Drop last sent command */
629 if (hdev->sent_cmd) {
630 del_timer_sync(&hdev->cmd_timer);
631 kfree_skb(hdev->sent_cmd);
632 hdev->sent_cmd = NULL;
635 /* After this point our queues are empty
636 * and no tasks are scheduled. */
640 mgmt_powered(hdev, 0);
641 hci_dev_unlock(hdev);
646 hci_req_unlock(hdev);
/* Public ioctl entry point: look the device up by id and close it. */
652 int hci_dev_close(__u16 dev)
654 struct hci_dev *hdev;
657 hdev = hci_dev_get(dev);
660 err = hci_dev_do_close(hdev);
/* HCIDEVRESET ioctl: drop queued traffic, flush inquiry/connection
 * state, reset flow-control counters and issue an HCI Reset. */
665 int hci_dev_reset(__u16 dev)
667 struct hci_dev *hdev;
670 hdev = hci_dev_get(dev);
676 if (!test_bit(HCI_UP, &hdev->flags))
680 skb_queue_purge(&hdev->rx_q);
681 skb_queue_purge(&hdev->cmd_q);
684 inquiry_cache_flush(hdev);
685 hci_conn_hash_flush(hdev);
686 hci_dev_unlock(hdev);
691 atomic_set(&hdev->cmd_cnt, 1);
692 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
694 if (!test_bit(HCI_RAW, &hdev->flags))
695 ret = __hci_request(hdev, hci_reset_req, 0,
696 msecs_to_jiffies(HCI_INIT_TIMEOUT));
699 hci_req_unlock(hdev);
/* HCIDEVRESTAT ioctl: zero the device's RX/TX statistics. */
704 int hci_dev_reset_stat(__u16 dev)
706 struct hci_dev *hdev;
709 hdev = hci_dev_get(dev);
713 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
/* Dispatch the HCISET* family of ioctls: copy the request from user
 * space and either run an HCI request (auth/encrypt/scan/link policy)
 * or update the hdev field directly (link mode, packet types, MTUs).
 * The switch-case labels are elided in this listing. */
720 int hci_dev_cmd(unsigned int cmd, void __user *arg)
722 struct hci_dev *hdev;
723 struct hci_dev_req dr;
726 if (copy_from_user(&dr, arg, sizeof(dr)))
729 hdev = hci_dev_get(dr.dev_id);
735 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
736 msecs_to_jiffies(HCI_INIT_TIMEOUT));
740 if (!lmp_encrypt_capable(hdev)) {
745 if (!test_bit(HCI_AUTH, &hdev->flags)) {
746 /* Auth must be enabled first */
747 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
748 msecs_to_jiffies(HCI_INIT_TIMEOUT));
753 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
754 msecs_to_jiffies(HCI_INIT_TIMEOUT));
758 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
759 msecs_to_jiffies(HCI_INIT_TIMEOUT));
763 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
764 msecs_to_jiffies(HCI_INIT_TIMEOUT));
768 hdev->link_mode = ((__u16) dr.dev_opt) &
769 (HCI_LM_MASTER | HCI_LM_ACCEPT);
773 hdev->pkt_type = (__u16) dr.dev_opt;
/* dev_opt packs two u16s: low half = packet count, high half = MTU. */
777 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
778 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
782 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
783 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
/* HCIGETDEVLIST ioctl: return ids and flags of all registered devices.
 * Also cancels any pending auto-off and marks non-mgmt devices pairable. */
795 int hci_get_dev_list(void __user *arg)
797 struct hci_dev *hdev;
798 struct hci_dev_list_req *dl;
799 struct hci_dev_req *dr;
800 int n = 0, size, err;
803 if (get_user(dev_num, (__u16 __user *) arg))
/* Reject zero or implausibly large counts before allocating. */
806 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
809 size = sizeof(*dl) + dev_num * sizeof(*dr);
811 dl = kzalloc(size, GFP_KERNEL);
817 read_lock_bh(&hci_dev_list_lock);
818 list_for_each_entry(hdev, &hci_dev_list, list) {
819 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
820 cancel_delayed_work(&hdev->power_off);
822 if (!test_bit(HCI_MGMT, &hdev->flags))
823 set_bit(HCI_PAIRABLE, &hdev->flags);
825 (dr + n)->dev_id = hdev->id;
826 (dr + n)->dev_opt = hdev->flags;
831 read_unlock_bh(&hci_dev_list_lock);
/* Shrink the copy to the number of devices actually found. */
834 size = sizeof(*dl) + n * sizeof(*dr);
836 err = copy_to_user(arg, dl, size);
839 return err ? -EFAULT : 0;
/* HCIGETDEVINFO ioctl: fill a struct hci_dev_info snapshot for one
 * device and copy it to user space. */
842 int hci_get_dev_info(void __user *arg)
844 struct hci_dev *hdev;
845 struct hci_dev_info di;
848 if (copy_from_user(&di, arg, sizeof(di)))
851 hdev = hci_dev_get(di.dev_id);
/* Querying a device keeps it from auto-powering off. */
855 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
856 cancel_delayed_work_sync(&hdev->power_off);
858 if (!test_bit(HCI_MGMT, &hdev->flags))
859 set_bit(HCI_PAIRABLE, &hdev->flags);
861 strcpy(di.name, hdev->name);
862 di.bdaddr = hdev->bdaddr;
/* type packs bus in the low nibble and dev_type in the high nibble. */
863 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
864 di.flags = hdev->flags;
865 di.pkt_type = hdev->pkt_type;
866 di.acl_mtu = hdev->acl_mtu;
867 di.acl_pkts = hdev->acl_pkts;
868 di.sco_mtu = hdev->sco_mtu;
869 di.sco_pkts = hdev->sco_pkts;
870 di.link_policy = hdev->link_policy;
871 di.link_mode = hdev->link_mode;
873 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
874 memcpy(&di.features, &hdev->features, sizeof(di.features));
876 if (copy_to_user(arg, &di, sizeof(di)))
884 /* ---- Interface to HCI drivers ---- */
/* rfkill callback: close the device when the radio becomes blocked
 * (the unblocked branch is elided in this listing). */
886 static int hci_rfkill_set_block(void *data, bool blocked)
888 struct hci_dev *hdev = data;
890 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
895 hci_dev_do_close(hdev);
900 static const struct rfkill_ops hci_rfkill_ops = {
901 .set_block = hci_rfkill_set_block,
904 /* Alloc HCI device */
/* Allocate a zeroed hci_dev, wire up sysfs and the driver_init queue. */
905 struct hci_dev *hci_alloc_dev(void)
907 struct hci_dev *hdev;
909 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
913 hci_init_sysfs(hdev);
914 skb_queue_head_init(&hdev->driver_init);
918 EXPORT_SYMBOL(hci_alloc_dev);
920 /* Free HCI device */
/* Drop the embedded device reference; actual kfree happens in the
 * device release callback. */
921 void hci_free_dev(struct hci_dev *hdev)
923 skb_queue_purge(&hdev->driver_init);
925 /* will free via device release */
926 put_device(&hdev->dev);
928 EXPORT_SYMBOL(hci_free_dev);
/* Deferred power-on work: open the device; if it is in auto-off mode,
 * schedule the delayed power-off; finish mgmt setup if pending. */
930 static void hci_power_on(struct work_struct *work)
932 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
934 BT_DBG("%s", hdev->name);
936 if (hci_dev_open(hdev->id) < 0)
939 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
940 schedule_delayed_work(&hdev->power_off,
941 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
943 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
944 mgmt_index_added(hdev);
/* Deferred power-off work: clear auto-off and close the device. */
947 static void hci_power_off(struct work_struct *work)
949 struct hci_dev *hdev = container_of(work, struct hci_dev,
952 BT_DBG("%s", hdev->name);
954 clear_bit(HCI_AUTO_OFF, &hdev->flags);
956 hci_dev_close(hdev->id);
/* Delayed work: end the discoverable period by rewriting scan enable. */
959 static void hci_discov_off(struct work_struct *work)
961 struct hci_dev *hdev;
964 hdev = container_of(work, struct hci_dev, discov_off.work);
966 BT_DBG("%s", hdev->name);
970 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
972 hdev->discov_timeout = 0;
974 hci_dev_unlock(hdev);
/* Remove every registered service UUID from hdev->uuids. */
977 int hci_uuids_clear(struct hci_dev *hdev)
979 struct list_head *p, *n;
981 list_for_each_safe(p, n, &hdev->uuids) {
982 struct bt_uuid *uuid;
984 uuid = list_entry(p, struct bt_uuid, list);
/* Remove every stored link key from hdev->link_keys. */
993 int hci_link_keys_clear(struct hci_dev *hdev)
995 struct list_head *p, *n;
997 list_for_each_safe(p, n, &hdev->link_keys) {
998 struct link_key *key;
1000 key = list_entry(p, struct link_key, list);
/* Look up a stored link key by remote address; NULL-return path elided. */
1009 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1013 list_for_each_entry(k, &hdev->link_keys, list)
1014 if (bacmp(bdaddr, &k->bdaddr) == 0)
/* Decide whether a newly created link key should be stored persistently,
 * based on key type and both sides' authentication requirements. */
1020 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1021 u8 key_type, u8 old_key_type)
/* Legacy key types (< 0x03) are always persistent. */
1024 if (key_type < 0x03)
1027 /* Debug keys are insecure so don't store them persistently */
1028 if (key_type == HCI_LK_DEBUG_COMBINATION)
1031 /* Changed combination key and there's no previous one */
1032 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1035 /* Security mode 3 case */
1039 /* Neither local nor remote side had no-bonding as requirement */
1040 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1043 /* Local side had dedicated bonding as requirement */
1044 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1047 /* Remote side had dedicated bonding as requirement */
1048 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1051 /* If none of the above criteria match, then don't store the key
/* Find a stored SMP Long Term Key matching the given EDIV and 8-byte
 * Rand; LTKs are kept in the same list as classic link keys and carry
 * a struct key_master_id in their variable-length data. */
1056 struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1060 list_for_each_entry(k, &hdev->link_keys, list) {
1061 struct key_master_id *id;
1063 if (k->type != HCI_LK_SMP_LTK)
1066 if (k->dlen != sizeof(*id))
1069 id = (void *) &k->data;
1070 if (id->ediv == ediv &&
1071 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1077 EXPORT_SYMBOL(hci_find_ltk);
/* Look up a stored key by address *and* key type. */
1079 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1080 bdaddr_t *bdaddr, u8 type)
1084 list_for_each_entry(k, &hdev->link_keys, list)
1085 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1090 EXPORT_SYMBOL(hci_find_link_key_type);
/* Store (or update) a classic link key for @bdaddr and, for new keys,
 * notify mgmt with the persistence verdict from hci_persistent_key(). */
1092 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1093 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1095 struct link_key *key, *old_key;
1096 u8 old_key_type, persistent;
1098 old_key = hci_find_link_key(hdev, bdaddr);
1100 old_key_type = old_key->type;
/* No existing key: remember the connection's previous key type (or
 * 0xff for "none") and allocate a fresh entry on the list. */
1103 old_key_type = conn ? conn->key_type : 0xff;
1104 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1107 list_add(&key->list, &hdev->link_keys);
1110 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1112 /* Some buggy controller combinations generate a changed
1113 * combination key for legacy pairing even when there's no
1115 if (type == HCI_LK_CHANGED_COMBINATION &&
1116 (!conn || conn->remote_auth == 0xff) &&
1117 old_key_type == 0xff) {
1118 type = HCI_LK_COMBINATION;
1120 conn->key_type = type;
1123 bacpy(&key->bdaddr, bdaddr);
1124 memcpy(key->val, val, 16);
1125 key->pin_len = pin_len;
/* A "changed combination" key keeps the original key's type. */
1127 if (type == HCI_LK_CHANGED_COMBINATION)
1128 key->type = old_key_type;
1135 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1137 mgmt_new_link_key(hdev, key, persistent);
/* Non-persistent keys are dropped from the list again (per the
 * list_del below); surrounding condition is elided in this listing. */
1140 list_del(&key->list);
/* Store (or replace) an SMP Long Term Key for @bdaddr, appending a
 * struct key_master_id (ediv + rand) as the key's variable data. */
1147 int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
1148 u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
1150 struct link_key *key, *old_key;
1151 struct key_master_id *id;
1154 BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));
1156 old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
1159 old_key_type = old_key->type;
/* No existing LTK: allocate key + trailing master-id in one block. */
1161 key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
1164 list_add(&key->list, &hdev->link_keys);
1165 old_key_type = 0xff;
1168 key->dlen = sizeof(*id);
1170 bacpy(&key->bdaddr, bdaddr);
1171 memcpy(key->val, ltk, sizeof(key->val));
1172 key->type = HCI_LK_SMP_LTK;
/* pin_len is reused to carry the negotiated encryption key size. */
1173 key->pin_len = key_size;
1175 id = (void *) &key->data;
1177 memcpy(id->rand, rand, sizeof(id->rand));
1180 mgmt_new_link_key(hdev, key, old_key_type);
/* Delete the stored link key for @bdaddr, if any. */
1185 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1187 struct link_key *key;
1189 key = hci_find_link_key(hdev, bdaddr);
1193 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1195 list_del(&key->list);
1201 /* HCI command timer function */
/* Command-response watchdog: if the controller stops answering, reset
 * the command credit and kick the command work so the queue drains. */
1202 static void hci_cmd_timer(unsigned long arg)
1204 struct hci_dev *hdev = (void *) arg;
1206 BT_ERR("%s command tx timeout", hdev->name);
1207 atomic_set(&hdev->cmd_cnt, 1);
1208 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Look up stored remote out-of-band pairing data by address. */
1211 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1214 struct oob_data *data;
1216 list_for_each_entry(data, &hdev->remote_oob_data, list)
1217 if (bacmp(bdaddr, &data->bdaddr) == 0)
/* Delete the remote OOB data entry for @bdaddr, if present. */
1223 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1225 struct oob_data *data;
1227 data = hci_find_remote_oob_data(hdev, bdaddr);
1231 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1233 list_del(&data->list);
/* Delete all remote OOB data entries. */
1239 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1241 struct oob_data *data, *n;
1243 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1244 list_del(&data->list);
/* Store (or overwrite) the hash/randomizer OOB pair for @bdaddr. */
1251 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1254 struct oob_data *data;
1256 data = hci_find_remote_oob_data(hdev, bdaddr);
1259 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1263 bacpy(&data->bdaddr, bdaddr);
1264 list_add(&data->list, &hdev->remote_oob_data);
1267 memcpy(data->hash, hash, sizeof(data->hash));
1268 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1270 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
/* Find a blacklist entry by address, or return NULL (return elided). */
1275 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1278 struct bdaddr_list *b;
1280 list_for_each_entry(b, &hdev->blacklist, list)
1281 if (bacmp(bdaddr, &b->bdaddr) == 0)
/* Remove every entry from the device blacklist. */
1287 int hci_blacklist_clear(struct hci_dev *hdev)
1289 struct list_head *p, *n;
1291 list_for_each_safe(p, n, &hdev->blacklist) {
1292 struct bdaddr_list *b;
1294 b = list_entry(p, struct bdaddr_list, list);
/* Block @bdaddr: rejects BDADDR_ANY and duplicates, then notifies mgmt. */
1303 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1305 struct bdaddr_list *entry;
1307 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1310 if (hci_blacklist_lookup(hdev, bdaddr))
1313 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL)
1317 bacpy(&entry->bdaddr, bdaddr);
1319 list_add(&entry->list, &hdev->blacklist);
1321 return mgmt_device_blocked(hdev, bdaddr);
/* Unblock @bdaddr; BDADDR_ANY clears the whole blacklist. */
1324 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1326 struct bdaddr_list *entry;
1328 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1329 return hci_blacklist_clear(hdev);
1331 entry = hci_blacklist_lookup(hdev, bdaddr);
1335 list_del(&entry->list);
1338 return mgmt_device_unblocked(hdev, bdaddr);
/* Delayed work: clear the LE advertising cache under the device lock. */
1341 static void hci_clear_adv_cache(struct work_struct *work)
1343 struct hci_dev *hdev = container_of(work, struct hci_dev,
1348 hci_adv_entries_clear(hdev);
1350 hci_dev_unlock(hdev);
/* Free every cached LE advertising entry. */
1353 int hci_adv_entries_clear(struct hci_dev *hdev)
1355 struct adv_entry *entry, *tmp;
1357 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1358 list_del(&entry->list);
1362 BT_DBG("%s adv cache cleared", hdev->name);
/* Look up a cached advertising entry by address (return path elided). */
1367 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1369 struct adv_entry *entry;
1371 list_for_each_entry(entry, &hdev->adv_entries, list)
1372 if (bacmp(bdaddr, &entry->bdaddr) == 0)
/* True for advertising types a central may connect to. */
1378 static inline int is_connectable_adv(u8 evt_type)
1380 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
/* Cache a connectable LE advertising report; duplicates are ignored. */
1386 int hci_add_adv_entry(struct hci_dev *hdev,
1387 struct hci_ev_le_advertising_info *ev)
1389 struct adv_entry *entry;
1391 if (!is_connectable_adv(ev->evt_type))
1394 /* Only new entries should be added to adv_entries. So, if
1395 * bdaddr was found, don't add it. */
1396 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1399 entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1403 bacpy(&entry->bdaddr, &ev->bdaddr);
1404 entry->bdaddr_type = ev->bdaddr_type;
1406 list_add(&entry->list, &hdev->adv_entries);
1408 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1409 batostr(&entry->bdaddr), entry->bdaddr_type);
1414 /* Register HCI device */
/* Register a driver-allocated hci_dev: assign the first free id (AMP
 * controllers never get id 0), initialize all per-device state, create
 * the workqueue, sysfs entries and rfkill switch, then schedule the
 * initial power-on. Error unwinding labels are elided in this listing. */
1415 int hci_register_dev(struct hci_dev *hdev)
1417 struct list_head *head = &hci_dev_list, *p;
1420 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1421 hdev->bus, hdev->owner);
/* Drivers must supply these callbacks. */
1423 if (!hdev->open || !hdev->close || !hdev->destruct)
1426 /* Do not allow HCI_AMP devices to register at index 0,
1427 * so the index can be used as the AMP controller ID.
1429 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1431 write_lock_bh(&hci_dev_list_lock);
1433 /* Find first available device id */
1434 list_for_each(p, &hci_dev_list) {
1435 if (list_entry(p, struct hci_dev, list)->id != id)
1440 sprintf(hdev->name, "hci%d", id);
1442 list_add_tail(&hdev->list, head);
1444 atomic_set(&hdev->refcnt, 1);
1445 mutex_init(&hdev->lock);
/* Default controller parameters before init negotiates real values. */
1448 hdev->dev_flags = 0;
1449 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1450 hdev->esco_type = (ESCO_HV1);
1451 hdev->link_mode = (HCI_LM_ACCEPT);
1452 hdev->io_capability = 0x03; /* No Input No Output */
1454 hdev->idle_timeout = 0;
1455 hdev->sniff_max_interval = 800;
1456 hdev->sniff_min_interval = 80;
1458 INIT_WORK(&hdev->rx_work, hci_rx_work);
1459 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1460 INIT_WORK(&hdev->tx_work, hci_tx_work);
1463 skb_queue_head_init(&hdev->rx_q);
1464 skb_queue_head_init(&hdev->cmd_q);
1465 skb_queue_head_init(&hdev->raw_q);
1467 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1469 for (i = 0; i < NUM_REASSEMBLY; i++)
1470 hdev->reassembly[i] = NULL;
1472 init_waitqueue_head(&hdev->req_wait_q);
1473 mutex_init(&hdev->req_lock);
1475 inquiry_cache_init(hdev);
1477 hci_conn_hash_init(hdev);
1479 INIT_LIST_HEAD(&hdev->mgmt_pending);
1481 INIT_LIST_HEAD(&hdev->blacklist);
1483 INIT_LIST_HEAD(&hdev->uuids);
1485 INIT_LIST_HEAD(&hdev->link_keys);
1487 INIT_LIST_HEAD(&hdev->remote_oob_data);
1489 INIT_LIST_HEAD(&hdev->adv_entries);
1491 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1492 INIT_WORK(&hdev->power_on, hci_power_on);
1493 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1495 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1497 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1499 atomic_set(&hdev->promisc, 0);
1501 write_unlock_bh(&hci_dev_list_lock);
1503 hdev->workqueue = create_singlethread_workqueue(hdev->name);
1504 if (!hdev->workqueue) {
1509 error = hci_add_sysfs(hdev);
/* rfkill failure is non-fatal: the device simply has no kill switch. */
1513 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1514 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1516 if (rfkill_register(hdev->rfkill) < 0) {
1517 rfkill_destroy(hdev->rfkill);
1518 hdev->rfkill = NULL;
1522 set_bit(HCI_AUTO_OFF, &hdev->flags);
1523 set_bit(HCI_SETUP, &hdev->flags);
1524 queue_work(hdev->workqueue, &hdev->power_on);
1526 hci_notify(hdev, HCI_DEV_REG);
/* Error path: undo workqueue creation and list insertion. */
1531 destroy_workqueue(hdev->workqueue);
1533 write_lock_bh(&hci_dev_list_lock);
1534 list_del(&hdev->list);
1535 write_unlock_bh(&hci_dev_list_lock);
1539 EXPORT_SYMBOL(hci_register_dev);
1541 /* Unregister HCI device */
/* Tear down a registered device: unlink it, close it, free reassembly
 * buffers, notify mgmt/rfkill/sysfs, then drop all stored state and
 * the final reference. */
1542 void hci_unregister_dev(struct hci_dev *hdev)
1546 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1548 write_lock_bh(&hci_dev_list_lock);
1549 list_del(&hdev->list);
1550 write_unlock_bh(&hci_dev_list_lock);
1552 hci_dev_do_close(hdev);
1554 for (i = 0; i < NUM_REASSEMBLY; i++)
1555 kfree_skb(hdev->reassembly[i]);
/* Only announce removal for devices that completed setup. */
1557 if (!test_bit(HCI_INIT, &hdev->flags) &&
1558 !test_bit(HCI_SETUP, &hdev->flags)) {
1560 mgmt_index_removed(hdev);
1561 hci_dev_unlock(hdev);
1564 /* mgmt_index_removed should take care of emptying the
1566 BUG_ON(!list_empty(&hdev->mgmt_pending));
1568 hci_notify(hdev, HCI_DEV_UNREG);
1571 rfkill_unregister(hdev->rfkill);
1572 rfkill_destroy(hdev->rfkill);
1575 hci_del_sysfs(hdev);
1577 cancel_delayed_work_sync(&hdev->adv_work);
1579 destroy_workqueue(hdev->workqueue);
1582 hci_blacklist_clear(hdev);
1583 hci_uuids_clear(hdev);
1584 hci_link_keys_clear(hdev);
1585 hci_remote_oob_data_clear(hdev);
1586 hci_adv_entries_clear(hdev);
1587 hci_dev_unlock(hdev);
1589 __hci_dev_put(hdev);
1591 EXPORT_SYMBOL(hci_unregister_dev);
1593 /* Suspend HCI device */
/* Broadcast suspend/resume notifications; no other state is touched. */
1594 int hci_suspend_dev(struct hci_dev *hdev)
1596 hci_notify(hdev, HCI_DEV_SUSPEND);
1599 EXPORT_SYMBOL(hci_suspend_dev);
1601 /* Resume HCI device */
1602 int hci_resume_dev(struct hci_dev *hdev)
1604 hci_notify(hdev, HCI_DEV_RESUME);
1607 EXPORT_SYMBOL(hci_resume_dev);
1609 /* Receive frame from HCI drivers */
/* Accept an inbound frame only while the device is UP or initializing;
 * timestamp it and hand it to the RX work for processing. */
1610 int hci_recv_frame(struct sk_buff *skb)
1612 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1613 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1614 && !test_bit(HCI_INIT, &hdev->flags))) {
1620 bt_cb(skb)->incoming = 1;
1623 __net_timestamp(skb);
1625 skb_queue_tail(&hdev->rx_q, skb);
1626 queue_work(hdev->workqueue, &hdev->rx_work);
1630 EXPORT_SYMBOL(hci_recv_frame);
/* Reassemble a full HCI packet of @type from a driver-supplied fragment,
 * using the per-device reassembly slot @index. Once the header arrives,
 * scb->expect tracks the remaining payload; a complete packet is passed
 * to hci_recv_frame(). Several case labels/returns are elided here. */
1632 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1633 int count, __u8 index)
1638 struct sk_buff *skb;
1639 struct bt_skb_cb *scb;
1641 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1642 index >= NUM_REASSEMBLY)
1645 skb = hdev->reassembly[index];
/* Starting a new packet: pick max length and header size per type. */
1649 case HCI_ACLDATA_PKT:
1650 len = HCI_MAX_FRAME_SIZE;
1651 hlen = HCI_ACL_HDR_SIZE;
1654 len = HCI_MAX_EVENT_SIZE;
1655 hlen = HCI_EVENT_HDR_SIZE;
1657 case HCI_SCODATA_PKT:
1658 len = HCI_MAX_SCO_SIZE;
1659 hlen = HCI_SCO_HDR_SIZE;
1663 skb = bt_skb_alloc(len, GFP_ATOMIC);
1667 scb = (void *) skb->cb;
1669 scb->pkt_type = type;
1671 skb->dev = (void *) hdev;
1672 hdev->reassembly[index] = skb;
/* Append at most the expected number of bytes from this fragment. */
1676 scb = (void *) skb->cb;
1677 len = min(scb->expect, (__u16)count);
1679 memcpy(skb_put(skb, len), data, len);
/* Header complete: read the payload length for each packet type and
 * drop the packet if it would not fit in the allocated skb. */
1688 if (skb->len == HCI_EVENT_HDR_SIZE) {
1689 struct hci_event_hdr *h = hci_event_hdr(skb);
1690 scb->expect = h->plen;
1692 if (skb_tailroom(skb) < scb->expect) {
1694 hdev->reassembly[index] = NULL;
1700 case HCI_ACLDATA_PKT:
1701 if (skb->len == HCI_ACL_HDR_SIZE) {
1702 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1703 scb->expect = __le16_to_cpu(h->dlen);
1705 if (skb_tailroom(skb) < scb->expect) {
1707 hdev->reassembly[index] = NULL;
1713 case HCI_SCODATA_PKT:
1714 if (skb->len == HCI_SCO_HDR_SIZE) {
1715 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1716 scb->expect = h->dlen;
1718 if (skb_tailroom(skb) < scb->expect) {
1720 hdev->reassembly[index] = NULL;
1727 if (scb->expect == 0) {
1728 /* Complete frame */
1730 bt_cb(skb)->pkt_type = type;
1731 hci_recv_frame(skb);
1733 hdev->reassembly[index] = NULL;
/* Feeds driver-provided fragments of a known packet type into
 * hci_reassembly(), using slot (type - 1) so each packet type gets its
 * own reassembly buffer. Loops (loop header elided in this excerpt)
 * advancing past whatever hci_reassembly consumed each round. */
1741 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1745 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1749 rem = hci_reassembly(hdev, type, data, count, type - 1);
/* rem = bytes not yet consumed; skip the consumed prefix. */
1753 data += (count - rem);
1759 EXPORT_SYMBOL(hci_recv_fragment);
/* Dedicated reassembly slot for untyped byte streams (e.g. UART
 * transports where the packet-type indicator is inline in the data). */
1761 #define STREAM_REASSEMBLY 0
/* Like hci_recv_fragment, but for a raw stream: the first byte of each
 * frame carries the packet type, which is peeled off here before the
 * rest is fed to hci_reassembly() in the STREAM_REASSEMBLY slot. */
1763 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1769 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1772 struct { char type; } *pkt;
1774 /* Start of the frame */
/* Mid-frame: reuse the type recorded when reassembly started. */
1781 type = bt_cb(skb)->pkt_type;
1783 rem = hci_reassembly(hdev, type, data, count,
1788 data += (count - rem);
1794 EXPORT_SYMBOL(hci_recv_stream_fragment);
1796 /* ---- Interface to upper protocols ---- */
1798 /* Register/Unregister protocols.
1799 * hci_task_lock is used to ensure that no tasks are running. */
/* Registers an upper-layer protocol (L2CAP/SCO) in the hci_proto slot
 * table, under hci_task_lock so no RX/TX task observes a half-set
 * entry. Fails if the slot id is out of range or already taken
 * (else branch elided in this excerpt). */
1800 int hci_register_proto(struct hci_proto *hp)
1804 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1806 if (hp->id >= HCI_MAX_PROTO)
1809 mutex_lock(&hci_task_lock);
/* Only install if the slot is currently empty. */
1811 if (!hci_proto[hp->id])
1812 hci_proto[hp->id] = hp;
1816 mutex_unlock(&hci_task_lock);
1820 EXPORT_SYMBOL(hci_register_proto);
/* Mirror of hci_register_proto: clears the protocol slot under
 * hci_task_lock; fails if the slot was not occupied (error path
 * elided in this excerpt). */
1822 int hci_unregister_proto(struct hci_proto *hp)
1826 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1828 if (hp->id >= HCI_MAX_PROTO)
1831 mutex_lock(&hci_task_lock);
1833 if (hci_proto[hp->id])
1834 hci_proto[hp->id] = NULL;
1838 mutex_unlock(&hci_task_lock);
1842 EXPORT_SYMBOL(hci_unregister_proto);
/* Adds a callback block to the global hci_cb_list under its rwlock;
 * callbacks are invoked on connection events elsewhere in the core. */
1844 int hci_register_cb(struct hci_cb *cb)
1846 BT_DBG("%p name %s", cb, cb->name);
1848 write_lock_bh(&hci_cb_list_lock);
1849 list_add(&cb->list, &hci_cb_list);
1850 write_unlock_bh(&hci_cb_list_lock);
1854 EXPORT_SYMBOL(hci_register_cb);
/* Removes a callback block previously added by hci_register_cb. */
1856 int hci_unregister_cb(struct hci_cb *cb)
1858 BT_DBG("%p name %s", cb, cb->name);
1860 write_lock_bh(&hci_cb_list_lock);
1861 list_del(&cb->list);
1862 write_unlock_bh(&hci_cb_list_lock);
1866 EXPORT_SYMBOL(hci_unregister_cb);
/* Final TX step: mirrors the outgoing frame to promiscuous HCI
 * sockets, strips skb ownership, and hands the skb to the transport
 * driver's ->send(). The owning hdev rides in skb->dev. */
1868 static int hci_send_frame(struct sk_buff *skb)
1870 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1877 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
/* Snoop copy for monitors only when someone is listening. */
1879 if (atomic_read(&hdev->promisc)) {
1881 __net_timestamp(skb);
1883 hci_send_to_sock(hdev, skb, NULL);
1886 /* Get rid of skb owner, prior to sending to the driver. */
1889 return hdev->send(skb);
1892 /* Send HCI command */
/* Builds an HCI command packet (header + optional parameters) and
 * queues it on cmd_q for cmd_work to serialize out to the driver.
 * Allocated GFP_ATOMIC since callers may hold spinlocks. */
1893 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1895 int len = HCI_COMMAND_HDR_SIZE + plen;
1896 struct hci_command_hdr *hdr;
1897 struct sk_buff *skb;
1899 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1901 skb = bt_skb_alloc(len, GFP_ATOMIC);
1903 BT_ERR("%s no memory for command", hdev->name);
1907 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
/* Opcode is little-endian on the wire. */
1908 hdr->opcode = cpu_to_le16(opcode);
1912 memcpy(skb_put(skb, plen), param, plen);
1914 BT_DBG("skb len %d", skb->len);
1916 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1917 skb->dev = (void *) hdev;
/* During init, remember the last command so the init sequence can be
 * resumed/validated from command-complete events. */
1919 if (test_bit(HCI_INIT, &hdev->flags))
1920 hdev->init_last_cmd = opcode;
1922 skb_queue_tail(&hdev->cmd_q, skb);
1923 queue_work(hdev->workqueue, &hdev->cmd_work);
1928 /* Get data from the previously sent command */
/* Returns a pointer to the parameter area of hdev->sent_cmd, but only
 * if that command's opcode matches the one the caller asks about;
 * NULL returns (elided here) otherwise. */
1929 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1931 struct hci_command_hdr *hdr;
1933 if (!hdev->sent_cmd)
1936 hdr = (void *) hdev->sent_cmd->data;
/* Compare in wire byte order rather than converting the header. */
1938 if (hdr->opcode != cpu_to_le16(opcode))
1941 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1943 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepends an ACL data header to skb: packs the 12-bit handle with the
 * PB/BC flags into the handle field and records the payload length,
 * both little-endian. */
1947 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1949 struct hci_acl_hdr *hdr;
1952 skb_push(skb, HCI_ACL_HDR_SIZE);
1953 skb_reset_transport_header(skb);
1954 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1955 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1956 hdr->dlen = cpu_to_le16(len);
/* Queues an (possibly fragmented) ACL skb on the given queue. A
 * non-fragmented skb is queued directly; a fragmented one has its
 * frag_list detached and each continuation fragment given its own ACL
 * header (with ACL_START cleared), all enqueued atomically under the
 * queue lock so the scheduler never sees a partial frame. */
1959 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1960 struct sk_buff *skb, __u16 flags)
1962 struct hci_dev *hdev = conn->hdev;
1963 struct sk_buff *list;
1965 list = skb_shinfo(skb)->frag_list;
1967 /* Non fragmented */
1968 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1970 skb_queue_tail(queue, skb);
1973 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain; fragments travel as peer skbs. */
1975 skb_shinfo(skb)->frag_list = NULL;
1977 /* Queue all fragments atomically */
1978 spin_lock_bh(&queue->lock);
1980 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the start-of-frame flag. */
1982 flags &= ~ACL_START;
1985 skb = list; list = list->next;
1987 skb->dev = (void *) hdev;
1988 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1989 hci_add_acl_hdr(skb, conn->handle, flags);
1991 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1993 __skb_queue_tail(queue, skb);
1996 spin_unlock_bh(&queue->lock);
/* Public ACL send path: stamps the skb with its hdev/type, adds the
 * ACL header for the channel's connection, queues it on the channel's
 * data_q and kicks the TX work item. */
2000 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2002 struct hci_conn *conn = chan->conn;
2003 struct hci_dev *hdev = conn->hdev;
2005 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2007 skb->dev = (void *) hdev;
2008 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2009 hci_add_acl_hdr(skb, conn->handle, flags);
2011 hci_queue_acl(conn, &chan->data_q, skb, flags);
2013 queue_work(hdev->workqueue, &hdev->tx_work);
2015 EXPORT_SYMBOL(hci_send_acl);
/* Public SCO send path: builds the SCO header on the stack, pushes it
 * in front of the payload, queues the skb on the connection's data_q
 * and kicks the TX work item. */
2018 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2020 struct hci_dev *hdev = conn->hdev;
2021 struct hci_sco_hdr hdr;
2023 BT_DBG("%s len %d", hdev->name, skb->len);
2025 hdr.handle = cpu_to_le16(conn->handle);
/* dlen is a single byte in the SCO header, so no endian conversion. */
2026 hdr.dlen = skb->len;
2028 skb_push(skb, HCI_SCO_HDR_SIZE);
2029 skb_reset_transport_header(skb);
2030 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2032 skb->dev = (void *) hdev;
2033 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2035 skb_queue_tail(&conn->data_q, skb);
2036 queue_work(hdev->workqueue, &hdev->tx_work);
2038 EXPORT_SYMBOL(hci_send_sco);
2040 /* ---- HCI TX task (outgoing data) ---- */
2042 /* HCI Connection scheduler */
2042 /* HCI Connection scheduler */
/* Picks the connection of the given link type with queued data and the
 * fewest in-flight packets (fair round-robin by c->sent), and computes
 * a TX quota from the controller's free buffer count.
 * NOTE(review): sampled excerpt — the quote computation and return
 * lines are elided; comments only added. */
2043 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2045 struct hci_conn_hash *h = &hdev->conn_hash;
2046 struct hci_conn *conn = NULL, *c;
2047 int num = 0, min = ~0;
2049 /* We don't have to lock device here. Connections are always
2050 * added and removed with TX task disabled. */
2054 list_for_each_entry_rcu(c, &h->list, list) {
2055 if (c->type != type || skb_queue_empty(&c->data_q))
/* Only connections that can actually carry data are eligible. */
2058 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2063 if (c->sent < min) {
/* Early exit once all connections of this type were examined. */
2068 if (hci_conn_num(hdev, type) == num)
/* Pick the free-buffer counter matching the chosen link type; LE
 * falls back to the ACL pool when the controller has no LE buffers. */
2077 switch (conn->type) {
2079 cnt = hdev->acl_cnt;
2083 cnt = hdev->sco_cnt;
2086 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2090 BT_ERR("Unknown link type");
2098 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handler: the controller stopped returning buffer credits,
 * so disconnect every connection of this type that still has packets
 * outstanding. 0x13 is the "remote user terminated" reason code. */
2102 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2104 struct hci_conn_hash *h = &hdev->conn_hash;
2107 BT_ERR("%s link tx timeout", hdev->name);
2111 /* Kill stalled connections */
2112 list_for_each_entry_rcu(c, &h->list, list) {
2113 if (c->type == type && c->sent) {
2114 BT_ERR("%s killing stalled connection %s",
2115 hdev->name, batostr(&c->dst));
2116 hci_acl_disconn(c, 0x13);
/* Channel-aware variant of hci_low_sent: walks every connection of the
 * given type and every channel on it, preferring the channel whose
 * head-of-queue skb has the highest priority, breaking ties by the
 * connection with the fewest in-flight packets. Also derives the TX
 * quota from the matching buffer pool.
 * NOTE(review): sampled excerpt — selection bookkeeping and return
 * lines are elided; comments only added. */
2123 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2126 struct hci_conn_hash *h = &hdev->conn_hash;
2127 struct hci_chan *chan = NULL;
2128 int num = 0, min = ~0, cur_prio = 0;
2129 struct hci_conn *conn;
2130 int cnt, q, conn_num = 0;
2132 BT_DBG("%s", hdev->name);
2136 list_for_each_entry_rcu(conn, &h->list, list) {
2137 struct hci_chan *tmp;
2139 if (conn->type != type)
2142 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2147 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2148 struct sk_buff *skb;
2150 if (skb_queue_empty(&tmp->data_q))
/* Priority of the channel = priority of its head skb. */
2153 skb = skb_peek(&tmp->data_q);
2154 if (skb->priority < cur_prio)
/* Higher priority found: restart the fairness comparison. */
2157 if (skb->priority > cur_prio) {
2160 cur_prio = skb->priority;
2165 if (conn->sent < min) {
2171 if (hci_conn_num(hdev, type) == conn_num)
2180 switch (chan->conn->type) {
2182 cnt = hdev->acl_cnt;
2186 cnt = hdev->sco_cnt;
/* LE shares the ACL pool when the controller has no LE buffers. */
2189 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2193 BT_ERR("Unknown link type");
2198 BT_DBG("chan %p quote %d", chan, *quote);
/* Anti-starvation pass: after a TX round, bump the priority of skbs
 * still waiting on channels of this link type (up to HCI_PRIO_MAX - 1)
 * so lower-priority traffic eventually gets scheduled. */
2202 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2204 struct hci_conn_hash *h = &hdev->conn_hash;
2205 struct hci_conn *conn;
2208 BT_DBG("%s", hdev->name);
2212 list_for_each_entry_rcu(conn, &h->list, list) {
2213 struct hci_chan *chan;
2215 if (conn->type != type)
2218 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2223 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2224 struct sk_buff *skb;
2231 if (skb_queue_empty(&chan->data_q))
2234 skb = skb_peek(&chan->data_q);
/* Already at the promotion ceiling — leave it. */
2235 if (skb->priority >= HCI_PRIO_MAX - 1)
2238 skb->priority = HCI_PRIO_MAX - 1;
2240 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2244 if (hci_conn_num(hdev, type) == num)
/* ACL scheduler: while controller ACL buffers remain, repeatedly picks
 * the best channel via hci_chan_sent and drains up to its quota of
 * same-or-higher-priority skbs to the driver. Detects stalled links
 * (no credits for 45s) and recalculates priorities afterwards. */
2252 static inline void hci_sched_acl(struct hci_dev *hdev)
2254 struct hci_chan *chan;
2255 struct sk_buff *skb;
2259 BT_DBG("%s", hdev->name);
2261 if (!hci_conn_num(hdev, ACL_LINK))
2264 if (!test_bit(HCI_RAW, &hdev->flags)) {
2265 /* ACL tx timeout must be longer than maximum
2266 * link supervision timeout (40.9 seconds) */
2267 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
2268 hci_link_tx_to(hdev, ACL_LINK);
/* Remember the starting credit count to detect whether we sent. */
2271 cnt = hdev->acl_cnt;
2273 while (hdev->acl_cnt &&
2274 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2275 u32 priority = (skb_peek(&chan->data_q))->priority;
/* Peek first: only dequeue if the priority check passes. */
2276 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2277 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2278 skb->len, skb->priority);
2280 /* Stop if priority has changed */
2281 if (skb->priority < priority)
2284 skb = skb_dequeue(&chan->data_q);
2286 hci_conn_enter_active_mode(chan->conn,
2287 bt_cb(skb)->force_active);
2289 hci_send_frame(skb);
2290 hdev->acl_last_tx = jiffies;
/* Something was sent: promote waiting traffic for fairness. */
2298 if (cnt != hdev->acl_cnt)
2299 hci_prio_recalculate(hdev, ACL_LINK);
2301 /* Schedule SCO */
/* SCO scheduler: round-robins connections via hci_low_sent and sends
 * each one up to its quota of queued frames. */
2303 static inline void hci_sched_sco(struct hci_dev *hdev)
2305 struct hci_conn *conn;
2306 struct sk_buff *skb;
2309 BT_DBG("%s", hdev->name);
2311 if (!hci_conn_num(hdev, SCO_LINK))
2314 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2315 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2316 BT_DBG("skb %p len %d", skb, skb->len);
2317 hci_send_frame(skb);
/* conn->sent wrapped to ~0 — reset handling elided in excerpt. */
2320 if (conn->sent == ~0)
/* eSCO scheduler: identical structure to hci_sched_sco but for
 * ESCO_LINK connections; shares the SCO buffer credit pool. */
2326 static inline void hci_sched_esco(struct hci_dev *hdev)
2328 struct hci_conn *conn;
2329 struct sk_buff *skb;
2332 BT_DBG("%s", hdev->name);
2334 if (!hci_conn_num(hdev, ESCO_LINK))
2337 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2338 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2339 BT_DBG("skb %p len %d", skb, skb->len);
2340 hci_send_frame(skb);
2343 if (conn->sent == ~0)
/* LE scheduler: mirrors hci_sched_acl, but the credit pool is the LE
 * buffer count when the controller advertises separate LE buffers
 * (le_pkts != 0) and the shared ACL pool otherwise. */
2349 static inline void hci_sched_le(struct hci_dev *hdev)
2351 struct hci_chan *chan;
2352 struct sk_buff *skb;
2353 int quote, cnt, tmp;
2355 BT_DBG("%s", hdev->name);
2357 if (!hci_conn_num(hdev, LE_LINK))
2360 if (!test_bit(HCI_RAW, &hdev->flags)) {
2361 /* LE tx timeout must be longer than maximum
2362 * link supervision timeout (40.9 seconds) */
2363 if (!hdev->le_cnt && hdev->le_pkts &&
2364 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2365 hci_link_tx_to(hdev, LE_LINK);
2368 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2370 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2371 u32 priority = (skb_peek(&chan->data_q))->priority;
2372 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2373 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2374 skb->len, skb->priority);
2376 /* Stop if priority has changed */
2377 if (skb->priority < priority)
2380 skb = skb_dequeue(&chan->data_q);
2382 hci_send_frame(skb);
2383 hdev->le_last_tx = jiffies;
/* When sharing the ACL pool, write the remaining credits back.
 * NOTE(review): the branch distinguishing le_cnt vs acl_cnt here is
 * partially elided in this excerpt — confirm against full source. */
2394 hdev->acl_cnt = cnt;
2397 hci_prio_recalculate(hdev, LE_LINK);
/* TX work item: runs all per-link-type schedulers under hci_task_lock,
 * then flushes any raw (unknown-type) packets queued by user space
 * straight to the driver. */
2400 static void hci_tx_work(struct work_struct *work)
2402 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2403 struct sk_buff *skb;
2405 mutex_lock(&hci_task_lock);
2407 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2408 hdev->sco_cnt, hdev->le_cnt);
2410 /* Schedule queues and send stuff to HCI driver */
2412 hci_sched_acl(hdev);
2414 hci_sched_sco(hdev);
2416 hci_sched_esco(hdev);
2420 /* Send next queued raw (unknown type) packet */
2421 while ((skb = skb_dequeue(&hdev->raw_q)))
2422 hci_send_frame(skb);
2424 mutex_unlock(&hci_task_lock);
2427 /* ----- HCI RX task (incoming data processing) ----- */
2429 /* ACL data packet */
2429 /* ACL data packet */
/* RX path for ACL data: strips the ACL header, splits the packed
 * handle field into handle + PB/BC flags, looks up the connection and
 * hands the payload to the registered L2CAP protocol. Unknown handles
 * are logged (and the skb dropped on an elided line). */
2430 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2432 struct hci_acl_hdr *hdr = (void *) skb->data;
2433 struct hci_conn *conn;
2434 __u16 handle, flags;
2436 skb_pull(skb, HCI_ACL_HDR_SIZE);
2438 handle = __le16_to_cpu(hdr->handle);
2439 flags = hci_flags(handle);
2440 handle = hci_handle(handle);
2442 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2444 hdev->stat.acl_rx++;
2447 conn = hci_conn_hash_lookup_handle(hdev, handle);
2448 hci_dev_unlock(hdev);
2451 register struct hci_proto *hp;
/* Incoming traffic keeps the link out of sniff/park mode. */
2453 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2455 /* Send to upper protocol */
2456 hp = hci_proto[HCI_PROTO_L2CAP];
2457 if (hp && hp->recv_acldata) {
2458 hp->recv_acldata(conn, skb, flags);
2462 BT_ERR("%s ACL packet for unknown connection handle %d",
2463 hdev->name, handle);
2469 /* SCO data packet */
/* RX path for SCO data: strips the SCO header, resolves the handle to
 * a connection and delivers the payload to the registered SCO
 * protocol; unknown handles are logged. */
2470 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2472 struct hci_sco_hdr *hdr = (void *) skb->data;
2473 struct hci_conn *conn;
2476 skb_pull(skb, HCI_SCO_HDR_SIZE);
2478 handle = __le16_to_cpu(hdr->handle);
2480 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2482 hdev->stat.sco_rx++;
2485 conn = hci_conn_hash_lookup_handle(hdev, handle);
2486 hci_dev_unlock(hdev);
2489 register struct hci_proto *hp;
2491 /* Send to upper protocol */
2492 hp = hci_proto[HCI_PROTO_SCO];
2493 if (hp && hp->recv_scodata) {
2494 hp->recv_scodata(conn, skb);
2498 BT_ERR("%s SCO packet for unknown connection handle %d",
2499 hdev->name, handle);
/* RX work item: drains rx_q under hci_task_lock, mirrors frames to
 * promiscuous sockets, skips processing for raw-mode devices, drops
 * data packets while the device is still initializing, and dispatches
 * the rest by packet type to the event/ACL/SCO handlers. */
2505 static void hci_rx_work(struct work_struct *work)
2507 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2508 struct sk_buff *skb;
2510 BT_DBG("%s", hdev->name);
2512 mutex_lock(&hci_task_lock);
2514 while ((skb = skb_dequeue(&hdev->rx_q))) {
2515 if (atomic_read(&hdev->promisc)) {
2516 /* Send copy to the sockets */
2517 hci_send_to_sock(hdev, skb, NULL);
/* Raw mode: user space owns the HCI protocol, kernel stays out. */
2520 if (test_bit(HCI_RAW, &hdev->flags)) {
2525 if (test_bit(HCI_INIT, &hdev->flags)) {
2526 /* Don't process data packets in this states. */
2527 switch (bt_cb(skb)->pkt_type) {
2528 case HCI_ACLDATA_PKT:
2529 case HCI_SCODATA_PKT:
2536 switch (bt_cb(skb)->pkt_type) {
2538 BT_DBG("%s Event packet", hdev->name);
2539 hci_event_packet(hdev, skb);
2542 case HCI_ACLDATA_PKT:
2543 BT_DBG("%s ACL data packet", hdev->name);
2544 hci_acldata_packet(hdev, skb);
2547 case HCI_SCODATA_PKT:
2548 BT_DBG("%s SCO data packet", hdev->name);
2549 hci_scodata_packet(hdev, skb);
2558 mutex_unlock(&hci_task_lock);
/* Command work item: sends the next queued HCI command if the
 * controller has a command credit. A clone is kept in sent_cmd so
 * command-complete handlers can inspect the parameters, and the
 * command timer is (re)armed to detect a non-responding controller. */
2561 static void hci_cmd_work(struct work_struct *work)
2563 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2564 struct sk_buff *skb;
2566 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2568 /* Send queued commands */
2569 if (atomic_read(&hdev->cmd_cnt)) {
2570 skb = skb_dequeue(&hdev->cmd_q);
2574 kfree_skb(hdev->sent_cmd);
2576 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2577 if (hdev->sent_cmd) {
/* Consume one credit; restored by Command Complete/Status events. */
2578 atomic_dec(&hdev->cmd_cnt);
2579 hci_send_frame(skb);
/* HCI_Reset gets no timeout — some controllers take long to reset. */
2580 if (test_bit(HCI_RESET, &hdev->flags))
2581 del_timer(&hdev->cmd_timer);
2583 mod_timer(&hdev->cmd_timer,
2584 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
/* Clone failed: requeue and retry from the workqueue. */
2586 skb_queue_head(&hdev->cmd_q, skb);
2587 queue_work(hdev->workqueue, &hdev->cmd_work);
/* Starts a general inquiry (GIAC access code) for the given duration;
 * returns -EINPROGRESS if one is already running. The length parameter
 * is presumably copied into cp on an elided line — TODO confirm. */
2592 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2594 /* General inquiry access code (GIAC) */
2595 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2596 struct hci_cp_inquiry cp;
2598 BT_DBG("%s", hdev->name);
2600 if (test_bit(HCI_INQUIRY, &hdev->flags))
2601 return -EINPROGRESS;
2603 memset(&cp, 0, sizeof(cp));
2604 memcpy(&cp.lap, lap, sizeof(cp.lap));
2607 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Cancels a running inquiry; a no-op (early return, elided here) when
 * no inquiry is in progress. */
2610 int hci_cancel_inquiry(struct hci_dev *hdev)
2612 BT_DBG("%s", hdev->name);
2614 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2617 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
/* Runtime-writable module parameter toggling Bluetooth High Speed
 * (AMP) support; enable_hs itself is declared elsewhere in the file. */
2620 module_param(enable_hs, bool, 0644);
2621 MODULE_PARM_DESC(enable_hs, "Enable High Speed");