/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/
/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>
#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}
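/*
 * A minimal sketch of how the request machinery above is used: the caller
 * passes a function that queues one or more HCI commands, then sleeps on
 * req_wait_q until hci_req_complete() (or a timeout) wakes it up. For
 * example, a synchronous controller reset can be issued as:
 *
 *	err = hci_request(hdev, hci_reset_req, 0,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * hci_request() serializes callers via hci_req_lock(); __hci_request()
 * must only be called with that lock already held (as hci_dev_open does).
 */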
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		tasklet_schedule(&hdev->cmd_task);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
	/* Host buffer size */
	{
		struct hci_cp_host_buffer_size cp;
		cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
		cp.sco_mtu = HCI_MAX_SCO_SIZE;
		cp.acl_max_pkt = cpu_to_le16(0xffff);
		cp.sco_max_pkt = cpu_to_le16(0xffff);
		hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
	}
#endif

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
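/*
 * Note on ordering: the hci_send_cmd() calls above only enqueue packets on
 * hdev->cmd_q. The cmd_task below sends them one at a time, gated by
 * hdev->cmd_cnt, so the controller sees the init sequence strictly in
 * order (Reset, Read Local Supported Features, Read Local Version, and so
 * on). During HCI_INIT, hdev->init_last_cmd tracks the most recently
 * queued opcode so that hci_req_complete() only finishes the request when
 * the final command completes.
 */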
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode	= data->pscan_rep_mode;
		info->pscan_period_mode	= data->pscan_period_mode;
		info->pscan_mode	= data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset	= data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock_bh(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock_bh(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses, use a buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temporary buffer
	 * and then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock_bh(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock_bh(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
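/*
 * A minimal userspace sketch (assumes the BlueZ ioctl ABI from
 * <bluetooth/hci.h>): hci_inquiry() above backs the HCIINQUIRY ioctl,
 * which takes a struct hci_inquiry_req immediately followed by room for
 * the inquiry_info results:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} req = {
 *		.ir = {
 *			.dev_id  = 0,			// hci0
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.lap     = { 0x33, 0x8b, 0x9e },// GIAC, little endian
 *			.length  = 8,			// units of 1.28 s
 *			.num_rsp = 8,
 *		},
 *	};
 *
 *	ioctl(hci_socket_fd, HCIINQUIRY, &req);
 */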
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices for now */
	if (hdev->dev_type != HCI_BREDR)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags))
			mgmt_powered(hdev->id, 1);
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->rx_task);
		tasklet_kill(&hdev->tx_task);
		tasklet_kill(&hdev->cmd_task);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->rx_task);
	tasklet_kill(&hdev->tx_task);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Kill cmd task */
	tasklet_kill(&hdev->cmd_task);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	mgmt_powered(hdev->id, 0);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return 0;
}
int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock_bh(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock_bh(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		hci_del_off_timer(hdev);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_del_off_timer(hdev);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		mod_timer(&hdev->off_timer,
				jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

	BT_DBG("%s", hdev->name);

	hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
	struct hci_dev *hdev = (struct hci_dev *) data;

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);
	del_timer(&hdev->off_timer);
}
int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
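/*
 * Worked example of the rules above (auth_type values per the core spec:
 * 0x00/0x01 no bonding, 0x02/0x03 dedicated bonding, 0x04/0x05 general
 * bonding): a legacy combination key (type 0x00) is always stored; an
 * unauthenticated key from an SSP pairing where both sides requested some
 * form of bonding (both auth_type > 0x01) is stored; the same key from a
 * pairing where one side requested no bonding and neither side requested
 * dedicated bonding is kept only for the lifetime of the connection.
 */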
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
						bdaddr_t *bdaddr, u8 type)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_key(hdev->id, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
							u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}
struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
						bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev->id, bdaddr);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev->id, bdaddr);
}
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
		return 1;

	return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);
	setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
						(unsigned long) hdev);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
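/*
 * A minimal driver-side sketch (hypothetical transport "foo"; callbacks
 * and error handling elided). A transport driver allocates an hci_dev,
 * fills in its callbacks, and hands it to hci_register_dev():
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus      = HCI_USB;
 *	hdev->open     = foo_open;	 // enable the transport
 *	hdev->close    = foo_close;	 // disable the transport
 *	hdev->flush    = foo_flush;	 // optional: drop pending TX
 *	hdev->send     = foo_send_frame; // transmit one skb
 *	hdev->destruct = foo_destruct;	 // release driver state
 *	hdev->owner    = THIS_MODULE;
 *
 *	err = hci_register_dev(hdev);	 // assigns "hciN" and powers on
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */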
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */
			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
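/*
 * A minimal driver-side sketch: a transport driver that receives packets
 * in typed chunks can push each chunk straight into the reassembly
 * machinery above; complete frames are forwarded to hci_recv_frame()
 * internally. (The buffer and length here are illustrative only.)
 *
 *	// one fragment of an HCI event, possibly split across reads
 *	hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *
 * Drivers whose transport is a pure byte stream (the packet type byte is
 * in-band) use hci_recv_stream_fragment() below instead.
 */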
#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (!hci_proto[hp->id])
		hci_proto[hp->id] = hp;
	else
		err = -EEXIST;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
	int err = 0;

	BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

	if (hp->id >= HCI_MAX_PROTO)
		return -EINVAL;

	write_lock_bh(&hci_task_lock);

	if (hci_proto[hp->id])
		hci_proto[hp->id] = NULL;
	else
		err = -ENOENT;

	write_unlock_bh(&hci_task_lock);

	return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
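/*
 * A minimal usage sketch, mirroring hci_scan_req() above: callers pass the
 * opcode plus an optional parameter block; the command is only queued
 * here, and the cmd_task sends it once the controller has credit
 * (hdev->cmd_cnt):
 *
 *	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;
 *
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */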
/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
	struct hci_command_hdr *hdr;

	if (!hdev->sent_cmd)
		return NULL;

	hdr = (void *) hdev->sent_cmd->data;

	if (hdr->opcode != cpu_to_le16(opcode))
		return NULL;

	BT_DBG("%s opcode 0x%x", hdev->name, opcode);

	return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each_entry(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}

	if (conn) {
		int cnt, q;

		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
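/*
 * Worked example of the quota above: with hdev->acl_cnt == 8 free ACL
 * buffers and three ACL connections that all have queued data, num == 3,
 * so the scheduler grants the picked connection q = 8 / 3 = 2 packets per
 * pass; the connection with the smallest 'sent' count is always picked
 * first, which keeps the link roughly fair across connections.
 */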
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	/* Kill stalled connections */
	list_for_each_entry(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}
}
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		ch = &conn->chan_hash;

		list_for_each_entry(tmp, &ch->list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	if (!chan)
		return NULL;

	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	list_for_each_entry(conn, &h->list, list) {
		struct hci_chan_hash *ch;
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		ch = &conn->chan_hash;
		list_for_each_entry(chan, &ch->list, list) {
			struct sk_buff *skb;

			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		if (hci_conn_num(hdev, type) == num)
			break;
	}
}
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, SCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ESCO_LINK))
		return;

	while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);
			hci_send_frame(skb);

			conn->sent++;
			if (conn->sent == ~0)
				conn->sent = 0;
		}
	}
}
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
static void hci_tx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	read_lock(&hci_task_lock);

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);

	read_unlock(&hci_task_lock);
}
/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_acl_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle, flags;

	skb_pull(skb, HCI_ACL_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);
	flags  = hci_flags(handle);
	handle = hci_handle(handle);

	BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

	hdev->stat.acl_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_L2CAP];
		if (hp && hp->recv_acldata) {
			hp->recv_acldata(conn, skb, flags);
			return;
		}
	} else {
		BT_ERR("%s ACL packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
	struct hci_sco_hdr *hdr = (void *) skb->data;
	struct hci_conn *conn;
	__u16 handle;

	skb_pull(skb, HCI_SCO_HDR_SIZE);

	handle = __le16_to_cpu(hdr->handle);

	BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

	hdev->stat.sco_rx++;

	hci_dev_lock(hdev);
	conn = hci_conn_hash_lookup_handle(hdev, handle);
	hci_dev_unlock(hdev);

	if (conn) {
		register struct hci_proto *hp;

		/* Send to upper protocol */
		hp = hci_proto[HCI_PROTO_SCO];
		if (hp && hp->recv_scodata) {
			hp->recv_scodata(conn, skb);
			return;
		}
	} else {
		BT_ERR("%s SCO packet for unknown connection handle %d",
			hdev->name, handle);
	}

	kfree_skb(skb);
}
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
					jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}
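/*
 * Command flow control in one example: the controller starts with a single
 * credit (cmd_cnt is set to 1 in hci_dev_open), so at most one command is
 * in flight. Sending a command above consumes the credit; the Command
 * Complete/Command Status handling in hci_event.c restores it and
 * reschedules cmd_task, letting the next queued command go out. If no
 * completion arrives within HCI_CMD_TIMEOUT, hci_cmd_timer() forces the
 * credit back to recover from a wedged controller.
 */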