2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI core. */
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
47 #include <asm/system.h>
48 #include <linux/uaccess.h>
49 #include <asm/unaligned.h>
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
54 #define AUTO_OFF_TIMEOUT 2000
/* Forward declarations for the tasklet handlers defined later in the file. */
56 static void hci_cmd_task(unsigned long arg);
57 static void hci_rx_task(unsigned long arg);
58 static void hci_tx_task(unsigned long arg);
/* Protects the hci_proto table against the rx/tx/cmd tasklets. */
60 static DEFINE_RWLOCK(hci_task_lock);
/* HCI device list and its lock. */
63 LIST_HEAD(hci_dev_list);
64 DEFINE_RWLOCK(hci_dev_list_lock);
66 /* HCI callback list */
67 LIST_HEAD(hci_cb_list);
68 DEFINE_RWLOCK(hci_cb_list_lock);
/* Table of upper-layer protocols, indexed by hp->id. */
71 #define HCI_MAX_PROTO 2
72 struct hci_proto *hci_proto[HCI_MAX_PROTO];
74 /* HCI notifiers list */
75 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
77 /* ---- HCI notifications ---- */
79 int hci_register_notifier(struct notifier_block *nb)
81 return atomic_notifier_chain_register(&hci_notifier, nb);
84 int hci_unregister_notifier(struct notifier_block *nb)
86 return atomic_notifier_chain_unregister(&hci_notifier, nb);
89 static void hci_notify(struct hci_dev *hdev, int event)
91 atomic_notifier_call_chain(&hci_notifier, event, hdev);
94 /* ---- HCI requests ---- */
96 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
98 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
100 /* If this is the init phase check if the completed command matches
101 * the last init command, and if not just return.
103 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
106 if (hdev->req_status == HCI_REQ_PEND) {
107 hdev->req_result = result;
108 hdev->req_status = HCI_REQ_DONE;
109 wake_up_interruptible(&hdev->req_wait_q);
113 static void hci_req_cancel(struct hci_dev *hdev, int err)
115 BT_DBG("%s err 0x%2.2x", hdev->name, err);
117 if (hdev->req_status == HCI_REQ_PEND) {
118 hdev->req_result = err;
119 hdev->req_status = HCI_REQ_CANCELED;
120 wake_up_interruptible(&hdev->req_wait_q);
124 /* Execute request and wait for completion. */
125 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
126 unsigned long opt, __u32 timeout)
128 DECLARE_WAITQUEUE(wait, current);
131 BT_DBG("%s start", hdev->name);
133 hdev->req_status = HCI_REQ_PEND;
135 add_wait_queue(&hdev->req_wait_q, &wait);
136 set_current_state(TASK_INTERRUPTIBLE);
139 schedule_timeout(timeout);
141 remove_wait_queue(&hdev->req_wait_q, &wait);
143 if (signal_pending(current))
146 switch (hdev->req_status) {
148 err = -bt_err(hdev->req_result);
151 case HCI_REQ_CANCELED:
152 err = -hdev->req_result;
160 hdev->req_status = hdev->req_result = 0;
162 BT_DBG("%s end: err %d", hdev->name, err);
167 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
168 unsigned long opt, __u32 timeout)
172 if (!test_bit(HCI_UP, &hdev->flags))
175 /* Serialize all requests */
177 ret = __hci_request(hdev, req, opt, timeout);
178 hci_req_unlock(hdev);
183 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185 BT_DBG("%s %ld", hdev->name, opt);
188 set_bit(HCI_RESET, &hdev->flags);
189 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
192 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
194 struct hci_cp_delete_stored_link_key cp;
199 BT_DBG("%s %ld", hdev->name, opt);
201 /* Driver initialization */
203 /* Special commands */
204 while ((skb = skb_dequeue(&hdev->driver_init))) {
205 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
206 skb->dev = (void *) hdev;
208 skb_queue_tail(&hdev->cmd_q, skb);
209 tasklet_schedule(&hdev->cmd_task);
211 skb_queue_purge(&hdev->driver_init);
213 /* Mandatory initialization */
216 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
217 set_bit(HCI_RESET, &hdev->flags);
218 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
221 /* Read Local Supported Features */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
224 /* Read Local Version */
225 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
227 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
228 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
231 /* Host buffer size */
233 struct hci_cp_host_buffer_size cp;
234 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
235 cp.sco_mtu = HCI_MAX_SCO_SIZE;
236 cp.acl_max_pkt = cpu_to_le16(0xffff);
237 cp.sco_max_pkt = cpu_to_le16(0xffff);
238 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
242 /* Read BD Address */
243 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
245 /* Read Class of Device */
246 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
248 /* Read Local Name */
249 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
251 /* Read Voice Setting */
252 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
254 /* Optional initialization */
256 /* Clear Event Filters */
257 flt_type = HCI_FLT_CLEAR_ALL;
258 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
260 /* Connection accept timeout ~20 secs */
261 param = cpu_to_le16(0x7d00);
262 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
264 bacpy(&cp.bdaddr, BDADDR_ANY);
266 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
269 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
271 BT_DBG("%s", hdev->name);
273 /* Read LE buffer size */
274 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
277 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
281 BT_DBG("%s %x", hdev->name, scan);
283 /* Inquiry and Page scans */
284 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
287 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
291 BT_DBG("%s %x", hdev->name, auth);
294 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
297 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
301 BT_DBG("%s %x", hdev->name, encrypt);
304 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
307 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
309 __le16 policy = cpu_to_le16(opt);
311 BT_DBG("%s %x", hdev->name, policy);
313 /* Default link policy */
314 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
317 /* Get HCI device by index.
318 * Device is held on return. */
319 struct hci_dev *hci_dev_get(int index)
321 struct hci_dev *hdev = NULL;
329 read_lock(&hci_dev_list_lock);
330 list_for_each(p, &hci_dev_list) {
331 struct hci_dev *d = list_entry(p, struct hci_dev, list);
332 if (d->id == index) {
333 hdev = hci_dev_hold(d);
337 read_unlock(&hci_dev_list_lock);
341 /* ---- Inquiry support ---- */
342 static void inquiry_cache_flush(struct hci_dev *hdev)
344 struct inquiry_cache *cache = &hdev->inq_cache;
345 struct inquiry_entry *next = cache->list, *e;
347 BT_DBG("cache %p", cache);
356 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
358 struct inquiry_cache *cache = &hdev->inq_cache;
359 struct inquiry_entry *e;
361 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
363 for (e = cache->list; e; e = e->next)
364 if (!bacmp(&e->data.bdaddr, bdaddr))
369 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
371 struct inquiry_cache *cache = &hdev->inq_cache;
372 struct inquiry_entry *ie;
374 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
376 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
378 /* Entry not in the cache. Add new one. */
379 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
383 ie->next = cache->list;
387 memcpy(&ie->data, data, sizeof(*data));
388 ie->timestamp = jiffies;
389 cache->timestamp = jiffies;
392 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
394 struct inquiry_cache *cache = &hdev->inq_cache;
395 struct inquiry_info *info = (struct inquiry_info *) buf;
396 struct inquiry_entry *e;
399 for (e = cache->list; e && copied < num; e = e->next, copied++) {
400 struct inquiry_data *data = &e->data;
401 bacpy(&info->bdaddr, &data->bdaddr);
402 info->pscan_rep_mode = data->pscan_rep_mode;
403 info->pscan_period_mode = data->pscan_period_mode;
404 info->pscan_mode = data->pscan_mode;
405 memcpy(info->dev_class, data->dev_class, 3);
406 info->clock_offset = data->clock_offset;
410 BT_DBG("cache %p, copied %d", cache, copied);
414 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
416 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
417 struct hci_cp_inquiry cp;
419 BT_DBG("%s", hdev->name);
421 if (test_bit(HCI_INQUIRY, &hdev->flags))
425 memcpy(&cp.lap, &ir->lap, 3);
426 cp.length = ir->length;
427 cp.num_rsp = ir->num_rsp;
428 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
431 int hci_inquiry(void __user *arg)
/* ioctl helper: run an inquiry (or reuse the cache) and copy the results
 * back to user space.
 * NOTE(review): this listing is missing lines dropped by extraction —
 * braces, the declarations of timeo/buf, error paths and returns. Compare
 * against the upstream file before relying on it literally. */
433 __u8 __user *ptr = arg;
434 struct hci_inquiry_req ir;
435 struct hci_dev *hdev;
436 int err = 0, do_inquiry = 0, max_rsp;
440 if (copy_from_user(&ir, ptr, sizeof(ir)))
443 hdev = hci_dev_get(ir.dev_id);
/* Flush the cache (and do a fresh inquiry) if it is stale, empty, or the
 * caller explicitly asked for a flush. */
447 hci_dev_lock_bh(hdev);
448 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
449 inquiry_cache_empty(hdev) ||
450 ir.flags & IREQ_CACHE_FLUSH) {
451 inquiry_cache_flush(hdev);
454 hci_dev_unlock_bh(hdev);
/* Each ir.length unit contributes 2000 ms to the request timeout. */
456 timeo = ir.length * msecs_to_jiffies(2000);
459 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
464 /* for unlimited number of responses we will use buffer with 255 entries */
465 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
467 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
468 * copy it to the user space.
470 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
476 hci_dev_lock_bh(hdev);
477 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
478 hci_dev_unlock_bh(hdev);
480 BT_DBG("num_rsp %d", ir.num_rsp);
/* Copy the updated request header first, then the inquiry_info array. */
482 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
484 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
497 /* ---- HCI ioctl helpers ---- */
499 int hci_dev_open(__u16 dev)
501 struct hci_dev *hdev;
504 hdev = hci_dev_get(dev);
508 BT_DBG("%s %p", hdev->name, hdev);
512 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
517 if (test_bit(HCI_UP, &hdev->flags)) {
522 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
523 set_bit(HCI_RAW, &hdev->flags);
525 /* Treat all non BR/EDR controllers as raw devices for now */
526 if (hdev->dev_type != HCI_BREDR)
527 set_bit(HCI_RAW, &hdev->flags);
529 if (hdev->open(hdev)) {
534 if (!test_bit(HCI_RAW, &hdev->flags)) {
535 atomic_set(&hdev->cmd_cnt, 1);
536 set_bit(HCI_INIT, &hdev->flags);
537 hdev->init_last_cmd = 0;
539 ret = __hci_request(hdev, hci_init_req, 0,
540 msecs_to_jiffies(HCI_INIT_TIMEOUT));
542 if (lmp_le_capable(hdev))
543 ret = __hci_request(hdev, hci_le_init_req, 0,
544 msecs_to_jiffies(HCI_INIT_TIMEOUT));
546 clear_bit(HCI_INIT, &hdev->flags);
551 set_bit(HCI_UP, &hdev->flags);
552 hci_notify(hdev, HCI_DEV_UP);
553 if (!test_bit(HCI_SETUP, &hdev->flags))
554 mgmt_powered(hdev->id, 1);
556 /* Init failed, cleanup */
557 tasklet_kill(&hdev->rx_task);
558 tasklet_kill(&hdev->tx_task);
559 tasklet_kill(&hdev->cmd_task);
561 skb_queue_purge(&hdev->cmd_q);
562 skb_queue_purge(&hdev->rx_q);
567 if (hdev->sent_cmd) {
568 kfree_skb(hdev->sent_cmd);
569 hdev->sent_cmd = NULL;
577 hci_req_unlock(hdev);
582 static int hci_dev_do_close(struct hci_dev *hdev)
/* Bring the device down: cancel pending requests, kill the tasklets, flush
 * the inquiry cache and connections, issue a final short reset for non-raw
 * devices, drop queues and the last sent command, and notify mgmt.
 * NOTE(review): lines were dropped by extraction (braces, hdev->flush and
 * hdev->close calls, returns); compare against the upstream file. */
584 BT_DBG("%s %p", hdev->name, hdev);
586 hci_req_cancel(hdev, ENODEV);
589 /* Stop timer, it might be running */
590 del_timer_sync(&hdev->cmd_timer);
/* Device was not HCI_UP: nothing more to tear down. */
592 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
593 del_timer_sync(&hdev->cmd_timer);
594 hci_req_unlock(hdev);
598 /* Kill RX and TX tasks */
599 tasklet_kill(&hdev->rx_task);
600 tasklet_kill(&hdev->tx_task);
602 hci_dev_lock_bh(hdev);
603 inquiry_cache_flush(hdev);
604 hci_conn_hash_flush(hdev);
605 hci_dev_unlock_bh(hdev);
607 hci_notify(hdev, HCI_DEV_DOWN);
/* Reset the controller (short 250 ms timeout) unless it is a raw device. */
613 skb_queue_purge(&hdev->cmd_q);
614 atomic_set(&hdev->cmd_cnt, 1);
615 if (!test_bit(HCI_RAW, &hdev->flags)) {
616 set_bit(HCI_INIT, &hdev->flags);
617 __hci_request(hdev, hci_reset_req, 0,
618 msecs_to_jiffies(250));
619 clear_bit(HCI_INIT, &hdev->flags);
/* Kill cmd task only after the reset request has been processed. */
623 tasklet_kill(&hdev->cmd_task);
626 skb_queue_purge(&hdev->rx_q);
627 skb_queue_purge(&hdev->cmd_q);
628 skb_queue_purge(&hdev->raw_q);
630 /* Drop last sent command */
631 if (hdev->sent_cmd) {
632 kfree_skb(hdev->sent_cmd);
633 hdev->sent_cmd = NULL;
636 /* After this point our queues are empty
637 * and no tasks are scheduled. */
640 mgmt_powered(hdev->id, 0);
645 hci_req_unlock(hdev);
651 int hci_dev_close(__u16 dev)
653 struct hci_dev *hdev;
656 hdev = hci_dev_get(dev);
659 err = hci_dev_do_close(hdev);
664 int hci_dev_reset(__u16 dev)
666 struct hci_dev *hdev;
669 hdev = hci_dev_get(dev);
674 tasklet_disable(&hdev->tx_task);
676 if (!test_bit(HCI_UP, &hdev->flags))
680 skb_queue_purge(&hdev->rx_q);
681 skb_queue_purge(&hdev->cmd_q);
683 hci_dev_lock_bh(hdev);
684 inquiry_cache_flush(hdev);
685 hci_conn_hash_flush(hdev);
686 hci_dev_unlock_bh(hdev);
691 atomic_set(&hdev->cmd_cnt, 1);
692 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
694 if (!test_bit(HCI_RAW, &hdev->flags))
695 ret = __hci_request(hdev, hci_reset_req, 0,
696 msecs_to_jiffies(HCI_INIT_TIMEOUT));
699 tasklet_enable(&hdev->tx_task);
700 hci_req_unlock(hdev);
705 int hci_dev_reset_stat(__u16 dev)
707 struct hci_dev *hdev;
710 hdev = hci_dev_get(dev);
714 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
721 int hci_dev_cmd(unsigned int cmd, void __user *arg)
723 struct hci_dev *hdev;
724 struct hci_dev_req dr;
727 if (copy_from_user(&dr, arg, sizeof(dr)))
730 hdev = hci_dev_get(dr.dev_id);
736 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
737 msecs_to_jiffies(HCI_INIT_TIMEOUT));
741 if (!lmp_encrypt_capable(hdev)) {
746 if (!test_bit(HCI_AUTH, &hdev->flags)) {
747 /* Auth must be enabled first */
748 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
749 msecs_to_jiffies(HCI_INIT_TIMEOUT));
754 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
755 msecs_to_jiffies(HCI_INIT_TIMEOUT));
759 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
760 msecs_to_jiffies(HCI_INIT_TIMEOUT));
764 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
765 msecs_to_jiffies(HCI_INIT_TIMEOUT));
769 hdev->link_mode = ((__u16) dr.dev_opt) &
770 (HCI_LM_MASTER | HCI_LM_ACCEPT);
774 hdev->pkt_type = (__u16) dr.dev_opt;
778 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
779 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
783 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
784 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
796 int hci_get_dev_list(void __user *arg)
798 struct hci_dev_list_req *dl;
799 struct hci_dev_req *dr;
801 int n = 0, size, err;
804 if (get_user(dev_num, (__u16 __user *) arg))
807 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
810 size = sizeof(*dl) + dev_num * sizeof(*dr);
812 dl = kzalloc(size, GFP_KERNEL);
818 read_lock_bh(&hci_dev_list_lock);
819 list_for_each(p, &hci_dev_list) {
820 struct hci_dev *hdev;
822 hdev = list_entry(p, struct hci_dev, list);
824 hci_del_off_timer(hdev);
826 if (!test_bit(HCI_MGMT, &hdev->flags))
827 set_bit(HCI_PAIRABLE, &hdev->flags);
829 (dr + n)->dev_id = hdev->id;
830 (dr + n)->dev_opt = hdev->flags;
835 read_unlock_bh(&hci_dev_list_lock);
838 size = sizeof(*dl) + n * sizeof(*dr);
840 err = copy_to_user(arg, dl, size);
843 return err ? -EFAULT : 0;
846 int hci_get_dev_info(void __user *arg)
848 struct hci_dev *hdev;
849 struct hci_dev_info di;
852 if (copy_from_user(&di, arg, sizeof(di)))
855 hdev = hci_dev_get(di.dev_id);
859 hci_del_off_timer(hdev);
861 if (!test_bit(HCI_MGMT, &hdev->flags))
862 set_bit(HCI_PAIRABLE, &hdev->flags);
864 strcpy(di.name, hdev->name);
865 di.bdaddr = hdev->bdaddr;
866 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
867 di.flags = hdev->flags;
868 di.pkt_type = hdev->pkt_type;
869 di.acl_mtu = hdev->acl_mtu;
870 di.acl_pkts = hdev->acl_pkts;
871 di.sco_mtu = hdev->sco_mtu;
872 di.sco_pkts = hdev->sco_pkts;
873 di.link_policy = hdev->link_policy;
874 di.link_mode = hdev->link_mode;
876 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
877 memcpy(&di.features, &hdev->features, sizeof(di.features));
879 if (copy_to_user(arg, &di, sizeof(di)))
887 /* ---- Interface to HCI drivers ---- */
889 static int hci_rfkill_set_block(void *data, bool blocked)
891 struct hci_dev *hdev = data;
893 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
898 hci_dev_do_close(hdev);
903 static const struct rfkill_ops hci_rfkill_ops = {
904 .set_block = hci_rfkill_set_block,
907 /* Alloc HCI device */
908 struct hci_dev *hci_alloc_dev(void)
910 struct hci_dev *hdev;
912 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
916 skb_queue_head_init(&hdev->driver_init);
920 EXPORT_SYMBOL(hci_alloc_dev);
922 /* Free HCI device */
923 void hci_free_dev(struct hci_dev *hdev)
925 skb_queue_purge(&hdev->driver_init);
927 /* will free via device release */
928 put_device(&hdev->dev);
930 EXPORT_SYMBOL(hci_free_dev);
932 static void hci_power_on(struct work_struct *work)
934 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
936 BT_DBG("%s", hdev->name);
938 if (hci_dev_open(hdev->id) < 0)
941 if (test_bit(HCI_AUTO_OFF, &hdev->flags))
942 mod_timer(&hdev->off_timer,
943 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
945 if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
946 mgmt_index_added(hdev->id);
949 static void hci_power_off(struct work_struct *work)
951 struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
953 BT_DBG("%s", hdev->name);
955 hci_dev_close(hdev->id);
958 static void hci_auto_off(unsigned long data)
960 struct hci_dev *hdev = (struct hci_dev *) data;
962 BT_DBG("%s", hdev->name);
964 clear_bit(HCI_AUTO_OFF, &hdev->flags);
966 queue_work(hdev->workqueue, &hdev->power_off);
969 void hci_del_off_timer(struct hci_dev *hdev)
971 BT_DBG("%s", hdev->name);
973 clear_bit(HCI_AUTO_OFF, &hdev->flags);
974 del_timer(&hdev->off_timer);
977 int hci_uuids_clear(struct hci_dev *hdev)
979 struct list_head *p, *n;
981 list_for_each_safe(p, n, &hdev->uuids) {
982 struct bt_uuid *uuid;
984 uuid = list_entry(p, struct bt_uuid, list);
993 int hci_link_keys_clear(struct hci_dev *hdev)
995 struct list_head *p, *n;
997 list_for_each_safe(p, n, &hdev->link_keys) {
998 struct link_key *key;
1000 key = list_entry(p, struct link_key, list);
1009 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1011 struct list_head *p;
1013 list_for_each(p, &hdev->link_keys) {
1016 k = list_entry(p, struct link_key, list);
1018 if (bacmp(bdaddr, &k->bdaddr) == 0)
1025 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1026 u8 key_type, u8 old_key_type)
1029 if (key_type < 0x03)
1032 /* Debug keys are insecure so don't store them persistently */
1033 if (key_type == HCI_LK_DEBUG_COMBINATION)
1036 /* Changed combination key and there's no previous one */
1037 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1040 /* Security mode 3 case */
1044 /* Neither local nor remote side had no-bonding as requirement */
1045 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1048 /* Local side had dedicated bonding as requirement */
1049 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1052 /* Remote side had dedicated bonding as requirement */
1053 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1056 /* If none of the above criteria match, then don't store the key
1061 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1062 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
/* Store (or refresh) the link key for bdaddr. A new key is only kept if
 * hci_persistent_key() says it should persist; userspace is notified via
 * mgmt_new_key().
 * NOTE(review): braces and several lines (old_key_type declaration, kfree,
 * returns) are missing from this listing; compare against upstream. */
1064 struct link_key *key, *old_key;
/* Reuse an existing entry for this address when present... */
1067 old_key = hci_find_link_key(hdev, bdaddr);
1069 old_key_type = old_key->type;
/* ...otherwise allocate a fresh entry and link it into hdev->link_keys. */
1072 old_key_type = conn ? conn->key_type : 0xff;
1073 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1076 list_add(&key->list, &hdev->link_keys);
1079 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1081 /* Some buggy controller combinations generate a changed
1082 * combination key for legacy pairing even when there's no
1084 if (type == HCI_LK_CHANGED_COMBINATION &&
1085 (!conn || conn->remote_auth == 0xff) &&
1086 old_key_type == 0xff)
1087 type = HCI_LK_COMBINATION;
/* A non-persistent new key is unlinked and dropped instead of stored. */
1089 if (new_key && !hci_persistent_key(hdev, conn, type, old_key_type)) {
1090 list_del(&key->list);
1095 bacpy(&key->bdaddr, bdaddr);
1096 memcpy(key->val, val, 16);
1098 key->pin_len = pin_len;
1101 mgmt_new_key(hdev->id, key, old_key_type);
/* A changed-combination key keeps the previous key's type. */
1103 if (type == HCI_LK_CHANGED_COMBINATION)
1104 key->type = old_key_type;
1109 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1111 struct link_key *key;
1113 key = hci_find_link_key(hdev, bdaddr);
1117 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1119 list_del(&key->list);
1125 /* HCI command timer function */
1126 static void hci_cmd_timer(unsigned long arg)
1128 struct hci_dev *hdev = (void *) arg;
1130 BT_ERR("%s command tx timeout", hdev->name);
1131 atomic_set(&hdev->cmd_cnt, 1);
1132 clear_bit(HCI_RESET, &hdev->flags);
1133 tasklet_schedule(&hdev->cmd_task);
1136 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1139 struct oob_data *data;
1141 list_for_each_entry(data, &hdev->remote_oob_data, list)
1142 if (bacmp(bdaddr, &data->bdaddr) == 0)
1148 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1150 struct oob_data *data;
1152 data = hci_find_remote_oob_data(hdev, bdaddr);
1156 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1158 list_del(&data->list);
1164 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1166 struct oob_data *data, *n;
1168 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1169 list_del(&data->list);
1176 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1179 struct oob_data *data;
1181 data = hci_find_remote_oob_data(hdev, bdaddr);
1184 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1188 bacpy(&data->bdaddr, bdaddr);
1189 list_add(&data->list, &hdev->remote_oob_data);
1192 memcpy(data->hash, hash, sizeof(data->hash));
1193 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1195 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1200 /* Register HCI device */
1201 int hci_register_dev(struct hci_dev *hdev)
/* Add a new controller: pick the first free id, initialize locks, queues,
 * tasklets, timers and per-device lists, create the workqueue and
 * sysfs/rfkill entries, then schedule power-on.
 * NOTE(review): lines were dropped by extraction (declarations of id/i,
 * braces, error-path labels); compare against the upstream file. */
1203 struct list_head *head = &hci_dev_list, *p;
1206 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1207 hdev->bus, hdev->owner);
/* A driver must supply open/close/destruct callbacks. */
1209 if (!hdev->open || !hdev->close || !hdev->destruct)
1212 write_lock_bh(&hci_dev_list_lock);
1214 /* Find first available device id */
1215 list_for_each(p, &hci_dev_list) {
1216 if (list_entry(p, struct hci_dev, list)->id != id)
1221 sprintf(hdev->name, "hci%d", id);
1223 list_add(&hdev->list, head);
1225 atomic_set(&hdev->refcnt, 1);
1226 spin_lock_init(&hdev->lock);
/* Defaults: basic ACL packet types, HV1 eSCO, accept-only link mode. */
1229 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1230 hdev->esco_type = (ESCO_HV1);
1231 hdev->link_mode = (HCI_LM_ACCEPT);
1232 hdev->io_capability = 0x03; /* No Input No Output */
1234 hdev->idle_timeout = 0;
1235 hdev->sniff_max_interval = 800;
1236 hdev->sniff_min_interval = 80;
1238 tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
1239 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1240 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1242 skb_queue_head_init(&hdev->rx_q);
1243 skb_queue_head_init(&hdev->cmd_q);
1244 skb_queue_head_init(&hdev->raw_q);
1246 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1248 for (i = 0; i < NUM_REASSEMBLY; i++)
1249 hdev->reassembly[i] = NULL;
1251 init_waitqueue_head(&hdev->req_wait_q);
1252 mutex_init(&hdev->req_lock);
1254 inquiry_cache_init(hdev);
1256 hci_conn_hash_init(hdev);
1258 INIT_LIST_HEAD(&hdev->blacklist);
1260 INIT_LIST_HEAD(&hdev->uuids);
1262 INIT_LIST_HEAD(&hdev->link_keys);
1264 INIT_LIST_HEAD(&hdev->remote_oob_data);
1266 INIT_WORK(&hdev->power_on, hci_power_on);
1267 INIT_WORK(&hdev->power_off, hci_power_off);
1268 setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1270 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1272 atomic_set(&hdev->promisc, 0);
1274 write_unlock_bh(&hci_dev_list_lock);
/* Per-device single-threaded workqueue for the power on/off work items. */
1276 hdev->workqueue = create_singlethread_workqueue(hdev->name);
1277 if (!hdev->workqueue)
1280 hci_register_sysfs(hdev);
1282 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1283 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
/* rfkill registration failure is non-fatal: run without rfkill. */
1285 if (rfkill_register(hdev->rfkill) < 0) {
1286 rfkill_destroy(hdev->rfkill);
1287 hdev->rfkill = NULL;
1291 set_bit(HCI_AUTO_OFF, &hdev->flags);
1292 set_bit(HCI_SETUP, &hdev->flags);
1293 queue_work(hdev->workqueue, &hdev->power_on);
1295 hci_notify(hdev, HCI_DEV_REG);
/* Error path: unlink the device from the list again. */
1300 write_lock_bh(&hci_dev_list_lock);
1301 list_del(&hdev->list);
1302 write_unlock_bh(&hci_dev_list_lock);
1306 EXPORT_SYMBOL(hci_register_dev);
1308 /* Unregister HCI device */
1309 int hci_unregister_dev(struct hci_dev *hdev)
/* Tear down a controller: unlink it, close it, free reassembly buffers,
 * notify mgmt and the notifier chain, remove rfkill/sysfs, flush the
 * per-device lists and drop the reference.
 * NOTE(review): some lines (declaration of i, braces, return) are missing
 * from this listing; compare against the upstream file. */
1313 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1315 write_lock_bh(&hci_dev_list_lock);
1316 list_del(&hdev->list);
1317 write_unlock_bh(&hci_dev_list_lock);
1319 hci_dev_do_close(hdev);
1321 for (i = 0; i < NUM_REASSEMBLY; i++)
1322 kfree_skb(hdev->reassembly[i]);
1324 if (!test_bit(HCI_INIT, &hdev->flags) &&
1325 !test_bit(HCI_SETUP, &hdev->flags))
1326 mgmt_index_removed(hdev->id);
1328 hci_notify(hdev, HCI_DEV_UNREG);
/* NOTE(review): presumably guarded by "if (hdev->rfkill)" upstream, since
 * hci_register_dev may leave hdev->rfkill NULL — verify. */
1331 rfkill_unregister(hdev->rfkill);
1332 rfkill_destroy(hdev->rfkill);
1335 hci_unregister_sysfs(hdev);
1337 hci_del_off_timer(hdev);
1339 destroy_workqueue(hdev->workqueue);
1341 hci_dev_lock_bh(hdev);
1342 hci_blacklist_clear(hdev);
1343 hci_uuids_clear(hdev);
1344 hci_link_keys_clear(hdev);
1345 hci_remote_oob_data_clear(hdev);
1346 hci_dev_unlock_bh(hdev);
1348 __hci_dev_put(hdev);
1352 EXPORT_SYMBOL(hci_unregister_dev);
1354 /* Suspend HCI device */
1355 int hci_suspend_dev(struct hci_dev *hdev)
1357 hci_notify(hdev, HCI_DEV_SUSPEND);
1360 EXPORT_SYMBOL(hci_suspend_dev);
1362 /* Resume HCI device */
1363 int hci_resume_dev(struct hci_dev *hdev)
1365 hci_notify(hdev, HCI_DEV_RESUME);
1368 EXPORT_SYMBOL(hci_resume_dev);
1370 /* Receive frame from HCI drivers */
1371 int hci_recv_frame(struct sk_buff *skb)
1373 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1374 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1375 && !test_bit(HCI_INIT, &hdev->flags))) {
1381 bt_cb(skb)->incoming = 1;
1384 __net_timestamp(skb);
1386 /* Queue frame for rx task */
1387 skb_queue_tail(&hdev->rx_q, skb);
1388 tasklet_schedule(&hdev->rx_task);
1392 EXPORT_SYMBOL(hci_recv_frame);
1394 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1395 int count, __u8 index)
/* Incrementally rebuild one HCI packet of the given type in
 * hdev->reassembly[index], consuming up to count input bytes; a completed
 * packet is handed to hci_recv_frame().
 * NOTE(review): lines were dropped by extraction (local declarations,
 * returns, some switch labels and braces); compare against upstream. */
1400 struct sk_buff *skb;
1401 struct bt_skb_cb *scb;
1403 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1404 index >= NUM_REASSEMBLY)
1407 skb = hdev->reassembly[index];
/* No partial skb yet: pick max payload + header size for this packet type
 * and allocate a fresh reassembly buffer. */
1411 case HCI_ACLDATA_PKT:
1412 len = HCI_MAX_FRAME_SIZE;
1413 hlen = HCI_ACL_HDR_SIZE;
1416 len = HCI_MAX_EVENT_SIZE;
1417 hlen = HCI_EVENT_HDR_SIZE;
1419 case HCI_SCODATA_PKT:
1420 len = HCI_MAX_SCO_SIZE;
1421 hlen = HCI_SCO_HDR_SIZE;
1425 skb = bt_skb_alloc(len, GFP_ATOMIC);
1429 scb = (void *) skb->cb;
1431 scb->pkt_type = type;
1433 skb->dev = (void *) hdev;
1434 hdev->reassembly[index] = skb;
/* Copy only as many bytes as the current header/payload still expects. */
1438 scb = (void *) skb->cb;
1439 len = min(scb->expect, (__u16)count);
1441 memcpy(skb_put(skb, len), data, len);
/* Once the full header is in, read the payload length from it; drop the
 * skb if the payload would exceed the tailroom. */
1450 if (skb->len == HCI_EVENT_HDR_SIZE) {
1451 struct hci_event_hdr *h = hci_event_hdr(skb);
1452 scb->expect = h->plen;
1454 if (skb_tailroom(skb) < scb->expect) {
1456 hdev->reassembly[index] = NULL;
1462 case HCI_ACLDATA_PKT:
1463 if (skb->len == HCI_ACL_HDR_SIZE) {
1464 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1465 scb->expect = __le16_to_cpu(h->dlen);
1467 if (skb_tailroom(skb) < scb->expect) {
1469 hdev->reassembly[index] = NULL;
1475 case HCI_SCODATA_PKT:
1476 if (skb->len == HCI_SCO_HDR_SIZE) {
1477 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1478 scb->expect = h->dlen;
1480 if (skb_tailroom(skb) < scb->expect) {
1482 hdev->reassembly[index] = NULL;
1489 if (scb->expect == 0) {
1490 /* Complete frame */
1492 bt_cb(skb)->pkt_type = type;
1493 hci_recv_frame(skb);
1495 hdev->reassembly[index] = NULL;
1503 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1507 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1511 rem = hci_reassembly(hdev, type, data, count, type - 1);
1515 data += (count - rem);
1521 EXPORT_SYMBOL(hci_recv_fragment);
1523 #define STREAM_REASSEMBLY 0
1525 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
/* Reassemble a raw byte stream (e.g. from a UART driver): the first byte
 * of each frame is the packet-type indicator, after which hci_reassembly()
 * is used with the dedicated STREAM_REASSEMBLY slot.
 * NOTE(review): lines were dropped by extraction (declarations, loop,
 * returns); compare against the upstream file. */
1531 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1534 struct { char type; } *pkt;
1536 /* Start of the frame */
/* Continuation: reuse the pkt_type recorded on the partial skb. */
1543 type = bt_cb(skb)->pkt_type;
1545 rem = hci_reassembly(hdev, type, data, count,
1550 data += (count - rem);
1556 EXPORT_SYMBOL(hci_recv_stream_fragment);
1558 /* ---- Interface to upper protocols ---- */
1560 /* Register/Unregister protocols.
1561 * hci_task_lock is used to ensure that no tasks are running. */
1562 int hci_register_proto(struct hci_proto *hp)
1566 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1568 if (hp->id >= HCI_MAX_PROTO)
1571 write_lock_bh(&hci_task_lock);
1573 if (!hci_proto[hp->id])
1574 hci_proto[hp->id] = hp;
1578 write_unlock_bh(&hci_task_lock);
1582 EXPORT_SYMBOL(hci_register_proto);
1584 int hci_unregister_proto(struct hci_proto *hp)
1588 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1590 if (hp->id >= HCI_MAX_PROTO)
1593 write_lock_bh(&hci_task_lock);
1595 if (hci_proto[hp->id])
1596 hci_proto[hp->id] = NULL;
1600 write_unlock_bh(&hci_task_lock);
1604 EXPORT_SYMBOL(hci_unregister_proto);
1606 int hci_register_cb(struct hci_cb *cb)
1608 BT_DBG("%p name %s", cb, cb->name);
1610 write_lock_bh(&hci_cb_list_lock);
1611 list_add(&cb->list, &hci_cb_list);
1612 write_unlock_bh(&hci_cb_list_lock);
1616 EXPORT_SYMBOL(hci_register_cb);
1618 int hci_unregister_cb(struct hci_cb *cb)
1620 BT_DBG("%p name %s", cb, cb->name);
1622 write_lock_bh(&hci_cb_list_lock);
1623 list_del(&cb->list);
1624 write_unlock_bh(&hci_cb_list_lock);
1628 EXPORT_SYMBOL(hci_unregister_cb);
1630 static int hci_send_frame(struct sk_buff *skb)
1632 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1639 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1641 if (atomic_read(&hdev->promisc)) {
1643 __net_timestamp(skb);
1645 hci_send_to_sock(hdev, skb, NULL);
1648 /* Get rid of skb owner, prior to sending to the driver. */
1651 return hdev->send(skb);
1654 /* Send HCI command */
/* Build an HCI command packet (header + optional parameters) and queue it
 * on hdev->cmd_q for the command tasklet to send.  Returns 0 on success;
 * the -ENOMEM path for a failed allocation is not fully visible here. */
1655 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1657 int len = HCI_COMMAND_HDR_SIZE + plen;
1658 struct hci_command_hdr *hdr;
1659 struct sk_buff *skb;
1661 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
/* GFP_ATOMIC: this can be called from softirq context. */
1663 skb = bt_skb_alloc(len, GFP_ATOMIC);
1665 BT_ERR("%s no memory for command", hdev->name);
/* Command header: little-endian opcode; the plen store is not visible in
 * this excerpt — presumably hdr->plen = plen follows. */
1669 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1670 hdr->opcode = cpu_to_le16(opcode);
1674 memcpy(skb_put(skb, plen), param, plen);
1676 BT_DBG("skb len %d", skb->len);
1678 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1679 skb->dev = (void *) hdev;
/* During controller init, remember the last command issued so the init
 * state machine can match the completion event. */
1681 if (test_bit(HCI_INIT, &hdev->flags))
1682 hdev->init_last_cmd = opcode;
1684 skb_queue_tail(&hdev->cmd_q, skb);
1685 tasklet_schedule(&hdev->cmd_task);
1690 /* Get data from the previously sent command */
/* Return a pointer to the parameter area of the most recently sent command
 * (hdev->sent_cmd), but only when its opcode matches the one asked for.
 * The NULL-return lines for the mismatch cases are not visible here. */
1691 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1693 struct hci_command_hdr *hdr;
1695 if (!hdev->sent_cmd)
1698 hdr = (void *) hdev->sent_cmd->data;
/* Opcode stored little-endian on the wire; compare in that byte order. */
1700 if (hdr->opcode != cpu_to_le16(opcode))
1703 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
/* Skip past the command header to the parameters. */
1705 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/* Prepend an ACL data header (handle+flags word, data length) to an
 * outgoing skb.  NOTE(review): `len` is used below with no visible
 * declaration — the extraction presumably dropped `int len = skb->len;`
 * (taken before the push); confirm against upstream. */
1709 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1711 struct hci_acl_hdr *hdr;
1714 skb_push(skb, HCI_ACL_HDR_SIZE);
1715 skb_reset_transport_header(skb);
1716 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
/* hci_handle_pack() folds the PB/BC flag bits into the handle field. */
1717 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1718 hdr->dlen = cpu_to_le16(len);
/* Queue ACL data on a connection's data_q and kick the TX tasklet.
 * A fragmented skb (frag_list set by L2CAP) is flattened into the queue as
 * individual HCI fragments: the head keeps ACL_START, continuations get
 * the flag cleared, and the whole run is enqueued under the queue lock so
 * fragments of different packets never interleave. */
1721 void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1723 struct hci_dev *hdev = conn->hdev;
1724 struct sk_buff *list;
1726 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1728 skb->dev = (void *) hdev;
1729 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1730 hci_add_acl_hdr(skb, conn->handle, flags);
1732 list = skb_shinfo(skb)->frag_list;
1734 /* Non fragmented */
1735 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1737 skb_queue_tail(&conn->data_q, skb);
1740 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain; each fragment is queued as its own skb. */
1742 skb_shinfo(skb)->frag_list = NULL;
1744 /* Queue all fragments atomically */
1745 spin_lock_bh(&conn->data_q.lock);
/* __skb_queue_tail: lock already held, use the unlocked variant. */
1747 __skb_queue_tail(&conn->data_q, skb);
/* Continuation fragments must not carry the ACL_START flag. */
1749 flags &= ~ACL_START;
/* Walk the detached fragment chain; loop header not visible in excerpt. */
1752 skb = list; list = list->next;
1754 skb->dev = (void *) hdev;
1755 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1756 hci_add_acl_hdr(skb, conn->handle, flags);
1758 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1760 __skb_queue_tail(&conn->data_q, skb);
1763 spin_unlock_bh(&conn->data_q.lock);
1766 tasklet_schedule(&hdev->tx_task);
1768 EXPORT_SYMBOL(hci_send_acl);
/* Queue one SCO data packet: build the SCO header on the stack, push it in
 * front of the payload, tag the skb, enqueue on the connection's data_q and
 * schedule the TX tasklet. */
1771 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1773 struct hci_dev *hdev = conn->hdev;
1774 struct hci_sco_hdr hdr;
1776 BT_DBG("%s len %d", hdev->name, skb->len);
1778 hdr.handle = cpu_to_le16(conn->handle);
/* dlen is a single byte in the SCO header, hence no endian conversion. */
1779 hdr.dlen = skb->len;
1781 skb_push(skb, HCI_SCO_HDR_SIZE);
1782 skb_reset_transport_header(skb);
1783 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1785 skb->dev = (void *) hdev;
1786 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1788 skb_queue_tail(&conn->data_q, skb);
1789 tasklet_schedule(&hdev->tx_task);
1791 EXPORT_SYMBOL(hci_send_sco);
1793 /* ---- HCI TX task (outgoing data) ---- */
1795 /* HCI Connection scheduler */
/* Connection scheduler: pick the connection of the given link type that has
 * queued data and the fewest in-flight packets (min c->sent), and compute a
 * fair *quote of how many packets it may send from the per-type credit pool.
 * NOTE(review): the declarations of `c`/`cnt`, the min/num bookkeeping and
 * the quote computation lines are missing from this excerpt. */
1796 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1798 struct hci_conn_hash *h = &hdev->conn_hash;
1799 struct hci_conn *conn = NULL;
1800 int num = 0, min = ~0;
1801 struct list_head *p;
1803 /* We don't have to lock device here. Connections are always
1804 * added and removed with TX task disabled. */
1805 list_for_each(p, &h->list) {
1807 c = list_entry(p, struct hci_conn, list);
/* Skip connections of the wrong type or with nothing queued. */
1809 if (c->type != type || skb_queue_empty(&c->data_q))
/* Only fully established connections may transmit. */
1812 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the least-busy eligible connection. */
1817 if (c->sent < min) {
/* Select the credit pool matching the winner's link type. */
1826 switch (conn->type) {
1828 cnt = hdev->acl_cnt;
1832 cnt = hdev->sco_cnt;
/* LE shares the ACL pool on controllers without dedicated LE buffers. */
1835 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
1839 BT_ERR("Unknown link type");
1847 BT_DBG("conn %p quote %d", conn, *quote);
/* TX timeout handler: the controller stopped returning completed-packet
 * credits, so disconnect every connection of this link type that still has
 * unacknowledged packets (c->sent).  0x13 = Remote User Terminated reason.
 * NOTE(review): the declaration of `c` is missing from this excerpt. */
1851 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1853 struct hci_conn_hash *h = &hdev->conn_hash;
1854 struct list_head *p;
1857 BT_ERR("%s link tx timeout", hdev->name);
1859 /* Kill stalled connections */
1860 list_for_each(p, &h->list) {
1861 c = list_entry(p, struct hci_conn, list);
1862 if (c->type == type && c->sent) {
1863 BT_ERR("%s killing stalled connection %s",
1864 hdev->name, batostr(&c->dst));
1865 hci_acl_disconn(c, 0x13);
/* ACL scheduler: while ACL credits remain, let the least-busy connection
 * send up to its quote of packets.  Detects a stalled controller first.
 * NOTE(review): the `int quote;` declaration and the per-packet credit /
 * c->sent accounting lines are missing from this excerpt. */
1870 static inline void hci_sched_acl(struct hci_dev *hdev)
1872 struct hci_conn *conn;
1873 struct sk_buff *skb;
1876 BT_DBG("%s", hdev->name);
/* Raw mode bypasses flow control, so skip the timeout check there. */
1878 if (!test_bit(HCI_RAW, &hdev->flags)) {
1879 /* ACL tx timeout must be longer than maximum
1880 * link supervision timeout (40.9 seconds) */
1881 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1882 hci_link_tx_to(hdev, ACL_LINK);
1885 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1886 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1887 BT_DBG("skb %p len %d", skb, skb->len);
/* Wake the link out of sniff mode before transmitting. */
1889 hci_conn_enter_active_mode(conn);
1891 hci_send_frame(skb);
/* Record the transmit time for the stall detector above. */
1892 hdev->acl_last_tx = jiffies;
/* SCO scheduler: same quota loop as ACL but against the SCO credit pool.
 * NOTE(review): the `int quote;` declaration and the conn->sent reset on
 * wrap are partially missing from this excerpt. */
1901 static inline void hci_sched_sco(struct hci_dev *hdev)
1903 struct hci_conn *conn;
1904 struct sk_buff *skb;
1907 BT_DBG("%s", hdev->name);
1909 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1910 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1911 BT_DBG("skb %p len %d", skb, skb->len);
1912 hci_send_frame(skb);
/* Guard against the sent counter wrapping past its maximum. */
1915 if (conn->sent == ~0)
/* eSCO scheduler: identical to hci_sched_sco() but selects ESCO_LINK
 * connections; eSCO shares the SCO credit pool (hdev->sco_cnt).
 * NOTE(review): `int quote;` and part of the sent-counter handling are
 * missing from this excerpt. */
1921 static inline void hci_sched_esco(struct hci_dev *hdev)
1923 struct hci_conn *conn;
1924 struct sk_buff *skb;
1927 BT_DBG("%s", hdev->name);
1929 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1930 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1931 BT_DBG("skb %p len %d", skb, skb->len);
1932 hci_send_frame(skb);
1935 if (conn->sent == ~0)
/* LE scheduler: like ACL scheduling, but the credit pool depends on the
 * controller — dedicated LE buffers (le_pkts/le_cnt) when present,
 * otherwise the shared ACL pool.  Any leftover credits are written back to
 * the pool that was borrowed from.
 * NOTE(review): `int quote, cnt;` and the final writeback branches are
 * partly missing from this excerpt. */
1941 static inline void hci_sched_le(struct hci_dev *hdev)
1943 struct hci_conn *conn;
1944 struct sk_buff *skb;
1947 BT_DBG("%s", hdev->name);
1949 if (!test_bit(HCI_RAW, &hdev->flags)) {
1950 /* LE tx timeout must be longer than maximum
1951 * link supervision timeout (40.9 seconds) */
1952 if (!hdev->le_cnt && hdev->le_pkts &&
1953 time_after(jiffies, hdev->le_last_tx + HZ * 45))
1954 hci_link_tx_to(hdev, LE_LINK);
/* Borrow from the ACL pool when the controller has no LE buffers. */
1957 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
1958 while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
1959 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1960 BT_DBG("skb %p len %d", skb, skb->len);
1962 hci_send_frame(skb);
1963 hdev->le_last_tx = jiffies;
/* Return unused credits to the shared ACL pool. */
1972 hdev->acl_cnt = cnt;
/* TX tasklet body: under the read side of hci_task_lock (so protocol
 * (un)registration cannot race), run each link-type scheduler and then
 * flush any raw packets queued by userspace. */
1975 static void hci_tx_task(unsigned long arg)
1977 struct hci_dev *hdev = (struct hci_dev *) arg;
1978 struct sk_buff *skb;
1980 read_lock(&hci_task_lock);
1982 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
1983 hdev->sco_cnt, hdev->le_cnt);
1985 /* Schedule queues and send stuff to HCI driver */
1987 hci_sched_acl(hdev);
1989 hci_sched_sco(hdev);
1991 hci_sched_esco(hdev);
/* NOTE(review): the hci_sched_le() call expected here is missing from
 * this excerpt — confirm against upstream. */
1995 /* Send next queued raw (unknown type) packet */
1996 while ((skb = skb_dequeue(&hdev->raw_q)))
1997 hci_send_frame(skb);
1999 read_unlock(&hci_task_lock);
2002 /* ----- HCI RX task (incoming data processing) ----- */
2004 /* ACL data packet */
/* RX path for one ACL data packet: strip the ACL header, split the wire
 * handle word into connection handle + PB/BC flags, look up the connection
 * and pass the payload to the registered L2CAP protocol.
 * NOTE(review): the kfree_skb() for the unknown-handle path appears to have
 * been dropped by extraction. */
2005 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2007 struct hci_acl_hdr *hdr = (void *) skb->data;
2008 struct hci_conn *conn;
2009 __u16 handle, flags;
2011 skb_pull(skb, HCI_ACL_HDR_SIZE);
/* The 16-bit field carries both handle and flag bits; unpack them. */
2013 handle = __le16_to_cpu(hdr->handle);
2014 flags = hci_flags(handle);
2015 handle = hci_handle(handle);
2017 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2019 hdev->stat.acl_rx++;
/* Lookup under the device lock; the matching hci_dev_lock() call is not
 * visible in this excerpt. */
2022 conn = hci_conn_hash_lookup_handle(hdev, handle);
2023 hci_dev_unlock(hdev);
2026 register struct hci_proto *hp;
/* Incoming data implies activity; leave sniff mode. */
2028 hci_conn_enter_active_mode(conn);
2030 /* Send to upper protocol */
2031 hp = hci_proto[HCI_PROTO_L2CAP];
2032 if (hp && hp->recv_acldata) {
2033 hp->recv_acldata(conn, skb, flags);
2037 BT_ERR("%s ACL packet for unknown connection handle %d",
2038 hdev->name, handle);
2044 /* SCO data packet */
/* RX path for one SCO data packet: strip the SCO header, resolve the
 * connection handle and deliver the payload to the registered SCO protocol.
 * NOTE(review): the `__u16 handle;` declaration, hci_dev_lock() and the
 * unknown-handle kfree_skb() appear to be dropped by extraction. */
2045 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2047 struct hci_sco_hdr *hdr = (void *) skb->data;
2048 struct hci_conn *conn;
2051 skb_pull(skb, HCI_SCO_HDR_SIZE);
2053 handle = __le16_to_cpu(hdr->handle);
2055 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2057 hdev->stat.sco_rx++;
2060 conn = hci_conn_hash_lookup_handle(hdev, handle);
2061 hci_dev_unlock(hdev);
2064 register struct hci_proto *hp;
2066 /* Send to upper protocol */
2067 hp = hci_proto[HCI_PROTO_SCO];
2068 if (hp && hp->recv_scodata) {
2069 hp->recv_scodata(conn, skb);
2073 BT_ERR("%s SCO packet for unknown connection handle %d",
2074 hdev->name, handle);
/* RX tasklet body: drain hdev->rx_q under the read side of hci_task_lock.
 * Each packet is optionally copied to promiscuous sockets, dropped in raw
 * mode or during init (data packets only), then dispatched by packet type
 * to the event / ACL / SCO handlers.
 * NOTE(review): several kfree_skb()/continue lines and the default case of
 * the dispatch switch appear to be dropped by extraction. */
2080 static void hci_rx_task(unsigned long arg)
2082 struct hci_dev *hdev = (struct hci_dev *) arg;
2083 struct sk_buff *skb;
2085 BT_DBG("%s", hdev->name);
2087 read_lock(&hci_task_lock);
2089 while ((skb = skb_dequeue(&hdev->rx_q))) {
2090 if (atomic_read(&hdev->promisc)) {
2091 /* Send copy to the sockets */
2092 hci_send_to_sock(hdev, skb, NULL);
/* Raw mode: the stack does not process packets itself. */
2095 if (test_bit(HCI_RAW, &hdev->flags)) {
2100 if (test_bit(HCI_INIT, &hdev->flags)) {
2101 /* Don't process data packets in this states. */
2102 switch (bt_cb(skb)->pkt_type) {
2103 case HCI_ACLDATA_PKT:
2104 case HCI_SCODATA_PKT:
/* Normal dispatch by packet type. */
2111 switch (bt_cb(skb)->pkt_type) {
2113 hci_event_packet(hdev, skb);
2116 case HCI_ACLDATA_PKT:
2117 BT_DBG("%s ACL data packet", hdev->name);
2118 hci_acldata_packet(hdev, skb);
2121 case HCI_SCODATA_PKT:
2122 BT_DBG("%s SCO data packet", hdev->name);
2123 hci_scodata_packet(hdev, skb);
2132 read_unlock(&hci_task_lock);
2135 static void hci_cmd_task(unsigned long arg)
2137 struct hci_dev *hdev = (struct hci_dev *) arg;
2138 struct sk_buff *skb;
2140 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2142 /* Send queued commands */
2143 if (atomic_read(&hdev->cmd_cnt)) {
2144 skb = skb_dequeue(&hdev->cmd_q);
2148 kfree_skb(hdev->sent_cmd);
2150 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2151 if (hdev->sent_cmd) {
2152 atomic_dec(&hdev->cmd_cnt);
2153 hci_send_frame(skb);
2154 mod_timer(&hdev->cmd_timer,
2155 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2157 skb_queue_head(&hdev->cmd_q, skb);
2158 tasklet_schedule(&hdev->cmd_task);