2 BlueZ - Bluetooth protocol stack for Linux
3 Copyright (C) 2000-2001 Qualcomm Incorporated
5 Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7 This program is free software; you can redistribute it and/or modify
8 it under the terms of the GNU General Public License version 2 as
9 published by the Free Software Foundation;
11 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12 OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13 FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14 IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15 CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16 WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17 ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18 OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20 ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21 COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22 SOFTWARE IS DISCLAIMED.
25 /* Bluetooth HCI core. */
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
46 #include <asm/system.h>
47 #include <asm/uaccess.h>
48 #include <asm/unaligned.h>
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
/* Forward declarations for the three HCI tasklets and the notifier helper. */
static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

/* Guards the rx/tx/cmd tasklets against protocol (un)registration. */
static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);
75 /* ---- HCI notifications ---- */
77 int hci_register_notifier(struct notifier_block *nb)
79 return atomic_notifier_chain_register(&hci_notifier, nb);
82 int hci_unregister_notifier(struct notifier_block *nb)
84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
87 static void hci_notify(struct hci_dev *hdev, int event)
89 atomic_notifier_call_chain(&hci_notifier, event, hdev);
92 /* ---- HCI requests ---- */
94 void hci_req_complete(struct hci_dev *hdev, int result)
96 BT_DBG("%s result 0x%2.2x", hdev->name, result);
98 if (hdev->req_status == HCI_REQ_PEND) {
99 hdev->req_result = result;
100 hdev->req_status = HCI_REQ_DONE;
101 wake_up_interruptible(&hdev->req_wait_q);
105 static void hci_req_cancel(struct hci_dev *hdev, int err)
107 BT_DBG("%s err 0x%2.2x", hdev->name, err);
109 if (hdev->req_status == HCI_REQ_PEND) {
110 hdev->req_result = err;
111 hdev->req_status = HCI_REQ_CANCELED;
112 wake_up_interruptible(&hdev->req_wait_q);
116 /* Execute request and wait for completion. */
117 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
118 unsigned long opt, __u32 timeout)
120 DECLARE_WAITQUEUE(wait, current);
123 BT_DBG("%s start", hdev->name);
125 hdev->req_status = HCI_REQ_PEND;
127 add_wait_queue(&hdev->req_wait_q, &wait);
128 set_current_state(TASK_INTERRUPTIBLE);
131 schedule_timeout(timeout);
133 remove_wait_queue(&hdev->req_wait_q, &wait);
135 if (signal_pending(current))
138 switch (hdev->req_status) {
140 err = -bt_err(hdev->req_result);
143 case HCI_REQ_CANCELED:
144 err = -hdev->req_result;
152 hdev->req_status = hdev->req_result = 0;
154 BT_DBG("%s end: err %d", hdev->name, err);
159 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
160 unsigned long opt, __u32 timeout)
164 if (!test_bit(HCI_UP, &hdev->flags))
167 /* Serialize all requests */
169 ret = __hci_request(hdev, req, opt, timeout);
170 hci_req_unlock(hdev);
175 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
177 BT_DBG("%s %ld", hdev->name, opt);
180 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
183 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
189 BT_DBG("%s %ld", hdev->name, opt);
191 /* Driver initialization */
193 /* Special commands */
194 while ((skb = skb_dequeue(&hdev->driver_init))) {
195 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
196 skb->dev = (void *) hdev;
198 skb_queue_tail(&hdev->cmd_q, skb);
199 tasklet_schedule(&hdev->cmd_task);
201 skb_queue_purge(&hdev->driver_init);
203 /* Mandatory initialization */
206 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
207 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
209 /* Read Local Supported Features */
210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
212 /* Read Local Version */
213 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
215 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
216 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
219 /* Host buffer size */
221 struct hci_cp_host_buffer_size cp;
222 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
223 cp.sco_mtu = HCI_MAX_SCO_SIZE;
224 cp.acl_max_pkt = cpu_to_le16(0xffff);
225 cp.sco_max_pkt = cpu_to_le16(0xffff);
226 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
230 /* Read BD Address */
231 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
233 /* Read Class of Device */
234 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
236 /* Read Local Name */
237 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
239 /* Read Voice Setting */
240 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
242 /* Optional initialization */
244 /* Clear Event Filters */
245 flt_type = HCI_FLT_CLEAR_ALL;
246 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
248 /* Page timeout ~20 secs */
249 param = cpu_to_le16(0x8000);
250 hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, ¶m);
252 /* Connection accept timeout ~20 secs */
253 param = cpu_to_le16(0x7d00);
254 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
257 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
261 BT_DBG("%s %x", hdev->name, scan);
263 /* Inquiry and Page scans */
264 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
267 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
271 BT_DBG("%s %x", hdev->name, auth);
274 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
277 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
281 BT_DBG("%s %x", hdev->name, encrypt);
284 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
287 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
289 __le16 policy = cpu_to_le16(opt);
291 BT_DBG("%s %x", hdev->name, policy);
293 /* Default link policy */
294 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
297 /* Get HCI device by index.
298 * Device is held on return. */
299 struct hci_dev *hci_dev_get(int index)
301 struct hci_dev *hdev = NULL;
309 read_lock(&hci_dev_list_lock);
310 list_for_each(p, &hci_dev_list) {
311 struct hci_dev *d = list_entry(p, struct hci_dev, list);
312 if (d->id == index) {
313 hdev = hci_dev_hold(d);
317 read_unlock(&hci_dev_list_lock);
321 /* ---- Inquiry support ---- */
322 static void inquiry_cache_flush(struct hci_dev *hdev)
324 struct inquiry_cache *cache = &hdev->inq_cache;
325 struct inquiry_entry *next = cache->list, *e;
327 BT_DBG("cache %p", cache);
336 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
338 struct inquiry_cache *cache = &hdev->inq_cache;
339 struct inquiry_entry *e;
341 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
343 for (e = cache->list; e; e = e->next)
344 if (!bacmp(&e->data.bdaddr, bdaddr))
349 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
351 struct inquiry_cache *cache = &hdev->inq_cache;
352 struct inquiry_entry *e;
354 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
356 if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
357 /* Entry not in the cache. Add new one. */
358 if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
360 e->next = cache->list;
364 memcpy(&e->data, data, sizeof(*data));
365 e->timestamp = jiffies;
366 cache->timestamp = jiffies;
369 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
371 struct inquiry_cache *cache = &hdev->inq_cache;
372 struct inquiry_info *info = (struct inquiry_info *) buf;
373 struct inquiry_entry *e;
376 for (e = cache->list; e && copied < num; e = e->next, copied++) {
377 struct inquiry_data *data = &e->data;
378 bacpy(&info->bdaddr, &data->bdaddr);
379 info->pscan_rep_mode = data->pscan_rep_mode;
380 info->pscan_period_mode = data->pscan_period_mode;
381 info->pscan_mode = data->pscan_mode;
382 memcpy(info->dev_class, data->dev_class, 3);
383 info->clock_offset = data->clock_offset;
387 BT_DBG("cache %p, copied %d", cache, copied);
391 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
393 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
394 struct hci_cp_inquiry cp;
396 BT_DBG("%s", hdev->name);
398 if (test_bit(HCI_INQUIRY, &hdev->flags))
402 memcpy(&cp.lap, &ir->lap, 3);
403 cp.length = ir->length;
404 cp.num_rsp = ir->num_rsp;
405 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
408 int hci_inquiry(void __user *arg)
410 __u8 __user *ptr = arg;
411 struct hci_inquiry_req ir;
412 struct hci_dev *hdev;
413 int err = 0, do_inquiry = 0, max_rsp;
417 if (copy_from_user(&ir, ptr, sizeof(ir)))
420 if (!(hdev = hci_dev_get(ir.dev_id)))
423 hci_dev_lock_bh(hdev);
424 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
425 inquiry_cache_empty(hdev) ||
426 ir.flags & IREQ_CACHE_FLUSH) {
427 inquiry_cache_flush(hdev);
430 hci_dev_unlock_bh(hdev);
432 timeo = ir.length * msecs_to_jiffies(2000);
433 if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
436 /* for unlimited number of responses we will use buffer with 255 entries */
437 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
439 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
440 * copy it to the user space.
442 if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
447 hci_dev_lock_bh(hdev);
448 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
449 hci_dev_unlock_bh(hdev);
451 BT_DBG("num_rsp %d", ir.num_rsp);
453 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
455 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
468 /* ---- HCI ioctl helpers ---- */
470 int hci_dev_open(__u16 dev)
472 struct hci_dev *hdev;
475 if (!(hdev = hci_dev_get(dev)))
478 BT_DBG("%s %p", hdev->name, hdev);
482 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
487 if (test_bit(HCI_UP, &hdev->flags)) {
492 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
493 set_bit(HCI_RAW, &hdev->flags);
495 /* Treat all non BR/EDR controllers as raw devices for now */
496 if (hdev->dev_type != HCI_BREDR)
497 set_bit(HCI_RAW, &hdev->flags);
499 if (hdev->open(hdev)) {
504 if (!test_bit(HCI_RAW, &hdev->flags)) {
505 atomic_set(&hdev->cmd_cnt, 1);
506 set_bit(HCI_INIT, &hdev->flags);
508 //__hci_request(hdev, hci_reset_req, 0, HZ);
509 ret = __hci_request(hdev, hci_init_req, 0,
510 msecs_to_jiffies(HCI_INIT_TIMEOUT));
512 clear_bit(HCI_INIT, &hdev->flags);
517 set_bit(HCI_UP, &hdev->flags);
518 hci_notify(hdev, HCI_DEV_UP);
520 /* Init failed, cleanup */
521 tasklet_kill(&hdev->rx_task);
522 tasklet_kill(&hdev->tx_task);
523 tasklet_kill(&hdev->cmd_task);
525 skb_queue_purge(&hdev->cmd_q);
526 skb_queue_purge(&hdev->rx_q);
531 if (hdev->sent_cmd) {
532 kfree_skb(hdev->sent_cmd);
533 hdev->sent_cmd = NULL;
541 hci_req_unlock(hdev);
546 static int hci_dev_do_close(struct hci_dev *hdev)
548 BT_DBG("%s %p", hdev->name, hdev);
550 hci_req_cancel(hdev, ENODEV);
553 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
554 hci_req_unlock(hdev);
558 /* Kill RX and TX tasks */
559 tasklet_kill(&hdev->rx_task);
560 tasklet_kill(&hdev->tx_task);
562 hci_dev_lock_bh(hdev);
563 inquiry_cache_flush(hdev);
564 hci_conn_hash_flush(hdev);
565 hci_blacklist_clear(hdev);
566 hci_dev_unlock_bh(hdev);
568 hci_notify(hdev, HCI_DEV_DOWN);
574 skb_queue_purge(&hdev->cmd_q);
575 atomic_set(&hdev->cmd_cnt, 1);
576 if (!test_bit(HCI_RAW, &hdev->flags)) {
577 set_bit(HCI_INIT, &hdev->flags);
578 __hci_request(hdev, hci_reset_req, 0,
579 msecs_to_jiffies(250));
580 clear_bit(HCI_INIT, &hdev->flags);
584 tasklet_kill(&hdev->cmd_task);
587 skb_queue_purge(&hdev->rx_q);
588 skb_queue_purge(&hdev->cmd_q);
589 skb_queue_purge(&hdev->raw_q);
591 /* Drop last sent command */
592 if (hdev->sent_cmd) {
593 kfree_skb(hdev->sent_cmd);
594 hdev->sent_cmd = NULL;
597 /* After this point our queues are empty
598 * and no tasks are scheduled. */
604 hci_req_unlock(hdev);
610 int hci_dev_close(__u16 dev)
612 struct hci_dev *hdev;
615 if (!(hdev = hci_dev_get(dev)))
617 err = hci_dev_do_close(hdev);
622 int hci_dev_reset(__u16 dev)
624 struct hci_dev *hdev;
627 if (!(hdev = hci_dev_get(dev)))
631 tasklet_disable(&hdev->tx_task);
633 if (!test_bit(HCI_UP, &hdev->flags))
637 skb_queue_purge(&hdev->rx_q);
638 skb_queue_purge(&hdev->cmd_q);
640 hci_dev_lock_bh(hdev);
641 inquiry_cache_flush(hdev);
642 hci_conn_hash_flush(hdev);
643 hci_dev_unlock_bh(hdev);
648 atomic_set(&hdev->cmd_cnt, 1);
649 hdev->acl_cnt = 0; hdev->sco_cnt = 0;
651 if (!test_bit(HCI_RAW, &hdev->flags))
652 ret = __hci_request(hdev, hci_reset_req, 0,
653 msecs_to_jiffies(HCI_INIT_TIMEOUT));
656 tasklet_enable(&hdev->tx_task);
657 hci_req_unlock(hdev);
662 int hci_dev_reset_stat(__u16 dev)
664 struct hci_dev *hdev;
667 if (!(hdev = hci_dev_get(dev)))
670 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
677 int hci_dev_cmd(unsigned int cmd, void __user *arg)
679 struct hci_dev *hdev;
680 struct hci_dev_req dr;
683 if (copy_from_user(&dr, arg, sizeof(dr)))
686 if (!(hdev = hci_dev_get(dr.dev_id)))
691 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
692 msecs_to_jiffies(HCI_INIT_TIMEOUT));
696 if (!lmp_encrypt_capable(hdev)) {
701 if (!test_bit(HCI_AUTH, &hdev->flags)) {
702 /* Auth must be enabled first */
703 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
704 msecs_to_jiffies(HCI_INIT_TIMEOUT));
709 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
710 msecs_to_jiffies(HCI_INIT_TIMEOUT));
714 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
715 msecs_to_jiffies(HCI_INIT_TIMEOUT));
719 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
720 msecs_to_jiffies(HCI_INIT_TIMEOUT));
724 hdev->link_mode = ((__u16) dr.dev_opt) &
725 (HCI_LM_MASTER | HCI_LM_ACCEPT);
729 hdev->pkt_type = (__u16) dr.dev_opt;
733 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
734 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
738 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
739 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
751 int hci_get_dev_list(void __user *arg)
753 struct hci_dev_list_req *dl;
754 struct hci_dev_req *dr;
756 int n = 0, size, err;
759 if (get_user(dev_num, (__u16 __user *) arg))
762 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
765 size = sizeof(*dl) + dev_num * sizeof(*dr);
767 if (!(dl = kzalloc(size, GFP_KERNEL)))
772 read_lock_bh(&hci_dev_list_lock);
773 list_for_each(p, &hci_dev_list) {
774 struct hci_dev *hdev;
775 hdev = list_entry(p, struct hci_dev, list);
776 (dr + n)->dev_id = hdev->id;
777 (dr + n)->dev_opt = hdev->flags;
781 read_unlock_bh(&hci_dev_list_lock);
784 size = sizeof(*dl) + n * sizeof(*dr);
786 err = copy_to_user(arg, dl, size);
789 return err ? -EFAULT : 0;
792 int hci_get_dev_info(void __user *arg)
794 struct hci_dev *hdev;
795 struct hci_dev_info di;
798 if (copy_from_user(&di, arg, sizeof(di)))
801 if (!(hdev = hci_dev_get(di.dev_id)))
804 strcpy(di.name, hdev->name);
805 di.bdaddr = hdev->bdaddr;
806 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
807 di.flags = hdev->flags;
808 di.pkt_type = hdev->pkt_type;
809 di.acl_mtu = hdev->acl_mtu;
810 di.acl_pkts = hdev->acl_pkts;
811 di.sco_mtu = hdev->sco_mtu;
812 di.sco_pkts = hdev->sco_pkts;
813 di.link_policy = hdev->link_policy;
814 di.link_mode = hdev->link_mode;
816 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
817 memcpy(&di.features, &hdev->features, sizeof(di.features));
819 if (copy_to_user(arg, &di, sizeof(di)))
827 /* ---- Interface to HCI drivers ---- */
829 static int hci_rfkill_set_block(void *data, bool blocked)
831 struct hci_dev *hdev = data;
833 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
838 hci_dev_do_close(hdev);
843 static const struct rfkill_ops hci_rfkill_ops = {
844 .set_block = hci_rfkill_set_block,
847 /* Alloc HCI device */
848 struct hci_dev *hci_alloc_dev(void)
850 struct hci_dev *hdev;
852 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
856 skb_queue_head_init(&hdev->driver_init);
860 EXPORT_SYMBOL(hci_alloc_dev);
862 /* Free HCI device */
863 void hci_free_dev(struct hci_dev *hdev)
865 skb_queue_purge(&hdev->driver_init);
867 /* will free via device release */
868 put_device(&hdev->dev);
870 EXPORT_SYMBOL(hci_free_dev);
872 /* Register HCI device */
873 int hci_register_dev(struct hci_dev *hdev)
875 struct list_head *head = &hci_dev_list, *p;
878 BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
879 hdev->bus, hdev->owner);
881 if (!hdev->open || !hdev->close || !hdev->destruct)
884 write_lock_bh(&hci_dev_list_lock);
886 /* Find first available device id */
887 list_for_each(p, &hci_dev_list) {
888 if (list_entry(p, struct hci_dev, list)->id != id)
893 sprintf(hdev->name, "hci%d", id);
895 list_add(&hdev->list, head);
897 atomic_set(&hdev->refcnt, 1);
898 spin_lock_init(&hdev->lock);
901 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
902 hdev->esco_type = (ESCO_HV1);
903 hdev->link_mode = (HCI_LM_ACCEPT);
905 hdev->idle_timeout = 0;
906 hdev->sniff_max_interval = 800;
907 hdev->sniff_min_interval = 80;
909 tasklet_init(&hdev->cmd_task, hci_cmd_task,(unsigned long) hdev);
910 tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
911 tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
913 skb_queue_head_init(&hdev->rx_q);
914 skb_queue_head_init(&hdev->cmd_q);
915 skb_queue_head_init(&hdev->raw_q);
917 for (i = 0; i < NUM_REASSEMBLY; i++)
918 hdev->reassembly[i] = NULL;
920 init_waitqueue_head(&hdev->req_wait_q);
921 mutex_init(&hdev->req_lock);
923 inquiry_cache_init(hdev);
925 hci_conn_hash_init(hdev);
927 INIT_LIST_HEAD(&hdev->blacklist.list);
929 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
931 atomic_set(&hdev->promisc, 0);
933 write_unlock_bh(&hci_dev_list_lock);
935 hdev->workqueue = create_singlethread_workqueue(hdev->name);
936 if (!hdev->workqueue)
939 hci_register_sysfs(hdev);
941 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
942 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
944 if (rfkill_register(hdev->rfkill) < 0) {
945 rfkill_destroy(hdev->rfkill);
950 hci_notify(hdev, HCI_DEV_REG);
955 write_lock_bh(&hci_dev_list_lock);
956 list_del(&hdev->list);
957 write_unlock_bh(&hci_dev_list_lock);
961 EXPORT_SYMBOL(hci_register_dev);
963 /* Unregister HCI device */
964 int hci_unregister_dev(struct hci_dev *hdev)
968 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
970 write_lock_bh(&hci_dev_list_lock);
971 list_del(&hdev->list);
972 write_unlock_bh(&hci_dev_list_lock);
974 hci_dev_do_close(hdev);
976 for (i = 0; i < NUM_REASSEMBLY; i++)
977 kfree_skb(hdev->reassembly[i]);
979 hci_notify(hdev, HCI_DEV_UNREG);
982 rfkill_unregister(hdev->rfkill);
983 rfkill_destroy(hdev->rfkill);
986 hci_unregister_sysfs(hdev);
988 destroy_workqueue(hdev->workqueue);
994 EXPORT_SYMBOL(hci_unregister_dev);
996 /* Suspend HCI device */
997 int hci_suspend_dev(struct hci_dev *hdev)
999 hci_notify(hdev, HCI_DEV_SUSPEND);
1002 EXPORT_SYMBOL(hci_suspend_dev);
1004 /* Resume HCI device */
1005 int hci_resume_dev(struct hci_dev *hdev)
1007 hci_notify(hdev, HCI_DEV_RESUME);
1010 EXPORT_SYMBOL(hci_resume_dev);
1012 /* Receive frame from HCI drivers */
1013 int hci_recv_frame(struct sk_buff *skb)
1015 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1016 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1017 && !test_bit(HCI_INIT, &hdev->flags))) {
1023 bt_cb(skb)->incoming = 1;
1026 __net_timestamp(skb);
1028 /* Queue frame for rx task */
1029 skb_queue_tail(&hdev->rx_q, skb);
1030 tasklet_schedule(&hdev->rx_task);
1034 EXPORT_SYMBOL(hci_recv_frame);
/*
 * hci_reassembly() - append a fragment of an incoming HCI packet to the
 * per-slot reassembly skb; once the expected length is reached the complete
 * skb is handed to hci_recv_frame().
 *
 * NOTE(review): this block is extraction-mangled -- original file line
 * numbers are fused into the text and interior lines (braces, loop
 * structure, declarations, error returns) are elided.  Code tokens are
 * left byte-identical here; restore the missing structure from the
 * upstream BlueZ hci_core.c before compiling.
 */
1036 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1037 int count, __u8 index, gfp_t gfp_mask)
1042 struct sk_buff *skb;
1043 struct bt_skb_cb *scb;
/* Reject unknown packet types and out-of-range reassembly slots. */
1045 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1046 index >= NUM_REASSEMBLY)
1049 skb = hdev->reassembly[index];
/* No partial skb yet: choose max payload and header size per type. */
1053 case HCI_ACLDATA_PKT:
1054 len = HCI_MAX_FRAME_SIZE;
1055 hlen = HCI_ACL_HDR_SIZE;
1058 len = HCI_MAX_EVENT_SIZE;
1059 hlen = HCI_EVENT_HDR_SIZE;
1061 case HCI_SCODATA_PKT:
1062 len = HCI_MAX_SCO_SIZE;
1063 hlen = HCI_SCO_HDR_SIZE;
/* Allocate a fresh reassembly skb and stash it in the slot. */
1067 skb = bt_skb_alloc(len, gfp_mask);
1071 scb = (void *) skb->cb;
1073 scb->pkt_type = type;
1075 skb->dev = (void *) hdev;
1076 hdev->reassembly[index] = skb;
/* Copy as much of this fragment as the current header/body expects. */
1080 scb = (void *) skb->cb;
1081 len = min(scb->expect, (__u16)count);
1083 memcpy(skb_put(skb, len), data, len);
/* Header complete: read the payload length and grow expectations;
 * bail out (dropping the slot) if the payload cannot fit. */
1092 if (skb->len == HCI_EVENT_HDR_SIZE) {
1093 struct hci_event_hdr *h = hci_event_hdr(skb);
1094 scb->expect = h->plen;
1096 if (skb_tailroom(skb) < scb->expect) {
1098 hdev->reassembly[index] = NULL;
1104 case HCI_ACLDATA_PKT:
1105 if (skb->len == HCI_ACL_HDR_SIZE) {
1106 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1107 scb->expect = __le16_to_cpu(h->dlen);
1109 if (skb_tailroom(skb) < scb->expect) {
1111 hdev->reassembly[index] = NULL;
1117 case HCI_SCODATA_PKT:
1118 if (skb->len == HCI_SCO_HDR_SIZE) {
1119 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1120 scb->expect = h->dlen;
1122 if (skb_tailroom(skb) < scb->expect) {
1124 hdev->reassembly[index] = NULL;
/* Nothing more expected: the frame is complete, pass it up. */
1131 if (scb->expect == 0) {
1132 /* Complete frame */
1134 bt_cb(skb)->pkt_type = type;
1135 hci_recv_frame(skb);
1137 hdev->reassembly[index] = NULL;
1145 /* Receive packet type fragment */
1146 #define __reassembly(hdev, type) ((hdev)->reassembly[(type) - 1])
/*
 * hci_recv_fragment() - legacy driver entry point: accumulate raw fragments
 * of one packet type into a per-type reassembly skb and deliver complete
 * frames via hci_recv_frame().
 *
 * NOTE(review): extraction-mangled like hci_reassembly() above -- fused
 * line numbers and elided loop/brace lines.  Tokens left byte-identical;
 * restore structure from upstream before compiling.
 */
1148 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
/* Only ACL, SCO and event packets can be reassembled. */
1150 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1154 struct sk_buff *skb = __reassembly(hdev, type);
1155 struct { int expect; } *scb;
1159 /* Start of the frame */
/* Peek at the header (when enough bytes arrived) to size the skb. */
1163 if (count >= HCI_EVENT_HDR_SIZE) {
1164 struct hci_event_hdr *h = data;
1165 len = HCI_EVENT_HDR_SIZE + h->plen;
1170 case HCI_ACLDATA_PKT:
1171 if (count >= HCI_ACL_HDR_SIZE) {
1172 struct hci_acl_hdr *h = data;
1173 len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
1178 case HCI_SCODATA_PKT:
1179 if (count >= HCI_SCO_HDR_SIZE) {
1180 struct hci_sco_hdr *h = data;
1181 len = HCI_SCO_HDR_SIZE + h->dlen;
1187 skb = bt_skb_alloc(len, GFP_ATOMIC);
1189 BT_ERR("%s no memory for packet", hdev->name);
1193 skb->dev = (void *) hdev;
1194 bt_cb(skb)->pkt_type = type;
1196 __reassembly(hdev, type) = skb;
1198 scb = (void *) skb->cb;
/* Continuation: copy up to the remaining expected byte count. */
1203 scb = (void *) skb->cb;
1207 len = min(len, count);
1209 memcpy(skb_put(skb, len), data, len);
/* Frame complete: detach from the slot and deliver upward. */
1213 if (scb->expect == 0) {
1214 /* Complete frame */
1216 __reassembly(hdev, type) = NULL;
1218 bt_cb(skb)->pkt_type = type;
1219 hci_recv_frame(skb);
1222 count -= len; data += len;
1227 EXPORT_SYMBOL(hci_recv_fragment);
1229 /* ---- Interface to upper protocols ---- */
1231 /* Register/Unregister protocols.
1232 * hci_task_lock is used to ensure that no tasks are running. */
1233 int hci_register_proto(struct hci_proto *hp)
1237 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1239 if (hp->id >= HCI_MAX_PROTO)
1242 write_lock_bh(&hci_task_lock);
1244 if (!hci_proto[hp->id])
1245 hci_proto[hp->id] = hp;
1249 write_unlock_bh(&hci_task_lock);
1253 EXPORT_SYMBOL(hci_register_proto);
1255 int hci_unregister_proto(struct hci_proto *hp)
1259 BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1261 if (hp->id >= HCI_MAX_PROTO)
1264 write_lock_bh(&hci_task_lock);
1266 if (hci_proto[hp->id])
1267 hci_proto[hp->id] = NULL;
1271 write_unlock_bh(&hci_task_lock);
1275 EXPORT_SYMBOL(hci_unregister_proto);
1277 int hci_register_cb(struct hci_cb *cb)
1279 BT_DBG("%p name %s", cb, cb->name);
1281 write_lock_bh(&hci_cb_list_lock);
1282 list_add(&cb->list, &hci_cb_list);
1283 write_unlock_bh(&hci_cb_list_lock);
1287 EXPORT_SYMBOL(hci_register_cb);
1289 int hci_unregister_cb(struct hci_cb *cb)
1291 BT_DBG("%p name %s", cb, cb->name);
1293 write_lock_bh(&hci_cb_list_lock);
1294 list_del(&cb->list);
1295 write_unlock_bh(&hci_cb_list_lock);
1299 EXPORT_SYMBOL(hci_unregister_cb);
1301 static int hci_send_frame(struct sk_buff *skb)
1303 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1310 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1312 if (atomic_read(&hdev->promisc)) {
1314 __net_timestamp(skb);
1316 hci_send_to_sock(hdev, skb);
1319 /* Get rid of skb owner, prior to sending to the driver. */
1322 return hdev->send(skb);
1325 /* Send HCI command */
1326 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1328 int len = HCI_COMMAND_HDR_SIZE + plen;
1329 struct hci_command_hdr *hdr;
1330 struct sk_buff *skb;
1332 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1334 skb = bt_skb_alloc(len, GFP_ATOMIC);
1336 BT_ERR("%s no memory for command", hdev->name);
1340 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1341 hdr->opcode = cpu_to_le16(opcode);
1345 memcpy(skb_put(skb, plen), param, plen);
1347 BT_DBG("skb len %d", skb->len);
1349 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1350 skb->dev = (void *) hdev;
1352 skb_queue_tail(&hdev->cmd_q, skb);
1353 tasklet_schedule(&hdev->cmd_task);
1358 /* Get data from the previously sent command */
1359 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1361 struct hci_command_hdr *hdr;
1363 if (!hdev->sent_cmd)
1366 hdr = (void *) hdev->sent_cmd->data;
1368 if (hdr->opcode != cpu_to_le16(opcode))
1371 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1373 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1377 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1379 struct hci_acl_hdr *hdr;
1382 skb_push(skb, HCI_ACL_HDR_SIZE);
1383 skb_reset_transport_header(skb);
1384 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1385 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1386 hdr->dlen = cpu_to_le16(len);
1389 void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1391 struct hci_dev *hdev = conn->hdev;
1392 struct sk_buff *list;
1394 BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1396 skb->dev = (void *) hdev;
1397 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1398 hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1400 if (!(list = skb_shinfo(skb)->frag_list)) {
1401 /* Non fragmented */
1402 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1404 skb_queue_tail(&conn->data_q, skb);
1407 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1409 skb_shinfo(skb)->frag_list = NULL;
1411 /* Queue all fragments atomically */
1412 spin_lock_bh(&conn->data_q.lock);
1414 __skb_queue_tail(&conn->data_q, skb);
1416 skb = list; list = list->next;
1418 skb->dev = (void *) hdev;
1419 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1420 hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1422 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1424 __skb_queue_tail(&conn->data_q, skb);
1427 spin_unlock_bh(&conn->data_q.lock);
1430 tasklet_schedule(&hdev->tx_task);
1432 EXPORT_SYMBOL(hci_send_acl);
1435 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1437 struct hci_dev *hdev = conn->hdev;
1438 struct hci_sco_hdr hdr;
1440 BT_DBG("%s len %d", hdev->name, skb->len);
1442 hdr.handle = cpu_to_le16(conn->handle);
1443 hdr.dlen = skb->len;
1445 skb_push(skb, HCI_SCO_HDR_SIZE);
1446 skb_reset_transport_header(skb);
1447 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1449 skb->dev = (void *) hdev;
1450 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1452 skb_queue_tail(&conn->data_q, skb);
1453 tasklet_schedule(&hdev->tx_task);
1455 EXPORT_SYMBOL(hci_send_sco);
1457 /* ---- HCI TX task (outgoing data) ---- */
1459 /* HCI Connection scheduler */
1460 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1462 struct hci_conn_hash *h = &hdev->conn_hash;
1463 struct hci_conn *conn = NULL;
1464 int num = 0, min = ~0;
1465 struct list_head *p;
1467 /* We don't have to lock device here. Connections are always
1468 * added and removed with TX task disabled. */
1469 list_for_each(p, &h->list) {
1471 c = list_entry(p, struct hci_conn, list);
1473 if (c->type != type || skb_queue_empty(&c->data_q))
1476 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1481 if (c->sent < min) {
1488 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1494 BT_DBG("conn %p quote %d", conn, *quote);
1498 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1500 struct hci_conn_hash *h = &hdev->conn_hash;
1501 struct list_head *p;
1504 BT_ERR("%s ACL tx timeout", hdev->name);
1506 /* Kill stalled connections */
1507 list_for_each(p, &h->list) {
1508 c = list_entry(p, struct hci_conn, list);
1509 if (c->type == ACL_LINK && c->sent) {
1510 BT_ERR("%s killing stalled ACL connection %s",
1511 hdev->name, batostr(&c->dst));
1512 hci_acl_disconn(c, 0x13);
1517 static inline void hci_sched_acl(struct hci_dev *hdev)
1519 struct hci_conn *conn;
1520 struct sk_buff *skb;
1523 BT_DBG("%s", hdev->name);
1525 if (!test_bit(HCI_RAW, &hdev->flags)) {
1526 /* ACL tx timeout must be longer than maximum
1527 * link supervision timeout (40.9 seconds) */
1528 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1529 hci_acl_tx_to(hdev);
1532 while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, "e))) {
1533 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1534 BT_DBG("skb %p len %d", skb, skb->len);
1536 hci_conn_enter_active_mode(conn);
1538 hci_send_frame(skb);
1539 hdev->acl_last_tx = jiffies;
1548 static inline void hci_sched_sco(struct hci_dev *hdev)
1550 struct hci_conn *conn;
1551 struct sk_buff *skb;
1554 BT_DBG("%s", hdev->name);
1556 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, "e))) {
1557 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1558 BT_DBG("skb %p len %d", skb, skb->len);
1559 hci_send_frame(skb);
1562 if (conn->sent == ~0)
1568 static inline void hci_sched_esco(struct hci_dev *hdev)
1570 struct hci_conn *conn;
1571 struct sk_buff *skb;
1574 BT_DBG("%s", hdev->name);
1576 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, "e))) {
1577 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1578 BT_DBG("skb %p len %d", skb, skb->len);
1579 hci_send_frame(skb);
1582 if (conn->sent == ~0)
1588 static void hci_tx_task(unsigned long arg)
1590 struct hci_dev *hdev = (struct hci_dev *) arg;
1591 struct sk_buff *skb;
1593 read_lock(&hci_task_lock);
1595 BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1597 /* Schedule queues and send stuff to HCI driver */
1599 hci_sched_acl(hdev);
1601 hci_sched_sco(hdev);
1603 hci_sched_esco(hdev);
1605 /* Send next queued raw (unknown type) packet */
1606 while ((skb = skb_dequeue(&hdev->raw_q)))
1607 hci_send_frame(skb);
1609 read_unlock(&hci_task_lock);
1612 /* ----- HCI RX task (incoming data proccessing) ----- */
1614 /* ACL data packet */
1615 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1617 struct hci_acl_hdr *hdr = (void *) skb->data;
1618 struct hci_conn *conn;
1619 __u16 handle, flags;
1621 skb_pull(skb, HCI_ACL_HDR_SIZE);
1623 handle = __le16_to_cpu(hdr->handle);
1624 flags = hci_flags(handle);
1625 handle = hci_handle(handle);
1627 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1629 hdev->stat.acl_rx++;
1632 conn = hci_conn_hash_lookup_handle(hdev, handle);
1633 hci_dev_unlock(hdev);
1636 register struct hci_proto *hp;
1638 hci_conn_enter_active_mode(conn);
1640 /* Send to upper protocol */
1641 if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
1642 hp->recv_acldata(conn, skb, flags);
1646 BT_ERR("%s ACL packet for unknown connection handle %d",
1647 hdev->name, handle);
1653 /* SCO data packet */
1654 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1656 struct hci_sco_hdr *hdr = (void *) skb->data;
1657 struct hci_conn *conn;
1660 skb_pull(skb, HCI_SCO_HDR_SIZE);
1662 handle = __le16_to_cpu(hdr->handle);
1664 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1666 hdev->stat.sco_rx++;
1669 conn = hci_conn_hash_lookup_handle(hdev, handle);
1670 hci_dev_unlock(hdev);
1673 register struct hci_proto *hp;
1675 /* Send to upper protocol */
1676 if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
1677 hp->recv_scodata(conn, skb);
1681 BT_ERR("%s SCO packet for unknown connection handle %d",
1682 hdev->name, handle);
1688 static void hci_rx_task(unsigned long arg)
1690 struct hci_dev *hdev = (struct hci_dev *) arg;
1691 struct sk_buff *skb;
1693 BT_DBG("%s", hdev->name);
1695 read_lock(&hci_task_lock);
1697 while ((skb = skb_dequeue(&hdev->rx_q))) {
1698 if (atomic_read(&hdev->promisc)) {
1699 /* Send copy to the sockets */
1700 hci_send_to_sock(hdev, skb);
1703 if (test_bit(HCI_RAW, &hdev->flags)) {
1708 if (test_bit(HCI_INIT, &hdev->flags)) {
1709 /* Don't process data packets in this states. */
1710 switch (bt_cb(skb)->pkt_type) {
1711 case HCI_ACLDATA_PKT:
1712 case HCI_SCODATA_PKT:
1719 switch (bt_cb(skb)->pkt_type) {
1721 hci_event_packet(hdev, skb);
1724 case HCI_ACLDATA_PKT:
1725 BT_DBG("%s ACL data packet", hdev->name);
1726 hci_acldata_packet(hdev, skb);
1729 case HCI_SCODATA_PKT:
1730 BT_DBG("%s SCO data packet", hdev->name);
1731 hci_scodata_packet(hdev, skb);
1740 read_unlock(&hci_task_lock);
1743 static void hci_cmd_task(unsigned long arg)
1745 struct hci_dev *hdev = (struct hci_dev *) arg;
1746 struct sk_buff *skb;
1748 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1750 if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1751 BT_ERR("%s command tx timeout", hdev->name);
1752 atomic_set(&hdev->cmd_cnt, 1);
1755 /* Send queued commands */
1756 if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1757 kfree_skb(hdev->sent_cmd);
1759 if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
1760 atomic_dec(&hdev->cmd_cnt);
1761 hci_send_frame(skb);
1762 hdev->cmd_last_tx = jiffies;
1764 skb_queue_head(&hdev->cmd_q, skb);
1765 tasklet_schedule(&hdev->cmd_task);