/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/notifier.h>
44 #include <linux/rfkill.h>
45 #include <linux/timer.h>
46 #include <linux/crypto.h>
49 #include <asm/system.h>
50 #include <linux/uaccess.h>
51 #include <asm/unaligned.h>
53 #include <net/bluetooth/bluetooth.h>
54 #include <net/bluetooth/hci_core.h>
56 #define AUTO_OFF_TIMEOUT 2000
60 static void hci_rx_work(struct work_struct *work);
61 static void hci_cmd_work(struct work_struct *work);
62 static void hci_tx_work(struct work_struct *work);
65 LIST_HEAD(hci_dev_list);
66 DEFINE_RWLOCK(hci_dev_list_lock);
68 /* HCI callback list */
69 LIST_HEAD(hci_cb_list);
70 DEFINE_RWLOCK(hci_cb_list_lock);
72 /* HCI notifiers list */
73 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
75 /* ---- HCI notifications ---- */
77 int hci_register_notifier(struct notifier_block *nb)
79 return atomic_notifier_chain_register(&hci_notifier, nb);
82 int hci_unregister_notifier(struct notifier_block *nb)
84 return atomic_notifier_chain_unregister(&hci_notifier, nb);
87 static void hci_notify(struct hci_dev *hdev, int event)
89 atomic_notifier_call_chain(&hci_notifier, event, hdev);
92 /* ---- HCI requests ---- */
94 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
96 BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
98 /* If this is the init phase check if the completed command matches
99 * the last init command, and if not just return.
101 if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
104 if (hdev->req_status == HCI_REQ_PEND) {
105 hdev->req_result = result;
106 hdev->req_status = HCI_REQ_DONE;
107 wake_up_interruptible(&hdev->req_wait_q);
111 static void hci_req_cancel(struct hci_dev *hdev, int err)
113 BT_DBG("%s err 0x%2.2x", hdev->name, err);
115 if (hdev->req_status == HCI_REQ_PEND) {
116 hdev->req_result = err;
117 hdev->req_status = HCI_REQ_CANCELED;
118 wake_up_interruptible(&hdev->req_wait_q);
122 /* Execute request and wait for completion. */
123 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
124 unsigned long opt, __u32 timeout)
126 DECLARE_WAITQUEUE(wait, current);
129 BT_DBG("%s start", hdev->name);
131 hdev->req_status = HCI_REQ_PEND;
133 add_wait_queue(&hdev->req_wait_q, &wait);
134 set_current_state(TASK_INTERRUPTIBLE);
137 schedule_timeout(timeout);
139 remove_wait_queue(&hdev->req_wait_q, &wait);
141 if (signal_pending(current))
144 switch (hdev->req_status) {
146 err = -bt_to_errno(hdev->req_result);
149 case HCI_REQ_CANCELED:
150 err = -hdev->req_result;
158 hdev->req_status = hdev->req_result = 0;
160 BT_DBG("%s end: err %d", hdev->name, err);
165 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
166 unsigned long opt, __u32 timeout)
170 if (!test_bit(HCI_UP, &hdev->flags))
173 /* Serialize all requests */
175 ret = __hci_request(hdev, req, opt, timeout);
176 hci_req_unlock(hdev);
181 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
183 BT_DBG("%s %ld", hdev->name, opt);
186 set_bit(HCI_RESET, &hdev->flags);
187 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
190 static void bredr_init(struct hci_dev *hdev)
192 struct hci_cp_delete_stored_link_key cp;
196 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
198 /* Mandatory initialization */
201 if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
202 set_bit(HCI_RESET, &hdev->flags);
203 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
206 /* Read Local Supported Features */
207 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
209 /* Read Local Version */
210 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
212 /* Read Buffer Size (ACL mtu, max pkt, etc.) */
213 hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
215 /* Read BD Address */
216 hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
218 /* Read Class of Device */
219 hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
221 /* Read Local Name */
222 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
224 /* Read Voice Setting */
225 hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
227 /* Optional initialization */
229 /* Clear Event Filters */
230 flt_type = HCI_FLT_CLEAR_ALL;
231 hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
233 /* Connection accept timeout ~20 secs */
234 param = cpu_to_le16(0x7d00);
235 hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, ¶m);
237 bacpy(&cp.bdaddr, BDADDR_ANY);
239 hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
242 static void amp_init(struct hci_dev *hdev)
244 hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
247 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
249 /* Read Local Version */
250 hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
253 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
257 BT_DBG("%s %ld", hdev->name, opt);
259 /* Driver initialization */
261 /* Special commands */
262 while ((skb = skb_dequeue(&hdev->driver_init))) {
263 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
264 skb->dev = (void *) hdev;
266 skb_queue_tail(&hdev->cmd_q, skb);
267 queue_work(hdev->workqueue, &hdev->cmd_work);
269 skb_queue_purge(&hdev->driver_init);
271 switch (hdev->dev_type) {
281 BT_ERR("Unknown device type %d", hdev->dev_type);
287 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
289 BT_DBG("%s", hdev->name);
291 /* Read LE buffer size */
292 hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
295 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
299 BT_DBG("%s %x", hdev->name, scan);
301 /* Inquiry and Page scans */
302 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
305 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
309 BT_DBG("%s %x", hdev->name, auth);
312 hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
315 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
319 BT_DBG("%s %x", hdev->name, encrypt);
322 hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
325 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
327 __le16 policy = cpu_to_le16(opt);
329 BT_DBG("%s %x", hdev->name, policy);
331 /* Default link policy */
332 hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
335 /* Get HCI device by index.
336 * Device is held on return. */
337 struct hci_dev *hci_dev_get(int index)
339 struct hci_dev *hdev = NULL, *d;
346 read_lock(&hci_dev_list_lock);
347 list_for_each_entry(d, &hci_dev_list, list) {
348 if (d->id == index) {
349 hdev = hci_dev_hold(d);
353 read_unlock(&hci_dev_list_lock);
357 /* ---- Inquiry support ---- */
359 bool hci_discovery_active(struct hci_dev *hdev)
361 struct discovery_state *discov = &hdev->discovery;
363 if (discov->state == DISCOVERY_INQUIRY ||
364 discov->state == DISCOVERY_LE_SCAN ||
365 discov->state == DISCOVERY_RESOLVING)
371 void hci_discovery_set_state(struct hci_dev *hdev, int state)
373 BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
375 if (hdev->discovery.state == state)
379 case DISCOVERY_STOPPED:
380 mgmt_discovering(hdev, 0);
382 case DISCOVERY_STARTING:
384 case DISCOVERY_INQUIRY:
385 case DISCOVERY_LE_SCAN:
386 mgmt_discovering(hdev, 1);
388 case DISCOVERY_RESOLVING:
390 case DISCOVERY_STOPPING:
394 hdev->discovery.state = state;
397 static void inquiry_cache_flush(struct hci_dev *hdev)
399 struct discovery_state *cache = &hdev->discovery;
400 struct inquiry_entry *p, *n;
402 list_for_each_entry_safe(p, n, &cache->all, all) {
407 INIT_LIST_HEAD(&cache->unknown);
408 INIT_LIST_HEAD(&cache->resolve);
409 cache->state = DISCOVERY_STOPPED;
412 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
414 struct discovery_state *cache = &hdev->discovery;
415 struct inquiry_entry *e;
417 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
419 list_for_each_entry(e, &cache->all, all) {
420 if (!bacmp(&e->data.bdaddr, bdaddr))
427 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
430 struct discovery_state *cache = &hdev->discovery;
431 struct inquiry_entry *e;
433 BT_DBG("cache %p, %s", cache, batostr(bdaddr));
435 list_for_each_entry(e, &cache->unknown, list) {
436 if (!bacmp(&e->data.bdaddr, bdaddr))
443 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
447 struct discovery_state *cache = &hdev->discovery;
448 struct inquiry_entry *e;
450 BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
452 list_for_each_entry(e, &cache->resolve, list) {
453 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
455 if (!bacmp(&e->data.bdaddr, bdaddr))
462 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
463 struct inquiry_entry *ie)
465 struct discovery_state *cache = &hdev->discovery;
466 struct list_head *pos = &cache->resolve;
467 struct inquiry_entry *p;
471 list_for_each_entry(p, &cache->resolve, list) {
472 if (p->name_state != NAME_PENDING &&
473 abs(p->data.rssi) >= abs(ie->data.rssi))
478 list_add(&ie->list, pos);
481 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
484 struct discovery_state *cache = &hdev->discovery;
485 struct inquiry_entry *ie;
487 BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
489 ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
491 if (ie->name_state == NAME_NEEDED &&
492 data->rssi != ie->data.rssi) {
493 ie->data.rssi = data->rssi;
494 hci_inquiry_cache_update_resolve(hdev, ie);
500 /* Entry not in the cache. Add new one. */
501 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
505 list_add(&ie->all, &cache->all);
508 ie->name_state = NAME_KNOWN;
510 ie->name_state = NAME_NOT_KNOWN;
511 list_add(&ie->list, &cache->unknown);
515 if (name_known && ie->name_state != NAME_KNOWN &&
516 ie->name_state != NAME_PENDING) {
517 ie->name_state = NAME_KNOWN;
521 memcpy(&ie->data, data, sizeof(*data));
522 ie->timestamp = jiffies;
523 cache->timestamp = jiffies;
525 if (ie->name_state == NAME_NOT_KNOWN)
531 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
533 struct discovery_state *cache = &hdev->discovery;
534 struct inquiry_info *info = (struct inquiry_info *) buf;
535 struct inquiry_entry *e;
538 list_for_each_entry(e, &cache->all, all) {
539 struct inquiry_data *data = &e->data;
544 bacpy(&info->bdaddr, &data->bdaddr);
545 info->pscan_rep_mode = data->pscan_rep_mode;
546 info->pscan_period_mode = data->pscan_period_mode;
547 info->pscan_mode = data->pscan_mode;
548 memcpy(info->dev_class, data->dev_class, 3);
549 info->clock_offset = data->clock_offset;
555 BT_DBG("cache %p, copied %d", cache, copied);
559 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
561 struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
562 struct hci_cp_inquiry cp;
564 BT_DBG("%s", hdev->name);
566 if (test_bit(HCI_INQUIRY, &hdev->flags))
570 memcpy(&cp.lap, &ir->lap, 3);
571 cp.length = ir->length;
572 cp.num_rsp = ir->num_rsp;
573 hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
576 int hci_inquiry(void __user *arg)
578 __u8 __user *ptr = arg;
579 struct hci_inquiry_req ir;
580 struct hci_dev *hdev;
581 int err = 0, do_inquiry = 0, max_rsp;
585 if (copy_from_user(&ir, ptr, sizeof(ir)))
588 hdev = hci_dev_get(ir.dev_id);
593 if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
594 inquiry_cache_empty(hdev) ||
595 ir.flags & IREQ_CACHE_FLUSH) {
596 inquiry_cache_flush(hdev);
599 hci_dev_unlock(hdev);
601 timeo = ir.length * msecs_to_jiffies(2000);
604 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
609 /* for unlimited number of responses we will use buffer with 255 entries */
610 max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
612 /* cache_dump can't sleep. Therefore we allocate temp buffer and then
613 * copy it to the user space.
615 buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
622 ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
623 hci_dev_unlock(hdev);
625 BT_DBG("num_rsp %d", ir.num_rsp);
627 if (!copy_to_user(ptr, &ir, sizeof(ir))) {
629 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
642 /* ---- HCI ioctl helpers ---- */
644 int hci_dev_open(__u16 dev)
646 struct hci_dev *hdev;
649 hdev = hci_dev_get(dev);
653 BT_DBG("%s %p", hdev->name, hdev);
657 if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
662 if (test_bit(HCI_UP, &hdev->flags)) {
667 if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
668 set_bit(HCI_RAW, &hdev->flags);
670 /* Treat all non BR/EDR controllers as raw devices if
671 enable_hs is not set */
672 if (hdev->dev_type != HCI_BREDR && !enable_hs)
673 set_bit(HCI_RAW, &hdev->flags);
675 if (hdev->open(hdev)) {
680 if (!test_bit(HCI_RAW, &hdev->flags)) {
681 atomic_set(&hdev->cmd_cnt, 1);
682 set_bit(HCI_INIT, &hdev->flags);
683 hdev->init_last_cmd = 0;
685 ret = __hci_request(hdev, hci_init_req, 0,
686 msecs_to_jiffies(HCI_INIT_TIMEOUT));
688 if (lmp_host_le_capable(hdev))
689 ret = __hci_request(hdev, hci_le_init_req, 0,
690 msecs_to_jiffies(HCI_INIT_TIMEOUT));
692 clear_bit(HCI_INIT, &hdev->flags);
697 set_bit(HCI_UP, &hdev->flags);
698 hci_notify(hdev, HCI_DEV_UP);
699 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
701 mgmt_powered(hdev, 1);
702 hci_dev_unlock(hdev);
705 /* Init failed, cleanup */
706 flush_work(&hdev->tx_work);
707 flush_work(&hdev->cmd_work);
708 flush_work(&hdev->rx_work);
710 skb_queue_purge(&hdev->cmd_q);
711 skb_queue_purge(&hdev->rx_q);
716 if (hdev->sent_cmd) {
717 kfree_skb(hdev->sent_cmd);
718 hdev->sent_cmd = NULL;
726 hci_req_unlock(hdev);
731 static int hci_dev_do_close(struct hci_dev *hdev)
733 BT_DBG("%s %p", hdev->name, hdev);
735 hci_req_cancel(hdev, ENODEV);
738 if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
739 del_timer_sync(&hdev->cmd_timer);
740 hci_req_unlock(hdev);
744 /* Flush RX and TX works */
745 flush_work(&hdev->tx_work);
746 flush_work(&hdev->rx_work);
748 if (hdev->discov_timeout > 0) {
749 cancel_delayed_work(&hdev->discov_off);
750 hdev->discov_timeout = 0;
753 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
754 cancel_delayed_work(&hdev->power_off);
756 if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
757 cancel_delayed_work(&hdev->service_cache);
760 inquiry_cache_flush(hdev);
761 hci_conn_hash_flush(hdev);
762 hci_dev_unlock(hdev);
764 hci_notify(hdev, HCI_DEV_DOWN);
770 skb_queue_purge(&hdev->cmd_q);
771 atomic_set(&hdev->cmd_cnt, 1);
772 if (!test_bit(HCI_RAW, &hdev->flags) &&
773 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
774 set_bit(HCI_INIT, &hdev->flags);
775 __hci_request(hdev, hci_reset_req, 0,
776 msecs_to_jiffies(250));
777 clear_bit(HCI_INIT, &hdev->flags);
781 flush_work(&hdev->cmd_work);
784 skb_queue_purge(&hdev->rx_q);
785 skb_queue_purge(&hdev->cmd_q);
786 skb_queue_purge(&hdev->raw_q);
788 /* Drop last sent command */
789 if (hdev->sent_cmd) {
790 del_timer_sync(&hdev->cmd_timer);
791 kfree_skb(hdev->sent_cmd);
792 hdev->sent_cmd = NULL;
795 /* After this point our queues are empty
796 * and no tasks are scheduled. */
800 mgmt_powered(hdev, 0);
801 hci_dev_unlock(hdev);
806 hci_req_unlock(hdev);
812 int hci_dev_close(__u16 dev)
814 struct hci_dev *hdev;
817 hdev = hci_dev_get(dev);
820 err = hci_dev_do_close(hdev);
825 int hci_dev_reset(__u16 dev)
827 struct hci_dev *hdev;
830 hdev = hci_dev_get(dev);
836 if (!test_bit(HCI_UP, &hdev->flags))
840 skb_queue_purge(&hdev->rx_q);
841 skb_queue_purge(&hdev->cmd_q);
844 inquiry_cache_flush(hdev);
845 hci_conn_hash_flush(hdev);
846 hci_dev_unlock(hdev);
851 atomic_set(&hdev->cmd_cnt, 1);
852 hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
854 if (!test_bit(HCI_RAW, &hdev->flags))
855 ret = __hci_request(hdev, hci_reset_req, 0,
856 msecs_to_jiffies(HCI_INIT_TIMEOUT));
859 hci_req_unlock(hdev);
864 int hci_dev_reset_stat(__u16 dev)
866 struct hci_dev *hdev;
869 hdev = hci_dev_get(dev);
873 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
880 int hci_dev_cmd(unsigned int cmd, void __user *arg)
882 struct hci_dev *hdev;
883 struct hci_dev_req dr;
886 if (copy_from_user(&dr, arg, sizeof(dr)))
889 hdev = hci_dev_get(dr.dev_id);
895 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
896 msecs_to_jiffies(HCI_INIT_TIMEOUT));
900 if (!lmp_encrypt_capable(hdev)) {
905 if (!test_bit(HCI_AUTH, &hdev->flags)) {
906 /* Auth must be enabled first */
907 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
908 msecs_to_jiffies(HCI_INIT_TIMEOUT));
913 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
914 msecs_to_jiffies(HCI_INIT_TIMEOUT));
918 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
919 msecs_to_jiffies(HCI_INIT_TIMEOUT));
923 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
924 msecs_to_jiffies(HCI_INIT_TIMEOUT));
928 hdev->link_mode = ((__u16) dr.dev_opt) &
929 (HCI_LM_MASTER | HCI_LM_ACCEPT);
933 hdev->pkt_type = (__u16) dr.dev_opt;
937 hdev->acl_mtu = *((__u16 *) &dr.dev_opt + 1);
938 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
942 hdev->sco_mtu = *((__u16 *) &dr.dev_opt + 1);
943 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
955 int hci_get_dev_list(void __user *arg)
957 struct hci_dev *hdev;
958 struct hci_dev_list_req *dl;
959 struct hci_dev_req *dr;
960 int n = 0, size, err;
963 if (get_user(dev_num, (__u16 __user *) arg))
966 if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
969 size = sizeof(*dl) + dev_num * sizeof(*dr);
971 dl = kzalloc(size, GFP_KERNEL);
977 read_lock(&hci_dev_list_lock);
978 list_for_each_entry(hdev, &hci_dev_list, list) {
979 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
980 cancel_delayed_work(&hdev->power_off);
982 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
983 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
985 (dr + n)->dev_id = hdev->id;
986 (dr + n)->dev_opt = hdev->flags;
991 read_unlock(&hci_dev_list_lock);
994 size = sizeof(*dl) + n * sizeof(*dr);
996 err = copy_to_user(arg, dl, size);
999 return err ? -EFAULT : 0;
1002 int hci_get_dev_info(void __user *arg)
1004 struct hci_dev *hdev;
1005 struct hci_dev_info di;
1008 if (copy_from_user(&di, arg, sizeof(di)))
1011 hdev = hci_dev_get(di.dev_id);
1015 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1016 cancel_delayed_work_sync(&hdev->power_off);
1018 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1019 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1021 strcpy(di.name, hdev->name);
1022 di.bdaddr = hdev->bdaddr;
1023 di.type = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1024 di.flags = hdev->flags;
1025 di.pkt_type = hdev->pkt_type;
1026 di.acl_mtu = hdev->acl_mtu;
1027 di.acl_pkts = hdev->acl_pkts;
1028 di.sco_mtu = hdev->sco_mtu;
1029 di.sco_pkts = hdev->sco_pkts;
1030 di.link_policy = hdev->link_policy;
1031 di.link_mode = hdev->link_mode;
1033 memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1034 memcpy(&di.features, &hdev->features, sizeof(di.features));
1036 if (copy_to_user(arg, &di, sizeof(di)))
1044 /* ---- Interface to HCI drivers ---- */
1046 static int hci_rfkill_set_block(void *data, bool blocked)
1048 struct hci_dev *hdev = data;
1050 BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1055 hci_dev_do_close(hdev);
1060 static const struct rfkill_ops hci_rfkill_ops = {
1061 .set_block = hci_rfkill_set_block,
1064 /* Alloc HCI device */
1065 struct hci_dev *hci_alloc_dev(void)
1067 struct hci_dev *hdev;
1069 hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1073 hci_init_sysfs(hdev);
1074 skb_queue_head_init(&hdev->driver_init);
1078 EXPORT_SYMBOL(hci_alloc_dev);
1080 /* Free HCI device */
1081 void hci_free_dev(struct hci_dev *hdev)
1083 skb_queue_purge(&hdev->driver_init);
1085 /* will free via device release */
1086 put_device(&hdev->dev);
1088 EXPORT_SYMBOL(hci_free_dev);
1090 static void hci_power_on(struct work_struct *work)
1092 struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1094 BT_DBG("%s", hdev->name);
1096 if (hci_dev_open(hdev->id) < 0)
1099 if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1100 schedule_delayed_work(&hdev->power_off,
1101 msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1103 if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1104 mgmt_index_added(hdev);
1107 static void hci_power_off(struct work_struct *work)
1109 struct hci_dev *hdev = container_of(work, struct hci_dev,
1112 BT_DBG("%s", hdev->name);
1114 clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1116 hci_dev_close(hdev->id);
1119 static void hci_discov_off(struct work_struct *work)
1121 struct hci_dev *hdev;
1122 u8 scan = SCAN_PAGE;
1124 hdev = container_of(work, struct hci_dev, discov_off.work);
1126 BT_DBG("%s", hdev->name);
1130 hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1132 hdev->discov_timeout = 0;
1134 hci_dev_unlock(hdev);
1137 int hci_uuids_clear(struct hci_dev *hdev)
1139 struct list_head *p, *n;
1141 list_for_each_safe(p, n, &hdev->uuids) {
1142 struct bt_uuid *uuid;
1144 uuid = list_entry(p, struct bt_uuid, list);
1153 int hci_link_keys_clear(struct hci_dev *hdev)
1155 struct list_head *p, *n;
1157 list_for_each_safe(p, n, &hdev->link_keys) {
1158 struct link_key *key;
1160 key = list_entry(p, struct link_key, list);
1169 int hci_smp_ltks_clear(struct hci_dev *hdev)
1171 struct smp_ltk *k, *tmp;
1173 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1181 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1185 list_for_each_entry(k, &hdev->link_keys, list)
1186 if (bacmp(bdaddr, &k->bdaddr) == 0)
1192 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1193 u8 key_type, u8 old_key_type)
1196 if (key_type < 0x03)
1199 /* Debug keys are insecure so don't store them persistently */
1200 if (key_type == HCI_LK_DEBUG_COMBINATION)
1203 /* Changed combination key and there's no previous one */
1204 if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1207 /* Security mode 3 case */
1211 /* Neither local nor remote side had no-bonding as requirement */
1212 if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1215 /* Local side had dedicated bonding as requirement */
1216 if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1219 /* Remote side had dedicated bonding as requirement */
1220 if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1223 /* If none of the above criteria match, then don't store the key
1228 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1232 list_for_each_entry(k, &hdev->long_term_keys, list) {
1233 if (k->ediv != ediv ||
1234 memcmp(rand, k->rand, sizeof(k->rand)))
1242 EXPORT_SYMBOL(hci_find_ltk);
1244 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1249 list_for_each_entry(k, &hdev->long_term_keys, list)
1250 if (addr_type == k->bdaddr_type &&
1251 bacmp(bdaddr, &k->bdaddr) == 0)
1256 EXPORT_SYMBOL(hci_find_ltk_by_addr);
1258 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1259 bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1261 struct link_key *key, *old_key;
1262 u8 old_key_type, persistent;
1264 old_key = hci_find_link_key(hdev, bdaddr);
1266 old_key_type = old_key->type;
1269 old_key_type = conn ? conn->key_type : 0xff;
1270 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1273 list_add(&key->list, &hdev->link_keys);
1276 BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1278 /* Some buggy controller combinations generate a changed
1279 * combination key for legacy pairing even when there's no
1281 if (type == HCI_LK_CHANGED_COMBINATION &&
1282 (!conn || conn->remote_auth == 0xff) &&
1283 old_key_type == 0xff) {
1284 type = HCI_LK_COMBINATION;
1286 conn->key_type = type;
1289 bacpy(&key->bdaddr, bdaddr);
1290 memcpy(key->val, val, 16);
1291 key->pin_len = pin_len;
1293 if (type == HCI_LK_CHANGED_COMBINATION)
1294 key->type = old_key_type;
1301 persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1303 mgmt_new_link_key(hdev, key, persistent);
1306 list_del(&key->list);
1313 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1314 int new_key, u8 authenticated, u8 tk[16],
1315 u8 enc_size, u16 ediv, u8 rand[8])
1317 struct smp_ltk *key, *old_key;
1319 if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1322 old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1326 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1329 list_add(&key->list, &hdev->long_term_keys);
1332 bacpy(&key->bdaddr, bdaddr);
1333 key->bdaddr_type = addr_type;
1334 memcpy(key->val, tk, sizeof(key->val));
1335 key->authenticated = authenticated;
1337 key->enc_size = enc_size;
1339 memcpy(key->rand, rand, sizeof(key->rand));
1344 if (type & HCI_SMP_LTK)
1345 mgmt_new_ltk(hdev, key, 1);
1350 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1352 struct link_key *key;
1354 key = hci_find_link_key(hdev, bdaddr);
1358 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1360 list_del(&key->list);
1366 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1368 struct smp_ltk *k, *tmp;
1370 list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1371 if (bacmp(bdaddr, &k->bdaddr))
1374 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1383 /* HCI command timer function */
1384 static void hci_cmd_timer(unsigned long arg)
1386 struct hci_dev *hdev = (void *) arg;
1388 BT_ERR("%s command tx timeout", hdev->name);
1389 atomic_set(&hdev->cmd_cnt, 1);
1390 queue_work(hdev->workqueue, &hdev->cmd_work);
1393 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1396 struct oob_data *data;
1398 list_for_each_entry(data, &hdev->remote_oob_data, list)
1399 if (bacmp(bdaddr, &data->bdaddr) == 0)
1405 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1407 struct oob_data *data;
1409 data = hci_find_remote_oob_data(hdev, bdaddr);
1413 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1415 list_del(&data->list);
1421 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1423 struct oob_data *data, *n;
1425 list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1426 list_del(&data->list);
1433 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1436 struct oob_data *data;
1438 data = hci_find_remote_oob_data(hdev, bdaddr);
1441 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1445 bacpy(&data->bdaddr, bdaddr);
1446 list_add(&data->list, &hdev->remote_oob_data);
1449 memcpy(data->hash, hash, sizeof(data->hash));
1450 memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1452 BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1457 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1460 struct bdaddr_list *b;
1462 list_for_each_entry(b, &hdev->blacklist, list)
1463 if (bacmp(bdaddr, &b->bdaddr) == 0)
1469 int hci_blacklist_clear(struct hci_dev *hdev)
1471 struct list_head *p, *n;
1473 list_for_each_safe(p, n, &hdev->blacklist) {
1474 struct bdaddr_list *b;
1476 b = list_entry(p, struct bdaddr_list, list);
1485 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1487 struct bdaddr_list *entry;
1489 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1492 if (hci_blacklist_lookup(hdev, bdaddr))
1495 entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1499 bacpy(&entry->bdaddr, bdaddr);
1501 list_add(&entry->list, &hdev->blacklist);
1503 return mgmt_device_blocked(hdev, bdaddr);
1506 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1508 struct bdaddr_list *entry;
1510 if (bacmp(bdaddr, BDADDR_ANY) == 0)
1511 return hci_blacklist_clear(hdev);
1513 entry = hci_blacklist_lookup(hdev, bdaddr);
1517 list_del(&entry->list);
1520 return mgmt_device_unblocked(hdev, bdaddr);
1523 static void hci_clear_adv_cache(struct work_struct *work)
1525 struct hci_dev *hdev = container_of(work, struct hci_dev,
1530 hci_adv_entries_clear(hdev);
1532 hci_dev_unlock(hdev);
1535 int hci_adv_entries_clear(struct hci_dev *hdev)
1537 struct adv_entry *entry, *tmp;
1539 list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1540 list_del(&entry->list);
1544 BT_DBG("%s adv cache cleared", hdev->name);
1549 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1551 struct adv_entry *entry;
1553 list_for_each_entry(entry, &hdev->adv_entries, list)
1554 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1560 static inline int is_connectable_adv(u8 evt_type)
1562 if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1568 int hci_add_adv_entry(struct hci_dev *hdev,
1569 struct hci_ev_le_advertising_info *ev)
1571 struct adv_entry *entry;
1573 if (!is_connectable_adv(ev->evt_type))
1576 /* Only new entries should be added to adv_entries. So, if
1577 * bdaddr was found, don't add it. */
1578 if (hci_find_adv_entry(hdev, &ev->bdaddr))
1581 entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1585 bacpy(&entry->bdaddr, &ev->bdaddr);
1586 entry->bdaddr_type = ev->bdaddr_type;
1588 list_add(&entry->list, &hdev->adv_entries);
1590 BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1591 batostr(&entry->bdaddr), entry->bdaddr_type);
1596 /* Register HCI device */
/*
 * hci_register_dev - add a newly probed controller to the stack.
 *
 * Assigns the first free hciX id (AMP controllers never take id 0 so the
 * index can double as the AMP controller ID), links the device into
 * hci_dev_list and initialises all per-device state under
 * hci_dev_list_lock.  Afterwards it allocates the per-device workqueue,
 * adds the sysfs entry and an rfkill switch, then schedules power-on with
 * HCI_AUTO_OFF/HCI_SETUP set so mgmt can complete initialisation.
 */
1597 int hci_register_dev(struct hci_dev *hdev)
1599 struct list_head *head = &hci_dev_list, *p;
1602 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
/* A transport driver must supply open() and close() callbacks. */
1604 if (!hdev->open || !hdev->close)
1607 /* Do not allow HCI_AMP devices to register at index 0,
1608 * so the index can be used as the AMP controller ID.
1610 id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1612 write_lock(&hci_dev_list_lock);
1614 /* Find first available device id */
1615 list_for_each(p, &hci_dev_list) {
1616 if (list_entry(p, struct hci_dev, list)->id != id)
1621 sprintf(hdev->name, "hci%d", id);
1623 list_add_tail(&hdev->list, head);
1625 mutex_init(&hdev->lock);
/* Conservative defaults: basic packet types, accept incoming links. */
1628 hdev->dev_flags = 0;
1629 hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1630 hdev->esco_type = (ESCO_HV1);
1631 hdev->link_mode = (HCI_LM_ACCEPT);
1632 hdev->io_capability = 0x03; /* No Input No Output */
1634 hdev->idle_timeout = 0;
/* Sniff interval bounds; presumably baseband slot units — TODO confirm. */
1635 hdev->sniff_max_interval = 800;
1636 hdev->sniff_min_interval = 80;
/* Work items driving the rx, command and tx paths. */
1638 INIT_WORK(&hdev->rx_work, hci_rx_work);
1639 INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1640 INIT_WORK(&hdev->tx_work, hci_tx_work);
1643 skb_queue_head_init(&hdev->rx_q);
1644 skb_queue_head_init(&hdev->cmd_q);
1645 skb_queue_head_init(&hdev->raw_q);
1647 setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
/* No partially reassembled frames yet. */
1649 for (i = 0; i < NUM_REASSEMBLY; i++)
1650 hdev->reassembly[i] = NULL;
1652 init_waitqueue_head(&hdev->req_wait_q);
1653 mutex_init(&hdev->req_lock);
1655 discovery_init(hdev);
1657 hci_conn_hash_init(hdev);
1659 INIT_LIST_HEAD(&hdev->mgmt_pending);
1661 INIT_LIST_HEAD(&hdev->blacklist);
1663 INIT_LIST_HEAD(&hdev->uuids);
1665 INIT_LIST_HEAD(&hdev->link_keys);
1666 INIT_LIST_HEAD(&hdev->long_term_keys);
1668 INIT_LIST_HEAD(&hdev->remote_oob_data);
1670 INIT_LIST_HEAD(&hdev->adv_entries);
1672 INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1673 INIT_WORK(&hdev->power_on, hci_power_on);
1674 INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1676 INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1678 memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1680 atomic_set(&hdev->promisc, 0);
1682 write_unlock(&hci_dev_list_lock);
/* Dedicated high-priority workqueue named after the device. */
1684 hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1686 if (!hdev->workqueue) {
1691 error = hci_add_sysfs(hdev);
1695 hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1696 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
/* rfkill registration failure is non-fatal: just drop the handle. */
1698 if (rfkill_register(hdev->rfkill) < 0) {
1699 rfkill_destroy(hdev->rfkill);
1700 hdev->rfkill = NULL;
1704 set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1705 set_bit(HCI_SETUP, &hdev->dev_flags);
1706 schedule_work(&hdev->power_on);
1708 hci_notify(hdev, HCI_DEV_REG);
/* Error unwind: destroy the workqueue and unlink from the device list. */
1714 destroy_workqueue(hdev->workqueue);
1716 write_lock(&hci_dev_list_lock);
1717 list_del(&hdev->list);
1718 write_unlock(&hci_dev_list_lock);
1722 EXPORT_SYMBOL(hci_register_dev);
1724 /* Unregister HCI device */
/*
 * hci_unregister_dev - tear down a controller added by hci_register_dev():
 * unlink it from hci_dev_list, close it, release any half-reassembled
 * frames, notify mgmt/listeners, drop rfkill and sysfs, and clear all
 * cached remote data and stored keys.
 */
1725 void hci_unregister_dev(struct hci_dev *hdev)
1729 BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1731 write_lock(&hci_dev_list_lock);
1732 list_del(&hdev->list);
1733 write_unlock(&hci_dev_list_lock);
1735 hci_dev_do_close(hdev);
/* Free partially reassembled frames held in the per-type slots. */
1737 for (i = 0; i < NUM_REASSEMBLY; i++)
1738 kfree_skb(hdev->reassembly[i]);
/* Only announce removal to mgmt for devices that finished setup. */
1740 if (!test_bit(HCI_INIT, &hdev->flags) &&
1741 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1743 mgmt_index_removed(hdev);
1744 hci_dev_unlock(hdev);
1747 /* mgmt_index_removed should take care of emptying the
1749 BUG_ON(!list_empty(&hdev->mgmt_pending));
1751 hci_notify(hdev, HCI_DEV_UNREG);
1754 rfkill_unregister(hdev->rfkill);
1755 rfkill_destroy(hdev->rfkill);
1758 hci_del_sysfs(hdev);
/* Stop the advertising-cache timer before the workqueue goes away. */
1760 cancel_delayed_work_sync(&hdev->adv_work);
1762 destroy_workqueue(hdev->workqueue);
/* Flush cached remote data and all stored keys. */
1765 hci_blacklist_clear(hdev);
1766 hci_uuids_clear(hdev);
1767 hci_link_keys_clear(hdev);
1768 hci_smp_ltks_clear(hdev);
1769 hci_remote_oob_data_clear(hdev);
1770 hci_adv_entries_clear(hdev);
1771 hci_dev_unlock(hdev);
1775 EXPORT_SYMBOL(hci_unregister_dev);
1777 /* Suspend HCI device */
/* Notify registered listeners that the controller is suspending. */
1778 int hci_suspend_dev(struct hci_dev *hdev)
1780 hci_notify(hdev, HCI_DEV_SUSPEND);
1783 EXPORT_SYMBOL(hci_suspend_dev);
1785 /* Resume HCI device */
/* Notify registered listeners that the controller is resuming. */
1786 int hci_resume_dev(struct hci_dev *hdev)
1788 hci_notify(hdev, HCI_DEV_RESUME);
1791 EXPORT_SYMBOL(hci_resume_dev);
1793 /* Receive frame from HCI drivers */
/*
 * hci_recv_frame - entry point for transport drivers delivering a frame.
 * The skb carries the hci_dev in skb->dev.  Frames are only accepted
 * while the device is up or being initialised; accepted frames are
 * marked incoming, timestamped and queued on rx_q for hci_rx_work().
 */
1794 int hci_recv_frame(struct sk_buff *skb)
1796 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1797 if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1798 && !test_bit(HCI_INIT, &hdev->flags))) {
1804 bt_cb(skb)->incoming = 1;
1807 __net_timestamp(skb);
/* Hand off to the rx worker on the per-device workqueue. */
1809 skb_queue_tail(&hdev->rx_q, skb);
1810 queue_work(hdev->workqueue, &hdev->rx_work);
1814 EXPORT_SYMBOL(hci_recv_frame);
/*
 * hci_reassembly - accumulate raw bytes from a driver into a complete
 * HCI frame in hdev->reassembly[index].
 *
 * A fresh skb is allocated (sized for the packet type's maximum) when a
 * new frame starts; scb->expect tracks how many bytes are still needed.
 * Once a header is complete, the payload length is read from it; when
 * expect reaches zero the finished frame is pushed to hci_recv_frame().
 */
1816 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1817 int count, __u8 index)
1822 struct sk_buff *skb;
1823 struct bt_skb_cb *scb;
/* Only ACL/SCO/event packets and valid slot indices are accepted. */
1825 if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1826 index >= NUM_REASSEMBLY)
1829 skb = hdev->reassembly[index];
/* Pick maximum frame size and header length for the packet type. */
1833 case HCI_ACLDATA_PKT:
1834 len = HCI_MAX_FRAME_SIZE;
1835 hlen = HCI_ACL_HDR_SIZE;
1838 len = HCI_MAX_EVENT_SIZE;
1839 hlen = HCI_EVENT_HDR_SIZE;
1841 case HCI_SCODATA_PKT:
1842 len = HCI_MAX_SCO_SIZE;
1843 hlen = HCI_SCO_HDR_SIZE;
/* Start of a new frame: allocate and park it in the slot. */
1847 skb = bt_skb_alloc(len, GFP_ATOMIC);
1851 scb = (void *) skb->cb;
1853 scb->pkt_type = type;
1855 skb->dev = (void *) hdev;
1856 hdev->reassembly[index] = skb;
/* Copy no more than the bytes still expected for this frame. */
1860 scb = (void *) skb->cb;
1861 len = min(scb->expect, (__u16)count);
1863 memcpy(skb_put(skb, len), data, len);
/* Event header complete: payload length comes from h->plen. */
1872 if (skb->len == HCI_EVENT_HDR_SIZE) {
1873 struct hci_event_hdr *h = hci_event_hdr(skb);
1874 scb->expect = h->plen;
/* Advertised payload won't fit: drop the frame and reset the slot. */
1876 if (skb_tailroom(skb) < scb->expect) {
1878 hdev->reassembly[index] = NULL;
1884 case HCI_ACLDATA_PKT:
/* ACL header complete: little-endian dlen gives the payload size. */
1885 if (skb->len == HCI_ACL_HDR_SIZE) {
1886 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1887 scb->expect = __le16_to_cpu(h->dlen);
1889 if (skb_tailroom(skb) < scb->expect) {
1891 hdev->reassembly[index] = NULL;
1897 case HCI_SCODATA_PKT:
/* SCO header complete: single-byte dlen gives the payload size. */
1898 if (skb->len == HCI_SCO_HDR_SIZE) {
1899 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1900 scb->expect = h->dlen;
1902 if (skb_tailroom(skb) < scb->expect) {
1904 hdev->reassembly[index] = NULL;
1911 if (scb->expect == 0) {
1912 /* Complete frame */
/* Deliver to the core rx path and free the slot for the next frame. */
1914 bt_cb(skb)->pkt_type = type;
1915 hci_recv_frame(skb);
1917 hdev->reassembly[index] = NULL;
/*
 * hci_recv_fragment - feed driver bytes of a known packet type into the
 * reassembler.  The slot index is derived from the type (type - 1), so
 * each packet type reassembles independently; the loop advances past
 * whatever hci_reassembly() consumed.
 */
1925 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1929 if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1933 rem = hci_reassembly(hdev, type, data, count, type - 1);
/* Skip the consumed bytes; rem is what is still unprocessed. */
1937 data += (count - rem);
1943 EXPORT_SYMBOL(hci_recv_fragment);
/* Dedicated reassembly slot for type-prefixed byte streams (e.g. UART). */
1945 #define STREAM_REASSEMBLY 0
/*
 * hci_recv_stream_fragment - like hci_recv_fragment(), but for transports
 * that deliver an undifferentiated byte stream where each frame starts
 * with a one-byte packet-type indicator.  All frames share the single
 * STREAM_REASSEMBLY slot.
 */
1947 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1953 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1956 struct { char type; } *pkt;
1958 /* Start of the frame */
/* Continue the frame already in progress using its recorded type. */
1965 type = bt_cb(skb)->pkt_type;
1967 rem = hci_reassembly(hdev, type, data, count,
1972 data += (count - rem);
1978 EXPORT_SYMBOL(hci_recv_stream_fragment);
1980 /* ---- Interface to upper protocols ---- */
/* Add an upper-protocol callback set to the global hci_cb_list. */
1982 int hci_register_cb(struct hci_cb *cb)
1984 BT_DBG("%p name %s", cb, cb->name);
1986 write_lock(&hci_cb_list_lock);
1987 list_add(&cb->list, &hci_cb_list);
1988 write_unlock(&hci_cb_list_lock);
1992 EXPORT_SYMBOL(hci_register_cb);
/* Remove a callback set previously added with hci_register_cb(). */
1994 int hci_unregister_cb(struct hci_cb *cb)
1996 BT_DBG("%p name %s", cb, cb->name);
1998 write_lock(&hci_cb_list_lock);
1999 list_del(&cb->list);
2000 write_unlock(&hci_cb_list_lock);
2004 EXPORT_SYMBOL(hci_unregister_cb);
/*
 * hci_send_frame - hand a fully built frame to the transport driver.
 * If any socket is in promiscuous mode, a timestamped copy is delivered
 * to the HCI sockets first; ownership then passes to hdev->send().
 */
2006 static int hci_send_frame(struct sk_buff *skb)
2008 struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2015 BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2017 if (atomic_read(&hdev->promisc)) {
/* Time stamp the copy seen by monitoring sockets. */
2019 __net_timestamp(skb);
2021 hci_send_to_sock(hdev, skb, NULL);
2024 /* Get rid of skb owner, prior to sending to the driver. */
2027 return hdev->send(skb);
2030 /* Send HCI command */
/*
 * hci_send_cmd - build an HCI command packet (header + optional
 * parameters) and queue it on cmd_q for hci_cmd_work() to transmit.
 * During controller init the opcode is recorded in init_last_cmd.
 */
2031 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2033 int len = HCI_COMMAND_HDR_SIZE + plen;
2034 struct hci_command_hdr *hdr;
2035 struct sk_buff *skb;
2037 BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2039 skb = bt_skb_alloc(len, GFP_ATOMIC);
2041 BT_ERR("%s no memory for command", hdev->name);
/* Command header: little-endian opcode, then plen parameter bytes. */
2045 hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2046 hdr->opcode = cpu_to_le16(opcode);
2050 memcpy(skb_put(skb, plen), param, plen);
2052 BT_DBG("skb len %d", skb->len);
2054 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2055 skb->dev = (void *) hdev;
/* Remember the last command issued during device initialisation. */
2057 if (test_bit(HCI_INIT, &hdev->flags))
2058 hdev->init_last_cmd = opcode;
2060 skb_queue_tail(&hdev->cmd_q, skb);
2061 queue_work(hdev->workqueue, &hdev->cmd_work);
2066 /* Get data from the previously sent command */
/*
 * hci_sent_cmd_data - return a pointer to the parameter bytes of the
 * most recently sent command, but only if its opcode matches the one
 * requested; used by event handlers to recover command context.
 */
2067 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2069 struct hci_command_hdr *hdr;
2071 if (!hdev->sent_cmd)
2074 hdr = (void *) hdev->sent_cmd->data;
/* Stored opcode is little-endian; compare in wire format. */
2076 if (hdr->opcode != cpu_to_le16(opcode))
2079 BT_DBG("%s opcode 0x%x", hdev->name, opcode);
/* Parameters start right after the command header. */
2081 return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
/*
 * hci_add_acl_hdr - prepend an ACL data header to skb.  The connection
 * handle and the packet-boundary/broadcast flags are packed into the
 * 16-bit handle field; both fields are written little-endian.
 */
2085 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2087 struct hci_acl_hdr *hdr;
2090 skb_push(skb, HCI_ACL_HDR_SIZE);
2091 skb_reset_transport_header(skb);
2092 hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2093 hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2094 hdr->dlen = cpu_to_le16(len);
/*
 * hci_queue_acl - queue an ACL skb (and any fragments hanging off its
 * frag_list) onto the given data queue.  Fragments are detached, given
 * their own ACL headers with ACL_START cleared, and appended under the
 * queue lock so the whole fragment train enqueues atomically.
 */
2097 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2098 struct sk_buff *skb, __u16 flags)
2100 struct hci_dev *hdev = conn->hdev;
2101 struct sk_buff *list;
2103 list = skb_shinfo(skb)->frag_list;
2105 /* Non fragmented */
2106 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2108 skb_queue_tail(queue, skb);
2111 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
/* Detach the fragment chain from the head skb. */
2113 skb_shinfo(skb)->frag_list = NULL;
2115 /* Queue all fragments atomically */
2116 spin_lock(&queue->lock);
2118 __skb_queue_tail(queue, skb);
/* Continuation fragments must not carry the ACL_START flag. */
2120 flags &= ~ACL_START;
2123 skb = list; list = list->next;
2125 skb->dev = (void *) hdev;
2126 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2127 hci_add_acl_hdr(skb, conn->handle, flags);
2129 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2131 __skb_queue_tail(queue, skb);
2134 spin_unlock(&queue->lock);
/*
 * hci_send_acl - send ACL data on a channel: stamp the head skb with the
 * ACL header, queue it (plus fragments) on the channel's data_q and kick
 * the tx worker.
 */
2138 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2140 struct hci_conn *conn = chan->conn;
2141 struct hci_dev *hdev = conn->hdev;
2143 BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2145 skb->dev = (void *) hdev;
2146 bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2147 hci_add_acl_hdr(skb, conn->handle, flags);
2149 hci_queue_acl(conn, &chan->data_q, skb, flags);
2151 queue_work(hdev->workqueue, &hdev->tx_work);
2153 EXPORT_SYMBOL(hci_send_acl);
/*
 * hci_send_sco - prepend a SCO header (handle + one-byte length) to the
 * skb, queue it on the connection's data_q and kick the tx worker.
 */
2156 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2158 struct hci_dev *hdev = conn->hdev;
2159 struct hci_sco_hdr hdr;
2161 BT_DBG("%s len %d", hdev->name, skb->len);
2163 hdr.handle = cpu_to_le16(conn->handle);
2164 hdr.dlen = skb->len;
/* Copy the header into the newly reserved head room. */
2166 skb_push(skb, HCI_SCO_HDR_SIZE);
2167 skb_reset_transport_header(skb);
2168 memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2170 skb->dev = (void *) hdev;
2171 bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2173 skb_queue_tail(&conn->data_q, skb);
2174 queue_work(hdev->workqueue, &hdev->tx_work);
2176 EXPORT_SYMBOL(hci_send_sco);
2178 /* ---- HCI TX task (outgoing data) ---- */
2180 /* HCI Connection scheduler */
/*
 * hci_low_sent - pick the connection of the given link type with queued
 * data and the fewest in-flight packets (fair scheduling), and compute
 * its transmit quota from the available controller buffer count.
 */
2181 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2183 struct hci_conn_hash *h = &hdev->conn_hash;
2184 struct hci_conn *conn = NULL, *c;
2185 int num = 0, min = ~0;
2187 /* We don't have to lock device here. Connections are always
2188 * added and removed with TX task disabled. */
2192 list_for_each_entry_rcu(c, &h->list, list) {
/* Only connections of the requested type with pending data qualify. */
2193 if (c->type != type || skb_queue_empty(&c->data_q))
2196 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
/* Track the connection with the fewest unacked packets. */
2201 if (c->sent < min) {
/* Early exit once every connection of this type has been seen. */
2206 if (hci_conn_num(hdev, type) == num)
/* Choose the buffer budget matching the winning link type. */
2215 switch (conn->type) {
2217 cnt = hdev->acl_cnt;
2221 cnt = hdev->sco_cnt;
/* LE shares ACL buffers when the controller has no LE-specific ones. */
2224 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2228 BT_ERR("Unknown link type");
2236 BT_DBG("conn %p quote %d", conn, *quote);
/*
 * hci_link_tx_to - handle a transmit timeout on links of the given type:
 * disconnect every connection of that type that still has unacked
 * packets, using reason 0x13 (remote user terminated connection).
 */
2240 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2242 struct hci_conn_hash *h = &hdev->conn_hash;
2245 BT_ERR("%s link tx timeout", hdev->name);
2249 /* Kill stalled connections */
2250 list_for_each_entry_rcu(c, &h->list, list) {
2251 if (c->type == type && c->sent) {
2252 BT_ERR("%s killing stalled connection %s",
2253 hdev->name, batostr(&c->dst));
2254 hci_acl_disconn(c, 0x13);
/*
 * hci_chan_sent - channel-level scheduler: across all connections of the
 * given type, find the channel whose head skb has the highest priority,
 * breaking ties by fewest in-flight packets on the owning connection.
 * The quota is derived from the controller buffer budget for that link
 * type.
 */
2261 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2264 struct hci_conn_hash *h = &hdev->conn_hash;
2265 struct hci_chan *chan = NULL;
2266 int num = 0, min = ~0, cur_prio = 0;
2267 struct hci_conn *conn;
2268 int cnt, q, conn_num = 0;
2270 BT_DBG("%s", hdev->name);
2274 list_for_each_entry_rcu(conn, &h->list, list) {
2275 struct hci_chan *tmp;
2277 if (conn->type != type)
2280 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2285 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2286 struct sk_buff *skb;
2288 if (skb_queue_empty(&tmp->data_q))
/* Priority of a channel is the priority of its head skb. */
2291 skb = skb_peek(&tmp->data_q);
2292 if (skb->priority < cur_prio)
/* Higher priority found: restart the fairness comparison. */
2295 if (skb->priority > cur_prio) {
2298 cur_prio = skb->priority;
/* Among equal priorities, prefer the least-loaded connection. */
2303 if (conn->sent < min) {
2309 if (hci_conn_num(hdev, type) == conn_num)
2318 switch (chan->conn->type) {
2320 cnt = hdev->acl_cnt;
2324 cnt = hdev->sco_cnt;
/* LE falls back to ACL buffers when no dedicated LE buffers exist. */
2327 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2331 BT_ERR("Unknown link type");
2336 BT_DBG("chan %p quote %d", chan, *quote);
/*
 * hci_prio_recalculate - anti-starvation pass: after a scheduling round,
 * promote the head skb of each still-backlogged channel toward
 * HCI_PRIO_MAX - 1 so low-priority traffic eventually gets serviced.
 */
2340 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2342 struct hci_conn_hash *h = &hdev->conn_hash;
2343 struct hci_conn *conn;
2346 BT_DBG("%s", hdev->name);
2350 list_for_each_entry_rcu(conn, &h->list, list) {
2351 struct hci_chan *chan;
2353 if (conn->type != type)
2356 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2361 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2362 struct sk_buff *skb;
2369 if (skb_queue_empty(&chan->data_q))
2372 skb = skb_peek(&chan->data_q);
/* Already at the promotion ceiling: nothing to do. */
2373 if (skb->priority >= HCI_PRIO_MAX - 1)
2376 skb->priority = HCI_PRIO_MAX - 1;
2378 BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2382 if (hci_conn_num(hdev, type) == num)
/* Number of controller data blocks consumed by one ACL packet payload
 * (block-based flow control), rounding up to whole blocks. */
2390 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2392 /* Calculate count of blocks used by this packet */
2393 return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
/*
 * __check_timeout - if all controller buffers have been exhausted for
 * longer than HCI_ACL_TX_TIMEOUT, treat the ACL link as stalled and
 * disconnect its stuck connections.  Skipped for raw-mode devices.
 */
2396 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2398 if (!test_bit(HCI_RAW, &hdev->flags)) {
2399 /* ACL tx timeout must be longer than maximum
2400 * link supervision timeout (40.9 seconds) */
2401 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2402 msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2403 hci_link_tx_to(hdev, ACL_LINK);
/*
 * hci_sched_acl_pkt - ACL scheduler for packet-based flow control:
 * repeatedly pick the best channel via hci_chan_sent() and transmit up
 * to its quota, stopping a channel early if a lower-priority skb reaches
 * the head of its queue.  Promotes starved traffic afterwards if
 * anything was sent.
 */
2407 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2409 unsigned int cnt = hdev->acl_cnt;
2410 struct hci_chan *chan;
2411 struct sk_buff *skb;
2414 __check_timeout(hdev, cnt);
2416 while (hdev->acl_cnt &&
2417 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2418 u32 priority = (skb_peek(&chan->data_q))->priority;
2419 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2420 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2421 skb->len, skb->priority);
2423 /* Stop if priority has changed */
2424 if (skb->priority < priority)
2427 skb = skb_dequeue(&chan->data_q);
/* Leave sniff mode if the frame demands an active link. */
2429 hci_conn_enter_active_mode(chan->conn,
2430 bt_cb(skb)->force_active);
2432 hci_send_frame(skb);
2433 hdev->acl_last_tx = jiffies;
/* If we transmitted anything, rebalance channel priorities. */
2441 if (cnt != hdev->acl_cnt)
2442 hci_prio_recalculate(hdev, ACL_LINK);
/*
 * hci_sched_acl_blk - ACL scheduler for block-based flow control: like
 * hci_sched_acl_pkt(), but budgets in controller data blocks.  A packet
 * needing more blocks than remain is left queued; sent-counters advance
 * by blocks, not packets.
 */
2445 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2447 unsigned int cnt = hdev->block_cnt;
2448 struct hci_chan *chan;
2449 struct sk_buff *skb;
2452 __check_timeout(hdev, cnt);
2454 while (hdev->block_cnt > 0 &&
2455 (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2456 u32 priority = (skb_peek(&chan->data_q))->priority;
2457 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2460 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2461 skb->len, skb->priority);
2463 /* Stop if priority has changed */
2464 if (skb->priority < priority)
2467 skb = skb_dequeue(&chan->data_q);
/* Don't send a packet that exceeds the remaining block budget. */
2469 blocks = __get_blocks(hdev, skb);
2470 if (blocks > hdev->block_cnt)
2473 hci_conn_enter_active_mode(chan->conn,
2474 bt_cb(skb)->force_active);
2476 hci_send_frame(skb);
2477 hdev->acl_last_tx = jiffies;
2479 hdev->block_cnt -= blocks;
/* Accounting is per block under block-based flow control. */
2482 chan->sent += blocks;
2483 chan->conn->sent += blocks;
2487 if (cnt != hdev->block_cnt)
2488 hci_prio_recalculate(hdev, ACL_LINK);
/* Dispatch ACL scheduling to the packet- or block-based variant
 * according to the controller's flow control mode. */
2491 static inline void hci_sched_acl(struct hci_dev *hdev)
2493 BT_DBG("%s", hdev->name);
2495 if (!hci_conn_num(hdev, ACL_LINK))
2498 switch (hdev->flow_ctl_mode) {
2499 case HCI_FLOW_CTL_MODE_PACKET_BASED:
2500 hci_sched_acl_pkt(hdev);
2503 case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2504 hci_sched_acl_blk(hdev);
/*
 * hci_sched_sco - drain SCO connections round-robin while SCO buffers
 * remain, using hci_low_sent() to pick the least-loaded connection.
 */
2510 static inline void hci_sched_sco(struct hci_dev *hdev)
2512 struct hci_conn *conn;
2513 struct sk_buff *skb;
2516 BT_DBG("%s", hdev->name);
2518 if (!hci_conn_num(hdev, SCO_LINK))
2521 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2522 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2523 BT_DBG("skb %p len %d", skb, skb->len);
2524 hci_send_frame(skb);
/* Guard against the unsigned sent counter wrapping. */
2527 if (conn->sent == ~0)
/*
 * hci_sched_esco - eSCO counterpart of hci_sched_sco(); eSCO links share
 * the SCO buffer count (sco_cnt).
 */
2533 static inline void hci_sched_esco(struct hci_dev *hdev)
2535 struct hci_conn *conn;
2536 struct sk_buff *skb;
2539 BT_DBG("%s", hdev->name);
2541 if (!hci_conn_num(hdev, ESCO_LINK))
2544 while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2545 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2546 BT_DBG("skb %p len %d", skb, skb->len);
2547 hci_send_frame(skb);
/* Guard against the unsigned sent counter wrapping. */
2550 if (conn->sent == ~0)
/*
 * hci_sched_le - LE scheduler.  Controllers without dedicated LE buffers
 * (le_pkts == 0) borrow the ACL budget, in which case the remaining
 * count is written back to acl_cnt at the end.  Includes its own stall
 * detection (45 s without credits) mirroring __check_timeout().
 */
2556 static inline void hci_sched_le(struct hci_dev *hdev)
2558 struct hci_chan *chan;
2559 struct sk_buff *skb;
2560 int quote, cnt, tmp;
2562 BT_DBG("%s", hdev->name);
2564 if (!hci_conn_num(hdev, LE_LINK))
2567 if (!test_bit(HCI_RAW, &hdev->flags)) {
2568 /* LE tx timeout must be longer than maximum
2569 * link supervision timeout (40.9 seconds) */
2570 if (!hdev->le_cnt && hdev->le_pkts &&
2571 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2572 hci_link_tx_to(hdev, LE_LINK);
/* Use LE buffers when available, otherwise share the ACL budget. */
2575 cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2577 while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2578 u32 priority = (skb_peek(&chan->data_q))->priority;
2579 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2580 BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2581 skb->len, skb->priority);
2583 /* Stop if priority has changed */
2584 if (skb->priority < priority)
2587 skb = skb_dequeue(&chan->data_q);
2589 hci_send_frame(skb);
2590 hdev->le_last_tx = jiffies;
/* When sharing buffers, return the unspent budget to the ACL pool. */
2601 hdev->acl_cnt = cnt;
2604 hci_prio_recalculate(hdev, LE_LINK);
/*
 * hci_tx_work - per-device transmit worker: run each link-type scheduler
 * in turn, then flush any raw (unknown-type) packets straight to the
 * driver.
 */
2607 static void hci_tx_work(struct work_struct *work)
2609 struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2610 struct sk_buff *skb;
2612 BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2613 hdev->sco_cnt, hdev->le_cnt);
2615 /* Schedule queues and send stuff to HCI driver */
2617 hci_sched_acl(hdev);
2619 hci_sched_sco(hdev);
2621 hci_sched_esco(hdev);
2625 /* Send next queued raw (unknown type) packet */
2626 while ((skb = skb_dequeue(&hdev->raw_q)))
2627 hci_send_frame(skb);
2630 /* ----- HCI RX task (incoming data processing) ----- */
2632 /* ACL data packet */
/*
 * hci_acldata_packet - strip the ACL header, split the 16-bit field into
 * connection handle and flags, look up the owning connection and pass
 * the payload to L2CAP; packets for unknown handles are logged and
 * dropped.
 */
2633 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2635 struct hci_acl_hdr *hdr = (void *) skb->data;
2636 struct hci_conn *conn;
2637 __u16 handle, flags;
2639 skb_pull(skb, HCI_ACL_HDR_SIZE);
/* Handle field packs the PB/BC flags in its upper bits. */
2641 handle = __le16_to_cpu(hdr->handle);
2642 flags = hci_flags(handle);
2643 handle = hci_handle(handle);
2645 BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2647 hdev->stat.acl_rx++;
2650 conn = hci_conn_hash_lookup_handle(hdev, handle);
2651 hci_dev_unlock(hdev);
/* Incoming data keeps the link active but never forces active mode. */
2654 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2656 /* Send to upper protocol */
2657 l2cap_recv_acldata(conn, skb, flags);
2660 BT_ERR("%s ACL packet for unknown connection handle %d",
2661 hdev->name, handle);
2667 /* SCO data packet */
/*
 * hci_scodata_packet - strip the SCO header, look up the connection by
 * handle and hand the payload to the SCO layer; packets for unknown
 * handles are logged and dropped.
 */
2668 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2670 struct hci_sco_hdr *hdr = (void *) skb->data;
2671 struct hci_conn *conn;
2674 skb_pull(skb, HCI_SCO_HDR_SIZE);
2676 handle = __le16_to_cpu(hdr->handle);
2678 BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2680 hdev->stat.sco_rx++;
2683 conn = hci_conn_hash_lookup_handle(hdev, handle);
2684 hci_dev_unlock(hdev);
2687 /* Send to upper protocol */
2688 sco_recv_scodata(conn, skb);
2691 BT_ERR("%s SCO packet for unknown connection handle %d",
2692 hdev->name, handle);
/*
 * hci_rx_work - per-device receive worker: drain rx_q, mirroring each
 * frame to promiscuous sockets, then dispatch by packet type to the
 * event, ACL or SCO handler.  Raw-mode devices bypass dispatch; data
 * packets arriving during init are discarded.
 */
2698 static void hci_rx_work(struct work_struct *work)
2700 struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2701 struct sk_buff *skb;
2703 BT_DBG("%s", hdev->name);
2705 while ((skb = skb_dequeue(&hdev->rx_q))) {
2706 if (atomic_read(&hdev->promisc)) {
2707 /* Send copy to the sockets */
2708 hci_send_to_sock(hdev, skb, NULL);
/* Raw mode: user space handles the protocol itself. */
2711 if (test_bit(HCI_RAW, &hdev->flags)) {
2716 if (test_bit(HCI_INIT, &hdev->flags)) {
2717 /* Don't process data packets in this states. */
2718 switch (bt_cb(skb)->pkt_type) {
2719 case HCI_ACLDATA_PKT:
2720 case HCI_SCODATA_PKT:
/* Normal dispatch by packet type. */
2727 switch (bt_cb(skb)->pkt_type) {
2729 BT_DBG("%s Event packet", hdev->name);
2730 hci_event_packet(hdev, skb);
2733 case HCI_ACLDATA_PKT:
2734 BT_DBG("%s ACL data packet", hdev->name);
2735 hci_acldata_packet(hdev, skb);
2738 case HCI_SCODATA_PKT:
2739 BT_DBG("%s SCO data packet", hdev->name);
2740 hci_scodata_packet(hdev, skb);
/*
 * hci_cmd_work - command transmit worker.  When the controller has
 * command credits (cmd_cnt), dequeue the next command, keep a clone in
 * sent_cmd so event handlers can inspect it, send the original and arm
 * the command timeout (skipped while an HCI reset is pending).  If the
 * clone allocation fails, the command is requeued and the work
 * rescheduled.
 */
2750 static void hci_cmd_work(struct work_struct *work)
2752 struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2753 struct sk_buff *skb;
2755 BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2757 /* Send queued commands */
2758 if (atomic_read(&hdev->cmd_cnt)) {
2759 skb = skb_dequeue(&hdev->cmd_q);
/* Replace the previously saved command, if any. */
2763 kfree_skb(hdev->sent_cmd);
2765 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2766 if (hdev->sent_cmd) {
/* Consume one command credit; refunded on command-complete. */
2767 atomic_dec(&hdev->cmd_cnt);
2768 hci_send_frame(skb);
2769 if (test_bit(HCI_RESET, &hdev->flags))
2770 del_timer(&hdev->cmd_timer);
2772 mod_timer(&hdev->cmd_timer,
2773 jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
/* Clone failed: push the command back and retry later. */
2775 skb_queue_head(&hdev->cmd_q, skb);
2776 queue_work(hdev->workqueue, &hdev->cmd_work);
/*
 * hci_do_inquiry - start a general inquiry (GIAC) of the given length,
 * flushing the inquiry cache first.  Returns -EINPROGRESS if an inquiry
 * is already running.
 */
2781 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2783 /* General inquiry access code (GIAC) */
2784 u8 lap[3] = { 0x33, 0x8b, 0x9e };
2785 struct hci_cp_inquiry cp;
2787 BT_DBG("%s", hdev->name);
2789 if (test_bit(HCI_INQUIRY, &hdev->flags))
2790 return -EINPROGRESS;
/* Drop stale results from any previous inquiry. */
2792 inquiry_cache_flush(hdev);
2794 memset(&cp, 0, sizeof(cp));
2795 memcpy(&cp.lap, lap, sizeof(cp.lap));
2798 return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
/* Cancel a running inquiry; a no-op (non-send) path is taken when no
 * inquiry is active. */
2801 int hci_cancel_inquiry(struct hci_dev *hdev)
2803 BT_DBG("%s", hdev->name);
2805 if (!test_bit(HCI_INQUIRY, &hdev->flags))
2808 return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
/* Module parameter (writable at runtime, 0644): toggles High Speed
 * (AMP) support. */
2811 module_param(enable_hs, bool, 0644);
2812 MODULE_PARM_DESC(enable_hs, "Enable High Speed");