Bluetooth: Move command task to workqueue
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

int enable_hs;

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_task(unsigned long arg);

static DEFINE_MUTEX(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO	2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
	return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

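/* Illustrative sketch (not part of the original file): how a listener
 * could hook into the notifier chain above. The handler and names are
 * hypothetical; only hci_register_notifier()/hci_unregister_notifier()
 * are real. Guarded with #if 0 so the file builds unchanged. */
#if 0
static int example_hci_event(struct notifier_block *nb,
			     unsigned long event, void *ptr)
{
	struct hci_dev *hdev = ptr;

	BT_DBG("%s event %lu", hdev->name, event);	/* e.g. HCI_DEV_REG */
	return NOTIFY_DONE;
}

static struct notifier_block example_hci_nb = {
	.notifier_call = example_hci_event,
};

/* hci_register_notifier(&example_hci_nb);
 * ...
 * hci_unregister_notifier(&example_hci_nb); */
#endif
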
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
	BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

	/* If this is the init phase check if the completed command matches
	 * the last init command, and if not just return.
	 */
	if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
		return;

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = result;
		hdev->req_status = HCI_REQ_DONE;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
	BT_DBG("%s err 0x%2.2x", hdev->name, err);

	if (hdev->req_status == HCI_REQ_PEND) {
		hdev->req_result = err;
		hdev->req_status = HCI_REQ_CANCELED;
		wake_up_interruptible(&hdev->req_wait_q);
	}
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		err = -hdev->req_result;
		break;

	default:
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	int ret;

	if (!test_bit(HCI_UP, &hdev->flags))
		return -ENETDOWN;

	/* Serialize all requests */
	hci_req_lock(hdev);
	ret = __hci_request(hdev, req, opt, timeout);
	hci_req_unlock(hdev);

	return ret;
}

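/* Illustrative sketch (not part of the original file): a typical caller
 * hands hci_request() one of the request builders defined below plus a
 * jiffies timeout, mirroring the HCISETAUTH path in hci_dev_cmd().
 * AUTH_ENABLED is assumed to be the usual opt value from hci.h. */
#if 0
	err = hci_request(hdev, hci_auth_req, AUTH_ENABLED,
				msecs_to_jiffies(HCI_INIT_TIMEOUT));
#endif
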
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_cp_delete_stored_link_key cp;
	struct sk_buff *skb;
	__le16 param;
	__u8 flt_type;

	BT_DBG("%s %ld", hdev->name, opt);

	/* Driver initialization */

	/* Special commands */
	while ((skb = skb_dequeue(&hdev->driver_init))) {
		bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
		skb->dev = (void *) hdev;

		skb_queue_tail(&hdev->cmd_q, skb);
		queue_work(hdev->workqueue, &hdev->cmd_work);
	}
	skb_queue_purge(&hdev->driver_init);

	/* Mandatory initialization */

	/* Reset */
	if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
		set_bit(HCI_RESET, &hdev->flags);
		hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
	}

	/* Read Local Supported Features */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

	/* Read Local Version */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

	/* Read Buffer Size (ACL mtu, max pkt, etc.) */
	hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

	/* Read BD Address */
	hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

	/* Read Class of Device */
	hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

	/* Read Local Name */
	hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

	/* Read Voice Setting */
	hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

	/* Optional initialization */

	/* Clear Event Filters */
	flt_type = HCI_FLT_CLEAR_ALL;
	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

	/* Connection accept timeout ~20 secs */
	param = cpu_to_le16(0x7d00);
	hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

	bacpy(&cp.bdaddr, BDADDR_ANY);
	cp.delete_all = 1;
	hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 scan = opt;

	BT_DBG("%s %x", hdev->name, scan);

	/* Inquiry and Page scans */
	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 auth = opt;

	BT_DBG("%s %x", hdev->name, auth);

	/* Authentication */
	hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
	__u8 encrypt = opt;

	BT_DBG("%s %x", hdev->name, encrypt);

	/* Encryption */
	hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
	__le16 policy = cpu_to_le16(opt);

	BT_DBG("%s %x", hdev->name, policy);

	/* Default link policy */
	hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
	struct hci_dev *hdev = NULL, *d;

	BT_DBG("%d", index);

	if (index < 0)
		return NULL;

	read_lock(&hci_dev_list_lock);
	list_for_each_entry(d, &hci_dev_list, list) {
		if (d->id == index) {
			hdev = hci_dev_hold(d);
			break;
		}
	}
	read_unlock(&hci_dev_list_lock);
	return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *next  = cache->list, *e;

	BT_DBG("cache %p", cache);

	cache->list = NULL;
	while ((e = next)) {
		next = e->next;
		kfree(e);
	}
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *e;

	BT_DBG("cache %p, %s", cache, batostr(bdaddr));

	for (e = cache->list; e; e = e->next)
		if (!bacmp(&e->data.bdaddr, bdaddr))
			break;
	return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_entry *ie;

	BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

	ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
	if (!ie) {
		/* Entry not in the cache. Add new one. */
		ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
		if (!ie)
			return;

		ie->next = cache->list;
		cache->list = ie;
	}

	memcpy(&ie->data, data, sizeof(*data));
	ie->timestamp = jiffies;
	cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
	struct inquiry_cache *cache = &hdev->inq_cache;
	struct inquiry_info *info = (struct inquiry_info *) buf;
	struct inquiry_entry *e;
	int copied = 0;

	for (e = cache->list; e && copied < num; e = e->next, copied++) {
		struct inquiry_data *data = &e->data;
		bacpy(&info->bdaddr, &data->bdaddr);
		info->pscan_rep_mode    = data->pscan_rep_mode;
		info->pscan_period_mode = data->pscan_period_mode;
		info->pscan_mode        = data->pscan_mode;
		memcpy(info->dev_class, data->dev_class, 3);
		info->clock_offset      = data->clock_offset;
		info++;
	}

	BT_DBG("cache %p, copied %d", cache, copied);
	return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
	struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
	struct hci_cp_inquiry cp;

	BT_DBG("%s", hdev->name);

	if (test_bit(HCI_INQUIRY, &hdev->flags))
		return;

	/* Start Inquiry */
	memcpy(&cp.lap, &ir->lap, 3);
	cp.length  = ir->length;
	cp.num_rsp = ir->num_rsp;
	hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* For an unlimited number of responses we will use a buffer with
	 * 255 entries. */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate a temp buffer and
	 * then copy it to user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
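
/* Illustrative sketch (not part of the original file): userspace drives
 * hci_inquiry() through ioctl(HCIINQUIRY) on a raw HCI socket, with the
 * inquiry_info array expected right after the request header and
 * num_rsp == 0 meaning "unlimited" (capped at 255 above). This is a
 * hypothetical userspace snippet; 0x9e8b33 is the general inquiry LAP. */
#if 0
	struct {
		struct hci_inquiry_req ir;
		struct inquiry_info info[8];
	} req = { .ir = {
		.dev_id  = 0,
		.flags   = IREQ_CACHE_FLUSH,
		.lap     = { 0x33, 0x8b, 0x9e },
		.length  = 8,
		.num_rsp = 8,
	} };

	ioctl(hci_sock_fd, HCIINQUIRY, &req);
#endif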

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non-BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		tasklet_kill(&hdev->tx_task);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Kill RX and TX tasks */
	tasklet_kill(&hdev->tx_task);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* Flush cmd work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	hci_dev_put(hdev);
	return 0;
}

int hci_dev_close(__u16 dev)
{
	struct hci_dev *hdev;
	int err;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;
	err = hci_dev_do_close(hdev);
	hci_dev_put(hdev);
	return err;
}

int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);
	tasklet_disable(&hdev->tx_task);

	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	tasklet_enable(&hdev->tx_task);
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	hci_dev_put(hdev);

	return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}

int hci_get_dev_list(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_list_req *dl;
	struct hci_dev_req *dr;
	int n = 0, size, err;
	__u16 dev_num;

	if (get_user(dev_num, (__u16 __user *) arg))
		return -EFAULT;

	if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
		return -EINVAL;

	size = sizeof(*dl) + dev_num * sizeof(*dr);

	dl = kzalloc(size, GFP_KERNEL);
	if (!dl)
		return -ENOMEM;

	dr = dl->dev_req;

	read_lock_bh(&hci_dev_list_lock);
	list_for_each_entry(hdev, &hci_dev_list, list) {
		if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
			cancel_delayed_work(&hdev->power_off);

		if (!test_bit(HCI_MGMT, &hdev->flags))
			set_bit(HCI_PAIRABLE, &hdev->flags);

		(dr + n)->dev_id  = hdev->id;
		(dr + n)->dev_opt = hdev->flags;

		if (++n >= dev_num)
			break;
	}
	read_unlock_bh(&hci_dev_list_lock);

	dl->dev_num = n;
	size = sizeof(*dl) + n * sizeof(*dr);

	err = copy_to_user(arg, dl, size);
	kfree(dl);

	return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
	struct hci_dev *hdev = data;

	BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

	if (!blocked)
		return 0;

	hci_dev_do_close(hdev);

	return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
	struct hci_dev *hdev;

	hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
	if (!hdev)
		return NULL;

	hci_init_sysfs(hdev);
	skb_queue_head_init(&hdev->driver_init);

	return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		queue_delayed_work(hdev->workqueue, &hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}

static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->uuids) {
		struct bt_uuid *uuid;

		uuid = list_entry(p, struct bt_uuid, list);

		list_del(p);
		kfree(uuid);
	}

	return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->link_keys) {
		struct link_key *key;

		key = list_entry(p, struct link_key, list);

		list_del(p);
		kfree(key);
	}

	return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}

static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}

struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		if (k->type != HCI_LK_SMP_LTK)
			continue;

		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
					bdaddr_t *bdaddr, u8 type)
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list)
		if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
			return k;

	return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}

int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct link_key *key;

	key = hci_find_link_key(hdev, bdaddr);
	if (!key)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&key->list);
	kfree(key);

	return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
							bdaddr_t *bdaddr)
{
	struct oob_data *data;

	list_for_each_entry(data, &hdev->remote_oob_data, list)
		if (bacmp(bdaddr, &data->bdaddr) == 0)
			return data;

	return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);
	if (!data)
		return -ENOENT;

	BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

	list_del(&data->list);
	kfree(data);

	return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
	struct oob_data *data, *n;

	list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
		list_del(&data->list);
		kfree(data);
	}

	return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
								u8 *randomizer)
{
	struct oob_data *data;

	data = hci_find_remote_oob_data(hdev, bdaddr);

	if (!data) {
		data = kmalloc(sizeof(*data), GFP_ATOMIC);
		if (!data)
			return -ENOMEM;

		bacpy(&data->bdaddr, bdaddr);
		list_add(&data->list, &hdev->remote_oob_data);
	}

	memcpy(data->hash, hash, sizeof(data->hash));
	memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

	BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

	return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
						bdaddr_t *bdaddr)
{
	struct bdaddr_list *b;

	list_for_each_entry(b, &hdev->blacklist, list)
		if (bacmp(bdaddr, &b->bdaddr) == 0)
			return b;

	return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
	struct list_head *p, *n;

	list_for_each_safe(p, n, &hdev->blacklist) {
		struct bdaddr_list *b;

		b = list_entry(p, struct bdaddr_list, list);

		list_del(p);
		kfree(b);
	}

	return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return -EBADF;

	if (hci_blacklist_lookup(hdev, bdaddr))
		return -EEXIST;

	entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, bdaddr);

	list_add(&entry->list, &hdev->blacklist);

	return mgmt_device_blocked(hdev, bdaddr);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct bdaddr_list *entry;

	if (bacmp(bdaddr, BDADDR_ANY) == 0)
		return hci_blacklist_clear(hdev);

	entry = hci_blacklist_lookup(hdev, bdaddr);
	if (!entry)
		return -ENOENT;

	list_del(&entry->list);
	kfree(entry);

	return mgmt_device_unblocked(hdev, bdaddr);
}

static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
	struct adv_entry *entry, *tmp;

	list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
		list_del(&entry->list);
		kfree(entry);
	}

	BT_DBG("%s adv cache cleared", hdev->name);

	return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
	struct adv_entry *entry;

	list_for_each_entry(entry, &hdev->adv_entries, list)
		if (bacmp(bdaddr, &entry->bdaddr) == 0)
			return entry;

	return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
	if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
		return 1;

	return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);

	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
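
/* Illustrative sketch (not part of the original file): the usual driver
 * flow around hci_alloc_dev()/hci_free_dev() and the entry point above.
 * The example_* callbacks are hypothetical; hci_register_dev() rejects
 * a device without open/close/destruct, as checked above. */
#if 0
	struct hci_dev *hdev = hci_alloc_dev();

	if (!hdev)
		return -ENOMEM;

	hdev->bus      = HCI_USB;
	hdev->open     = example_open;
	hdev->close    = example_close;
	hdev->flush    = example_flush;
	hdev->send     = example_send;
	hdev->destruct = example_destruct;

	if (hci_register_dev(hdev) < 0)
		hci_free_dev(hdev);
#endif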

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
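
/* Illustrative sketch (not part of the original file): a driver that
 * already has a complete packet tags the skb and queues it here; the
 * skb->dev back-pointer is how hci_recv_frame() finds the hdev. */
#if 0
	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
	hci_recv_frame(skb);
#endif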

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len  == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
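
/* Illustrative sketch (not part of the original file): a driver whose
 * transport delivers partial, type-tagged chunks (e.g. an SDIO or UART
 * interrupt handler) feeds them here and lets hci_reassembly() grow the
 * skb until the header-announced length is complete. */
#if 0
	hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
#endif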
1762
1763 #define STREAM_REASSEMBLY 0
1764
1765 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1766 {
1767         int type;
1768         int rem = 0;
1769
1770         while (count) {
1771                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1772
1773                 if (!skb) {
1774                         struct { char type; } *pkt;
1775
1776                         /* Start of the frame */
1777                         pkt = data;
1778                         type = pkt->type;
1779
1780                         data++;
1781                         count--;
1782                 } else {
1783                         type = bt_cb(skb)->pkt_type;
                     }
1784
1785                 rem = hci_reassembly(hdev, type, data, count,
1786                                                         STREAM_REASSEMBLY);
1787                 if (rem < 0)
1788                         return rem;
1789
1790                 data += (count - rem);
1791                 count = rem;
1792         }
1793
1794         return rem;
1795 }
1796 EXPORT_SYMBOL(hci_recv_stream_fragment);
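
/* Illustrative sketch: a UART-style driver that only sees an unframed
 * byte stream (each frame prefixed by its packet type) pushes raw bytes
 * straight through; "rx_buf" and "rx_count" are hypothetical:
 *
 *      if (hci_recv_stream_fragment(hdev, rx_buf, rx_count) < 0)
 *              BT_ERR("%s frame reassembly failed", hdev->name);
 *
 * The first byte of every frame selects the packet type, and slot 0
 * (STREAM_REASSEMBLY) tracks the frame in progress. */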
1797
1798 /* ---- Interface to upper protocols ---- */
1799
1800 /* Register/Unregister protocols.
1801  * hci_task_lock is used to ensure that no tasks are running. */
1802 int hci_register_proto(struct hci_proto *hp)
1803 {
1804         int err = 0;
1805
1806         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1807
1808         if (hp->id >= HCI_MAX_PROTO)
1809                 return -EINVAL;
1810
1811         mutex_lock(&hci_task_lock);
1812
1813         if (!hci_proto[hp->id])
1814                 hci_proto[hp->id] = hp;
1815         else
1816                 err = -EEXIST;
1817
1818         mutex_unlock(&hci_task_lock);
1819
1820         return err;
1821 }
1822 EXPORT_SYMBOL(hci_register_proto);
1823
1824 int hci_unregister_proto(struct hci_proto *hp)
1825 {
1826         int err = 0;
1827
1828         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1829
1830         if (hp->id >= HCI_MAX_PROTO)
1831                 return -EINVAL;
1832
1833         mutex_lock(&hci_task_lock);
1834
1835         if (hci_proto[hp->id])
1836                 hci_proto[hp->id] = NULL;
1837         else
1838                 err = -ENOENT;
1839
1840         mutex_unlock(&hci_task_lock);
1841
1842         return err;
1843 }
1844 EXPORT_SYMBOL(hci_unregister_proto);
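
/* Illustrative sketch: an upper protocol claims its slot once at module
 * init; the structure below is abbreviated and the "demo" names are
 * hypothetical, but the pattern mirrors how L2CAP and SCO hook in:
 *
 *      static struct hci_proto demo_proto = {
 *              .name         = "DEMO",
 *              .id           = HCI_PROTO_L2CAP,
 *              .recv_acldata = demo_recv_acldata,
 *      };
 *
 *      err = hci_register_proto(&demo_proto);
 *
 * -EEXIST means the slot is already taken, -EINVAL an out-of-range id. */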
1845
1846 int hci_register_cb(struct hci_cb *cb)
1847 {
1848         BT_DBG("%p name %s", cb, cb->name);
1849
1850         write_lock_bh(&hci_cb_list_lock);
1851         list_add(&cb->list, &hci_cb_list);
1852         write_unlock_bh(&hci_cb_list_lock);
1853
1854         return 0;
1855 }
1856 EXPORT_SYMBOL(hci_register_cb);
1857
1858 int hci_unregister_cb(struct hci_cb *cb)
1859 {
1860         BT_DBG("%p name %s", cb, cb->name);
1861
1862         write_lock_bh(&hci_cb_list_lock);
1863         list_del(&cb->list);
1864         write_unlock_bh(&hci_cb_list_lock);
1865
1866         return 0;
1867 }
1868 EXPORT_SYMBOL(hci_unregister_cb);
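
/* Illustrative sketch: modules interested in security events register a
 * callback block; only the fields used here are shown and the "demo"
 * names are hypothetical:
 *
 *      static struct hci_cb demo_cb = {
 *              .name         = "demo",
 *              .security_cfm = demo_security_cfm,
 *      };
 *
 *      hci_register_cb(&demo_cb);
 *      ...
 *      hci_unregister_cb(&demo_cb);
 */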
1869
1870 static int hci_send_frame(struct sk_buff *skb)
1871 {
1872         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1873
1874         if (!hdev) {
1875                 kfree_skb(skb);
1876                 return -ENODEV;
1877         }
1878
1879         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1880
1881         if (atomic_read(&hdev->promisc)) {
1882                 /* Time stamp */
1883                 __net_timestamp(skb);
1884
1885                 hci_send_to_sock(hdev, skb, NULL);
1886         }
1887
1888         /* Get rid of skb owner, prior to sending to the driver. */
1889         skb_orphan(skb);
1890
1891         return hdev->send(skb);
1892 }
1893
1894 /* Send HCI command */
1895 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1896 {
1897         int len = HCI_COMMAND_HDR_SIZE + plen;
1898         struct hci_command_hdr *hdr;
1899         struct sk_buff *skb;
1900
1901         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1902
1903         skb = bt_skb_alloc(len, GFP_ATOMIC);
1904         if (!skb) {
1905                 BT_ERR("%s no memory for command", hdev->name);
1906                 return -ENOMEM;
1907         }
1908
1909         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1910         hdr->opcode = cpu_to_le16(opcode);
1911         hdr->plen   = plen;
1912
1913         if (plen)
1914                 memcpy(skb_put(skb, plen), param, plen);
1915
1916         BT_DBG("skb len %d", skb->len);
1917
1918         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1919         skb->dev = (void *) hdev;
1920
1921         if (test_bit(HCI_INIT, &hdev->flags))
1922                 hdev->init_last_cmd = opcode;
1923
1924         skb_queue_tail(&hdev->cmd_q, skb);
1925         queue_work(hdev->workqueue, &hdev->cmd_work);
1926
1927         return 0;
1928 }
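
/* Illustrative sketch: sending a command is asynchronous -- the skb is
 * queued on cmd_q and hci_cmd_work() transmits it when a command credit
 * is available. A bare controller reset, for instance, carries no
 * parameters:
 *
 *      hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
 *
 * The result arrives later as a Command Complete or Command Status
 * event. */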
1929
1930 /* Get data from the previously sent command */
1931 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1932 {
1933         struct hci_command_hdr *hdr;
1934
1935         if (!hdev->sent_cmd)
1936                 return NULL;
1937
1938         hdr = (void *) hdev->sent_cmd->data;
1939
1940         if (hdr->opcode != cpu_to_le16(opcode))
1941                 return NULL;
1942
1943         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1944
1945         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1946 }
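
/* Illustrative sketch: event handlers use this to recover the request
 * parameters once the matching Command Complete event arrives; "cp" is
 * a hypothetical local:
 *
 *      struct hci_cp_inquiry *cp;
 *
 *      cp = hci_sent_cmd_data(hdev, HCI_OP_INQUIRY);
 *      if (!cp)
 *              return;
 *
 * NULL means no command is outstanding or the opcode does not match the
 * last command sent. */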
1947
1948 /* Send ACL data */
1949 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1950 {
1951         struct hci_acl_hdr *hdr;
1952         int len = skb->len;
1953
1954         skb_push(skb, HCI_ACL_HDR_SIZE);
1955         skb_reset_transport_header(skb);
1956         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1957         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1958         hdr->dlen   = cpu_to_le16(len);
1959 }
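
/* The handle field packs the 12-bit connection handle together with the
 * packet-boundary and broadcast flags: hci_handle_pack(h, f) evaluates
 * to (h & 0x0fff) | (f << 12). */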
1960
1961 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1962                                 struct sk_buff *skb, __u16 flags)
1963 {
1964         struct hci_dev *hdev = conn->hdev;
1965         struct sk_buff *list;
1966
1967         list = skb_shinfo(skb)->frag_list;
1968         if (!list) {
1969                 /* Non-fragmented */
1970                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1971
1972                 skb_queue_tail(queue, skb);
1973         } else {
1974                 /* Fragmented */
1975                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1976
1977                 skb_shinfo(skb)->frag_list = NULL;
1978
1979                 /* Queue all fragments atomically */
1980                 spin_lock_bh(&queue->lock);
1981
1982                 __skb_queue_tail(queue, skb);
1983
1984                 flags &= ~ACL_START;
1985                 flags |= ACL_CONT;
1986                 do {
1987                         skb = list;
                             list = list->next;
1988
1989                         skb->dev = (void *) hdev;
1990                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1991                         hci_add_acl_hdr(skb, conn->handle, flags);
1992
1993                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1994
1995                         __skb_queue_tail(queue, skb);
1996                 } while (list);
1997
1998                 spin_unlock_bh(&queue->lock);
1999         }
2000 }
2001
2002 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2003 {
2004         struct hci_conn *conn = chan->conn;
2005         struct hci_dev *hdev = conn->hdev;
2006
2007         BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2008
2009         skb->dev = (void *) hdev;
2010         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2011         hci_add_acl_hdr(skb, conn->handle, flags);
2012
2013         hci_queue_acl(conn, &chan->data_q, skb, flags);
2014
2015         tasklet_schedule(&hdev->tx_task);
2016 }
2017 EXPORT_SYMBOL(hci_send_acl);
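
/* Callers stamp skb->priority before sending: the channel scheduler
 * (hci_chan_sent() below) drains queues strictly by the priority of the
 * skb at the head of each queue. */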
2018
2019 /* Send SCO data */
2020 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2021 {
2022         struct hci_dev *hdev = conn->hdev;
2023         struct hci_sco_hdr hdr;
2024
2025         BT_DBG("%s len %d", hdev->name, skb->len);
2026
2027         hdr.handle = cpu_to_le16(conn->handle);
2028         hdr.dlen   = skb->len;
2029
2030         skb_push(skb, HCI_SCO_HDR_SIZE);
2031         skb_reset_transport_header(skb);
2032         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2033
2034         skb->dev = (void *) hdev;
2035         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2036
2037         skb_queue_tail(&conn->data_q, skb);
2038         tasklet_schedule(&hdev->tx_task);
2039 }
2040 EXPORT_SYMBOL(hci_send_sco);
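
/* SCO frames are never fragmented: each skb gets the 3-byte SCO header
 * and goes straight onto the per-connection data_q, drained under
 * hci_low_sent() scheduling by hci_sched_sco(). */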
2041
2042 /* ---- HCI TX task (outgoing data) ---- */
2043
2044 /* HCI Connection scheduler */
2045 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2046 {
2047         struct hci_conn_hash *h = &hdev->conn_hash;
2048         struct hci_conn *conn = NULL, *c;
2049         int num = 0, min = ~0;
2050
2051         /* No device lock needed: the list is walked under RCU, and
2052          * connections are only added/removed with the TX task disabled. */
2053
2054         rcu_read_lock();
2055
2056         list_for_each_entry_rcu(c, &h->list, list) {
2057                 if (c->type != type || skb_queue_empty(&c->data_q))
2058                         continue;
2059
2060                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2061                         continue;
2062
2063                 num++;
2064
2065                 if (c->sent < min) {
2066                         min  = c->sent;
2067                         conn = c;
2068                 }
2069
2070                 if (hci_conn_num(hdev, type) == num)
2071                         break;
2072         }
2073
2074         rcu_read_unlock();
2075
2076         if (conn) {
2077                 int cnt, q;
2078
2079                 switch (conn->type) {
2080                 case ACL_LINK:
2081                         cnt = hdev->acl_cnt;
2082                         break;
2083                 case SCO_LINK:
2084                 case ESCO_LINK:
2085                         cnt = hdev->sco_cnt;
2086                         break;
2087                 case LE_LINK:
2088                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2089                         break;
2090                 default:
2091                         cnt = 0;
2092                         BT_ERR("Unknown link type");
2093                 }
2094
2095                 q = cnt / num;
2096                 *quote = q ? q : 1;
2097         } else {
2098                 *quote = 0;
        }
2099
2100         BT_DBG("conn %p quote %d", conn, *quote);
2101         return conn;
2102 }
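
/* Worked example: with hdev->acl_cnt == 10 free controller buffers and
 * 4 ACL connections holding queued data, the least-loaded connection is
 * picked and granted a quote of 10 / 4 = 2 packets for this round; the
 * quote is clamped to at least 1 so progress is made even when free
 * buffers are outnumbered by connections. */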
2103
2104 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2105 {
2106         struct hci_conn_hash *h = &hdev->conn_hash;
2107         struct hci_conn *c;
2108
2109         BT_ERR("%s link tx timeout", hdev->name);
2110
2111         rcu_read_lock();
2112
2113         /* Kill stalled connections */
2114         list_for_each_entry_rcu(c, &h->list, list) {
2115                 if (c->type == type && c->sent) {
2116                         BT_ERR("%s killing stalled connection %s",
2117                                 hdev->name, batostr(&c->dst));
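                        /* 0x13: Remote User Terminated Connection */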
2118                         hci_acl_disconn(c, 0x13);
2119                 }
2120         }
2121
2122         rcu_read_unlock();
2123 }
2124
2125 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2126                                                 int *quote)
2127 {
2128         struct hci_conn_hash *h = &hdev->conn_hash;
2129         struct hci_chan *chan = NULL;
2130         int num = 0, min = ~0, cur_prio = 0;
2131         struct hci_conn *conn;
2132         int cnt, q, conn_num = 0;
2133
2134         BT_DBG("%s", hdev->name);
2135
2136         rcu_read_lock();
2137
2138         list_for_each_entry_rcu(conn, &h->list, list) {
2139                 struct hci_chan *tmp;
2140
2141                 if (conn->type != type)
2142                         continue;
2143
2144                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2145                         continue;
2146
2147                 conn_num++;
2148
2149                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2150                         struct sk_buff *skb;
2151
2152                         if (skb_queue_empty(&tmp->data_q))
2153                                 continue;
2154
2155                         skb = skb_peek(&tmp->data_q);
2156                         if (skb->priority < cur_prio)
2157                                 continue;
2158
2159                         if (skb->priority > cur_prio) {
2160                                 num = 0;
2161                                 min = ~0;
2162                                 cur_prio = skb->priority;
2163                         }
2164
2165                         num++;
2166
2167                         if (conn->sent < min) {
2168                                 min  = conn->sent;
2169                                 chan = tmp;
2170                         }
2171                 }
2172
2173                 if (hci_conn_num(hdev, type) == conn_num)
2174                         break;
2175         }
2176
2177         rcu_read_unlock();
2178
2179         if (!chan)
2180                 return NULL;
2181
2182         switch (chan->conn->type) {
2183         case ACL_LINK:
2184                 cnt = hdev->acl_cnt;
2185                 break;
2186         case SCO_LINK:
2187         case ESCO_LINK:
2188                 cnt = hdev->sco_cnt;
2189                 break;
2190         case LE_LINK:
2191                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2192                 break;
2193         default:
2194                 cnt = 0;
2195                 BT_ERR("Unknown link type");
2196         }
2197
2198         q = cnt / num;
2199         *quote = q ? q : 1;
2200         BT_DBG("chan %p quote %d", chan, *quote);
2201         return chan;
2202 }
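
/* hci_chan_sent() applies the same least-sent policy across channels,
 * but only among channels whose head skb carries the highest priority
 * currently queued: seeing a higher skb->priority resets the candidate
 * set (num = 0, min = ~0) so lower-priority channels drop out. */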
2203
2204 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2205 {
2206         struct hci_conn_hash *h = &hdev->conn_hash;
2207         struct hci_conn *conn;
2208         int num = 0;
2209
2210         BT_DBG("%s", hdev->name);
2211
2212         rcu_read_lock();
2213
2214         list_for_each_entry_rcu(conn, &h->list, list) {
2215                 struct hci_chan *chan;
2216
2217                 if (conn->type != type)
2218                         continue;
2219
2220                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2221                         continue;
2222
2223                 num++;
2224
2225                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2226                         struct sk_buff *skb;
2227
2228                         if (chan->sent) {
2229                                 chan->sent = 0;
2230                                 continue;
2231                         }
2232
2233                         if (skb_queue_empty(&chan->data_q))
2234                                 continue;
2235
2236                         skb = skb_peek(&chan->data_q);
2237                         if (skb->priority >= HCI_PRIO_MAX - 1)
2238                                 continue;
2239
2240                         skb->priority = HCI_PRIO_MAX - 1;
2241
2242                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2243                                                                 skb->priority);
2244                 }
2245
2246                 if (hci_conn_num(hdev, type) == num)
2247                         break;
2248         }
2249
2250         rcu_read_unlock();
2252 }
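
/* A channel that transmitted nothing in the round that just consumed
 * controller buffers is promoted to HCI_PRIO_MAX - 1, so starved
 * low-priority traffic eventually wins a scheduling round. */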
2253
2254 static inline void hci_sched_acl(struct hci_dev *hdev)
2255 {
2256         struct hci_chan *chan;
2257         struct sk_buff *skb;
2258         int quote;
2259         unsigned int cnt;
2260
2261         BT_DBG("%s", hdev->name);
2262
2263         if (!hci_conn_num(hdev, ACL_LINK))
2264                 return;
2265
2266         if (!test_bit(HCI_RAW, &hdev->flags)) {
2267                 /* ACL tx timeout must be longer than maximum
2268                  * link supervision timeout (40.9 seconds) */
2269                 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
2270                         hci_link_tx_to(hdev, ACL_LINK);
2271         }
2272
2273         cnt = hdev->acl_cnt;
2274
2275         while (hdev->acl_cnt &&
2276                         (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2277                 u32 priority = (skb_peek(&chan->data_q))->priority;
2278                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2279                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2280                                         skb->len, skb->priority);
2281
2282                         /* Stop if priority has changed */
2283                         if (skb->priority < priority)
2284                                 break;
2285
2286                         skb = skb_dequeue(&chan->data_q);
2287
2288                         hci_conn_enter_active_mode(chan->conn,
2289                                                 bt_cb(skb)->force_active);
2290
2291                         hci_send_frame(skb);
2292                         hdev->acl_last_tx = jiffies;
2293
2294                         hdev->acl_cnt--;
2295                         chan->sent++;
2296                         chan->conn->sent++;
2297                 }
2298         }
2299
2300         if (cnt != hdev->acl_cnt)
2301                 hci_prio_recalculate(hdev, ACL_LINK);
2302 }
2303
2304 /* Schedule SCO */
2305 static inline void hci_sched_sco(struct hci_dev *hdev)
2306 {
2307         struct hci_conn *conn;
2308         struct sk_buff *skb;
2309         int quote;
2310
2311         BT_DBG("%s", hdev->name);
2312
2313         if (!hci_conn_num(hdev, SCO_LINK))
2314                 return;
2315
2316         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2317                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2318                         BT_DBG("skb %p len %d", skb, skb->len);
2319                         hci_send_frame(skb);
2320
2321                         conn->sent++;
2322                         if (conn->sent == ~0)
2323                                 conn->sent = 0;
2324                 }
2325         }
2326 }
2327
2328 static inline void hci_sched_esco(struct hci_dev *hdev)
2329 {
2330         struct hci_conn *conn;
2331         struct sk_buff *skb;
2332         int quote;
2333
2334         BT_DBG("%s", hdev->name);
2335
2336         if (!hci_conn_num(hdev, ESCO_LINK))
2337                 return;
2338
2339         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2340                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2341                         BT_DBG("skb %p len %d", skb, skb->len);
2342                         hci_send_frame(skb);
2343
2344                         conn->sent++;
2345                         if (conn->sent == ~0)
2346                                 conn->sent = 0;
2347                 }
2348         }
2349 }
2350
2351 static inline void hci_sched_le(struct hci_dev *hdev)
2352 {
2353         struct hci_chan *chan;
2354         struct sk_buff *skb;
2355         int quote, cnt, tmp;
2356
2357         BT_DBG("%s", hdev->name);
2358
2359         if (!hci_conn_num(hdev, LE_LINK))
2360                 return;
2361
2362         if (!test_bit(HCI_RAW, &hdev->flags)) {
2363                 /* LE tx timeout must be longer than maximum
2364                  * link supervision timeout (40.9 seconds) */
2365                 if (!hdev->le_cnt && hdev->le_pkts &&
2366                                 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2367                         hci_link_tx_to(hdev, LE_LINK);
2368         }
2369
2370         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2371         tmp = cnt;
2372         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2373                 u32 priority = (skb_peek(&chan->data_q))->priority;
2374                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2375                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2376                                         skb->len, skb->priority);
2377
2378                         /* Stop if priority has changed */
2379                         if (skb->priority < priority)
2380                                 break;
2381
2382                         skb = skb_dequeue(&chan->data_q);
2383
2384                         hci_send_frame(skb);
2385                         hdev->le_last_tx = jiffies;
2386
2387                         cnt--;
2388                         chan->sent++;
2389                         chan->conn->sent++;
2390                 }
2391         }
2392
2393         if (hdev->le_pkts)
2394                 hdev->le_cnt = cnt;
2395         else
2396                 hdev->acl_cnt = cnt;
2397
2398         if (cnt != tmp)
2399                 hci_prio_recalculate(hdev, LE_LINK);
2400 }
2401
2402 static void hci_tx_task(unsigned long arg)
2403 {
2404         struct hci_dev *hdev = (struct hci_dev *) arg;
2405         struct sk_buff *skb;
2406
2407         mutex_lock(&hci_task_lock);
2408
2409         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2410                 hdev->sco_cnt, hdev->le_cnt);
2411
2412         /* Schedule queues and send pending frames to the HCI driver */
2413
2414         hci_sched_acl(hdev);
2415
2416         hci_sched_sco(hdev);
2417
2418         hci_sched_esco(hdev);
2419
2420         hci_sched_le(hdev);
2421
2422         /* Send any queued raw (unknown type) packets */
2423         while ((skb = skb_dequeue(&hdev->raw_q)))
2424                 hci_send_frame(skb);
2425
2426         mutex_unlock(&hci_task_lock);
2427 }
2428
2429 /* ----- HCI RX task (incoming data processing) ----- */
2430
2431 /* ACL data packet */
2432 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2433 {
2434         struct hci_acl_hdr *hdr = (void *) skb->data;
2435         struct hci_conn *conn;
2436         __u16 handle, flags;
2437
2438         skb_pull(skb, HCI_ACL_HDR_SIZE);
2439
2440         handle = __le16_to_cpu(hdr->handle);
2441         flags  = hci_flags(handle);
2442         handle = hci_handle(handle);
2443
2444         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2445
2446         hdev->stat.acl_rx++;
2447
2448         hci_dev_lock(hdev);
2449         conn = hci_conn_hash_lookup_handle(hdev, handle);
2450         hci_dev_unlock(hdev);
2451
2452         if (conn) {
2453                 struct hci_proto *hp;
2454
2455                 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2456
2457                 /* Send to upper protocol */
2458                 hp = hci_proto[HCI_PROTO_L2CAP];
2459                 if (hp && hp->recv_acldata) {
2460                         hp->recv_acldata(conn, skb, flags);
2461                         return;
2462                 }
2463         } else {
2464                 BT_ERR("%s ACL packet for unknown connection handle %d",
2465                         hdev->name, handle);
2466         }
2467
2468         kfree_skb(skb);
2469 }
2470
2471 /* SCO data packet */
2472 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2473 {
2474         struct hci_sco_hdr *hdr = (void *) skb->data;
2475         struct hci_conn *conn;
2476         __u16 handle;
2477
2478         skb_pull(skb, HCI_SCO_HDR_SIZE);
2479
2480         handle = __le16_to_cpu(hdr->handle);
2481
2482         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2483
2484         hdev->stat.sco_rx++;
2485
2486         hci_dev_lock(hdev);
2487         conn = hci_conn_hash_lookup_handle(hdev, handle);
2488         hci_dev_unlock(hdev);
2489
2490         if (conn) {
2491                 struct hci_proto *hp;
2492
2493                 /* Send to upper protocol */
2494                 hp = hci_proto[HCI_PROTO_SCO];
2495                 if (hp && hp->recv_scodata) {
2496                         hp->recv_scodata(conn, skb);
2497                         return;
2498                 }
2499         } else {
2500                 BT_ERR("%s SCO packet for unknown connection handle %d",
2501                         hdev->name, handle);
2502         }
2503
2504         kfree_skb(skb);
2505 }
2506
2507 static void hci_rx_work(struct work_struct *work)
2508 {
2509         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2510         struct sk_buff *skb;
2511
2512         BT_DBG("%s", hdev->name);
2513
2514         mutex_lock(&hci_task_lock);
2515
2516         while ((skb = skb_dequeue(&hdev->rx_q))) {
2517                 if (atomic_read(&hdev->promisc)) {
2518                         /* Send copy to the sockets */
2519                         hci_send_to_sock(hdev, skb, NULL);
2520                 }
2521
2522                 if (test_bit(HCI_RAW, &hdev->flags)) {
2523                         kfree_skb(skb);
2524                         continue;
2525                 }
2526
2527                 if (test_bit(HCI_INIT, &hdev->flags)) {
2528                         /* Don't process data packets in this state. */
2529                         switch (bt_cb(skb)->pkt_type) {
2530                         case HCI_ACLDATA_PKT:
2531                         case HCI_SCODATA_PKT:
2532                                 kfree_skb(skb);
2533                                 continue;
2534                         }
2535                 }
2536
2537                 /* Process frame */
2538                 switch (bt_cb(skb)->pkt_type) {
2539                 case HCI_EVENT_PKT:
2540                         BT_DBG("%s Event packet", hdev->name);
2541                         hci_event_packet(hdev, skb);
2542                         break;
2543
2544                 case HCI_ACLDATA_PKT:
2545                         BT_DBG("%s ACL data packet", hdev->name);
2546                         hci_acldata_packet(hdev, skb);
2547                         break;
2548
2549                 case HCI_SCODATA_PKT:
2550                         BT_DBG("%s SCO data packet", hdev->name);
2551                         hci_scodata_packet(hdev, skb);
2552                         break;
2553
2554                 default:
2555                         kfree_skb(skb);
2556                         break;
2557                 }
2558         }
2559
2560         mutex_unlock(&hci_task_lock);
2561 }
2562
2563 static void hci_cmd_work(struct work_struct *work)
2564 {
2565         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2566         struct sk_buff *skb;
2567
2568         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2569
2570         /* Send queued commands */
2571         if (atomic_read(&hdev->cmd_cnt)) {
2572                 skb = skb_dequeue(&hdev->cmd_q);
2573                 if (!skb)
2574                         return;
2575
2576                 kfree_skb(hdev->sent_cmd);
2577
2578                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2579                 if (hdev->sent_cmd) {
2580                         atomic_dec(&hdev->cmd_cnt);
2581                         hci_send_frame(skb);
2582                         if (test_bit(HCI_RESET, &hdev->flags))
2583                                 del_timer(&hdev->cmd_timer);
2584                         else
2585                                 mod_timer(&hdev->cmd_timer,
2586                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2587                 } else {
2588                         skb_queue_head(&hdev->cmd_q, skb);
2589                         queue_work(hdev->workqueue, &hdev->cmd_work);
2590                 }
2591         }
2592 }
2593
2594 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2595 {
2596         /* General inquiry access code (GIAC) */
2597         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2598         struct hci_cp_inquiry cp;
2599
2600         BT_DBG("%s", hdev->name);
2601
2602         if (test_bit(HCI_INQUIRY, &hdev->flags))
2603                 return -EINPROGRESS;
2604
2605         memset(&cp, 0, sizeof(cp));
2606         memcpy(&cp.lap, lap, sizeof(cp.lap));
2607         cp.length  = length;
2608
2609         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2610 }
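
/* Illustrative sketch: length is in units of 1.28 s, so a caller
 * wanting roughly ten seconds of discovery would issue:
 *
 *      err = hci_do_inquiry(hdev, 0x08);       // ~10.24 s
 *
 * -EINPROGRESS is returned while a previous inquiry is still active. */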
2611
2612 int hci_cancel_inquiry(struct hci_dev *hdev)
2613 {
2614         BT_DBG("%s", hdev->name);
2615
2616         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2617                 return -EPERM;
2618
2619         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2620 }
2621
2622 module_param(enable_hs, bool, 0644);
2623 MODULE_PARM_DESC(enable_hs, "Enable High Speed");