Bluetooth: move power_off to system workqueue
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

int enable_hs;

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

static DEFINE_MUTEX(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase, check whether the completed command
         * matches the last init command, and if not just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
                return;

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
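/* The caller is added to req_wait_q with req_status set to
 * HCI_REQ_PEND before the request callback runs, so a completion
 * signalled by hci_req_complete() or hci_req_cancel() before
 * schedule_timeout() is not lost. Callers hold the request lock.
 */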
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

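/* Runs with HCI_INIT set: flush any driver-specific setup commands
 * queued on hdev->driver_init first, then send the mandatory reset and
 * read commands followed by the optional setup. hci_req_complete()
 * matches replies against hdev->init_last_cmd during this phase.
 */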
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_cp_delete_stored_link_key cp;
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                queue_work(hdev->workqueue, &hdev->cmd_work);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_RESET, &hdev->flags);
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
        }

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (!ie) {
                /* Entry not in the cache. Add new one. */
                ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
                if (!ie)
                        return;

                ie->next = cache->list;
                cache->list = ie;
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temp buffer and then
         * copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/* ---- HCI ioctl helpers ---- */

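/* Bring the device up: fails with -ERFKILL while rf-killed and
 * -EALREADY if the device is already up. For non-raw devices the init
 * requests run with HCI_INIT set; if init fails, all works and queues
 * are flushed and the driver is closed again.
 */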
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices if
           enable_hs is not set */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (lmp_host_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->flags)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

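/* Counterpart of hci_dev_open(): cancels any pending request, flushes
 * the RX/TX works and queues, sends a final HCI reset for non-raw
 * devices and only then calls the driver's close(). Harmless when the
 * device is already down.
 */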
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
        }

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
                cancel_delayed_work(&hdev->power_off);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        hci_dev_lock(hdev);
        mgmt_powered(hdev, 0);
        hci_dev_unlock(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->flags))
                        set_bit(HCI_PAIRABLE, &hdev->flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->flags))
                set_bit(HCI_PAIRABLE, &hdev->flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        hci_init_sysfs(hdev);
        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

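/* Work item scheduled at register time: brings the device up and, if
 * it is still marked HCI_AUTO_OFF, arms the delayed power_off work so
 * an unused controller is switched back off after AUTO_OFF_TIMEOUT.
 * The auto-off is cancelled once userspace starts using the device
 * (see hci_get_dev_list/hci_get_dev_info).
 */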
static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->flags))
                schedule_delayed_work(&hdev->power_off,
                                        msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                                        power_off.work);

        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);

        hci_dev_close(hdev->id);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->uuids) {
                struct bt_uuid *uuid;

                uuid = list_entry(p, struct bt_uuid, list);

                list_del(p);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

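/* Decide whether a link key is worth storing persistently, based on
 * the key type and on the bonding requirements both sides announced
 * during pairing. Returns 1 to keep the key, 0 to throw it away after
 * the connection is gone.
 */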
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                                                u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return 1;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return 0;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return 0;

        /* Security mode 3 case */
        if (!conn)
                return 1;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return 1;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return 1;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return 1;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return 0;
}

struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list) {
                struct key_master_id *id;

                if (k->type != HCI_LK_SMP_LTK)
                        continue;

                if (k->dlen != sizeof(*id))
                        continue;

                id = (void *) &k->data;
                if (id->ediv == ediv &&
                                (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
                        return k;
        }

        return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
                                        bdaddr_t *bdaddr, u8 type)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                                bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type, persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
                                        (!conn || conn->remote_auth == 0xff) &&
                                        old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, 16);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (!persistent) {
                list_del(&key->list);
                kfree(key);
        }

        return 0;
}

int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
                        u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
        struct link_key *key, *old_key;
        struct key_master_id *id;
        u8 old_key_type;

        BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

        old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
        if (old_key) {
                key = old_key;
                old_key_type = old_key->type;
        } else {
                key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
                old_key_type = 0xff;
        }

        key->dlen = sizeof(*id);

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, ltk, sizeof(key->val));
        key->type = HCI_LK_SMP_LTK;
        key->pin_len = key_size;

        id = (void *) &key->data;
        id->ediv = ediv;
        memcpy(id->rand, rand, sizeof(id->rand));

        if (new_key)
                mgmt_new_link_key(hdev, key, old_key_type);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&key->list);
        kfree(key);

        return 0;
}

/* HCI command timer function */
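/* Fires when the controller fails to answer the last sent command in
 * time (the timer is armed from the command work path when a command
 * goes out): restore the command credit and kick cmd_work so the
 * command queue does not stall behind the lost reply.
 */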
static void hci_cmd_timer(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        BT_ERR("%s command tx timeout", hdev->name);
        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                                        bdaddr_t *bdaddr)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list)
                if (bacmp(bdaddr, &data->bdaddr) == 0)
                        return data;

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&data->list);
        kfree(data);

        return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }

        return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
                                                                u8 *randomizer)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);

        if (!data) {
                data = kmalloc(sizeof(*data), GFP_ATOMIC);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                list_add(&data->list, &hdev->remote_oob_data);
        }

        memcpy(data->hash, hash, sizeof(data->hash));
        memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

        BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

        return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
                                                bdaddr_t *bdaddr)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->blacklist, list)
                if (bacmp(bdaddr, &b->bdaddr) == 0)
                        return b;

        return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->blacklist) {
                struct bdaddr_list *b;

                b = list_entry(p, struct bdaddr_list, list);

                list_del(p);
                kfree(b);
        }

        return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return -EBADF;

        if (hci_blacklist_lookup(hdev, bdaddr))
                return -EEXIST;

        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, bdaddr);

        list_add(&entry->list, &hdev->blacklist);

        return mgmt_device_blocked(hdev, bdaddr);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return hci_blacklist_clear(hdev);

        entry = hci_blacklist_lookup(hdev, bdaddr);
        if (!entry)
                return -ENOENT;

        list_del(&entry->list);
        kfree(entry);

        return mgmt_device_unblocked(hdev, bdaddr);
}

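/* Delayed work that empties the LE advertising cache under hdev->lock. */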
static void hci_clear_adv_cache(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                                        adv_work.work);

        hci_dev_lock(hdev);

        hci_adv_entries_clear(hdev);

        hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
        struct adv_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        BT_DBG("%s adv cache cleared", hdev->name);

        return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct adv_entry *entry;

        list_for_each_entry(entry, &hdev->adv_entries, list)
                if (bacmp(bdaddr, &entry->bdaddr) == 0)
                        return entry;

        return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
        if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
                return 1;

        return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
                                        struct hci_ev_le_advertising_info *ev)
{
        struct adv_entry *entry;

        if (!is_connectable_adv(ev->evt_type))
                return -EINVAL;

        /* Only new entries should be added to adv_entries. So, if
         * bdaddr was found, don't add it. */
        if (hci_find_adv_entry(hdev, &ev->bdaddr))
                return 0;

        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, &ev->bdaddr);
        entry->bdaddr_type = ev->bdaddr_type;

        list_add(&entry->list, &hdev->adv_entries);

        BT_DBG("%s adv entry added: address %s type %u", hdev->name,
                                batostr(&entry->bdaddr), entry->bdaddr_type);

        return 0;
}

/* Register HCI device */
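/* Takes the first free hci%d id (AMP controllers start at index 1),
 * initializes all work items, queues, timers and lists, creates the
 * per-device workqueue and, with HCI_AUTO_OFF and HCI_SETUP set,
 * schedules hci_power_on so mgmt can complete the setup phase.
 */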
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id, error;

        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        /* Do not allow HCI_AMP devices to register at index 0,
         * so the index can be used as the AMP controller ID.
         */
        id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add_tail(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        mutex_init(&hdev->lock);

        hdev->flags = 0;
        hdev->dev_flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->io_capability = 0x03; /* No Input No Output */

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->mgmt_pending);

        INIT_LIST_HEAD(&hdev->blacklist);

        INIT_LIST_HEAD(&hdev->uuids);

        INIT_LIST_HEAD(&hdev->link_keys);

        INIT_LIST_HEAD(&hdev->remote_oob_data);

        INIT_LIST_HEAD(&hdev->adv_entries);

        INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hdev->workqueue = create_singlethread_workqueue(hdev->name);
        if (!hdev->workqueue) {
                error = -ENOMEM;
                goto err;
        }

        error = hci_add_sysfs(hdev);
        if (error < 0)
                goto err_wqueue;

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        set_bit(HCI_AUTO_OFF, &hdev->flags);
        set_bit(HCI_SETUP, &hdev->flags);
        queue_work(hdev->workqueue, &hdev->power_on);

        hci_notify(hdev, HCI_DEV_REG);

        return id;

err_wqueue:
        destroy_workqueue(hdev->workqueue);
err:
        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        if (!test_bit(HCI_INIT, &hdev->flags) &&
                                        !test_bit(HCI_SETUP, &hdev->flags)) {
                hci_dev_lock(hdev);
                mgmt_index_removed(hdev);
                hci_dev_unlock(hdev);
        }

        /* mgmt_index_removed should take care of emptying the
         * pending list */
        BUG_ON(!list_empty(&hdev->mgmt_pending));

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_del_sysfs(hdev);

        cancel_delayed_work_sync(&hdev->adv_work);

        destroy_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);
        hci_blacklist_clear(hdev);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_remote_oob_data_clear(hdev);
        hci_adv_entries_clear(hdev);
        hci_dev_unlock(hdev);

        __hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        skb_queue_tail(&hdev->rx_q, skb);
        queue_work(hdev->workqueue, &hdev->rx_work);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

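/* Rebuild a complete HCI frame from driver fragments. Each reassembly
 * slot keeps a partial skb whose control block tracks how many bytes
 * are still expected; once the packet header is complete the payload
 * length is taken from it, and a finished frame is passed on via
 * hci_recv_frame(). Returns the number of unconsumed input bytes or a
 * negative error.
 */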
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                                                  int count, __u8 index)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
                                index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, GFP_ATOMIC);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                skb->dev = (void *) hdev;
                hdev->reassembly[index] = skb;
        }

        while (count) {
                scb = (void *) skb->cb;
                len = min(scb->expect, (__u16)count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len  == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        int rem = 0;

        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                rem = hci_reassembly(hdev, type, data, count, type - 1);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

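/* Like hci_recv_fragment() but for byte-stream transports (e.g. UART)
 * where every frame is prefixed with its packet type: the first byte
 * of a new frame selects the type and the rest is reassembled in the
 * single STREAM_REASSEMBLY slot.
 */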
1763 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1764 {
1765         int type;
1766         int rem = 0;
1767
1768         while (count) {
1769                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1770
1771                 if (!skb) {
1772                         struct { char type; } *pkt;
1773
1774                         /* Start of the frame */
1775                         pkt = data;
1776                         type = pkt->type;
1777
1778                         data++;
1779                         count--;
1780                 } else
1781                         type = bt_cb(skb)->pkt_type;
1782
1783                 rem = hci_reassembly(hdev, type, data, count,
1784                                                         STREAM_REASSEMBLY);
1785                 if (rem < 0)
1786                         return rem;
1787
1788                 data += (count - rem);
1789                 count = rem;
1790         }
1791
1792         return rem;
1793 }
1794 EXPORT_SYMBOL(hci_recv_stream_fragment);
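/*
 * Editorial sketch: a pure byte-stream transport (e.g. a UART using H4
 * framing, where every frame starts with a one-byte packet-type indicator)
 * does not need to track frame boundaries at all. sample_uart_rx() is
 * hypothetical; the core peels the type byte off the stream itself using
 * the single STREAM_REASSEMBLY slot.
 */
static void sample_uart_rx(struct hci_dev *hdev, void *buf, int len)
{
        if (hci_recv_stream_fragment(hdev, buf, len) < 0)
                BT_ERR("%s stream reassembly failed", hdev->name);
}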
1795
1796 /* ---- Interface to upper protocols ---- */
1797
1798 /* Register/Unregister protocols.
1799  * hci_task_lock is used to ensure that no tasks are running. */
1800 int hci_register_proto(struct hci_proto *hp)
1801 {
1802         int err = 0;
1803
1804         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1805
1806         if (hp->id >= HCI_MAX_PROTO)
1807                 return -EINVAL;
1808
1809         mutex_lock(&hci_task_lock);
1810
1811         if (!hci_proto[hp->id])
1812                 hci_proto[hp->id] = hp;
1813         else
1814                 err = -EEXIST;
1815
1816         mutex_unlock(&hci_task_lock);
1817
1818         return err;
1819 }
1820 EXPORT_SYMBOL(hci_register_proto);
1821
1822 int hci_unregister_proto(struct hci_proto *hp)
1823 {
1824         int err = 0;
1825
1826         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1827
1828         if (hp->id >= HCI_MAX_PROTO)
1829                 return -EINVAL;
1830
1831         mutex_lock(&hci_task_lock);
1832
1833         if (hci_proto[hp->id])
1834                 hci_proto[hp->id] = NULL;
1835         else
1836                 err = -ENOENT;
1837
1838         mutex_unlock(&hci_task_lock);
1839
1840         return err;
1841 }
1842 EXPORT_SYMBOL(hci_unregister_proto);
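/*
 * Editorial sketch: what an upper-protocol registration looks like. The
 * slot ids are fixed (HCI_PROTO_L2CAP, HCI_PROTO_SCO); sample_proto and
 * sample_recv_acldata() are hypothetical stand-ins, and the callback set
 * is trimmed to the one member this file actually invokes for ACL data.
 */
static int sample_recv_acldata(struct hci_conn *conn, struct sk_buff *skb,
                               __u16 flags)
{
        kfree_skb(skb);         /* a real protocol would parse the frame */
        return 0;
}

static struct hci_proto sample_proto = {
        .name         = "sample",
        .id           = HCI_PROTO_L2CAP,
        .recv_acldata = sample_recv_acldata,
};

/*
 * hci_register_proto(&sample_proto) returns -EEXIST if the slot is taken;
 * hci_unregister_proto(&sample_proto) releases it again.
 */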
1843
1844 int hci_register_cb(struct hci_cb *cb)
1845 {
1846         BT_DBG("%p name %s", cb, cb->name);
1847
1848         write_lock_bh(&hci_cb_list_lock);
1849         list_add(&cb->list, &hci_cb_list);
1850         write_unlock_bh(&hci_cb_list_lock);
1851
1852         return 0;
1853 }
1854 EXPORT_SYMBOL(hci_register_cb);
1855
1856 int hci_unregister_cb(struct hci_cb *cb)
1857 {
1858         BT_DBG("%p name %s", cb, cb->name);
1859
1860         write_lock_bh(&hci_cb_list_lock);
1861         list_del(&cb->list);
1862         write_unlock_bh(&hci_cb_list_lock);
1863
1864         return 0;
1865 }
1866 EXPORT_SYMBOL(hci_unregister_cb);
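/*
 * Editorial sketch: hci_cb consumers only need a name plus whichever event
 * callbacks they care about (security_cfm and friends, per struct hci_cb in
 * hci_core.h); all callbacks are optional. sample_cb is hypothetical.
 */
static struct hci_cb sample_cb = {
        .name = "sample",
};

/* hci_register_cb(&sample_cb); ... hci_unregister_cb(&sample_cb); */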
1867
1868 static int hci_send_frame(struct sk_buff *skb)
1869 {
1870         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1871
1872         if (!hdev) {
1873                 kfree_skb(skb);
1874                 return -ENODEV;
1875         }
1876
1877         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1878
1879         if (atomic_read(&hdev->promisc)) {
1880                 /* Time stamp */
1881                 __net_timestamp(skb);
1882
1883                 hci_send_to_sock(hdev, skb, NULL);
1884         }
1885
1886         /* Get rid of the skb owner prior to sending to the driver. */
1887         skb_orphan(skb);
1888
1889         return hdev->send(skb);
1890 }
1891
1892 /* Send HCI command */
1893 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1894 {
1895         int len = HCI_COMMAND_HDR_SIZE + plen;
1896         struct hci_command_hdr *hdr;
1897         struct sk_buff *skb;
1898
1899         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1900
1901         skb = bt_skb_alloc(len, GFP_ATOMIC);
1902         if (!skb) {
1903                 BT_ERR("%s no memory for command", hdev->name);
1904                 return -ENOMEM;
1905         }
1906
1907         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1908         hdr->opcode = cpu_to_le16(opcode);
1909         hdr->plen   = plen;
1910
1911         if (plen)
1912                 memcpy(skb_put(skb, plen), param, plen);
1913
1914         BT_DBG("skb len %d", skb->len);
1915
1916         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1917         skb->dev = (void *) hdev;
1918
1919         if (test_bit(HCI_INIT, &hdev->flags))
1920                 hdev->init_last_cmd = opcode;
1921
1922         skb_queue_tail(&hdev->cmd_q, skb);
1923         queue_work(hdev->workqueue, &hdev->cmd_work);
1924
1925         return 0;
1926 }
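/*
 * Editorial sketch: commands are queued, not sent synchronously; the reply
 * arrives later as a Command Complete/Status event. SCAN_PAGE, SCAN_INQUIRY
 * and HCI_OP_WRITE_SCAN_ENABLE come from hci.h; the helper itself is
 * hypothetical.
 */
static int sample_make_discoverable(struct hci_dev *hdev)
{
        u8 scan = SCAN_PAGE | SCAN_INQUIRY;

        return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
                            sizeof(scan), &scan);
}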
1927
1928 /* Get data from the previously sent command */
1929 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1930 {
1931         struct hci_command_hdr *hdr;
1932
1933         if (!hdev->sent_cmd)
1934                 return NULL;
1935
1936         hdr = (void *) hdev->sent_cmd->data;
1937
1938         if (hdr->opcode != cpu_to_le16(opcode))
1939                 return NULL;
1940
1941         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1942
1943         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1944 }
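/*
 * Editorial sketch: a Command Complete handler recovering the parameters it
 * queued earlier, the same pattern the hci_cc_*() handlers in hci_event.c
 * use. sample_cc_write_scan_enable() is hypothetical.
 */
static void sample_cc_write_scan_enable(struct hci_dev *hdev)
{
        u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

        if (!sent)
                return;         /* reply doesn't match the sent command */

        BT_DBG("%s requested scan mode 0x%2.2x", hdev->name, *sent);
}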
1945
1946 /* Send ACL data */
1947 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1948 {
1949         struct hci_acl_hdr *hdr;
1950         int len = skb->len;
1951
1952         skb_push(skb, HCI_ACL_HDR_SIZE);
1953         skb_reset_transport_header(skb);
1954         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1955         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1956         hdr->dlen   = cpu_to_le16(len);
1957 }
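/*
 * Editorial note: hci_handle_pack() (hci.h) folds the 12-bit connection
 * handle and the packet-boundary/broadcast flags into one __u16 as
 * (handle & 0x0fff) | (flags << 12); e.g. handle 0x002a sent with
 * ACL_START (0x02) yields hdr->handle 0x202a on the wire.
 */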
1958
1959 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1960                                 struct sk_buff *skb, __u16 flags)
1961 {
1962         struct hci_dev *hdev = conn->hdev;
1963         struct sk_buff *list;
1964
1965         list = skb_shinfo(skb)->frag_list;
1966         if (!list) {
1967                 /* Non-fragmented */
1968                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1969
1970                 skb_queue_tail(queue, skb);
1971         } else {
1972                 /* Fragmented */
1973                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1974
1975                 skb_shinfo(skb)->frag_list = NULL;
1976
1977                 /* Queue all fragments atomically */
1978                 spin_lock_bh(&queue->lock);
1979
1980                 __skb_queue_tail(queue, skb);
1981
1982                 flags &= ~ACL_START;
1983                 flags |= ACL_CONT;
1984                 do {
1985                         skb = list; list = list->next;
1986
1987                         skb->dev = (void *) hdev;
1988                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1989                         hci_add_acl_hdr(skb, conn->handle, flags);
1990
1991                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1992
1993                         __skb_queue_tail(queue, skb);
1994                 } while (list);
1995
1996                 spin_unlock_bh(&queue->lock);
1997         }
1998 }
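/*
 * Editorial note: for an skb carrying a frag_list (L2CAP output sized to
 * the controller's ACL MTU), only the head keeps the caller's ACL_START
 * flags; every chained fragment is retagged ACL_CONT above, so the
 * controller sees one start packet followed by continuations.
 */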
1999
2000 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2001 {
2002         struct hci_conn *conn = chan->conn;
2003         struct hci_dev *hdev = conn->hdev;
2004
2005         BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2006
2007         skb->dev = (void *) hdev;
2008         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2009         hci_add_acl_hdr(skb, conn->handle, flags);
2010
2011         hci_queue_acl(conn, &chan->data_q, skb, flags);
2012
2013         queue_work(hdev->workqueue, &hdev->tx_work);
2014 }
2015 EXPORT_SYMBOL(hci_send_acl);
2016
2017 /* Send SCO data */
2018 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2019 {
2020         struct hci_dev *hdev = conn->hdev;
2021         struct hci_sco_hdr hdr;
2022
2023         BT_DBG("%s len %d", hdev->name, skb->len);
2024
2025         hdr.handle = cpu_to_le16(conn->handle);
2026         hdr.dlen   = skb->len;
2027
2028         skb_push(skb, HCI_SCO_HDR_SIZE);
2029         skb_reset_transport_header(skb);
2030         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2031
2032         skb->dev = (void *) hdev;
2033         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2034
2035         skb_queue_tail(&conn->data_q, skb);
2036         queue_work(hdev->workqueue, &hdev->tx_work);
2037 }
2038 EXPORT_SYMBOL(hci_send_sco);
2039
2040 /* ---- HCI TX task (outgoing data) ---- */
2041
2042 /* HCI Connection scheduler */
2043 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2044 {
2045         struct hci_conn_hash *h = &hdev->conn_hash;
2046         struct hci_conn *conn = NULL, *c;
2047         int num = 0, min = ~0;
2048
2049         /* No device lock needed here: the list is walked under RCU and
2050          * connections are only added/removed with the TX work idle. */
2051
2052         rcu_read_lock();
2053
2054         list_for_each_entry_rcu(c, &h->list, list) {
2055                 if (c->type != type || skb_queue_empty(&c->data_q))
2056                         continue;
2057
2058                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2059                         continue;
2060
2061                 num++;
2062
2063                 if (c->sent < min) {
2064                         min  = c->sent;
2065                         conn = c;
2066                 }
2067
2068                 if (hci_conn_num(hdev, type) == num)
2069                         break;
2070         }
2071
2072         rcu_read_unlock();
2073
2074         if (conn) {
2075                 int cnt, q;
2076
2077                 switch (conn->type) {
2078                 case ACL_LINK:
2079                         cnt = hdev->acl_cnt;
2080                         break;
2081                 case SCO_LINK:
2082                 case ESCO_LINK:
2083                         cnt = hdev->sco_cnt;
2084                         break;
2085                 case LE_LINK:
2086                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2087                         break;
2088                 default:
2089                         cnt = 0;
2090                         BT_ERR("Unknown link type");
2091                 }
2092
2093                 q = cnt / num;
2094                 *quote = q ? q : 1;
2095         } else
2096                 *quote = 0;
2097
2098         BT_DBG("conn %p quote %d", conn, *quote);
2099         return conn;
2100 }
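/*
 * Editorial note, worked example: the quote is a fair share of the free
 * controller buffers. With hdev->sco_cnt == 5 and num == 2 eligible SCO
 * links, the least-busy connection may send 5 / 2 = 2 frames this round;
 * a zero share is rounded up to 1 so no ready connection starves.
 */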
2101
2102 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2103 {
2104         struct hci_conn_hash *h = &hdev->conn_hash;
2105         struct hci_conn *c;
2106
2107         BT_ERR("%s link tx timeout", hdev->name);
2108
2109         rcu_read_lock();
2110
2111         /* Kill stalled connections */
2112         list_for_each_entry_rcu(c, &h->list, list) {
2113                 if (c->type == type && c->sent) {
2114                         BT_ERR("%s killing stalled connection %s",
2115                                 hdev->name, batostr(&c->dst));
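                        /* 0x13: Remote User Terminated Connection */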
2116                         hci_acl_disconn(c, 0x13);
2117                 }
2118         }
2119
2120         rcu_read_unlock();
2121 }
2122
2123 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2124                                                 int *quote)
2125 {
2126         struct hci_conn_hash *h = &hdev->conn_hash;
2127         struct hci_chan *chan = NULL;
2128         int num = 0, min = ~0, cur_prio = 0;
2129         struct hci_conn *conn;
2130         int cnt, q, conn_num = 0;
2131
2132         BT_DBG("%s", hdev->name);
2133
2134         rcu_read_lock();
2135
2136         list_for_each_entry_rcu(conn, &h->list, list) {
2137                 struct hci_chan *tmp;
2138
2139                 if (conn->type != type)
2140                         continue;
2141
2142                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2143                         continue;
2144
2145                 conn_num++;
2146
2147                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2148                         struct sk_buff *skb;
2149
2150                         if (skb_queue_empty(&tmp->data_q))
2151                                 continue;
2152
2153                         skb = skb_peek(&tmp->data_q);
2154                         if (skb->priority < cur_prio)
2155                                 continue;
2156
2157                         if (skb->priority > cur_prio) {
2158                                 num = 0;
2159                                 min = ~0;
2160                                 cur_prio = skb->priority;
2161                         }
2162
2163                         num++;
2164
2165                         if (conn->sent < min) {
2166                                 min  = conn->sent;
2167                                 chan = tmp;
2168                         }
2169                 }
2170
2171                 if (hci_conn_num(hdev, type) == conn_num)
2172                         break;
2173         }
2174
2175         rcu_read_unlock();
2176
2177         if (!chan)
2178                 return NULL;
2179
2180         switch (chan->conn->type) {
2181         case ACL_LINK:
2182                 cnt = hdev->acl_cnt;
2183                 break;
2184         case SCO_LINK:
2185         case ESCO_LINK:
2186                 cnt = hdev->sco_cnt;
2187                 break;
2188         case LE_LINK:
2189                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2190                 break;
2191         default:
2192                 cnt = 0;
2193                 BT_ERR("Unknown link type");
2194         }
2195
2196         q = cnt / num;
2197         *quote = q ? q : 1;
2198         BT_DBG("chan %p quote %d", chan, *quote);
2199         return chan;
2200 }
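/*
 * Editorial note: scheduling is strictly priority-tiered. Only channels
 * whose head skb sits at the highest pending skb->priority compete, and
 * within that tier the connection with the fewest unacked packets
 * (conn->sent) wins, mirroring hci_low_sent() above.
 */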
2201
2202 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2203 {
2204         struct hci_conn_hash *h = &hdev->conn_hash;
2205         struct hci_conn *conn;
2206         int num = 0;
2207
2208         BT_DBG("%s", hdev->name);
2209
2210         rcu_read_lock();
2211
2212         list_for_each_entry_rcu(conn, &h->list, list) {
2213                 struct hci_chan *chan;
2214
2215                 if (conn->type != type)
2216                         continue;
2217
2218                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2219                         continue;
2220
2221                 num++;
2222
2223                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2224                         struct sk_buff *skb;
2225
2226                         if (chan->sent) {
2227                                 chan->sent = 0;
2228                                 continue;
2229                         }
2230
2231                         if (skb_queue_empty(&chan->data_q))
2232                                 continue;
2233
2234                         skb = skb_peek(&chan->data_q);
2235                         if (skb->priority >= HCI_PRIO_MAX - 1)
2236                                 continue;
2237
2238                         skb->priority = HCI_PRIO_MAX - 1;
2239
2240                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2241                                                                 skb->priority);
2242                 }
2243
2244                 if (hci_conn_num(hdev, type) == num)
2245                         break;
2246         }
2247
2248         rcu_read_unlock();
2249
2250 }
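/*
 * Editorial note: this is the anti-starvation half of the scheduler.
 * Channels serviced in the last round get chan->sent cleared; channels
 * that got nothing have their head skb promoted to HCI_PRIO_MAX - 1, so
 * persistently outprioritized traffic eventually wins a round.
 */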
2251
2252 static inline void hci_sched_acl(struct hci_dev *hdev)
2253 {
2254         struct hci_chan *chan;
2255         struct sk_buff *skb;
2256         int quote;
2257         unsigned int cnt;
2258
2259         BT_DBG("%s", hdev->name);
2260
2261         if (!hci_conn_num(hdev, ACL_LINK))
2262                 return;
2263
2264         if (!test_bit(HCI_RAW, &hdev->flags)) {
2265                 /* ACL tx timeout must be longer than maximum
2266                  * link supervision timeout (40.9 seconds) */
2267                 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
2268                         hci_link_tx_to(hdev, ACL_LINK);
2269         }
2270
2271         cnt = hdev->acl_cnt;
2272
2273         while (hdev->acl_cnt &&
2274                         (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2275                 u32 priority = (skb_peek(&chan->data_q))->priority;
2276                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2277                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2278                                         skb->len, skb->priority);
2279
2280                         /* Stop if priority has changed */
2281                         if (skb->priority < priority)
2282                                 break;
2283
2284                         skb = skb_dequeue(&chan->data_q);
2285
2286                         hci_conn_enter_active_mode(chan->conn,
2287                                                 bt_cb(skb)->force_active);
2288
2289                         hci_send_frame(skb);
2290                         hdev->acl_last_tx = jiffies;
2291
2292                         hdev->acl_cnt--;
2293                         chan->sent++;
2294                         chan->conn->sent++;
2295                 }
2296         }
2297
2298         if (cnt != hdev->acl_cnt)
2299                 hci_prio_recalculate(hdev, ACL_LINK);
2300 }
2301
2302 /* Schedule SCO */
2303 static inline void hci_sched_sco(struct hci_dev *hdev)
2304 {
2305         struct hci_conn *conn;
2306         struct sk_buff *skb;
2307         int quote;
2308
2309         BT_DBG("%s", hdev->name);
2310
2311         if (!hci_conn_num(hdev, SCO_LINK))
2312                 return;
2313
2314         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2315                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2316                         BT_DBG("skb %p len %d", skb, skb->len);
2317                         hci_send_frame(skb);
2318
2319                         conn->sent++;
2320                         if (conn->sent == ~0)
2321                                 conn->sent = 0;
2322                 }
2323         }
2324 }
2325
2326 static inline void hci_sched_esco(struct hci_dev *hdev)
2327 {
2328         struct hci_conn *conn;
2329         struct sk_buff *skb;
2330         int quote;
2331
2332         BT_DBG("%s", hdev->name);
2333
2334         if (!hci_conn_num(hdev, ESCO_LINK))
2335                 return;
2336
2337         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2338                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2339                         BT_DBG("skb %p len %d", skb, skb->len);
2340                         hci_send_frame(skb);
2341
2342                         conn->sent++;
2343                         if (conn->sent == ~0)
2344                                 conn->sent = 0;
2345                 }
2346         }
2347 }
2348
2349 static inline void hci_sched_le(struct hci_dev *hdev)
2350 {
2351         struct hci_chan *chan;
2352         struct sk_buff *skb;
2353         int quote, cnt, tmp;
2354
2355         BT_DBG("%s", hdev->name);
2356
2357         if (!hci_conn_num(hdev, LE_LINK))
2358                 return;
2359
2360         if (!test_bit(HCI_RAW, &hdev->flags)) {
2361                 /* LE tx timeout must be longer than maximum
2362                  * link supervision timeout (40.9 seconds) */
2363                 if (!hdev->le_cnt && hdev->le_pkts &&
2364                                 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2365                         hci_link_tx_to(hdev, LE_LINK);
2366         }
2367
2368         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2369         tmp = cnt;
2370         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2371                 u32 priority = (skb_peek(&chan->data_q))->priority;
2372                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2373                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2374                                         skb->len, skb->priority);
2375
2376                         /* Stop if priority has changed */
2377                         if (skb->priority < priority)
2378                                 break;
2379
2380                         skb = skb_dequeue(&chan->data_q);
2381
2382                         hci_send_frame(skb);
2383                         hdev->le_last_tx = jiffies;
2384
2385                         cnt--;
2386                         chan->sent++;
2387                         chan->conn->sent++;
2388                 }
2389         }
2390
2391         if (hdev->le_pkts)
2392                 hdev->le_cnt = cnt;
2393         else
2394                 hdev->acl_cnt = cnt;
2395
2396         if (cnt != tmp)
2397                 hci_prio_recalculate(hdev, LE_LINK);
2398 }
2399
2400 static void hci_tx_work(struct work_struct *work)
2401 {
2402         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2403         struct sk_buff *skb;
2404
2405         mutex_lock(&hci_task_lock);
2406
2407         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2408                 hdev->sco_cnt, hdev->le_cnt);
2409
2410         /* Schedule queues and send frames to the HCI driver */
2411
2412         hci_sched_acl(hdev);
2413
2414         hci_sched_sco(hdev);
2415
2416         hci_sched_esco(hdev);
2417
2418         hci_sched_le(hdev);
2419
2420         /* Send next queued raw (unknown type) packet */
2421         while ((skb = skb_dequeue(&hdev->raw_q)))
2422                 hci_send_frame(skb);
2423
2424         mutex_unlock(&hci_task_lock);
2425 }
2426
2427 /* ----- HCI RX task (incoming data processing) ----- */
2428
2429 /* ACL data packet */
2430 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2431 {
2432         struct hci_acl_hdr *hdr = (void *) skb->data;
2433         struct hci_conn *conn;
2434         __u16 handle, flags;
2435
2436         skb_pull(skb, HCI_ACL_HDR_SIZE);
2437
2438         handle = __le16_to_cpu(hdr->handle);
2439         flags  = hci_flags(handle);
2440         handle = hci_handle(handle);
2441
2442         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2443
2444         hdev->stat.acl_rx++;
2445
2446         hci_dev_lock(hdev);
2447         conn = hci_conn_hash_lookup_handle(hdev, handle);
2448         hci_dev_unlock(hdev);
2449
2450         if (conn) {
2451                 register struct hci_proto *hp;
2452
2453                 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2454
2455                 /* Send to upper protocol */
2456                 hp = hci_proto[HCI_PROTO_L2CAP];
2457                 if (hp && hp->recv_acldata) {
2458                         hp->recv_acldata(conn, skb, flags);
2459                         return;
2460                 }
2461         } else {
2462                 BT_ERR("%s ACL packet for unknown connection handle %d",
2463                         hdev->name, handle);
2464         }
2465
2466         kfree_skb(skb);
2467 }
2468
2469 /* SCO data packet */
2470 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2471 {
2472         struct hci_sco_hdr *hdr = (void *) skb->data;
2473         struct hci_conn *conn;
2474         __u16 handle;
2475
2476         skb_pull(skb, HCI_SCO_HDR_SIZE);
2477
2478         handle = __le16_to_cpu(hdr->handle);
2479
2480         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2481
2482         hdev->stat.sco_rx++;
2483
2484         hci_dev_lock(hdev);
2485         conn = hci_conn_hash_lookup_handle(hdev, handle);
2486         hci_dev_unlock(hdev);
2487
2488         if (conn) {
2489                 register struct hci_proto *hp;
2490
2491                 /* Send to upper protocol */
2492                 hp = hci_proto[HCI_PROTO_SCO];
2493                 if (hp && hp->recv_scodata) {
2494                         hp->recv_scodata(conn, skb);
2495                         return;
2496                 }
2497         } else {
2498                 BT_ERR("%s SCO packet for unknown connection handle %d",
2499                         hdev->name, handle);
2500         }
2501
2502         kfree_skb(skb);
2503 }
2504
2505 static void hci_rx_work(struct work_struct *work)
2506 {
2507         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2508         struct sk_buff *skb;
2509
2510         BT_DBG("%s", hdev->name);
2511
2512         mutex_lock(&hci_task_lock);
2513
2514         while ((skb = skb_dequeue(&hdev->rx_q))) {
2515                 if (atomic_read(&hdev->promisc)) {
2516                         /* Send copy to the sockets */
2517                         hci_send_to_sock(hdev, skb, NULL);
2518                 }
2519
2520                 if (test_bit(HCI_RAW, &hdev->flags)) {
2521                         kfree_skb(skb);
2522                         continue;
2523                 }
2524
2525                 if (test_bit(HCI_INIT, &hdev->flags)) {
2526                         /* Don't process data packets in these states. */
2527                         switch (bt_cb(skb)->pkt_type) {
2528                         case HCI_ACLDATA_PKT:
2529                         case HCI_SCODATA_PKT:
2530                                 kfree_skb(skb);
2531                                 continue;
2532                         }
2533                 }
2534
2535                 /* Process frame */
2536                 switch (bt_cb(skb)->pkt_type) {
2537                 case HCI_EVENT_PKT:
2538                         BT_DBG("%s Event packet", hdev->name);
2539                         hci_event_packet(hdev, skb);
2540                         break;
2541
2542                 case HCI_ACLDATA_PKT:
2543                         BT_DBG("%s ACL data packet", hdev->name);
2544                         hci_acldata_packet(hdev, skb);
2545                         break;
2546
2547                 case HCI_SCODATA_PKT:
2548                         BT_DBG("%s SCO data packet", hdev->name);
2549                         hci_scodata_packet(hdev, skb);
2550                         break;
2551
2552                 default:
2553                         kfree_skb(skb);
2554                         break;
2555                 }
2556         }
2557
2558         mutex_unlock(&hci_task_lock);
2559 }
2560
2561 static void hci_cmd_work(struct work_struct *work)
2562 {
2563         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2564         struct sk_buff *skb;
2565
2566         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2567
2568         /* Send queued commands */
2569         if (atomic_read(&hdev->cmd_cnt)) {
2570                 skb = skb_dequeue(&hdev->cmd_q);
2571                 if (!skb)
2572                         return;
2573
2574                 kfree_skb(hdev->sent_cmd);
2575
2576                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2577                 if (hdev->sent_cmd) {
2578                         atomic_dec(&hdev->cmd_cnt);
2579                         hci_send_frame(skb);
2580                         if (test_bit(HCI_RESET, &hdev->flags))
2581                                 del_timer(&hdev->cmd_timer);
2582                         else
2583                                 mod_timer(&hdev->cmd_timer,
2584                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2585                 } else {
2586                         skb_queue_head(&hdev->cmd_q, skb);
2587                         queue_work(hdev->workqueue, &hdev->cmd_work);
2588                 }
2589         }
2590 }
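/*
 * Editorial note: hdev->cmd_cnt mirrors the Num_HCI_Command_Packets credit
 * the controller reports in Command Complete/Status events, so at most that
 * many commands are in flight at once; cmd_timer catches controllers that
 * stop answering (except during reset, when the timer is cancelled).
 */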
2591
2592 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2593 {
2594         /* General inquiry access code (GIAC) */
2595         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2596         struct hci_cp_inquiry cp;
2597
2598         BT_DBG("%s", hdev->name);
2599
2600         if (test_bit(HCI_INQUIRY, &hdev->flags))
2601                 return -EINPROGRESS;
2602
2603         memset(&cp, 0, sizeof(cp));
2604         memcpy(&cp.lap, lap, sizeof(cp.lap));
2605         cp.length  = length;
2606
2607         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2608 }
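/*
 * Editorial note: cp.length is expressed in units of 1.28 s, so e.g.
 * hci_do_inquiry(hdev, 0x08) asks for roughly a 10.24 s inquiry.
 */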
2609
2610 int hci_cancel_inquiry(struct hci_dev *hdev)
2611 {
2612         BT_DBG("%s", hdev->name);
2613
2614         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2615                 return -EPERM;
2616
2617         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2618 }
2619
2620 module_param(enable_hs, bool, 0644);
2621 MODULE_PARM_DESC(enable_hs, "Enable High Speed");
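/*
 * Editorial note: enable_hs is declared "int" near the top of this file
 * while module_param() claims bool; later kernels enforce that bool
 * parameters really are bool, and the declaration was subsequently
 * switched to "bool enable_hs" upstream.
 */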