Bluetooth: replace list_for_each with list_for_each_entry whenever possible
[linux-flexiantxendom0-3.2.10.git] / net / bluetooth / hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI core. */
26
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
47
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54
55 #define AUTO_OFF_TIMEOUT 2000
56
57 static void hci_cmd_task(unsigned long arg);
58 static void hci_rx_task(unsigned long arg);
59 static void hci_tx_task(unsigned long arg);
60
61 static DEFINE_RWLOCK(hci_task_lock);
62
63 /* HCI device list */
64 LIST_HEAD(hci_dev_list);
65 DEFINE_RWLOCK(hci_dev_list_lock);
66
67 /* HCI callback list */
68 LIST_HEAD(hci_cb_list);
69 DEFINE_RWLOCK(hci_cb_list_lock);
70
71 /* HCI protocols */
72 #define HCI_MAX_PROTO   2
73 struct hci_proto *hci_proto[HCI_MAX_PROTO];
74
75 /* HCI notifiers list */
76 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
77
78 /* ---- HCI notifications ---- */
79
   /* Register @nb on the global atomic HCI notifier chain so it receives
    * HCI_DEV_* events broadcast by hci_notify(). Returns 0 on success. */
80 int hci_register_notifier(struct notifier_block *nb)
81 {
82         return atomic_notifier_chain_register(&hci_notifier, nb);
83 }
84
   /* Remove @nb from the global atomic HCI notifier chain. */
85 int hci_unregister_notifier(struct notifier_block *nb)
86 {
87         return atomic_notifier_chain_unregister(&hci_notifier, nb);
88 }
89
   /* Broadcast @event (e.g. HCI_DEV_UP / HCI_DEV_DOWN) for @hdev to every
    * notifier registered via hci_register_notifier(). Safe in atomic context. */
90 static void hci_notify(struct hci_dev *hdev, int event)
91 {
92         atomic_notifier_call_chain(&hci_notifier, event, hdev);
93 }
94
95 /* ---- HCI requests ---- */
96
   /* Completion hook for synchronous requests: records @result for the
    * command @cmd and wakes the task sleeping in __hci_request(). */
97 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
98 {
99         BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
100
101         /* If this is the init phase check if the completed command matches
102          * the last init command, and if not just return.
103          */
104         if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
105                 return;
106
            /* Only a pending request gets its result recorded; anything else
             * is a completion nobody is waiting for. */
107         if (hdev->req_status == HCI_REQ_PEND) {
108                 hdev->req_result = result;
109                 hdev->req_status = HCI_REQ_DONE;
110                 wake_up_interruptible(&hdev->req_wait_q);
111         }
112 }
113
    /* Abort a pending synchronous request with error @err (positive errno)
     * and wake the waiter in __hci_request(), which negates it on return. */
114 static void hci_req_cancel(struct hci_dev *hdev, int err)
115 {
116         BT_DBG("%s err 0x%2.2x", hdev->name, err);
117
118         if (hdev->req_status == HCI_REQ_PEND) {
119                 hdev->req_result = err;
120                 hdev->req_status = HCI_REQ_CANCELED;
121                 wake_up_interruptible(&hdev->req_wait_q);
122         }
123 }
124
125 /* Execute request and wait for completion. */
    /* Run @req (which queues HCI commands) and sleep up to @timeout jiffies
     * until hci_req_complete()/hci_req_cancel() delivers a result.
     * Caller must already hold the request lock — see hci_request(). */
126 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
127                                         unsigned long opt, __u32 timeout)
128 {
129         DECLARE_WAITQUEUE(wait, current);
130         int err = 0;
131
132         BT_DBG("%s start", hdev->name);
133
134         hdev->req_status = HCI_REQ_PEND;
135
            /* Queue on the wait queue BEFORE issuing commands so a fast
             * completion cannot be missed. */
136         add_wait_queue(&hdev->req_wait_q, &wait);
137         set_current_state(TASK_INTERRUPTIBLE);
138
139         req(hdev, opt);
140         schedule_timeout(timeout);
141
142         remove_wait_queue(&hdev->req_wait_q, &wait);
143
            /* NOTE(review): returning here leaves req_status at HCI_REQ_PEND,
             * so a late hci_req_complete() could still match this stale
             * request — confirm against the callers' cancel paths. */
144         if (signal_pending(current))
145                 return -EINTR;
146
147         switch (hdev->req_status) {
148         case HCI_REQ_DONE:
                    /* Controller replied: map the HCI status code to an errno. */
149                 err = -bt_to_errno(hdev->req_result);
150                 break;
151
152         case HCI_REQ_CANCELED:
153                 err = -hdev->req_result;
154                 break;
155
            /* Still HCI_REQ_PEND: the timeout expired with no completion. */
156         default:
157                 err = -ETIMEDOUT;
158                 break;
159         }
160
161         hdev->req_status = hdev->req_result = 0;
162
163         BT_DBG("%s end: err %d", hdev->name, err);
164
165         return err;
166 }
167
    /* Serialized front-end for __hci_request(): takes the per-device request
     * lock and fails with -ENETDOWN when the device is not up. */
168 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
169                                         unsigned long opt, __u32 timeout)
170 {
171         int ret;
172
173         if (!test_bit(HCI_UP, &hdev->flags))
174                 return -ENETDOWN;
175
176         /* Serialize all requests */
177         hci_req_lock(hdev);
178         ret = __hci_request(hdev, req, opt, timeout);
179         hci_req_unlock(hdev);
180
181         return ret;
182 }
183
    /* Request callback for __hci_request(): issue HCI_OP_RESET and mark the
     * reset as in flight via HCI_RESET. @opt is unused. */
184 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
185 {
186         BT_DBG("%s %ld", hdev->name, opt);
187
188         /* Reset device */
189         set_bit(HCI_RESET, &hdev->flags);
190         hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
191 }
192
193 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
194 {
195         struct hci_cp_delete_stored_link_key cp;
196         struct sk_buff *skb;
197         __le16 param;
198         __u8 flt_type;
199
200         BT_DBG("%s %ld", hdev->name, opt);
201
202         /* Driver initialization */
203
204         /* Special commands */
205         while ((skb = skb_dequeue(&hdev->driver_init))) {
206                 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
207                 skb->dev = (void *) hdev;
208
209                 skb_queue_tail(&hdev->cmd_q, skb);
210                 tasklet_schedule(&hdev->cmd_task);
211         }
212         skb_queue_purge(&hdev->driver_init);
213
214         /* Mandatory initialization */
215
216         /* Reset */
217         if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
218                         set_bit(HCI_RESET, &hdev->flags);
219                         hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
220         }
221
222         /* Read Local Supported Features */
223         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
224
225         /* Read Local Version */
226         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
227
228         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
229         hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
230
231 #if 0
232         /* Host buffer size */
233         {
234                 struct hci_cp_host_buffer_size cp;
235                 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
236                 cp.sco_mtu = HCI_MAX_SCO_SIZE;
237                 cp.acl_max_pkt = cpu_to_le16(0xffff);
238                 cp.sco_max_pkt = cpu_to_le16(0xffff);
239                 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
240         }
241 #endif
242
243         /* Read BD Address */
244         hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
245
246         /* Read Class of Device */
247         hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
248
249         /* Read Local Name */
250         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
251
252         /* Read Voice Setting */
253         hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
254
255         /* Optional initialization */
256
257         /* Clear Event Filters */
258         flt_type = HCI_FLT_CLEAR_ALL;
259         hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
260
261         /* Connection accept timeout ~20 secs */
262         param = cpu_to_le16(0x7d00);
263         hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
264
265         bacpy(&cp.bdaddr, BDADDR_ANY);
266         cp.delete_all = 1;
267         hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
268 }
269
    /* Request callback for LE-capable controllers: query the LE-specific
     * ACL buffer size. @opt is unused. */
270 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
271 {
272         BT_DBG("%s", hdev->name);
273
274         /* Read LE buffer size */
275         hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
276 }
277
    /* Request callback: write the scan-enable setting. @opt carries the
     * scan bitmask (inquiry/page scan) truncated to one byte. */
278 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
279 {
280         __u8 scan = opt;
281
282         BT_DBG("%s %x", hdev->name, scan);
283
284         /* Inquiry and Page scans */
285         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
286 }
287
    /* Request callback: set the authentication-enable flag. @opt is the
     * desired enable value truncated to one byte. */
288 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
289 {
290         __u8 auth = opt;
291
292         BT_DBG("%s %x", hdev->name, auth);
293
294         /* Authentication */
295         hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
296 }
297
    /* Request callback: set the encryption mode. @opt is the desired mode
     * truncated to one byte. */
298 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
299 {
300         __u8 encrypt = opt;
301
302         BT_DBG("%s %x", hdev->name, encrypt);
303
304         /* Encryption */
305         hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
306 }
307
    /* Request callback: write the default link policy. @opt is the policy
     * mask, converted to little-endian wire format. */
308 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
309 {
310         __le16 policy = cpu_to_le16(opt);
311
312         BT_DBG("%s %x", hdev->name, policy);
313
314         /* Default link policy */
315         hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
316 }
317
    /* Get HCI device by index.
     * Device is held on return. */
    /* Returns the device with id == @index with its refcount raised, or
     * NULL if no such device exists. Caller must hci_dev_put() it. */
320 struct hci_dev *hci_dev_get(int index)
321 {
322         struct hci_dev *hdev = NULL, *d;
323
324         BT_DBG("%d", index);
325
326         if (index < 0)
327                 return NULL;
328
            /* Walk the global device list under the read lock; the hold is
             * taken before the lock is dropped so the device cannot vanish. */
329         read_lock(&hci_dev_list_lock);
330         list_for_each_entry(d, &hci_dev_list, list) {
331                 if (d->id == index) {
332                         hdev = hci_dev_hold(d);
333                         break;
334                 }
335         }
336         read_unlock(&hci_dev_list_lock);
337         return hdev;
338 }
339
340 /* ---- Inquiry support ---- */
    /* Free every entry of the inquiry cache and leave it empty.
     * NOTE(review): callers appear to hold the device lock
     * (hci_dev_lock_bh) around this — confirm for new call sites. */
341 static void inquiry_cache_flush(struct hci_dev *hdev)
342 {
343         struct inquiry_cache *cache = &hdev->inq_cache;
344         struct inquiry_entry *next  = cache->list, *e;
345
346         BT_DBG("cache %p", cache);
347
            /* Detach the list first, then walk the detached chain freeing
             * each singly-linked entry. */
348         cache->list = NULL;
349         while ((e = next)) {
350                 next = e->next;
351                 kfree(e);
352         }
353 }
354
    /* Linear search of the inquiry cache for the entry whose address
     * matches @bdaddr; returns the entry or NULL. */
355 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
356 {
357         struct inquiry_cache *cache = &hdev->inq_cache;
358         struct inquiry_entry *e;
359
360         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
361
362         for (e = cache->list; e; e = e->next)
363                 if (!bacmp(&e->data.bdaddr, bdaddr))
364                         break;
365         return e;
366 }
367
    /* Insert or refresh the inquiry-cache entry for @data->bdaddr: an
     * existing entry is overwritten in place, otherwise a new entry is
     * pushed at the head. Both timestamps are refreshed. */
368 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
369 {
370         struct inquiry_cache *cache = &hdev->inq_cache;
371         struct inquiry_entry *ie;
372
373         BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
374
375         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
376         if (!ie) {
377                 /* Entry not in the cache. Add new one. */
                    /* GFP_ATOMIC: this runs from the (non-sleeping) event path;
                     * on allocation failure the result is silently dropped. */
378                 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
379                 if (!ie)
380                         return;
381
382                 ie->next = cache->list;
383                 cache->list = ie;
384         }
385
386         memcpy(&ie->data, data, sizeof(*data));
387         ie->timestamp = jiffies;
388         cache->timestamp = jiffies;
389 }
390
    /* Copy up to @num cached inquiry results into @buf as an array of
     * struct inquiry_info; returns the number of entries copied.
     * Must not sleep — see the comment in hci_inquiry(). */
391 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
392 {
393         struct inquiry_cache *cache = &hdev->inq_cache;
394         struct inquiry_info *info = (struct inquiry_info *) buf;
395         struct inquiry_entry *e;
396         int copied = 0;
397
398         for (e = cache->list; e && copied < num; e = e->next, copied++) {
399                 struct inquiry_data *data = &e->data;
400                 bacpy(&info->bdaddr, &data->bdaddr);
401                 info->pscan_rep_mode    = data->pscan_rep_mode;
402                 info->pscan_period_mode = data->pscan_period_mode;
403                 info->pscan_mode        = data->pscan_mode;
404                 memcpy(info->dev_class, data->dev_class, 3);
405                 info->clock_offset      = data->clock_offset;
406                 info++;
407         }
408
409         BT_DBG("cache %p, copied %d", cache, copied);
410         return copied;
411 }
412
    /* Request callback: start an inquiry using the parameters passed by
     * hci_inquiry() through @opt (a struct hci_inquiry_req pointer).
     * A no-op if an inquiry is already running. */
413 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
414 {
415         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
416         struct hci_cp_inquiry cp;
417
418         BT_DBG("%s", hdev->name);
419
420         if (test_bit(HCI_INQUIRY, &hdev->flags))
421                 return;
422
423         /* Start Inquiry */
424         memcpy(&cp.lap, &ir->lap, 3);
425         cp.length  = ir->length;
426         cp.num_rsp = ir->num_rsp;
427         hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
428 }
429
    /* HCIINQUIRY ioctl backend: run (or reuse) an inquiry and copy the
     * results back to userspace as a struct hci_inquiry_req followed by
     * an array of struct inquiry_info. Returns 0 or a negative errno. */
430 int hci_inquiry(void __user *arg)
431 {
432         __u8 __user *ptr = arg;
433         struct hci_inquiry_req ir;
434         struct hci_dev *hdev;
435         int err = 0, do_inquiry = 0, max_rsp;
436         long timeo;
437         __u8 *buf;
438
439         if (copy_from_user(&ir, ptr, sizeof(ir)))
440                 return -EFAULT;
441
442         hdev = hci_dev_get(ir.dev_id);
443         if (!hdev)
444                 return -ENODEV;
445
            /* A fresh inquiry is only started when the cache is stale,
             * empty, or the caller explicitly asked for a flush. */
446         hci_dev_lock_bh(hdev);
447         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
448                                 inquiry_cache_empty(hdev) ||
449                                 ir.flags & IREQ_CACHE_FLUSH) {
450                 inquiry_cache_flush(hdev);
451                 do_inquiry = 1;
452         }
453         hci_dev_unlock_bh(hdev);
454
455         timeo = ir.length * msecs_to_jiffies(2000);
456
457         if (do_inquiry) {
458                 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
459                 if (err < 0)
460                         goto done;
461         }
462
463         /* for unlimited number of responses we will use buffer with 255 entries */
464         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
465
466         /* cache_dump can't sleep. Therefore we allocate temp buffer and then
467          * copy it to the user space.
468          */
469         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
470         if (!buf) {
471                 err = -ENOMEM;
472                 goto done;
473         }
474
475         hci_dev_lock_bh(hdev);
476         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
477         hci_dev_unlock_bh(hdev);
478
479         BT_DBG("num_rsp %d", ir.num_rsp);
480
            /* Write back the updated request header, then the result array
             * immediately after it in the user buffer. */
481         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
482                 ptr += sizeof(ir);
483                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
484                                         ir.num_rsp))
485                         err = -EFAULT;
486         } else
487                 err = -EFAULT;
488
489         kfree(buf);
490
491 done:
492         hci_dev_put(hdev);
493         return err;
494 }
495
496 /* ---- HCI ioctl helpers ---- */
497
    /* Bring device @dev up: open the transport, run the HCI init sequence
     * (unless the device is raw), and set HCI_UP on success. On init
     * failure every queue/tasklet is torn back down before returning.
     * Returns 0 or a negative errno (-ERFKILL, -EALREADY, -EIO, ...). */
498 int hci_dev_open(__u16 dev)
499 {
500         struct hci_dev *hdev;
501         int ret = 0;
502
503         hdev = hci_dev_get(dev);
504         if (!hdev)
505                 return -ENODEV;
506
507         BT_DBG("%s %p", hdev->name, hdev);
508
509         hci_req_lock(hdev);
510
511         if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
512                 ret = -ERFKILL;
513                 goto done;
514         }
515
516         if (test_bit(HCI_UP, &hdev->flags)) {
517                 ret = -EALREADY;
518                 goto done;
519         }
520
521         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
522                 set_bit(HCI_RAW, &hdev->flags);
523
524         /* Treat all non BR/EDR controllers as raw devices for now */
525         if (hdev->dev_type != HCI_BREDR)
526                 set_bit(HCI_RAW, &hdev->flags);
527
528         if (hdev->open(hdev)) {
529                 ret = -EIO;
530                 goto done;
531         }
532
            /* Raw devices skip the HCI init handshake entirely. */
533         if (!test_bit(HCI_RAW, &hdev->flags)) {
534                 atomic_set(&hdev->cmd_cnt, 1);
535                 set_bit(HCI_INIT, &hdev->flags);
536                 hdev->init_last_cmd = 0;
537
                    /* Request lock is already held, so call __hci_request
                     * directly rather than hci_request(). */
538                 ret = __hci_request(hdev, hci_init_req, 0,
539                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
540
541                 if (lmp_host_le_capable(hdev))
542                         ret = __hci_request(hdev, hci_le_init_req, 0,
543                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
544
545                 clear_bit(HCI_INIT, &hdev->flags);
546         }
547
548         if (!ret) {
549                 hci_dev_hold(hdev);
550                 set_bit(HCI_UP, &hdev->flags);
551                 hci_notify(hdev, HCI_DEV_UP);
552                 if (!test_bit(HCI_SETUP, &hdev->flags))
553                         mgmt_powered(hdev->id, 1);
554         } else {
555                 /* Init failed, cleanup */
556                 tasklet_kill(&hdev->rx_task);
557                 tasklet_kill(&hdev->tx_task);
558                 tasklet_kill(&hdev->cmd_task);
559
560                 skb_queue_purge(&hdev->cmd_q);
561                 skb_queue_purge(&hdev->rx_q);
562
563                 if (hdev->flush)
564                         hdev->flush(hdev);
565
566                 if (hdev->sent_cmd) {
567                         kfree_skb(hdev->sent_cmd);
568                         hdev->sent_cmd = NULL;
569                 }
570
571                 hdev->close(hdev);
572                 hdev->flags = 0;
573         }
574
575 done:
576         hci_req_unlock(hdev);
577         hci_dev_put(hdev);
578         return ret;
579 }
580
    /* Take @hdev down: cancel any pending request, stop tasklets, flush
     * caches and queues, reset the controller (non-raw devices), and close
     * the transport. Drops the reference taken at open time. Always 0. */
581 static int hci_dev_do_close(struct hci_dev *hdev)
582 {
583         BT_DBG("%s %p", hdev->name, hdev);
584
585         hci_req_cancel(hdev, ENODEV);
586         hci_req_lock(hdev);
587
            /* Already down: just make sure the command timer is stopped. */
588         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
589                 del_timer_sync(&hdev->cmd_timer);
590                 hci_req_unlock(hdev);
591                 return 0;
592         }
593
594         /* Kill RX and TX tasks */
595         tasklet_kill(&hdev->rx_task);
596         tasklet_kill(&hdev->tx_task);
597
598         hci_dev_lock_bh(hdev);
599         inquiry_cache_flush(hdev);
600         hci_conn_hash_flush(hdev);
601         hci_dev_unlock_bh(hdev);
602
603         hci_notify(hdev, HCI_DEV_DOWN);
604
605         if (hdev->flush)
606                 hdev->flush(hdev);
607
608         /* Reset device */
609         skb_queue_purge(&hdev->cmd_q);
610         atomic_set(&hdev->cmd_cnt, 1);
611         if (!test_bit(HCI_RAW, &hdev->flags)) {
612                 set_bit(HCI_INIT, &hdev->flags);
                    /* Best-effort reset with a short 250 ms timeout; the
                     * return value is deliberately ignored. */
613                 __hci_request(hdev, hci_reset_req, 0,
614                                         msecs_to_jiffies(250));
615                 clear_bit(HCI_INIT, &hdev->flags);
616         }
617
618         /* Kill cmd task */
619         tasklet_kill(&hdev->cmd_task);
620
621         /* Drop queues */
622         skb_queue_purge(&hdev->rx_q);
623         skb_queue_purge(&hdev->cmd_q);
624         skb_queue_purge(&hdev->raw_q);
625
626         /* Drop last sent command */
627         if (hdev->sent_cmd) {
628                 del_timer_sync(&hdev->cmd_timer);
629                 kfree_skb(hdev->sent_cmd);
630                 hdev->sent_cmd = NULL;
631         }
632
633         /* After this point our queues are empty
634          * and no tasks are scheduled. */
635         hdev->close(hdev);
636
637         mgmt_powered(hdev->id, 0);
638
639         /* Clear flags */
640         hdev->flags = 0;
641
642         hci_req_unlock(hdev);
643
            /* Balances the hci_dev_hold() taken in hci_dev_open(). */
644         hci_dev_put(hdev);
645         return 0;
646 }
647
    /* Look up device @dev, close it via hci_dev_do_close(), and drop the
     * lookup reference. Returns 0 or -ENODEV. */
648 int hci_dev_close(__u16 dev)
649 {
650         struct hci_dev *hdev;
651         int err;
652
653         hdev = hci_dev_get(dev);
654         if (!hdev)
655                 return -ENODEV;
656         err = hci_dev_do_close(hdev);
657         hci_dev_put(hdev);
658         return err;
659 }
660
    /* HCIDEVRESET ioctl backend: flush queues, caches and connections,
     * then issue an HCI reset (non-raw devices). A no-op when the device
     * is down. Returns 0 or a negative errno. */
661 int hci_dev_reset(__u16 dev)
662 {
663         struct hci_dev *hdev;
664         int ret = 0;
665
666         hdev = hci_dev_get(dev);
667         if (!hdev)
668                 return -ENODEV;
669
            /* TX is disabled (not killed) so it can be re-enabled on exit
             * regardless of whether the device was up. */
670         hci_req_lock(hdev);
671         tasklet_disable(&hdev->tx_task);
672
673         if (!test_bit(HCI_UP, &hdev->flags))
674                 goto done;
675
676         /* Drop queues */
677         skb_queue_purge(&hdev->rx_q);
678         skb_queue_purge(&hdev->cmd_q);
679
680         hci_dev_lock_bh(hdev);
681         inquiry_cache_flush(hdev);
682         hci_conn_hash_flush(hdev);
683         hci_dev_unlock_bh(hdev);
684
685         if (hdev->flush)
686                 hdev->flush(hdev);
687
            /* Reset flow-control counters before asking for the HW reset. */
688         atomic_set(&hdev->cmd_cnt, 1);
689         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
690
691         if (!test_bit(HCI_RAW, &hdev->flags))
692                 ret = __hci_request(hdev, hci_reset_req, 0,
693                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
694
695 done:
696         tasklet_enable(&hdev->tx_task);
697         hci_req_unlock(hdev);
698         hci_dev_put(hdev);
699         return ret;
700 }
701
    /* HCIDEVRESTAT ioctl backend: zero the device's statistics counters.
     * Returns 0, or -ENODEV if the device does not exist. */
702 int hci_dev_reset_stat(__u16 dev)
703 {
704         struct hci_dev *hdev;
705         int ret = 0;
706
707         hdev = hci_dev_get(dev);
708         if (!hdev)
709                 return -ENODEV;
710
711         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
712
713         hci_dev_put(hdev);
714
715         return ret;
716 }
717
    /* Dispatcher for the HCISET* device ioctls: copies a struct hci_dev_req
     * from userspace and applies the requested setting, either via an HCI
     * request or by updating hdev fields directly. Returns 0 or -errno. */
718 int hci_dev_cmd(unsigned int cmd, void __user *arg)
719 {
720         struct hci_dev *hdev;
721         struct hci_dev_req dr;
722         int err = 0;
723
724         if (copy_from_user(&dr, arg, sizeof(dr)))
725                 return -EFAULT;
726
727         hdev = hci_dev_get(dr.dev_id);
728         if (!hdev)
729                 return -ENODEV;
730
731         switch (cmd) {
732         case HCISETAUTH:
733                 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
734                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
735                 break;
736
737         case HCISETENCRYPT:
738                 if (!lmp_encrypt_capable(hdev)) {
739                         err = -EOPNOTSUPP;
740                         break;
741                 }
742
743                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
744                         /* Auth must be enabled first */
745                         err = hci_request(hdev, hci_auth_req, dr.dev_opt,
746                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
747                         if (err)
748                                 break;
749                 }
750
751                 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
752                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
753                 break;
754
755         case HCISETSCAN:
756                 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
757                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
758                 break;
759
760         case HCISETLINKPOL:
761                 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
762                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
763                 break;
764
765         case HCISETLINKMODE:
766                 hdev->link_mode = ((__u16) dr.dev_opt) &
767                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
768                 break;
769
770         case HCISETPTYPE:
771                 hdev->pkt_type = (__u16) dr.dev_opt;
772                 break;
773
            /* dev_opt packs two __u16 values: [0] = packet count,
             * [1] = MTU. Part of the long-standing ioctl ABI — NOTE(review):
             * host-endian dependent layout, confirm before touching. */
774         case HCISETACLMTU:
775                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
776                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
777                 break;
778
779         case HCISETSCOMTU:
780                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
781                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
782                 break;
783
784         default:
785                 err = -EINVAL;
786                 break;
787         }
788
789         hci_dev_put(hdev);
790         return err;
791 }
792
    /* HCIGETDEVLIST ioctl backend: copy up to the caller-supplied number of
     * device ids and flag words into the user's struct hci_dev_list_req.
     * Returns 0 or a negative errno. */
793 int hci_get_dev_list(void __user *arg)
794 {
795         struct hci_dev *hdev;
796         struct hci_dev_list_req *dl;
797         struct hci_dev_req *dr;
798         int n = 0, size, err;
799         __u16 dev_num;
800
801         if (get_user(dev_num, (__u16 __user *) arg))
802                 return -EFAULT;
803
            /* Bound dev_num so the kzalloc below stays a sane size. */
804         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
805                 return -EINVAL;
806
807         size = sizeof(*dl) + dev_num * sizeof(*dr);
808
809         dl = kzalloc(size, GFP_KERNEL);
810         if (!dl)
811                 return -ENOMEM;
812
813         dr = dl->dev_req;
814
815         read_lock_bh(&hci_dev_list_lock);
816         list_for_each_entry(hdev, &hci_dev_list, list) {
                    /* Listing a device counts as activity: cancel any pending
                     * auto-power-off. */
817                 hci_del_off_timer(hdev);
818
819                 if (!test_bit(HCI_MGMT, &hdev->flags))
820                         set_bit(HCI_PAIRABLE, &hdev->flags);
821
822                 (dr + n)->dev_id  = hdev->id;
823                 (dr + n)->dev_opt = hdev->flags;
824
825                 if (++n >= dev_num)
826                         break;
827         }
828         read_unlock_bh(&hci_dev_list_lock);
829
830         dl->dev_num = n;
831         size = sizeof(*dl) + n * sizeof(*dr);
832
833         err = copy_to_user(arg, dl, size);
834         kfree(dl);
835
836         return err ? -EFAULT : 0;
837 }
838
    /* HCIGETDEVINFO ioctl backend: fill a struct hci_dev_info snapshot for
     * the requested device and copy it to userspace. Returns 0 or -errno. */
839 int hci_get_dev_info(void __user *arg)
840 {
841         struct hci_dev *hdev;
842         struct hci_dev_info di;
843         int err = 0;
844
845         if (copy_from_user(&di, arg, sizeof(di)))
846                 return -EFAULT;
847
848         hdev = hci_dev_get(di.dev_id);
849         if (!hdev)
850                 return -ENODEV;
851
            /* Querying the device counts as activity: cancel auto-off. */
852         hci_del_off_timer(hdev);
853
854         if (!test_bit(HCI_MGMT, &hdev->flags))
855                 set_bit(HCI_PAIRABLE, &hdev->flags);
856
857         strcpy(di.name, hdev->name);
858         di.bdaddr   = hdev->bdaddr;
859         di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
860         di.flags    = hdev->flags;
861         di.pkt_type = hdev->pkt_type;
862         di.acl_mtu  = hdev->acl_mtu;
863         di.acl_pkts = hdev->acl_pkts;
864         di.sco_mtu  = hdev->sco_mtu;
865         di.sco_pkts = hdev->sco_pkts;
866         di.link_policy = hdev->link_policy;
867         di.link_mode   = hdev->link_mode;
868
869         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
870         memcpy(&di.features, &hdev->features, sizeof(di.features));
871
872         if (copy_to_user(arg, &di, sizeof(di)))
873                 err = -EFAULT;
874
875         hci_dev_put(hdev);
876
877         return err;
878 }
879
880 /* ---- Interface to HCI drivers ---- */
881
    /* rfkill callback: close the device when the switch blocks radio.
     * Unblocking does nothing here — the device is not reopened
     * automatically. Always returns 0. */
882 static int hci_rfkill_set_block(void *data, bool blocked)
883 {
884         struct hci_dev *hdev = data;
885
886         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
887
888         if (!blocked)
889                 return 0;
890
891         hci_dev_do_close(hdev);
892
893         return 0;
894 }
895
    /* rfkill operations table registered for each HCI device. */
896 static const struct rfkill_ops hci_rfkill_ops = {
897         .set_block = hci_rfkill_set_block,
898 };
899
900 /* Alloc HCI device */
    /* Allocate a zeroed struct hci_dev with sysfs state and the
     * driver_init queue initialised; NULL on allocation failure.
     * Counterpart of hci_free_dev(). */
901 struct hci_dev *hci_alloc_dev(void)
902 {
903         struct hci_dev *hdev;
904
905         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
906         if (!hdev)
907                 return NULL;
908
909         hci_init_sysfs(hdev);
910         skb_queue_head_init(&hdev->driver_init);
911
912         return hdev;
913 }
914 EXPORT_SYMBOL(hci_alloc_dev);
915
916 /* Free HCI device */
    /* Release a device obtained from hci_alloc_dev(): drain any unused
     * bootstrap commands and drop the embedded device reference; the
     * struct itself is freed by the device release callback. */
917 void hci_free_dev(struct hci_dev *hdev)
918 {
919         skb_queue_purge(&hdev->driver_init);
920
921         /* will free via device release */
922         put_device(&hdev->dev);
923 }
924 EXPORT_SYMBOL(hci_free_dev);
925
    /* Deferred power-on work: open the device, arm the auto-off timer when
     * HCI_AUTO_OFF is set, and announce the index to mgmt once setup is
     * finished. */
926 static void hci_power_on(struct work_struct *work)
927 {
928         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
929
930         BT_DBG("%s", hdev->name);
931
932         if (hci_dev_open(hdev->id) < 0)
933                 return;
934
935         if (test_bit(HCI_AUTO_OFF, &hdev->flags))
936                 mod_timer(&hdev->off_timer,
937                                 jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));
938
939         if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
940                 mgmt_index_added(hdev->id);
941 }
942
    /* Deferred power-off work: simply close the device. Queued by
     * hci_auto_off() when the auto-off timer fires. */
943 static void hci_power_off(struct work_struct *work)
944 {
945         struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);
946
947         BT_DBG("%s", hdev->name);
948
949         hci_dev_close(hdev->id);
950 }
951
    /* off_timer expiry handler (timer context): clear HCI_AUTO_OFF and
     * defer the actual close to the workqueue, since closing can sleep. */
952 static void hci_auto_off(unsigned long data)
953 {
954         struct hci_dev *hdev = (struct hci_dev *) data;
955
956         BT_DBG("%s", hdev->name);
957
958         clear_bit(HCI_AUTO_OFF, &hdev->flags);
959
960         queue_work(hdev->workqueue, &hdev->power_off);
961 }
962
    /* Cancel a pending auto-power-off: clear the flag and delete the
     * timer. Called whenever userspace shows interest in the device. */
963 void hci_del_off_timer(struct hci_dev *hdev)
964 {
965         BT_DBG("%s", hdev->name);
966
967         clear_bit(HCI_AUTO_OFF, &hdev->flags);
968         del_timer(&hdev->off_timer);
969 }
970
971 int hci_uuids_clear(struct hci_dev *hdev)
972 {
973         struct list_head *p, *n;
974
975         list_for_each_safe(p, n, &hdev->uuids) {
976                 struct bt_uuid *uuid;
977
978                 uuid = list_entry(p, struct bt_uuid, list);
979
980                 list_del(p);
981                 kfree(uuid);
982         }
983
984         return 0;
985 }
986
987 int hci_link_keys_clear(struct hci_dev *hdev)
988 {
989         struct list_head *p, *n;
990
991         list_for_each_safe(p, n, &hdev->link_keys) {
992                 struct link_key *key;
993
994                 key = list_entry(p, struct link_key, list);
995
996                 list_del(p);
997                 kfree(key);
998         }
999
1000         return 0;
1001 }
1002
     /* Linear search of the stored link keys for @bdaddr; returns the
      * matching key or NULL. */
1003 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1004 {
1005         struct link_key *k;
1006
1007         list_for_each_entry(k, &hdev->link_keys, list)
1008                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1009                         return k;
1010
1011         return NULL;
1012 }
1013
     /* Decide whether a link key of @key_type (old type @old_key_type)
      * negotiated on @conn should be stored persistently.
      * Returns 1 to persist, 0 to keep it session-only. */
1014 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1015                                                 u8 key_type, u8 old_key_type)
1016 {
1017         /* Legacy key */
1018         if (key_type < 0x03)
1019                 return 1;
1020
1021         /* Debug keys are insecure so don't store them persistently */
1022         if (key_type == HCI_LK_DEBUG_COMBINATION)
1023                 return 0;
1024
1025         /* Changed combination key and there's no previous one */
1026         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1027                 return 0;
1028
1029         /* Security mode 3 case */
1030         if (!conn)
1031                 return 1;
1032
1033         /* Neither local nor remote side had no-bonding as requirement */
1034         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1035                 return 1;
1036
1037         /* Local side had dedicated bonding as requirement */
1038         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1039                 return 1;
1040
1041         /* Remote side had dedicated bonding as requirement */
1042         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1043                 return 1;
1044
1045         /* If none of the above criteria match, then don't store the key
1046          * persistently */
1047         return 0;
1048 }
1049
1050 struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1051 {
1052         struct link_key *k;
1053
1054         list_for_each_entry(k, &hdev->link_keys, list) {
1055                 struct key_master_id *id;
1056
1057                 if (k->type != HCI_LK_SMP_LTK)
1058                         continue;
1059
1060                 if (k->dlen != sizeof(*id))
1061                         continue;
1062
1063                 id = (void *) &k->data;
1064                 if (id->ediv == ediv &&
1065                                 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1066                         return k;
1067         }
1068
1069         return NULL;
1070 }
1071 EXPORT_SYMBOL(hci_find_ltk);
1072
1073 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1074                                         bdaddr_t *bdaddr, u8 type)
1075 {
1076         struct link_key *k;
1077
1078         list_for_each_entry(k, &hdev->link_keys, list)
1079                 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1080                         return k;
1081
1082         return NULL;
1083 }
1084 EXPORT_SYMBOL(hci_find_link_key_type);
1085
/* Store (or update) the link key for @bdaddr. @new_key indicates the
 * controller just generated this key; in that case userspace is
 * notified via mgmt and keys judged non-persistent are dropped again.
 * Returns 0 on success or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address when present */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed-combination event keeps the previous key type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_key(hdev->id, key, persistent);

	/* Non-persistent keys are only reported, not kept around */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1140
/* Store an SMP Long Term Key for @bdaddr. The ediv/rand master
 * identifier is kept in the key's variable-length data area.
 * Returns 0 on success or -ENOMEM. */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	/* Reuse an existing LTK entry for this address when present;
	 * otherwise allocate key plus trailing master-id storage. */
	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	/* Master identifier follows the fixed part of the key */
	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	return 0;
}
1178
1179 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1180 {
1181         struct link_key *key;
1182
1183         key = hci_find_link_key(hdev, bdaddr);
1184         if (!key)
1185                 return -ENOENT;
1186
1187         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1188
1189         list_del(&key->list);
1190         kfree(key);
1191
1192         return 0;
1193 }
1194
/* HCI command timer function: the controller failed to answer the
 * last command in time. Restore one command credit and reschedule the
 * command task so transmission can make progress again. */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	tasklet_schedule(&hdev->cmd_task);
}
1204
1205 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1206                                                         bdaddr_t *bdaddr)
1207 {
1208         struct oob_data *data;
1209
1210         list_for_each_entry(data, &hdev->remote_oob_data, list)
1211                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1212                         return data;
1213
1214         return NULL;
1215 }
1216
1217 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1218 {
1219         struct oob_data *data;
1220
1221         data = hci_find_remote_oob_data(hdev, bdaddr);
1222         if (!data)
1223                 return -ENOENT;
1224
1225         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1226
1227         list_del(&data->list);
1228         kfree(data);
1229
1230         return 0;
1231 }
1232
1233 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1234 {
1235         struct oob_data *data, *n;
1236
1237         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1238                 list_del(&data->list);
1239                 kfree(data);
1240         }
1241
1242         return 0;
1243 }
1244
1245 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1246                                                                 u8 *randomizer)
1247 {
1248         struct oob_data *data;
1249
1250         data = hci_find_remote_oob_data(hdev, bdaddr);
1251
1252         if (!data) {
1253                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1254                 if (!data)
1255                         return -ENOMEM;
1256
1257                 bacpy(&data->bdaddr, bdaddr);
1258                 list_add(&data->list, &hdev->remote_oob_data);
1259         }
1260
1261         memcpy(data->hash, hash, sizeof(data->hash));
1262         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1263
1264         BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1265
1266         return 0;
1267 }
1268
1269 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1270                                                 bdaddr_t *bdaddr)
1271 {
1272         struct bdaddr_list *b;
1273
1274         list_for_each_entry(b, &hdev->blacklist, list)
1275                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1276                         return b;
1277
1278         return NULL;
1279 }
1280
1281 int hci_blacklist_clear(struct hci_dev *hdev)
1282 {
1283         struct list_head *p, *n;
1284
1285         list_for_each_safe(p, n, &hdev->blacklist) {
1286                 struct bdaddr_list *b;
1287
1288                 b = list_entry(p, struct bdaddr_list, list);
1289
1290                 list_del(p);
1291                 kfree(b);
1292         }
1293
1294         return 0;
1295 }
1296
1297 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1298 {
1299         struct bdaddr_list *entry;
1300
1301         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1302                 return -EBADF;
1303
1304         if (hci_blacklist_lookup(hdev, bdaddr))
1305                 return -EEXIST;
1306
1307         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1308         if (!entry)
1309                 return -ENOMEM;
1310
1311         bacpy(&entry->bdaddr, bdaddr);
1312
1313         list_add(&entry->list, &hdev->blacklist);
1314
1315         return mgmt_device_blocked(hdev->id, bdaddr);
1316 }
1317
1318 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1319 {
1320         struct bdaddr_list *entry;
1321
1322         if (bacmp(bdaddr, BDADDR_ANY) == 0) {
1323                 return hci_blacklist_clear(hdev);
1324         }
1325
1326         entry = hci_blacklist_lookup(hdev, bdaddr);
1327         if (!entry) {
1328                 return -ENOENT;
1329         }
1330
1331         list_del(&entry->list);
1332         kfree(entry);
1333
1334         return mgmt_device_unblocked(hdev->id, bdaddr);
1335 }
1336
/* Advertising-cache expiry timer: drop all cached LE advertising
 * entries under the device lock. */
static void hci_clear_adv_cache(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1347
1348 int hci_adv_entries_clear(struct hci_dev *hdev)
1349 {
1350         struct adv_entry *entry, *tmp;
1351
1352         list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1353                 list_del(&entry->list);
1354                 kfree(entry);
1355         }
1356
1357         BT_DBG("%s adv cache cleared", hdev->name);
1358
1359         return 0;
1360 }
1361
1362 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1363 {
1364         struct adv_entry *entry;
1365
1366         list_for_each_entry(entry, &hdev->adv_entries, list)
1367                 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1368                         return entry;
1369
1370         return NULL;
1371 }
1372
1373 static inline int is_connectable_adv(u8 evt_type)
1374 {
1375         if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1376                 return 1;
1377
1378         return 0;
1379 }
1380
/* Cache a connectable LE advertising report (address + address type).
 * Returns -EINVAL for non-connectable report types, 0 when the
 * address is already cached or was added, -ENOMEM on allocation
 * failure. */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1408
1409 /* Register HCI device */
1410 int hci_register_dev(struct hci_dev *hdev)
1411 {
1412         struct list_head *head = &hci_dev_list, *p;
1413         int i, id = 0, error;
1414
1415         BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
1416                                                 hdev->bus, hdev->owner);
1417
1418         if (!hdev->open || !hdev->close || !hdev->destruct)
1419                 return -EINVAL;
1420
1421         write_lock_bh(&hci_dev_list_lock);
1422
1423         /* Find first available device id */
1424         list_for_each(p, &hci_dev_list) {
1425                 if (list_entry(p, struct hci_dev, list)->id != id)
1426                         break;
1427                 head = p; id++;
1428         }
1429
1430         sprintf(hdev->name, "hci%d", id);
1431         hdev->id = id;
1432         list_add(&hdev->list, head);
1433
1434         atomic_set(&hdev->refcnt, 1);
1435         spin_lock_init(&hdev->lock);
1436
1437         hdev->flags = 0;
1438         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1439         hdev->esco_type = (ESCO_HV1);
1440         hdev->link_mode = (HCI_LM_ACCEPT);
1441         hdev->io_capability = 0x03; /* No Input No Output */
1442
1443         hdev->idle_timeout = 0;
1444         hdev->sniff_max_interval = 800;
1445         hdev->sniff_min_interval = 80;
1446
1447         tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
1448         tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
1449         tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
1450
1451         skb_queue_head_init(&hdev->rx_q);
1452         skb_queue_head_init(&hdev->cmd_q);
1453         skb_queue_head_init(&hdev->raw_q);
1454
1455         setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1456
1457         for (i = 0; i < NUM_REASSEMBLY; i++)
1458                 hdev->reassembly[i] = NULL;
1459
1460         init_waitqueue_head(&hdev->req_wait_q);
1461         mutex_init(&hdev->req_lock);
1462
1463         inquiry_cache_init(hdev);
1464
1465         hci_conn_hash_init(hdev);
1466
1467         INIT_LIST_HEAD(&hdev->blacklist);
1468
1469         INIT_LIST_HEAD(&hdev->uuids);
1470
1471         INIT_LIST_HEAD(&hdev->link_keys);
1472
1473         INIT_LIST_HEAD(&hdev->remote_oob_data);
1474
1475         INIT_LIST_HEAD(&hdev->adv_entries);
1476         setup_timer(&hdev->adv_timer, hci_clear_adv_cache,
1477                                                 (unsigned long) hdev);
1478
1479         INIT_WORK(&hdev->power_on, hci_power_on);
1480         INIT_WORK(&hdev->power_off, hci_power_off);
1481         setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);
1482
1483         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1484
1485         atomic_set(&hdev->promisc, 0);
1486
1487         write_unlock_bh(&hci_dev_list_lock);
1488
1489         hdev->workqueue = create_singlethread_workqueue(hdev->name);
1490         if (!hdev->workqueue) {
1491                 error = -ENOMEM;
1492                 goto err;
1493         }
1494
1495         error = hci_add_sysfs(hdev);
1496         if (error < 0)
1497                 goto err_wqueue;
1498
1499         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1500                                 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1501         if (hdev->rfkill) {
1502                 if (rfkill_register(hdev->rfkill) < 0) {
1503                         rfkill_destroy(hdev->rfkill);
1504                         hdev->rfkill = NULL;
1505                 }
1506         }
1507
1508         set_bit(HCI_AUTO_OFF, &hdev->flags);
1509         set_bit(HCI_SETUP, &hdev->flags);
1510         queue_work(hdev->workqueue, &hdev->power_on);
1511
1512         hci_notify(hdev, HCI_DEV_REG);
1513
1514         return id;
1515
1516 err_wqueue:
1517         destroy_workqueue(hdev->workqueue);
1518 err:
1519         write_lock_bh(&hci_dev_list_lock);
1520         list_del(&hdev->list);
1521         write_unlock_bh(&hci_dev_list_lock);
1522
1523         return error;
1524 }
1525 EXPORT_SYMBOL(hci_register_dev);
1526
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Remove from the global device list first so no new users can
	 * look the device up. */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled frames */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Stop pending timers before tearing down the state they use */
	hci_del_off_timer(hdev);
	del_timer(&hdev->adv_timer);

	destroy_workqueue(hdev->workqueue);

	/* Free all per-device lists under the device lock */
	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock_bh(hdev);

	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1572
/* Suspend HCI device: just notify listeners; the transport driver
 * handles the actual suspend. Always returns 0. */
int hci_suspend_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1580
/* Resume HCI device: just notify listeners; the transport driver
 * handles the actual resume. Always returns 0. */
int hci_resume_dev(struct hci_dev *hdev)
{
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1588
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	/* Drop frames unless the device is up or being initialized */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Queue frame for rx task */
	skb_queue_tail(&hdev->rx_q, skb);
	tasklet_schedule(&hdev->rx_task);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1612
/* Reassemble a (possibly fragmented) HCI packet of @type from @data.
 * Per-slot state is kept in hdev->reassembly[@index]. Returns the
 * number of input bytes not yet consumed, or a negative error. */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: allocate a buffer sized for
		 * the largest frame of this type and expect the header
		 * first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still needed */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once a full header has arrived, read the payload
		 * length from it and expect that many more bytes. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len  == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1721
1722 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1723 {
1724         int rem = 0;
1725
1726         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1727                 return -EILSEQ;
1728
1729         while (count) {
1730                 rem = hci_reassembly(hdev, type, data, count, type - 1);
1731                 if (rem < 0)
1732                         return rem;
1733
1734                 data += (count - rem);
1735                 count = rem;
1736         }
1737
1738         return rem;
1739 }
1740 EXPORT_SYMBOL(hci_recv_fragment);
1741
/* Dedicated reassembly slot for byte-stream transports */
#define STREAM_REASSEMBLY 0

/* Reassemble HCI packets from a raw byte stream (e.g. a UART
 * transport): the first byte of each frame carries the packet type
 * indicator. Returns the unconsumed byte count or a negative error. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1776
1777 /* ---- Interface to upper protocols ---- */
1778
1779 /* Register/Unregister protocols.
1780  * hci_task_lock is used to ensure that no tasks are running. */
1781 int hci_register_proto(struct hci_proto *hp)
1782 {
1783         int err = 0;
1784
1785         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1786
1787         if (hp->id >= HCI_MAX_PROTO)
1788                 return -EINVAL;
1789
1790         write_lock_bh(&hci_task_lock);
1791
1792         if (!hci_proto[hp->id])
1793                 hci_proto[hp->id] = hp;
1794         else
1795                 err = -EEXIST;
1796
1797         write_unlock_bh(&hci_task_lock);
1798
1799         return err;
1800 }
1801 EXPORT_SYMBOL(hci_register_proto);
1802
1803 int hci_unregister_proto(struct hci_proto *hp)
1804 {
1805         int err = 0;
1806
1807         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1808
1809         if (hp->id >= HCI_MAX_PROTO)
1810                 return -EINVAL;
1811
1812         write_lock_bh(&hci_task_lock);
1813
1814         if (hci_proto[hp->id])
1815                 hci_proto[hp->id] = NULL;
1816         else
1817                 err = -ENOENT;
1818
1819         write_unlock_bh(&hci_task_lock);
1820
1821         return err;
1822 }
1823 EXPORT_SYMBOL(hci_unregister_proto);
1824
/* Register an upper-layer callback set; the list is protected by
 * hci_cb_list_lock. Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1836
/* Unregister an upper-layer callback set. Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock_bh(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock_bh(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1848
/* Hand one frame to the driver's send callback. If anyone is in
 * promiscuous mode, a timestamped copy goes to the HCI sockets
 * first. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1872
/* Send HCI command: build a command packet with @opcode and @plen
 * bytes of @param, then queue it for the command task.
 * Returns 0 or -ENOMEM if skb allocation fails. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Record the last command issued during init */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
1908
1909 /* Get data from the previously sent command */
1910 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1911 {
1912         struct hci_command_hdr *hdr;
1913
1914         if (!hdev->sent_cmd)
1915                 return NULL;
1916
1917         hdr = (void *) hdev->sent_cmd->data;
1918
1919         if (hdr->opcode != cpu_to_le16(opcode))
1920                 return NULL;
1921
1922         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1923
1924         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1925 }
1926
/* Send ACL data */
/* Prepend the ACL header (packed handle/flags plus data length) in
 * front of the skb's payload. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
1939
/* Queue ACL data on @conn and kick the TX task. An skb with a
 * frag_list is split into individual fragments: the first keeps the
 * caller's @flags, the rest are marked ACL_CONT. */
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		/* Continuation fragments carry ACL_CONT, not ACL_START */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1988
/* Send SCO data */
/* Prepend the SCO header (handle + length), queue the frame on the
 * connection's data queue and kick the TX task. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
2011
2012 /* ---- HCI TX task (outgoing data) ---- */
2013
2014 /* HCI Connection scheduler */
2015 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2016 {
2017         struct hci_conn_hash *h = &hdev->conn_hash;
2018         struct hci_conn *conn = NULL, *c;
2019         int num = 0, min = ~0;
2020
2021         /* We don't have to lock device here. Connections are always
2022          * added and removed with TX task disabled. */
2023         list_for_each_entry(c, &h->list, list) {
2024                 if (c->type != type || skb_queue_empty(&c->data_q))
2025                         continue;
2026
2027                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2028                         continue;
2029
2030                 num++;
2031
2032                 if (c->sent < min) {
2033                         min  = c->sent;
2034                         conn = c;
2035                 }
2036
2037                 if (hci_conn_num(hdev, type) == num)
2038                         break;
2039         }
2040
2041         if (conn) {
2042                 int cnt, q;
2043
2044                 switch (conn->type) {
2045                 case ACL_LINK:
2046                         cnt = hdev->acl_cnt;
2047                         break;
2048                 case SCO_LINK:
2049                 case ESCO_LINK:
2050                         cnt = hdev->sco_cnt;
2051                         break;
2052                 case LE_LINK:
2053                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2054                         break;
2055                 default:
2056                         cnt = 0;
2057                         BT_ERR("Unknown link type");
2058                 }
2059
2060                 q = cnt / num;
2061                 *quote = q ? q : 1;
2062         } else
2063                 *quote = 0;
2064
2065         BT_DBG("conn %p quote %d", conn, *quote);
2066         return conn;
2067 }
2068
2069 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2070 {
2071         struct hci_conn_hash *h = &hdev->conn_hash;
2072         struct hci_conn *c;
2073
2074         BT_ERR("%s link tx timeout", hdev->name);
2075
2076         /* Kill stalled connections */
2077         list_for_each_entry(c, &h->list, list) {
2078                 if (c->type == type && c->sent) {
2079                         BT_ERR("%s killing stalled connection %s",
2080                                 hdev->name, batostr(&c->dst));
2081                         hci_acl_disconn(c, 0x13);
2082                 }
2083         }
2084 }
2085
2086 static inline void hci_sched_acl(struct hci_dev *hdev)
2087 {
2088         struct hci_conn *conn;
2089         struct sk_buff *skb;
2090         int quote;
2091
2092         BT_DBG("%s", hdev->name);
2093
2094         if (!hci_conn_num(hdev, ACL_LINK))
2095                 return;
2096
2097         if (!test_bit(HCI_RAW, &hdev->flags)) {
2098                 /* ACL tx timeout must be longer than maximum
2099                  * link supervision timeout (40.9 seconds) */
2100                 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
2101                         hci_link_tx_to(hdev, ACL_LINK);
2102         }
2103
2104         while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
2105                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2106                         BT_DBG("skb %p len %d", skb, skb->len);
2107
2108                         hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2109
2110                         hci_send_frame(skb);
2111                         hdev->acl_last_tx = jiffies;
2112
2113                         hdev->acl_cnt--;
2114                         conn->sent++;
2115                 }
2116         }
2117 }
2118
2119 /* Schedule SCO */
2120 static inline void hci_sched_sco(struct hci_dev *hdev)
2121 {
2122         struct hci_conn *conn;
2123         struct sk_buff *skb;
2124         int quote;
2125
2126         BT_DBG("%s", hdev->name);
2127
2128         if (!hci_conn_num(hdev, SCO_LINK))
2129                 return;
2130
2131         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2132                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2133                         BT_DBG("skb %p len %d", skb, skb->len);
2134                         hci_send_frame(skb);
2135
2136                         conn->sent++;
2137                         if (conn->sent == ~0)
2138                                 conn->sent = 0;
2139                 }
2140         }
2141 }
2142
2143 static inline void hci_sched_esco(struct hci_dev *hdev)
2144 {
2145         struct hci_conn *conn;
2146         struct sk_buff *skb;
2147         int quote;
2148
2149         BT_DBG("%s", hdev->name);
2150
2151         if (!hci_conn_num(hdev, ESCO_LINK))
2152                 return;
2153
2154         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2155                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2156                         BT_DBG("skb %p len %d", skb, skb->len);
2157                         hci_send_frame(skb);
2158
2159                         conn->sent++;
2160                         if (conn->sent == ~0)
2161                                 conn->sent = 0;
2162                 }
2163         }
2164 }
2165
2166 static inline void hci_sched_le(struct hci_dev *hdev)
2167 {
2168         struct hci_conn *conn;
2169         struct sk_buff *skb;
2170         int quote, cnt;
2171
2172         BT_DBG("%s", hdev->name);
2173
2174         if (!hci_conn_num(hdev, LE_LINK))
2175                 return;
2176
2177         if (!test_bit(HCI_RAW, &hdev->flags)) {
2178                 /* LE tx timeout must be longer than maximum
2179                  * link supervision timeout (40.9 seconds) */
2180                 if (!hdev->le_cnt && hdev->le_pkts &&
2181                                 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2182                         hci_link_tx_to(hdev, LE_LINK);
2183         }
2184
2185         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2186         while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
2187                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2188                         BT_DBG("skb %p len %d", skb, skb->len);
2189
2190                         hci_send_frame(skb);
2191                         hdev->le_last_tx = jiffies;
2192
2193                         cnt--;
2194                         conn->sent++;
2195                 }
2196         }
2197         if (hdev->le_pkts)
2198                 hdev->le_cnt = cnt;
2199         else
2200                 hdev->acl_cnt = cnt;
2201 }
2202
2203 static void hci_tx_task(unsigned long arg)
2204 {
2205         struct hci_dev *hdev = (struct hci_dev *) arg;
2206         struct sk_buff *skb;
2207
2208         read_lock(&hci_task_lock);
2209
2210         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2211                 hdev->sco_cnt, hdev->le_cnt);
2212
2213         /* Schedule queues and send stuff to HCI driver */
2214
2215         hci_sched_acl(hdev);
2216
2217         hci_sched_sco(hdev);
2218
2219         hci_sched_esco(hdev);
2220
2221         hci_sched_le(hdev);
2222
2223         /* Send next queued raw (unknown type) packet */
2224         while ((skb = skb_dequeue(&hdev->raw_q)))
2225                 hci_send_frame(skb);
2226
2227         read_unlock(&hci_task_lock);
2228 }
2229
2230 /* ----- HCI RX task (incoming data processing) ----- */
2231
2232 /* ACL data packet */
2233 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2234 {
2235         struct hci_acl_hdr *hdr = (void *) skb->data;
2236         struct hci_conn *conn;
2237         __u16 handle, flags;
2238
2239         skb_pull(skb, HCI_ACL_HDR_SIZE);
2240
2241         handle = __le16_to_cpu(hdr->handle);
2242         flags  = hci_flags(handle);
2243         handle = hci_handle(handle);
2244
2245         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2246
2247         hdev->stat.acl_rx++;
2248
2249         hci_dev_lock(hdev);
2250         conn = hci_conn_hash_lookup_handle(hdev, handle);
2251         hci_dev_unlock(hdev);
2252
2253         if (conn) {
2254                 register struct hci_proto *hp;
2255
2256                 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2257
2258                 /* Send to upper protocol */
2259                 hp = hci_proto[HCI_PROTO_L2CAP];
2260                 if (hp && hp->recv_acldata) {
2261                         hp->recv_acldata(conn, skb, flags);
2262                         return;
2263                 }
2264         } else {
2265                 BT_ERR("%s ACL packet for unknown connection handle %d",
2266                         hdev->name, handle);
2267         }
2268
2269         kfree_skb(skb);
2270 }
2271
2272 /* SCO data packet */
2273 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2274 {
2275         struct hci_sco_hdr *hdr = (void *) skb->data;
2276         struct hci_conn *conn;
2277         __u16 handle;
2278
2279         skb_pull(skb, HCI_SCO_HDR_SIZE);
2280
2281         handle = __le16_to_cpu(hdr->handle);
2282
2283         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2284
2285         hdev->stat.sco_rx++;
2286
2287         hci_dev_lock(hdev);
2288         conn = hci_conn_hash_lookup_handle(hdev, handle);
2289         hci_dev_unlock(hdev);
2290
2291         if (conn) {
2292                 register struct hci_proto *hp;
2293
2294                 /* Send to upper protocol */
2295                 hp = hci_proto[HCI_PROTO_SCO];
2296                 if (hp && hp->recv_scodata) {
2297                         hp->recv_scodata(conn, skb);
2298                         return;
2299                 }
2300         } else {
2301                 BT_ERR("%s SCO packet for unknown connection handle %d",
2302                         hdev->name, handle);
2303         }
2304
2305         kfree_skb(skb);
2306 }
2307
/* RX tasklet: deliver queued frames from the driver. Order matters:
 * promiscuous sockets get a copy first, raw mode drops everything here,
 * data packets are discarded while HCI_INIT is set, then frames are
 * dispatched by packet type. */
static void hci_rx_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	read_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* Raw mode: userspace handles everything, nothing more
		 * to do in the kernel. */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in this state. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}

	read_unlock(&hci_task_lock);
}
2362
/* CMD tasklet: send the next queued HCI command once the controller has
 * command credit (cmd_cnt) and arm the command response timeout. */
static void hci_cmd_task(unsigned long arg)
{
	struct hci_dev *hdev = (struct hci_dev *) arg;
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previous command; kfree_skb(NULL) is a no-op. */
		kfree_skb(hdev->sent_cmd);

		/* Keep a clone so event handlers can match the response
		 * against the command that was sent. */
		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* No response timeout while a reset is in flight. */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed (no memory): requeue at the head and
			 * retry from the tasklet later. */
			skb_queue_head(&hdev->cmd_q, skb);
			tasklet_schedule(&hdev->cmd_task);
		}
	}
}