Bluetooth: Fix old_key_type logic for non-persistent keys
[linux-flexiantxendom0.git] / net / bluetooth / hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI core. */
26
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <net/sock.h>
46
47 #include <asm/system.h>
48 #include <linux/uaccess.h>
49 #include <asm/unaligned.h>
50
51 #include <net/bluetooth/bluetooth.h>
52 #include <net/bluetooth/hci_core.h>
53
54 #define AUTO_OFF_TIMEOUT 2000
55
56 static void hci_cmd_task(unsigned long arg);
57 static void hci_rx_task(unsigned long arg);
58 static void hci_tx_task(unsigned long arg);
59
60 static DEFINE_RWLOCK(hci_task_lock);
61
62 /* HCI device list */
63 LIST_HEAD(hci_dev_list);
64 DEFINE_RWLOCK(hci_dev_list_lock);
65
66 /* HCI callback list */
67 LIST_HEAD(hci_cb_list);
68 DEFINE_RWLOCK(hci_cb_list_lock);
69
70 /* HCI protocols */
71 #define HCI_MAX_PROTO   2
72 struct hci_proto *hci_proto[HCI_MAX_PROTO];
73
74 /* HCI notifiers list */
75 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
76
77 /* ---- HCI notifications ---- */
78
/* Register @nb on the global HCI event notifier chain. */
int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}
83
/* Remove @nb from the global HCI event notifier chain. */
int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}
88
/* Broadcast a device event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) to every
 * registered notifier; @hdev is passed as the notifier payload. */
static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
93
94 /* ---- HCI requests ---- */
95
/* Complete the pending synchronous request for command @cmd with
 * @result and wake the sleeper in __hci_request().  During the init
 * phase only the last queued init command may finish the request. */
void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase check if the completed command matches
         * the last init command, and if not just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
                return;

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}
112
/* Abort a pending synchronous request with error @err (positive errno)
 * and wake the sleeper in __hci_request(). */
static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}
123
/* Execute request and wait for completion.
 *
 * Runs @req (which queues one or more HCI commands with @opt) and then
 * sleeps interruptibly until hci_req_complete()/hci_req_cancel() wakes
 * us or @timeout (jiffies) expires.  Caller must hold the request lock;
 * hci_request() is the locking wrapper.  Returns 0 or negative errno. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        /* NOTE(review): on a pending signal we return without resetting
         * req_status, leaving it HCI_REQ_PEND; a late completion could
         * then see a stale request.  Confirm callers tolerate this
         * before relying on it. */
        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                /* Controller status code mapped to a negative errno */
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                /* req_result already holds a positive errno */
                err = -hdev->req_result;
                break;

        default:
                /* Woken by timeout, not by completion */
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}
166
/* Serialized wrapper around __hci_request(): takes the request lock so
 * only one synchronous request runs at a time.  Fails with -ENETDOWN
 * when the device is not up. */
static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
182
/* Request callback: issue HCI_Reset.  HCI_RESET is set so the event
 * handler knows a reset is in flight; @opt is unused. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
191
/* Request callback: queue the full controller init sequence.  First any
 * driver-supplied commands from hdev->driver_init, then the mandatory
 * reads (features, version, buffer size, address, class, name, voice
 * setting), then optional setup.  @opt is unused. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_cp_delete_stored_link_key cp;
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands: replay driver-provided init packets first */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset (skipped for controllers that can't handle it) */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_RESET, &hdev->flags);
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
        }

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs (0x7d00 baseband slots) */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Wipe any link keys the controller stored itself */
        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
268
/* Request callback: LE-specific init, run only for LE-capable
 * controllers (see hci_dev_open()).  @opt is unused. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
276
/* Request callback: set inquiry/page scan enable to the low byte of
 * @opt (HCISETSCAN ioctl). */
static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}
286
/* Request callback: write authentication enable from the low byte of
 * @opt (HCISETAUTH ioctl). */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
296
/* Request callback: write encryption mode from the low byte of @opt
 * (HCISETENCRYPT ioctl). */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
306
/* Request callback: write the default link policy from @opt
 * (HCISETLINKPOL ioctl); value goes out little-endian. */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
316
/* Get HCI device by index.
 * Device is held on return; the caller must release it with
 * hci_dev_put().  Returns NULL for a negative index or when no
 * registered device matches. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
340
341 /* ---- Inquiry support ---- */
342 static void inquiry_cache_flush(struct hci_dev *hdev)
343 {
344         struct inquiry_cache *cache = &hdev->inq_cache;
345         struct inquiry_entry *next  = cache->list, *e;
346
347         BT_DBG("cache %p", cache);
348
349         cache->list = NULL;
350         while ((e = next)) {
351                 next = e->next;
352                 kfree(e);
353         }
354 }
355
356 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
357 {
358         struct inquiry_cache *cache = &hdev->inq_cache;
359         struct inquiry_entry *e;
360
361         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
362
363         for (e = cache->list; e; e = e->next)
364                 if (!bacmp(&e->data.bdaddr, bdaddr))
365                         break;
366         return e;
367 }
368
/* Insert or refresh the inquiry-cache entry for @data->bdaddr.  A new
 * entry is prepended to the singly-linked cache list; on allocation
 * failure the update is silently dropped (GFP_ATOMIC context). */
void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (!ie) {
                /* Entry not in the cache. Add new one. */
                ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
                if (!ie)
                        return;

                ie->next = cache->list;
                cache->list = ie;
        }

        /* Overwrite existing data and bump both timestamps */
        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;
}
391
/* Serialize up to @num cached inquiry results into @buf as an array of
 * struct inquiry_info.  Returns the number of entries written.  Called
 * under the dev lock and must not sleep (see hci_inquiry()). */
static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}
413
/* Request callback: start an inquiry using the parameters in the
 * struct hci_inquiry_req passed via @opt.  A no-op when an inquiry is
 * already in progress. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
430
/* HCIINQUIRY ioctl: run (or reuse a fresh cache of) an inquiry and
 * copy the results back to userspace.  @arg points to a
 * struct hci_inquiry_req followed by space for the responses.
 * Returns 0 or a negative errno. */
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        /* Only go to the radio when the cache is stale, empty, or the
         * caller explicitly asked for a flush */
        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        /* ir.length is in 1.28s units per the HCI spec; 2000ms per unit
         * leaves headroom over the controller's own timeout */
        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* for unlimited number of responses we will use buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        /* Write back the (possibly reduced) response count, then the
         * response records themselves */
        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
496
497 /* ---- HCI ioctl helpers ---- */
498
/* Bring device @dev up: open the transport, run the HCI init sequence
 * (unless the device is raw), then set HCI_UP and notify listeners.
 * On init failure the transport is torn down again.  Returns 0 or a
 * negative errno (-ERFKILL when blocked, -EALREADY when already up). */
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices for now */
        if (hdev->dev_type != HCI_BREDR)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                /* Request lock is already held, so call __hci_request
                 * directly rather than hci_request() */
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (lmp_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                /* Extra reference dropped again in hci_dev_do_close() */
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->flags))
                        mgmt_powered(hdev->id, 1);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
581
582 static int hci_dev_do_close(struct hci_dev *hdev)
583 {
584         BT_DBG("%s %p", hdev->name, hdev);
585
586         hci_req_cancel(hdev, ENODEV);
587         hci_req_lock(hdev);
588
589         /* Stop timer, it might be running */
590         del_timer_sync(&hdev->cmd_timer);
591
592         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
593                 del_timer_sync(&hdev->cmd_timer);
594                 hci_req_unlock(hdev);
595                 return 0;
596         }
597
598         /* Kill RX and TX tasks */
599         tasklet_kill(&hdev->rx_task);
600         tasklet_kill(&hdev->tx_task);
601
602         hci_dev_lock_bh(hdev);
603         inquiry_cache_flush(hdev);
604         hci_conn_hash_flush(hdev);
605         hci_dev_unlock_bh(hdev);
606
607         hci_notify(hdev, HCI_DEV_DOWN);
608
609         if (hdev->flush)
610                 hdev->flush(hdev);
611
612         /* Reset device */
613         skb_queue_purge(&hdev->cmd_q);
614         atomic_set(&hdev->cmd_cnt, 1);
615         if (!test_bit(HCI_RAW, &hdev->flags)) {
616                 set_bit(HCI_INIT, &hdev->flags);
617                 __hci_request(hdev, hci_reset_req, 0,
618                                         msecs_to_jiffies(250));
619                 clear_bit(HCI_INIT, &hdev->flags);
620         }
621
622         /* Kill cmd task */
623         tasklet_kill(&hdev->cmd_task);
624
625         /* Drop queues */
626         skb_queue_purge(&hdev->rx_q);
627         skb_queue_purge(&hdev->cmd_q);
628         skb_queue_purge(&hdev->raw_q);
629
630         /* Drop last sent command */
631         if (hdev->sent_cmd) {
632                 kfree_skb(hdev->sent_cmd);
633                 hdev->sent_cmd = NULL;
634         }
635
636         /* After this point our queues are empty
637          * and no tasks are scheduled. */
638         hdev->close(hdev);
639
640         mgmt_powered(hdev->id, 0);
641
642         /* Clear flags */
643         hdev->flags = 0;
644
645         hci_req_unlock(hdev);
646
647         hci_dev_put(hdev);
648         return 0;
649 }
650
651 int hci_dev_close(__u16 dev)
652 {
653         struct hci_dev *hdev;
654         int err;
655
656         hdev = hci_dev_get(dev);
657         if (!hdev)
658                 return -ENODEV;
659         err = hci_dev_do_close(hdev);
660         hci_dev_put(hdev);
661         return err;
662 }
663
/* HCIDEVRESET ioctl: drop all queued traffic, flush caches and
 * connections, and issue an HCI reset (non-raw devices).  A no-op
 * (returning 0) when the device is not up. */
int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);
        /* Keep the TX tasklet from touching the queues while we purge */
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset flow-control counters to their idle state */
        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
704
705 int hci_dev_reset_stat(__u16 dev)
706 {
707         struct hci_dev *hdev;
708         int ret = 0;
709
710         hdev = hci_dev_get(dev);
711         if (!hdev)
712                 return -ENODEV;
713
714         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
715
716         hci_dev_put(hdev);
717
718         return ret;
719 }
720
/* Dispatch a HCISET* device-configuration ioctl.  @arg points to a
 * struct hci_dev_req carrying the device id and option value.
 * Returns 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                /* Host-side setting only; no command goes to the radio */
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                /* dev_opt packs (mtu << 16) | pkts as two __u16 halves */
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}
795
/* HCIGETDEVLIST ioctl: fill a struct hci_dev_list_req at @arg with up
 * to the caller-supplied number of (id, flags) pairs, one per
 * registered device.  Returns 0 or a negative errno. */
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        /* Cap the kernel allocation at two pages' worth of entries */
        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;

                hdev = list_entry(p, struct hci_dev, list);

                /* Legacy (non-mgmt) access keeps the device powered */
                hci_del_off_timer(hdev);

                if (!test_bit(HCI_MGMT, &hdev->flags))
                        set_bit(HCI_PAIRABLE, &hdev->flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        /* Copy back only the entries actually filled in */
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}
845
/* HCIGETDEVINFO ioctl: fill the struct hci_dev_info at @arg for the
 * device named in it.  Returns 0 or a negative errno. */
int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        /* Legacy (non-mgmt) access keeps the device powered */
        hci_del_off_timer(hdev);

        if (!test_bit(HCI_MGMT, &hdev->flags))
                set_bit(HCI_PAIRABLE, &hdev->flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        /* Low nibble: bus type; high nibble: controller type */
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}
886
887 /* ---- Interface to HCI drivers ---- */
888
/* rfkill callback: close the device when the switch blocks the radio.
 * Unblocking is a no-op here; the device must be re-opened explicitly. */
static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}
902
/* Operations registered with the rfkill core for each HCI device */
static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};
906
/* Alloc HCI device.
 * Zero-allocates a struct hci_dev and initializes the driver_init
 * queue; everything else is set up at registration time.  Returns
 * NULL on allocation failure.  Free with hci_free_dev(). */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);
921
/* Free HCI device.
 * Purges any unsent driver-init packets and drops the embedded device
 * reference; the struct itself is freed by the device release hook. */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
931
/* Workqueue handler: power the device on.  If it came up in auto-off
 * mode, arm the timer that powers it back down unless userspace claims
 * it; announce the new index to mgmt once setup completes. */
static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->flags))
                mod_timer(&hdev->off_timer,
                                jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
                mgmt_index_added(hdev->id);
}
948
/* Workqueue handler: power the device off (queued from the auto-off
 * timer, since hci_dev_close() may sleep). */
static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

        BT_DBG("%s", hdev->name);

        hci_dev_close(hdev->id);
}
957
/* Timer callback: auto-off expired without anyone claiming the device.
 * Runs in timer (atomic) context, so the actual close is deferred to
 * the power_off work item. */
static void hci_auto_off(unsigned long data)
{
        struct hci_dev *hdev = (struct hci_dev *) data;

        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);

        queue_work(hdev->workqueue, &hdev->power_off);
}
968
/* Cancel the pending auto-off: clear the flag and stop the timer.
 * Called when something (ioctl, mgmt) takes ownership of the device. */
void hci_del_off_timer(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);
        del_timer(&hdev->off_timer);
}
976
977 int hci_uuids_clear(struct hci_dev *hdev)
978 {
979         struct list_head *p, *n;
980
981         list_for_each_safe(p, n, &hdev->uuids) {
982                 struct bt_uuid *uuid;
983
984                 uuid = list_entry(p, struct bt_uuid, list);
985
986                 list_del(p);
987                 kfree(uuid);
988         }
989
990         return 0;
991 }
992
993 int hci_link_keys_clear(struct hci_dev *hdev)
994 {
995         struct list_head *p, *n;
996
997         list_for_each_safe(p, n, &hdev->link_keys) {
998                 struct link_key *key;
999
1000                 key = list_entry(p, struct link_key, list);
1001
1002                 list_del(p);
1003                 kfree(key);
1004         }
1005
1006         return 0;
1007 }
1008
1009 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1010 {
1011         struct list_head *p;
1012
1013         list_for_each(p, &hdev->link_keys) {
1014                 struct link_key *k;
1015
1016                 k = list_entry(p, struct link_key, list);
1017
1018                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1019                         return k;
1020         }
1021
1022         return NULL;
1023 }
1024
/* Decide whether a link key may be stored permanently.
 *
 * Returns 1 when the key should survive beyond the current connection,
 * 0 when it must be treated as temporary. old_key_type == 0xff means
 * "no previous key known". */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	/* NOTE(review): 0x02/0x03 appear to be the dedicated-bonding
	 * authentication requirement values - confirm against the HCI
	 * spec's IO capability auth_req constants. */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1060
/* Store (or update) the link key for bdaddr.
 *
 * new_key is non-zero when the key comes from a fresh Link Key
 * Notification (as opposed to being loaded from storage); only then is
 * the persistence check applied and the management interface notified.
 * Returns 0 on success or -ENOMEM. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type;

	/* Reuse an existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key known" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff)
		type = HCI_LK_COMBINATION;

	/* Non-persistent keys are dropped instead of stored */
	if (new_key && !hci_persistent_key(hdev, conn, type, old_key_type)) {
		list_del(&key->list);
		kfree(key);
		return 0;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->type = type;
	key->pin_len = pin_len;

	if (new_key)
		mgmt_new_key(hdev->id, key, old_key_type);

	/* A changed combination key does not change the key type, so
	 * keep the previous type in the stored entry (done after the
	 * mgmt notification, which needs the event's type). */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;

	return 0;
}
1108
1109 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1110 {
1111         struct link_key *key;
1112
1113         key = hci_find_link_key(hdev, bdaddr);
1114         if (!key)
1115                 return -ENOENT;
1116
1117         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1118
1119         list_del(&key->list);
1120         kfree(key);
1121
1122         return 0;
1123 }
1124
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	BT_ERR("%s command tx timeout", hdev->name);
	/* The controller never answered: allow one new command to go
	 * out and kick the command tasklet again. */
	atomic_set(&hdev->cmd_cnt, 1);
	clear_bit(HCI_RESET, &hdev->flags);
	tasklet_schedule(&hdev->cmd_task);
}
1135
1136 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1137                                                         bdaddr_t *bdaddr)
1138 {
1139         struct oob_data *data;
1140
1141         list_for_each_entry(data, &hdev->remote_oob_data, list)
1142                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1143                         return data;
1144
1145         return NULL;
1146 }
1147
1148 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1149 {
1150         struct oob_data *data;
1151
1152         data = hci_find_remote_oob_data(hdev, bdaddr);
1153         if (!data)
1154                 return -ENOENT;
1155
1156         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1157
1158         list_del(&data->list);
1159         kfree(data);
1160
1161         return 0;
1162 }
1163
1164 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1165 {
1166         struct oob_data *data, *n;
1167
1168         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1169                 list_del(&data->list);
1170                 kfree(data);
1171         }
1172
1173         return 0;
1174 }
1175
1176 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1177                                                                 u8 *randomizer)
1178 {
1179         struct oob_data *data;
1180
1181         data = hci_find_remote_oob_data(hdev, bdaddr);
1182
1183         if (!data) {
1184                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1185                 if (!data)
1186                         return -ENOMEM;
1187
1188                 bacpy(&data->bdaddr, bdaddr);
1189                 list_add(&data->list, &hdev->remote_oob_data);
1190         }
1191
1192         memcpy(data->hash, hash, sizeof(data->hash));
1193         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1194
1195         BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1196
1197         return 0;
1198 }
1199
/* Register HCI device */
/* Assign the lowest free id, initialize all per-device state and
 * announce the new controller. Returns the assigned id or a negative
 * errno. The driver must have set up the open/close/destruct
 * callbacks beforehand. */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id = 0;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	/* NOTE(review): assumes the id always fits hdev->name -
	 * consider snprintf; confirm the buffer size in hci_core.h. */
	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	list_add(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	spin_lock_init(&hdev->lock);

	hdev->flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
	tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
	tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_WORK(&hdev->power_off, hci_power_off);
	setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = create_singlethread_workqueue(hdev->name);
	if (!hdev->workqueue)
		goto nomem;

	hci_register_sysfs(hdev);

	/* rfkill registration failure is not fatal: the device simply
	 * has no rfkill switch then */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Power the device on and mark it as being in setup until the
	 * initial configuration completes */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	queue_work(hdev->workqueue, &hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

nomem:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
1307
/* Unregister HCI device */
/* Tear down a controller in the reverse order of registration:
 * unlink, close, free pending buffers, notify, then release all
 * per-device state. Always returns 0. */
int hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce removal for devices that finished setup */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_removed(hdev->id);

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_unregister_sysfs(hdev);

	/* Cancel a pending auto power-off before destroying the
	 * workqueue it would queue work on */
	hci_del_off_timer(hdev);

	destroy_workqueue(hdev->workqueue);

	hci_dev_lock_bh(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_dev_unlock_bh(hdev);

	/* Drop the initial reference taken at registration time */
	__hci_dev_put(hdev);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);
1353
1354 /* Suspend HCI device */
1355 int hci_suspend_dev(struct hci_dev *hdev)
1356 {
1357         hci_notify(hdev, HCI_DEV_SUSPEND);
1358         return 0;
1359 }
1360 EXPORT_SYMBOL(hci_suspend_dev);
1361
1362 /* Resume HCI device */
1363 int hci_resume_dev(struct hci_dev *hdev)
1364 {
1365         hci_notify(hdev, HCI_DEV_RESUME);
1366         return 0;
1367 }
1368 EXPORT_SYMBOL(hci_resume_dev);
1369
1370 /* Receive frame from HCI drivers */
1371 int hci_recv_frame(struct sk_buff *skb)
1372 {
1373         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1374         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1375                                 && !test_bit(HCI_INIT, &hdev->flags))) {
1376                 kfree_skb(skb);
1377                 return -ENXIO;
1378         }
1379
1380         /* Incomming skb */
1381         bt_cb(skb)->incoming = 1;
1382
1383         /* Time stamp */
1384         __net_timestamp(skb);
1385
1386         /* Queue frame for rx task */
1387         skb_queue_tail(&hdev->rx_q, skb);
1388         tasklet_schedule(&hdev->rx_task);
1389
1390         return 0;
1391 }
1392 EXPORT_SYMBOL(hci_recv_frame);
1393
/* Feed raw driver bytes into the reassembly buffer at @index.
 *
 * When no packet is pending, a fresh skb sized for the maximum frame
 * of @type is allocated. Input is copied until either it runs out or
 * the expected header/payload is complete; finished packets are passed
 * on via hci_recv_frame().
 *
 * Returns the number of unconsumed input bytes, or a negative errno
 * (-EILSEQ for an invalid type or index, -ENOMEM when the skb cannot
 * be allocated or the announced payload exceeds the buffer). */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: pick buffer and header sizes
		 * for this packet type */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* Initially expect only the packet header */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		/* Copy no more than what the current stage expects */
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, switch to expecting the
		 * payload length it announces; drop the packet if that
		 * would not fit the allocated buffer */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len  == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1502
1503 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1504 {
1505         int rem = 0;
1506
1507         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1508                 return -EILSEQ;
1509
1510         while (count) {
1511                 rem = hci_reassembly(hdev, type, data, count, type - 1);
1512                 if (rem < 0)
1513                         return rem;
1514
1515                 data += (count - rem);
1516                 count = rem;
1517         };
1518
1519         return rem;
1520 }
1521 EXPORT_SYMBOL(hci_recv_fragment);
1522
1523 #define STREAM_REASSEMBLY 0
1524
1525 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1526 {
1527         int type;
1528         int rem = 0;
1529
1530         while (count) {
1531                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1532
1533                 if (!skb) {
1534                         struct { char type; } *pkt;
1535
1536                         /* Start of the frame */
1537                         pkt = data;
1538                         type = pkt->type;
1539
1540                         data++;
1541                         count--;
1542                 } else
1543                         type = bt_cb(skb)->pkt_type;
1544
1545                 rem = hci_reassembly(hdev, type, data, count,
1546                                                         STREAM_REASSEMBLY);
1547                 if (rem < 0)
1548                         return rem;
1549
1550                 data += (count - rem);
1551                 count = rem;
1552         };
1553
1554         return rem;
1555 }
1556 EXPORT_SYMBOL(hci_recv_stream_fragment);
1557
1558 /* ---- Interface to upper protocols ---- */
1559
1560 /* Register/Unregister protocols.
1561  * hci_task_lock is used to ensure that no tasks are running. */
1562 int hci_register_proto(struct hci_proto *hp)
1563 {
1564         int err = 0;
1565
1566         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1567
1568         if (hp->id >= HCI_MAX_PROTO)
1569                 return -EINVAL;
1570
1571         write_lock_bh(&hci_task_lock);
1572
1573         if (!hci_proto[hp->id])
1574                 hci_proto[hp->id] = hp;
1575         else
1576                 err = -EEXIST;
1577
1578         write_unlock_bh(&hci_task_lock);
1579
1580         return err;
1581 }
1582 EXPORT_SYMBOL(hci_register_proto);
1583
1584 int hci_unregister_proto(struct hci_proto *hp)
1585 {
1586         int err = 0;
1587
1588         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1589
1590         if (hp->id >= HCI_MAX_PROTO)
1591                 return -EINVAL;
1592
1593         write_lock_bh(&hci_task_lock);
1594
1595         if (hci_proto[hp->id])
1596                 hci_proto[hp->id] = NULL;
1597         else
1598                 err = -ENOENT;
1599
1600         write_unlock_bh(&hci_task_lock);
1601
1602         return err;
1603 }
1604 EXPORT_SYMBOL(hci_unregister_proto);
1605
1606 int hci_register_cb(struct hci_cb *cb)
1607 {
1608         BT_DBG("%p name %s", cb, cb->name);
1609
1610         write_lock_bh(&hci_cb_list_lock);
1611         list_add(&cb->list, &hci_cb_list);
1612         write_unlock_bh(&hci_cb_list_lock);
1613
1614         return 0;
1615 }
1616 EXPORT_SYMBOL(hci_register_cb);
1617
1618 int hci_unregister_cb(struct hci_cb *cb)
1619 {
1620         BT_DBG("%p name %s", cb, cb->name);
1621
1622         write_lock_bh(&hci_cb_list_lock);
1623         list_del(&cb->list);
1624         write_unlock_bh(&hci_cb_list_lock);
1625
1626         return 0;
1627 }
1628 EXPORT_SYMBOL(hci_unregister_cb);
1629
1630 static int hci_send_frame(struct sk_buff *skb)
1631 {
1632         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1633
1634         if (!hdev) {
1635                 kfree_skb(skb);
1636                 return -ENODEV;
1637         }
1638
1639         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1640
1641         if (atomic_read(&hdev->promisc)) {
1642                 /* Time stamp */
1643                 __net_timestamp(skb);
1644
1645                 hci_send_to_sock(hdev, skb, NULL);
1646         }
1647
1648         /* Get rid of skb owner, prior to sending to the driver. */
1649         skb_orphan(skb);
1650
1651         return hdev->send(skb);
1652 }
1653
/* Send HCI command */
/* Build a command packet from opcode/plen/param and queue it for the
 * command tasklet. Returns 0 on success or -ENOMEM. */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Record the last command issued during the init phase */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	tasklet_schedule(&hdev->cmd_task);

	return 0;
}
1689
1690 /* Get data from the previously sent command */
1691 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1692 {
1693         struct hci_command_hdr *hdr;
1694
1695         if (!hdev->sent_cmd)
1696                 return NULL;
1697
1698         hdr = (void *) hdev->sent_cmd->data;
1699
1700         if (hdr->opcode != cpu_to_le16(opcode))
1701                 return NULL;
1702
1703         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1704
1705         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1706 }
1707
/* Send ACL data */
/* Prepend the 4-byte ACL header (handle+flags, payload length) to an
 * outgoing skb. len must be captured before skb_push() grows skb. */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
	struct hci_acl_hdr *hdr;
	int len = skb->len;

	skb_push(skb, HCI_ACL_HDR_SIZE);
	skb_reset_transport_header(skb);
	hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
	/* Connection handle and packet boundary/broadcast flags share
	 * one little-endian 16-bit field */
	hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
	hdr->dlen   = cpu_to_le16(len);
}
1720
/* Queue an ACL packet (and any fragments on its frag_list) for @conn
 * and kick the TX tasklet. Each fragment gets its own ACL header. */
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(&conn->data_q, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&conn->data_q.lock);

		__skb_queue_tail(&conn->data_q, skb);

		/* Continuation fragments carry ACL_CONT instead of the
		 * caller-supplied start flag */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(&conn->data_q, skb);
		} while (list);

		spin_unlock_bh(&conn->data_q.lock);
	}

	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
1769
/* Send SCO data */
/* Queue a SCO packet for @conn and kick the TX tasklet. */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
	struct hci_dev *hdev = conn->hdev;
	struct hci_sco_hdr hdr;

	BT_DBG("%s len %d", hdev->name, skb->len);

	/* Build the header first: dlen is the payload length before
	 * skb_push() grows the skb */
	hdr.handle = cpu_to_le16(conn->handle);
	hdr.dlen   = skb->len;

	skb_push(skb, HCI_SCO_HDR_SIZE);
	skb_reset_transport_header(skb);
	memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

	skb_queue_tail(&conn->data_q, skb);
	tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);
1792
1793 /* ---- HCI TX task (outgoing data) ---- */
1794
/* HCI Connection scheduler */
/* Select the connection of @type that has the fewest packets in
 * flight and still has data queued, and compute its fair share
 * (*quote) of the free controller buffers. Returns NULL (quote 0)
 * when nothing of this type is ready to send. */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL;
	int num = 0, min = ~0;
	struct list_head *p;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */
	list_for_each(p, &h->list) {
		struct hci_conn *c;
		c = list_entry(p, struct hci_conn, list);

		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the connection with the fewest unacked packets */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}
	}

	if (conn) {
		int cnt, q;

		/* Pick the buffer budget that matches the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Even share per active connection, at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
1850
1851 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1852 {
1853         struct hci_conn_hash *h = &hdev->conn_hash;
1854         struct list_head *p;
1855         struct hci_conn  *c;
1856
1857         BT_ERR("%s link tx timeout", hdev->name);
1858
1859         /* Kill stalled connections */
1860         list_for_each(p, &h->list) {
1861                 c = list_entry(p, struct hci_conn, list);
1862                 if (c->type == type && c->sent) {
1863                         BT_ERR("%s killing stalled connection %s",
1864                                 hdev->name, batostr(&c->dst));
1865                         hci_acl_disconn(c, 0x13);
1866                 }
1867         }
1868 }
1869
/* Schedule ACL traffic: detect stalled links, then drain per-connection
 * queues within each connection's fair quota while controller ACL
 * buffers remain. */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			/* Leave sniff mode before transmitting */
			hci_conn_enter_active_mode(conn);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			conn->sent++;
		}
	}
}
1899
1900 /* Schedule SCO */
1901 static inline void hci_sched_sco(struct hci_dev *hdev)
1902 {
1903         struct hci_conn *conn;
1904         struct sk_buff *skb;
1905         int quote;
1906
1907         BT_DBG("%s", hdev->name);
1908
1909         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1910                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1911                         BT_DBG("skb %p len %d", skb, skb->len);
1912                         hci_send_frame(skb);
1913
1914                         conn->sent++;
1915                         if (conn->sent == ~0)
1916                                 conn->sent = 0;
1917                 }
1918         }
1919 }
1920
1921 static inline void hci_sched_esco(struct hci_dev *hdev)
1922 {
1923         struct hci_conn *conn;
1924         struct sk_buff *skb;
1925         int quote;
1926
1927         BT_DBG("%s", hdev->name);
1928
1929         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1930                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1931                         BT_DBG("skb %p len %d", skb, skb->len);
1932                         hci_send_frame(skb);
1933
1934                         conn->sent++;
1935                         if (conn->sent == ~0)
1936                                 conn->sent = 0;
1937                 }
1938         }
1939 }
1940
/* Schedule LE traffic. Controllers without a dedicated LE buffer pool
 * (le_pkts == 0) borrow from the ACL budget instead. */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_conn *conn;
	struct sk_buff *skb;
	int quote, cnt;

	BT_DBG("%s", hdev->name);

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the dedicated LE budget when present, the ACL one otherwise */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
		while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
			BT_DBG("skb %p len %d", skb, skb->len);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			conn->sent++;
		}
	}
	/* Write the consumed budget back to whichever pool it came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;
}
1974
1975 static void hci_tx_task(unsigned long arg)
1976 {
1977         struct hci_dev *hdev = (struct hci_dev *) arg;
1978         struct sk_buff *skb;
1979
1980         read_lock(&hci_task_lock);
1981
1982         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
1983                 hdev->sco_cnt, hdev->le_cnt);
1984
1985         /* Schedule queues and send stuff to HCI driver */
1986
1987         hci_sched_acl(hdev);
1988
1989         hci_sched_sco(hdev);
1990
1991         hci_sched_esco(hdev);
1992
1993         hci_sched_le(hdev);
1994
1995         /* Send next queued raw (unknown type) packet */
1996         while ((skb = skb_dequeue(&hdev->raw_q)))
1997                 hci_send_frame(skb);
1998
1999         read_unlock(&hci_task_lock);
2000 }
2001
/* ----- HCI RX task (incoming data processing) ----- */
2003
2004 /* ACL data packet */
2005 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2006 {
2007         struct hci_acl_hdr *hdr = (void *) skb->data;
2008         struct hci_conn *conn;
2009         __u16 handle, flags;
2010
2011         skb_pull(skb, HCI_ACL_HDR_SIZE);
2012
2013         handle = __le16_to_cpu(hdr->handle);
2014         flags  = hci_flags(handle);
2015         handle = hci_handle(handle);
2016
2017         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2018
2019         hdev->stat.acl_rx++;
2020
2021         hci_dev_lock(hdev);
2022         conn = hci_conn_hash_lookup_handle(hdev, handle);
2023         hci_dev_unlock(hdev);
2024
2025         if (conn) {
2026                 register struct hci_proto *hp;
2027
2028                 hci_conn_enter_active_mode(conn);
2029
2030                 /* Send to upper protocol */
2031                 hp = hci_proto[HCI_PROTO_L2CAP];
2032                 if (hp && hp->recv_acldata) {
2033                         hp->recv_acldata(conn, skb, flags);
2034                         return;
2035                 }
2036         } else {
2037                 BT_ERR("%s ACL packet for unknown connection handle %d",
2038                         hdev->name, handle);
2039         }
2040
2041         kfree_skb(skb);
2042 }
2043
2044 /* SCO data packet */
2045 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2046 {
2047         struct hci_sco_hdr *hdr = (void *) skb->data;
2048         struct hci_conn *conn;
2049         __u16 handle;
2050
2051         skb_pull(skb, HCI_SCO_HDR_SIZE);
2052
2053         handle = __le16_to_cpu(hdr->handle);
2054
2055         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2056
2057         hdev->stat.sco_rx++;
2058
2059         hci_dev_lock(hdev);
2060         conn = hci_conn_hash_lookup_handle(hdev, handle);
2061         hci_dev_unlock(hdev);
2062
2063         if (conn) {
2064                 register struct hci_proto *hp;
2065
2066                 /* Send to upper protocol */
2067                 hp = hci_proto[HCI_PROTO_SCO];
2068                 if (hp && hp->recv_scodata) {
2069                         hp->recv_scodata(conn, skb);
2070                         return;
2071                 }
2072         } else {
2073                 BT_ERR("%s SCO packet for unknown connection handle %d",
2074                         hdev->name, handle);
2075         }
2076
2077         kfree_skb(skb);
2078 }
2079
2080 static void hci_rx_task(unsigned long arg)
2081 {
2082         struct hci_dev *hdev = (struct hci_dev *) arg;
2083         struct sk_buff *skb;
2084
2085         BT_DBG("%s", hdev->name);
2086
2087         read_lock(&hci_task_lock);
2088
2089         while ((skb = skb_dequeue(&hdev->rx_q))) {
2090                 if (atomic_read(&hdev->promisc)) {
2091                         /* Send copy to the sockets */
2092                         hci_send_to_sock(hdev, skb, NULL);
2093                 }
2094
2095                 if (test_bit(HCI_RAW, &hdev->flags)) {
2096                         kfree_skb(skb);
2097                         continue;
2098                 }
2099
2100                 if (test_bit(HCI_INIT, &hdev->flags)) {
2101                         /* Don't process data packets in this states. */
2102                         switch (bt_cb(skb)->pkt_type) {
2103                         case HCI_ACLDATA_PKT:
2104                         case HCI_SCODATA_PKT:
2105                                 kfree_skb(skb);
2106                                 continue;
2107                         }
2108                 }
2109
2110                 /* Process frame */
2111                 switch (bt_cb(skb)->pkt_type) {
2112                 case HCI_EVENT_PKT:
2113                         hci_event_packet(hdev, skb);
2114                         break;
2115
2116                 case HCI_ACLDATA_PKT:
2117                         BT_DBG("%s ACL data packet", hdev->name);
2118                         hci_acldata_packet(hdev, skb);
2119                         break;
2120
2121                 case HCI_SCODATA_PKT:
2122                         BT_DBG("%s SCO data packet", hdev->name);
2123                         hci_scodata_packet(hdev, skb);
2124                         break;
2125
2126                 default:
2127                         kfree_skb(skb);
2128                         break;
2129                 }
2130         }
2131
2132         read_unlock(&hci_task_lock);
2133 }
2134
2135 static void hci_cmd_task(unsigned long arg)
2136 {
2137         struct hci_dev *hdev = (struct hci_dev *) arg;
2138         struct sk_buff *skb;
2139
2140         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2141
2142         /* Send queued commands */
2143         if (atomic_read(&hdev->cmd_cnt)) {
2144                 skb = skb_dequeue(&hdev->cmd_q);
2145                 if (!skb)
2146                         return;
2147
2148                 kfree_skb(hdev->sent_cmd);
2149
2150                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2151                 if (hdev->sent_cmd) {
2152                         atomic_dec(&hdev->cmd_cnt);
2153                         hci_send_frame(skb);
2154                         mod_timer(&hdev->cmd_timer,
2155                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2156                 } else {
2157                         skb_queue_head(&hdev->cmd_q, skb);
2158                         tasklet_schedule(&hdev->cmd_task);
2159                 }
2160         }
2161 }