Bluetooth: hci_core: fix NULL-pointer dereference at unregister
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
31
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/rfkill.h>
44 #include <linux/timer.h>
45 #include <linux/crypto.h>
46 #include <net/sock.h>
47
48 #include <asm/system.h>
49 #include <linux/uaccess.h>
50 #include <asm/unaligned.h>
51
52 #include <net/bluetooth/bluetooth.h>
53 #include <net/bluetooth/hci_core.h>
54
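/* Delay in milliseconds before hci_power_on() schedules an automatic
 * power-off for a controller that still has HCI_AUTO_OFF set. */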
55 #define AUTO_OFF_TIMEOUT 2000
56
57 static void hci_rx_work(struct work_struct *work);
58 static void hci_cmd_work(struct work_struct *work);
59 static void hci_tx_work(struct work_struct *work);
60
61 /* HCI device list */
62 LIST_HEAD(hci_dev_list);
63 DEFINE_RWLOCK(hci_dev_list_lock);
64
65 /* HCI callback list */
66 LIST_HEAD(hci_cb_list);
67 DEFINE_RWLOCK(hci_cb_list_lock);
68
69 /* ---- HCI notifications ---- */
70
71 static void hci_notify(struct hci_dev *hdev, int event)
72 {
73         hci_sock_dev_event(hdev, event);
74 }
75
76 /* ---- HCI requests ---- */
77
78 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
79 {
80         BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
81
82         /* If this is the init phase, check whether the completed command
83          * matches the last init command; if not, resend or drop it below.
84          */
85         if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
86                 struct hci_command_hdr *sent;
87                 struct sk_buff *skb;

                /* Guard against a spontaneous event arriving before any
                 * command has been sent (sent_cmd is then still NULL). */
                if (!hdev->sent_cmd)
                        return;

                sent = (void *) hdev->sent_cmd->data;
88
89                 /* Some CSR based controllers generate a spontaneous
90                  * reset complete event during init and any pending
91                  * command will never be completed. In such a case we
92                  * need to resend whatever was the last sent
93                  * command.
94                  */
95
96                 if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
97                         return;
98
99                 skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
100                 if (skb) {
101                         skb_queue_head(&hdev->cmd_q, skb);
102                         queue_work(hdev->workqueue, &hdev->cmd_work);
103                 }
104
105                 return;
106         }
107
108         if (hdev->req_status == HCI_REQ_PEND) {
109                 hdev->req_result = result;
110                 hdev->req_status = HCI_REQ_DONE;
111                 wake_up_interruptible(&hdev->req_wait_q);
112         }
113 }
114
115 static void hci_req_cancel(struct hci_dev *hdev, int err)
116 {
117         BT_DBG("%s err 0x%2.2x", hdev->name, err);
118
119         if (hdev->req_status == HCI_REQ_PEND) {
120                 hdev->req_result = err;
121                 hdev->req_status = HCI_REQ_CANCELED;
122                 wake_up_interruptible(&hdev->req_wait_q);
123         }
124 }
125
126 /* Execute request and wait for completion. */
127 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
128                                         unsigned long opt, __u32 timeout)
129 {
130         DECLARE_WAITQUEUE(wait, current);
131         int err = 0;
132
133         BT_DBG("%s start", hdev->name);
134
135         hdev->req_status = HCI_REQ_PEND;
136
137         add_wait_queue(&hdev->req_wait_q, &wait);
138         set_current_state(TASK_INTERRUPTIBLE);
139
140         req(hdev, opt);
141         schedule_timeout(timeout);
142
143         remove_wait_queue(&hdev->req_wait_q, &wait);
144
145         if (signal_pending(current))
146                 return -EINTR;
147
148         switch (hdev->req_status) {
149         case HCI_REQ_DONE:
150                 err = -bt_to_errno(hdev->req_result);
151                 break;
152
153         case HCI_REQ_CANCELED:
154                 err = -hdev->req_result;
155                 break;
156
157         default:
158                 err = -ETIMEDOUT;
159                 break;
160         }
161
162         hdev->req_status = hdev->req_result = 0;
163
164         BT_DBG("%s end: err %d", hdev->name, err);
165
166         return err;
167 }
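
/*
 * The synchronous request machinery above works as follows: the req()
 * callback queues one or more HCI commands, the caller sleeps on
 * req_wait_q, and the event handlers call hci_req_complete() (or
 * hci_req_cancel() on shutdown) to record the result and wake it up.
 * If neither happens before the timeout, the wait ends in -ETIMEDOUT.
 */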
168
169 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
170                                         unsigned long opt, __u32 timeout)
171 {
172         int ret;
173
174         if (!test_bit(HCI_UP, &hdev->flags))
175                 return -ENETDOWN;
176
177         /* Serialize all requests */
178         hci_req_lock(hdev);
179         ret = __hci_request(hdev, req, opt, timeout);
180         hci_req_unlock(hdev);
181
182         return ret;
183 }
184
185 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
186 {
187         BT_DBG("%s %ld", hdev->name, opt);
188
189         /* Reset device */
190         set_bit(HCI_RESET, &hdev->flags);
191         hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
192 }
193
194 static void bredr_init(struct hci_dev *hdev)
195 {
196         struct hci_cp_delete_stored_link_key cp;
197         __le16 param;
198         __u8 flt_type;
199
200         hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;
201
202         /* Mandatory initialization */
203
204         /* Reset */
205         if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
206                 set_bit(HCI_RESET, &hdev->flags);
207                 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
208         }
209
210         /* Read Local Supported Features */
211         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
212
213         /* Read Local Version */
214         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
215
216         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
217         hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
218
219         /* Read BD Address */
220         hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
221
222         /* Read Class of Device */
223         hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
224
225         /* Read Local Name */
226         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
227
228         /* Read Voice Setting */
229         hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
230
231         /* Optional initialization */
232
233         /* Clear Event Filters */
234         flt_type = HCI_FLT_CLEAR_ALL;
235         hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
236
237         /* Connection accept timeout: 0x7d00 * 0.625 ms = 20 seconds */
238         param = cpu_to_le16(0x7d00);
239         hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
240
241         bacpy(&cp.bdaddr, BDADDR_ANY);
242         cp.delete_all = 1;
243         hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
244 }
245
246 static void amp_init(struct hci_dev *hdev)
247 {
248         hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;
249
250         /* Reset */
251         hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
252
253         /* Read Local Version */
254         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
255 }
256
257 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
258 {
259         struct sk_buff *skb;
260
261         BT_DBG("%s %ld", hdev->name, opt);
262
263         /* Driver initialization */
264
265         /* Special commands */
266         while ((skb = skb_dequeue(&hdev->driver_init))) {
267                 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
268                 skb->dev = (void *) hdev;
269
270                 skb_queue_tail(&hdev->cmd_q, skb);
271                 queue_work(hdev->workqueue, &hdev->cmd_work);
272         }
273         skb_queue_purge(&hdev->driver_init);
274
275         switch (hdev->dev_type) {
276         case HCI_BREDR:
277                 bredr_init(hdev);
278                 break;
279
280         case HCI_AMP:
281                 amp_init(hdev);
282                 break;
283
284         default:
285                 BT_ERR("Unknown device type %d", hdev->dev_type);
286                 break;
287         }
288
289 }
290
291 static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
292 {
293         BT_DBG("%s", hdev->name);
294
295         /* Read LE buffer size */
296         hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
297 }
298
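/* opt is the Write Scan Enable bitmask: SCAN_INQUIRY (0x01) enables
 * inquiry scan, SCAN_PAGE (0x02) enables page scan and SCAN_DISABLED
 * (0x00) turns both off. */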
299 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
300 {
301         __u8 scan = opt;
302
303         BT_DBG("%s %x", hdev->name, scan);
304
305         /* Inquiry and Page scans */
306         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
307 }
308
309 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
310 {
311         __u8 auth = opt;
312
313         BT_DBG("%s %x", hdev->name, auth);
314
315         /* Authentication */
316         hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
317 }
318
319 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
320 {
321         __u8 encrypt = opt;
322
323         BT_DBG("%s %x", hdev->name, encrypt);
324
325         /* Encryption */
326         hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
327 }
328
329 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
330 {
331         __le16 policy = cpu_to_le16(opt);
332
333         BT_DBG("%s %x", hdev->name, policy);
334
335         /* Default link policy */
336         hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
337 }
338
339 /* Get HCI device by index.
340  * Device is held on return. */
341 struct hci_dev *hci_dev_get(int index)
342 {
343         struct hci_dev *hdev = NULL, *d;
344
345         BT_DBG("%d", index);
346
347         if (index < 0)
348                 return NULL;
349
350         read_lock(&hci_dev_list_lock);
351         list_for_each_entry(d, &hci_dev_list, list) {
352                 if (d->id == index) {
353                         hdev = hci_dev_hold(d);
354                         break;
355                 }
356         }
357         read_unlock(&hci_dev_list_lock);
358         return hdev;
359 }
360
361 /* ---- Inquiry support ---- */
362
363 bool hci_discovery_active(struct hci_dev *hdev)
364 {
365         struct discovery_state *discov = &hdev->discovery;
366
367         switch (discov->state) {
368         case DISCOVERY_FINDING:
369         case DISCOVERY_RESOLVING:
370                 return true;
371
372         default:
373                 return false;
374         }
375 }
376
377 void hci_discovery_set_state(struct hci_dev *hdev, int state)
378 {
379         BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);
380
381         if (hdev->discovery.state == state)
382                 return;
383
384         switch (state) {
385         case DISCOVERY_STOPPED:
386                 if (hdev->discovery.state != DISCOVERY_STARTING)
387                         mgmt_discovering(hdev, 0);
388                 hdev->discovery.type = 0;
389                 break;
390         case DISCOVERY_STARTING:
391                 break;
392         case DISCOVERY_FINDING:
393                 mgmt_discovering(hdev, 1);
394                 break;
395         case DISCOVERY_RESOLVING:
396                 break;
397         case DISCOVERY_STOPPING:
398                 break;
399         }
400
401         hdev->discovery.state = state;
402 }
403
404 static void inquiry_cache_flush(struct hci_dev *hdev)
405 {
406         struct discovery_state *cache = &hdev->discovery;
407         struct inquiry_entry *p, *n;
408
409         list_for_each_entry_safe(p, n, &cache->all, all) {
410                 list_del(&p->all);
411                 kfree(p);
412         }
413
414         INIT_LIST_HEAD(&cache->unknown);
415         INIT_LIST_HEAD(&cache->resolve);
416 }
417
418 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
419 {
420         struct discovery_state *cache = &hdev->discovery;
421         struct inquiry_entry *e;
422
423         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
424
425         list_for_each_entry(e, &cache->all, all) {
426                 if (!bacmp(&e->data.bdaddr, bdaddr))
427                         return e;
428         }
429
430         return NULL;
431 }
432
433 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
434                                                        bdaddr_t *bdaddr)
435 {
436         struct discovery_state *cache = &hdev->discovery;
437         struct inquiry_entry *e;
438
439         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
440
441         list_for_each_entry(e, &cache->unknown, list) {
442                 if (!bacmp(&e->data.bdaddr, bdaddr))
443                         return e;
444         }
445
446         return NULL;
447 }
448
449 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
450                                                        bdaddr_t *bdaddr,
451                                                        int state)
452 {
453         struct discovery_state *cache = &hdev->discovery;
454         struct inquiry_entry *e;
455
456         BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
457
458         list_for_each_entry(e, &cache->resolve, list) {
459                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
460                         return e;
461                 if (!bacmp(&e->data.bdaddr, bdaddr))
462                         return e;
463         }
464
465         return NULL;
466 }
467
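/* Keep the resolve list sorted by ascending abs(rssi), so the strongest
 * (least negative) responders get their names resolved first. Entries
 * whose resolution is already pending are never displaced. */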
468 void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
469                                       struct inquiry_entry *ie)
470 {
471         struct discovery_state *cache = &hdev->discovery;
472         struct list_head *pos = &cache->resolve;
473         struct inquiry_entry *p;
474
475         list_del(&ie->list);
476
477         list_for_each_entry(p, &cache->resolve, list) {
478                 if (p->name_state != NAME_PENDING &&
479                                 abs(p->data.rssi) >= abs(ie->data.rssi))
480                         break;
481                 pos = &p->list;
482         }
483
484         list_add(&ie->list, pos);
485 }
486
487 bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
488                               bool name_known, bool *ssp)
489 {
490         struct discovery_state *cache = &hdev->discovery;
491         struct inquiry_entry *ie;
492
493         BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
494
495         if (ssp)
496                 *ssp = data->ssp_mode;
497
498         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
499         if (ie) {
500                 if (ie->data.ssp_mode && ssp)
501                         *ssp = true;
502
503                 if (ie->name_state == NAME_NEEDED &&
504                                                 data->rssi != ie->data.rssi) {
505                         ie->data.rssi = data->rssi;
506                         hci_inquiry_cache_update_resolve(hdev, ie);
507                 }
508
509                 goto update;
510         }
511
512         /* Entry not in the cache. Add new one. */
513         ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
514         if (!ie)
515                 return false;
516
517         list_add(&ie->all, &cache->all);
518
519         if (name_known) {
520                 ie->name_state = NAME_KNOWN;
521         } else {
522                 ie->name_state = NAME_NOT_KNOWN;
523                 list_add(&ie->list, &cache->unknown);
524         }
525
526 update:
527         if (name_known && ie->name_state != NAME_KNOWN &&
528                                         ie->name_state != NAME_PENDING) {
529                 ie->name_state = NAME_KNOWN;
530                 list_del(&ie->list);
531         }
532
533         memcpy(&ie->data, data, sizeof(*data));
534         ie->timestamp = jiffies;
535         cache->timestamp = jiffies;
536
537         if (ie->name_state == NAME_NOT_KNOWN)
538                 return false;
539
540         return true;
541 }
542
543 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
544 {
545         struct discovery_state *cache = &hdev->discovery;
546         struct inquiry_info *info = (struct inquiry_info *) buf;
547         struct inquiry_entry *e;
548         int copied = 0;
549
550         list_for_each_entry(e, &cache->all, all) {
551                 struct inquiry_data *data = &e->data;
552
553                 if (copied >= num)
554                         break;
555
556                 bacpy(&info->bdaddr, &data->bdaddr);
557                 info->pscan_rep_mode    = data->pscan_rep_mode;
558                 info->pscan_period_mode = data->pscan_period_mode;
559                 info->pscan_mode        = data->pscan_mode;
560                 memcpy(info->dev_class, data->dev_class, 3);
561                 info->clock_offset      = data->clock_offset;
562
563                 info++;
564                 copied++;
565         }
566
567         BT_DBG("cache %p, copied %d", cache, copied);
568         return copied;
569 }
570
571 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
572 {
573         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
574         struct hci_cp_inquiry cp;
575
576         BT_DBG("%s", hdev->name);
577
578         if (test_bit(HCI_INQUIRY, &hdev->flags))
579                 return;
580
581         /* Start Inquiry */
582         memcpy(&cp.lap, &ir->lap, 3);
583         cp.length  = ir->length;
584         cp.num_rsp = ir->num_rsp;
585         hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
586 }
587
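/*
 * Entry point for the HCIINQUIRY ioctl on an HCI socket. A minimal
 * userspace sketch (illustrative values; sk is a raw HCI socket and
 * buf is large enough for the request plus the returned inquiry_info
 * entries):
 *
 *	__u8 lap[3] = { 0x33, 0x8b, 0x9e };	- GIAC, little endian
 *	struct hci_inquiry_req *ir = (void *) buf;
 *
 *	ir->dev_id  = 0;
 *	ir->flags   = IREQ_CACHE_FLUSH;
 *	ir->length  = 8;			- in 1.28 s units
 *	ir->num_rsp = 0;			- unlimited, capped at 255
 *	memcpy(ir->lap, lap, 3);
 *	ioctl(sk, HCIINQUIRY, (unsigned long) ir);
 */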
588 int hci_inquiry(void __user *arg)
589 {
590         __u8 __user *ptr = arg;
591         struct hci_inquiry_req ir;
592         struct hci_dev *hdev;
593         int err = 0, do_inquiry = 0, max_rsp;
594         long timeo;
595         __u8 *buf;
596
597         if (copy_from_user(&ir, ptr, sizeof(ir)))
598                 return -EFAULT;
599
600         hdev = hci_dev_get(ir.dev_id);
601         if (!hdev)
602                 return -ENODEV;
603
604         hci_dev_lock(hdev);
605         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
606                                 inquiry_cache_empty(hdev) ||
607                                 ir.flags & IREQ_CACHE_FLUSH) {
608                 inquiry_cache_flush(hdev);
609                 do_inquiry = 1;
610         }
611         hci_dev_unlock(hdev);
612
613         timeo = ir.length * msecs_to_jiffies(2000);
614
615         if (do_inquiry) {
616                 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
617                 if (err < 0)
618                         goto done;
619         }
620
621         /* For an unlimited number of responses, use a buffer with 255 entries */
622         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
623
624         /* cache_dump can't sleep, so dump into a temporary buffer first
625          * and copy it to user space afterwards.
626          */
627         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
628         if (!buf) {
629                 err = -ENOMEM;
630                 goto done;
631         }
632
633         hci_dev_lock(hdev);
634         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
635         hci_dev_unlock(hdev);
636
637         BT_DBG("num_rsp %d", ir.num_rsp);
638
639         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
640                 ptr += sizeof(ir);
641                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
642                                         ir.num_rsp))
643                         err = -EFAULT;
644         } else
645                 err = -EFAULT;
646
647         kfree(buf);
648
649 done:
650         hci_dev_put(hdev);
651         return err;
652 }
653
654 /* ---- HCI ioctl helpers ---- */
655
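/* Bring up a controller: reached from the HCIDEVUP ioctl and from
 * hci_power_on(). Runs the HCI init sequence unless the device is
 * marked raw, and rolls everything back if init fails. */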
656 int hci_dev_open(__u16 dev)
657 {
658         struct hci_dev *hdev;
659         int ret = 0;
660
661         hdev = hci_dev_get(dev);
662         if (!hdev)
663                 return -ENODEV;
664
665         BT_DBG("%s %p", hdev->name, hdev);
666
667         hci_req_lock(hdev);
668
669         if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
670                 ret = -ENODEV;
671                 goto done;
672         }
673
674         if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
675                 ret = -ERFKILL;
676                 goto done;
677         }
678
679         if (test_bit(HCI_UP, &hdev->flags)) {
680                 ret = -EALREADY;
681                 goto done;
682         }
683
684         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
685                 set_bit(HCI_RAW, &hdev->flags);
686
687         /* Treat all non-BR/EDR controllers as raw devices if
688            enable_hs is not set */
689         if (hdev->dev_type != HCI_BREDR && !enable_hs)
690                 set_bit(HCI_RAW, &hdev->flags);
691
692         if (hdev->open(hdev)) {
693                 ret = -EIO;
694                 goto done;
695         }
696
697         if (!test_bit(HCI_RAW, &hdev->flags)) {
698                 atomic_set(&hdev->cmd_cnt, 1);
699                 set_bit(HCI_INIT, &hdev->flags);
700                 hdev->init_last_cmd = 0;
701
702                 ret = __hci_request(hdev, hci_init_req, 0,
703                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
704
705                 if (lmp_host_le_capable(hdev))
706                         ret = __hci_request(hdev, hci_le_init_req, 0,
707                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
708
709                 clear_bit(HCI_INIT, &hdev->flags);
710         }
711
712         if (!ret) {
713                 hci_dev_hold(hdev);
714                 set_bit(HCI_UP, &hdev->flags);
715                 hci_notify(hdev, HCI_DEV_UP);
716                 if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
717                         hci_dev_lock(hdev);
718                         mgmt_powered(hdev, 1);
719                         hci_dev_unlock(hdev);
720                 }
721         } else {
722                 /* Init failed, cleanup */
723                 flush_work(&hdev->tx_work);
724                 flush_work(&hdev->cmd_work);
725                 flush_work(&hdev->rx_work);
726
727                 skb_queue_purge(&hdev->cmd_q);
728                 skb_queue_purge(&hdev->rx_q);
729
730                 if (hdev->flush)
731                         hdev->flush(hdev);
732
733                 if (hdev->sent_cmd) {
734                         kfree_skb(hdev->sent_cmd);
735                         hdev->sent_cmd = NULL;
736                 }
737
738                 hdev->close(hdev);
739                 hdev->flags = 0;
740         }
741
742 done:
743         hci_req_unlock(hdev);
744         hci_dev_put(hdev);
745         return ret;
746 }
747
748 static int hci_dev_do_close(struct hci_dev *hdev)
749 {
750         BT_DBG("%s %p", hdev->name, hdev);
751
752         cancel_work_sync(&hdev->le_scan);
753
754         hci_req_cancel(hdev, ENODEV);
755         hci_req_lock(hdev);
756
757         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
758                 del_timer_sync(&hdev->cmd_timer);
759                 hci_req_unlock(hdev);
760                 return 0;
761         }
762
763         /* Flush RX and TX works */
764         flush_work(&hdev->tx_work);
765         flush_work(&hdev->rx_work);
766
767         if (hdev->discov_timeout > 0) {
768                 cancel_delayed_work(&hdev->discov_off);
769                 hdev->discov_timeout = 0;
770                 clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
771         }
772
773         if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
774                 cancel_delayed_work(&hdev->service_cache);
775
776         cancel_delayed_work_sync(&hdev->le_scan_disable);
777
778         hci_dev_lock(hdev);
779         inquiry_cache_flush(hdev);
780         hci_conn_hash_flush(hdev);
781         hci_dev_unlock(hdev);
782
783         hci_notify(hdev, HCI_DEV_DOWN);
784
785         if (hdev->flush)
786                 hdev->flush(hdev);
787
788         /* Reset device */
789         skb_queue_purge(&hdev->cmd_q);
790         atomic_set(&hdev->cmd_cnt, 1);
791         if (!test_bit(HCI_RAW, &hdev->flags) &&
792                                 test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
793                 set_bit(HCI_INIT, &hdev->flags);
794                 __hci_request(hdev, hci_reset_req, 0,
795                                         msecs_to_jiffies(250));
796                 clear_bit(HCI_INIT, &hdev->flags);
797         }
798
799         /* Flush cmd work */
800         flush_work(&hdev->cmd_work);
801
802         /* Drop queues */
803         skb_queue_purge(&hdev->rx_q);
804         skb_queue_purge(&hdev->cmd_q);
805         skb_queue_purge(&hdev->raw_q);
806
807         /* Drop last sent command */
808         if (hdev->sent_cmd) {
809                 del_timer_sync(&hdev->cmd_timer);
810                 kfree_skb(hdev->sent_cmd);
811                 hdev->sent_cmd = NULL;
812         }
813
814         /* After this point our queues are empty
815          * and no tasks are scheduled. */
816         hdev->close(hdev);
817
818         if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
819                 hci_dev_lock(hdev);
820                 mgmt_powered(hdev, 0);
821                 hci_dev_unlock(hdev);
822         }
823
824         /* Clear flags */
825         hdev->flags = 0;
826
827         memset(hdev->eir, 0, sizeof(hdev->eir));
828         memset(hdev->dev_class, 0, sizeof(hdev->dev_class));
829
830         hci_req_unlock(hdev);
831
832         hci_dev_put(hdev);
833         return 0;
834 }
835
836 int hci_dev_close(__u16 dev)
837 {
838         struct hci_dev *hdev;
839         int err;
840
841         hdev = hci_dev_get(dev);
842         if (!hdev)
843                 return -ENODEV;
844
845         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
846                 cancel_delayed_work(&hdev->power_off);
847
848         err = hci_dev_do_close(hdev);
849
850         hci_dev_put(hdev);
851         return err;
852 }
853
854 int hci_dev_reset(__u16 dev)
855 {
856         struct hci_dev *hdev;
857         int ret = 0;
858
859         hdev = hci_dev_get(dev);
860         if (!hdev)
861                 return -ENODEV;
862
863         hci_req_lock(hdev);
864
865         if (!test_bit(HCI_UP, &hdev->flags))
866                 goto done;
867
868         /* Drop queues */
869         skb_queue_purge(&hdev->rx_q);
870         skb_queue_purge(&hdev->cmd_q);
871
872         hci_dev_lock(hdev);
873         inquiry_cache_flush(hdev);
874         hci_conn_hash_flush(hdev);
875         hci_dev_unlock(hdev);
876
877         if (hdev->flush)
878                 hdev->flush(hdev);
879
880         atomic_set(&hdev->cmd_cnt, 1);
881         hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;
882
883         if (!test_bit(HCI_RAW, &hdev->flags))
884                 ret = __hci_request(hdev, hci_reset_req, 0,
885                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
886
887 done:
888         hci_req_unlock(hdev);
889         hci_dev_put(hdev);
890         return ret;
891 }
892
893 int hci_dev_reset_stat(__u16 dev)
894 {
895         struct hci_dev *hdev;
896         int ret = 0;
897
898         hdev = hci_dev_get(dev);
899         if (!hdev)
900                 return -ENODEV;
901
902         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
903
904         hci_dev_put(hdev);
905
906         return ret;
907 }
908
909 int hci_dev_cmd(unsigned int cmd, void __user *arg)
910 {
911         struct hci_dev *hdev;
912         struct hci_dev_req dr;
913         int err = 0;
914
915         if (copy_from_user(&dr, arg, sizeof(dr)))
916                 return -EFAULT;
917
918         hdev = hci_dev_get(dr.dev_id);
919         if (!hdev)
920                 return -ENODEV;
921
922         switch (cmd) {
923         case HCISETAUTH:
924                 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
925                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
926                 break;
927
928         case HCISETENCRYPT:
929                 if (!lmp_encrypt_capable(hdev)) {
930                         err = -EOPNOTSUPP;
931                         break;
932                 }
933
934                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
935                         /* Auth must be enabled first */
936                         err = hci_request(hdev, hci_auth_req, dr.dev_opt,
937                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
938                         if (err)
939                                 break;
940                 }
941
942                 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
943                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
944                 break;
945
946         case HCISETSCAN:
947                 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
948                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
949                 break;
950
951         case HCISETLINKPOL:
952                 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
953                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
954                 break;
955
956         case HCISETLINKMODE:
957                 hdev->link_mode = ((__u16) dr.dev_opt) &
958                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
959                 break;
960
961         case HCISETPTYPE:
962                 hdev->pkt_type = (__u16) dr.dev_opt;
963                 break;
964
965         case HCISETACLMTU:
966                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
967                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
968                 break;
969
970         case HCISETSCOMTU:
971                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
972                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
973                 break;
974
975         default:
976                 err = -EINVAL;
977                 break;
978         }
979
980         hci_dev_put(hdev);
981         return err;
982 }
983
984 int hci_get_dev_list(void __user *arg)
985 {
986         struct hci_dev *hdev;
987         struct hci_dev_list_req *dl;
988         struct hci_dev_req *dr;
989         int n = 0, size, err;
990         __u16 dev_num;
991
992         if (get_user(dev_num, (__u16 __user *) arg))
993                 return -EFAULT;
994
995         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
996                 return -EINVAL;
997
998         size = sizeof(*dl) + dev_num * sizeof(*dr);
999
1000         dl = kzalloc(size, GFP_KERNEL);
1001         if (!dl)
1002                 return -ENOMEM;
1003
1004         dr = dl->dev_req;
1005
1006         read_lock(&hci_dev_list_lock);
1007         list_for_each_entry(hdev, &hci_dev_list, list) {
1008                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1009                         cancel_delayed_work(&hdev->power_off);
1010
1011                 if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1012                         set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1013
1014                 (dr + n)->dev_id  = hdev->id;
1015                 (dr + n)->dev_opt = hdev->flags;
1016
1017                 if (++n >= dev_num)
1018                         break;
1019         }
1020         read_unlock(&hci_dev_list_lock);
1021
1022         dl->dev_num = n;
1023         size = sizeof(*dl) + n * sizeof(*dr);
1024
1025         err = copy_to_user(arg, dl, size);
1026         kfree(dl);
1027
1028         return err ? -EFAULT : 0;
1029 }
1030
1031 int hci_get_dev_info(void __user *arg)
1032 {
1033         struct hci_dev *hdev;
1034         struct hci_dev_info di;
1035         int err = 0;
1036
1037         if (copy_from_user(&di, arg, sizeof(di)))
1038                 return -EFAULT;
1039
1040         hdev = hci_dev_get(di.dev_id);
1041         if (!hdev)
1042                 return -ENODEV;
1043
1044         if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1045                 cancel_delayed_work_sync(&hdev->power_off);
1046
1047         if (!test_bit(HCI_MGMT, &hdev->dev_flags))
1048                 set_bit(HCI_PAIRABLE, &hdev->dev_flags);
1049
1050         strcpy(di.name, hdev->name);
1051         di.bdaddr   = hdev->bdaddr;
1052         di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
1053         di.flags    = hdev->flags;
1054         di.pkt_type = hdev->pkt_type;
1055         di.acl_mtu  = hdev->acl_mtu;
1056         di.acl_pkts = hdev->acl_pkts;
1057         di.sco_mtu  = hdev->sco_mtu;
1058         di.sco_pkts = hdev->sco_pkts;
1059         di.link_policy = hdev->link_policy;
1060         di.link_mode   = hdev->link_mode;
1061
1062         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
1063         memcpy(&di.features, &hdev->features, sizeof(di.features));
1064
1065         if (copy_to_user(arg, &di, sizeof(di)))
1066                 err = -EFAULT;
1067
1068         hci_dev_put(hdev);
1069
1070         return err;
1071 }
1072
1073 /* ---- Interface to HCI drivers ---- */
1074
1075 static int hci_rfkill_set_block(void *data, bool blocked)
1076 {
1077         struct hci_dev *hdev = data;
1078
1079         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1080
1081         if (!blocked)
1082                 return 0;
1083
1084         hci_dev_do_close(hdev);
1085
1086         return 0;
1087 }
1088
1089 static const struct rfkill_ops hci_rfkill_ops = {
1090         .set_block = hci_rfkill_set_block,
1091 };
1092
1093 /* Alloc HCI device */
1094 struct hci_dev *hci_alloc_dev(void)
1095 {
1096         struct hci_dev *hdev;
1097
1098         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1099         if (!hdev)
1100                 return NULL;
1101
1102         hci_init_sysfs(hdev);
1103         skb_queue_head_init(&hdev->driver_init);
1104
1105         return hdev;
1106 }
1107 EXPORT_SYMBOL(hci_alloc_dev);
1108
1109 /* Free HCI device */
1110 void hci_free_dev(struct hci_dev *hdev)
1111 {
1112         skb_queue_purge(&hdev->driver_init);
1113
1114         /* will free via device release */
1115         put_device(&hdev->dev);
1116 }
1117 EXPORT_SYMBOL(hci_free_dev);
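
/*
 * Typical transport-driver lifecycle around the two helpers above
 * (a sketch; the xxx_* callbacks and error handling are the driver's):
 *
 *	hdev = hci_alloc_dev();
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = xxx_open;
 *	hdev->close = xxx_close;
 *	hdev->flush = xxx_flush;
 *	hdev->send  = xxx_send;
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 *	...
 *	hci_unregister_dev(hdev);
 *	hci_free_dev(hdev);
 */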
1118
1119 static void hci_power_on(struct work_struct *work)
1120 {
1121         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
1122
1123         BT_DBG("%s", hdev->name);
1124
1125         if (hci_dev_open(hdev->id) < 0)
1126                 return;
1127
1128         if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
1129                 schedule_delayed_work(&hdev->power_off,
1130                                         msecs_to_jiffies(AUTO_OFF_TIMEOUT));
1131
1132         if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
1133                 mgmt_index_added(hdev);
1134 }
1135
1136 static void hci_power_off(struct work_struct *work)
1137 {
1138         struct hci_dev *hdev = container_of(work, struct hci_dev,
1139                                                         power_off.work);
1140
1141         BT_DBG("%s", hdev->name);
1142
1143         hci_dev_do_close(hdev);
1144 }
1145
1146 static void hci_discov_off(struct work_struct *work)
1147 {
1148         struct hci_dev *hdev;
1149         u8 scan = SCAN_PAGE;
1150
1151         hdev = container_of(work, struct hci_dev, discov_off.work);
1152
1153         BT_DBG("%s", hdev->name);
1154
1155         hci_dev_lock(hdev);
1156
1157         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
1158
1159         hdev->discov_timeout = 0;
1160
1161         hci_dev_unlock(hdev);
1162 }
1163
1164 int hci_uuids_clear(struct hci_dev *hdev)
1165 {
1166         struct list_head *p, *n;
1167
1168         list_for_each_safe(p, n, &hdev->uuids) {
1169                 struct bt_uuid *uuid;
1170
1171                 uuid = list_entry(p, struct bt_uuid, list);
1172
1173                 list_del(p);
1174                 kfree(uuid);
1175         }
1176
1177         return 0;
1178 }
1179
1180 int hci_link_keys_clear(struct hci_dev *hdev)
1181 {
1182         struct list_head *p, *n;
1183
1184         list_for_each_safe(p, n, &hdev->link_keys) {
1185                 struct link_key *key;
1186
1187                 key = list_entry(p, struct link_key, list);
1188
1189                 list_del(p);
1190                 kfree(key);
1191         }
1192
1193         return 0;
1194 }
1195
1196 int hci_smp_ltks_clear(struct hci_dev *hdev)
1197 {
1198         struct smp_ltk *k, *tmp;
1199
1200         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1201                 list_del(&k->list);
1202                 kfree(k);
1203         }
1204
1205         return 0;
1206 }
1207
1208 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1209 {
1210         struct link_key *k;
1211
1212         list_for_each_entry(k, &hdev->link_keys, list)
1213                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1214                         return k;
1215
1216         return NULL;
1217 }
1218
1219 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1220                                                 u8 key_type, u8 old_key_type)
1221 {
1222         /* Legacy key */
1223         if (key_type < 0x03)
1224                 return 1;
1225
1226         /* Debug keys are insecure so don't store them persistently */
1227         if (key_type == HCI_LK_DEBUG_COMBINATION)
1228                 return 0;
1229
1230         /* Changed combination key and there's no previous one */
1231         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1232                 return 0;
1233
1234         /* Security mode 3 case */
1235         if (!conn)
1236                 return 1;
1237
1238         /* Neither the local nor the remote side required no-bonding */
1239         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1240                 return 1;
1241
1242         /* Local side had dedicated bonding as requirement */
1243         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1244                 return 1;
1245
1246         /* Remote side had dedicated bonding as requirement */
1247         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1248                 return 1;
1249
1250         /* If none of the above criteria match, then don't store the key
1251          * persistently */
1252         return 0;
1253 }
1254
1255 struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1256 {
1257         struct smp_ltk *k;
1258
1259         list_for_each_entry(k, &hdev->long_term_keys, list) {
1260                 if (k->ediv != ediv ||
1261                                 memcmp(rand, k->rand, sizeof(k->rand)))
1262                         continue;
1263
1264                 return k;
1265         }
1266
1267         return NULL;
1268 }
1269 EXPORT_SYMBOL(hci_find_ltk);
1270
1271 struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
1272                                      u8 addr_type)
1273 {
1274         struct smp_ltk *k;
1275
1276         list_for_each_entry(k, &hdev->long_term_keys, list)
1277                 if (addr_type == k->bdaddr_type &&
1278                                         bacmp(bdaddr, &k->bdaddr) == 0)
1279                         return k;
1280
1281         return NULL;
1282 }
1283 EXPORT_SYMBOL(hci_find_ltk_by_addr);
1284
1285 int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
1286                      bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
1287 {
1288         struct link_key *key, *old_key;
1289         u8 old_key_type, persistent;
1290
1291         old_key = hci_find_link_key(hdev, bdaddr);
1292         if (old_key) {
1293                 old_key_type = old_key->type;
1294                 key = old_key;
1295         } else {
1296                 old_key_type = conn ? conn->key_type : 0xff;
1297                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1298                 if (!key)
1299                         return -ENOMEM;
1300                 list_add(&key->list, &hdev->link_keys);
1301         }
1302
1303         BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);
1304
1305         /* Some buggy controller combinations generate a changed
1306          * combination key for legacy pairing even when there's no
1307          * previous key */
1308         if (type == HCI_LK_CHANGED_COMBINATION &&
1309                                         (!conn || conn->remote_auth == 0xff) &&
1310                                         old_key_type == 0xff) {
1311                 type = HCI_LK_COMBINATION;
1312                 if (conn)
1313                         conn->key_type = type;
1314         }
1315
1316         bacpy(&key->bdaddr, bdaddr);
1317         memcpy(key->val, val, 16);
1318         key->pin_len = pin_len;
1319
1320         if (type == HCI_LK_CHANGED_COMBINATION)
1321                 key->type = old_key_type;
1322         else
1323                 key->type = type;
1324
1325         if (!new_key)
1326                 return 0;
1327
1328         persistent = hci_persistent_key(hdev, conn, type, old_key_type);
1329
1330         mgmt_new_link_key(hdev, key, persistent);
1331
1332         if (!persistent) {
1333                 list_del(&key->list);
1334                 kfree(key);
1335         }
1336
1337         return 0;
1338 }
1339
1340 int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
1341                 int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
1342                 ediv, u8 rand[8])
1343 {
1344         struct smp_ltk *key, *old_key;
1345
1346         if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
1347                 return 0;
1348
1349         old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
1350         if (old_key)
1351                 key = old_key;
1352         else {
1353                 key = kzalloc(sizeof(*key), GFP_ATOMIC);
1354                 if (!key)
1355                         return -ENOMEM;
1356                 list_add(&key->list, &hdev->long_term_keys);
1357         }
1358
1359         bacpy(&key->bdaddr, bdaddr);
1360         key->bdaddr_type = addr_type;
1361         memcpy(key->val, tk, sizeof(key->val));
1362         key->authenticated = authenticated;
1363         key->ediv = ediv;
1364         key->enc_size = enc_size;
1365         key->type = type;
1366         memcpy(key->rand, rand, sizeof(key->rand));
1367
1368         if (!new_key)
1369                 return 0;
1370
1371         if (type & HCI_SMP_LTK)
1372                 mgmt_new_ltk(hdev, key, 1);
1373
1374         return 0;
1375 }
1376
1377 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1378 {
1379         struct link_key *key;
1380
1381         key = hci_find_link_key(hdev, bdaddr);
1382         if (!key)
1383                 return -ENOENT;
1384
1385         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1386
1387         list_del(&key->list);
1388         kfree(key);
1389
1390         return 0;
1391 }
1392
1393 int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
1394 {
1395         struct smp_ltk *k, *tmp;
1396
1397         list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
1398                 if (bacmp(bdaddr, &k->bdaddr))
1399                         continue;
1400
1401                 BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1402
1403                 list_del(&k->list);
1404                 kfree(k);
1405         }
1406
1407         return 0;
1408 }
1409
1410 /* HCI command timer function */
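/* Armed by hci_cmd_work() each time a command is handed to the driver
 * and deleted when the matching command complete/status event arrives;
 * firing here means the controller stopped responding, so restore one
 * command credit to let the queue drain. */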
1411 static void hci_cmd_timer(unsigned long arg)
1412 {
1413         struct hci_dev *hdev = (void *) arg;
1414
1415         BT_ERR("%s command tx timeout", hdev->name);
1416         atomic_set(&hdev->cmd_cnt, 1);
1417         queue_work(hdev->workqueue, &hdev->cmd_work);
1418 }
1419
1420 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1421                                           bdaddr_t *bdaddr)
1422 {
1423         struct oob_data *data;
1424
1425         list_for_each_entry(data, &hdev->remote_oob_data, list)
1426                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1427                         return data;
1428
1429         return NULL;
1430 }
1431
1432 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1433 {
1434         struct oob_data *data;
1435
1436         data = hci_find_remote_oob_data(hdev, bdaddr);
1437         if (!data)
1438                 return -ENOENT;
1439
1440         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1441
1442         list_del(&data->list);
1443         kfree(data);
1444
1445         return 0;
1446 }
1447
1448 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1449 {
1450         struct oob_data *data, *n;
1451
1452         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1453                 list_del(&data->list);
1454                 kfree(data);
1455         }
1456
1457         return 0;
1458 }
1459
1460 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1461                             u8 *randomizer)
1462 {
1463         struct oob_data *data;
1464
1465         data = hci_find_remote_oob_data(hdev, bdaddr);
1466
1467         if (!data) {
1468                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1469                 if (!data)
1470                         return -ENOMEM;
1471
1472                 bacpy(&data->bdaddr, bdaddr);
1473                 list_add(&data->list, &hdev->remote_oob_data);
1474         }
1475
1476         memcpy(data->hash, hash, sizeof(data->hash));
1477         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1478
1479         BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1480
1481         return 0;
1482 }
1483
1484 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
1485 {
1486         struct bdaddr_list *b;
1487
1488         list_for_each_entry(b, &hdev->blacklist, list)
1489                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1490                         return b;
1491
1492         return NULL;
1493 }
1494
1495 int hci_blacklist_clear(struct hci_dev *hdev)
1496 {
1497         struct list_head *p, *n;
1498
1499         list_for_each_safe(p, n, &hdev->blacklist) {
1500                 struct bdaddr_list *b;
1501
1502                 b = list_entry(p, struct bdaddr_list, list);
1503
1504                 list_del(p);
1505                 kfree(b);
1506         }
1507
1508         return 0;
1509 }
1510
1511 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1512 {
1513         struct bdaddr_list *entry;
1514
1515         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1516                 return -EBADF;
1517
1518         if (hci_blacklist_lookup(hdev, bdaddr))
1519                 return -EEXIST;
1520
1521         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1522         if (!entry)
1523                 return -ENOMEM;
1524
1525         bacpy(&entry->bdaddr, bdaddr);
1526
1527         list_add(&entry->list, &hdev->blacklist);
1528
1529         return mgmt_device_blocked(hdev, bdaddr, type);
1530 }
1531
1532 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
1533 {
1534         struct bdaddr_list *entry;
1535
1536         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1537                 return hci_blacklist_clear(hdev);
1538
1539         entry = hci_blacklist_lookup(hdev, bdaddr);
1540         if (!entry)
1541                 return -ENOENT;
1542
1543         list_del(&entry->list);
1544         kfree(entry);
1545
1546         return mgmt_device_unblocked(hdev, bdaddr, type);
1547 }
1548
1549 static void hci_clear_adv_cache(struct work_struct *work)
1550 {
1551         struct hci_dev *hdev = container_of(work, struct hci_dev,
1552                                             adv_work.work);
1553
1554         hci_dev_lock(hdev);
1555
1556         hci_adv_entries_clear(hdev);
1557
1558         hci_dev_unlock(hdev);
1559 }
1560
1561 int hci_adv_entries_clear(struct hci_dev *hdev)
1562 {
1563         struct adv_entry *entry, *tmp;
1564
1565         list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1566                 list_del(&entry->list);
1567                 kfree(entry);
1568         }
1569
1570         BT_DBG("%s adv cache cleared", hdev->name);
1571
1572         return 0;
1573 }
1574
1575 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1576 {
1577         struct adv_entry *entry;
1578
1579         list_for_each_entry(entry, &hdev->adv_entries, list)
1580                 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1581                         return entry;
1582
1583         return NULL;
1584 }
1585
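/* Per the LE spec, only ADV_IND and ADV_DIRECT_IND advertising PDUs
 * accept connection requests; everything else is kept out of the
 * advertising cache. */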
1586 static inline int is_connectable_adv(u8 evt_type)
1587 {
1588         if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1589                 return 1;
1590
1591         return 0;
1592 }
1593
1594 int hci_add_adv_entry(struct hci_dev *hdev,
1595                                         struct hci_ev_le_advertising_info *ev)
{
        struct adv_entry *entry;

        if (!is_connectable_adv(ev->evt_type))
1596                 return -EINVAL;
1597
1598         /* Only new entries should be added to adv_entries; if the
1599          * bdaddr is already cached, keep the existing entry. */
1600         if (hci_find_adv_entry(hdev, &ev->bdaddr))
1601                 return 0;
1602
1603         entry = kzalloc(sizeof(*entry), GFP_KERNEL);
1604         if (!entry)
1605                 return -ENOMEM;
1606
1607         bacpy(&entry->bdaddr, &ev->bdaddr);
1608         entry->bdaddr_type = ev->bdaddr_type;
1609
1610         list_add(&entry->list, &hdev->adv_entries);
1611
1612         BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1613                                 batostr(&entry->bdaddr), entry->bdaddr_type);
1614
1615         return 0;
1616 }
1617
1618 static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
1619 {
1620         struct le_scan_params *param = (struct le_scan_params *) opt;
1621         struct hci_cp_le_set_scan_param cp;
1622
1623         memset(&cp, 0, sizeof(cp));
1624         cp.type = param->type;
1625         cp.interval = cpu_to_le16(param->interval);
1626         cp.window = cpu_to_le16(param->window);
1627
1628         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
1629 }
1630
1631 static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
1632 {
1633         struct hci_cp_le_set_scan_enable cp;
1634
1635         memset(&cp, 0, sizeof(cp));
1636         cp.enable = 1;
1637
1638         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1639 }
1640
1641 static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
1642                           u16 window, int timeout)
1643 {
1644         long timeo = msecs_to_jiffies(3000);
1645         struct le_scan_params param;
1646         int err;
1647
1648         BT_DBG("%s", hdev->name);
1649
1650         if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
1651                 return -EINPROGRESS;
1652
1653         param.type = type;
1654         param.interval = interval;
1655         param.window = window;
1656
1657         hci_req_lock(hdev);
1658
1659         err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
1660                             timeo);
1661         if (!err)
1662                 err = __hci_request(hdev, le_scan_enable_req, 0, timeo);
1663
1664         hci_req_unlock(hdev);
1665
1666         if (err < 0)
1667                 return err;
1668
1669         schedule_delayed_work(&hdev->le_scan_disable,
1670                               msecs_to_jiffies(timeout));
1671
1672         return 0;
1673 }
1674
1675 static void le_scan_disable_work(struct work_struct *work)
1676 {
1677         struct hci_dev *hdev = container_of(work, struct hci_dev,
1678                                             le_scan_disable.work);
1679         struct hci_cp_le_set_scan_enable cp;
1680
1681         BT_DBG("%s", hdev->name);
1682
1683         memset(&cp, 0, sizeof(cp));
1684
1685         hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
1686 }
1687
1688 static void le_scan_work(struct work_struct *work)
1689 {
1690         struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
1691         struct le_scan_params *param = &hdev->le_scan_params;
1692
1693         BT_DBG("%s", hdev->name);
1694
1695         hci_do_le_scan(hdev, param->type, param->interval, param->window,
1696                        param->timeout);
1697 }
1698
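/* Start an LE scan without blocking the caller: hci_do_le_scan() sleeps
 * in __hci_request(), so the parameters are stashed in hdev and the
 * actual scan is deferred to le_scan_work() on system_long_wq. */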
1699 int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
1700                 int timeout)
1701 {
1702         struct le_scan_params *param = &hdev->le_scan_params;
1703
1704         BT_DBG("%s", hdev->name);
1705
1706         if (work_busy(&hdev->le_scan))
1707                 return -EINPROGRESS;
1708
1709         param->type = type;
1710         param->interval = interval;
1711         param->window = window;
1712         param->timeout = timeout;
1713
1714         queue_work(system_long_wq, &hdev->le_scan);
1715
1716         return 0;
1717 }
1718
1719 /* Register HCI device */
1720 int hci_register_dev(struct hci_dev *hdev)
1721 {
1722         struct list_head *head = &hci_dev_list, *p;
1723         int i, id, error;
1724
1725         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1726
1727         if (!hdev->open || !hdev->close)
1728                 return -EINVAL;
1729
1730         /* Do not allow HCI_AMP devices to register at index 0,
1731          * so the index can be used as the AMP controller ID.
1732          */
1733         id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;
1734
1735         write_lock(&hci_dev_list_lock);
1736
1737         /* Find first available device id */
1738         list_for_each(p, &hci_dev_list) {
1739                 if (list_entry(p, struct hci_dev, list)->id != id)
1740                         break;
1741                 head = p; id++;
1742         }
1743
1744         sprintf(hdev->name, "hci%d", id);
1745         hdev->id = id;
1746         list_add_tail(&hdev->list, head);
1747
1748         mutex_init(&hdev->lock);
1749
1750         hdev->flags = 0;
1751         hdev->dev_flags = 0;
1752         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
1753         hdev->esco_type = (ESCO_HV1);
1754         hdev->link_mode = (HCI_LM_ACCEPT);
1755         hdev->io_capability = 0x03; /* No Input No Output */
1756
1757         hdev->idle_timeout = 0;
1758         hdev->sniff_max_interval = 800;
1759         hdev->sniff_min_interval = 80;
1760
1761         INIT_WORK(&hdev->rx_work, hci_rx_work);
1762         INIT_WORK(&hdev->cmd_work, hci_cmd_work);
1763         INIT_WORK(&hdev->tx_work, hci_tx_work);
1764
1765
1766         skb_queue_head_init(&hdev->rx_q);
1767         skb_queue_head_init(&hdev->cmd_q);
1768         skb_queue_head_init(&hdev->raw_q);
1769
1770         setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);
1771
1772         for (i = 0; i < NUM_REASSEMBLY; i++)
1773                 hdev->reassembly[i] = NULL;
1774
1775         init_waitqueue_head(&hdev->req_wait_q);
1776         mutex_init(&hdev->req_lock);
1777
1778         discovery_init(hdev);
1779
1780         hci_conn_hash_init(hdev);
1781
1782         INIT_LIST_HEAD(&hdev->mgmt_pending);
1783
1784         INIT_LIST_HEAD(&hdev->blacklist);
1785
1786         INIT_LIST_HEAD(&hdev->uuids);
1787
1788         INIT_LIST_HEAD(&hdev->link_keys);
1789         INIT_LIST_HEAD(&hdev->long_term_keys);
1790
1791         INIT_LIST_HEAD(&hdev->remote_oob_data);
1792
1793         INIT_LIST_HEAD(&hdev->adv_entries);
1794
1795         INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
1796         INIT_WORK(&hdev->power_on, hci_power_on);
1797         INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
1798
1799         INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);
1800
1801         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
1802
1803         atomic_set(&hdev->promisc, 0);
1804
1805         INIT_WORK(&hdev->le_scan, le_scan_work);
1806
1807         INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);
1808
1809         write_unlock(&hci_dev_list_lock);
1810
1811         hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
1812                                                         WQ_MEM_RECLAIM, 1);
1813         if (!hdev->workqueue) {
1814                 error = -ENOMEM;
1815                 goto err;
1816         }
1817
1818         error = hci_add_sysfs(hdev);
1819         if (error < 0)
1820                 goto err_wqueue;
1821
1822         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
1823                                 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
1824         if (hdev->rfkill) {
1825                 if (rfkill_register(hdev->rfkill) < 0) {
1826                         rfkill_destroy(hdev->rfkill);
1827                         hdev->rfkill = NULL;
1828                 }
1829         }
1830
1831         set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
1832         set_bit(HCI_SETUP, &hdev->dev_flags);
1833         schedule_work(&hdev->power_on);
1834
1835         hci_notify(hdev, HCI_DEV_REG);
1836         hci_dev_hold(hdev);
1837
1838         return id;
1839
1840 err_wqueue:
1841         destroy_workqueue(hdev->workqueue);
1842 err:
1843         write_lock(&hci_dev_list_lock);
1844         list_del(&hdev->list);
1845         write_unlock(&hci_dev_list_lock);
1846
1847         return error;
1848 }
1849 EXPORT_SYMBOL(hci_register_dev);
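
/* Illustrative sketch (hypothetical transport driver, for illustration
 * only): a driver allocates an hci_dev, fills in the bus type and the
 * mandatory callbacks, and registers it. All example_* names are made up.
 */
#if 0
static int example_open(struct hci_dev *hdev) { return 0; }
static int example_close(struct hci_dev *hdev) { return 0; }
static int example_send(struct sk_buff *skb) { kfree_skb(skb); return 0; }

static int example_probe(void)
{
        struct hci_dev *hdev;
        int id;

        hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus   = HCI_USB;
        hdev->open  = example_open;     /* open and close are mandatory */
        hdev->close = example_close;
        hdev->send  = example_send;

        id = hci_register_dev(hdev);
        if (id < 0) {
                hci_free_dev(hdev);
                return id;
        }

        return 0;
}
#endif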
1850
1851 /* Unregister HCI device */
1852 void hci_unregister_dev(struct hci_dev *hdev)
1853 {
1854         int i;
1855
1856         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1857
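        /* Mark the device as going away so that concurrent opens and
         * deferred work can detect the teardown and bail out. */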
1858         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1859
1860         write_lock(&hci_dev_list_lock);
1861         list_del(&hdev->list);
1862         write_unlock(&hci_dev_list_lock);
1863
1864         hci_dev_do_close(hdev);
1865
1866         for (i = 0; i < NUM_REASSEMBLY; i++)
1867                 kfree_skb(hdev->reassembly[i]);
1868
1869         if (!test_bit(HCI_INIT, &hdev->flags) &&
1870                                 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1871                 hci_dev_lock(hdev);
1872                 mgmt_index_removed(hdev);
1873                 hci_dev_unlock(hdev);
1874         }
1875
1876         /* mgmt_index_removed should take care of emptying the
1877          * pending list */
1878         BUG_ON(!list_empty(&hdev->mgmt_pending));
1879
1880         hci_notify(hdev, HCI_DEV_UNREG);
1881
1882         if (hdev->rfkill) {
1883                 rfkill_unregister(hdev->rfkill);
1884                 rfkill_destroy(hdev->rfkill);
1885         }
1886
1887         hci_del_sysfs(hdev);
1888
1889         cancel_delayed_work_sync(&hdev->adv_work);
1890
1891         destroy_workqueue(hdev->workqueue);
1892
1893         hci_dev_lock(hdev);
1894         hci_blacklist_clear(hdev);
1895         hci_uuids_clear(hdev);
1896         hci_link_keys_clear(hdev);
1897         hci_smp_ltks_clear(hdev);
1898         hci_remote_oob_data_clear(hdev);
1899         hci_adv_entries_clear(hdev);
1900         hci_dev_unlock(hdev);
1901
1902         hci_dev_put(hdev);
1903 }
1904 EXPORT_SYMBOL(hci_unregister_dev);
1905
1906 /* Suspend HCI device */
1907 int hci_suspend_dev(struct hci_dev *hdev)
1908 {
1909         hci_notify(hdev, HCI_DEV_SUSPEND);
1910         return 0;
1911 }
1912 EXPORT_SYMBOL(hci_suspend_dev);
1913
1914 /* Resume HCI device */
1915 int hci_resume_dev(struct hci_dev *hdev)
1916 {
1917         hci_notify(hdev, HCI_DEV_RESUME);
1918         return 0;
1919 }
1920 EXPORT_SYMBOL(hci_resume_dev);
1921
1922 /* Receive frame from HCI drivers */
1923 int hci_recv_frame(struct sk_buff *skb)
1924 {
1925         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1926         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1927                                 && !test_bit(HCI_INIT, &hdev->flags))) {
1928                 kfree_skb(skb);
1929                 return -ENXIO;
1930         }
1931
1932         /* Incoming skb */
1933         bt_cb(skb)->incoming = 1;
1934
1935         /* Time stamp */
1936         __net_timestamp(skb);
1937
1938         skb_queue_tail(&hdev->rx_q, skb);
1939         queue_work(hdev->workqueue, &hdev->rx_work);
1940
1941         return 0;
1942 }
1943 EXPORT_SYMBOL(hci_recv_frame);
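
/* Illustrative sketch (hypothetical driver code, for illustration only):
 * a driver that already holds a complete frame wraps it in an skb, tags
 * the packet type and owning device, and hands it to hci_recv_frame().
 */
#if 0
static int example_deliver_event(struct hci_dev *hdev, void *buf, int len)
{
        struct sk_buff *skb;

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        memcpy(skb_put(skb, len), buf, len);
        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

        return hci_recv_frame(skb);
}
#endif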
1944
1945 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1946                                                   int count, __u8 index)
1947 {
1948         int len = 0;
1949         int hlen = 0;
1950         int remain = count;
1951         struct sk_buff *skb;
1952         struct bt_skb_cb *scb;
1953
1954         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1955                                 index >= NUM_REASSEMBLY)
1956                 return -EILSEQ;
1957
1958         skb = hdev->reassembly[index];
1959
1960         if (!skb) {
1961                 switch (type) {
1962                 case HCI_ACLDATA_PKT:
1963                         len = HCI_MAX_FRAME_SIZE;
1964                         hlen = HCI_ACL_HDR_SIZE;
1965                         break;
1966                 case HCI_EVENT_PKT:
1967                         len = HCI_MAX_EVENT_SIZE;
1968                         hlen = HCI_EVENT_HDR_SIZE;
1969                         break;
1970                 case HCI_SCODATA_PKT:
1971                         len = HCI_MAX_SCO_SIZE;
1972                         hlen = HCI_SCO_HDR_SIZE;
1973                         break;
1974                 }
1975
1976                 skb = bt_skb_alloc(len, GFP_ATOMIC);
1977                 if (!skb)
1978                         return -ENOMEM;
1979
1980                 scb = (void *) skb->cb;
1981                 scb->expect = hlen;
1982                 scb->pkt_type = type;
1983
1984                 skb->dev = (void *) hdev;
1985                 hdev->reassembly[index] = skb;
1986         }
1987
1988         while (count) {
1989                 scb = (void *) skb->cb;
1990                 len = min_t(uint, scb->expect, count);
1991
1992                 memcpy(skb_put(skb, len), data, len);
1993
1994                 count -= len;
1995                 data += len;
1996                 scb->expect -= len;
1997                 remain = count;
1998
1999                 switch (type) {
2000                 case HCI_EVENT_PKT:
2001                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2002                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2003                                 scb->expect = h->plen;
2004
2005                                 if (skb_tailroom(skb) < scb->expect) {
2006                                         kfree_skb(skb);
2007                                         hdev->reassembly[index] = NULL;
2008                                         return -ENOMEM;
2009                                 }
2010                         }
2011                         break;
2012
2013                 case HCI_ACLDATA_PKT:
2014                         if (skb->len  == HCI_ACL_HDR_SIZE) {
2015                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2016                                 scb->expect = __le16_to_cpu(h->dlen);
2017
2018                                 if (skb_tailroom(skb) < scb->expect) {
2019                                         kfree_skb(skb);
2020                                         hdev->reassembly[index] = NULL;
2021                                         return -ENOMEM;
2022                                 }
2023                         }
2024                         break;
2025
2026                 case HCI_SCODATA_PKT:
2027                         if (skb->len == HCI_SCO_HDR_SIZE) {
2028                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2029                                 scb->expect = h->dlen;
2030
2031                                 if (skb_tailroom(skb) < scb->expect) {
2032                                         kfree_skb(skb);
2033                                         hdev->reassembly[index] = NULL;
2034                                         return -ENOMEM;
2035                                 }
2036                         }
2037                         break;
2038                 }
2039
2040                 if (scb->expect == 0) {
2041                         /* Complete frame */
2042
2043                         bt_cb(skb)->pkt_type = type;
2044                         hci_recv_frame(skb);
2045
2046                         hdev->reassembly[index] = NULL;
2047                         return remain;
2048                 }
2049         }
2050
2051         return remain;
2052 }
2053
2054 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2055 {
2056         int rem = 0;
2057
2058         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2059                 return -EILSEQ;
2060
2061         while (count) {
2062                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2063                 if (rem < 0)
2064                         return rem;
2065
2066                 data += (count - rem);
2067                 count = rem;
2068         }
2069
2070         return rem;
2071 }
2072 EXPORT_SYMBOL(hci_recv_fragment);
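
/* Illustrative sketch (hypothetical driver code, for illustration only):
 * a transport that delivers packets in arbitrary chunks feeds each chunk
 * to hci_recv_fragment() and lets the core reassemble full frames.
 */
#if 0
static int example_rx_chunk(struct hci_dev *hdev, void *buf, int len)
{
        int err;

        err = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);
        if (err < 0)
                BT_ERR("%s reassembly failed (%d)", hdev->name, err);

        return err;
}
#endif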
2073
2074 #define STREAM_REASSEMBLY 0
2075
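/* Reassembly for pure byte-stream transports (e.g. a UART H:4 link),
 * where each frame is prefixed with a one-byte packet type indicator.
 * The first byte of every new frame selects the type; the remainder is
 * run through the same per-packet reassembly as above.
 */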
2076 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2077 {
2078         int type;
2079         int rem = 0;
2080
2081         while (count) {
2082                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2083
2084                 if (!skb) {
2085                         struct { char type; } *pkt;
2086
2087                         /* Start of the frame */
2088                         pkt = data;
2089                         type = pkt->type;
2090
2091                         data++;
2092                         count--;
2093                 } else
2094                         type = bt_cb(skb)->pkt_type;
2095
2096                 rem = hci_reassembly(hdev, type, data, count,
2097                                                         STREAM_REASSEMBLY);
2098                 if (rem < 0)
2099                         return rem;
2100
2101                 data += (count - rem);
2102                 count = rem;
2103         }
2104
2105         return rem;
2106 }
2107 EXPORT_SYMBOL(hci_recv_stream_fragment);
2108
2109 /* ---- Interface to upper protocols ---- */
2110
2111 int hci_register_cb(struct hci_cb *cb)
2112 {
2113         BT_DBG("%p name %s", cb, cb->name);
2114
2115         write_lock(&hci_cb_list_lock);
2116         list_add(&cb->list, &hci_cb_list);
2117         write_unlock(&hci_cb_list_lock);
2118
2119         return 0;
2120 }
2121 EXPORT_SYMBOL(hci_register_cb);
2122
2123 int hci_unregister_cb(struct hci_cb *cb)
2124 {
2125         BT_DBG("%p name %s", cb, cb->name);
2126
2127         write_lock(&hci_cb_list_lock);
2128         list_del(&cb->list);
2129         write_unlock(&hci_cb_list_lock);
2130
2131         return 0;
2132 }
2133 EXPORT_SYMBOL(hci_unregister_cb);
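
/* Illustrative sketch (hypothetical upper protocol, for illustration
 * only): a protocol subscribes to connection notifications by
 * registering a struct hci_cb; hooks it does not need stay NULL.
 */
#if 0
static void example_security_cfm(struct hci_conn *conn, __u8 status,
                                                        __u8 encrypt)
{
        BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
}

static struct hci_cb example_cb = {
        .name           = "example",
        .security_cfm   = example_security_cfm,
};

/* paired calls: hci_register_cb(&example_cb) / hci_unregister_cb(&example_cb) */
#endif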
2134
2135 static int hci_send_frame(struct sk_buff *skb)
2136 {
2137         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2138
2139         if (!hdev) {
2140                 kfree_skb(skb);
2141                 return -ENODEV;
2142         }
2143
2144         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2145
2146         /* Time stamp */
2147         __net_timestamp(skb);
2148
2149         /* Send copy to monitor */
2150         hci_send_to_monitor(hdev, skb);
2151
2152         if (atomic_read(&hdev->promisc)) {
2153                 /* Send copy to the sockets */
2154                 hci_send_to_sock(hdev, skb);
2155         }
2156
2157         /* Get rid of skb owner, prior to sending to the driver. */
2158         skb_orphan(skb);
2159
2160         return hdev->send(skb);
2161 }
2162
2163 /* Send HCI command */
2164 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2165 {
2166         int len = HCI_COMMAND_HDR_SIZE + plen;
2167         struct hci_command_hdr *hdr;
2168         struct sk_buff *skb;
2169
2170         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2171
2172         skb = bt_skb_alloc(len, GFP_ATOMIC);
2173         if (!skb) {
2174                 BT_ERR("%s no memory for command", hdev->name);
2175                 return -ENOMEM;
2176         }
2177
2178         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2179         hdr->opcode = cpu_to_le16(opcode);
2180         hdr->plen   = plen;
2181
2182         if (plen)
2183                 memcpy(skb_put(skb, plen), param, plen);
2184
2185         BT_DBG("skb len %d", skb->len);
2186
2187         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2188         skb->dev = (void *) hdev;
2189
2190         if (test_bit(HCI_INIT, &hdev->flags))
2191                 hdev->init_last_cmd = opcode;
2192
2193         skb_queue_tail(&hdev->cmd_q, skb);
2194         queue_work(hdev->workqueue, &hdev->cmd_work);
2195
2196         return 0;
2197 }
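
/* Illustrative sketch (for illustration only): callers pass an opcode
 * plus a parameter block; the core prepends the command header, queues
 * the skb on cmd_q and wakes the command work. Enabling page and inquiry
 * scan is a typical single-byte example.
 */
#if 0
static int example_enable_scan(struct hci_dev *hdev)
{
        __u8 scan = SCAN_PAGE | SCAN_INQUIRY;

        return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE,
                                                sizeof(scan), &scan);
}
#endif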
2198
2199 /* Get data from the previously sent command */
2200 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2201 {
2202         struct hci_command_hdr *hdr;
2203
2204         if (!hdev->sent_cmd)
2205                 return NULL;
2206
2207         hdr = (void *) hdev->sent_cmd->data;
2208
2209         if (hdr->opcode != cpu_to_le16(opcode))
2210                 return NULL;
2211
2212         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2213
2214         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2215 }
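
/* Illustrative sketch (for illustration only): event handlers use
 * hci_sent_cmd_data() to recover the parameters of the command that a
 * Command Complete event refers to; NULL means the last sent command had
 * a different opcode.
 */
#if 0
static void example_cc_write_scan_enable(struct hci_dev *hdev)
{
        void *sent;
        __u8 param;

        sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);
        if (!sent)
                return;

        param = *((__u8 *) sent);
        BT_DBG("%s scan enable param 0x%2.2x", hdev->name, param);
}
#endif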
2216
2217 /* Send ACL data */
2218 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2219 {
2220         struct hci_acl_hdr *hdr;
2221         int len = skb->len;
2222
2223         skb_push(skb, HCI_ACL_HDR_SIZE);
2224         skb_reset_transport_header(skb);
2225         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2226         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2227         hdr->dlen   = cpu_to_le16(len);
2228 }
2229
2230 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2231                                 struct sk_buff *skb, __u16 flags)
2232 {
2233         struct hci_dev *hdev = conn->hdev;
2234         struct sk_buff *list;
2235
2236         list = skb_shinfo(skb)->frag_list;
2237         if (!list) {
2238                 /* Non-fragmented */
2239                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2240
2241                 skb_queue_tail(queue, skb);
2242         } else {
2243                 /* Fragmented */
2244                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2245
2246                 skb_shinfo(skb)->frag_list = NULL;
2247
2248                 /* Queue all fragments atomically */
2249                 spin_lock(&queue->lock);
2250
2251                 __skb_queue_tail(queue, skb);
2252
2253                 flags &= ~ACL_START;
2254                 flags |= ACL_CONT;
2255                 do {
2256                         skb = list; list = list->next;
2257
2258                         skb->dev = (void *) hdev;
2259                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2260                         hci_add_acl_hdr(skb, conn->handle, flags);
2261
2262                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2263
2264                         __skb_queue_tail(queue, skb);
2265                 } while (list);
2266
2267                 spin_unlock(&queue->lock);
2268         }
2269 }
2270
2271 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2272 {
2273         struct hci_conn *conn = chan->conn;
2274         struct hci_dev *hdev = conn->hdev;
2275
2276         BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2277
2278         skb->dev = (void *) hdev;
2279         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2280         hci_add_acl_hdr(skb, conn->handle, flags);
2281
2282         hci_queue_acl(conn, &chan->data_q, skb, flags);
2283
2284         queue_work(hdev->workqueue, &hdev->tx_work);
2285 }
2286 EXPORT_SYMBOL(hci_send_acl);
2287
2288 /* Send SCO data */
2289 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2290 {
2291         struct hci_dev *hdev = conn->hdev;
2292         struct hci_sco_hdr hdr;
2293
2294         BT_DBG("%s len %d", hdev->name, skb->len);
2295
2296         hdr.handle = cpu_to_le16(conn->handle);
2297         hdr.dlen   = skb->len;
2298
2299         skb_push(skb, HCI_SCO_HDR_SIZE);
2300         skb_reset_transport_header(skb);
2301         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2302
2303         skb->dev = (void *) hdev;
2304         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2305
2306         skb_queue_tail(&conn->data_q, skb);
2307         queue_work(hdev->workqueue, &hdev->tx_work);
2308 }
2309 EXPORT_SYMBOL(hci_send_sco);
2310
2311 /* ---- HCI TX task (outgoing data) ---- */
2312
2313 /* HCI Connection scheduler */
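/* Pick, per link type, the connection with the fewest packets in flight
 * and grant it a fair share of the free controller buffers. Worked
 * example: with 8 free ACL slots (cnt == 8) and three ACL connections
 * holding queued data (num == 3), the chosen connection gets a quote of
 * 8 / 3 == 2 packets per round; the quote never drops below 1.
 */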
2314 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2315 {
2316         struct hci_conn_hash *h = &hdev->conn_hash;
2317         struct hci_conn *conn = NULL, *c;
2318         int num = 0, min = ~0;
2319
2320         /* We don't have to lock device here. Connections are always
2321          * added and removed with TX task disabled. */
2322
2323         rcu_read_lock();
2324
2325         list_for_each_entry_rcu(c, &h->list, list) {
2326                 if (c->type != type || skb_queue_empty(&c->data_q))
2327                         continue;
2328
2329                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2330                         continue;
2331
2332                 num++;
2333
2334                 if (c->sent < min) {
2335                         min  = c->sent;
2336                         conn = c;
2337                 }
2338
2339                 if (hci_conn_num(hdev, type) == num)
2340                         break;
2341         }
2342
2343         rcu_read_unlock();
2344
2345         if (conn) {
2346                 int cnt, q;
2347
2348                 switch (conn->type) {
2349                 case ACL_LINK:
2350                         cnt = hdev->acl_cnt;
2351                         break;
2352                 case SCO_LINK:
2353                 case ESCO_LINK:
2354                         cnt = hdev->sco_cnt;
2355                         break;
2356                 case LE_LINK:
2357                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2358                         break;
2359                 default:
2360                         cnt = 0;
2361                         BT_ERR("Unknown link type");
2362                 }
2363
2364                 q = cnt / num;
2365                 *quote = q ? q : 1;
2366         } else
2367                 *quote = 0;
2368
2369         BT_DBG("conn %p quote %d", conn, *quote);
2370         return conn;
2371 }
2372
2373 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2374 {
2375         struct hci_conn_hash *h = &hdev->conn_hash;
2376         struct hci_conn *c;
2377
2378         BT_ERR("%s link tx timeout", hdev->name);
2379
2380         rcu_read_lock();
2381
2382         /* Kill stalled connections */
2383         list_for_each_entry_rcu(c, &h->list, list) {
2384                 if (c->type == type && c->sent) {
2385                         BT_ERR("%s killing stalled connection %s",
2386                                 hdev->name, batostr(&c->dst));
2387                         hci_acl_disconn(c, 0x13);
2388                 }
2389         }
2390
2391         rcu_read_unlock();
2392 }
2393
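/* Channel-aware variant of hci_low_sent(): only channels whose head skb
 * carries the highest priority currently queued are considered, and among
 * those the channel on the least-busy connection (lowest conn->sent)
 * wins. The quote is again a fair share of the free controller buffers.
 */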
2394 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2395                                                 int *quote)
2396 {
2397         struct hci_conn_hash *h = &hdev->conn_hash;
2398         struct hci_chan *chan = NULL;
2399         int num = 0, min = ~0, cur_prio = 0;
2400         struct hci_conn *conn;
2401         int cnt, q, conn_num = 0;
2402
2403         BT_DBG("%s", hdev->name);
2404
2405         rcu_read_lock();
2406
2407         list_for_each_entry_rcu(conn, &h->list, list) {
2408                 struct hci_chan *tmp;
2409
2410                 if (conn->type != type)
2411                         continue;
2412
2413                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2414                         continue;
2415
2416                 conn_num++;
2417
2418                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2419                         struct sk_buff *skb;
2420
2421                         if (skb_queue_empty(&tmp->data_q))
2422                                 continue;
2423
2424                         skb = skb_peek(&tmp->data_q);
2425                         if (skb->priority < cur_prio)
2426                                 continue;
2427
2428                         if (skb->priority > cur_prio) {
2429                                 num = 0;
2430                                 min = ~0;
2431                                 cur_prio = skb->priority;
2432                         }
2433
2434                         num++;
2435
2436                         if (conn->sent < min) {
2437                                 min  = conn->sent;
2438                                 chan = tmp;
2439                         }
2440                 }
2441
2442                 if (hci_conn_num(hdev, type) == conn_num)
2443                         break;
2444         }
2445
2446         rcu_read_unlock();
2447
2448         if (!chan)
2449                 return NULL;
2450
2451         switch (chan->conn->type) {
2452         case ACL_LINK:
2453                 cnt = hdev->acl_cnt;
2454                 break;
2455         case SCO_LINK:
2456         case ESCO_LINK:
2457                 cnt = hdev->sco_cnt;
2458                 break;
2459         case LE_LINK:
2460                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2461                 break;
2462         default:
2463                 cnt = 0;
2464                 BT_ERR("Unknown link type");
2465         }
2466
2467         q = cnt / num;
2468         *quote = q ? q : 1;
2469         BT_DBG("chan %p quote %d", chan, *quote);
2470         return chan;
2471 }
2472
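/* After a TX round, promote channels that were left completely unserved
 * (chan->sent == 0 but data still queued) to priority HCI_PRIO_MAX - 1,
 * so that lower-priority traffic cannot be starved indefinitely.
 */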
2473 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2474 {
2475         struct hci_conn_hash *h = &hdev->conn_hash;
2476         struct hci_conn *conn;
2477         int num = 0;
2478
2479         BT_DBG("%s", hdev->name);
2480
2481         rcu_read_lock();
2482
2483         list_for_each_entry_rcu(conn, &h->list, list) {
2484                 struct hci_chan *chan;
2485
2486                 if (conn->type != type)
2487                         continue;
2488
2489                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2490                         continue;
2491
2492                 num++;
2493
2494                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2495                         struct sk_buff *skb;
2496
2497                         if (chan->sent) {
2498                                 chan->sent = 0;
2499                                 continue;
2500                         }
2501
2502                         if (skb_queue_empty(&chan->data_q))
2503                                 continue;
2504
2505                         skb = skb_peek(&chan->data_q);
2506                         if (skb->priority >= HCI_PRIO_MAX - 1)
2507                                 continue;
2508
2509                         skb->priority = HCI_PRIO_MAX - 1;
2510
2511                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2512                                                                 skb->priority);
2513                 }
2514
2515                 if (hci_conn_num(hdev, type) == num)
2516                         break;
2517         }
2518
2519         rcu_read_unlock();
2520
2521 }
2522
2523 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2524 {
2525         /* Calculate count of blocks used by this packet */
2526         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2527 }
2528
2529 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2530 {
2531         if (!test_bit(HCI_RAW, &hdev->flags)) {
2532                 /* ACL tx timeout must be longer than maximum
2533                  * link supervision timeout (40.9 seconds) */
2534                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2535                                         msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2536                         hci_link_tx_to(hdev, ACL_LINK);
2537         }
2538 }
2539
2540 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2541 {
2542         unsigned int cnt = hdev->acl_cnt;
2543         struct hci_chan *chan;
2544         struct sk_buff *skb;
2545         int quote;
2546
2547         __check_timeout(hdev, cnt);
2548
2549         while (hdev->acl_cnt &&
2550                         (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2551                 u32 priority = (skb_peek(&chan->data_q))->priority;
2552                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2553                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2554                                         skb->len, skb->priority);
2555
2556                         /* Stop if priority has changed */
2557                         if (skb->priority < priority)
2558                                 break;
2559
2560                         skb = skb_dequeue(&chan->data_q);
2561
2562                         hci_conn_enter_active_mode(chan->conn,
2563                                                    bt_cb(skb)->force_active);
2564
2565                         hci_send_frame(skb);
2566                         hdev->acl_last_tx = jiffies;
2567
2568                         hdev->acl_cnt--;
2569                         chan->sent++;
2570                         chan->conn->sent++;
2571                 }
2572         }
2573
2574         if (cnt != hdev->acl_cnt)
2575                 hci_prio_recalculate(hdev, ACL_LINK);
2576 }
2577
2578 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2579 {
2580         unsigned int cnt = hdev->block_cnt;
2581         struct hci_chan *chan;
2582         struct sk_buff *skb;
2583         int quote;
2584
2585         __check_timeout(hdev, cnt);
2586
2587         while (hdev->block_cnt > 0 &&
2588                         (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2589                 u32 priority = (skb_peek(&chan->data_q))->priority;
2590                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2591                         int blocks;
2592
2593                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2594                                                 skb->len, skb->priority);
2595
2596                         /* Stop if priority has changed */
2597                         if (skb->priority < priority)
2598                                 break;
2599
2600                         skb = skb_dequeue(&chan->data_q);
2601
2602                         blocks = __get_blocks(hdev, skb);
2603                         if (blocks > hdev->block_cnt)
2604                                 return;
2605
2606                         hci_conn_enter_active_mode(chan->conn,
2607                                                 bt_cb(skb)->force_active);
2608
2609                         hci_send_frame(skb);
2610                         hdev->acl_last_tx = jiffies;
2611
2612                         hdev->block_cnt -= blocks;
2613                         quote -= blocks;
2614
2615                         chan->sent += blocks;
2616                         chan->conn->sent += blocks;
2617                 }
2618         }
2619
2620         if (cnt != hdev->block_cnt)
2621                 hci_prio_recalculate(hdev, ACL_LINK);
2622 }
2623
2624 static inline void hci_sched_acl(struct hci_dev *hdev)
2625 {
2626         BT_DBG("%s", hdev->name);
2627
2628         if (!hci_conn_num(hdev, ACL_LINK))
2629                 return;
2630
2631         switch (hdev->flow_ctl_mode) {
2632         case HCI_FLOW_CTL_MODE_PACKET_BASED:
2633                 hci_sched_acl_pkt(hdev);
2634                 break;
2635
2636         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2637                 hci_sched_acl_blk(hdev);
2638                 break;
2639         }
2640 }
2641
2642 /* Schedule SCO */
2643 static inline void hci_sched_sco(struct hci_dev *hdev)
2644 {
2645         struct hci_conn *conn;
2646         struct sk_buff *skb;
2647         int quote;
2648
2649         BT_DBG("%s", hdev->name);
2650
2651         if (!hci_conn_num(hdev, SCO_LINK))
2652                 return;
2653
2654         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2655                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2656                         BT_DBG("skb %p len %d", skb, skb->len);
2657                         hci_send_frame(skb);
2658
2659                         conn->sent++;
2660                         if (conn->sent == ~0)
2661                                 conn->sent = 0;
2662                 }
2663         }
2664 }
2665
2666 static inline void hci_sched_esco(struct hci_dev *hdev)
2667 {
2668         struct hci_conn *conn;
2669         struct sk_buff *skb;
2670         int quote;
2671
2672         BT_DBG("%s", hdev->name);
2673
2674         if (!hci_conn_num(hdev, ESCO_LINK))
2675                 return;
2676
2677         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2678                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2679                         BT_DBG("skb %p len %d", skb, skb->len);
2680                         hci_send_frame(skb);
2681
2682                         conn->sent++;
2683                         if (conn->sent == ~0)
2684                                 conn->sent = 0;
2685                 }
2686         }
2687 }
2688
2689 static inline void hci_sched_le(struct hci_dev *hdev)
2690 {
2691         struct hci_chan *chan;
2692         struct sk_buff *skb;
2693         int quote, cnt, tmp;
2694
2695         BT_DBG("%s", hdev->name);
2696
2697         if (!hci_conn_num(hdev, LE_LINK))
2698                 return;
2699
2700         if (!test_bit(HCI_RAW, &hdev->flags)) {
2701                 /* LE tx timeout must be longer than maximum
2702                  * link supervision timeout (40.9 seconds) */
2703                 if (!hdev->le_cnt && hdev->le_pkts &&
2704                                 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2705                         hci_link_tx_to(hdev, LE_LINK);
2706         }
2707
2708         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2709         tmp = cnt;
2710         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2711                 u32 priority = (skb_peek(&chan->data_q))->priority;
2712                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2713                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2714                                         skb->len, skb->priority);
2715
2716                         /* Stop if priority has changed */
2717                         if (skb->priority < priority)
2718                                 break;
2719
2720                         skb = skb_dequeue(&chan->data_q);
2721
2722                         hci_send_frame(skb);
2723                         hdev->le_last_tx = jiffies;
2724
2725                         cnt--;
2726                         chan->sent++;
2727                         chan->conn->sent++;
2728                 }
2729         }
2730
2731         if (hdev->le_pkts)
2732                 hdev->le_cnt = cnt;
2733         else
2734                 hdev->acl_cnt = cnt;
2735
2736         if (cnt != tmp)
2737                 hci_prio_recalculate(hdev, LE_LINK);
2738 }
2739
2740 static void hci_tx_work(struct work_struct *work)
2741 {
2742         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2743         struct sk_buff *skb;
2744
2745         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2746                 hdev->sco_cnt, hdev->le_cnt);
2747
2748         /* Schedule queues and send stuff to HCI driver */
2749
2750         hci_sched_acl(hdev);
2751
2752         hci_sched_sco(hdev);
2753
2754         hci_sched_esco(hdev);
2755
2756         hci_sched_le(hdev);
2757
2758         /* Send next queued raw (unknown type) packet */
2759         while ((skb = skb_dequeue(&hdev->raw_q)))
2760                 hci_send_frame(skb);
2761 }
2762
2763 /* ----- HCI RX task (incoming data processing) ----- */
2764
2765 /* ACL data packet */
2766 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2767 {
2768         struct hci_acl_hdr *hdr = (void *) skb->data;
2769         struct hci_conn *conn;
2770         __u16 handle, flags;
2771
2772         skb_pull(skb, HCI_ACL_HDR_SIZE);
2773
2774         handle = __le16_to_cpu(hdr->handle);
2775         flags  = hci_flags(handle);
2776         handle = hci_handle(handle);
2777
2778         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2779
2780         hdev->stat.acl_rx++;
2781
2782         hci_dev_lock(hdev);
2783         conn = hci_conn_hash_lookup_handle(hdev, handle);
2784         hci_dev_unlock(hdev);
2785
2786         if (conn) {
2787                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2788
2789                 /* Send to upper protocol */
2790                 l2cap_recv_acldata(conn, skb, flags);
2791                 return;
2792         } else {
2793                 BT_ERR("%s ACL packet for unknown connection handle %d",
2794                         hdev->name, handle);
2795         }
2796
2797         kfree_skb(skb);
2798 }
2799
2800 /* SCO data packet */
2801 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2802 {
2803         struct hci_sco_hdr *hdr = (void *) skb->data;
2804         struct hci_conn *conn;
2805         __u16 handle;
2806
2807         skb_pull(skb, HCI_SCO_HDR_SIZE);
2808
2809         handle = __le16_to_cpu(hdr->handle);
2810
2811         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2812
2813         hdev->stat.sco_rx++;
2814
2815         hci_dev_lock(hdev);
2816         conn = hci_conn_hash_lookup_handle(hdev, handle);
2817         hci_dev_unlock(hdev);
2818
2819         if (conn) {
2820                 /* Send to upper protocol */
2821                 sco_recv_scodata(conn, skb);
2822                 return;
2823         } else {
2824                 BT_ERR("%s SCO packet for unknown connection handle %d",
2825                         hdev->name, handle);
2826         }
2827
2828         kfree_skb(skb);
2829 }
2830
2831 static void hci_rx_work(struct work_struct *work)
2832 {
2833         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2834         struct sk_buff *skb;
2835
2836         BT_DBG("%s", hdev->name);
2837
2838         while ((skb = skb_dequeue(&hdev->rx_q))) {
2839                 /* Send copy to monitor */
2840                 hci_send_to_monitor(hdev, skb);
2841
2842                 if (atomic_read(&hdev->promisc)) {
2843                         /* Send copy to the sockets */
2844                         hci_send_to_sock(hdev, skb);
2845                 }
2846
2847                 if (test_bit(HCI_RAW, &hdev->flags)) {
2848                         kfree_skb(skb);
2849                         continue;
2850                 }
2851
2852                 if (test_bit(HCI_INIT, &hdev->flags)) {
2853                         /* Don't process data packets in this state. */
2854                         switch (bt_cb(skb)->pkt_type) {
2855                         case HCI_ACLDATA_PKT:
2856                         case HCI_SCODATA_PKT:
2857                                 kfree_skb(skb);
2858                                 continue;
2859                         }
2860                 }
2861
2862                 /* Process frame */
2863                 switch (bt_cb(skb)->pkt_type) {
2864                 case HCI_EVENT_PKT:
2865                         BT_DBG("%s Event packet", hdev->name);
2866                         hci_event_packet(hdev, skb);
2867                         break;
2868
2869                 case HCI_ACLDATA_PKT:
2870                         BT_DBG("%s ACL data packet", hdev->name);
2871                         hci_acldata_packet(hdev, skb);
2872                         break;
2873
2874                 case HCI_SCODATA_PKT:
2875                         BT_DBG("%s SCO data packet", hdev->name);
2876                         hci_scodata_packet(hdev, skb);
2877                         break;
2878
2879                 default:
2880                         kfree_skb(skb);
2881                         break;
2882                 }
2883         }
2884 }
2885
2886 static void hci_cmd_work(struct work_struct *work)
2887 {
2888         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2889         struct sk_buff *skb;
2890
2891         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2892
2893         /* Send queued commands */
2894         if (atomic_read(&hdev->cmd_cnt)) {
2895                 skb = skb_dequeue(&hdev->cmd_q);
2896                 if (!skb)
2897                         return;
2898
2899                 kfree_skb(hdev->sent_cmd);
2900
2901                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2902                 if (hdev->sent_cmd) {
2903                         atomic_dec(&hdev->cmd_cnt);
2904                         hci_send_frame(skb);
2905                         if (test_bit(HCI_RESET, &hdev->flags))
2906                                 del_timer(&hdev->cmd_timer);
2907                         else
2908                                 mod_timer(&hdev->cmd_timer,
2909                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2910                 } else {
2911                         skb_queue_head(&hdev->cmd_q, skb);
2912                         queue_work(hdev->workqueue, &hdev->cmd_work);
2913                 }
2914         }
2915 }
2916
2917 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2918 {
2919         /* General inquiry access code (GIAC) */
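        /* GIAC is 0x9e8b33, stored in little-endian byte order; the
         * length argument is in units of 1.28 seconds. */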
2920         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2921         struct hci_cp_inquiry cp;
2922
2923         BT_DBG("%s", hdev->name);
2924
2925         if (test_bit(HCI_INQUIRY, &hdev->flags))
2926                 return -EINPROGRESS;
2927
2928         inquiry_cache_flush(hdev);
2929
2930         memset(&cp, 0, sizeof(cp));
2931         memcpy(&cp.lap, lap, sizeof(cp.lap));
2932         cp.length  = length;
2933
2934         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2935 }
2936
2937 int hci_cancel_inquiry(struct hci_dev *hdev)
2938 {
2939         BT_DBG("%s", hdev->name);
2940
2941         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2942                 return -EPERM;
2943
2944         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2945 }