Bluetooth: hci_persistent_key should return bool

net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

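/* How long (in ms) an automatically powered-on controller stays up
 * before the power_off work runs (see hci_power_on below).
 */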
#define AUTO_OFF_TIMEOUT 2000

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase, check whether the completed command
         * matches the last init command, and if not just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                struct sk_buff *skb;

                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */

                if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
                        return;

                skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
                if (skb) {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }

                return;
        }

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

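        /* Fire off the request callback and sleep until hci_req_complete()
         * or hci_req_cancel() wakes us up, or the timeout expires.
         */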
        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_RESET, &hdev->flags);
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
        }

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void amp_init(struct hci_dev *hdev)
{
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Reset */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                queue_work(hdev->workqueue, &hdev->cmd_work);
        }
        skb_queue_purge(&hdev->driver_init);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(hdev);
                break;

        case HCI_AMP:
                amp_init(hdev);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                hdev->discovery.type = 0;
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

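/* Keep the resolve list ordered by signal strength (RSSI closest to
 * zero first) so that name resolution is attempted for the nearest
 * devices first.
 */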
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                                abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                                                data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
                                        ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

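        /* ir.length is in units of 1.28 seconds per the HCI spec, so
         * allowing 2000 ms per unit leaves headroom for the inquiry
         * complete event to arrive.
         */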
        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices if
           enable_hs is not set */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

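                /* Run the controller init sequence synchronously. While
                 * HCI_INIT is set, hci_req_complete() only matches the
                 * command recorded in init_last_cmd.
                 */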
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (lmp_host_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
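        /* Controllers with HCI_QUIRK_NO_RESET skip the reset during init
         * (see bredr_init above), which we take to be the reason they are
         * the only ones sent a reset here on close instead.
         */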
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
                                test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Clear flags */
        hdev->flags = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

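        /* Cap the request so the allocation below stays within two pages */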
        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        hci_init_sysfs(hdev);
        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                schedule_delayed_work(&hdev->power_off,
                                        msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                                        power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->uuids) {
                struct bt_uuid *uuid;

                uuid = list_entry(p, struct bt_uuid, list);

                list_del(p);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

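/* Decide whether a link key should be stored permanently or only kept
 * for the lifetime of the connection. Debug keys and keys resulting
 * from no-bonding pairings are treated as non-persistent.
 */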
static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                                                u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                                memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                                        bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}
EXPORT_SYMBOL(hci_find_ltk_by_addr);

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;
        bool persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
                                        (!conn || conn->remote_auth == 0xff) &&
                                        old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, 16);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

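        /* The key was reported to userspace above; if it is not meant to
         * be persistent, drop it from the kernel's list now.
         */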
        if (!persistent) {
                list_del(&key->list);
                kfree(key);
        }

        return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
                int new_key, u8 authenticated, u8 tk[16], u8 enc_size, u16
                ediv, u8 rand[8])
{
        struct smp_ltk *key, *old_key;

        if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
                return 0;

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->enc_size = enc_size;
        key->type = type;
        memcpy(key->rand, rand, sizeof(key->rand));

        if (!new_key)
                return 0;

        if (type & HCI_SMP_LTK)
                mgmt_new_ltk(hdev, key, 1);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&key->list);
        kfree(key);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr))
                        continue;

                BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        BT_ERR("%s command tx timeout", hdev->name);
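        /* Assume the controller lost the command: restore one command
         * credit and kick the queue so transmission does not stall.
         */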
        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list)
                if (bacmp(bdaddr, &data->bdaddr) == 0)
                        return data;

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&data->list);
        kfree(data);

        return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }

        return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
                            u8 *randomizer)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);

        if (!data) {
                data = kmalloc(sizeof(*data), GFP_ATOMIC);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                list_add(&data->list, &hdev->remote_oob_data);
        }

        memcpy(data->hash, hash, sizeof(data->hash));
        memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

        BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

        return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->blacklist, list)
                if (bacmp(bdaddr, &b->bdaddr) == 0)
                        return b;

        return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->blacklist) {
                struct bdaddr_list *b;

                b = list_entry(p, struct bdaddr_list, list);

                list_del(p);
                kfree(b);
        }

        return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return -EBADF;

        if (hci_blacklist_lookup(hdev, bdaddr))
                return -EEXIST;

        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, bdaddr);

        list_add(&entry->list, &hdev->blacklist);

        return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return hci_blacklist_clear(hdev);

        entry = hci_blacklist_lookup(hdev, bdaddr);
        if (!entry)
                return -ENOENT;

        list_del(&entry->list);
        kfree(entry);

        return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void hci_clear_adv_cache(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_work.work);

        hci_dev_lock(hdev);

        hci_adv_entries_clear(hdev);

        hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
        struct adv_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        BT_DBG("%s adv cache cleared", hdev->name);

        return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct adv_entry *entry;

        list_for_each_entry(entry, &hdev->adv_entries, list)
                if (bacmp(bdaddr, &entry->bdaddr) == 0)
                        return entry;

        return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
        if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
                return 1;

        return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
                                        struct hci_ev_le_advertising_info *ev)
{
        struct adv_entry *entry;

        if (!is_connectable_adv(ev->evt_type))
                return -EINVAL;

        /* Only new entries should be added to adv_entries. So, if
         * bdaddr was found, don't add it. */
        if (hci_find_adv_entry(hdev, &ev->bdaddr))
                return 0;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, &ev->bdaddr);
        entry->bdaddr_type = ev->bdaddr_type;

        list_add(&entry->list, &hdev->adv_entries);

        BT_DBG("%s adv entry added: address %s type %u", hdev->name,
                                batostr(&entry->bdaddr), entry->bdaddr_type);

        return 0;
}

static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
        struct le_scan_params *param = (struct le_scan_params *) opt;
        struct hci_cp_le_set_scan_param cp;

        memset(&cp, 0, sizeof(cp));
        cp.type = param->type;
        cp.interval = cpu_to_le16(param->interval);
        cp.window = cpu_to_le16(param->window);

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = 1;

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

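/* Set scan parameters, enable scanning and schedule the delayed work
 * that disables the scan again after the requested timeout.
 */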
static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
                          u16 window, int timeout)
{
        long timeo = msecs_to_jiffies(3000);
        struct le_scan_params param;
        int err;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                return -EINPROGRESS;

        param.type = type;
        param.interval = interval;
        param.window = window;

        hci_req_lock(hdev);

        err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
                            timeo);
        if (!err)
                err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

        hci_req_unlock(hdev);

        if (err < 0)
                return err;

        schedule_delayed_work(&hdev->le_scan_disable,
                              msecs_to_jiffies(timeout));

        return 0;
}

static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        struct hci_cp_le_set_scan_enable cp;

        BT_DBG("%s", hdev->name);

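        /* cp.enable is left at zero, which disables scanning */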
        memset(&cp, 0, sizeof(cp));

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
        struct le_scan_params *param = &hdev->le_scan_params;

        BT_DBG("%s", hdev->name);

        hci_do_le_scan(hdev, param->type, param->interval, param->window,
                       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
                int timeout)
{
        struct le_scan_params *param = &hdev->le_scan_params;

        BT_DBG("%s", hdev->name);

        if (work_busy(&hdev->le_scan))
                return -EINPROGRESS;

        param->type = type;
        param->interval = interval;
        param->window = window;
        param->timeout = timeout;

        queue_work(system_long_wq, &hdev->le_scan);

        return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id, error;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        if (!hdev->open || !hdev->close)
                return -EINVAL;

        /* Do not allow HCI_AMP devices to register at index 0,
         * so the index can be used as the AMP controller ID.
         */
        id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

        write_lock(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add_tail(&hdev->list, head);

        mutex_init(&hdev->lock);

        hdev->flags = 0;
        hdev->dev_flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->io_capability = 0x03; /* No Input No Output */

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        discovery_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->mgmt_pending);

        INIT_LIST_HEAD(&hdev->blacklist);

        INIT_LIST_HEAD(&hdev->uuids);

        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);

        INIT_LIST_HEAD(&hdev->remote_oob_data);

        INIT_LIST_HEAD(&hdev->adv_entries);

        INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        INIT_WORK(&hdev->le_scan, le_scan_work);

        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

        write_unlock(&hci_dev_list_lock);

        hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
                                                        WQ_MEM_RECLAIM, 1);
        if (!hdev->workqueue) {
                error = -ENOMEM;
                goto err;
        }

        error = hci_add_sysfs(hdev);
        if (error < 0)
                goto err_wqueue;

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
        set_bit(HCI_SETUP, &hdev->dev_flags);
        schedule_work(&hdev->power_on);

        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);

        return id;

err_wqueue:
        destroy_workqueue(hdev->workqueue);
err:
        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);

        return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        set_bit(HCI_UNREGISTER, &hdev->dev_flags);

        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        if (!test_bit(HCI_INIT, &hdev->flags) &&
                                !test_bit(HCI_SETUP, &hdev->dev_flags)) {
                hci_dev_lock(hdev);
                mgmt_index_removed(hdev);
                hci_dev_unlock(hdev);
        }

        /* mgmt_index_removed should take care of emptying the
         * pending list */
        BUG_ON(!list_empty(&hdev->mgmt_pending));
1880
1881         hci_notify(hdev, HCI_DEV_UNREG);
1882
1883         if (hdev->rfkill) {
1884                 rfkill_unregister(hdev->rfkill);
1885                 rfkill_destroy(hdev->rfkill);
1886         }
1887
1888         hci_del_sysfs(hdev);
1889
1890         cancel_delayed_work_sync(&hdev->adv_work);
1891
1892         destroy_workqueue(hdev->workqueue);
1893
1894         hci_dev_lock(hdev);
1895         hci_blacklist_clear(hdev);
1896         hci_uuids_clear(hdev);
1897         hci_link_keys_clear(hdev);
1898         hci_smp_ltks_clear(hdev);
1899         hci_remote_oob_data_clear(hdev);
1900         hci_adv_entries_clear(hdev);
1901         hci_dev_unlock(hdev);
1902
1903         hci_dev_put(hdev);
1904 }
1905 EXPORT_SYMBOL(hci_unregister_dev);
1906
1907 /* Suspend HCI device */
1908 int hci_suspend_dev(struct hci_dev *hdev)
1909 {
1910         hci_notify(hdev, HCI_DEV_SUSPEND);
1911         return 0;
1912 }
1913 EXPORT_SYMBOL(hci_suspend_dev);
1914
1915 /* Resume HCI device */
1916 int hci_resume_dev(struct hci_dev *hdev)
1917 {
1918         hci_notify(hdev, HCI_DEV_RESUME);
1919         return 0;
1920 }
1921 EXPORT_SYMBOL(hci_resume_dev);
1922
1923 /* Receive frame from HCI drivers */
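/* Drivers pass every complete packet up through this hook, with
 * bt_cb(skb)->pkt_type already set and skb->dev pointing at the
 * hci_dev. Frames are only accepted while the device is up or being
 * initialized; otherwise the skb is dropped with -ENXIO.
 */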
1924 int hci_recv_frame(struct sk_buff *skb)
1925 {
1926         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1927         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1928                                 && !test_bit(HCI_INIT, &hdev->flags))) {
1929                 kfree_skb(skb);
1930                 return -ENXIO;
1931         }
1932
1933         /* Incoming skb */
1934         bt_cb(skb)->incoming = 1;
1935
1936         /* Time stamp */
1937         __net_timestamp(skb);
1938
1939         skb_queue_tail(&hdev->rx_q, skb);
1940         queue_work(hdev->workqueue, &hdev->rx_work);
1941
1942         return 0;
1943 }
1944 EXPORT_SYMBOL(hci_recv_frame);
1945
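/* Core reassembly helper. Partial packets accumulate in
 * hdev->reassembly[index] while bt_skb_cb->expect counts the bytes
 * still missing. A fresh skb is sized for the worst case payload of
 * its packet type; once the header is complete the real payload length
 * is read from it, and when expect reaches zero the finished frame is
 * handed to hci_recv_frame(). Returns how many input bytes were left
 * unconsumed, or a negative error.
 */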
1946 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1947                                                   int count, __u8 index)
1948 {
1949         int len = 0;
1950         int hlen = 0;
1951         int remain = count;
1952         struct sk_buff *skb;
1953         struct bt_skb_cb *scb;
1954
1955         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1956                                 index >= NUM_REASSEMBLY)
1957                 return -EILSEQ;
1958
1959         skb = hdev->reassembly[index];
1960
1961         if (!skb) {
1962                 switch (type) {
1963                 case HCI_ACLDATA_PKT:
1964                         len = HCI_MAX_FRAME_SIZE;
1965                         hlen = HCI_ACL_HDR_SIZE;
1966                         break;
1967                 case HCI_EVENT_PKT:
1968                         len = HCI_MAX_EVENT_SIZE;
1969                         hlen = HCI_EVENT_HDR_SIZE;
1970                         break;
1971                 case HCI_SCODATA_PKT:
1972                         len = HCI_MAX_SCO_SIZE;
1973                         hlen = HCI_SCO_HDR_SIZE;
1974                         break;
1975                 }
1976
1977                 skb = bt_skb_alloc(len, GFP_ATOMIC);
1978                 if (!skb)
1979                         return -ENOMEM;
1980
1981                 scb = (void *) skb->cb;
1982                 scb->expect = hlen;
1983                 scb->pkt_type = type;
1984
1985                 skb->dev = (void *) hdev;
1986                 hdev->reassembly[index] = skb;
1987         }
1988
1989         while (count) {
1990                 scb = (void *) skb->cb;
1991                 len = min_t(uint, scb->expect, count);
1992
1993                 memcpy(skb_put(skb, len), data, len);
1994
1995                 count -= len;
1996                 data += len;
1997                 scb->expect -= len;
1998                 remain = count;
1999
2000                 switch (type) {
2001                 case HCI_EVENT_PKT:
2002                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2003                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2004                                 scb->expect = h->plen;
2005
2006                                 if (skb_tailroom(skb) < scb->expect) {
2007                                         kfree_skb(skb);
2008                                         hdev->reassembly[index] = NULL;
2009                                         return -ENOMEM;
2010                                 }
2011                         }
2012                         break;
2013
2014                 case HCI_ACLDATA_PKT:
2015                         if (skb->len == HCI_ACL_HDR_SIZE) {
2016                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2017                                 scb->expect = __le16_to_cpu(h->dlen);
2018
2019                                 if (skb_tailroom(skb) < scb->expect) {
2020                                         kfree_skb(skb);
2021                                         hdev->reassembly[index] = NULL;
2022                                         return -ENOMEM;
2023                                 }
2024                         }
2025                         break;
2026
2027                 case HCI_SCODATA_PKT:
2028                         if (skb->len == HCI_SCO_HDR_SIZE) {
2029                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2030                                 scb->expect = h->dlen;
2031
2032                                 if (skb_tailroom(skb) < scb->expect) {
2033                                         kfree_skb(skb);
2034                                         hdev->reassembly[index] = NULL;
2035                                         return -ENOMEM;
2036                                 }
2037                         }
2038                         break;
2039                 }
2040
2041                 if (scb->expect == 0) {
2042                         /* Complete frame */
2043
2044                         bt_cb(skb)->pkt_type = type;
2045                         hci_recv_frame(skb);
2046
2047                         hdev->reassembly[index] = NULL;
2048                         return remain;
2049                 }
2050         }
2051
2052         return remain;
2053 }
2054
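/* Reassembly for drivers that already know the packet type. Each type
 * gets its own slot: with HCI_ACLDATA_PKT = 2, HCI_SCODATA_PKT = 3 and
 * HCI_EVENT_PKT = 4, the slot index is simply type - 1, which keeps
 * slot 0 free for the stream based variant below.
 */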
2055 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2056 {
2057         int rem = 0;
2058
2059         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2060                 return -EILSEQ;
2061
2062         while (count) {
2063                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2064                 if (rem < 0)
2065                         return rem;
2066
2067                 data += (count - rem);
2068                 count = rem;
2069         }
2070
2071         return rem;
2072 }
2073 EXPORT_SYMBOL(hci_recv_fragment);
2074
2075 #define STREAM_REASSEMBLY 0
2076
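/* Reassembly for raw byte streams (UART style transports) where every
 * frame starts with a one byte packet type indicator. The type byte is
 * peeled off at the start of each new frame and the remainder is fed
 * through hci_reassembly() using the dedicated stream slot.
 */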
2077 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2078 {
2079         int type;
2080         int rem = 0;
2081
2082         while (count) {
2083                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2084
2085                 if (!skb) {
2086                         struct { char type; } *pkt;
2087
2088                         /* Start of the frame */
2089                         pkt = data;
2090                         type = pkt->type;
2091
2092                         data++;
2093                         count--;
2094                 } else
2095                         type = bt_cb(skb)->pkt_type;
2096
2097                 rem = hci_reassembly(hdev, type, data, count,
2098                                                         STREAM_REASSEMBLY);
2099                 if (rem < 0)
2100                         return rem;
2101
2102                 data += (count - rem);
2103                 count = rem;
2104         }
2105
2106         return rem;
2107 }
2108 EXPORT_SYMBOL(hci_recv_stream_fragment);
2109
2110 /* ---- Interface to upper protocols ---- */
2111
2112 int hci_register_cb(struct hci_cb *cb)
2113 {
2114         BT_DBG("%p name %s", cb, cb->name);
2115
2116         write_lock(&hci_cb_list_lock);
2117         list_add(&cb->list, &hci_cb_list);
2118         write_unlock(&hci_cb_list_lock);
2119
2120         return 0;
2121 }
2122 EXPORT_SYMBOL(hci_register_cb);
2123
2124 int hci_unregister_cb(struct hci_cb *cb)
2125 {
2126         BT_DBG("%p name %s", cb, cb->name);
2127
2128         write_lock(&hci_cb_list_lock);
2129         list_del(&cb->list);
2130         write_unlock(&hci_cb_list_lock);
2131
2132         return 0;
2133 }
2134 EXPORT_SYMBOL(hci_unregister_cb);
2135
2136 static int hci_send_frame(struct sk_buff *skb)
2137 {
2138         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2139
2140         if (!hdev) {
2141                 kfree_skb(skb);
2142                 return -ENODEV;
2143         }
2144
2145         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2146
2147         /* Time stamp */
2148         __net_timestamp(skb);
2149
2150         /* Send copy to monitor */
2151         hci_send_to_monitor(hdev, skb);
2152
2153         if (atomic_read(&hdev->promisc)) {
2154                 /* Send copy to the sockets */
2155                 hci_send_to_sock(hdev, skb);
2156         }
2157
2158         /* Get rid of skb owner, prior to sending to the driver. */
2159         skb_orphan(skb);
2160
2161         return hdev->send(skb);
2162 }
2163
2164 /* Send HCI command */
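/* Builds the three byte command header (little endian opcode plus
 * parameter length), appends the parameters and queues the skb on
 * cmd_q for hci_cmd_work() to send once the controller has credits.
 * Illustrative caller enabling page and inquiry scan:
 *
 *	__u8 scan = SCAN_INQUIRY | SCAN_PAGE;
 *	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 */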
2165 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2166 {
2167         int len = HCI_COMMAND_HDR_SIZE + plen;
2168         struct hci_command_hdr *hdr;
2169         struct sk_buff *skb;
2170
2171         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2172
2173         skb = bt_skb_alloc(len, GFP_ATOMIC);
2174         if (!skb) {
2175                 BT_ERR("%s no memory for command", hdev->name);
2176                 return -ENOMEM;
2177         }
2178
2179         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2180         hdr->opcode = cpu_to_le16(opcode);
2181         hdr->plen   = plen;
2182
2183         if (plen)
2184                 memcpy(skb_put(skb, plen), param, plen);
2185
2186         BT_DBG("skb len %d", skb->len);
2187
2188         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2189         skb->dev = (void *) hdev;
2190
2191         if (test_bit(HCI_INIT, &hdev->flags))
2192                 hdev->init_last_cmd = opcode;
2193
2194         skb_queue_tail(&hdev->cmd_q, skb);
2195         queue_work(hdev->workqueue, &hdev->cmd_work);
2196
2197         return 0;
2198 }
2199
2200 /* Get data from the previously sent command */
2201 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2202 {
2203         struct hci_command_hdr *hdr;
2204
2205         if (!hdev->sent_cmd)
2206                 return NULL;
2207
2208         hdr = (void *) hdev->sent_cmd->data;
2209
2210         if (hdr->opcode != cpu_to_le16(opcode))
2211                 return NULL;
2212
2213         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2214
2215         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2216 }
2217
2218 /* Send ACL data */
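/* The 16 bit handle field of the ACL header packs the 12 bit
 * connection handle into the low bits and the packet boundary and
 * broadcast flags into the top four bits; hci_handle_pack() does the
 * combining and hci_handle()/hci_flags() split it again on receive.
 */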
2219 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2220 {
2221         struct hci_acl_hdr *hdr;
2222         int len = skb->len;
2223
2224         skb_push(skb, HCI_ACL_HDR_SIZE);
2225         skb_reset_transport_header(skb);
2226         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2227         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2228         hdr->dlen   = cpu_to_le16(len);
2229 }
2230
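/* Queue an ACL frame and any fragments hanging off its frag_list. The
 * head fragment keeps the caller's ACL_START flags; every following
 * fragment is re-flagged ACL_CONT and gets its own ACL header. The
 * whole chain is queued under the queue lock so the TX scheduler can
 * never observe a half queued frame.
 */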
2231 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2232                                 struct sk_buff *skb, __u16 flags)
2233 {
2234         struct hci_dev *hdev = conn->hdev;
2235         struct sk_buff *list;
2236
2237         list = skb_shinfo(skb)->frag_list;
2238         if (!list) {
2239                 /* Non-fragmented */
2240                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2241
2242                 skb_queue_tail(queue, skb);
2243         } else {
2244                 /* Fragmented */
2245                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2246
2247                 skb_shinfo(skb)->frag_list = NULL;
2248
2249                 /* Queue all fragments atomically */
2250                 spin_lock(&queue->lock);
2251
2252                 __skb_queue_tail(queue, skb);
2253
2254                 flags &= ~ACL_START;
2255                 flags |= ACL_CONT;
2256                 do {
2257                         skb = list; list = list->next;
2258
2259                         skb->dev = (void *) hdev;
2260                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2261                         hci_add_acl_hdr(skb, conn->handle, flags);
2262
2263                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2264
2265                         __skb_queue_tail(queue, skb);
2266                 } while (list);
2267
2268                 spin_unlock(&queue->lock);
2269         }
2270 }
2271
2272 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2273 {
2274         struct hci_conn *conn = chan->conn;
2275         struct hci_dev *hdev = conn->hdev;
2276
2277         BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2278
2279         skb->dev = (void *) hdev;
2280         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2281         hci_add_acl_hdr(skb, conn->handle, flags);
2282
2283         hci_queue_acl(conn, &chan->data_q, skb, flags);
2284
2285         queue_work(hdev->workqueue, &hdev->tx_work);
2286 }
2287 EXPORT_SYMBOL(hci_send_acl);
2288
2289 /* Send SCO data */
2290 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2291 {
2292         struct hci_dev *hdev = conn->hdev;
2293         struct hci_sco_hdr hdr;
2294
2295         BT_DBG("%s len %d", hdev->name, skb->len);
2296
2297         hdr.handle = cpu_to_le16(conn->handle);
2298         hdr.dlen   = skb->len;
2299
2300         skb_push(skb, HCI_SCO_HDR_SIZE);
2301         skb_reset_transport_header(skb);
2302         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2303
2304         skb->dev = (void *) hdev;
2305         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2306
2307         skb_queue_tail(&conn->data_q, skb);
2308         queue_work(hdev->workqueue, &hdev->tx_work);
2309 }
2310 EXPORT_SYMBOL(hci_send_sco);
2311
2312 /* ---- HCI TX task (outgoing data) ---- */
2313
2314 /* HCI Connection scheduler */
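/* hci_low_sent() picks, among all connections of the given type that
 * have data queued, the one with the fewest packets currently in
 * flight, and grants it an equal share (at least one packet) of the
 * free controller buffers via *quote. This keeps one busy link from
 * starving the others.
 */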
2315 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2316 {
2317         struct hci_conn_hash *h = &hdev->conn_hash;
2318         struct hci_conn *conn = NULL, *c;
2319         int num = 0, min = ~0;
2320
2321         /* We don't have to lock device here. Connections are always
2322          * added and removed with TX task disabled. */
2323
2324         rcu_read_lock();
2325
2326         list_for_each_entry_rcu(c, &h->list, list) {
2327                 if (c->type != type || skb_queue_empty(&c->data_q))
2328                         continue;
2329
2330                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2331                         continue;
2332
2333                 num++;
2334
2335                 if (c->sent < min) {
2336                         min  = c->sent;
2337                         conn = c;
2338                 }
2339
2340                 if (hci_conn_num(hdev, type) == num)
2341                         break;
2342         }
2343
2344         rcu_read_unlock();
2345
2346         if (conn) {
2347                 int cnt, q;
2348
2349                 switch (conn->type) {
2350                 case ACL_LINK:
2351                         cnt = hdev->acl_cnt;
2352                         break;
2353                 case SCO_LINK:
2354                 case ESCO_LINK:
2355                         cnt = hdev->sco_cnt;
2356                         break;
2357                 case LE_LINK:
2358                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2359                         break;
2360                 default:
2361                         cnt = 0;
2362                         BT_ERR("Unknown link type");
2363                 }
2364
2365                 q = cnt / num;
2366                 *quote = q ? q : 1;
2367         } else
2368                 *quote = 0;
2369
2370         BT_DBG("conn %p quote %d", conn, *quote);
2371         return conn;
2372 }
2373
2374 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2375 {
2376         struct hci_conn_hash *h = &hdev->conn_hash;
2377         struct hci_conn *c;
2378
2379         BT_ERR("%s link tx timeout", hdev->name);
2380
2381         rcu_read_lock();
2382
2383         /* Kill stalled connections */
2384         list_for_each_entry_rcu(c, &h->list, list) {
2385                 if (c->type == type && c->sent) {
2386                         BT_ERR("%s killing stalled connection %s",
2387                                 hdev->name, batostr(&c->dst));
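                        /* 0x13: Remote User Terminated Connection */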
2388                         hci_acl_disconn(c, 0x13);
2389                 }
2390         }
2391
2392         rcu_read_unlock();
2393 }
2394
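/* Channel level variant of the scheduler above: it is priority aware,
 * considering only channels whose head skb carries the highest
 * priority currently queued for this link type, and among those it
 * picks the least busy connection. Lower priority channels wait until
 * hci_prio_recalculate() promotes them.
 */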
2395 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2396                                                 int *quote)
2397 {
2398         struct hci_conn_hash *h = &hdev->conn_hash;
2399         struct hci_chan *chan = NULL;
2400         int num = 0, min = ~0, cur_prio = 0;
2401         struct hci_conn *conn;
2402         int cnt, q, conn_num = 0;
2403
2404         BT_DBG("%s", hdev->name);
2405
2406         rcu_read_lock();
2407
2408         list_for_each_entry_rcu(conn, &h->list, list) {
2409                 struct hci_chan *tmp;
2410
2411                 if (conn->type != type)
2412                         continue;
2413
2414                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2415                         continue;
2416
2417                 conn_num++;
2418
2419                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2420                         struct sk_buff *skb;
2421
2422                         if (skb_queue_empty(&tmp->data_q))
2423                                 continue;
2424
2425                         skb = skb_peek(&tmp->data_q);
2426                         if (skb->priority < cur_prio)
2427                                 continue;
2428
2429                         if (skb->priority > cur_prio) {
2430                                 num = 0;
2431                                 min = ~0;
2432                                 cur_prio = skb->priority;
2433                         }
2434
2435                         num++;
2436
2437                         if (conn->sent < min) {
2438                                 min  = conn->sent;
2439                                 chan = tmp;
2440                         }
2441                 }
2442
2443                 if (hci_conn_num(hdev, type) == conn_num)
2444                         break;
2445         }
2446
2447         rcu_read_unlock();
2448
2449         if (!chan)
2450                 return NULL;
2451
2452         switch (chan->conn->type) {
2453         case ACL_LINK:
2454                 cnt = hdev->acl_cnt;
2455                 break;
2456         case SCO_LINK:
2457         case ESCO_LINK:
2458                 cnt = hdev->sco_cnt;
2459                 break;
2460         case LE_LINK:
2461                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2462                 break;
2463         default:
2464                 cnt = 0;
2465                 BT_ERR("Unknown link type");
2466         }
2467
2468         q = cnt / num;
2469         *quote = q ? q : 1;
2470         BT_DBG("chan %p quote %d", chan, *quote);
2471         return chan;
2472 }
2473
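/* Anti starvation pass run after a TX round that consumed credits:
 * channels that got to send just have their per round counter reset,
 * while channels that sat idle with queued data have their head skb
 * promoted to HCI_PRIO_MAX - 1 so they win the next round.
 */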
2474 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2475 {
2476         struct hci_conn_hash *h = &hdev->conn_hash;
2477         struct hci_conn *conn;
2478         int num = 0;
2479
2480         BT_DBG("%s", hdev->name);
2481
2482         rcu_read_lock();
2483
2484         list_for_each_entry_rcu(conn, &h->list, list) {
2485                 struct hci_chan *chan;
2486
2487                 if (conn->type != type)
2488                         continue;
2489
2490                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2491                         continue;
2492
2493                 num++;
2494
2495                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2496                         struct sk_buff *skb;
2497
2498                         if (chan->sent) {
2499                                 chan->sent = 0;
2500                                 continue;
2501                         }
2502
2503                         if (skb_queue_empty(&chan->data_q))
2504                                 continue;
2505
2506                         skb = skb_peek(&chan->data_q);
2507                         if (skb->priority >= HCI_PRIO_MAX - 1)
2508                                 continue;
2509
2510                         skb->priority = HCI_PRIO_MAX - 1;
2511
2512                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2513                                                                 skb->priority);
2514                 }
2515
2516                 if (hci_conn_num(hdev, type) == num)
2517                         break;
2518         }
2519
2520         rcu_read_unlock();
2521
2522 }
2523
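/* With block based flow control (the mode used by AMP controllers)
 * buffer usage is accounted in fixed size blocks rather than whole
 * packets, so a frame costs ceil(payload_len / block_len) blocks.
 */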
2524 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2525 {
2526         /* Calculate count of blocks used by this packet */
2527         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2528 }
2529
2530 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2531 {
2532         if (!test_bit(HCI_RAW, &hdev->flags)) {
2533                 /* ACL tx timeout must be longer than maximum
2534                  * link supervision timeout (40.9 seconds) */
2535                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2536                                         msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2537                         hci_link_tx_to(hdev, ACL_LINK);
2538         }
2539 }
2540
2541 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2542 {
2543         unsigned int cnt = hdev->acl_cnt;
2544         struct hci_chan *chan;
2545         struct sk_buff *skb;
2546         int quote;
2547
2548         __check_timeout(hdev, cnt);
2549
2550         while (hdev->acl_cnt &&
2551                         (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2552                 u32 priority = (skb_peek(&chan->data_q))->priority;
2553                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2554                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2555                                         skb->len, skb->priority);
2556
2557                         /* Stop if priority has changed */
2558                         if (skb->priority < priority)
2559                                 break;
2560
2561                         skb = skb_dequeue(&chan->data_q);
2562
2563                         hci_conn_enter_active_mode(chan->conn,
2564                                                    bt_cb(skb)->force_active);
2565
2566                         hci_send_frame(skb);
2567                         hdev->acl_last_tx = jiffies;
2568
2569                         hdev->acl_cnt--;
2570                         chan->sent++;
2571                         chan->conn->sent++;
2572                 }
2573         }
2574
2575         if (cnt != hdev->acl_cnt)
2576                 hci_prio_recalculate(hdev, ACL_LINK);
2577 }
2578
2579 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2580 {
2581         unsigned int cnt = hdev->block_cnt;
2582         struct hci_chan *chan;
2583         struct sk_buff *skb;
2584         int quote;
2585
2586         __check_timeout(hdev, cnt);
2587
2588         while (hdev->block_cnt > 0 &&
2589                         (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2590                 u32 priority = (skb_peek(&chan->data_q))->priority;
2591                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2592                         int blocks;
2593
2594                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2595                                                 skb->len, skb->priority);
2596
2597                         /* Stop if priority has changed */
2598                         if (skb->priority < priority)
2599                                 break;
2600
2601                         skb = skb_dequeue(&chan->data_q);
2602
2603                         blocks = __get_blocks(hdev, skb);
2604                         if (blocks > hdev->block_cnt)
2605                                 return;
2606
2607                         hci_conn_enter_active_mode(chan->conn,
2608                                                 bt_cb(skb)->force_active);
2609
2610                         hci_send_frame(skb);
2611                         hdev->acl_last_tx = jiffies;
2612
2613                         hdev->block_cnt -= blocks;
2614                         quote -= blocks;
2615
2616                         chan->sent += blocks;
2617                         chan->conn->sent += blocks;
2618                 }
2619         }
2620
2621         if (cnt != hdev->block_cnt)
2622                 hci_prio_recalculate(hdev, ACL_LINK);
2623 }
2624
2625 static inline void hci_sched_acl(struct hci_dev *hdev)
2626 {
2627         BT_DBG("%s", hdev->name);
2628
2629         if (!hci_conn_num(hdev, ACL_LINK))
2630                 return;
2631
2632         switch (hdev->flow_ctl_mode) {
2633         case HCI_FLOW_CTL_MODE_PACKET_BASED:
2634                 hci_sched_acl_pkt(hdev);
2635                 break;
2636
2637         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2638                 hci_sched_acl_blk(hdev);
2639                 break;
2640         }
2641 }
2642
2643 /* Schedule SCO */
2644 static inline void hci_sched_sco(struct hci_dev *hdev)
2645 {
2646         struct hci_conn *conn;
2647         struct sk_buff *skb;
2648         int quote;
2649
2650         BT_DBG("%s", hdev->name);
2651
2652         if (!hci_conn_num(hdev, SCO_LINK))
2653                 return;
2654
2655         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2656                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2657                         BT_DBG("skb %p len %d", skb, skb->len);
2658                         hci_send_frame(skb);
2659
2660                         conn->sent++;
2661                         if (conn->sent == ~0)
2662                                 conn->sent = 0;
2663                 }
2664         }
2665 }
2666
2667 static inline void hci_sched_esco(struct hci_dev *hdev)
2668 {
2669         struct hci_conn *conn;
2670         struct sk_buff *skb;
2671         int quote;
2672
2673         BT_DBG("%s", hdev->name);
2674
2675         if (!hci_conn_num(hdev, ESCO_LINK))
2676                 return;
2677
2678         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2679                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2680                         BT_DBG("skb %p len %d", skb, skb->len);
2681                         hci_send_frame(skb);
2682
2683                         conn->sent++;
2684                         if (conn->sent == ~0)
2685                                 conn->sent = 0;
2686                 }
2687         }
2688 }
2689
2690 static inline void hci_sched_le(struct hci_dev *hdev)
2691 {
2692         struct hci_chan *chan;
2693         struct sk_buff *skb;
2694         int quote, cnt, tmp;
2695
2696         BT_DBG("%s", hdev->name);
2697
2698         if (!hci_conn_num(hdev, LE_LINK))
2699                 return;
2700
2701         if (!test_bit(HCI_RAW, &hdev->flags)) {
2702                 /* LE tx timeout must be longer than maximum
2703                  * link supervision timeout (40.9 seconds) */
2704                 if (!hdev->le_cnt && hdev->le_pkts &&
2705                                 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2706                         hci_link_tx_to(hdev, LE_LINK);
2707         }
2708
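        /* Controllers that report no dedicated LE buffers (le_pkts == 0)
         * share the ACL buffer pool, so fall back to the ACL credits.
         */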
2709         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2710         tmp = cnt;
2711         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2712                 u32 priority = (skb_peek(&chan->data_q))->priority;
2713                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2714                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2715                                         skb->len, skb->priority);
2716
2717                         /* Stop if priority has changed */
2718                         if (skb->priority < priority)
2719                                 break;
2720
2721                         skb = skb_dequeue(&chan->data_q);
2722
2723                         hci_send_frame(skb);
2724                         hdev->le_last_tx = jiffies;
2725
2726                         cnt--;
2727                         chan->sent++;
2728                         chan->conn->sent++;
2729                 }
2730         }
2731
2732         if (hdev->le_pkts)
2733                 hdev->le_cnt = cnt;
2734         else
2735                 hdev->acl_cnt = cnt;
2736
2737         if (cnt != tmp)
2738                 hci_prio_recalculate(hdev, LE_LINK);
2739 }
2740
2741 static void hci_tx_work(struct work_struct *work)
2742 {
2743         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2744         struct sk_buff *skb;
2745
2746         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2747                 hdev->sco_cnt, hdev->le_cnt);
2748
2749         /* Schedule queues and send stuff to HCI driver */
2750
2751         hci_sched_acl(hdev);
2752
2753         hci_sched_sco(hdev);
2754
2755         hci_sched_esco(hdev);
2756
2757         hci_sched_le(hdev);
2758
2759         /* Send next queued raw (unknown type) packet */
2760         while ((skb = skb_dequeue(&hdev->raw_q)))
2761                 hci_send_frame(skb);
2762 }
2763
2764 /* ----- HCI RX task (incoming data processing) ----- */
2765
2766 /* ACL data packet */
2767 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2768 {
2769         struct hci_acl_hdr *hdr = (void *) skb->data;
2770         struct hci_conn *conn;
2771         __u16 handle, flags;
2772
2773         skb_pull(skb, HCI_ACL_HDR_SIZE);
2774
2775         handle = __le16_to_cpu(hdr->handle);
2776         flags  = hci_flags(handle);
2777         handle = hci_handle(handle);
2778
2779         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2780
2781         hdev->stat.acl_rx++;
2782
2783         hci_dev_lock(hdev);
2784         conn = hci_conn_hash_lookup_handle(hdev, handle);
2785         hci_dev_unlock(hdev);
2786
2787         if (conn) {
2788                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2789
2790                 /* Send to upper protocol */
2791                 l2cap_recv_acldata(conn, skb, flags);
2792                 return;
2793         } else {
2794                 BT_ERR("%s ACL packet for unknown connection handle %d",
2795                         hdev->name, handle);
2796         }
2797
2798         kfree_skb(skb);
2799 }
2800
2801 /* SCO data packet */
2802 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2803 {
2804         struct hci_sco_hdr *hdr = (void *) skb->data;
2805         struct hci_conn *conn;
2806         __u16 handle;
2807
2808         skb_pull(skb, HCI_SCO_HDR_SIZE);
2809
2810         handle = __le16_to_cpu(hdr->handle);
2811
2812         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2813
2814         hdev->stat.sco_rx++;
2815
2816         hci_dev_lock(hdev);
2817         conn = hci_conn_hash_lookup_handle(hdev, handle);
2818         hci_dev_unlock(hdev);
2819
2820         if (conn) {
2821                 /* Send to upper protocol */
2822                 sco_recv_scodata(conn, skb);
2823                 return;
2824         } else {
2825                 BT_ERR("%s SCO packet for unknown connection handle %d",
2826                         hdev->name, handle);
2827         }
2828
2829         kfree_skb(skb);
2830 }
2831
2832 static void hci_rx_work(struct work_struct *work)
2833 {
2834         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2835         struct sk_buff *skb;
2836
2837         BT_DBG("%s", hdev->name);
2838
2839         while ((skb = skb_dequeue(&hdev->rx_q))) {
2840                 /* Send copy to monitor */
2841                 hci_send_to_monitor(hdev, skb);
2842
2843                 if (atomic_read(&hdev->promisc)) {
2844                         /* Send copy to the sockets */
2845                         hci_send_to_sock(hdev, skb);
2846                 }
2847
2848                 if (test_bit(HCI_RAW, &hdev->flags)) {
2849                         kfree_skb(skb);
2850                         continue;
2851                 }
2852
2853                 if (test_bit(HCI_INIT, &hdev->flags)) {
2854                         /* Don't process data packets in this state. */
2855                         switch (bt_cb(skb)->pkt_type) {
2856                         case HCI_ACLDATA_PKT:
2857                         case HCI_SCODATA_PKT:
2858                                 kfree_skb(skb);
2859                                 continue;
2860                         }
2861                 }
2862
2863                 /* Process frame */
2864                 switch (bt_cb(skb)->pkt_type) {
2865                 case HCI_EVENT_PKT:
2866                         BT_DBG("%s Event packet", hdev->name);
2867                         hci_event_packet(hdev, skb);
2868                         break;
2869
2870                 case HCI_ACLDATA_PKT:
2871                         BT_DBG("%s ACL data packet", hdev->name);
2872                         hci_acldata_packet(hdev, skb);
2873                         break;
2874
2875                 case HCI_SCODATA_PKT:
2876                         BT_DBG("%s SCO data packet", hdev->name);
2877                         hci_scodata_packet(hdev, skb);
2878                         break;
2879
2880                 default:
2881                         kfree_skb(skb);
2882                         break;
2883                 }
2884         }
2885 }
2886
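/* Drain the command queue. cmd_cnt holds the number of commands the
 * controller will currently accept (normally one); it is decremented
 * here and replenished by the Command Complete/Command Status event
 * handlers. A clone of the outgoing command is kept in hdev->sent_cmd
 * so that hci_sent_cmd_data() can find the original parameters, and
 * cmd_timer catches controllers that never answer.
 */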
2887 static void hci_cmd_work(struct work_struct *work)
2888 {
2889         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2890         struct sk_buff *skb;
2891
2892         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2893
2894         /* Send queued commands */
2895         if (atomic_read(&hdev->cmd_cnt)) {
2896                 skb = skb_dequeue(&hdev->cmd_q);
2897                 if (!skb)
2898                         return;
2899
2900                 kfree_skb(hdev->sent_cmd);
2901
2902                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2903                 if (hdev->sent_cmd) {
2904                         atomic_dec(&hdev->cmd_cnt);
2905                         hci_send_frame(skb);
2906                         if (test_bit(HCI_RESET, &hdev->flags))
2907                                 del_timer(&hdev->cmd_timer);
2908                         else
2909                                 mod_timer(&hdev->cmd_timer,
2910                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2911                 } else {
2912                         skb_queue_head(&hdev->cmd_q, skb);
2913                         queue_work(hdev->workqueue, &hdev->cmd_work);
2914                 }
2915         }
2916 }
2917
2918 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2919 {
2920         /* General inquiry access code (GIAC) */
2921         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2922         struct hci_cp_inquiry cp;
2923
2924         BT_DBG("%s", hdev->name);
2925
2926         if (test_bit(HCI_INQUIRY, &hdev->flags))
2927                 return -EINPROGRESS;
2928
2929         inquiry_cache_flush(hdev);
2930
2931         memset(&cp, 0, sizeof(cp));
2932         memcpy(&cp.lap, lap, sizeof(cp.lap));
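        /* Inquiry_Length is in units of 1.28 seconds (range 0x01-0x30) */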
2933         cp.length  = length;
2934
2935         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2936 }
2937
2938 int hci_cancel_inquiry(struct hci_dev *hdev)
2939 {
2940         BT_DBG("%s", hdev->name);
2941
2942         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2943                 return -EPERM;
2944
2945         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2946 }