Bluetooth: mgmt: Fix device_connected sending order
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* ---- HCI notifications ---- */

static void hci_notify(struct hci_dev *hdev, int event)
{
        hci_sock_dev_event(hdev, event);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase, check whether the completed command
         * matches the last init command, and if not just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd) {
                struct hci_command_hdr *sent = (void *) hdev->sent_cmd->data;
                struct sk_buff *skb;

                /* Some CSR based controllers generate a spontaneous
                 * reset complete event during init and any pending
                 * command will never be completed. In such a case we
                 * need to resend whatever was the last sent
                 * command.
                 */

                if (cmd != HCI_OP_RESET || sent->opcode == HCI_OP_RESET)
                        return;

                skb = skb_clone(hdev->sent_cmd, GFP_ATOMIC);
                if (skb) {
                        skb_queue_head(&hdev->cmd_q, skb);
                        queue_work(hdev->workqueue, &hdev->cmd_work);
                }

                return;
        }

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
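
/*
 * Usage sketch (illustrative, mirroring the ioctl helpers further down):
 * a request callback queues one or more HCI commands, and __hci_request()
 * sleeps until hci_req_complete() wakes it up or the timeout expires.
 * Enabling page and inquiry scan via hci_scan_req() looks like:
 *
 *	err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */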

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_RESET, &hdev->flags);
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
        }

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout: 0x7d00 slots * 0.625 ms = ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
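
/*
 * Note: each hci_send_cmd() above only queues a command; hci_cmd_work()
 * sends them to the controller one at a time. During HCI_INIT the opcode
 * of the most recently sent command is tracked in hdev->init_last_cmd
 * (updated as commands go out, outside this excerpt), which is how
 * hci_req_complete() above recognises the completion of the final init
 * command and wakes up __hci_request().
 */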

static void amp_init(struct hci_dev *hdev)
{
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Reset */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                queue_work(hdev->workqueue, &hdev->cmd_work);
        }
        skb_queue_purge(&hdev->driver_init);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(hdev);
                break;

        case HCI_AMP:
                amp_init(hdev);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
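
/*
 * Usage sketch: every successful hci_dev_get() must be balanced by an
 * hci_dev_put() once the caller is done with the device, as hci_inquiry()
 * and the other ioctl helpers below do:
 *
 *	struct hci_dev *hdev = hci_dev_get(dev_id);
 *	if (!hdev)
 *		return -ENODEV;
 *	...
 *	hci_dev_put(hdev);
 */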

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        switch (discov->state) {
        case DISCOVERY_FINDING:
        case DISCOVERY_RESOLVING:
                return true;

        default:
                return false;
        }
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                if (hdev->discovery.state != DISCOVERY_STARTING)
                        mgmt_discovering(hdev, 0);
                hdev->discovery.type = 0;
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_FINDING:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                       bdaddr_t *bdaddr,
                                                       int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                      struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                                abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
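
/*
 * The loop above keeps the resolve list sorted by signal strength:
 * entries with a smaller |rssi| (i.e. a stronger signal, since RSSI is
 * negative) come first, so name resolution is attempted on the closest
 * devices first. Entries already in NAME_PENDING state are never used
 * as the insertion point and so keep their position.
 */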

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                              bool name_known, bool *ssp)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (ssp)
                *ssp = data->ssp_mode;

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->data.ssp_mode && ssp)
                        *ssp = true;

                if (ie->name_state == NAME_NEEDED &&
                                                data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
                                        ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}
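
/*
 * Transitions driven above (derived from the code): an entry whose name
 * becomes known moves to NAME_KNOWN and drops off the unknown/resolve
 * lists, while a NAME_NEEDED entry whose RSSI changed is only
 * repositioned within the resolve list. The return value tells the
 * caller whether the remote name is already known (true) or still needs
 * resolving (false).
 */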

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses, use a buffer with 255
         * entries.
         */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so we allocate a temporary buffer and
         * then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
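
/*
 * Userspace view (sketch, assuming the standard BlueZ ioctl numbers from
 * hci.h; hci_sock is a raw HCI socket):
 *
 *	struct hci_inquiry_req *ir = (void *) buf;
 *	ir->dev_id  = 0;
 *	ir->lap[0]  = 0x33; ir->lap[1] = 0x8b; ir->lap[2] = 0x9e; // GIAC
 *	ir->length  = 8;	// inquiry length
 *	ir->num_rsp = 0;	// 0 = unlimited, capped at 255 above
 *	ioctl(hci_sock, HCIINQUIRY, ir);
 */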

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (test_bit(HCI_UNREGISTER, &hdev->dev_flags)) {
                ret = -ENODEV;
                goto done;
        }

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices if
           enable_hs is not set */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (lmp_host_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
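
/*
 * Note: hci_dev_open() is reached both from userspace (the HCIDEVUP
 * ioctl path in hci_sock.c, outside this file) and from hci_power_on()
 * below. For non-raw devices it runs the full init request sequence
 * (hci_init_req, plus hci_le_init_req on LE capable controllers) before
 * declaring the device HCI_UP.
 */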

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        cancel_work_sync(&hdev->le_scan);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
                clear_bit(HCI_DISCOVERABLE, &hdev->dev_flags);
        }

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        cancel_delayed_work_sync(&hdev->le_scan_disable);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
                                test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        if (!test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags)) {
                hci_dev_lock(hdev);
                mgmt_powered(hdev, 0);
                hci_dev_unlock(hdev);
        }

        /* Clear flags */
        hdev->flags = 0;

        memset(hdev->eir, 0, sizeof(hdev->eir));
        memset(hdev->dev_class, 0, sizeof(hdev->dev_class));

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}
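
/*
 * The teardown order above matters: cancel outstanding requests first,
 * then flush the RX/TX work so nothing new is queued, flush the inquiry
 * and connection state under hdev->lock, optionally reset the
 * controller, and only then call hdev->close(). At that point the
 * queues are empty and no work is scheduled, so the driver can safely
 * tear down its transport.
 */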

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        err = hci_dev_do_close(hdev);

        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        hci_init_sysfs(hdev);
        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                schedule_delayed_work(&hdev->power_off,
                                        msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                                        power_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_do_close(hdev);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->uuids) {
                struct bt_uuid *uuid;

                uuid = list_entry(p, struct bt_uuid, list);

                list_del(p);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

static bool hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                                                u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return true;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return false;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return false;

        /* Security mode 3 case */
        if (!conn)
                return true;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return true;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return true;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return true;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return false;
}
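
/*
 * Quick reference for the rules above (summarising the code, not an
 * external spec):
 *
 *	key_type < 0x03 (legacy)	store
 *	0x03 (debug combination)	never store
 *	0x06 (changed combination)	store only if a previous key exists
 *	no hci_conn (security mode 3)	store
 *	otherwise			store if neither side asked for
 *					no-bonding (auth 0x00/0x01), or if
 *					either side asked for dedicated
 *					bonding (auth 0x02/0x03)
 */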

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                                memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                     u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                                        bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}
EXPORT_SYMBOL(hci_find_ltk_by_addr);

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                     bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;
        bool persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
                                        (!conn || conn->remote_auth == 0xff) &&
                                        old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, 16);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (conn)
                conn->flush_key = !persistent;

        return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
                int new_key, u8 authenticated, u8 tk[16], u8 enc_size,
                u16 ediv, u8 rand[8])
{
        struct smp_ltk *key, *old_key;

        if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
                return 0;

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->enc_size = enc_size;
        key->type = type;
        memcpy(key->rand, rand, sizeof(key->rand));

        if (!new_key)
                return 0;

        if (type & HCI_SMP_LTK)
                mgmt_new_ltk(hdev, key, 1);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&key->list);
        kfree(key);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr))
                        continue;

                BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        BT_ERR("%s command tx timeout", hdev->name);
        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}
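
/*
 * This timer is armed when a command is actually sent to the controller
 * (in hci_cmd_work(), outside this excerpt) and deleted when the
 * matching event comes back. If it fires, the controller has stopped
 * responding; forcing cmd_cnt back to 1 lets hci_cmd_work() push out the
 * next queued command instead of stalling the command queue forever.
 */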

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                          bdaddr_t *bdaddr)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list)
                if (bacmp(bdaddr, &data->bdaddr) == 0)
                        return data;

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&data->list);
        kfree(data);

        return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }

        return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
                            u8 *randomizer)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);

        if (!data) {
                data = kmalloc(sizeof(*data), GFP_ATOMIC);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                list_add(&data->list, &hdev->remote_oob_data);
        }

        memcpy(data->hash, hash, sizeof(data->hash));
        memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

        BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

        return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->blacklist, list)
                if (bacmp(bdaddr, &b->bdaddr) == 0)
                        return b;

        return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->blacklist) {
                struct bdaddr_list *b;

                b = list_entry(p, struct bdaddr_list, list);

                list_del(p);
                kfree(b);
        }

        return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return -EBADF;

        if (hci_blacklist_lookup(hdev, bdaddr))
                return -EEXIST;

        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, bdaddr);

        list_add(&entry->list, &hdev->blacklist);

        return mgmt_device_blocked(hdev, bdaddr, type);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 type)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return hci_blacklist_clear(hdev);

        entry = hci_blacklist_lookup(hdev, bdaddr);
        if (!entry)
                return -ENOENT;

        list_del(&entry->list);
        kfree(entry);

        return mgmt_device_unblocked(hdev, bdaddr, type);
}

static void hci_clear_adv_cache(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            adv_work.work);

        hci_dev_lock(hdev);

        hci_adv_entries_clear(hdev);

        hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
        struct adv_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        BT_DBG("%s adv cache cleared", hdev->name);

        return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct adv_entry *entry;

        list_for_each_entry(entry, &hdev->adv_entries, list)
                if (bacmp(bdaddr, &entry->bdaddr) == 0)
                        return entry;

        return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
        if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
                return 1;

        return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
                                        struct hci_ev_le_advertising_info *ev)
{
        struct adv_entry *entry;

        if (!is_connectable_adv(ev->evt_type))
                return -EINVAL;

        /* Only new entries should be added to adv_entries. So, if
         * bdaddr was found, don't add it. */
        if (hci_find_adv_entry(hdev, &ev->bdaddr))
                return 0;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, &ev->bdaddr);
        entry->bdaddr_type = ev->bdaddr_type;

        list_add(&entry->list, &hdev->adv_entries);

        BT_DBG("%s adv entry added: address %s type %u", hdev->name,
                                batostr(&entry->bdaddr), entry->bdaddr_type);

        return 0;
}

static void le_scan_param_req(struct hci_dev *hdev, unsigned long opt)
{
        struct le_scan_params *param = (struct le_scan_params *) opt;
        struct hci_cp_le_set_scan_param cp;

        memset(&cp, 0, sizeof(cp));
        cp.type = param->type;
        cp.interval = cpu_to_le16(param->interval);
        cp.window = cpu_to_le16(param->window);

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_PARAM, sizeof(cp), &cp);
}

static void le_scan_enable_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_cp_le_set_scan_enable cp;

        memset(&cp, 0, sizeof(cp));
        cp.enable = 1;

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static int hci_do_le_scan(struct hci_dev *hdev, u8 type, u16 interval,
                          u16 window, int timeout)
{
        long timeo = msecs_to_jiffies(3000);
        struct le_scan_params param;
        int err;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_LE_SCAN, &hdev->dev_flags))
                return -EINPROGRESS;

        param.type = type;
        param.interval = interval;
        param.window = window;

        hci_req_lock(hdev);

        err = __hci_request(hdev, le_scan_param_req, (unsigned long) &param,
                            timeo);
        if (!err)
                err = __hci_request(hdev, le_scan_enable_req, 0, timeo);

        hci_req_unlock(hdev);

        if (err < 0)
                return err;

        schedule_delayed_work(&hdev->le_scan_disable,
                              msecs_to_jiffies(timeout));

        return 0;
}
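
/*
 * Flow: set the scan parameters, then enable scanning, both as blocking
 * requests under the request lock; on success, schedule
 * le_scan_disable_work() to turn the scan back off after the requested
 * timeout.
 */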

static void le_scan_disable_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                            le_scan_disable.work);
        struct hci_cp_le_set_scan_enable cp;

        BT_DBG("%s", hdev->name);

        memset(&cp, 0, sizeof(cp));

        hci_send_cmd(hdev, HCI_OP_LE_SET_SCAN_ENABLE, sizeof(cp), &cp);
}

static void le_scan_work(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, le_scan);
        struct le_scan_params *param = &hdev->le_scan_params;

        BT_DBG("%s", hdev->name);

        hci_do_le_scan(hdev, param->type, param->interval, param->window,
                       param->timeout);
}

int hci_le_scan(struct hci_dev *hdev, u8 type, u16 interval, u16 window,
                int timeout)
{
        struct le_scan_params *param = &hdev->le_scan_params;

        BT_DBG("%s", hdev->name);

        if (work_busy(&hdev->le_scan))
                return -EINPROGRESS;

        param->type = type;
        param->interval = interval;
        param->window = window;
        param->timeout = timeout;

        queue_work(system_long_wq, &hdev->le_scan);

        return 0;
}
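
/*
 * Usage sketch (illustrative values only; per the LE Set Scan Parameters
 * command, interval and window are in 0.625 ms units, while timeout here
 * is in milliseconds):
 *
 *	// passive scan, 60 ms interval, 30 ms window, stop after 10 s
 *	err = hci_le_scan(hdev, 0x00, 0x0060, 0x0030, 10000);
 */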

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id, error;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        if (!hdev->open || !hdev->close)
                return -EINVAL;

        /* Do not allow HCI_AMP devices to register at index 0,
         * so the index can be used as the AMP controller ID.
         */
        id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

        write_lock(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add_tail(&hdev->list, head);

        mutex_init(&hdev->lock);

        hdev->flags = 0;
        hdev->dev_flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->io_capability = 0x03; /* No Input No Output */

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        discovery_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->mgmt_pending);

        INIT_LIST_HEAD(&hdev->blacklist);

        INIT_LIST_HEAD(&hdev->uuids);

        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);

        INIT_LIST_HEAD(&hdev->remote_oob_data);

        INIT_LIST_HEAD(&hdev->adv_entries);

        INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        INIT_WORK(&hdev->le_scan, le_scan_work);

        INIT_DELAYED_WORK(&hdev->le_scan_disable, le_scan_disable_work);

        write_unlock(&hci_dev_list_lock);

        hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
                                                        WQ_MEM_RECLAIM, 1);
        if (!hdev->workqueue) {
                error = -ENOMEM;
                goto err;
        }

        error = hci_add_sysfs(hdev);
        if (error < 0)
                goto err_wqueue;

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
        set_bit(HCI_SETUP, &hdev->dev_flags);
        schedule_work(&hdev->power_on);

        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);

        return id;

err_wqueue:
        destroy_workqueue(hdev->workqueue);
err:
        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);

        return error;
}
EXPORT_SYMBOL(hci_register_dev);
1848
1849 /* Unregister HCI device */
1850 void hci_unregister_dev(struct hci_dev *hdev)
1851 {
1852         int i;
1853
1854         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1855
1856         set_bit(HCI_UNREGISTER, &hdev->dev_flags);
1857
1858         write_lock(&hci_dev_list_lock);
1859         list_del(&hdev->list);
1860         write_unlock(&hci_dev_list_lock);
1861
1862         hci_dev_do_close(hdev);
1863
1864         for (i = 0; i < NUM_REASSEMBLY; i++)
1865                 kfree_skb(hdev->reassembly[i]);
1866
1867         if (!test_bit(HCI_INIT, &hdev->flags) &&
1868                                 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1869                 hci_dev_lock(hdev);
1870                 mgmt_index_removed(hdev);
1871                 hci_dev_unlock(hdev);
1872         }
1873
1874         /* mgmt_index_removed should take care of emptying the
1875          * pending list */
1876         BUG_ON(!list_empty(&hdev->mgmt_pending));
1877
1878         hci_notify(hdev, HCI_DEV_UNREG);
1879
1880         if (hdev->rfkill) {
1881                 rfkill_unregister(hdev->rfkill);
1882                 rfkill_destroy(hdev->rfkill);
1883         }
1884
1885         hci_del_sysfs(hdev);
1886
1887         cancel_delayed_work_sync(&hdev->adv_work);
1888
1889         destroy_workqueue(hdev->workqueue);
1890
1891         hci_dev_lock(hdev);
1892         hci_blacklist_clear(hdev);
1893         hci_uuids_clear(hdev);
1894         hci_link_keys_clear(hdev);
1895         hci_smp_ltks_clear(hdev);
1896         hci_remote_oob_data_clear(hdev);
1897         hci_adv_entries_clear(hdev);
1898         hci_dev_unlock(hdev);
1899
1900         hci_dev_put(hdev);
1901 }
1902 EXPORT_SYMBOL(hci_unregister_dev);
1903
1904 /* Suspend HCI device */
1905 int hci_suspend_dev(struct hci_dev *hdev)
1906 {
1907         hci_notify(hdev, HCI_DEV_SUSPEND);
1908         return 0;
1909 }
1910 EXPORT_SYMBOL(hci_suspend_dev);
1911
1912 /* Resume HCI device */
1913 int hci_resume_dev(struct hci_dev *hdev)
1914 {
1915         hci_notify(hdev, HCI_DEV_RESUME);
1916         return 0;
1917 }
1918 EXPORT_SYMBOL(hci_resume_dev);
1919
1920 /* Receive frame from HCI drivers */
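/* Drivers hand every received packet to this function: the skb carries
 * its hci_dev in skb->dev and is accepted only while the device is UP
 * or still in the HCI_INIT phase; it is timestamped, queued on rx_q and
 * processed asynchronously by hci_rx_work().
 */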
1921 int hci_recv_frame(struct sk_buff *skb)
1922 {
1923         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1924         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1925                                 && !test_bit(HCI_INIT, &hdev->flags))) {
1926                 kfree_skb(skb);
1927                 return -ENXIO;
1928         }
1929
1930         /* Incoming skb */
1931         bt_cb(skb)->incoming = 1;
1932
1933         /* Time stamp */
1934         __net_timestamp(skb);
1935
1936         skb_queue_tail(&hdev->rx_q, skb);
1937         queue_work(hdev->workqueue, &hdev->rx_work);
1938
1939         return 0;
1940 }
1941 EXPORT_SYMBOL(hci_recv_frame);
1942
1943 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1944                                                   int count, __u8 index)
1945 {
1946         int len = 0;
1947         int hlen = 0;
1948         int remain = count;
1949         struct sk_buff *skb;
1950         struct bt_skb_cb *scb;
1951
1952         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1953                                 index >= NUM_REASSEMBLY)
1954                 return -EILSEQ;
1955
1956         skb = hdev->reassembly[index];
1957
1958         if (!skb) {
1959                 switch (type) {
1960                 case HCI_ACLDATA_PKT:
1961                         len = HCI_MAX_FRAME_SIZE;
1962                         hlen = HCI_ACL_HDR_SIZE;
1963                         break;
1964                 case HCI_EVENT_PKT:
1965                         len = HCI_MAX_EVENT_SIZE;
1966                         hlen = HCI_EVENT_HDR_SIZE;
1967                         break;
1968                 case HCI_SCODATA_PKT:
1969                         len = HCI_MAX_SCO_SIZE;
1970                         hlen = HCI_SCO_HDR_SIZE;
1971                         break;
1972                 }
1973
1974                 skb = bt_skb_alloc(len, GFP_ATOMIC);
1975                 if (!skb)
1976                         return -ENOMEM;
1977
1978                 scb = (void *) skb->cb;
1979                 scb->expect = hlen;
1980                 scb->pkt_type = type;
1981
1982                 skb->dev = (void *) hdev;
1983                 hdev->reassembly[index] = skb;
1984         }
1985
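        /* Consume the input in scb->expect sized chunks: first the packet
         * header, then the payload length the completed header announces.
         * When 'expect' drops to zero the frame is complete and is handed
         * to hci_recv_frame(); the number of unconsumed bytes is returned.
         */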
1986         while (count) {
1987                 scb = (void *) skb->cb;
1988                 len = min_t(uint, scb->expect, count);
1989
1990                 memcpy(skb_put(skb, len), data, len);
1991
1992                 count -= len;
1993                 data += len;
1994                 scb->expect -= len;
1995                 remain = count;
1996
1997                 switch (type) {
1998                 case HCI_EVENT_PKT:
1999                         if (skb->len == HCI_EVENT_HDR_SIZE) {
2000                                 struct hci_event_hdr *h = hci_event_hdr(skb);
2001                                 scb->expect = h->plen;
2002
2003                                 if (skb_tailroom(skb) < scb->expect) {
2004                                         kfree_skb(skb);
2005                                         hdev->reassembly[index] = NULL;
2006                                         return -ENOMEM;
2007                                 }
2008                         }
2009                         break;
2010
2011                 case HCI_ACLDATA_PKT:
2012                         if (skb->len  == HCI_ACL_HDR_SIZE) {
2013                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
2014                                 scb->expect = __le16_to_cpu(h->dlen);
2015
2016                                 if (skb_tailroom(skb) < scb->expect) {
2017                                         kfree_skb(skb);
2018                                         hdev->reassembly[index] = NULL;
2019                                         return -ENOMEM;
2020                                 }
2021                         }
2022                         break;
2023
2024                 case HCI_SCODATA_PKT:
2025                         if (skb->len == HCI_SCO_HDR_SIZE) {
2026                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
2027                                 scb->expect = h->dlen;
2028
2029                                 if (skb_tailroom(skb) < scb->expect) {
2030                                         kfree_skb(skb);
2031                                         hdev->reassembly[index] = NULL;
2032                                         return -ENOMEM;
2033                                 }
2034                         }
2035                         break;
2036                 }
2037
2038                 if (scb->expect == 0) {
2039                         /* Complete frame */
2040
2041                         bt_cb(skb)->pkt_type = type;
2042                         hci_recv_frame(skb);
2043
2044                         hdev->reassembly[index] = NULL;
2045                         return remain;
2046                 }
2047         }
2048
2049         return remain;
2050 }
2051
2052 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
2053 {
2054         int rem = 0;
2055
2056         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
2057                 return -EILSEQ;
2058
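        /* One reassembly slot per packet type: the one-byte packet type
         * indicators (0x02 ACL, 0x03 SCO, 0x04 event) map to slots 1-3
         * via type - 1, leaving slot 0 free for the stream reassembly
         * below.
         */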
2059         while (count) {
2060                 rem = hci_reassembly(hdev, type, data, count, type - 1);
2061                 if (rem < 0)
2062                         return rem;
2063
2064                 data += (count - rem);
2065                 count = rem;
2066         }
2067
2068         return rem;
2069 }
2070 EXPORT_SYMBOL(hci_recv_fragment);
2071
2072 #define STREAM_REASSEMBLY 0
2073
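/* Reassemble frames from a raw byte stream (e.g. a UART transport) in
 * which every frame is prefixed with a one-byte packet type indicator.
 * Slot 0 is dedicated to this, as hci_recv_fragment() above only uses
 * slots 1-3.
 */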
2074 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
2075 {
2076         int type;
2077         int rem = 0;
2078
2079         while (count) {
2080                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
2081
2082                 if (!skb) {
2083                         struct { char type; } *pkt;
2084
2085                         /* Start of the frame */
2086                         pkt = data;
2087                         type = pkt->type;
2088
2089                         data++;
2090                         count--;
2091                 } else
2092                         type = bt_cb(skb)->pkt_type;
2093
2094                 rem = hci_reassembly(hdev, type, data, count,
2095                                                         STREAM_REASSEMBLY);
2096                 if (rem < 0)
2097                         return rem;
2098
2099                 data += (count - rem);
2100                 count = rem;
2101         }
2102
2103         return rem;
2104 }
2105 EXPORT_SYMBOL(hci_recv_stream_fragment);
2106
2107 /* ---- Interface to upper protocols ---- */
2108
2109 int hci_register_cb(struct hci_cb *cb)
2110 {
2111         BT_DBG("%p name %s", cb, cb->name);
2112
2113         write_lock(&hci_cb_list_lock);
2114         list_add(&cb->list, &hci_cb_list);
2115         write_unlock(&hci_cb_list_lock);
2116
2117         return 0;
2118 }
2119 EXPORT_SYMBOL(hci_register_cb);
2120
2121 int hci_unregister_cb(struct hci_cb *cb)
2122 {
2123         BT_DBG("%p name %s", cb, cb->name);
2124
2125         write_lock(&hci_cb_list_lock);
2126         list_del(&cb->list);
2127         write_unlock(&hci_cb_list_lock);
2128
2129         return 0;
2130 }
2131 EXPORT_SYMBOL(hci_unregister_cb);
2132
2133 static int hci_send_frame(struct sk_buff *skb)
2134 {
2135         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2136
2137         if (!hdev) {
2138                 kfree_skb(skb);
2139                 return -ENODEV;
2140         }
2141
2142         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2143
2144         /* Time stamp */
2145         __net_timestamp(skb);
2146
2147         /* Send copy to monitor */
2148         hci_send_to_monitor(hdev, skb);
2149
2150         if (atomic_read(&hdev->promisc)) {
2151                 /* Send copy to the sockets */
2152                 hci_send_to_sock(hdev, skb);
2153         }
2154
2155         /* Get rid of skb owner, prior to sending to the driver. */
2156         skb_orphan(skb);
2157
2158         return hdev->send(skb);
2159 }
2160
2161 /* Send HCI command */
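/* Build a command packet (header plus parameters), queue it on cmd_q
 * and kick cmd_work; actual transmission is throttled by the cmd_cnt
 * credit counter in hci_cmd_work() below.
 */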
2162 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2163 {
2164         int len = HCI_COMMAND_HDR_SIZE + plen;
2165         struct hci_command_hdr *hdr;
2166         struct sk_buff *skb;
2167
2168         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2169
2170         skb = bt_skb_alloc(len, GFP_ATOMIC);
2171         if (!skb) {
2172                 BT_ERR("%s no memory for command", hdev->name);
2173                 return -ENOMEM;
2174         }
2175
2176         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2177         hdr->opcode = cpu_to_le16(opcode);
2178         hdr->plen   = plen;
2179
2180         if (plen)
2181                 memcpy(skb_put(skb, plen), param, plen);
2182
2183         BT_DBG("skb len %d", skb->len);
2184
2185         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2186         skb->dev = (void *) hdev;
2187
2188         if (test_bit(HCI_INIT, &hdev->flags))
2189                 hdev->init_last_cmd = opcode;
2190
2191         skb_queue_tail(&hdev->cmd_q, skb);
2192         queue_work(hdev->workqueue, &hdev->cmd_work);
2193
2194         return 0;
2195 }
2196
2197 /* Get data from the previously sent command */
2198 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2199 {
2200         struct hci_command_hdr *hdr;
2201
2202         if (!hdev->sent_cmd)
2203                 return NULL;
2204
2205         hdr = (void *) hdev->sent_cmd->data;
2206
2207         if (hdr->opcode != cpu_to_le16(opcode))
2208                 return NULL;
2209
2210         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2211
2212         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2213 }
2214
2215 /* Send ACL data */
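/* Prepend the 4-byte ACL header: the 12-bit connection handle packed
 * with the 2-bit packet boundary and 2-bit broadcast flags, followed by
 * the little-endian payload length.
 */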
2216 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2217 {
2218         struct hci_acl_hdr *hdr;
2219         int len = skb->len;
2220
2221         skb_push(skb, HCI_ACL_HDR_SIZE);
2222         skb_reset_transport_header(skb);
2223         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2224         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2225         hdr->dlen   = cpu_to_le16(len);
2226 }
2227
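/* Queue one ACL frame, expanding a frag_list into individual packets:
 * the head keeps the caller's boundary flags while every fragment is
 * re-flagged ACL_CONT, and the whole sequence is queued under the queue
 * lock so fragments of different frames cannot interleave.
 */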
2228 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2229                                 struct sk_buff *skb, __u16 flags)
2230 {
2231         struct hci_dev *hdev = conn->hdev;
2232         struct sk_buff *list;
2233
2234         list = skb_shinfo(skb)->frag_list;
2235         if (!list) {
2236                 /* Non-fragmented */
2237                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2238
2239                 skb_queue_tail(queue, skb);
2240         } else {
2241                 /* Fragmented */
2242                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2243
2244                 skb_shinfo(skb)->frag_list = NULL;
2245
2246                 /* Queue all fragments atomically */
2247                 spin_lock(&queue->lock);
2248
2249                 __skb_queue_tail(queue, skb);
2250
2251                 flags &= ~ACL_START;
2252                 flags |= ACL_CONT;
2253                 do {
2254                         skb = list; list = list->next;
2255
2256                         skb->dev = (void *) hdev;
2257                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2258                         hci_add_acl_hdr(skb, conn->handle, flags);
2259
2260                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2261
2262                         __skb_queue_tail(queue, skb);
2263                 } while (list);
2264
2265                 spin_unlock(&queue->lock);
2266         }
2267 }
2268
2269 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2270 {
2271         struct hci_conn *conn = chan->conn;
2272         struct hci_dev *hdev = conn->hdev;
2273
2274         BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2275
2276         skb->dev = (void *) hdev;
2277         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2278         hci_add_acl_hdr(skb, conn->handle, flags);
2279
2280         hci_queue_acl(conn, &chan->data_q, skb, flags);
2281
2282         queue_work(hdev->workqueue, &hdev->tx_work);
2283 }
2284 EXPORT_SYMBOL(hci_send_acl);
2285
2286 /* Send SCO data */
2287 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2288 {
2289         struct hci_dev *hdev = conn->hdev;
2290         struct hci_sco_hdr hdr;
2291
2292         BT_DBG("%s len %d", hdev->name, skb->len);
2293
2294         hdr.handle = cpu_to_le16(conn->handle);
2295         hdr.dlen   = skb->len;
2296
2297         skb_push(skb, HCI_SCO_HDR_SIZE);
2298         skb_reset_transport_header(skb);
2299         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2300
2301         skb->dev = (void *) hdev;
2302         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2303
2304         skb_queue_tail(&conn->data_q, skb);
2305         queue_work(hdev->workqueue, &hdev->tx_work);
2306 }
2307 EXPORT_SYMBOL(hci_send_sco);
2308
2309 /* ---- HCI TX task (outgoing data) ---- */
2310
2311 /* HCI Connection scheduler */
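/* Pick the connection of the given type with the fewest packets in
 * flight and grant it a proportional quota: the controller's free
 * buffer count divided by the number of ready connections, with a
 * minimum of one packet.
 */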
2312 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2313 {
2314         struct hci_conn_hash *h = &hdev->conn_hash;
2315         struct hci_conn *conn = NULL, *c;
2316         int num = 0, min = ~0;
2317
2318         /* We don't have to lock device here. Connections are always
2319          * added and removed with TX task disabled. */
2320
2321         rcu_read_lock();
2322
2323         list_for_each_entry_rcu(c, &h->list, list) {
2324                 if (c->type != type || skb_queue_empty(&c->data_q))
2325                         continue;
2326
2327                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2328                         continue;
2329
2330                 num++;
2331
2332                 if (c->sent < min) {
2333                         min  = c->sent;
2334                         conn = c;
2335                 }
2336
2337                 if (hci_conn_num(hdev, type) == num)
2338                         break;
2339         }
2340
2341         rcu_read_unlock();
2342
2343         if (conn) {
2344                 int cnt, q;
2345
2346                 switch (conn->type) {
2347                 case ACL_LINK:
2348                         cnt = hdev->acl_cnt;
2349                         break;
2350                 case SCO_LINK:
2351                 case ESCO_LINK:
2352                         cnt = hdev->sco_cnt;
2353                         break;
2354                 case LE_LINK:
2355                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2356                         break;
2357                 default:
2358                         cnt = 0;
2359                         BT_ERR("Unknown link type");
2360                 }
2361
2362                 q = cnt / num;
2363                 *quote = q ? q : 1;
2364         } else
2365                 *quote = 0;
2366
2367         BT_DBG("conn %p quote %d", conn, *quote);
2368         return conn;
2369 }
2370
2371 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2372 {
2373         struct hci_conn_hash *h = &hdev->conn_hash;
2374         struct hci_conn *c;
2375
2376         BT_ERR("%s link tx timeout", hdev->name);
2377
2378         rcu_read_lock();
2379
2380         /* Kill stalled connections */
2381         list_for_each_entry_rcu(c, &h->list, list) {
2382                 if (c->type == type && c->sent) {
2383                         BT_ERR("%s killing stalled connection %s",
2384                                 hdev->name, batostr(&c->dst));
2385                         hci_acl_disconn(c, 0x13);
2386                 }
2387         }
2388
2389         rcu_read_unlock();
2390 }
2391
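/* Channel-level scheduler: only channels whose head-of-queue skb holds
 * the highest priority currently waiting are considered, and among
 * those the one on the least-busy connection wins.  The quota is
 * computed the same way as in hci_low_sent().
 */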
2392 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2393                                                 int *quote)
2394 {
2395         struct hci_conn_hash *h = &hdev->conn_hash;
2396         struct hci_chan *chan = NULL;
2397         int num = 0, min = ~0, cur_prio = 0;
2398         struct hci_conn *conn;
2399         int cnt, q, conn_num = 0;
2400
2401         BT_DBG("%s", hdev->name);
2402
2403         rcu_read_lock();
2404
2405         list_for_each_entry_rcu(conn, &h->list, list) {
2406                 struct hci_chan *tmp;
2407
2408                 if (conn->type != type)
2409                         continue;
2410
2411                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2412                         continue;
2413
2414                 conn_num++;
2415
2416                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2417                         struct sk_buff *skb;
2418
2419                         if (skb_queue_empty(&tmp->data_q))
2420                                 continue;
2421
2422                         skb = skb_peek(&tmp->data_q);
2423                         if (skb->priority < cur_prio)
2424                                 continue;
2425
2426                         if (skb->priority > cur_prio) {
2427                                 num = 0;
2428                                 min = ~0;
2429                                 cur_prio = skb->priority;
2430                         }
2431
2432                         num++;
2433
2434                         if (conn->sent < min) {
2435                                 min  = conn->sent;
2436                                 chan = tmp;
2437                         }
2438                 }
2439
2440                 if (hci_conn_num(hdev, type) == conn_num)
2441                         break;
2442         }
2443
2444         rcu_read_unlock();
2445
2446         if (!chan)
2447                 return NULL;
2448
2449         switch (chan->conn->type) {
2450         case ACL_LINK:
2451                 cnt = hdev->acl_cnt;
2452                 break;
2453         case SCO_LINK:
2454         case ESCO_LINK:
2455                 cnt = hdev->sco_cnt;
2456                 break;
2457         case LE_LINK:
2458                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2459                 break;
2460         default:
2461                 cnt = 0;
2462                 BT_ERR("Unknown link type");
2463         }
2464
2465         q = cnt / num;
2466         *quote = q ? q : 1;
2467         BT_DBG("chan %p quote %d", chan, *quote);
2468         return chan;
2469 }
2470
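/* Anti-starvation pass, run after a scheduling round that sent data:
 * channels that transmitted have their per-round counter reset, while
 * the head skb of every channel that got nothing is promoted to
 * HCI_PRIO_MAX - 1 so lower-priority queues are eventually served.
 */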
2471 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2472 {
2473         struct hci_conn_hash *h = &hdev->conn_hash;
2474         struct hci_conn *conn;
2475         int num = 0;
2476
2477         BT_DBG("%s", hdev->name);
2478
2479         rcu_read_lock();
2480
2481         list_for_each_entry_rcu(conn, &h->list, list) {
2482                 struct hci_chan *chan;
2483
2484                 if (conn->type != type)
2485                         continue;
2486
2487                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2488                         continue;
2489
2490                 num++;
2491
2492                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2493                         struct sk_buff *skb;
2494
2495                         if (chan->sent) {
2496                                 chan->sent = 0;
2497                                 continue;
2498                         }
2499
2500                         if (skb_queue_empty(&chan->data_q))
2501                                 continue;
2502
2503                         skb = skb_peek(&chan->data_q);
2504                         if (skb->priority >= HCI_PRIO_MAX - 1)
2505                                 continue;
2506
2507                         skb->priority = HCI_PRIO_MAX - 1;
2508
2509                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2510                                                                 skb->priority);
2511                 }
2512
2513                 if (hci_conn_num(hdev, type) == num)
2514                         break;
2515         }
2516
2517         rcu_read_unlock();
2518
2519 }
2520
2521 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2522 {
2523         /* Calculate count of blocks used by this packet */
2524         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2525 }
2526
2527 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2528 {
2529         if (!test_bit(HCI_RAW, &hdev->flags)) {
2530                 /* ACL tx timeout must be longer than maximum
2531                  * link supervision timeout (40.9 seconds) */
2532                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2533                                         msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2534                         hci_link_tx_to(hdev, ACL_LINK);
2535         }
2536 }
2537
2538 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2539 {
2540         unsigned int cnt = hdev->acl_cnt;
2541         struct hci_chan *chan;
2542         struct sk_buff *skb;
2543         int quote;
2544
2545         __check_timeout(hdev, cnt);
2546
2547         while (hdev->acl_cnt &&
2548                         (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2549                 u32 priority = (skb_peek(&chan->data_q))->priority;
2550                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2551                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2552                                         skb->len, skb->priority);
2553
2554                         /* Stop if priority has changed */
2555                         if (skb->priority < priority)
2556                                 break;
2557
2558                         skb = skb_dequeue(&chan->data_q);
2559
2560                         hci_conn_enter_active_mode(chan->conn,
2561                                                    bt_cb(skb)->force_active);
2562
2563                         hci_send_frame(skb);
2564                         hdev->acl_last_tx = jiffies;
2565
2566                         hdev->acl_cnt--;
2567                         chan->sent++;
2568                         chan->conn->sent++;
2569                 }
2570         }
2571
2572         if (cnt != hdev->acl_cnt)
2573                 hci_prio_recalculate(hdev, ACL_LINK);
2574 }
2575
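/* Block-based ACL flow control: the controller advertises a pool of
 * fixed-size buffer blocks and every packet consumes
 * DIV_ROUND_UP(payload, block_len) of them (see __get_blocks() above),
 * so the quota is charged in blocks rather than in packets.
 */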
2576 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2577 {
2578         unsigned int cnt = hdev->block_cnt;
2579         struct hci_chan *chan;
2580         struct sk_buff *skb;
2581         int quote;
2582
2583         __check_timeout(hdev, cnt);
2584
2585         while (hdev->block_cnt > 0 &&
2586                         (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2587                 u32 priority = (skb_peek(&chan->data_q))->priority;
2588                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2589                         int blocks;
2590
2591                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2592                                                 skb->len, skb->priority);
2593
2594                         /* Stop if priority has changed */
2595                         if (skb->priority < priority)
2596                                 break;
2597
2598                         skb = skb_dequeue(&chan->data_q);
2599
2600                         blocks = __get_blocks(hdev, skb);
2601                         if (blocks > hdev->block_cnt)
2602                                 return;
2603
2604                         hci_conn_enter_active_mode(chan->conn,
2605                                                 bt_cb(skb)->force_active);
2606
2607                         hci_send_frame(skb);
2608                         hdev->acl_last_tx = jiffies;
2609
2610                         hdev->block_cnt -= blocks;
2611                         quote -= blocks;
2612
2613                         chan->sent += blocks;
2614                         chan->conn->sent += blocks;
2615                 }
2616         }
2617
2618         if (cnt != hdev->block_cnt)
2619                 hci_prio_recalculate(hdev, ACL_LINK);
2620 }
2621
2622 static inline void hci_sched_acl(struct hci_dev *hdev)
2623 {
2624         BT_DBG("%s", hdev->name);
2625
2626         if (!hci_conn_num(hdev, ACL_LINK))
2627                 return;
2628
2629         switch (hdev->flow_ctl_mode) {
2630         case HCI_FLOW_CTL_MODE_PACKET_BASED:
2631                 hci_sched_acl_pkt(hdev);
2632                 break;
2633
2634         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2635                 hci_sched_acl_blk(hdev);
2636                 break;
2637         }
2638 }
2639
2640 /* Schedule SCO */
2641 static inline void hci_sched_sco(struct hci_dev *hdev)
2642 {
2643         struct hci_conn *conn;
2644         struct sk_buff *skb;
2645         int quote;
2646
2647         BT_DBG("%s", hdev->name);
2648
2649         if (!hci_conn_num(hdev, SCO_LINK))
2650                 return;
2651
2652         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2653                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2654                         BT_DBG("skb %p len %d", skb, skb->len);
2655                         hci_send_frame(skb);
2656
2657                         conn->sent++;
2658                         if (conn->sent == ~0)
2659                                 conn->sent = 0;
2660                 }
2661         }
2662 }
2663
2664 static inline void hci_sched_esco(struct hci_dev *hdev)
2665 {
2666         struct hci_conn *conn;
2667         struct sk_buff *skb;
2668         int quote;
2669
2670         BT_DBG("%s", hdev->name);
2671
2672         if (!hci_conn_num(hdev, ESCO_LINK))
2673                 return;
2674
2675         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2676                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2677                         BT_DBG("skb %p len %d", skb, skb->len);
2678                         hci_send_frame(skb);
2679
2680                         conn->sent++;
2681                         if (conn->sent == ~0)
2682                                 conn->sent = 0;
2683                 }
2684         }
2685 }
2686
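/* LE scheduler: controllers without a dedicated LE buffer pool
 * (le_pkts == 0) share the ACL credits, which is why the remaining
 * count is written back to either le_cnt or acl_cnt afterwards.
 */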
2687 static inline void hci_sched_le(struct hci_dev *hdev)
2688 {
2689         struct hci_chan *chan;
2690         struct sk_buff *skb;
2691         int quote, cnt, tmp;
2692
2693         BT_DBG("%s", hdev->name);
2694
2695         if (!hci_conn_num(hdev, LE_LINK))
2696                 return;
2697
2698         if (!test_bit(HCI_RAW, &hdev->flags)) {
2699                 /* LE tx timeout must be longer than maximum
2700                  * link supervision timeout (40.9 seconds) */
2701                 if (!hdev->le_cnt && hdev->le_pkts &&
2702                                 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2703                         hci_link_tx_to(hdev, LE_LINK);
2704         }
2705
2706         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2707         tmp = cnt;
2708         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2709                 u32 priority = (skb_peek(&chan->data_q))->priority;
2710                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2711                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2712                                         skb->len, skb->priority);
2713
2714                         /* Stop if priority has changed */
2715                         if (skb->priority < priority)
2716                                 break;
2717
2718                         skb = skb_dequeue(&chan->data_q);
2719
2720                         hci_send_frame(skb);
2721                         hdev->le_last_tx = jiffies;
2722
2723                         cnt--;
2724                         chan->sent++;
2725                         chan->conn->sent++;
2726                 }
2727         }
2728
2729         if (hdev->le_pkts)
2730                 hdev->le_cnt = cnt;
2731         else
2732                 hdev->acl_cnt = cnt;
2733
2734         if (cnt != tmp)
2735                 hci_prio_recalculate(hdev, LE_LINK);
2736 }
2737
2738 static void hci_tx_work(struct work_struct *work)
2739 {
2740         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2741         struct sk_buff *skb;
2742
2743         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2744                 hdev->sco_cnt, hdev->le_cnt);
2745
2746         /* Schedule queues and send stuff to HCI driver */
2747
2748         hci_sched_acl(hdev);
2749
2750         hci_sched_sco(hdev);
2751
2752         hci_sched_esco(hdev);
2753
2754         hci_sched_le(hdev);
2755
2756         /* Send next queued raw (unknown type) packet */
2757         while ((skb = skb_dequeue(&hdev->raw_q)))
2758                 hci_send_frame(skb);
2759 }
2760
2761 /* ----- HCI RX task (incoming data processing) ----- */
2762
2763 /* ACL data packet */
2764 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2765 {
2766         struct hci_acl_hdr *hdr = (void *) skb->data;
2767         struct hci_conn *conn;
2768         __u16 handle, flags;
2769
2770         skb_pull(skb, HCI_ACL_HDR_SIZE);
2771
2772         handle = __le16_to_cpu(hdr->handle);
2773         flags  = hci_flags(handle);
2774         handle = hci_handle(handle);
2775
2776         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2777
2778         hdev->stat.acl_rx++;
2779
2780         hci_dev_lock(hdev);
2781         conn = hci_conn_hash_lookup_handle(hdev, handle);
2782         hci_dev_unlock(hdev);
2783
2784         if (conn) {
2785                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2786
2787                 hci_dev_lock(hdev);
2788                 if (test_bit(HCI_MGMT, &hdev->dev_flags) &&
2789                     !test_and_set_bit(HCI_CONN_MGMT_CONNECTED, &conn->flags))
2790                         mgmt_device_connected(hdev, &conn->dst, conn->type,
2791                                               conn->dst_type, 0, NULL, 0,
2792                                               conn->dev_class);
2793                 hci_dev_unlock(hdev);
2794
2795                 /* Send to upper protocol */
2796                 l2cap_recv_acldata(conn, skb, flags);
2797                 return;
2798         } else {
2799                 BT_ERR("%s ACL packet for unknown connection handle %d",
2800                         hdev->name, handle);
2801         }
2802
2803         kfree_skb(skb);
2804 }
2805
2806 /* SCO data packet */
2807 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2808 {
2809         struct hci_sco_hdr *hdr = (void *) skb->data;
2810         struct hci_conn *conn;
2811         __u16 handle;
2812
2813         skb_pull(skb, HCI_SCO_HDR_SIZE);
2814
2815         handle = __le16_to_cpu(hdr->handle);
2816
2817         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2818
2819         hdev->stat.sco_rx++;
2820
2821         hci_dev_lock(hdev);
2822         conn = hci_conn_hash_lookup_handle(hdev, handle);
2823         hci_dev_unlock(hdev);
2824
2825         if (conn) {
2826                 /* Send to upper protocol */
2827                 sco_recv_scodata(conn, skb);
2828                 return;
2829         } else {
2830                 BT_ERR("%s SCO packet for unknown connection handle %d",
2831                         hdev->name, handle);
2832         }
2833
2834         kfree_skb(skb);
2835 }
2836
2837 static void hci_rx_work(struct work_struct *work)
2838 {
2839         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2840         struct sk_buff *skb;
2841
2842         BT_DBG("%s", hdev->name);
2843
2844         while ((skb = skb_dequeue(&hdev->rx_q))) {
2845                 /* Send copy to monitor */
2846                 hci_send_to_monitor(hdev, skb);
2847
2848                 if (atomic_read(&hdev->promisc)) {
2849                         /* Send copy to the sockets */
2850                         hci_send_to_sock(hdev, skb);
2851                 }
2852
2853                 if (test_bit(HCI_RAW, &hdev->flags)) {
2854                         kfree_skb(skb);
2855                         continue;
2856                 }
2857
2858                 if (test_bit(HCI_INIT, &hdev->flags)) {
2859                         /* Don't process data packets in these states. */
2860                         switch (bt_cb(skb)->pkt_type) {
2861                         case HCI_ACLDATA_PKT:
2862                         case HCI_SCODATA_PKT:
2863                                 kfree_skb(skb);
2864                                 continue;
2865                         }
2866                 }
2867
2868                 /* Process frame */
2869                 switch (bt_cb(skb)->pkt_type) {
2870                 case HCI_EVENT_PKT:
2871                         BT_DBG("%s Event packet", hdev->name);
2872                         hci_event_packet(hdev, skb);
2873                         break;
2874
2875                 case HCI_ACLDATA_PKT:
2876                         BT_DBG("%s ACL data packet", hdev->name);
2877                         hci_acldata_packet(hdev, skb);
2878                         break;
2879
2880                 case HCI_SCODATA_PKT:
2881                         BT_DBG("%s SCO data packet", hdev->name);
2882                         hci_scodata_packet(hdev, skb);
2883                         break;
2884
2885                 default:
2886                         kfree_skb(skb);
2887                         break;
2888                 }
2889         }
2890 }
2891
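/* Command scheduler: whenever the controller has a command credit
 * (cmd_cnt), send one queued command, keep a clone in sent_cmd so the
 * matching Command Complete/Status event can be paired with it, and
 * (re)arm cmd_timer to catch controllers that never answer.
 */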
2892 static void hci_cmd_work(struct work_struct *work)
2893 {
2894         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2895         struct sk_buff *skb;
2896
2897         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2898
2899         /* Send queued commands */
2900         if (atomic_read(&hdev->cmd_cnt)) {
2901                 skb = skb_dequeue(&hdev->cmd_q);
2902                 if (!skb)
2903                         return;
2904
2905                 kfree_skb(hdev->sent_cmd);
2906
2907                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2908                 if (hdev->sent_cmd) {
2909                         atomic_dec(&hdev->cmd_cnt);
2910                         hci_send_frame(skb);
2911                         if (test_bit(HCI_RESET, &hdev->flags))
2912                                 del_timer(&hdev->cmd_timer);
2913                         else
2914                                 mod_timer(&hdev->cmd_timer,
2915                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2916                 } else {
2917                         skb_queue_head(&hdev->cmd_q, skb);
2918                         queue_work(hdev->workqueue, &hdev->cmd_work);
2919                 }
2920         }
2921 }
2922
2923 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2924 {
2925         /* General inquiry access code (GIAC) */
2926         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2927         struct hci_cp_inquiry cp;
2928
2929         BT_DBG("%s", hdev->name);
2930
2931         if (test_bit(HCI_INQUIRY, &hdev->flags))
2932                 return -EINPROGRESS;
2933
2934         inquiry_cache_flush(hdev);
2935
2936         memset(&cp, 0, sizeof(cp));
2937         memcpy(&cp.lap, lap, sizeof(cp.lap));
2938         cp.length  = length;
2939
2940         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2941 }
2942
2943 int hci_cancel_inquiry(struct hci_dev *hdev)
2944 {
2945         BT_DBG("%s", hdev->name);
2946
2947         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2948                 return -EPERM;
2949
2950         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2951 }