Bluetooth: Sort to-be-resolved devices by RSSI during discovery
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

int enable_hs;

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase, check whether the completed command
         * matches the last init command, and if not just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
                return;

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
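/* The req callback is expected to queue one or more HCI commands;
 * completion (or cancellation) is signalled through hdev->req_wait_q
 * by hci_req_complete() and hci_req_cancel() above. */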
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_RESET, &hdev->flags);
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
        }

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void amp_init(struct hci_dev *hdev)
{
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Reset */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                queue_work(hdev->workqueue, &hdev->cmd_work);
        }
        skb_queue_purge(&hdev->driver_init);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(hdev);
                break;

        case HCI_AMP:
                amp_init(hdev);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        if (discov->state == DISCOVERY_INQUIRY ||
                                        discov->state == DISCOVERY_RESOLVING)
                return true;

        return false;
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_INQUIRY:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}

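/* Drop every entry from the discovery cache and reset the unknown and
 * resolve lists, leaving discovery in the stopped state. */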
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
        cache->state = DISCOVERY_STOPPED;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                        bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                        bdaddr_t *bdaddr,
                                                        int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

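/* Re-insert @ie into the resolve list so that the list stays ordered by
 * signal strength: entries with a smaller abs(rssi) (i.e. a stronger
 * signal) come first, while entries whose name request is already
 * pending keep their position. Closer devices thus get their names
 * resolved first. */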
void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                                struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                                abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}

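/* Update the cache entry for a device seen during inquiry. Returns false
 * when the device's name still needs to be resolved (or the entry could
 * not be allocated), true when the name can be considered known. */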
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                                                        bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->name_state == NAME_NEEDED &&
                                                data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
                                        ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses, use a buffer of 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate a temporary buffer
         * and then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/* ---- HCI ioctl helpers ---- */

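/* Power on an HCI device: run the driver's open callback and, unless the
 * device is marked raw, send the HCI (and, where supported, LE) init
 * sequences before declaring the device up. */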
int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices if
         * enable_hs is not set */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (lmp_host_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
        }

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        hci_dev_lock(hdev);
        mgmt_powered(hdev, 0);
        hci_dev_unlock(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        hci_init_sysfs(hdev);
        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                schedule_delayed_work(&hdev->power_off,
                                        msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                                        power_off.work);

        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);

        hci_dev_close(hdev->id);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->uuids) {
                struct bt_uuid *uuid;

                uuid = list_entry(p, struct bt_uuid, list);

                list_del(p);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

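/* Decide whether a link key is worth storing persistently. Returns 1 for
 * keys that should survive (legacy keys, keys from connections where
 * bonding was required on both sides or dedicated bonding on either)
 * and 0 for throw-away keys such as debug keys. */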
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                                                u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return 1;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return 0;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return 0;

        /* Security mode 3 case */
        if (!conn)
                return 1;

        /* Neither local nor remote side had no-bonding as requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return 1;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return 1;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return 1;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return 0;
}

struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list) {
                struct key_master_id *id;

                if (k->type != HCI_LK_SMP_LTK)
                        continue;

                if (k->dlen != sizeof(*id))
                        continue;

                id = (void *) &k->data;
                if (id->ediv == ediv &&
                                (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
                        return k;
        }

        return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
                                        bdaddr_t *bdaddr, u8 type)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                                bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type, persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
                                        (!conn || conn->remote_auth == 0xff) &&
                                        old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, 16);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (!persistent) {
                list_del(&key->list);
                kfree(key);
        }

        return 0;
}

int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
                        u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
        struct link_key *key, *old_key;
        struct key_master_id *id;
        u8 old_key_type;

        BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

        old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
        if (old_key) {
                key = old_key;
                old_key_type = old_key->type;
        } else {
                key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
                old_key_type = 0xff;
        }

        key->dlen = sizeof(*id);

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, ltk, sizeof(key->val));
        key->type = HCI_LK_SMP_LTK;
        key->pin_len = key_size;

        id = (void *) &key->data;
        id->ediv = ediv;
        memcpy(id->rand, rand, sizeof(id->rand));

        if (new_key)
                mgmt_new_link_key(hdev, key, old_key_type);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&key->list);
        kfree(key);

        return 0;
}

/* HCI command timer function */
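/* Fires when a sent command never produced a completion event: log the
 * timeout, restore the command credit and kick the command work so the
 * queue does not stall. */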
static void hci_cmd_timer(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        BT_ERR("%s command tx timeout", hdev->name);
        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                                        bdaddr_t *bdaddr)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list)
                if (bacmp(bdaddr, &data->bdaddr) == 0)
                        return data;

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&data->list);
        kfree(data);

        return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }

        return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
                                                                u8 *randomizer)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);

        if (!data) {
                data = kmalloc(sizeof(*data), GFP_ATOMIC);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                list_add(&data->list, &hdev->remote_oob_data);
        }

        memcpy(data->hash, hash, sizeof(data->hash));
        memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

        BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

        return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
                                                bdaddr_t *bdaddr)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->blacklist, list)
                if (bacmp(bdaddr, &b->bdaddr) == 0)
                        return b;

        return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->blacklist) {
                struct bdaddr_list *b;

                b = list_entry(p, struct bdaddr_list, list);

                list_del(p);
                kfree(b);
        }

        return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return -EBADF;

        if (hci_blacklist_lookup(hdev, bdaddr))
                return -EEXIST;

        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, bdaddr);

        list_add(&entry->list, &hdev->blacklist);

        return mgmt_device_blocked(hdev, bdaddr);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return hci_blacklist_clear(hdev);

        entry = hci_blacklist_lookup(hdev, bdaddr);
        if (!entry)
                return -ENOENT;

        list_del(&entry->list);
        kfree(entry);

        return mgmt_device_unblocked(hdev, bdaddr);
}

static void hci_clear_adv_cache(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                                        adv_work.work);

        hci_dev_lock(hdev);

        hci_adv_entries_clear(hdev);

        hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
        struct adv_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        BT_DBG("%s adv cache cleared", hdev->name);

        return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct adv_entry *entry;

        list_for_each_entry(entry, &hdev->adv_entries, list)
                if (bacmp(bdaddr, &entry->bdaddr) == 0)
                        return entry;

        return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
        if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
                return 1;

        return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
                                        struct hci_ev_le_advertising_info *ev)
{
        struct adv_entry *entry;

        if (!is_connectable_adv(ev->evt_type))
                return -EINVAL;

        /* Only new entries should be added to adv_entries, so if the
         * bdaddr is already present, don't add it again. */
        if (hci_find_adv_entry(hdev, &ev->bdaddr))
                return 0;

        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, &ev->bdaddr);
        entry->bdaddr_type = ev->bdaddr_type;

        list_add(&entry->list, &hdev->adv_entries);

        BT_DBG("%s adv entry added: address %s type %u", hdev->name,
                                batostr(&entry->bdaddr), entry->bdaddr_type);

        return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id, error;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        if (!hdev->open || !hdev->close)
                return -EINVAL;

        /* Do not allow HCI_AMP devices to register at index 0,
         * so the index can be used as the AMP controller ID.
         */
        id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

        write_lock(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add_tail(&hdev->list, head);

        mutex_init(&hdev->lock);

        hdev->flags = 0;
        hdev->dev_flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->io_capability = 0x03; /* No Input No Output */

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        discovery_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->mgmt_pending);

        INIT_LIST_HEAD(&hdev->blacklist);

        INIT_LIST_HEAD(&hdev->uuids);

        INIT_LIST_HEAD(&hdev->link_keys);

        INIT_LIST_HEAD(&hdev->remote_oob_data);

        INIT_LIST_HEAD(&hdev->adv_entries);

        INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock(&hci_dev_list_lock);

        hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
                                                        WQ_MEM_RECLAIM, 1);
        if (!hdev->workqueue) {
                error = -ENOMEM;
                goto err;
        }

        error = hci_add_sysfs(hdev);
        if (error < 0)
                goto err_wqueue;

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
        set_bit(HCI_SETUP, &hdev->dev_flags);
        schedule_work(&hdev->power_on);

        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);

        return id;

err_wqueue:
        destroy_workqueue(hdev->workqueue);
err:
        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);

        return error;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        if (!test_bit(HCI_INIT, &hdev->flags) &&
                                !test_bit(HCI_SETUP, &hdev->dev_flags)) {
                hci_dev_lock(hdev);
                mgmt_index_removed(hdev);
                hci_dev_unlock(hdev);
        }

        /* mgmt_index_removed should take care of emptying the
         * pending list */
        BUG_ON(!list_empty(&hdev->mgmt_pending));

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_del_sysfs(hdev);

        cancel_delayed_work_sync(&hdev->adv_work);

        destroy_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);
        hci_blacklist_clear(hdev);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_remote_oob_data_clear(hdev);
        hci_adv_entries_clear(hdev);
        hci_dev_unlock(hdev);

        hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
1777         bt_cb(skb)->incoming = 1;
1778
1779         /* Time stamp */
1780         __net_timestamp(skb);
1781
1782         skb_queue_tail(&hdev->rx_q, skb);
1783         queue_work(hdev->workqueue, &hdev->rx_work);
1784
1785         return 0;
1786 }
1787 EXPORT_SYMBOL(hci_recv_frame);
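
/* A minimal sketch of the driver-side RX path, assuming a hypothetical
 * bus driver that already holds one complete HCI packet; my_recv() is
 * illustrative only, but bt_skb_alloc(), bt_cb() and hci_recv_frame()
 * are the real helpers used throughout this file:
 *
 *	static int my_recv(struct hci_dev *hdev, void *buf, int len, u8 type)
 *	{
 *		struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
 *
 *		if (!skb)
 *			return -ENOMEM;
 *
 *		memcpy(skb_put(skb, len), buf, len);
 *		bt_cb(skb)->pkt_type = type;
 *		skb->dev = (void *) hdev;
 *
 *		return hci_recv_frame(skb);
 *	}
 */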
1788
1789 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1790                                                   int count, __u8 index)
1791 {
1792         int len = 0;
1793         int hlen = 0;
1794         int remain = count;
1795         struct sk_buff *skb;
1796         struct bt_skb_cb *scb;
1797
1798         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1799                                 index >= NUM_REASSEMBLY)
1800                 return -EILSEQ;
1801
1802         skb = hdev->reassembly[index];
1803
1804         if (!skb) {
1805                 switch (type) {
1806                 case HCI_ACLDATA_PKT:
1807                         len = HCI_MAX_FRAME_SIZE;
1808                         hlen = HCI_ACL_HDR_SIZE;
1809                         break;
1810                 case HCI_EVENT_PKT:
1811                         len = HCI_MAX_EVENT_SIZE;
1812                         hlen = HCI_EVENT_HDR_SIZE;
1813                         break;
1814                 case HCI_SCODATA_PKT:
1815                         len = HCI_MAX_SCO_SIZE;
1816                         hlen = HCI_SCO_HDR_SIZE;
1817                         break;
1818                 }
1819
1820                 skb = bt_skb_alloc(len, GFP_ATOMIC);
1821                 if (!skb)
1822                         return -ENOMEM;
1823
1824                 scb = (void *) skb->cb;
1825                 scb->expect = hlen;
1826                 scb->pkt_type = type;
1827
1828                 skb->dev = (void *) hdev;
1829                 hdev->reassembly[index] = skb;
1830         }
1831
1832         while (count) {
1833                 scb = (void *) skb->cb;
1834                 len = min(scb->expect, (__u16)count);
1835
1836                 memcpy(skb_put(skb, len), data, len);
1837
1838                 count -= len;
1839                 data += len;
1840                 scb->expect -= len;
1841                 remain = count;
1842
1843                 switch (type) {
1844                 case HCI_EVENT_PKT:
1845                         if (skb->len == HCI_EVENT_HDR_SIZE) {
1846                                 struct hci_event_hdr *h = hci_event_hdr(skb);
1847                                 scb->expect = h->plen;
1848
1849                                 if (skb_tailroom(skb) < scb->expect) {
1850                                         kfree_skb(skb);
1851                                         hdev->reassembly[index] = NULL;
1852                                         return -ENOMEM;
1853                                 }
1854                         }
1855                         break;
1856
1857                 case HCI_ACLDATA_PKT:
1858                         if (skb->len  == HCI_ACL_HDR_SIZE) {
1859                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1860                                 scb->expect = __le16_to_cpu(h->dlen);
1861
1862                                 if (skb_tailroom(skb) < scb->expect) {
1863                                         kfree_skb(skb);
1864                                         hdev->reassembly[index] = NULL;
1865                                         return -ENOMEM;
1866                                 }
1867                         }
1868                         break;
1869
1870                 case HCI_SCODATA_PKT:
1871                         if (skb->len == HCI_SCO_HDR_SIZE) {
1872                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1873                                 scb->expect = h->dlen;
1874
1875                                 if (skb_tailroom(skb) < scb->expect) {
1876                                         kfree_skb(skb);
1877                                         hdev->reassembly[index] = NULL;
1878                                         return -ENOMEM;
1879                                 }
1880                         }
1881                         break;
1882                 }
1883
1884                 if (scb->expect == 0) {
1885                         /* Complete frame */
1886
1887                         bt_cb(skb)->pkt_type = type;
1888                         hci_recv_frame(skb);
1889
1890                         hdev->reassembly[index] = NULL;
1891                         return remain;
1892                 }
1893         }
1894
1895         return remain;
1896 }
1897
1898 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1899 {
1900         int rem = 0;
1901
1902         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1903                 return -EILSEQ;
1904
1905         while (count) {
1906                 rem = hci_reassembly(hdev, type, data, count, type - 1);
1907                 if (rem < 0)
1908                         return rem;
1909
1910                 data += (count - rem);
1911                 count = rem;
1912         }
1913
1914         return rem;
1915 }
1916 EXPORT_SYMBOL(hci_recv_fragment);
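
/* Sketch of feeding partial bus reads into the reassembler; buf and len
 * come from a hypothetical transfer.  Each call consumes as much input
 * as possible and parks the unfinished packet in hdev->reassembly[]
 * until the next fragment arrives:
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("Frame reassembly failed (%d)", err);
 */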
1917
1918 #define STREAM_REASSEMBLY 0
1919
1920 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1921 {
1922         int type;
1923         int rem = 0;
1924
1925         while (count) {
1926                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1927
1928                 if (!skb) {
1929                         struct { char type; } *pkt;
1930
1931                         /* Start of the frame */
1932                         pkt = data;
1933                         type = pkt->type;
1934
1935                         data++;
1936                         count--;
1937                 } else
1938                         type = bt_cb(skb)->pkt_type;
1939
1940                 rem = hci_reassembly(hdev, type, data, count,
1941                                                         STREAM_REASSEMBLY);
1942                 if (rem < 0)
1943                         return rem;
1944
1945                 data += (count - rem);
1946                 count = rem;
1947         }
1948
1949         return rem;
1950 }
1951 EXPORT_SYMBOL(hci_recv_stream_fragment);
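
/* UART-style transports carry the packet type in-band as the first byte
 * of every frame, so a line discipline can push its raw byte stream
 * unmodified.  A sketch, with data and count taken from a hypothetical
 * tty receive callback:
 *
 *	if (hci_recv_stream_fragment(hdev, data, count) < 0)
 *		BT_ERR("Stream reassembly failed");
 */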
1952
1953 /* ---- Interface to upper protocols ---- */
1954
1955 int hci_register_cb(struct hci_cb *cb)
1956 {
1957         BT_DBG("%p name %s", cb, cb->name);
1958
1959         write_lock(&hci_cb_list_lock);
1960         list_add(&cb->list, &hci_cb_list);
1961         write_unlock(&hci_cb_list_lock);
1962
1963         return 0;
1964 }
1965 EXPORT_SYMBOL(hci_register_cb);
1966
1967 int hci_unregister_cb(struct hci_cb *cb)
1968 {
1969         BT_DBG("%p name %s", cb, cb->name);
1970
1971         write_lock(&hci_cb_list_lock);
1972         list_del(&cb->list);
1973         write_unlock(&hci_cb_list_lock);
1974
1975         return 0;
1976 }
1977 EXPORT_SYMBOL(hci_unregister_cb);
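
/* Upper protocols hook into connection-level events by registering a
 * struct hci_cb.  A minimal sketch for a hypothetical my_proto layer
 * (the callback members are those defined in hci_core.h):
 *
 *	static struct hci_cb my_proto_cb = {
 *		.name		= "my_proto",
 *		.security_cfm	= my_proto_security_cfm,
 *	};
 *
 *	hci_register_cb(&my_proto_cb);
 *	...
 *	hci_unregister_cb(&my_proto_cb);
 */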
1978
1979 static int hci_send_frame(struct sk_buff *skb)
1980 {
1981         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1982
1983         if (!hdev) {
1984                 kfree_skb(skb);
1985                 return -ENODEV;
1986         }
1987
1988         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1989
1990         if (atomic_read(&hdev->promisc)) {
1991                 /* Time stamp */
1992                 __net_timestamp(skb);
1993
1994                 hci_send_to_sock(hdev, skb, NULL);
1995         }
1996
1997         /* Get rid of skb owner, prior to sending to the driver. */
1998         skb_orphan(skb);
1999
2000         return hdev->send(skb);
2001 }
2002
2003 /* Send HCI command */
2004 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2005 {
2006         int len = HCI_COMMAND_HDR_SIZE + plen;
2007         struct hci_command_hdr *hdr;
2008         struct sk_buff *skb;
2009
2010         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2011
2012         skb = bt_skb_alloc(len, GFP_ATOMIC);
2013         if (!skb) {
2014                 BT_ERR("%s no memory for command", hdev->name);
2015                 return -ENOMEM;
2016         }
2017
2018         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2019         hdr->opcode = cpu_to_le16(opcode);
2020         hdr->plen   = plen;
2021
2022         if (plen)
2023                 memcpy(skb_put(skb, plen), param, plen);
2024
2025         BT_DBG("skb len %d", skb->len);
2026
2027         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2028         skb->dev = (void *) hdev;
2029
2030         if (test_bit(HCI_INIT, &hdev->flags))
2031                 hdev->init_last_cmd = opcode;
2032
2033         skb_queue_tail(&hdev->cmd_q, skb);
2034         queue_work(hdev->workqueue, &hdev->cmd_work);
2035
2036         return 0;
2037 }
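
/* Sketch of queueing a command from the core; opcode and parameter
 * struct pairs are defined in hci.h.  For example, a remote name request
 * for a known peer could look like this (conn is illustrative context;
 * cp fields per struct hci_cp_remote_name_req):
 *
 *	struct hci_cp_remote_name_req cp;
 *
 *	memset(&cp, 0, sizeof(cp));
 *	bacpy(&cp.bdaddr, &conn->dst);
 *	hci_send_cmd(hdev, HCI_OP_REMOTE_NAME_REQ, sizeof(cp), &cp);
 */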
2038
2039 /* Get data from the previously sent command */
2040 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2041 {
2042         struct hci_command_hdr *hdr;
2043
2044         if (!hdev->sent_cmd)
2045                 return NULL;
2046
2047         hdr = (void *) hdev->sent_cmd->data;
2048
2049         if (hdr->opcode != cpu_to_le16(opcode))
2050                 return NULL;
2051
2052         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2053
2054         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2055 }
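
/* Typical use from a command-complete handler: recover the parameters of
 * the command that this event answers.  Sketch (the result may be NULL
 * if a different command is in flight):
 *
 *	struct hci_cp_inquiry *cp = hci_sent_cmd_data(hdev, HCI_OP_INQUIRY);
 *
 *	if (!cp)
 *		return;
 */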
2056
2057 /* Send ACL data */
2058 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2059 {
2060         struct hci_acl_hdr *hdr;
2061         int len = skb->len;
2062
2063         skb_push(skb, HCI_ACL_HDR_SIZE);
2064         skb_reset_transport_header(skb);
2065         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2066         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2067         hdr->dlen   = cpu_to_le16(len);
2068 }
2069
2070 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2071                                 struct sk_buff *skb, __u16 flags)
2072 {
2073         struct hci_dev *hdev = conn->hdev;
2074         struct sk_buff *list;
2075
2076         list = skb_shinfo(skb)->frag_list;
2077         if (!list) {
2078                 /* Non-fragmented */
2079                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2080
2081                 skb_queue_tail(queue, skb);
2082         } else {
2083                 /* Fragmented */
2084                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2085
2086                 skb_shinfo(skb)->frag_list = NULL;
2087
2088                 /* Queue all fragments atomically */
2089                 spin_lock(&queue->lock);
2090
2091                 __skb_queue_tail(queue, skb);
2092
2093                 flags &= ~ACL_START;
2094                 flags |= ACL_CONT;
2095                 do {
2096                         skb = list;
                             list = list->next;
2097
2098                         skb->dev = (void *) hdev;
2099                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2100                         hci_add_acl_hdr(skb, conn->handle, flags);
2101
2102                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2103
2104                         __skb_queue_tail(queue, skb);
2105                 } while (list);
2106
2107                 spin_unlock(&queue->lock);
2108         }
2109 }
2110
2111 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2112 {
2113         struct hci_conn *conn = chan->conn;
2114         struct hci_dev *hdev = conn->hdev;
2115
2116         BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2117
2118         skb->dev = (void *) hdev;
2119         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2120         hci_add_acl_hdr(skb, conn->handle, flags);
2121
2122         hci_queue_acl(conn, &chan->data_q, skb, flags);
2123
2124         queue_work(hdev->workqueue, &hdev->tx_work);
2125 }
2126 EXPORT_SYMBOL(hci_send_acl);
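
/* L2CAP is the expected caller here; a sketch of handing one ACL frame
 * to an existing channel, marked as the first fragment:
 *
 *	hci_send_acl(chan, skb, ACL_START);
 *
 * Any frag_list continuation fragments are queued atomically by
 * hci_queue_acl() above.
 */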
2127
2128 /* Send SCO data */
2129 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2130 {
2131         struct hci_dev *hdev = conn->hdev;
2132         struct hci_sco_hdr hdr;
2133
2134         BT_DBG("%s len %d", hdev->name, skb->len);
2135
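        /* hdr.dlen is a __u8, so SCO payloads are limited to 255 bytes */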
2136         hdr.handle = cpu_to_le16(conn->handle);
2137         hdr.dlen   = skb->len;
2138
2139         skb_push(skb, HCI_SCO_HDR_SIZE);
2140         skb_reset_transport_header(skb);
2141         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2142
2143         skb->dev = (void *) hdev;
2144         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2145
2146         skb_queue_tail(&conn->data_q, skb);
2147         queue_work(hdev->workqueue, &hdev->tx_work);
2148 }
2149 EXPORT_SYMBOL(hci_send_sco);
2150
2151 /* ---- HCI TX task (outgoing data) ---- */
2152
2153 /* HCI Connection scheduler */
2154 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2155 {
2156         struct hci_conn_hash *h = &hdev->conn_hash;
2157         struct hci_conn *conn = NULL, *c;
2158         int num = 0, min = ~0;
2159
2160         /* We don't have to lock device here. Connections are always
2161          * added and removed with TX task disabled. */
2162
2163         rcu_read_lock();
2164
2165         list_for_each_entry_rcu(c, &h->list, list) {
2166                 if (c->type != type || skb_queue_empty(&c->data_q))
2167                         continue;
2168
2169                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2170                         continue;
2171
2172                 num++;
2173
2174                 if (c->sent < min) {
2175                         min  = c->sent;
2176                         conn = c;
2177                 }
2178
2179                 if (hci_conn_num(hdev, type) == num)
2180                         break;
2181         }
2182
2183         rcu_read_unlock();
2184
2185         if (conn) {
2186                 int cnt, q;
2187
2188                 switch (conn->type) {
2189                 case ACL_LINK:
2190                         cnt = hdev->acl_cnt;
2191                         break;
2192                 case SCO_LINK:
2193                 case ESCO_LINK:
2194                         cnt = hdev->sco_cnt;
2195                         break;
2196                 case LE_LINK:
2197                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2198                         break;
2199                 default:
2200                         cnt = 0;
2201                         BT_ERR("Unknown link type");
2202                 }
2203
2204                 q = cnt / num;
2205                 *quote = q ? q : 1;
2206         } else
2207                 *quote = 0;
2208
2209         BT_DBG("conn %p quote %d", conn, *quote);
2210         return conn;
2211 }
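
/* The quota computed above is a plain fair share: the controller buffer
 * count for the link type divided by the number of connections with
 * queued data, never less than one.  E.g. with acl_cnt == 8 and three
 * busy ACL links, each pass may send 8 / 3 == 2 frames per connection,
 * starting with the least-serviced one.
 */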
2212
2213 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2214 {
2215         struct hci_conn_hash *h = &hdev->conn_hash;
2216         struct hci_conn *c;
2217
2218         BT_ERR("%s link tx timeout", hdev->name);
2219
2220         rcu_read_lock();
2221
2222         /* Kill stalled connections */
2223         list_for_each_entry_rcu(c, &h->list, list) {
2224                 if (c->type == type && c->sent) {
2225                         BT_ERR("%s killing stalled connection %s",
2226                                 hdev->name, batostr(&c->dst));
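                        /* 0x13: Remote User Terminated Connection */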
2227                         hci_acl_disconn(c, 0x13);
2228                 }
2229         }
2230
2231         rcu_read_unlock();
2232 }
2233
2234 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2235                                                 int *quote)
2236 {
2237         struct hci_conn_hash *h = &hdev->conn_hash;
2238         struct hci_chan *chan = NULL;
2239         int num = 0, min = ~0, cur_prio = 0;
2240         struct hci_conn *conn;
2241         int cnt, q, conn_num = 0;
2242
2243         BT_DBG("%s", hdev->name);
2244
2245         rcu_read_lock();
2246
2247         list_for_each_entry_rcu(conn, &h->list, list) {
2248                 struct hci_chan *tmp;
2249
2250                 if (conn->type != type)
2251                         continue;
2252
2253                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2254                         continue;
2255
2256                 conn_num++;
2257
2258                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2259                         struct sk_buff *skb;
2260
2261                         if (skb_queue_empty(&tmp->data_q))
2262                                 continue;
2263
2264                         skb = skb_peek(&tmp->data_q);
2265                         if (skb->priority < cur_prio)
2266                                 continue;
2267
2268                         if (skb->priority > cur_prio) {
2269                                 num = 0;
2270                                 min = ~0;
2271                                 cur_prio = skb->priority;
2272                         }
2273
2274                         num++;
2275
2276                         if (conn->sent < min) {
2277                                 min  = conn->sent;
2278                                 chan = tmp;
2279                         }
2280                 }
2281
2282                 if (hci_conn_num(hdev, type) == conn_num)
2283                         break;
2284         }
2285
2286         rcu_read_unlock();
2287
2288         if (!chan)
2289                 return NULL;
2290
2291         switch (chan->conn->type) {
2292         case ACL_LINK:
2293                 cnt = hdev->acl_cnt;
2294                 break;
2295         case SCO_LINK:
2296         case ESCO_LINK:
2297                 cnt = hdev->sco_cnt;
2298                 break;
2299         case LE_LINK:
2300                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2301                 break;
2302         default:
2303                 cnt = 0;
2304                 BT_ERR("Unknown link type");
2305         }
2306
2307         q = cnt / num;
2308         *quote = q ? q : 1;
2309         BT_DBG("chan %p quote %d", chan, *quote);
2310         return chan;
2311 }
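
/* Unlike hci_low_sent(), only channels whose head skb carries the
 * highest priority currently queued are eligible, so a high-priority
 * channel (e.g. priority 6 audio) is drained before priority 0 bulk
 * traffic on the same link type.
 */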
2312
2313 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2314 {
2315         struct hci_conn_hash *h = &hdev->conn_hash;
2316         struct hci_conn *conn;
2317         int num = 0;
2318
2319         BT_DBG("%s", hdev->name);
2320
2321         rcu_read_lock();
2322
2323         list_for_each_entry_rcu(conn, &h->list, list) {
2324                 struct hci_chan *chan;
2325
2326                 if (conn->type != type)
2327                         continue;
2328
2329                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2330                         continue;
2331
2332                 num++;
2333
2334                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2335                         struct sk_buff *skb;
2336
2337                         if (chan->sent) {
2338                                 chan->sent = 0;
2339                                 continue;
2340                         }
2341
2342                         if (skb_queue_empty(&chan->data_q))
2343                                 continue;
2344
2345                         skb = skb_peek(&chan->data_q);
2346                         if (skb->priority >= HCI_PRIO_MAX - 1)
2347                                 continue;
2348
2349                         skb->priority = HCI_PRIO_MAX - 1;
2350
2351                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2352                                                                 skb->priority);
2353                 }
2354
2355                 if (hci_conn_num(hdev, type) == num)
2356                         break;
2357         }
2358
2359         rcu_read_unlock();
2360
2361 }
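
/* Starvation control: after a round that actually sent data, any channel
 * that got no service has the priority of its head skb raised to
 * HCI_PRIO_MAX - 1, guaranteeing it is considered on a later pass.
 */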
2362
2363 static inline void hci_sched_acl(struct hci_dev *hdev)
2364 {
2365         struct hci_chan *chan;
2366         struct sk_buff *skb;
2367         int quote;
2368         unsigned int cnt;
2369
2370         BT_DBG("%s", hdev->name);
2371
2372         if (!hci_conn_num(hdev, ACL_LINK))
2373                 return;
2374
2375         if (!test_bit(HCI_RAW, &hdev->flags)) {
2376                 /* ACL tx timeout must be longer than the maximum link
2377                  * supervision timeout (0xffff * 0.625 ms = 40.9 s) */
2378                 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx +
2379                                         msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2380                         hci_link_tx_to(hdev, ACL_LINK);
2381         }
2382
2383         cnt = hdev->acl_cnt;
2384
2385         while (hdev->acl_cnt &&
2386                         (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2387                 u32 priority = (skb_peek(&chan->data_q))->priority;
2388                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2389                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2390                                         skb->len, skb->priority);
2391
2392                         /* Stop if priority has changed */
2393                         if (skb->priority < priority)
2394                                 break;
2395
2396                         skb = skb_dequeue(&chan->data_q);
2397
2398                         hci_conn_enter_active_mode(chan->conn,
2399                                                 bt_cb(skb)->force_active);
2400
2401                         hci_send_frame(skb);
2402                         hdev->acl_last_tx = jiffies;
2403
2404                         hdev->acl_cnt--;
2405                         chan->sent++;
2406                         chan->conn->sent++;
2407                 }
2408         }
2409
2410         if (cnt != hdev->acl_cnt)
2411                 hci_prio_recalculate(hdev, ACL_LINK);
2412 }
2413
2414 /* Schedule SCO */
2415 static inline void hci_sched_sco(struct hci_dev *hdev)
2416 {
2417         struct hci_conn *conn;
2418         struct sk_buff *skb;
2419         int quote;
2420
2421         BT_DBG("%s", hdev->name);
2422
2423         if (!hci_conn_num(hdev, SCO_LINK))
2424                 return;
2425
2426         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2427                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2428                         BT_DBG("skb %p len %d", skb, skb->len);
2429                         hci_send_frame(skb);
2430
2431                         conn->sent++;
2432                         if (conn->sent == ~0)
2433                                 conn->sent = 0;
2434                 }
2435         }
2436 }
2437
2438 static inline void hci_sched_esco(struct hci_dev *hdev)
2439 {
2440         struct hci_conn *conn;
2441         struct sk_buff *skb;
2442         int quote;
2443
2444         BT_DBG("%s", hdev->name);
2445
2446         if (!hci_conn_num(hdev, ESCO_LINK))
2447                 return;
2448
2449         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2450                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2451                         BT_DBG("skb %p len %d", skb, skb->len);
2452                         hci_send_frame(skb);
2453
2454                         conn->sent++;
2455                         if (conn->sent == ~0)
2456                                 conn->sent = 0;
2457                 }
2458         }
2459 }
2460
2461 static inline void hci_sched_le(struct hci_dev *hdev)
2462 {
2463         struct hci_chan *chan;
2464         struct sk_buff *skb;
2465         int quote, cnt, tmp;
2466
2467         BT_DBG("%s", hdev->name);
2468
2469         if (!hci_conn_num(hdev, LE_LINK))
2470                 return;
2471
2472         if (!test_bit(HCI_RAW, &hdev->flags)) {
2473                 /* LE tx timeout must be longer than the maximum link
2474                  * supervision timeout (0xffff * 0.625 ms = 40.9 s) */
2475                 if (!hdev->le_cnt && hdev->le_pkts &&
2476                                 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2477                         hci_link_tx_to(hdev, LE_LINK);
2478         }
2479
2480         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2481         tmp = cnt;
2482         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2483                 u32 priority = (skb_peek(&chan->data_q))->priority;
2484                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2485                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2486                                         skb->len, skb->priority);
2487
2488                         /* Stop if priority has changed */
2489                         if (skb->priority < priority)
2490                                 break;
2491
2492                         skb = skb_dequeue(&chan->data_q);
2493
2494                         hci_send_frame(skb);
2495                         hdev->le_last_tx = jiffies;
2496
2497                         cnt--;
2498                         chan->sent++;
2499                         chan->conn->sent++;
2500                 }
2501         }
2502
2503         if (hdev->le_pkts)
2504                 hdev->le_cnt = cnt;
2505         else
2506                 hdev->acl_cnt = cnt;
2507
2508         if (cnt != tmp)
2509                 hci_prio_recalculate(hdev, LE_LINK);
2510 }
2511
2512 static void hci_tx_work(struct work_struct *work)
2513 {
2514         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2515         struct sk_buff *skb;
2516
2517         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2518                 hdev->sco_cnt, hdev->le_cnt);
2519
2520         /* Schedule queues and send pending frames to the HCI driver */
2521
2522         hci_sched_acl(hdev);
2523
2524         hci_sched_sco(hdev);
2525
2526         hci_sched_esco(hdev);
2527
2528         hci_sched_le(hdev);
2529
2530         /* Send next queued raw (unknown type) packet */
2531         while ((skb = skb_dequeue(&hdev->raw_q)))
2532                 hci_send_frame(skb);
2533 }
2534
2535 /* ----- HCI RX task (incoming data processing) ----- */
2536
2537 /* ACL data packet */
2538 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2539 {
2540         struct hci_acl_hdr *hdr = (void *) skb->data;
2541         struct hci_conn *conn;
2542         __u16 handle, flags;
2543
2544         skb_pull(skb, HCI_ACL_HDR_SIZE);
2545
2546         handle = __le16_to_cpu(hdr->handle);
2547         flags  = hci_flags(handle);
2548         handle = hci_handle(handle);
2549
2550         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2551
2552         hdev->stat.acl_rx++;
2553
2554         hci_dev_lock(hdev);
2555         conn = hci_conn_hash_lookup_handle(hdev, handle);
2556         hci_dev_unlock(hdev);
2557
2558         if (conn) {
2559                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2560
2561                 /* Send to upper protocol */
2562                 l2cap_recv_acldata(conn, skb, flags);
2563                 return;
2564         }
2565
2566         BT_ERR("%s ACL packet for unknown connection handle %d",
2567                 hdev->name, handle);
2568
2569         kfree_skb(skb);
2570 }
2571
2572 /* SCO data packet */
2573 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2574 {
2575         struct hci_sco_hdr *hdr = (void *) skb->data;
2576         struct hci_conn *conn;
2577         __u16 handle;
2578
2579         skb_pull(skb, HCI_SCO_HDR_SIZE);
2580
2581         handle = __le16_to_cpu(hdr->handle);
2582
2583         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2584
2585         hdev->stat.sco_rx++;
2586
2587         hci_dev_lock(hdev);
2588         conn = hci_conn_hash_lookup_handle(hdev, handle);
2589         hci_dev_unlock(hdev);
2590
2591         if (conn) {
2592                 /* Send to upper protocol */
2593                 sco_recv_scodata(conn, skb);
2594                 return;
2595         }
2596
2597         BT_ERR("%s SCO packet for unknown connection handle %d",
2598                 hdev->name, handle);
2599
2600         kfree_skb(skb);
2601 }
2602
2603 static void hci_rx_work(struct work_struct *work)
2604 {
2605         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2606         struct sk_buff *skb;
2607
2608         BT_DBG("%s", hdev->name);
2609
2610         while ((skb = skb_dequeue(&hdev->rx_q))) {
2611                 if (atomic_read(&hdev->promisc)) {
2612                         /* Send copy to the sockets */
2613                         hci_send_to_sock(hdev, skb, NULL);
2614                 }
2615
2616                 if (test_bit(HCI_RAW, &hdev->flags)) {
2617                         kfree_skb(skb);
2618                         continue;
2619                 }
2620
2621                 if (test_bit(HCI_INIT, &hdev->flags)) {
2622                         /* Don't process data packets in this state. */
2623                         switch (bt_cb(skb)->pkt_type) {
2624                         case HCI_ACLDATA_PKT:
2625                         case HCI_SCODATA_PKT:
2626                                 kfree_skb(skb);
2627                                 continue;
2628                         }
2629                 }
2630
2631                 /* Process frame */
2632                 switch (bt_cb(skb)->pkt_type) {
2633                 case HCI_EVENT_PKT:
2634                         BT_DBG("%s Event packet", hdev->name);
2635                         hci_event_packet(hdev, skb);
2636                         break;
2637
2638                 case HCI_ACLDATA_PKT:
2639                         BT_DBG("%s ACL data packet", hdev->name);
2640                         hci_acldata_packet(hdev, skb);
2641                         break;
2642
2643                 case HCI_SCODATA_PKT:
2644                         BT_DBG("%s SCO data packet", hdev->name);
2645                         hci_scodata_packet(hdev, skb);
2646                         break;
2647
2648                 default:
2649                         kfree_skb(skb);
2650                         break;
2651                 }
2652         }
2653 }
2654
2655 static void hci_cmd_work(struct work_struct *work)
2656 {
2657         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2658         struct sk_buff *skb;
2659
2660         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2661
2662         /* Send queued commands */
2663         if (atomic_read(&hdev->cmd_cnt)) {
2664                 skb = skb_dequeue(&hdev->cmd_q);
2665                 if (!skb)
2666                         return;
2667
2668                 kfree_skb(hdev->sent_cmd);
2669
2670                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2671                 if (hdev->sent_cmd) {
2672                         atomic_dec(&hdev->cmd_cnt);
2673                         hci_send_frame(skb);
2674                         if (test_bit(HCI_RESET, &hdev->flags))
2675                                 del_timer(&hdev->cmd_timer);
2676                         else
2677                                 mod_timer(&hdev->cmd_timer,
2678                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2679                 } else {
2680                         skb_queue_head(&hdev->cmd_q, skb);
2681                         queue_work(hdev->workqueue, &hdev->cmd_work);
2682                 }
2683         }
2684 }
2685
2686 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2687 {
2688         /* General inquiry access code (GIAC) */
2689         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2690         struct hci_cp_inquiry cp;
2691
2692         BT_DBG("%s", hdev->name);
2693
2694         if (test_bit(HCI_INQUIRY, &hdev->flags))
2695                 return -EINPROGRESS;
2696
2697         inquiry_cache_flush(hdev);
2698
2699         memset(&cp, 0, sizeof(cp));
2700         memcpy(&cp.lap, lap, sizeof(cp.lap));
2701         cp.length  = length;
2702
2703         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2704 }
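
/* The inquiry length is in units of 1.28 seconds per the HCI
 * specification, so for example:
 *
 *	hci_do_inquiry(hdev, 0x08);	(8 * 1.28 s = 10.24 s)
 */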
2705
2706 int hci_cancel_inquiry(struct hci_dev *hdev)
2707 {
2708         BT_DBG("%s", hdev->name);
2709
2710         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2711                 return -EPERM;
2712
2713         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2714 }
2715
2716 module_param(enable_hs, bool, 0644);
2717 MODULE_PARM_DESC(enable_hs, "Enable High Speed");