Bluetooth: LE scan should send Discovering events
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

bool enable_hs;

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
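
/* Usage sketch (hypothetical callback name, not part of this file):
 * other kernel code can watch for events such as HCI_DEV_UP or
 * HCI_DEV_DOWN through this chain:
 *
 *	static int my_hci_event(struct notifier_block *nb,
 *				unsigned long event, void *ptr)
 *	{
 *		struct hci_dev *hdev = ptr;
 *
 *		if (event == HCI_DEV_UP)
 *			pr_info("%s is up\n", hdev->name);
 *		return NOTIFY_DONE;
 *	}
 *
 *	static struct notifier_block my_nb = {
 *		.notifier_call = my_hci_event,
 *	};
 *
 *	hci_register_notifier(&my_nb);
 */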

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase, check whether the completed command
         * matches the last init command; if not, just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
                return;

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
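
/* Usage sketch (illustrative only): a synchronous request pairs one of
 * the hci_*_req() helpers below with hci_request(), which sleeps until
 * hci_req_complete() is called from the event path or the timeout
 * expires. For example, enabling page and inquiry scan:
 *
 *	err = hci_request(hdev, hci_scan_req,
 *			  SCAN_PAGE | SCAN_INQUIRY,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * This is exactly how the HCISETSCAN ioctl below drives the controller.
 */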

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_RESET, &hdev->flags);
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
        }

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void amp_init(struct hci_dev *hdev)
{
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Reset */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                queue_work(hdev->workqueue, &hdev->cmd_work);
        }
        skb_queue_purge(&hdev->driver_init);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(hdev);
                break;

        case HCI_AMP:
                amp_init(hdev);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */

bool hci_discovery_active(struct hci_dev *hdev)
{
        struct discovery_state *discov = &hdev->discovery;

        if (discov->state == DISCOVERY_INQUIRY ||
                                        discov->state == DISCOVERY_LE_SCAN ||
                                        discov->state == DISCOVERY_RESOLVING)
                return true;

        return false;
}

void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_INQUIRY:
        case DISCOVERY_LE_SCAN:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}
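
/* Note: this state machine is what generates the mgmt Discovering
 * events. Both DISCOVERY_INQUIRY (BR/EDR inquiry) and DISCOVERY_LE_SCAN
 * report mgmt_discovering(hdev, 1), so a pure LE scan is announced to
 * user space just like an inquiry. A typical LE scan is expected to
 * walk (sketch of the intended flow; the transitions themselves are
 * driven from the mgmt and event handling code, not this file):
 *
 *	DISCOVERY_STOPPED -> DISCOVERY_STARTING -> DISCOVERY_LE_SCAN
 *		-> DISCOVERY_STOPPING -> DISCOVERY_STOPPED
 */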

static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *p, *n;

        list_for_each_entry_safe(p, n, &cache->all, all) {
                list_del(&p->all);
                kfree(p);
        }

        INIT_LIST_HEAD(&cache->unknown);
        INIT_LIST_HEAD(&cache->resolve);
        cache->state = DISCOVERY_STOPPED;
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        list_for_each_entry(e, &cache->all, all) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
                                                        bdaddr_t *bdaddr)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        list_for_each_entry(e, &cache->unknown, list) {
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
                                                        bdaddr_t *bdaddr,
                                                        int state)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *e;

        BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);

        list_for_each_entry(e, &cache->resolve, list) {
                if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
                        return e;
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        return e;
        }

        return NULL;
}

void hci_inquiry_cache_update_resolve(struct hci_dev *hdev,
                                                struct inquiry_entry *ie)
{
        struct discovery_state *cache = &hdev->discovery;
        struct list_head *pos = &cache->resolve;
        struct inquiry_entry *p;

        list_del(&ie->list);

        list_for_each_entry(p, &cache->resolve, list) {
                if (p->name_state != NAME_PENDING &&
                                abs(p->data.rssi) >= abs(ie->data.rssi))
                        break;
                pos = &p->list;
        }

        list_add(&ie->list, pos);
}
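
/* In effect, the resolve list stays sorted by signal strength: entries
 * with a smaller abs(rssi) (i.e. a stronger signal, since RSSI is a
 * negative dBm value) sort first, so remote name resolution can be
 * attempted for the closest devices first. Entries whose name request
 * is already NAME_PENDING are skipped by the break condition and so
 * effectively keep their position at the front.
 */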

bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                                                        bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie) {
                if (ie->name_state == NAME_NEEDED &&
                                                data->rssi != ie->data.rssi) {
                        ie->data.rssi = data->rssi;
                        hci_inquiry_cache_update_resolve(hdev, ie);
                }

                goto update;
        }

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        if (name_known && ie->name_state != NAME_KNOWN &&
                                        ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}
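
/* Return value sketch: true means the remote name is already known and
 * no name request is needed for this result; false means the entry sits
 * on the "unknown" list (or allocation failed) and name resolution may
 * still be required. Callers in the event path can use this to decide
 * how to flag the result they forward to mgmt.
 */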

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        list_for_each_entry(e, &cache->all, all) {
                struct inquiry_data *data = &e->data;

                if (copied >= num)
                        break;

                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;

                info++;
                copied++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses, use a buffer with
         * 255 entries. */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and
         * then copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
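
/* User-space sketch of the HCIINQUIRY ioctl served above (hypothetical
 * snippet, assuming a raw HCI socket "sk"):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = { 0 };
 *
 *	buf.ir.dev_id  = 0;
 *	buf.ir.length  = 8;			// inquiry duration
 *	buf.ir.num_rsp = 8;			// at most 8 responses
 *	buf.ir.flags   = IREQ_CACHE_FLUSH;	// drop stale cache entries
 *	memcpy(buf.ir.lap, (__u8 []){ 0x33, 0x8b, 0x9e }, 3);  // GIAC
 *
 *	if (ioctl(sk, HCIINQUIRY, &buf) < 0)
 *		...;
 *	// buf.ir.num_rsp now holds the number of entries in buf.info
 */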

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices if
           enable_hs is not set */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (lmp_host_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->dev_flags)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
        }

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work(&hdev->power_off);

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->dev_flags))
                cancel_delayed_work(&hdev->service_cache);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags) &&
                                test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        hci_dev_lock(hdev);
        mgmt_powered(hdev, 0);
        hci_dev_unlock(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}
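
/* Note on HCISETACLMTU/HCISETSCOMTU: dev_opt packs two 16-bit fields,
 * the packet count in the first __u16 and the MTU in the second (host
 * byte order; the pointer arithmetic above assumes this layout). A
 * hypothetical little-endian user-space caller would do:
 *
 *	dr.dev_opt = ((__u32) mtu << 16) | pkts;
 *	ioctl(sk, HCISETACLMTU, &dr);
 */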

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                        set_bit(HCI_PAIRABLE, &hdev->dev_flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->dev_flags))
                set_bit(HCI_PAIRABLE, &hdev->dev_flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        hci_init_sysfs(hdev);
        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* Will be freed via the device release callback */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->dev_flags))
                schedule_delayed_work(&hdev->power_off,
                                        msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->dev_flags))
                mgmt_index_added(hdev);
}
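
/* Auto-power sketch: hci_register_dev() below sets HCI_AUTO_OFF and
 * HCI_SETUP and schedules this work, so a newly registered controller
 * is brought up immediately and, unless user space (mgmt) takes over,
 * powered back off by hci_power_off() after AUTO_OFF_TIMEOUT (2000 ms).
 */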

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                                        power_off.work);

        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->dev_flags);

        hci_dev_close(hdev->id);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->uuids) {
                struct bt_uuid *uuid;

                uuid = list_entry(p, struct bt_uuid, list);

                list_del(p);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

int hci_smp_ltks_clear(struct hci_dev *hdev)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                                                u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return 1;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return 0;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return 0;

        /* Security mode 3 case */
        if (!conn)
                return 1;

        /* Neither the local nor the remote side requested no-bonding */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return 1;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return 1;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return 1;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return 0;
}
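
/* Worked examples of the rules above (sketch):
 *	- key_type 0x00-0x02 (legacy combination/unit keys): stored
 *	- HCI_LK_DEBUG_COMBINATION: never stored
 *	- changed combination key with no previous key (old type 0xff):
 *	  not stored
 *	- no connection context (security mode 3): stored
 *	- general or dedicated bonding requested on either side: stored
 * Non-persistent keys are handed to mgmt once and then dropped; see
 * hci_add_link_key() below.
 */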

struct smp_ltk *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list) {
                if (k->ediv != ediv ||
                                memcmp(rand, k->rand, sizeof(k->rand)))
                        continue;

                return k;
        }

        return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct smp_ltk *hci_find_ltk_by_addr(struct hci_dev *hdev, bdaddr_t *bdaddr,
                                                                u8 addr_type)
{
        struct smp_ltk *k;

        list_for_each_entry(k, &hdev->long_term_keys, list)
                if (addr_type == k->bdaddr_type &&
                                        bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}
EXPORT_SYMBOL(hci_find_ltk_by_addr);

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                                bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type, persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
                                        (!conn || conn->remote_auth == 0xff) &&
                                        old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, 16);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (!persistent) {
                list_del(&key->list);
                kfree(key);
        }

        return 0;
}

int hci_add_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 addr_type, u8 type,
                                int new_key, u8 authenticated, u8 tk[16],
                                u8 enc_size, u16 ediv, u8 rand[8])
{
        struct smp_ltk *key, *old_key;

        if (!(type & HCI_SMP_STK) && !(type & HCI_SMP_LTK))
                return 0;

        old_key = hci_find_ltk_by_addr(hdev, bdaddr, addr_type);
        if (old_key)
                key = old_key;
        else {
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->long_term_keys);
        }

        bacpy(&key->bdaddr, bdaddr);
        key->bdaddr_type = addr_type;
        memcpy(key->val, tk, sizeof(key->val));
        key->authenticated = authenticated;
        key->ediv = ediv;
        key->enc_size = enc_size;
        key->type = type;
        memcpy(key->rand, rand, sizeof(key->rand));

        if (!new_key)
                return 0;

        if (type & HCI_SMP_LTK)
                mgmt_new_ltk(hdev, key, 1);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&key->list);
        kfree(key);

        return 0;
}

int hci_remove_ltk(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct smp_ltk *k, *tmp;

        list_for_each_entry_safe(k, tmp, &hdev->long_term_keys, list) {
                if (bacmp(bdaddr, &k->bdaddr))
                        continue;

                BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

                list_del(&k->list);
                kfree(k);
        }

        return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        BT_ERR("%s command tx timeout", hdev->name);
        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                                        bdaddr_t *bdaddr)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list)
                if (bacmp(bdaddr, &data->bdaddr) == 0)
                        return data;

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&data->list);
        kfree(data);

        return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }

        return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
                                                                u8 *randomizer)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);

        if (!data) {
                data = kmalloc(sizeof(*data), GFP_ATOMIC);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                list_add(&data->list, &hdev->remote_oob_data);
        }

        memcpy(data->hash, hash, sizeof(data->hash));
        memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

        BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

        return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
                                                bdaddr_t *bdaddr)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->blacklist, list)
                if (bacmp(bdaddr, &b->bdaddr) == 0)
                        return b;

        return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->blacklist) {
                struct bdaddr_list *b;

                b = list_entry(p, struct bdaddr_list, list);

                list_del(p);
                kfree(b);
        }

        return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return -EBADF;

        if (hci_blacklist_lookup(hdev, bdaddr))
                return -EEXIST;

        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, bdaddr);

        list_add(&entry->list, &hdev->blacklist);

        return mgmt_device_blocked(hdev, bdaddr);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return hci_blacklist_clear(hdev);

        entry = hci_blacklist_lookup(hdev, bdaddr);
        if (!entry)
                return -ENOENT;

        list_del(&entry->list);
        kfree(entry);

        return mgmt_device_unblocked(hdev, bdaddr);
}
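
/* Sketch of the blacklist semantics implemented above: adding
 * BDADDR_ANY is rejected with -EBADF, a duplicate add returns -EEXIST,
 * and deleting BDADDR_ANY flushes the entire list:
 *
 *	err = hci_blacklist_add(hdev, &bdaddr);	   // block one device
 *	err = hci_blacklist_del(hdev, BDADDR_ANY); // unblock everything
 *
 * Both paths notify user space via mgmt_device_blocked()/unblocked().
 */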

static void hci_clear_adv_cache(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                                        adv_work.work);

        hci_dev_lock(hdev);

        hci_adv_entries_clear(hdev);

        hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
        struct adv_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        BT_DBG("%s adv cache cleared", hdev->name);

        return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct adv_entry *entry;

        list_for_each_entry(entry, &hdev->adv_entries, list)
                if (bacmp(bdaddr, &entry->bdaddr) == 0)
                        return entry;

        return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
        if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
                return 1;

        return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
                                        struct hci_ev_le_advertising_info *ev)
{
        struct adv_entry *entry;

        if (!is_connectable_adv(ev->evt_type))
                return -EINVAL;

        /* Only new entries should be added to adv_entries. So, if
         * bdaddr was found, don't add it. */
        if (hci_find_adv_entry(hdev, &ev->bdaddr))
                return 0;

        entry = kzalloc(sizeof(*entry), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, &ev->bdaddr);
        entry->bdaddr_type = ev->bdaddr_type;

        list_add(&entry->list, &hdev->adv_entries);

        BT_DBG("%s adv entry added: address %s type %u", hdev->name,
                                batostr(&entry->bdaddr), entry->bdaddr_type);

        return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id, error;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        if (!hdev->open || !hdev->close)
                return -EINVAL;

        /* Do not allow HCI_AMP devices to register at index 0,
         * so the index can be used as the AMP controller ID.
         */
        id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

        write_lock(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add_tail(&hdev->list, head);

        mutex_init(&hdev->lock);

        hdev->flags = 0;
        hdev->dev_flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->io_capability = 0x03; /* No Input No Output */

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        discovery_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->mgmt_pending);

        INIT_LIST_HEAD(&hdev->blacklist);

        INIT_LIST_HEAD(&hdev->uuids);

        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->long_term_keys);

        INIT_LIST_HEAD(&hdev->remote_oob_data);

        INIT_LIST_HEAD(&hdev->adv_entries);

        INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock(&hci_dev_list_lock);

        hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
                                                        WQ_MEM_RECLAIM, 1);
        if (!hdev->workqueue) {
                error = -ENOMEM;
                goto err;
        }

        error = hci_add_sysfs(hdev);
        if (error < 0)
                goto err_wqueue;

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        set_bit(HCI_AUTO_OFF, &hdev->dev_flags);
        set_bit(HCI_SETUP, &hdev->dev_flags);
        schedule_work(&hdev->power_on);

        hci_notify(hdev, HCI_DEV_REG);
        hci_dev_hold(hdev);

        return id;

err_wqueue:
        destroy_workqueue(hdev->workqueue);
err:
        write_lock(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock(&hci_dev_list_lock);

        return error;
}
EXPORT_SYMBOL(hci_register_dev);
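
/* Driver-side usage sketch (hypothetical transport driver; the my_*
 * callback names are placeholders, not part of this file):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus   = HCI_USB;
 *	hdev->open  = my_open;
 *	hdev->close = my_close;
 *	hdev->send  = my_send;
 *
 *	err = hci_register_dev(hdev);
 *	if (err < 0)
 *		hci_free_dev(hdev);
 */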
1723
1724 /* Unregister HCI device */
1725 void hci_unregister_dev(struct hci_dev *hdev)
1726 {
1727         int i;
1728
1729         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
1730
1731         write_lock(&hci_dev_list_lock);
1732         list_del(&hdev->list);
1733         write_unlock(&hci_dev_list_lock);
1734
1735         hci_dev_do_close(hdev);
1736
1737         for (i = 0; i < NUM_REASSEMBLY; i++)
1738                 kfree_skb(hdev->reassembly[i]);
1739
1740         if (!test_bit(HCI_INIT, &hdev->flags) &&
1741                                 !test_bit(HCI_SETUP, &hdev->dev_flags)) {
1742                 hci_dev_lock(hdev);
1743                 mgmt_index_removed(hdev);
1744                 hci_dev_unlock(hdev);
1745         }
1746
1747         /* mgmt_index_removed should take care of emptying the
1748          * pending list */
1749         BUG_ON(!list_empty(&hdev->mgmt_pending));
1750
1751         hci_notify(hdev, HCI_DEV_UNREG);
1752
1753         if (hdev->rfkill) {
1754                 rfkill_unregister(hdev->rfkill);
1755                 rfkill_destroy(hdev->rfkill);
1756         }
1757
1758         hci_del_sysfs(hdev);
1759
1760         cancel_delayed_work_sync(&hdev->adv_work);
1761
1762         destroy_workqueue(hdev->workqueue);
1763
1764         hci_dev_lock(hdev);
1765         hci_blacklist_clear(hdev);
1766         hci_uuids_clear(hdev);
1767         hci_link_keys_clear(hdev);
1768         hci_smp_ltks_clear(hdev);
1769         hci_remote_oob_data_clear(hdev);
1770         hci_adv_entries_clear(hdev);
1771         hci_dev_unlock(hdev);
1772
1773         hci_dev_put(hdev);
1774 }
1775 EXPORT_SYMBOL(hci_unregister_dev);
1776
1777 /* Suspend HCI device */
1778 int hci_suspend_dev(struct hci_dev *hdev)
1779 {
1780         hci_notify(hdev, HCI_DEV_SUSPEND);
1781         return 0;
1782 }
1783 EXPORT_SYMBOL(hci_suspend_dev);
1784
1785 /* Resume HCI device */
1786 int hci_resume_dev(struct hci_dev *hdev)
1787 {
1788         hci_notify(hdev, HCI_DEV_RESUME);
1789         return 0;
1790 }
1791 EXPORT_SYMBOL(hci_resume_dev);
1792
1793 /* Receive frame from HCI drivers */
1794 int hci_recv_frame(struct sk_buff *skb)
1795 {
1796         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1797         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1798                                 && !test_bit(HCI_INIT, &hdev->flags))) {
1799                 kfree_skb(skb);
1800                 return -ENXIO;
1801         }
1802
1803         /* Incoming skb */
1804         bt_cb(skb)->incoming = 1;
1805
1806         /* Time stamp */
1807         __net_timestamp(skb);
1808
1809         skb_queue_tail(&hdev->rx_q, skb);
1810         queue_work(hdev->workqueue, &hdev->rx_work);
1811
1812         return 0;
1813 }
1814 EXPORT_SYMBOL(hci_recv_frame);
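
/* Editor's sketch (hypothetical driver code): delivering one complete
 * event frame to the core. The skb must carry the owning hdev and a
 * packet type before hci_recv_frame() will queue it to rx_q.
 */
static int foo_deliver_event_sketch(struct hci_dev *hdev,
						struct sk_buff *skb)
{
	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;

	return hci_recv_frame(skb);
}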
1815
1816 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1817                                                   int count, __u8 index)
1818 {
1819         int len = 0;
1820         int hlen = 0;
1821         int remain = count;
1822         struct sk_buff *skb;
1823         struct bt_skb_cb *scb;
1824
1825         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1826                                 index >= NUM_REASSEMBLY)
1827                 return -EILSEQ;
1828
1829         skb = hdev->reassembly[index];
1830
1831         if (!skb) {
1832                 switch (type) {
1833                 case HCI_ACLDATA_PKT:
1834                         len = HCI_MAX_FRAME_SIZE;
1835                         hlen = HCI_ACL_HDR_SIZE;
1836                         break;
1837                 case HCI_EVENT_PKT:
1838                         len = HCI_MAX_EVENT_SIZE;
1839                         hlen = HCI_EVENT_HDR_SIZE;
1840                         break;
1841                 case HCI_SCODATA_PKT:
1842                         len = HCI_MAX_SCO_SIZE;
1843                         hlen = HCI_SCO_HDR_SIZE;
1844                         break;
1845                 }
1846
1847                 skb = bt_skb_alloc(len, GFP_ATOMIC);
1848                 if (!skb)
1849                         return -ENOMEM;
1850
1851                 scb = (void *) skb->cb;
1852                 scb->expect = hlen;
1853                 scb->pkt_type = type;
1854
1855                 skb->dev = (void *) hdev;
1856                 hdev->reassembly[index] = skb;
1857         }
1858
1859         while (count) {
1860                 scb = (void *) skb->cb;
1861                 len = min(scb->expect, (__u16)count);
1862
1863                 memcpy(skb_put(skb, len), data, len);
1864
1865                 count -= len;
1866                 data += len;
1867                 scb->expect -= len;
1868                 remain = count;
1869
1870                 switch (type) {
1871                 case HCI_EVENT_PKT:
1872                         if (skb->len == HCI_EVENT_HDR_SIZE) {
1873                                 struct hci_event_hdr *h = hci_event_hdr(skb);
1874                                 scb->expect = h->plen;
1875
1876                                 if (skb_tailroom(skb) < scb->expect) {
1877                                         kfree_skb(skb);
1878                                         hdev->reassembly[index] = NULL;
1879                                         return -ENOMEM;
1880                                 }
1881                         }
1882                         break;
1883
1884                 case HCI_ACLDATA_PKT:
1885                         if (skb->len  == HCI_ACL_HDR_SIZE) {
1886                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1887                                 scb->expect = __le16_to_cpu(h->dlen);
1888
1889                                 if (skb_tailroom(skb) < scb->expect) {
1890                                         kfree_skb(skb);
1891                                         hdev->reassembly[index] = NULL;
1892                                         return -ENOMEM;
1893                                 }
1894                         }
1895                         break;
1896
1897                 case HCI_SCODATA_PKT:
1898                         if (skb->len == HCI_SCO_HDR_SIZE) {
1899                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1900                                 scb->expect = h->dlen;
1901
1902                                 if (skb_tailroom(skb) < scb->expect) {
1903                                         kfree_skb(skb);
1904                                         hdev->reassembly[index] = NULL;
1905                                         return -ENOMEM;
1906                                 }
1907                         }
1908                         break;
1909                 }
1910
1911                 if (scb->expect == 0) {
1912                         /* Complete frame */
1913
1914                         bt_cb(skb)->pkt_type = type;
1915                         hci_recv_frame(skb);
1916
1917                         hdev->reassembly[index] = NULL;
1918                         return remain;
1919                 }
1920         }
1921
1922         return remain;
1923 }
1924
1925 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1926 {
1927         int rem = 0;
1928
1929         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1930                 return -EILSEQ;
1931
1932         while (count) {
1933                 rem = hci_reassembly(hdev, type, data, count, type - 1);
1934                 if (rem < 0)
1935                         return rem;
1936
1937                 data += (count - rem);
1938                 count = rem;
1939         }
1940
1941         return rem;
1942 }
1943 EXPORT_SYMBOL(hci_recv_fragment);
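
/* Editor's sketch: feeding typed-but-fragmented input (here chunks of an
 * ACL packet) as it arrives from the transport. hci_recv_fragment()
 * loops internally, so one call per chunk suffices; a negative return
 * means reassembly for that packet type failed.
 */
static void foo_rx_chunk_sketch(struct hci_dev *hdev, void *buf, int len)
{
	int rem = hci_recv_fragment(hdev, HCI_ACLDATA_PKT, buf, len);

	if (rem < 0)
		BT_ERR("%s ACL reassembly failed (%d)", hdev->name, rem);
}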
1944
1945 #define STREAM_REASSEMBLY 0
1946
1947 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1948 {
1949         int type;
1950         int rem = 0;
1951
1952         while (count) {
1953                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1954
1955                 if (!skb) {
1956                         struct { char type; } *pkt;
1957
1958                         /* Start of the frame */
1959                         pkt = data;
1960                         type = pkt->type;
1961
1962                         data++;
1963                         count--;
1964                 } else
1965                         type = bt_cb(skb)->pkt_type;
1966
1967                 rem = hci_reassembly(hdev, type, data, count,
1968                                                         STREAM_REASSEMBLY);
1969                 if (rem < 0)
1970                         return rem;
1971
1972                 data += (count - rem);
1973                 count = rem;
1974         }
1975
1976         return rem;
1977 }
1978 EXPORT_SYMBOL(hci_recv_stream_fragment);
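
/* Editor's sketch for H:4-style byte streams (e.g. a UART transport):
 * each frame begins with a one-byte packet type indicator, which
 * hci_recv_stream_fragment() consumes itself before reassembling.
 */
static void foo_uart_rx_sketch(struct hci_dev *hdev, void *buf, int len)
{
	if (hci_recv_stream_fragment(hdev, buf, len) < 0)
		BT_ERR("%s stream reassembly failed", hdev->name);
}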
1979
1980 /* ---- Interface to upper protocols ---- */
1981
1982 int hci_register_cb(struct hci_cb *cb)
1983 {
1984         BT_DBG("%p name %s", cb, cb->name);
1985
1986         write_lock(&hci_cb_list_lock);
1987         list_add(&cb->list, &hci_cb_list);
1988         write_unlock(&hci_cb_list_lock);
1989
1990         return 0;
1991 }
1992 EXPORT_SYMBOL(hci_register_cb);
1993
1994 int hci_unregister_cb(struct hci_cb *cb)
1995 {
1996         BT_DBG("%p name %s", cb, cb->name);
1997
1998         write_lock(&hci_cb_list_lock);
1999         list_del(&cb->list);
2000         write_unlock(&hci_cb_list_lock);
2001
2002         return 0;
2003 }
2004 EXPORT_SYMBOL(hci_unregister_cb);
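
/* Editor's sketch: an upper protocol hooking link-level notifications.
 * The hci_cb fields shown are assumed to match this tree; unused hooks
 * may stay NULL, and the foo_* names are hypothetical.
 */
static void foo_security_cfm_sketch(struct hci_conn *conn, __u8 status,
							__u8 encrypt)
{
	BT_DBG("conn %p status 0x%2.2x encrypt %u", conn, status, encrypt);
}

static struct hci_cb foo_cb_sketch = {
	.name		= "foo",
	.security_cfm	= foo_security_cfm_sketch,
};

/* Call hci_register_cb(&foo_cb_sketch) on module init and
 * hci_unregister_cb(&foo_cb_sketch) on module exit.
 */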
2005
2006 static int hci_send_frame(struct sk_buff *skb)
2007 {
2008         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
2009
2010         if (!hdev) {
2011                 kfree_skb(skb);
2012                 return -ENODEV;
2013         }
2014
2015         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
2016
2017         if (atomic_read(&hdev->promisc)) {
2018                 /* Time stamp */
2019                 __net_timestamp(skb);
2020
2021                 hci_send_to_sock(hdev, skb, NULL);
2022         }
2023
2024         /* Get rid of skb owner, prior to sending to the driver. */
2025         skb_orphan(skb);
2026
2027         return hdev->send(skb);
2028 }
2029
2030 /* Send HCI command */
2031 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
2032 {
2033         int len = HCI_COMMAND_HDR_SIZE + plen;
2034         struct hci_command_hdr *hdr;
2035         struct sk_buff *skb;
2036
2037         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
2038
2039         skb = bt_skb_alloc(len, GFP_ATOMIC);
2040         if (!skb) {
2041                 BT_ERR("%s no memory for command", hdev->name);
2042                 return -ENOMEM;
2043         }
2044
2045         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
2046         hdr->opcode = cpu_to_le16(opcode);
2047         hdr->plen   = plen;
2048
2049         if (plen)
2050                 memcpy(skb_put(skb, plen), param, plen);
2051
2052         BT_DBG("skb len %d", skb->len);
2053
2054         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
2055         skb->dev = (void *) hdev;
2056
2057         if (test_bit(HCI_INIT, &hdev->flags))
2058                 hdev->init_last_cmd = opcode;
2059
2060         skb_queue_tail(&hdev->cmd_q, skb);
2061         queue_work(hdev->workqueue, &hdev->cmd_work);
2062
2063         return 0;
2064 }
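
/* Editor's sketch: issuing a fixed-parameter command through
 * hci_send_cmd(). Write Scan Enable takes a single byte; SCAN_PAGE,
 * SCAN_INQUIRY and HCI_OP_WRITE_SCAN_ENABLE come from this tree's hci.h.
 */
static int foo_enable_scan_sketch(struct hci_dev *hdev)
{
	__u8 scan = SCAN_PAGE | SCAN_INQUIRY;

	return hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}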
2065
2066 /* Get data from the previously sent command */
2067 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2068 {
2069         struct hci_command_hdr *hdr;
2070
2071         if (!hdev->sent_cmd)
2072                 return NULL;
2073
2074         hdr = (void *) hdev->sent_cmd->data;
2075
2076         if (hdr->opcode != cpu_to_le16(opcode))
2077                 return NULL;
2078
2079         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2080
2081         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2082 }
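
/* Editor's sketch: a command-complete handler recovering the parameters
 * it originally queued, in the style of the handlers in hci_event.c.
 */
static void foo_cc_scan_enable_sketch(struct hci_dev *hdev, __u8 status)
{
	__u8 *sent = hci_sent_cmd_data(hdev, HCI_OP_WRITE_SCAN_ENABLE);

	if (!sent)
		return;	/* reply does not match the last sent command */

	BT_DBG("%s status 0x%2.2x scan 0x%2.2x", hdev->name, status, *sent);
}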
2083
2084 /* Send ACL data */
2085 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2086 {
2087         struct hci_acl_hdr *hdr;
2088         int len = skb->len;
2089
2090         skb_push(skb, HCI_ACL_HDR_SIZE);
2091         skb_reset_transport_header(skb);
2092         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2093         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2094         hdr->dlen   = cpu_to_le16(len);
2095 }
2096
2097 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
2098                                 struct sk_buff *skb, __u16 flags)
2099 {
2100         struct hci_dev *hdev = conn->hdev;
2101         struct sk_buff *list;
2102
2103         list = skb_shinfo(skb)->frag_list;
2104         if (!list) {
2105                 /* Non-fragmented */
2106                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2107
2108                 skb_queue_tail(queue, skb);
2109         } else {
2110                 /* Fragmented */
2111                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2112
2113                 skb_shinfo(skb)->frag_list = NULL;
2114
2115                 /* Queue all fragments atomically */
2116                 spin_lock(&queue->lock);
2117
2118                 __skb_queue_tail(queue, skb);
2119
2120                 flags &= ~ACL_START;
2121                 flags |= ACL_CONT;
2122                 do {
2123                         skb = list; list = list->next;
2124
2125                         skb->dev = (void *) hdev;
2126                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2127                         hci_add_acl_hdr(skb, conn->handle, flags);
2128
2129                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2130
2131                         __skb_queue_tail(queue, skb);
2132                 } while (list);
2133
2134                 spin_unlock(&queue->lock);
2135         }
2136 }
2137
2138 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2139 {
2140         struct hci_conn *conn = chan->conn;
2141         struct hci_dev *hdev = conn->hdev;
2142
2143         BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2144
2145         skb->dev = (void *) hdev;
2146         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2147         hci_add_acl_hdr(skb, conn->handle, flags);
2148
2149         hci_queue_acl(conn, &chan->data_q, skb, flags);
2150
2151         queue_work(hdev->workqueue, &hdev->tx_work);
2152 }
2153 EXPORT_SYMBOL(hci_send_acl);
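
/* Editor's note (usage sketch): L2CAP submits a possibly frag_list-chained
 * skb via hci_send_acl(chan, skb, ACL_START); hci_queue_acl() above then
 * rewrites every continuation fragment's flags to ACL_CONT before queueing
 * the whole chain atomically under the queue lock.
 */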
2154
2155 /* Send SCO data */
2156 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2157 {
2158         struct hci_dev *hdev = conn->hdev;
2159         struct hci_sco_hdr hdr;
2160
2161         BT_DBG("%s len %d", hdev->name, skb->len);
2162
2163         hdr.handle = cpu_to_le16(conn->handle);
2164         hdr.dlen   = skb->len;
2165
2166         skb_push(skb, HCI_SCO_HDR_SIZE);
2167         skb_reset_transport_header(skb);
2168         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2169
2170         skb->dev = (void *) hdev;
2171         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2172
2173         skb_queue_tail(&conn->data_q, skb);
2174         queue_work(hdev->workqueue, &hdev->tx_work);
2175 }
2176 EXPORT_SYMBOL(hci_send_sco);
2177
2178 /* ---- HCI TX task (outgoing data) ---- */
2179
2180 /* HCI Connection scheduler */
2181 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2182 {
2183         struct hci_conn_hash *h = &hdev->conn_hash;
2184         struct hci_conn *conn = NULL, *c;
2185         int num = 0, min = ~0;
2186
2187         /* We don't have to lock the device here. Connections are always
2188          * added and removed with the TX task disabled. */
2189
2190         rcu_read_lock();
2191
2192         list_for_each_entry_rcu(c, &h->list, list) {
2193                 if (c->type != type || skb_queue_empty(&c->data_q))
2194                         continue;
2195
2196                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2197                         continue;
2198
2199                 num++;
2200
2201                 if (c->sent < min) {
2202                         min  = c->sent;
2203                         conn = c;
2204                 }
2205
2206                 if (hci_conn_num(hdev, type) == num)
2207                         break;
2208         }
2209
2210         rcu_read_unlock();
2211
2212         if (conn) {
2213                 int cnt, q;
2214
2215                 switch (conn->type) {
2216                 case ACL_LINK:
2217                         cnt = hdev->acl_cnt;
2218                         break;
2219                 case SCO_LINK:
2220                 case ESCO_LINK:
2221                         cnt = hdev->sco_cnt;
2222                         break;
2223                 case LE_LINK:
2224                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2225                         break;
2226                 default:
2227                         cnt = 0;
2228                         BT_ERR("Unknown link type");
2229                 }
2230
2231                 q = cnt / num;
2232                 *quote = q ? q : 1;
2233         } else
2234                 *quote = 0;
2235
2236         BT_DBG("conn %p quote %d", conn, *quote);
2237         return conn;
2238 }
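
/* Editor's note (worked example of the quota math above): with
 * hdev->sco_cnt == 8 and three SCO connections holding queued data, the
 * least-busy connection gets quote = 8 / 3 = 2. With cnt == 2 and five
 * connections, q == 0 and the "q ? q : 1" floor still grants one packet,
 * so the scheduler always makes progress.
 */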
2239
2240 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2241 {
2242         struct hci_conn_hash *h = &hdev->conn_hash;
2243         struct hci_conn *c;
2244
2245         BT_ERR("%s link tx timeout", hdev->name);
2246
2247         rcu_read_lock();
2248
2249         /* Kill stalled connections */
2250         list_for_each_entry_rcu(c, &h->list, list) {
2251                 if (c->type == type && c->sent) {
2252                         BT_ERR("%s killing stalled connection %s",
2253                                 hdev->name, batostr(&c->dst));
2254                         hci_acl_disconn(c, 0x13); /* remote user terminated connection */
2255                 }
2256         }
2257
2258         rcu_read_unlock();
2259 }
2260
2261 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2262                                                 int *quote)
2263 {
2264         struct hci_conn_hash *h = &hdev->conn_hash;
2265         struct hci_chan *chan = NULL;
2266         int num = 0, min = ~0, cur_prio = 0;
2267         struct hci_conn *conn;
2268         int cnt, q, conn_num = 0;
2269
2270         BT_DBG("%s", hdev->name);
2271
2272         rcu_read_lock();
2273
2274         list_for_each_entry_rcu(conn, &h->list, list) {
2275                 struct hci_chan *tmp;
2276
2277                 if (conn->type != type)
2278                         continue;
2279
2280                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2281                         continue;
2282
2283                 conn_num++;
2284
2285                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2286                         struct sk_buff *skb;
2287
2288                         if (skb_queue_empty(&tmp->data_q))
2289                                 continue;
2290
2291                         skb = skb_peek(&tmp->data_q);
2292                         if (skb->priority < cur_prio)
2293                                 continue;
2294
2295                         if (skb->priority > cur_prio) {
2296                                 num = 0;
2297                                 min = ~0;
2298                                 cur_prio = skb->priority;
2299                         }
2300
2301                         num++;
2302
2303                         if (conn->sent < min) {
2304                                 min  = conn->sent;
2305                                 chan = tmp;
2306                         }
2307                 }
2308
2309                 if (hci_conn_num(hdev, type) == conn_num)
2310                         break;
2311         }
2312
2313         rcu_read_unlock();
2314
2315         if (!chan)
2316                 return NULL;
2317
2318         switch (chan->conn->type) {
2319         case ACL_LINK:
2320                 cnt = hdev->acl_cnt;
2321                 break;
2322         case SCO_LINK:
2323         case ESCO_LINK:
2324                 cnt = hdev->sco_cnt;
2325                 break;
2326         case LE_LINK:
2327                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2328                 break;
2329         default:
2330                 cnt = 0;
2331                 BT_ERR("Unknown link type");
2332         }
2333
2334         q = cnt / num;
2335         *quote = q ? q : 1;
2336         BT_DBG("chan %p quote %d", chan, *quote);
2337         return chan;
2338 }
2339
2340 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2341 {
2342         struct hci_conn_hash *h = &hdev->conn_hash;
2343         struct hci_conn *conn;
2344         int num = 0;
2345
2346         BT_DBG("%s", hdev->name);
2347
2348         rcu_read_lock();
2349
2350         list_for_each_entry_rcu(conn, &h->list, list) {
2351                 struct hci_chan *chan;
2352
2353                 if (conn->type != type)
2354                         continue;
2355
2356                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2357                         continue;
2358
2359                 num++;
2360
2361                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2362                         struct sk_buff *skb;
2363
2364                         if (chan->sent) {
2365                                 chan->sent = 0;
2366                                 continue;
2367                         }
2368
2369                         if (skb_queue_empty(&chan->data_q))
2370                                 continue;
2371
2372                         skb = skb_peek(&chan->data_q);
2373                         if (skb->priority >= HCI_PRIO_MAX - 1)
2374                                 continue;
2375
2376                         skb->priority = HCI_PRIO_MAX - 1;
2377
2378                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2379                                                                 skb->priority);
2380                 }
2381
2382                 if (hci_conn_num(hdev, type) == num)
2383                         break;
2384         }
2385
2386         rcu_read_unlock();
2387
2388 }
2389
2390 static inline int __get_blocks(struct hci_dev *hdev, struct sk_buff *skb)
2391 {
2392         /* Calculate count of blocks used by this packet */
2393         return DIV_ROUND_UP(skb->len - HCI_ACL_HDR_SIZE, hdev->block_len);
2394 }
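
/* Editor's note (worked example): with hdev->block_len == 64, an skb of
 * 31 bytes (27-byte payload plus the 4-byte ACL header) costs
 * DIV_ROUND_UP(27, 64) = 1 block, while a 300-byte payload costs
 * DIV_ROUND_UP(300, 64) = 5 blocks of the controller's buffer pool.
 */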
2395
2396 static inline void __check_timeout(struct hci_dev *hdev, unsigned int cnt)
2397 {
2398         if (!test_bit(HCI_RAW, &hdev->flags)) {
2399                 /* ACL tx timeout must be longer than maximum
2400                  * link supervision timeout (40.9 seconds) */
2401                 if (!cnt && time_after(jiffies, hdev->acl_last_tx +
2402                                         msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
2403                         hci_link_tx_to(hdev, ACL_LINK);
2404         }
2405 }
2406
2407 static inline void hci_sched_acl_pkt(struct hci_dev *hdev)
2408 {
2409         unsigned int cnt = hdev->acl_cnt;
2410         struct hci_chan *chan;
2411         struct sk_buff *skb;
2412         int quote;
2413
2414         __check_timeout(hdev, cnt);
2415
2416         while (hdev->acl_cnt &&
2417                         (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2418                 u32 priority = (skb_peek(&chan->data_q))->priority;
2419                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2420                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2421                                         skb->len, skb->priority);
2422
2423                         /* Stop if priority has changed */
2424                         if (skb->priority < priority)
2425                                 break;
2426
2427                         skb = skb_dequeue(&chan->data_q);
2428
2429                         hci_conn_enter_active_mode(chan->conn,
2430                                                 bt_cb(skb)->force_active);
2431
2432                         hci_send_frame(skb);
2433                         hdev->acl_last_tx = jiffies;
2434
2435                         hdev->acl_cnt--;
2436                         chan->sent++;
2437                         chan->conn->sent++;
2438                 }
2439         }
2440
2441         if (cnt != hdev->acl_cnt)
2442                 hci_prio_recalculate(hdev, ACL_LINK);
2443 }
2444
2445 static inline void hci_sched_acl_blk(struct hci_dev *hdev)
2446 {
2447         unsigned int cnt = hdev->block_cnt;
2448         struct hci_chan *chan;
2449         struct sk_buff *skb;
2450         int quote;
2451
2452         __check_timeout(hdev, cnt);
2453
2454         while (hdev->block_cnt > 0 &&
2455                         (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2456                 u32 priority = (skb_peek(&chan->data_q))->priority;
2457                 while (quote > 0 && (skb = skb_peek(&chan->data_q))) {
2458                         int blocks;
2459
2460                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2461                                                 skb->len, skb->priority);
2462
2463                         /* Stop if priority has changed */
2464                         if (skb->priority < priority)
2465                                 break;
2466
2467                         skb = skb_dequeue(&chan->data_q);
2468
2469                         blocks = __get_blocks(hdev, skb);
2470                         if (blocks > hdev->block_cnt)
2471                                 return;
2472
2473                         hci_conn_enter_active_mode(chan->conn,
2474                                                 bt_cb(skb)->force_active);
2475
2476                         hci_send_frame(skb);
2477                         hdev->acl_last_tx = jiffies;
2478
2479                         hdev->block_cnt -= blocks;
2480                         quote -= blocks;
2481
2482                         chan->sent += blocks;
2483                         chan->conn->sent += blocks;
2484                 }
2485         }
2486
2487         if (cnt != hdev->block_cnt)
2488                 hci_prio_recalculate(hdev, ACL_LINK);
2489 }
2490
2491 static inline void hci_sched_acl(struct hci_dev *hdev)
2492 {
2493         BT_DBG("%s", hdev->name);
2494
2495         if (!hci_conn_num(hdev, ACL_LINK))
2496                 return;
2497
2498         switch (hdev->flow_ctl_mode) {
2499         case HCI_FLOW_CTL_MODE_PACKET_BASED:
2500                 hci_sched_acl_pkt(hdev);
2501                 break;
2502
2503         case HCI_FLOW_CTL_MODE_BLOCK_BASED:
2504                 hci_sched_acl_blk(hdev);
2505                 break;
2506         }
2507 }
2508
2509 /* Schedule SCO */
2510 static inline void hci_sched_sco(struct hci_dev *hdev)
2511 {
2512         struct hci_conn *conn;
2513         struct sk_buff *skb;
2514         int quote;
2515
2516         BT_DBG("%s", hdev->name);
2517
2518         if (!hci_conn_num(hdev, SCO_LINK))
2519                 return;
2520
2521         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2522                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2523                         BT_DBG("skb %p len %d", skb, skb->len);
2524                         hci_send_frame(skb);
2525
2526                         conn->sent++;
2527                         if (conn->sent == ~0)
2528                                 conn->sent = 0;
2529                 }
2530         }
2531 }
2532
2533 static inline void hci_sched_esco(struct hci_dev *hdev)
2534 {
2535         struct hci_conn *conn;
2536         struct sk_buff *skb;
2537         int quote;
2538
2539         BT_DBG("%s", hdev->name);
2540
2541         if (!hci_conn_num(hdev, ESCO_LINK))
2542                 return;
2543
2544         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2545                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2546                         BT_DBG("skb %p len %d", skb, skb->len);
2547                         hci_send_frame(skb);
2548
2549                         conn->sent++;
2550                         if (conn->sent == ~0)
2551                                 conn->sent = 0;
2552                 }
2553         }
2554 }
2555
2556 static inline void hci_sched_le(struct hci_dev *hdev)
2557 {
2558         struct hci_chan *chan;
2559         struct sk_buff *skb;
2560         int quote, cnt, tmp;
2561
2562         BT_DBG("%s", hdev->name);
2563
2564         if (!hci_conn_num(hdev, LE_LINK))
2565                 return;
2566
2567         if (!test_bit(HCI_RAW, &hdev->flags)) {
2568                 /* LE tx timeout must be longer than maximum
2569                  * link supervision timeout (40.9 seconds) */
2570                 if (!hdev->le_cnt && hdev->le_pkts &&
2571                                 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2572                         hci_link_tx_to(hdev, LE_LINK);
2573         }
2574
2575         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2576         tmp = cnt;
2577         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2578                 u32 priority = (skb_peek(&chan->data_q))->priority;
2579                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2580                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2581                                         skb->len, skb->priority);
2582
2583                         /* Stop if priority has changed */
2584                         if (skb->priority < priority)
2585                                 break;
2586
2587                         skb = skb_dequeue(&chan->data_q);
2588
2589                         hci_send_frame(skb);
2590                         hdev->le_last_tx = jiffies;
2591
2592                         cnt--;
2593                         chan->sent++;
2594                         chan->conn->sent++;
2595                 }
2596         }
2597
2598         if (hdev->le_pkts)
2599                 hdev->le_cnt = cnt;
2600         else
2601                 hdev->acl_cnt = cnt;
2602
2603         if (cnt != tmp)
2604                 hci_prio_recalculate(hdev, LE_LINK);
2605 }
2606
2607 static void hci_tx_work(struct work_struct *work)
2608 {
2609         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2610         struct sk_buff *skb;
2611
2612         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2613                 hdev->sco_cnt, hdev->le_cnt);
2614
2615         /* Schedule queues and send stuff to HCI driver */
2616
2617         hci_sched_acl(hdev);
2618
2619         hci_sched_sco(hdev);
2620
2621         hci_sched_esco(hdev);
2622
2623         hci_sched_le(hdev);
2624
2625         /* Send next queued raw (unknown type) packet */
2626         while ((skb = skb_dequeue(&hdev->raw_q)))
2627                 hci_send_frame(skb);
2628 }
2629
2630 /* ----- HCI RX task (incoming data processing) ----- */
2631
2632 /* ACL data packet */
2633 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2634 {
2635         struct hci_acl_hdr *hdr = (void *) skb->data;
2636         struct hci_conn *conn;
2637         __u16 handle, flags;
2638
2639         skb_pull(skb, HCI_ACL_HDR_SIZE);
2640
2641         handle = __le16_to_cpu(hdr->handle);
2642         flags  = hci_flags(handle);
2643         handle = hci_handle(handle);
2644
2645         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2646
2647         hdev->stat.acl_rx++;
2648
2649         hci_dev_lock(hdev);
2650         conn = hci_conn_hash_lookup_handle(hdev, handle);
2651         hci_dev_unlock(hdev);
2652
2653         if (conn) {
2654                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2655
2656                 /* Send to upper protocol */
2657                 l2cap_recv_acldata(conn, skb, flags);
2658                 return;
2659         } else {
2660                 BT_ERR("%s ACL packet for unknown connection handle %d",
2661                         hdev->name, handle);
2662         }
2663
2664         kfree_skb(skb);
2665 }
2666
2667 /* SCO data packet */
2668 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2669 {
2670         struct hci_sco_hdr *hdr = (void *) skb->data;
2671         struct hci_conn *conn;
2672         __u16 handle;
2673
2674         skb_pull(skb, HCI_SCO_HDR_SIZE);
2675
2676         handle = __le16_to_cpu(hdr->handle);
2677
2678         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2679
2680         hdev->stat.sco_rx++;
2681
2682         hci_dev_lock(hdev);
2683         conn = hci_conn_hash_lookup_handle(hdev, handle);
2684         hci_dev_unlock(hdev);
2685
2686         if (conn) {
2687                 /* Send to upper protocol */
2688                 sco_recv_scodata(conn, skb);
2689                 return;
2690         } else {
2691                 BT_ERR("%s SCO packet for unknown connection handle %d",
2692                         hdev->name, handle);
2693         }
2694
2695         kfree_skb(skb);
2696 }
2697
2698 static void hci_rx_work(struct work_struct *work)
2699 {
2700         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2701         struct sk_buff *skb;
2702
2703         BT_DBG("%s", hdev->name);
2704
2705         while ((skb = skb_dequeue(&hdev->rx_q))) {
2706                 if (atomic_read(&hdev->promisc)) {
2707                         /* Send copy to the sockets */
2708                         hci_send_to_sock(hdev, skb, NULL);
2709                 }
2710
2711                 if (test_bit(HCI_RAW, &hdev->flags)) {
2712                         kfree_skb(skb);
2713                         continue;
2714                 }
2715
2716                 if (test_bit(HCI_INIT, &hdev->flags)) {
2717                         /* Don't process data packets in this state. */
2718                         switch (bt_cb(skb)->pkt_type) {
2719                         case HCI_ACLDATA_PKT:
2720                         case HCI_SCODATA_PKT:
2721                                 kfree_skb(skb);
2722                                 continue;
2723                         }
2724                 }
2725
2726                 /* Process frame */
2727                 switch (bt_cb(skb)->pkt_type) {
2728                 case HCI_EVENT_PKT:
2729                         BT_DBG("%s Event packet", hdev->name);
2730                         hci_event_packet(hdev, skb);
2731                         break;
2732
2733                 case HCI_ACLDATA_PKT:
2734                         BT_DBG("%s ACL data packet", hdev->name);
2735                         hci_acldata_packet(hdev, skb);
2736                         break;
2737
2738                 case HCI_SCODATA_PKT:
2739                         BT_DBG("%s SCO data packet", hdev->name);
2740                         hci_scodata_packet(hdev, skb);
2741                         break;
2742
2743                 default:
2744                         kfree_skb(skb);
2745                         break;
2746                 }
2747         }
2748 }
2749
2750 static void hci_cmd_work(struct work_struct *work)
2751 {
2752         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2753         struct sk_buff *skb;
2754
2755         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2756
2757         /* Send queued commands */
2758         if (atomic_read(&hdev->cmd_cnt)) {
2759                 skb = skb_dequeue(&hdev->cmd_q);
2760                 if (!skb)
2761                         return;
2762
2763                 kfree_skb(hdev->sent_cmd);
2764
2765                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2766                 if (hdev->sent_cmd) {
2767                         atomic_dec(&hdev->cmd_cnt);
2768                         hci_send_frame(skb);
2769                         if (test_bit(HCI_RESET, &hdev->flags))
2770                                 del_timer(&hdev->cmd_timer);
2771                         else
2772                                 mod_timer(&hdev->cmd_timer,
2773                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2774                 } else {
2775                         skb_queue_head(&hdev->cmd_q, skb);
2776                         queue_work(hdev->workqueue, &hdev->cmd_work);
2777                 }
2778         }
2779 }
2780
2781 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2782 {
2783         /* General inquiry access code (GIAC) */
2784         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2785         struct hci_cp_inquiry cp;
2786
2787         BT_DBG("%s", hdev->name);
2788
2789         if (test_bit(HCI_INQUIRY, &hdev->flags))
2790                 return -EINPROGRESS;
2791
2792         inquiry_cache_flush(hdev);
2793
2794         memset(&cp, 0, sizeof(cp));
2795         memcpy(&cp.lap, lap, sizeof(cp.lap));
2796         cp.length  = length;
2797
2798         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2799 }
2800
2801 int hci_cancel_inquiry(struct hci_dev *hdev)
2802 {
2803         BT_DBG("%s", hdev->name);
2804
2805         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2806                 return -EPERM;
2807
2808         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2809 }
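
/* Editor's sketch: driving a BR/EDR discovery cycle with these helpers.
 * The 0x08 inquiry length (8 * 1.28 s = 10.24 s) is assumed to match what
 * the mgmt code in this tree uses for general discovery.
 */
static int foo_start_discovery_sketch(struct hci_dev *hdev)
{
	int err = hci_do_inquiry(hdev, 0x08);

	if (err == -EINPROGRESS)
		BT_DBG("%s inquiry already in progress", hdev->name);

	return err;
}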
2810
2811 module_param(enable_hs, bool, 0644);
2812 MODULE_PARM_DESC(enable_hs, "Enable High Speed");