Bluetooth: Correctly take hci_dev->dev refcount
[linux-flexiantxendom0-3.2.10.git] / net / bluetooth / hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
31
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/notifier.h>
44 #include <linux/rfkill.h>
45 #include <linux/timer.h>
46 #include <linux/crypto.h>
47 #include <net/sock.h>
48
49 #include <asm/system.h>
50 #include <linux/uaccess.h>
51 #include <asm/unaligned.h>
52
53 #include <net/bluetooth/bluetooth.h>
54 #include <net/bluetooth/hci_core.h>
55
56 #define AUTO_OFF_TIMEOUT 2000
57
58 int enable_hs;
59
60 static void hci_rx_work(struct work_struct *work);
61 static void hci_cmd_work(struct work_struct *work);
62 static void hci_tx_work(struct work_struct *work);
63
64 /* HCI device list */
65 LIST_HEAD(hci_dev_list);
66 DEFINE_RWLOCK(hci_dev_list_lock);
67
68 /* HCI callback list */
69 LIST_HEAD(hci_cb_list);
70 DEFINE_RWLOCK(hci_cb_list_lock);
71
72 /* HCI notifiers list */
73 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
74
75 /* ---- HCI notifications ---- */
76
77 int hci_register_notifier(struct notifier_block *nb)
78 {
79         return atomic_notifier_chain_register(&hci_notifier, nb);
80 }
81
82 int hci_unregister_notifier(struct notifier_block *nb)
83 {
84         return atomic_notifier_chain_unregister(&hci_notifier, nb);
85 }
86
/* Broadcast @event (with @hdev as argument) to every registered HCI
 * notifier.  Atomic chain, so safe to call from non-sleeping context. */
static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
91
92 /* ---- HCI requests ---- */
93
94 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
95 {
96         BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
98         /* If this is the init phase check if the completed command matches
99          * the last init command, and if not just return.
100          */
101         if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
102                 return;
103
104         if (hdev->req_status == HCI_REQ_PEND) {
105                 hdev->req_result = result;
106                 hdev->req_status = HCI_REQ_DONE;
107                 wake_up_interruptible(&hdev->req_wait_q);
108         }
109 }
110
111 static void hci_req_cancel(struct hci_dev *hdev, int err)
112 {
113         BT_DBG("%s err 0x%2.2x", hdev->name, err);
114
115         if (hdev->req_status == HCI_REQ_PEND) {
116                 hdev->req_result = err;
117                 hdev->req_status = HCI_REQ_CANCELED;
118                 wake_up_interruptible(&hdev->req_wait_q);
119         }
120 }
121
/* Execute request and wait for completion.
 *
 * Runs @req (which queues HCI commands) and sleeps until
 * hci_req_complete()/hci_req_cancel() flips req_status, or @timeout
 * (in jiffies) expires.  Caller must hold the req lock; req_status
 * must be idle on entry.
 *
 * Returns 0 on success, a negative errno translated from the HCI
 * status on completion, the cancel error, -ETIMEDOUT on timeout, or
 * -EINTR if a signal arrived.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        /* Queue ourselves before firing the request so a fast
         * completion cannot race past the sleep. */
        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        /* NOTE(review): this early return leaves req_status at
         * HCI_REQ_PEND, so a late completion may still write
         * req_result — confirm callers tolerate that. */
        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                /* Controller answered: map HCI status to errno */
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                /* req_result already holds a positive errno */
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}
164
165 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
166                                         unsigned long opt, __u32 timeout)
167 {
168         int ret;
169
170         if (!test_bit(HCI_UP, &hdev->flags))
171                 return -ENETDOWN;
172
173         /* Serialize all requests */
174         hci_req_lock(hdev);
175         ret = __hci_request(hdev, req, opt, timeout);
176         hci_req_unlock(hdev);
177
178         return ret;
179 }
180
/* Request callback: issue HCI_Reset and mark the reset as in
 * progress via HCI_RESET (cleared by event processing). */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
189
/* Queue the init command sequence for a BR/EDR controller.  Runs
 * inside __hci_request() during HCI_INIT; command order matters
 * because completions drive the init state machine forward. */
static void bredr_init(struct hci_dev *hdev)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        /* BR/EDR uses packet-based ACL flow control */
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Mandatory initialization */

        /* Reset, unless the driver marked the controller as one that
         * must not be reset (HCI_QUIRK_NO_RESET) */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_RESET, &hdev->flags);
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
        }

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs (0x7d00 slots) */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        /* Drop all stored link keys from the controller; the host
         * (mgmt/userspace) is the authority for keys. */
        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}
241
/* Queue the (much shorter) init command sequence for an AMP
 * controller. */
static void amp_init(struct hci_dev *hdev)
{
        /* AMP controllers use block-based flow control */
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Reset */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}
252
/* Request callback for device init: first flush any driver-supplied
 * vendor commands, then run the type-specific init sequence. */
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands queued by the driver in hdev->driver_init
         * are sent first, before the standard init sequence. */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                queue_work(hdev->workqueue, &hdev->cmd_work);
        }
        skb_queue_purge(&hdev->driver_init);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(hdev);
                break;

        case HCI_AMP:
                amp_init(hdev);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }

}
286
/* Request callback for the LE init stage, run after the BR/EDR init
 * on host-LE-capable controllers. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
294
295 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
296 {
297         __u8 scan = opt;
298
299         BT_DBG("%s %x", hdev->name, scan);
300
301         /* Inquiry and Page scans */
302         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
303 }
304
/* Request callback: write the authentication-enable setting carried
 * in @opt. */
static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}
314
/* Request callback: write the encryption-mode setting carried in
 * @opt. */
static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}
324
/* Request callback: write the default link policy (little-endian
 * 16-bit bitmask) carried in @opt. */
static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}
334
335 /* Get HCI device by index.
336  * Device is held on return. */
337 struct hci_dev *hci_dev_get(int index)
338 {
339         struct hci_dev *hdev = NULL, *d;
340
341         BT_DBG("%d", index);
342
343         if (index < 0)
344                 return NULL;
345
346         read_lock(&hci_dev_list_lock);
347         list_for_each_entry(d, &hci_dev_list, list) {
348                 if (d->id == index) {
349                         hdev = hci_dev_hold(d);
350                         break;
351                 }
352         }
353         read_unlock(&hci_dev_list_lock);
354         return hdev;
355 }
356
357 /* ---- Inquiry support ---- */
358
359 bool hci_discovery_active(struct hci_dev *hdev)
360 {
361         struct discovery_state *discov = &hdev->discovery;
362
363         if (discov->state == DISCOVERY_INQUIRY ||
364                                         discov->state == DISCOVERY_RESOLVING)
365                 return true;
366
367         return false;
368 }
369
/* Move the discovery state machine to @state, emitting the mgmt
 * "discovering" event on the transitions user space cares about.
 * No-op if already in @state. */
void hci_discovery_set_state(struct hci_dev *hdev, int state)
{
        BT_DBG("%s state %u -> %u", hdev->name, hdev->discovery.state, state);

        if (hdev->discovery.state == state)
                return;

        switch (state) {
        case DISCOVERY_STOPPED:
                mgmt_discovering(hdev, 0);
                break;
        case DISCOVERY_STARTING:
                break;
        case DISCOVERY_INQUIRY:
                mgmt_discovering(hdev, 1);
                break;
        case DISCOVERY_RESOLVING:
                break;
        case DISCOVERY_STOPPING:
                break;
        }

        hdev->discovery.state = state;
}
394
395 static void inquiry_cache_flush(struct hci_dev *hdev)
396 {
397         struct discovery_state *cache = &hdev->discovery;
398         struct inquiry_entry *p, *n;
399
400         list_for_each_entry_safe(p, n, &cache->all, all) {
401                 list_del(&p->all);
402                 kfree(p);
403         }
404
405         INIT_LIST_HEAD(&cache->unknown);
406         INIT_LIST_HEAD(&cache->resolve);
407         cache->state = DISCOVERY_STOPPED;
408 }
409
410 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
411 {
412         struct discovery_state *cache = &hdev->discovery;
413         struct inquiry_entry *e;
414
415         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
416
417         list_for_each_entry(e, &cache->all, all) {
418                 if (!bacmp(&e->data.bdaddr, bdaddr))
419                         return e;
420         }
421
422         return NULL;
423 }
424
425 struct inquiry_entry *hci_inquiry_cache_lookup_unknown(struct hci_dev *hdev,
426                                                         bdaddr_t *bdaddr)
427 {
428         struct discovery_state *cache = &hdev->discovery;
429         struct inquiry_entry *e;
430
431         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
432
433         list_for_each_entry(e, &cache->unknown, list) {
434                 if (!bacmp(&e->data.bdaddr, bdaddr))
435                         return e;
436         }
437
438         return NULL;
439 }
440
441 struct inquiry_entry *hci_inquiry_cache_lookup_resolve(struct hci_dev *hdev,
442                                                         bdaddr_t *bdaddr,
443                                                         int state)
444 {
445         struct discovery_state *cache = &hdev->discovery;
446         struct inquiry_entry *e;
447
448         BT_DBG("cache %p bdaddr %s state %d", cache, batostr(bdaddr), state);
449
450         list_for_each_entry(e, &cache->resolve, list) {
451                 if (!bacmp(bdaddr, BDADDR_ANY) && e->name_state == state)
452                         return e;
453                 if (!bacmp(&e->data.bdaddr, bdaddr))
454                         return e;
455         }
456
457         return NULL;
458 }
459
/* Insert or refresh the cache entry for @data->bdaddr.
 *
 * @name_known: whether the remote name is already known for this
 * response (no name-resolution needed).
 *
 * Returns true if no further name resolution is required for this
 * entry, false if it still sits on the "unknown" list (or allocation
 * failed).  Caller holds the hdev lock; allocation is GFP_ATOMIC. */
bool hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data,
                                                        bool name_known)
{
        struct discovery_state *cache = &hdev->discovery;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (ie)
                goto update;

        /* Entry not in the cache. Add new one. */
        ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
        if (!ie)
                return false;

        list_add(&ie->all, &cache->all);

        if (name_known) {
                ie->name_state = NAME_KNOWN;
        } else {
                /* Track entries whose name must still be resolved */
                ie->name_state = NAME_NOT_KNOWN;
                list_add(&ie->list, &cache->unknown);
        }

update:
        /* Name just became known for an entry previously awaiting
         * resolution: take it off the "unknown" list.  NAME_PENDING
         * entries are on the "resolve" list and are left alone. */
        if (name_known && ie->name_state != NAME_KNOWN &&
                                        ie->name_state != NAME_PENDING) {
                ie->name_state = NAME_KNOWN;
                list_del(&ie->list);
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;

        if (ie->name_state == NAME_NOT_KNOWN)
                return false;

        return true;
}
502
503 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
504 {
505         struct discovery_state *cache = &hdev->discovery;
506         struct inquiry_info *info = (struct inquiry_info *) buf;
507         struct inquiry_entry *e;
508         int copied = 0;
509
510         list_for_each_entry(e, &cache->all, all) {
511                 struct inquiry_data *data = &e->data;
512
513                 if (copied >= num)
514                         break;
515
516                 bacpy(&info->bdaddr, &data->bdaddr);
517                 info->pscan_rep_mode    = data->pscan_rep_mode;
518                 info->pscan_period_mode = data->pscan_period_mode;
519                 info->pscan_mode        = data->pscan_mode;
520                 memcpy(info->dev_class, data->dev_class, 3);
521                 info->clock_offset      = data->clock_offset;
522
523                 info++;
524                 copied++;
525         }
526
527         BT_DBG("cache %p, copied %d", cache, copied);
528         return copied;
529 }
530
/* Request callback: start an HCI inquiry with the LAP, length and
 * response limit from the hci_inquiry_req passed via @opt.  Skipped
 * if an inquiry is already running. */
static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}
547
/* HCIINQUIRY ioctl helper: run (or reuse a cached) inquiry and copy
 * the results to user space.  @arg points to a struct hci_inquiry_req
 * followed by room for the inquiry_info entries.
 *
 * Returns 0 on success or a negative errno. */
int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        /* Re-run the inquiry if the cache is stale or empty, or the
         * caller asked for an explicit flush. */
        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        /* ir.length is in 1.28s units; budget ~2s of wall time each */
        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* for unlimited number of responses we will use buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        /* Write back the updated request header, then the entries */
        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
613
614 /* ---- HCI ioctl helpers ---- */
615
616 int hci_dev_open(__u16 dev)
617 {
618         struct hci_dev *hdev;
619         int ret = 0;
620
621         hdev = hci_dev_get(dev);
622         if (!hdev)
623                 return -ENODEV;
624
625         BT_DBG("%s %p", hdev->name, hdev);
626
627         hci_req_lock(hdev);
628
629         if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
630                 ret = -ERFKILL;
631                 goto done;
632         }
633
634         if (test_bit(HCI_UP, &hdev->flags)) {
635                 ret = -EALREADY;
636                 goto done;
637         }
638
639         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
640                 set_bit(HCI_RAW, &hdev->flags);
641
642         /* Treat all non BR/EDR controllers as raw devices if
643            enable_hs is not set */
644         if (hdev->dev_type != HCI_BREDR && !enable_hs)
645                 set_bit(HCI_RAW, &hdev->flags);
646
647         if (hdev->open(hdev)) {
648                 ret = -EIO;
649                 goto done;
650         }
651
652         if (!test_bit(HCI_RAW, &hdev->flags)) {
653                 atomic_set(&hdev->cmd_cnt, 1);
654                 set_bit(HCI_INIT, &hdev->flags);
655                 hdev->init_last_cmd = 0;
656
657                 ret = __hci_request(hdev, hci_init_req, 0,
658                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
659
660                 if (lmp_host_le_capable(hdev))
661                         ret = __hci_request(hdev, hci_le_init_req, 0,
662                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
663
664                 clear_bit(HCI_INIT, &hdev->flags);
665         }
666
667         if (!ret) {
668                 hci_dev_hold(hdev);
669                 set_bit(HCI_UP, &hdev->flags);
670                 hci_notify(hdev, HCI_DEV_UP);
671                 if (!test_bit(HCI_SETUP, &hdev->flags)) {
672                         hci_dev_lock(hdev);
673                         mgmt_powered(hdev, 1);
674                         hci_dev_unlock(hdev);
675                 }
676         } else {
677                 /* Init failed, cleanup */
678                 flush_work(&hdev->tx_work);
679                 flush_work(&hdev->cmd_work);
680                 flush_work(&hdev->rx_work);
681
682                 skb_queue_purge(&hdev->cmd_q);
683                 skb_queue_purge(&hdev->rx_q);
684
685                 if (hdev->flush)
686                         hdev->flush(hdev);
687
688                 if (hdev->sent_cmd) {
689                         kfree_skb(hdev->sent_cmd);
690                         hdev->sent_cmd = NULL;
691                 }
692
693                 hdev->close(hdev);
694                 hdev->flags = 0;
695         }
696
697 done:
698         hci_req_unlock(hdev);
699         hci_dev_put(hdev);
700         return ret;
701 }
702
/* Tear down an HCI device: cancel pending work, flush queues, reset
 * the controller and close the driver transport.  Drops the reference
 * taken by hci_dev_open() on success.  Always returns 0. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        /* Abort any request still sleeping in __hci_request() */
        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
        }

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
                cancel_delayed_work(&hdev->power_off);

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
                cancel_delayed_work(&hdev->service_cache);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* flush cmd  work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        hci_dev_lock(hdev);
        mgmt_powered(hdev, 0);
        hci_dev_unlock(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        /* Release the reference taken in hci_dev_open() when the
         * device came up. */
        hci_dev_put(hdev);
        return 0;
}
782
783 int hci_dev_close(__u16 dev)
784 {
785         struct hci_dev *hdev;
786         int err;
787
788         hdev = hci_dev_get(dev);
789         if (!hdev)
790                 return -ENODEV;
791         err = hci_dev_do_close(hdev);
792         hci_dev_put(hdev);
793         return err;
794 }
795
/* HCIDEVRESET ioctl helper: flush queues, drop connections and issue
 * an HCI_Reset on a device that is up.  Silently succeeds if the
 * device is down.  Returns 0 or a negative errno. */
int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Restore command credit and packet counters to their
         * post-reset values */
        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}
834
835 int hci_dev_reset_stat(__u16 dev)
836 {
837         struct hci_dev *hdev;
838         int ret = 0;
839
840         hdev = hci_dev_get(dev);
841         if (!hdev)
842                 return -ENODEV;
843
844         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
845
846         hci_dev_put(hdev);
847
848         return ret;
849 }
850
/* Dispatch the HCISET* ioctls.  @arg points to a struct hci_dev_req
 * whose dev_opt carries the per-command value.  Returns 0 or a
 * negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                /* Host-side setting only; no command to the controller */
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                /* dev_opt packs two __u16 values: [0] = packet count,
                 * [1] = MTU (host byte order) */
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}
925
/* HCIGETDEVLIST ioctl helper: copy id/flags for up to dev_num
 * registered devices to user space.  Returns 0 or a negative errno. */
int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        /* Bound dev_num so the size computation below cannot overflow */
        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                /* Listing a device counts as user activity: keep the
                 * auto-off timer from powering it down. */
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
                        cancel_delayed_work(&hdev->power_off);

                /* Legacy (non-mgmt) interface defaults to pairable */
                if (!test_bit(HCI_MGMT, &hdev->flags))
                        set_bit(HCI_PAIRABLE, &hdev->flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock(&hci_dev_list_lock);

        dl->dev_num = n;
        /* Shrink the copy to the entries actually filled in */
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}
972
/* HCIGETDEVINFO ioctl helper: fill a struct hci_dev_info for the
 * device named by di.dev_id and copy it back to user space.
 * Returns 0 or a negative errno. */
int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        /* Querying the device counts as user activity: stop a pending
         * auto power-off. */
        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
                cancel_delayed_work_sync(&hdev->power_off);

        /* Legacy (non-mgmt) interface defaults to pairable */
        if (!test_bit(HCI_MGMT, &hdev->flags))
                set_bit(HCI_PAIRABLE, &hdev->flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        /* Low nibble: bus type; high nibble: controller type */
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}
1014
1015 /* ---- Interface to HCI drivers ---- */
1016
1017 static int hci_rfkill_set_block(void *data, bool blocked)
1018 {
1019         struct hci_dev *hdev = data;
1020
1021         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
1022
1023         if (!blocked)
1024                 return 0;
1025
1026         hci_dev_do_close(hdev);
1027
1028         return 0;
1029 }
1030
/* rfkill operations table: only the block transition is acted upon */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
1034
1035 /* Alloc HCI device */
1036 struct hci_dev *hci_alloc_dev(void)
1037 {
1038         struct hci_dev *hdev;
1039
1040         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
1041         if (!hdev)
1042                 return NULL;
1043
1044         hci_init_sysfs(hdev);
1045         skb_queue_head_init(&hdev->driver_init);
1046
1047         return hdev;
1048 }
1049 EXPORT_SYMBOL(hci_alloc_dev);
1050
/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
	/* Drop any driver-init frames that were never consumed */
	skb_queue_purge(&hdev->driver_init);

	/* will free via device release */
	put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);
1060
/* Deferred power-on work: bring the device up; while it is still
 * auto-managed (HCI_AUTO_OFF set) arm the automatic power-off, and
 * announce the controller to mgmt once setup completes. */
static void hci_power_on(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

	BT_DBG("%s", hdev->name);

	if (hci_dev_open(hdev->id) < 0)
		return;

	if (test_bit(HCI_AUTO_OFF, &hdev->flags))
		schedule_delayed_work(&hdev->power_off,
					msecs_to_jiffies(AUTO_OFF_TIMEOUT));

	if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
		mgmt_index_added(hdev);
}
1077
/* Deferred power-off work: close a device that nobody claimed within
 * AUTO_OFF_TIMEOUT of registration. */
static void hci_power_off(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							power_off.work);

	BT_DBG("%s", hdev->name);

	/* The device is no longer auto-managed once this fires */
	clear_bit(HCI_AUTO_OFF, &hdev->flags);

	hci_dev_close(hdev->id);
}
1089
/* Deferred end of discoverable mode: restrict scanning to page scan
 * only and clear the stored discoverable timeout. */
static void hci_discov_off(struct work_struct *work)
{
	struct hci_dev *hdev;
	u8 scan = SCAN_PAGE;

	hdev = container_of(work, struct hci_dev, discov_off.work);

	BT_DBG("%s", hdev->name);

	hci_dev_lock(hdev);

	hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

	hdev->discov_timeout = 0;

	hci_dev_unlock(hdev);
}
1107
1108 int hci_uuids_clear(struct hci_dev *hdev)
1109 {
1110         struct list_head *p, *n;
1111
1112         list_for_each_safe(p, n, &hdev->uuids) {
1113                 struct bt_uuid *uuid;
1114
1115                 uuid = list_entry(p, struct bt_uuid, list);
1116
1117                 list_del(p);
1118                 kfree(uuid);
1119         }
1120
1121         return 0;
1122 }
1123
1124 int hci_link_keys_clear(struct hci_dev *hdev)
1125 {
1126         struct list_head *p, *n;
1127
1128         list_for_each_safe(p, n, &hdev->link_keys) {
1129                 struct link_key *key;
1130
1131                 key = list_entry(p, struct link_key, list);
1132
1133                 list_del(p);
1134                 kfree(key);
1135         }
1136
1137         return 0;
1138 }
1139
1140 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1141 {
1142         struct link_key *k;
1143
1144         list_for_each_entry(k, &hdev->link_keys, list)
1145                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1146                         return k;
1147
1148         return NULL;
1149 }
1150
/* Decide whether a freshly created link key should be stored
 * persistently (returns 1) or discarded once the connection goes
 * down (returns 0), based on the key type and the bonding
 * requirements both sides declared during pairing. */
static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
						u8 key_type, u8 old_key_type)
{
	/* Legacy key */
	if (key_type < 0x03)
		return 1;

	/* Debug keys are insecure so don't store them persistently */
	if (key_type == HCI_LK_DEBUG_COMBINATION)
		return 0;

	/* Changed combination key and there's no previous one */
	if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
		return 0;

	/* Security mode 3 case */
	if (!conn)
		return 1;

	/* Neither local nor remote side had no-bonding as requirement */
	if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
		return 1;

	/* Local side had dedicated bonding as requirement */
	if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
		return 1;

	/* Remote side had dedicated bonding as requirement */
	if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
		return 1;

	/* If none of the above criteria match, then don't store the key
	 * persistently */
	return 0;
}
1186
1187 struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
1188 {
1189         struct link_key *k;
1190
1191         list_for_each_entry(k, &hdev->link_keys, list) {
1192                 struct key_master_id *id;
1193
1194                 if (k->type != HCI_LK_SMP_LTK)
1195                         continue;
1196
1197                 if (k->dlen != sizeof(*id))
1198                         continue;
1199
1200                 id = (void *) &k->data;
1201                 if (id->ediv == ediv &&
1202                                 (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
1203                         return k;
1204         }
1205
1206         return NULL;
1207 }
1208 EXPORT_SYMBOL(hci_find_ltk);
1209
1210 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1211                                         bdaddr_t *bdaddr, u8 type)
1212 {
1213         struct link_key *k;
1214
1215         list_for_each_entry(k, &hdev->link_keys, list)
1216                 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1217                         return k;
1218
1219         return NULL;
1220 }
1221 EXPORT_SYMBOL(hci_find_link_key_type);
1222
/* Store or update the link key for @bdaddr. @new_key is set when the
 * controller just generated the key (as opposed to a key loaded from
 * storage); only then is mgmt notified, and keys deemed
 * non-persistent are removed again right after the notification.
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be
 * allocated. */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse the existing entry for this address if there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A "changed" key keeps the original key's type on record */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Non-persistent keys exist only for the mgmt notification */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1277
/* Store or refresh an SMP Long Term Key for @bdaddr. The ediv/rand
 * pair identifying the key is kept in the entry's trailing data[]
 * area (see hci_find_ltk()). mgmt is notified when @new_key is set.
 *
 * Returns 0 on success or -ENOMEM if a new entry cannot be
 * allocated. */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	/* Reuse an existing LTK entry for this address if present */
	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* Allocate room for the key plus its ediv/rand id */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1315
1316 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1317 {
1318         struct link_key *key;
1319
1320         key = hci_find_link_key(hdev, bdaddr);
1321         if (!key)
1322                 return -ENOENT;
1323
1324         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1325
1326         list_del(&key->list);
1327         kfree(key);
1328
1329         return 0;
1330 }
1331
/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
	struct hci_dev *hdev = (void *) arg;

	/* The controller never answered the last command: log it and
	 * restore a command credit so queued commands can proceed. */
	BT_ERR("%s command tx timeout", hdev->name);
	atomic_set(&hdev->cmd_cnt, 1);
	queue_work(hdev->workqueue, &hdev->cmd_work);
}
1341
1342 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1343                                                         bdaddr_t *bdaddr)
1344 {
1345         struct oob_data *data;
1346
1347         list_for_each_entry(data, &hdev->remote_oob_data, list)
1348                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1349                         return data;
1350
1351         return NULL;
1352 }
1353
1354 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1355 {
1356         struct oob_data *data;
1357
1358         data = hci_find_remote_oob_data(hdev, bdaddr);
1359         if (!data)
1360                 return -ENOENT;
1361
1362         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1363
1364         list_del(&data->list);
1365         kfree(data);
1366
1367         return 0;
1368 }
1369
1370 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1371 {
1372         struct oob_data *data, *n;
1373
1374         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1375                 list_del(&data->list);
1376                 kfree(data);
1377         }
1378
1379         return 0;
1380 }
1381
1382 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1383                                                                 u8 *randomizer)
1384 {
1385         struct oob_data *data;
1386
1387         data = hci_find_remote_oob_data(hdev, bdaddr);
1388
1389         if (!data) {
1390                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1391                 if (!data)
1392                         return -ENOMEM;
1393
1394                 bacpy(&data->bdaddr, bdaddr);
1395                 list_add(&data->list, &hdev->remote_oob_data);
1396         }
1397
1398         memcpy(data->hash, hash, sizeof(data->hash));
1399         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1400
1401         BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1402
1403         return 0;
1404 }
1405
1406 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1407                                                 bdaddr_t *bdaddr)
1408 {
1409         struct bdaddr_list *b;
1410
1411         list_for_each_entry(b, &hdev->blacklist, list)
1412                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1413                         return b;
1414
1415         return NULL;
1416 }
1417
1418 int hci_blacklist_clear(struct hci_dev *hdev)
1419 {
1420         struct list_head *p, *n;
1421
1422         list_for_each_safe(p, n, &hdev->blacklist) {
1423                 struct bdaddr_list *b;
1424
1425                 b = list_entry(p, struct bdaddr_list, list);
1426
1427                 list_del(p);
1428                 kfree(b);
1429         }
1430
1431         return 0;
1432 }
1433
1434 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1435 {
1436         struct bdaddr_list *entry;
1437
1438         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1439                 return -EBADF;
1440
1441         if (hci_blacklist_lookup(hdev, bdaddr))
1442                 return -EEXIST;
1443
1444         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1445         if (!entry)
1446                 return -ENOMEM;
1447
1448         bacpy(&entry->bdaddr, bdaddr);
1449
1450         list_add(&entry->list, &hdev->blacklist);
1451
1452         return mgmt_device_blocked(hdev, bdaddr);
1453 }
1454
1455 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1456 {
1457         struct bdaddr_list *entry;
1458
1459         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1460                 return hci_blacklist_clear(hdev);
1461
1462         entry = hci_blacklist_lookup(hdev, bdaddr);
1463         if (!entry)
1464                 return -ENOENT;
1465
1466         list_del(&entry->list);
1467         kfree(entry);
1468
1469         return mgmt_device_unblocked(hdev, bdaddr);
1470 }
1471
/* Delayed work that expires the LE advertising cache. */
static void hci_clear_adv_cache(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev,
							adv_work.work);

	hci_dev_lock(hdev);

	hci_adv_entries_clear(hdev);

	hci_dev_unlock(hdev);
}
1483
1484 int hci_adv_entries_clear(struct hci_dev *hdev)
1485 {
1486         struct adv_entry *entry, *tmp;
1487
1488         list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1489                 list_del(&entry->list);
1490                 kfree(entry);
1491         }
1492
1493         BT_DBG("%s adv cache cleared", hdev->name);
1494
1495         return 0;
1496 }
1497
1498 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1499 {
1500         struct adv_entry *entry;
1501
1502         list_for_each_entry(entry, &hdev->adv_entries, list)
1503                 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1504                         return entry;
1505
1506         return NULL;
1507 }
1508
1509 static inline int is_connectable_adv(u8 evt_type)
1510 {
1511         if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1512                 return 1;
1513
1514         return 0;
1515 }
1516
/* Cache the source address of a connectable LE advertising report.
 * Non-connectable event types are rejected with -EINVAL; addresses
 * already in the cache are silently ignored.
 *
 * Returns 0 on success (or duplicate) and -ENOMEM on allocation
 * failure. */
int hci_add_adv_entry(struct hci_dev *hdev,
					struct hci_ev_le_advertising_info *ev)
{
	struct adv_entry *entry;

	if (!is_connectable_adv(ev->evt_type))
		return -EINVAL;

	/* Only new entries should be added to adv_entries. So, if
	 * bdaddr was found, don't add it. */
	if (hci_find_adv_entry(hdev, &ev->bdaddr))
		return 0;

	entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
	if (!entry)
		return -ENOMEM;

	bacpy(&entry->bdaddr, &ev->bdaddr);
	entry->bdaddr_type = ev->bdaddr_type;

	list_add(&entry->list, &hdev->adv_entries);

	BT_DBG("%s adv entry added: address %s type %u", hdev->name,
				batostr(&entry->bdaddr), entry->bdaddr_type);

	return 0;
}
1544
/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* A driver must supply at least open and close callbacks */
	if (!hdev->open || !hdev->close)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert after the last consecutively-numbered entry so the
	 * list stays sorted by id */
	list_add_tail(&hdev->list, head);

	mutex_init(&hdev->lock);

	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	discovery_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock(&hci_dev_list_lock);

	/* Per-device single-threaded workqueue for rx/tx/cmd work */
	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill registration failure is not fatal; the device simply
	 * has no rfkill switch then */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Auto-power the device on; hci_power_on clears HCI_SETUP and
	 * may arm the automatic power-off */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);
	__hci_dev_hold(hdev);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done under the lock above */
	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1671
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	write_lock(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Free any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce the removal to mgmt if the controller had
	 * finished its setup phase */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Release all per-device state under the device lock */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the reference taken by hci_register_dev() */
	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1723
/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; no device state change here */
	hci_notify(hdev, HCI_DEV_SUSPEND);
	return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);
1731
/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
	/* Only notifies registered listeners; no device state change here */
	hci_notify(hdev, HCI_DEV_RESUME);
	return 0;
}
EXPORT_SYMBOL(hci_resume_dev);
1739
/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	/* Frames are only accepted while the device is up or being
	 * initialized; otherwise they are dropped. */
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Processing continues asynchronously in rx_work */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1762
/* Incrementally reassemble a single HCI packet of the given @type
 * from driver-provided bytes, using hdev->reassembly[@index] to keep
 * state across calls. A completed packet is pushed into the RX path
 * via hci_recv_frame().
 *
 * Returns the number of input bytes left unconsumed (>= 0), -EILSEQ
 * for an invalid type or index, or -ENOMEM when allocation fails or
 * the announced payload would not fit the buffer.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* No packet in progress: allocate a buffer sized for
		 * the worst case of this packet type. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		scb = (void *) skb->cb;
		/* expect = bytes still needed to complete the header */
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, pick up the payload
		 * length from it and sanity-check it against the
		 * remaining buffer space. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len  == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1871
/* Feed a buffer of fragments belonging to packets of one known @type
 * into the reassembler (reassembly slot type - 1).
 *
 * Returns the leftover byte count from the last reassembly step or a
 * negative error. */
int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
	int rem = 0;

	if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
		return -EILSEQ;

	while (count) {
		rem = hci_reassembly(hdev, type, data, count, type - 1);
		if (rem < 0)
			return rem;

		/* Skip over what the reassembler consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);
1891
#define STREAM_REASSEMBLY 0

/* Reassemble HCI packets from a raw byte stream in which the first
 * byte of every frame carries the packet type indicator.
 *
 * Returns the leftover byte count from the last reassembly step or a
 * negative error. */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		/* Skip over what the reassembler consumed */
		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1926
1927 /* ---- Interface to upper protocols ---- */
1928
/* Register an upper-protocol callback set with the HCI core.
 * Always returns 0. */
int hci_register_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_add(&cb->list, &hci_cb_list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_register_cb);
1940
/* Remove a previously registered upper-protocol callback set.
 * Always returns 0. */
int hci_unregister_cb(struct hci_cb *cb)
{
	BT_DBG("%p name %s", cb, cb->name);

	write_lock(&hci_cb_list_lock);
	list_del(&cb->list);
	write_unlock(&hci_cb_list_lock);

	return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);
1952
/* Hand one frame to the driver's send hook. When promiscuous
 * listeners (raw HCI sockets) exist, a timestamped copy is delivered
 * to them first. Frees the skb itself only when no device is
 * attached; otherwise the driver takes it over. */
static int hci_send_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;

	if (!hdev) {
		kfree_skb(skb);
		return -ENODEV;
	}

	BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

	if (atomic_read(&hdev->promisc)) {
		/* Time stamp */
		__net_timestamp(skb);

		hci_send_to_sock(hdev, skb, NULL);
	}

	/* Get rid of skb owner, prior to sending to the driver. */
	skb_orphan(skb);

	return hdev->send(skb);
}
1976
/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	/* Build the command header, then append the parameter block */
	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Track the last command issued during the init sequence */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	/* Actual transmission happens asynchronously from cmd_work */
	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
2012
2013 /* Get data from the previously sent command */
2014 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
2015 {
2016         struct hci_command_hdr *hdr;
2017
2018         if (!hdev->sent_cmd)
2019                 return NULL;
2020
2021         hdr = (void *) hdev->sent_cmd->data;
2022
2023         if (hdr->opcode != cpu_to_le16(opcode))
2024                 return NULL;
2025
2026         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
2027
2028         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
2029 }
2030
2031 /* Send ACL data */
2032 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
2033 {
2034         struct hci_acl_hdr *hdr;
2035         int len = skb->len;
2036
2037         skb_push(skb, HCI_ACL_HDR_SIZE);
2038         skb_reset_transport_header(skb);
2039         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
2040         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
2041         hdr->dlen   = cpu_to_le16(len);
2042 }
2043
/* Queue an ACL frame (plus any fragments hanging off its frag_list)
 * on @queue.  The caller has already tagged and ACL-headered the
 * first skb; every additional fragment gets its own header here,
 * re-flagged as a continuation (ACL_CONT).  All fragments are queued
 * under the queue lock so the TX scheduler never sees a partial
 * frame.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the fragments; each is queued individually below */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Remaining fragments are continuations, not starts */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock(&queue->lock);
	}
}
2084
/* Send ACL data on channel @chan: tag the skb with the owning device
 * and ACL packet type, prepend the ACL header for the connection
 * handle, queue it (fragment-aware, see hci_queue_acl()) on the
 * channel's data_q and schedule the TX work.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2101
2102 /* Send SCO data */
2103 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2104 {
2105         struct hci_dev *hdev = conn->hdev;
2106         struct hci_sco_hdr hdr;
2107
2108         BT_DBG("%s len %d", hdev->name, skb->len);
2109
2110         hdr.handle = cpu_to_le16(conn->handle);
2111         hdr.dlen   = skb->len;
2112
2113         skb_push(skb, HCI_SCO_HDR_SIZE);
2114         skb_reset_transport_header(skb);
2115         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2116
2117         skb->dev = (void *) hdev;
2118         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2119
2120         skb_queue_tail(&conn->data_q, skb);
2121         queue_work(hdev->workqueue, &hdev->tx_work);
2122 }
2123 EXPORT_SYMBOL(hci_send_sco);
2124
2125 /* ---- HCI TX task (outgoing data) ---- */
2126
2127 /* HCI Connection scheduler */
/* Pick the connection of @type that should transmit next: among
 * connections with queued data in BT_CONNECTED or BT_CONFIG state,
 * choose the one with the fewest unacked packets (c->sent) for
 * fairness.  *quote is set to that connection's share of the free
 * controller buffers for the link type (at least 1), or 0 when no
 * connection is ready.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Free buffer count depends on the link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share of the buffers, minimum one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2186
/* TX timeout handling for links of @type: disconnect every
 * connection of that type that still has unacked packets (c->sent),
 * using HCI reason 0x13 (remote user terminated connection).
 */
static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *c;

	BT_ERR("%s link tx timeout", hdev->name);

	rcu_read_lock();

	/* Kill stalled connections */
	list_for_each_entry_rcu(c, &h->list, list) {
		if (c->type == type && c->sent) {
			BT_ERR("%s killing stalled connection %s",
				hdev->name, batostr(&c->dst));
			hci_acl_disconn(c, 0x13);
		}
	}

	rcu_read_unlock();
}
2207
/* Pick the channel of @type that should transmit next.  Channels are
 * compared first by the priority of the skb at the head of their
 * data_q (higher wins; finding a higher priority resets the search
 * state), then by their connection's count of unacked packets
 * (fewest wins, for fairness).  *quote is set to the winner's share
 * of the free controller buffers for the link type, at least 1.
 * Returns NULL (leaving *quote untouched) when nothing is ready.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Higher priority found: restart the competition */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Tie-break on the connection's unacked count */
			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Free buffer count depends on the link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share of the buffers, minimum one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2286
/* After a TX round, counter starvation on links of @type: any
 * channel that sent nothing this round (chan->sent == 0) but still
 * has queued data gets the priority of its head skb promoted to
 * HCI_PRIO_MAX - 1.  Channels that did send merely have their
 * per-round counter reset.
 */
static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn;
	int num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *chan;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		num++;

		list_for_each_entry_rcu(chan, &conn->chan_list, list) {
			struct sk_buff *skb;

			/* This channel got to send: just reset its counter */
			if (chan->sent) {
				chan->sent = 0;
				continue;
			}

			if (skb_queue_empty(&chan->data_q))
				continue;

			skb = skb_peek(&chan->data_q);
			if (skb->priority >= HCI_PRIO_MAX - 1)
				continue;

			skb->priority = HCI_PRIO_MAX - 1;

			BT_DBG("chan %p skb %p promoted to %d", chan, skb,
								skb->priority);
		}

		/* All connections of this type seen: stop early */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

}
2336
/* ACL packet scheduler.
 *
 * Unless the device is in raw mode, first check for a TX timeout:
 * if no ACL credits have been available for longer than
 * HCI_ACL_TX_TIMEOUT, stalled connections are forcibly disconnected
 * (hci_link_tx_to()).  Then, while ACL credits remain, repeatedly
 * pick the best channel (hci_chan_sent()) and send up to its quota
 * of frames, stopping early if a lower-priority frame reaches the
 * head of the queue.  If anything was sent, starved channels get a
 * priority boost via hci_prio_recalculate().
 */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx +
					msecs_to_jiffies(HCI_ACL_TX_TIMEOUT)))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Remember the credit count to detect whether anything was sent */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2387
2388 /* Schedule SCO */
2389 static inline void hci_sched_sco(struct hci_dev *hdev)
2390 {
2391         struct hci_conn *conn;
2392         struct sk_buff *skb;
2393         int quote;
2394
2395         BT_DBG("%s", hdev->name);
2396
2397         if (!hci_conn_num(hdev, SCO_LINK))
2398                 return;
2399
2400         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2401                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2402                         BT_DBG("skb %p len %d", skb, skb->len);
2403                         hci_send_frame(skb);
2404
2405                         conn->sent++;
2406                         if (conn->sent == ~0)
2407                                 conn->sent = 0;
2408                 }
2409         }
2410 }
2411
2412 static inline void hci_sched_esco(struct hci_dev *hdev)
2413 {
2414         struct hci_conn *conn;
2415         struct sk_buff *skb;
2416         int quote;
2417
2418         BT_DBG("%s", hdev->name);
2419
2420         if (!hci_conn_num(hdev, ESCO_LINK))
2421                 return;
2422
2423         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2424                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2425                         BT_DBG("skb %p len %d", skb, skb->len);
2426                         hci_send_frame(skb);
2427
2428                         conn->sent++;
2429                         if (conn->sent == ~0)
2430                                 conn->sent = 0;
2431                 }
2432         }
2433 }
2434
/* LE packet scheduler.  Works like hci_sched_acl(), except that
 * controllers without a dedicated LE buffer pool (le_pkts == 0)
 * borrow ACL credits: the local count is taken from, and written
 * back to, either le_cnt or acl_cnt accordingly.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use LE credits if the controller has them, ACL credits otherwise */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	tmp = cnt;	/* remember to detect whether anything was sent */
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining credits back to the pool they came from */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2485
/* Workqueue handler for outgoing data: run each per-link-type
 * scheduler (ACL, SCO, eSCO, LE), then drain raw_q, whose packets
 * bypass scheduling entirely.
 */
static void hci_tx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
	struct sk_buff *skb;

	BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
		hdev->sco_cnt, hdev->le_cnt);

	/* Schedule queues and send stuff to HCI driver */

	hci_sched_acl(hdev);

	hci_sched_sco(hdev);

	hci_sched_esco(hdev);

	hci_sched_le(hdev);

	/* Send next queued raw (unknown type) packet */
	while ((skb = skb_dequeue(&hdev->raw_q)))
		hci_send_frame(skb);
}
2508
2509 /* ----- HCI RX task (incoming data processing) ----- */
2510
2511 /* ACL data packet */
2512 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2513 {
2514         struct hci_acl_hdr *hdr = (void *) skb->data;
2515         struct hci_conn *conn;
2516         __u16 handle, flags;
2517
2518         skb_pull(skb, HCI_ACL_HDR_SIZE);
2519
2520         handle = __le16_to_cpu(hdr->handle);
2521         flags  = hci_flags(handle);
2522         handle = hci_handle(handle);
2523
2524         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2525
2526         hdev->stat.acl_rx++;
2527
2528         hci_dev_lock(hdev);
2529         conn = hci_conn_hash_lookup_handle(hdev, handle);
2530         hci_dev_unlock(hdev);
2531
2532         if (conn) {
2533                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2534
2535                 /* Send to upper protocol */
2536                 l2cap_recv_acldata(conn, skb, flags);
2537                 return;
2538         } else {
2539                 BT_ERR("%s ACL packet for unknown connection handle %d",
2540                         hdev->name, handle);
2541         }
2542
2543         kfree_skb(skb);
2544 }
2545
2546 /* SCO data packet */
2547 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2548 {
2549         struct hci_sco_hdr *hdr = (void *) skb->data;
2550         struct hci_conn *conn;
2551         __u16 handle;
2552
2553         skb_pull(skb, HCI_SCO_HDR_SIZE);
2554
2555         handle = __le16_to_cpu(hdr->handle);
2556
2557         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2558
2559         hdev->stat.sco_rx++;
2560
2561         hci_dev_lock(hdev);
2562         conn = hci_conn_hash_lookup_handle(hdev, handle);
2563         hci_dev_unlock(hdev);
2564
2565         if (conn) {
2566                 /* Send to upper protocol */
2567                 sco_recv_scodata(conn, skb);
2568                 return;
2569         } else {
2570                 BT_ERR("%s SCO packet for unknown connection handle %d",
2571                         hdev->name, handle);
2572         }
2573
2574         kfree_skb(skb);
2575 }
2576
/* Workqueue handler for incoming data.  Drains hdev->rx_q:
 *  - promiscuous listeners see every frame first;
 *  - in HCI_RAW mode the kernel stack does no further processing
 *    and the frame is dropped here;
 *  - while HCI_INIT is set, data packets are discarded and only
 *    events pass;
 *  - otherwise frames are dispatched by packet type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			kfree_skb(skb);
			break;
		}
	}
}
2628
/* Workqueue handler for the command queue.  cmd_cnt gates how many
 * commands the controller can currently accept; when it is non-zero,
 * dequeue and send one command.  A clone is kept in hdev->sent_cmd
 * so the reply can be matched later (see hci_sent_cmd_data()).  The
 * command timer is (re)armed unless a reset is in flight; if the
 * clone allocation fails the command is put back at the head of the
 * queue and the work rescheduled.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously kept copy, if any */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* No memory for the clone: retry this command later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2659
2660 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2661 {
2662         /* General inquiry access code (GIAC) */
2663         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2664         struct hci_cp_inquiry cp;
2665
2666         BT_DBG("%s", hdev->name);
2667
2668         if (test_bit(HCI_INQUIRY, &hdev->flags))
2669                 return -EINPROGRESS;
2670
2671         inquiry_cache_flush(hdev);
2672
2673         memset(&cp, 0, sizeof(cp));
2674         memcpy(&cp.lap, lap, sizeof(cp.lap));
2675         cp.length  = length;
2676
2677         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2678 }
2679
2680 int hci_cancel_inquiry(struct hci_dev *hdev)
2681 {
2682         BT_DBG("%s", hdev->name);
2683
2684         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2685                 return -EPERM;
2686
2687         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2688 }
2689
/* "enable_hs" module parameter: boolean, writable at runtime via
 * sysfs (mode 0644), controls High Speed support. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");