Bluetooth: Add ProFUSION's copyright
[linux-flexiantxendom0-3.2.10.git] / net / bluetooth / hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4    Copyright (C) 2011 ProFUSION Embedded Systems
5
6    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
7
8    This program is free software; you can redistribute it and/or modify
9    it under the terms of the GNU General Public License version 2 as
10    published by the Free Software Foundation;
11
12    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
13    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
14    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
15    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
16    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
17    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
18    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
19    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
20
21    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
22    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
23    SOFTWARE IS DISCLAIMED.
24 */
25
26 /* Bluetooth HCI core. */
27
28 #include <linux/jiffies.h>
29 #include <linux/module.h>
30 #include <linux/kmod.h>
31
32 #include <linux/types.h>
33 #include <linux/errno.h>
34 #include <linux/kernel.h>
35 #include <linux/sched.h>
36 #include <linux/slab.h>
37 #include <linux/poll.h>
38 #include <linux/fcntl.h>
39 #include <linux/init.h>
40 #include <linux/skbuff.h>
41 #include <linux/workqueue.h>
42 #include <linux/interrupt.h>
43 #include <linux/notifier.h>
44 #include <linux/rfkill.h>
45 #include <linux/timer.h>
46 #include <linux/crypto.h>
47 #include <net/sock.h>
48
49 #include <asm/system.h>
50 #include <linux/uaccess.h>
51 #include <asm/unaligned.h>
52
53 #include <net/bluetooth/bluetooth.h>
54 #include <net/bluetooth/hci_core.h>
55
56 #define AUTO_OFF_TIMEOUT 2000
57
58 int enable_hs;
59
60 static void hci_rx_work(struct work_struct *work);
61 static void hci_cmd_work(struct work_struct *work);
62 static void hci_tx_work(struct work_struct *work);
63
64 static DEFINE_MUTEX(hci_task_lock);
65
66 /* HCI device list */
67 LIST_HEAD(hci_dev_list);
68 DEFINE_RWLOCK(hci_dev_list_lock);
69
70 /* HCI callback list */
71 LIST_HEAD(hci_cb_list);
72 DEFINE_RWLOCK(hci_cb_list_lock);
73
74 /* HCI protocols */
75 #define HCI_MAX_PROTO   2
76 struct hci_proto *hci_proto[HCI_MAX_PROTO];
77
78 /* HCI notifiers list */
79 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
80
81 /* ---- HCI notifications ---- */
82
83 int hci_register_notifier(struct notifier_block *nb)
84 {
85         return atomic_notifier_chain_register(&hci_notifier, nb);
86 }
87
88 int hci_unregister_notifier(struct notifier_block *nb)
89 {
90         return atomic_notifier_chain_unregister(&hci_notifier, nb);
91 }
92
/* Broadcast @event (e.g. HCI_DEV_UP/HCI_DEV_DOWN) for @hdev to every
 * registered notifier block. */
static void hci_notify(struct hci_dev *hdev, int event)
{
	atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
97
98 /* ---- HCI requests ---- */
99
100 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
101 {
102         BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
103
104         /* If this is the init phase check if the completed command matches
105          * the last init command, and if not just return.
106          */
107         if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
108                 return;
109
110         if (hdev->req_status == HCI_REQ_PEND) {
111                 hdev->req_result = result;
112                 hdev->req_status = HCI_REQ_DONE;
113                 wake_up_interruptible(&hdev->req_wait_q);
114         }
115 }
116
117 static void hci_req_cancel(struct hci_dev *hdev, int err)
118 {
119         BT_DBG("%s err 0x%2.2x", hdev->name, err);
120
121         if (hdev->req_status == HCI_REQ_PEND) {
122                 hdev->req_result = err;
123                 hdev->req_status = HCI_REQ_CANCELED;
124                 wake_up_interruptible(&hdev->req_wait_q);
125         }
126 }
127
/* Execute request and wait for completion.
 *
 * Issues @req and sleeps (interruptibly, up to @timeout jiffies) until
 * hci_req_complete()/hci_req_cancel() wakes us. Caller must hold the
 * request lock (see hci_request()). Returns 0 or a negative errno.
 */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
					unsigned long opt, __u32 timeout)
{
	DECLARE_WAITQUEUE(wait, current);
	int err = 0;

	BT_DBG("%s start", hdev->name);

	hdev->req_status = HCI_REQ_PEND;

	/* Queue ourselves on the wait queue *before* issuing the request so
	 * a completion arriving immediately cannot be missed. */
	add_wait_queue(&hdev->req_wait_q, &wait);
	set_current_state(TASK_INTERRUPTIBLE);

	req(hdev, opt);
	schedule_timeout(timeout);

	remove_wait_queue(&hdev->req_wait_q, &wait);

	/* NOTE(review): on a signal we return with req_status still at
	 * HCI_REQ_PEND, so a late hci_req_complete() may consume this slot
	 * — confirm callers tolerate that. */
	if (signal_pending(current))
		return -EINTR;

	switch (hdev->req_status) {
	case HCI_REQ_DONE:
		/* Controller status code mapped to a negative errno. */
		err = -bt_to_errno(hdev->req_result);
		break;

	case HCI_REQ_CANCELED:
		/* req_result holds the positive errno from hci_req_cancel(). */
		err = -hdev->req_result;
		break;

	default:
		/* Still pending: the timeout elapsed without completion. */
		err = -ETIMEDOUT;
		break;
	}

	hdev->req_status = hdev->req_result = 0;

	BT_DBG("%s end: err %d", hdev->name, err);

	return err;
}
170
171 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
172                                         unsigned long opt, __u32 timeout)
173 {
174         int ret;
175
176         if (!test_bit(HCI_UP, &hdev->flags))
177                 return -ENETDOWN;
178
179         /* Serialize all requests */
180         hci_req_lock(hdev);
181         ret = __hci_request(hdev, req, opt, timeout);
182         hci_req_unlock(hdev);
183
184         return ret;
185 }
186
/* Request callback: issue an HCI_OP_RESET to the controller. The
 * HCI_RESET flag is set first so the event path knows a reset is in
 * flight. @opt is unused. */
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s %ld", hdev->name, opt);

	/* Reset device */
	set_bit(HCI_RESET, &hdev->flags);
	hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}
195
196 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
197 {
198         struct hci_cp_delete_stored_link_key cp;
199         struct sk_buff *skb;
200         __le16 param;
201         __u8 flt_type;
202
203         BT_DBG("%s %ld", hdev->name, opt);
204
205         /* Driver initialization */
206
207         /* Special commands */
208         while ((skb = skb_dequeue(&hdev->driver_init))) {
209                 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
210                 skb->dev = (void *) hdev;
211
212                 skb_queue_tail(&hdev->cmd_q, skb);
213                 queue_work(hdev->workqueue, &hdev->cmd_work);
214         }
215         skb_queue_purge(&hdev->driver_init);
216
217         /* Mandatory initialization */
218
219         /* Reset */
220         if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
221                         set_bit(HCI_RESET, &hdev->flags);
222                         hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
223         }
224
225         /* Read Local Supported Features */
226         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
227
228         /* Read Local Version */
229         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
230
231         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
232         hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
233
234         /* Read BD Address */
235         hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
236
237         /* Read Class of Device */
238         hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
239
240         /* Read Local Name */
241         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
242
243         /* Read Voice Setting */
244         hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
245
246         /* Optional initialization */
247
248         /* Clear Event Filters */
249         flt_type = HCI_FLT_CLEAR_ALL;
250         hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
251
252         /* Connection accept timeout ~20 secs */
253         param = cpu_to_le16(0x7d00);
254         hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
255
256         bacpy(&cp.bdaddr, BDADDR_ANY);
257         cp.delete_all = 1;
258         hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
259 }
260
/* Request callback: LE-specific init — query the LE buffer sizes.
 * Runs after hci_init_req() when the controller is LE capable. */
static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
	BT_DBG("%s", hdev->name);

	/* Read LE buffer size */
	hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}
268
269 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
270 {
271         __u8 scan = opt;
272
273         BT_DBG("%s %x", hdev->name, scan);
274
275         /* Inquiry and Page scans */
276         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
277 }
278
279 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
280 {
281         __u8 auth = opt;
282
283         BT_DBG("%s %x", hdev->name, auth);
284
285         /* Authentication */
286         hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
287 }
288
289 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
290 {
291         __u8 encrypt = opt;
292
293         BT_DBG("%s %x", hdev->name, encrypt);
294
295         /* Encryption */
296         hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
297 }
298
299 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
300 {
301         __le16 policy = cpu_to_le16(opt);
302
303         BT_DBG("%s %x", hdev->name, policy);
304
305         /* Default link policy */
306         hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
307 }
308
309 /* Get HCI device by index.
310  * Device is held on return. */
311 struct hci_dev *hci_dev_get(int index)
312 {
313         struct hci_dev *hdev = NULL, *d;
314
315         BT_DBG("%d", index);
316
317         if (index < 0)
318                 return NULL;
319
320         read_lock(&hci_dev_list_lock);
321         list_for_each_entry(d, &hci_dev_list, list) {
322                 if (d->id == index) {
323                         hdev = hci_dev_hold(d);
324                         break;
325                 }
326         }
327         read_unlock(&hci_dev_list_lock);
328         return hdev;
329 }
330
331 /* ---- Inquiry support ---- */
332 static void inquiry_cache_flush(struct hci_dev *hdev)
333 {
334         struct inquiry_cache *cache = &hdev->inq_cache;
335         struct inquiry_entry *next  = cache->list, *e;
336
337         BT_DBG("cache %p", cache);
338
339         cache->list = NULL;
340         while ((e = next)) {
341                 next = e->next;
342                 kfree(e);
343         }
344 }
345
346 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
347 {
348         struct inquiry_cache *cache = &hdev->inq_cache;
349         struct inquiry_entry *e;
350
351         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
352
353         for (e = cache->list; e; e = e->next)
354                 if (!bacmp(&e->data.bdaddr, bdaddr))
355                         break;
356         return e;
357 }
358
359 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
360 {
361         struct inquiry_cache *cache = &hdev->inq_cache;
362         struct inquiry_entry *ie;
363
364         BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
365
366         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
367         if (!ie) {
368                 /* Entry not in the cache. Add new one. */
369                 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
370                 if (!ie)
371                         return;
372
373                 ie->next = cache->list;
374                 cache->list = ie;
375         }
376
377         memcpy(&ie->data, data, sizeof(*data));
378         ie->timestamp = jiffies;
379         cache->timestamp = jiffies;
380 }
381
382 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
383 {
384         struct inquiry_cache *cache = &hdev->inq_cache;
385         struct inquiry_info *info = (struct inquiry_info *) buf;
386         struct inquiry_entry *e;
387         int copied = 0;
388
389         for (e = cache->list; e && copied < num; e = e->next, copied++) {
390                 struct inquiry_data *data = &e->data;
391                 bacpy(&info->bdaddr, &data->bdaddr);
392                 info->pscan_rep_mode    = data->pscan_rep_mode;
393                 info->pscan_period_mode = data->pscan_period_mode;
394                 info->pscan_mode        = data->pscan_mode;
395                 memcpy(info->dev_class, data->dev_class, 3);
396                 info->clock_offset      = data->clock_offset;
397                 info++;
398         }
399
400         BT_DBG("cache %p, copied %d", cache, copied);
401         return copied;
402 }
403
404 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
405 {
406         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
407         struct hci_cp_inquiry cp;
408
409         BT_DBG("%s", hdev->name);
410
411         if (test_bit(HCI_INQUIRY, &hdev->flags))
412                 return;
413
414         /* Start Inquiry */
415         memcpy(&cp.lap, &ir->lap, 3);
416         cp.length  = ir->length;
417         cp.num_rsp = ir->num_rsp;
418         hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
419 }
420
/* Handle the HCIINQUIRY ioctl: run a fresh inquiry (or reuse the cache)
 * and copy the discovered devices back to user space.
 *
 * Returns 0 on success or a negative errno (-EFAULT, -ENODEV, -ENOMEM,
 * or an error from the inquiry request).
 */
int hci_inquiry(void __user *arg)
{
	__u8 __user *ptr = arg;
	struct hci_inquiry_req ir;
	struct hci_dev *hdev;
	int err = 0, do_inquiry = 0, max_rsp;
	long timeo;
	__u8 *buf;

	if (copy_from_user(&ir, ptr, sizeof(ir)))
		return -EFAULT;

	hdev = hci_dev_get(ir.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Start a new inquiry only if the cache is stale or empty, or the
	 * caller explicitly asked for a flush. */
	hci_dev_lock(hdev);
	if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
				inquiry_cache_empty(hdev) ||
				ir.flags & IREQ_CACHE_FLUSH) {
		inquiry_cache_flush(hdev);
		do_inquiry = 1;
	}
	hci_dev_unlock(hdev);

	/* Allow 2s of wall time per requested inquiry-length unit
	 * (presumably 1.28s HCI units — confirm against the spec). */
	timeo = ir.length * msecs_to_jiffies(2000);

	if (do_inquiry) {
		err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
		if (err < 0)
			goto done;
	}

	/* for unlimited number of responses we will use buffer with 255 entries */
	max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

	/* cache_dump can't sleep. Therefore we allocate temp buffer and then
	 * copy it to the user space.
	 */
	buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
	if (!buf) {
		err = -ENOMEM;
		goto done;
	}

	hci_dev_lock(hdev);
	ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
	hci_dev_unlock(hdev);

	BT_DBG("num_rsp %d", ir.num_rsp);

	/* Copy the (updated) request header first, then the inquiry_info
	 * entries immediately after it. */
	if (!copy_to_user(ptr, &ir, sizeof(ir))) {
		ptr += sizeof(ir);
		if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
					ir.num_rsp))
			err = -EFAULT;
	} else
		err = -EFAULT;

	kfree(buf);

done:
	hci_dev_put(hdev);
	return err;
}
486
487 /* ---- HCI ioctl helpers ---- */
488
/* Bring up HCI device @dev: open the driver transport and, unless the
 * device is marked raw, run the HCI init sequence.
 *
 * Returns 0 on success or a negative errno (-ENODEV, -ERFKILL,
 * -EALREADY, -EIO, or an init-request error). On init failure the
 * device is flushed and closed again before returning. */
int hci_dev_open(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_lock(hdev);

	/* Refuse to power on while rfkill blocks the radio. */
	if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
		ret = -ERFKILL;
		goto done;
	}

	if (test_bit(HCI_UP, &hdev->flags)) {
		ret = -EALREADY;
		goto done;
	}

	if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
		set_bit(HCI_RAW, &hdev->flags);

	/* Treat all non BR/EDR controllers as raw devices if
	   enable_hs is not set */
	if (hdev->dev_type != HCI_BREDR && !enable_hs)
		set_bit(HCI_RAW, &hdev->flags);

	/* Driver-specific open (transport setup etc.). */
	if (hdev->open(hdev)) {
		ret = -EIO;
		goto done;
	}

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		atomic_set(&hdev->cmd_cnt, 1);
		set_bit(HCI_INIT, &hdev->flags);
		hdev->init_last_cmd = 0;

		ret = __hci_request(hdev, hci_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		/* NOTE(review): if the controller is LE-capable this
		 * overwrites any error from the BR/EDR init above —
		 * confirm that is intended. */
		if (lmp_host_le_capable(hdev))
			ret = __hci_request(hdev, hci_le_init_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

		clear_bit(HCI_INIT, &hdev->flags);
	}

	if (!ret) {
		hci_dev_hold(hdev);
		set_bit(HCI_UP, &hdev->flags);
		hci_notify(hdev, HCI_DEV_UP);
		/* Skip mgmt power notification while the device is still
		 * in its setup phase. */
		if (!test_bit(HCI_SETUP, &hdev->flags)) {
			hci_dev_lock(hdev);
			mgmt_powered(hdev, 1);
			hci_dev_unlock(hdev);
		}
	} else {
		/* Init failed, cleanup */
		flush_work(&hdev->tx_work);
		flush_work(&hdev->cmd_work);
		flush_work(&hdev->rx_work);

		skb_queue_purge(&hdev->cmd_q);
		skb_queue_purge(&hdev->rx_q);

		if (hdev->flush)
			hdev->flush(hdev);

		if (hdev->sent_cmd) {
			kfree_skb(hdev->sent_cmd);
			hdev->sent_cmd = NULL;
		}

		hdev->close(hdev);
		hdev->flags = 0;
	}

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
575
/* Take @hdev down: cancel pending requests, flush works and queues,
 * reset the controller and invoke the driver close hook. Safe to call
 * on a device that is already down. Always returns 0. */
static int hci_dev_do_close(struct hci_dev *hdev)
{
	BT_DBG("%s %p", hdev->name, hdev);

	hci_req_cancel(hdev, ENODEV);
	hci_req_lock(hdev);

	/* Already down: just stop the command timer and bail. */
	if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
		del_timer_sync(&hdev->cmd_timer);
		hci_req_unlock(hdev);
		return 0;
	}

	/* Flush RX and TX works */
	flush_work(&hdev->tx_work);
	flush_work(&hdev->rx_work);

	if (hdev->discov_timeout > 0) {
		cancel_delayed_work(&hdev->discov_off);
		hdev->discov_timeout = 0;
	}

	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work(&hdev->power_off);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	hci_notify(hdev, HCI_DEV_DOWN);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Reset device */
	skb_queue_purge(&hdev->cmd_q);
	atomic_set(&hdev->cmd_cnt, 1);
	if (!test_bit(HCI_RAW, &hdev->flags)) {
		set_bit(HCI_INIT, &hdev->flags);
		__hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		clear_bit(HCI_INIT, &hdev->flags);
	}

	/* flush cmd  work */
	flush_work(&hdev->cmd_work);

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);
	skb_queue_purge(&hdev->raw_q);

	/* Drop last sent command */
	if (hdev->sent_cmd) {
		del_timer_sync(&hdev->cmd_timer);
		kfree_skb(hdev->sent_cmd);
		hdev->sent_cmd = NULL;
	}

	/* After this point our queues are empty
	 * and no tasks are scheduled. */
	hdev->close(hdev);

	hci_dev_lock(hdev);
	mgmt_powered(hdev, 0);
	hci_dev_unlock(hdev);

	/* Clear flags */
	hdev->flags = 0;

	hci_req_unlock(hdev);

	/* Drop the reference taken when the device was brought up. */
	hci_dev_put(hdev);
	return 0;
}
652
653 int hci_dev_close(__u16 dev)
654 {
655         struct hci_dev *hdev;
656         int err;
657
658         hdev = hci_dev_get(dev);
659         if (!hdev)
660                 return -ENODEV;
661         err = hci_dev_do_close(hdev);
662         hci_dev_put(hdev);
663         return err;
664 }
665
/* Handle HCIDEVRESET: drop pending traffic and issue an HCI reset while
 * keeping the device up. Returns 0 or a negative errno. */
int hci_dev_reset(__u16 dev)
{
	struct hci_dev *hdev;
	int ret = 0;

	hdev = hci_dev_get(dev);
	if (!hdev)
		return -ENODEV;

	hci_req_lock(hdev);

	/* Resetting a device that is down is a successful no-op. */
	if (!test_bit(HCI_UP, &hdev->flags))
		goto done;

	/* Drop queues */
	skb_queue_purge(&hdev->rx_q);
	skb_queue_purge(&hdev->cmd_q);

	hci_dev_lock(hdev);
	inquiry_cache_flush(hdev);
	hci_conn_hash_flush(hdev);
	hci_dev_unlock(hdev);

	if (hdev->flush)
		hdev->flush(hdev);

	/* Restore the command budget and clear per-link packet counters. */
	atomic_set(&hdev->cmd_cnt, 1);
	hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

	if (!test_bit(HCI_RAW, &hdev->flags))
		ret = __hci_request(hdev, hci_reset_req, 0,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
	hci_req_unlock(hdev);
	hci_dev_put(hdev);
	return ret;
}
704
705 int hci_dev_reset_stat(__u16 dev)
706 {
707         struct hci_dev *hdev;
708         int ret = 0;
709
710         hdev = hci_dev_get(dev);
711         if (!hdev)
712                 return -ENODEV;
713
714         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
715
716         hci_dev_put(hdev);
717
718         return ret;
719 }
720
/* Handle the HCISET* ioctls that change a single device setting.
 * Returns 0 or a negative errno. */
int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_req dr;
	int err = 0;

	if (copy_from_user(&dr, arg, sizeof(dr)))
		return -EFAULT;

	hdev = hci_dev_get(dr.dev_id);
	if (!hdev)
		return -ENODEV;

	switch (cmd) {
	case HCISETAUTH:
		err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETENCRYPT:
		if (!lmp_encrypt_capable(hdev)) {
			err = -EOPNOTSUPP;
			break;
		}

		if (!test_bit(HCI_AUTH, &hdev->flags)) {
			/* Auth must be enabled first */
			err = hci_request(hdev, hci_auth_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
			if (err)
				break;
		}

		err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETSCAN:
		err = hci_request(hdev, hci_scan_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKPOL:
		err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
					msecs_to_jiffies(HCI_INIT_TIMEOUT));
		break;

	case HCISETLINKMODE:
		/* Only the master/accept bits may be set from user space. */
		hdev->link_mode = ((__u16) dr.dev_opt) &
					(HCI_LM_MASTER | HCI_LM_ACCEPT);
		break;

	case HCISETPTYPE:
		hdev->pkt_type = (__u16) dr.dev_opt;
		break;

	case HCISETACLMTU:
		/* dev_opt packs two __u16 halves: the second holds the MTU,
		 * the first the packet count. NOTE(review): this pointer
		 * punning depends on the halves' in-memory order — confirm
		 * behavior on big-endian. */
		hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	case HCISETSCOMTU:
		/* Same packed layout as HCISETACLMTU, for SCO. */
		hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
		hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
		break;

	default:
		err = -EINVAL;
		break;
	}

	hci_dev_put(hdev);
	return err;
}
795
796 int hci_get_dev_list(void __user *arg)
797 {
798         struct hci_dev *hdev;
799         struct hci_dev_list_req *dl;
800         struct hci_dev_req *dr;
801         int n = 0, size, err;
802         __u16 dev_num;
803
804         if (get_user(dev_num, (__u16 __user *) arg))
805                 return -EFAULT;
806
807         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
808                 return -EINVAL;
809
810         size = sizeof(*dl) + dev_num * sizeof(*dr);
811
812         dl = kzalloc(size, GFP_KERNEL);
813         if (!dl)
814                 return -ENOMEM;
815
816         dr = dl->dev_req;
817
818         read_lock_bh(&hci_dev_list_lock);
819         list_for_each_entry(hdev, &hci_dev_list, list) {
820                 if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
821                         cancel_delayed_work(&hdev->power_off);
822
823                 if (!test_bit(HCI_MGMT, &hdev->flags))
824                         set_bit(HCI_PAIRABLE, &hdev->flags);
825
826                 (dr + n)->dev_id  = hdev->id;
827                 (dr + n)->dev_opt = hdev->flags;
828
829                 if (++n >= dev_num)
830                         break;
831         }
832         read_unlock_bh(&hci_dev_list_lock);
833
834         dl->dev_num = n;
835         size = sizeof(*dl) + n * sizeof(*dr);
836
837         err = copy_to_user(arg, dl, size);
838         kfree(dl);
839
840         return err ? -EFAULT : 0;
841 }
842
/* Handle HCIGETDEVINFO: fill a hci_dev_info snapshot for one device and
 * copy it to user space. Returns 0 or a negative errno. */
int hci_get_dev_info(void __user *arg)
{
	struct hci_dev *hdev;
	struct hci_dev_info di;
	int err = 0;

	if (copy_from_user(&di, arg, sizeof(di)))
		return -EFAULT;

	hdev = hci_dev_get(di.dev_id);
	if (!hdev)
		return -ENODEV;

	/* Querying the device counts as use: cancel a pending auto-off. */
	if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
		cancel_delayed_work_sync(&hdev->power_off);

	/* Devices not managed via mgmt default to pairable. */
	if (!test_bit(HCI_MGMT, &hdev->flags))
		set_bit(HCI_PAIRABLE, &hdev->flags);

	strcpy(di.name, hdev->name);
	di.bdaddr   = hdev->bdaddr;
	/* Low nibble: bus type; high nibble: controller (dev) type. */
	di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
	di.flags    = hdev->flags;
	di.pkt_type = hdev->pkt_type;
	di.acl_mtu  = hdev->acl_mtu;
	di.acl_pkts = hdev->acl_pkts;
	di.sco_mtu  = hdev->sco_mtu;
	di.sco_pkts = hdev->sco_pkts;
	di.link_policy = hdev->link_policy;
	di.link_mode   = hdev->link_mode;

	memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
	memcpy(&di.features, &hdev->features, sizeof(di.features));

	if (copy_to_user(arg, &di, sizeof(di)))
		err = -EFAULT;

	hci_dev_put(hdev);

	return err;
}
884
885 /* ---- Interface to HCI drivers ---- */
886
887 static int hci_rfkill_set_block(void *data, bool blocked)
888 {
889         struct hci_dev *hdev = data;
890
891         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
892
893         if (!blocked)
894                 return 0;
895
896         hci_dev_do_close(hdev);
897
898         return 0;
899 }
900
/* rfkill operations: only the block transition needs handling. */
static const struct rfkill_ops hci_rfkill_ops = {
	.set_block = hci_rfkill_set_block,
};
904
905 /* Alloc HCI device */
906 struct hci_dev *hci_alloc_dev(void)
907 {
908         struct hci_dev *hdev;
909
910         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
911         if (!hdev)
912                 return NULL;
913
914         hci_init_sysfs(hdev);
915         skb_queue_head_init(&hdev->driver_init);
916
917         return hdev;
918 }
919 EXPORT_SYMBOL(hci_alloc_dev);
920
921 /* Free HCI device */
922 void hci_free_dev(struct hci_dev *hdev)
923 {
924         skb_queue_purge(&hdev->driver_init);
925
926         /* will free via device release */
927         put_device(&hdev->dev);
928 }
929 EXPORT_SYMBOL(hci_free_dev);
930
931 static void hci_power_on(struct work_struct *work)
932 {
933         struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);
934
935         BT_DBG("%s", hdev->name);
936
937         if (hci_dev_open(hdev->id) < 0)
938                 return;
939
940         if (test_bit(HCI_AUTO_OFF, &hdev->flags))
941                 schedule_delayed_work(&hdev->power_off,
942                                         msecs_to_jiffies(AUTO_OFF_TIMEOUT));
943
944         if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
945                 mgmt_index_added(hdev);
946 }
947
948 static void hci_power_off(struct work_struct *work)
949 {
950         struct hci_dev *hdev = container_of(work, struct hci_dev,
951                                                         power_off.work);
952
953         BT_DBG("%s", hdev->name);
954
955         clear_bit(HCI_AUTO_OFF, &hdev->flags);
956
957         hci_dev_close(hdev->id);
958 }
959
960 static void hci_discov_off(struct work_struct *work)
961 {
962         struct hci_dev *hdev;
963         u8 scan = SCAN_PAGE;
964
965         hdev = container_of(work, struct hci_dev, discov_off.work);
966
967         BT_DBG("%s", hdev->name);
968
969         hci_dev_lock(hdev);
970
971         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);
972
973         hdev->discov_timeout = 0;
974
975         hci_dev_unlock(hdev);
976 }
977
978 int hci_uuids_clear(struct hci_dev *hdev)
979 {
980         struct list_head *p, *n;
981
982         list_for_each_safe(p, n, &hdev->uuids) {
983                 struct bt_uuid *uuid;
984
985                 uuid = list_entry(p, struct bt_uuid, list);
986
987                 list_del(p);
988                 kfree(uuid);
989         }
990
991         return 0;
992 }
993
994 int hci_link_keys_clear(struct hci_dev *hdev)
995 {
996         struct list_head *p, *n;
997
998         list_for_each_safe(p, n, &hdev->link_keys) {
999                 struct link_key *key;
1000
1001                 key = list_entry(p, struct link_key, list);
1002
1003                 list_del(p);
1004                 kfree(key);
1005         }
1006
1007         return 0;
1008 }
1009
1010 struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1011 {
1012         struct link_key *k;
1013
1014         list_for_each_entry(k, &hdev->link_keys, list)
1015                 if (bacmp(bdaddr, &k->bdaddr) == 0)
1016                         return k;
1017
1018         return NULL;
1019 }
1020
1021 static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
1022                                                 u8 key_type, u8 old_key_type)
1023 {
1024         /* Legacy key */
1025         if (key_type < 0x03)
1026                 return 1;
1027
1028         /* Debug keys are insecure so don't store them persistently */
1029         if (key_type == HCI_LK_DEBUG_COMBINATION)
1030                 return 0;
1031
1032         /* Changed combination key and there's no previous one */
1033         if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
1034                 return 0;
1035
1036         /* Security mode 3 case */
1037         if (!conn)
1038                 return 1;
1039
1040         /* Neither local nor remote side had no-bonding as requirement */
1041         if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
1042                 return 1;
1043
1044         /* Local side had dedicated bonding as requirement */
1045         if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
1046                 return 1;
1047
1048         /* Remote side had dedicated bonding as requirement */
1049         if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
1050                 return 1;
1051
1052         /* If none of the above criteria match, then don't store the key
1053          * persistently */
1054         return 0;
1055 }
1056
/* Find the stored SMP Long Term Key whose master id matches the
 * given EDIV and Rand values. Returns NULL when no such key is
 * known.
 */
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
	struct link_key *k;

	list_for_each_entry(k, &hdev->link_keys, list) {
		struct key_master_id *id;

		/* LTKs share the link_keys list with BR/EDR keys; skip
		 * every entry that is not an SMP LTK. */
		if (k->type != HCI_LK_SMP_LTK)
			continue;

		/* The master id is stored in the variable-length data
		 * area; ignore entries of unexpected size. */
		if (k->dlen != sizeof(*id))
			continue;

		id = (void *) &k->data;
		if (id->ediv == ediv &&
				(memcmp(rand, id->rand, sizeof(id->rand)) == 0))
			return k;
	}

	return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);
1079
1080 struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
1081                                         bdaddr_t *bdaddr, u8 type)
1082 {
1083         struct link_key *k;
1084
1085         list_for_each_entry(k, &hdev->link_keys, list)
1086                 if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
1087                         return k;
1088
1089         return NULL;
1090 }
1091 EXPORT_SYMBOL(hci_find_link_key_type);
1092
/* Add (or update in place) the link key for @bdaddr.
 *
 * @conn may be NULL (e.g. security mode 3). @new_key is non-zero when
 * the key was just created by the controller; in that case userspace
 * is notified via mgmt and non-persistent keys are dropped again
 * right after the notification.
 *
 * Returns 0 on success or -ENOMEM if a new entry could not be
 * allocated. Caller must hold the hdev lock.
 */
int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
				bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
	struct link_key *key, *old_key;
	u8 old_key_type, persistent;

	/* Reuse an existing entry for this address when there is one */
	old_key = hci_find_link_key(hdev, bdaddr);
	if (old_key) {
		old_key_type = old_key->type;
		key = old_key;
	} else {
		/* 0xff marks "no previous key" */
		old_key_type = conn ? conn->key_type : 0xff;
		key = kzalloc(sizeof(*key), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
	}

	BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

	/* Some buggy controller combinations generate a changed
	 * combination key for legacy pairing even when there's no
	 * previous key */
	if (type == HCI_LK_CHANGED_COMBINATION &&
					(!conn || conn->remote_auth == 0xff) &&
					old_key_type == 0xff) {
		type = HCI_LK_COMBINATION;
		if (conn)
			conn->key_type = type;
	}

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, val, 16);
	key->pin_len = pin_len;

	/* A changed combination key keeps the previous key's type */
	if (type == HCI_LK_CHANGED_COMBINATION)
		key->type = old_key_type;
	else
		key->type = type;

	if (!new_key)
		return 0;

	persistent = hci_persistent_key(hdev, conn, type, old_key_type);

	mgmt_new_link_key(hdev, key, persistent);

	/* Temporary keys only exist for the mgmt notification above;
	 * drop them again immediately. */
	if (!persistent) {
		list_del(&key->list);
		kfree(key);
	}

	return 0;
}
1147
/* Add (or replace) the SMP Long Term Key for @bdaddr together with
 * its master id (EDIV + Rand). When @new_key is set, userspace is
 * notified through mgmt.
 *
 * Returns 0 on success or -ENOMEM. Caller must hold the hdev lock.
 */
int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
			u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
	struct link_key *key, *old_key;
	struct key_master_id *id;
	u8 old_key_type;

	BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

	/* Reuse an existing LTK entry for this address if present */
	old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
	if (old_key) {
		key = old_key;
		old_key_type = old_key->type;
	} else {
		/* The master id lives in the trailing data area, hence
		 * the extra sizeof(*id) in the allocation. */
		key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
		if (!key)
			return -ENOMEM;
		list_add(&key->list, &hdev->link_keys);
		old_key_type = 0xff;
	}

	key->dlen = sizeof(*id);

	bacpy(&key->bdaddr, bdaddr);
	memcpy(key->val, ltk, sizeof(key->val));
	key->type = HCI_LK_SMP_LTK;
	/* For LTKs the pin_len field carries the encryption key size */
	key->pin_len = key_size;

	id = (void *) &key->data;
	id->ediv = ediv;
	memcpy(id->rand, rand, sizeof(id->rand));

	/* NOTE(review): old_key_type is passed where hci_add_link_key()
	 * passes a "persistent" flag — verify this is intended for the
	 * mgmt interface. */
	if (new_key)
		mgmt_new_link_key(hdev, key, old_key_type);

	return 0;
}
1185
1186 int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
1187 {
1188         struct link_key *key;
1189
1190         key = hci_find_link_key(hdev, bdaddr);
1191         if (!key)
1192                 return -ENOENT;
1193
1194         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1195
1196         list_del(&key->list);
1197         kfree(key);
1198
1199         return 0;
1200 }
1201
1202 /* HCI command timer function */
1203 static void hci_cmd_timer(unsigned long arg)
1204 {
1205         struct hci_dev *hdev = (void *) arg;
1206
1207         BT_ERR("%s command tx timeout", hdev->name);
1208         atomic_set(&hdev->cmd_cnt, 1);
1209         queue_work(hdev->workqueue, &hdev->cmd_work);
1210 }
1211
1212 struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
1213                                                         bdaddr_t *bdaddr)
1214 {
1215         struct oob_data *data;
1216
1217         list_for_each_entry(data, &hdev->remote_oob_data, list)
1218                 if (bacmp(bdaddr, &data->bdaddr) == 0)
1219                         return data;
1220
1221         return NULL;
1222 }
1223
1224 int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
1225 {
1226         struct oob_data *data;
1227
1228         data = hci_find_remote_oob_data(hdev, bdaddr);
1229         if (!data)
1230                 return -ENOENT;
1231
1232         BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));
1233
1234         list_del(&data->list);
1235         kfree(data);
1236
1237         return 0;
1238 }
1239
1240 int hci_remote_oob_data_clear(struct hci_dev *hdev)
1241 {
1242         struct oob_data *data, *n;
1243
1244         list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
1245                 list_del(&data->list);
1246                 kfree(data);
1247         }
1248
1249         return 0;
1250 }
1251
1252 int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
1253                                                                 u8 *randomizer)
1254 {
1255         struct oob_data *data;
1256
1257         data = hci_find_remote_oob_data(hdev, bdaddr);
1258
1259         if (!data) {
1260                 data = kmalloc(sizeof(*data), GFP_ATOMIC);
1261                 if (!data)
1262                         return -ENOMEM;
1263
1264                 bacpy(&data->bdaddr, bdaddr);
1265                 list_add(&data->list, &hdev->remote_oob_data);
1266         }
1267
1268         memcpy(data->hash, hash, sizeof(data->hash));
1269         memcpy(data->randomizer, randomizer, sizeof(data->randomizer));
1270
1271         BT_DBG("%s for %s", hdev->name, batostr(bdaddr));
1272
1273         return 0;
1274 }
1275
1276 struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
1277                                                 bdaddr_t *bdaddr)
1278 {
1279         struct bdaddr_list *b;
1280
1281         list_for_each_entry(b, &hdev->blacklist, list)
1282                 if (bacmp(bdaddr, &b->bdaddr) == 0)
1283                         return b;
1284
1285         return NULL;
1286 }
1287
1288 int hci_blacklist_clear(struct hci_dev *hdev)
1289 {
1290         struct list_head *p, *n;
1291
1292         list_for_each_safe(p, n, &hdev->blacklist) {
1293                 struct bdaddr_list *b;
1294
1295                 b = list_entry(p, struct bdaddr_list, list);
1296
1297                 list_del(p);
1298                 kfree(b);
1299         }
1300
1301         return 0;
1302 }
1303
1304 int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
1305 {
1306         struct bdaddr_list *entry;
1307
1308         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1309                 return -EBADF;
1310
1311         if (hci_blacklist_lookup(hdev, bdaddr))
1312                 return -EEXIST;
1313
1314         entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
1315         if (!entry)
1316                 return -ENOMEM;
1317
1318         bacpy(&entry->bdaddr, bdaddr);
1319
1320         list_add(&entry->list, &hdev->blacklist);
1321
1322         return mgmt_device_blocked(hdev, bdaddr);
1323 }
1324
1325 int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
1326 {
1327         struct bdaddr_list *entry;
1328
1329         if (bacmp(bdaddr, BDADDR_ANY) == 0)
1330                 return hci_blacklist_clear(hdev);
1331
1332         entry = hci_blacklist_lookup(hdev, bdaddr);
1333         if (!entry)
1334                 return -ENOENT;
1335
1336         list_del(&entry->list);
1337         kfree(entry);
1338
1339         return mgmt_device_unblocked(hdev, bdaddr);
1340 }
1341
1342 static void hci_clear_adv_cache(struct work_struct *work)
1343 {
1344         struct hci_dev *hdev = container_of(work, struct hci_dev,
1345                                                         adv_work.work);
1346
1347         hci_dev_lock(hdev);
1348
1349         hci_adv_entries_clear(hdev);
1350
1351         hci_dev_unlock(hdev);
1352 }
1353
1354 int hci_adv_entries_clear(struct hci_dev *hdev)
1355 {
1356         struct adv_entry *entry, *tmp;
1357
1358         list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
1359                 list_del(&entry->list);
1360                 kfree(entry);
1361         }
1362
1363         BT_DBG("%s adv cache cleared", hdev->name);
1364
1365         return 0;
1366 }
1367
1368 struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
1369 {
1370         struct adv_entry *entry;
1371
1372         list_for_each_entry(entry, &hdev->adv_entries, list)
1373                 if (bacmp(bdaddr, &entry->bdaddr) == 0)
1374                         return entry;
1375
1376         return NULL;
1377 }
1378
1379 static inline int is_connectable_adv(u8 evt_type)
1380 {
1381         if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
1382                 return 1;
1383
1384         return 0;
1385 }
1386
1387 int hci_add_adv_entry(struct hci_dev *hdev,
1388                                         struct hci_ev_le_advertising_info *ev)
1389 {
1390         struct adv_entry *entry;
1391
1392         if (!is_connectable_adv(ev->evt_type))
1393                 return -EINVAL;
1394
1395         /* Only new entries should be added to adv_entries. So, if
1396          * bdaddr was found, don't add it. */
1397         if (hci_find_adv_entry(hdev, &ev->bdaddr))
1398                 return 0;
1399
1400         entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
1401         if (!entry)
1402                 return -ENOMEM;
1403
1404         bacpy(&entry->bdaddr, &ev->bdaddr);
1405         entry->bdaddr_type = ev->bdaddr_type;
1406
1407         list_add(&entry->list, &hdev->adv_entries);
1408
1409         BT_DBG("%s adv entry added: address %s type %u", hdev->name,
1410                                 batostr(&entry->bdaddr), entry->bdaddr_type);
1411
1412         return 0;
1413 }
1414
/* Register HCI device.
 *
 * Assigns the first free device id, initializes all per-device state,
 * creates the per-device workqueue, sysfs entries and rfkill switch,
 * and schedules the initial power-on.
 *
 * Returns the assigned id on success or a negative error code. The
 * driver must have set the open/close/destruct callbacks beforehand.
 */
int hci_register_dev(struct hci_dev *hdev)
{
	struct list_head *head = &hci_dev_list, *p;
	int i, id, error;

	BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
						hdev->bus, hdev->owner);

	/* A driver must provide these three callbacks */
	if (!hdev->open || !hdev->close || !hdev->destruct)
		return -EINVAL;

	/* Do not allow HCI_AMP devices to register at index 0,
	 * so the index can be used as the AMP controller ID.
	 */
	id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

	write_lock_bh(&hci_dev_list_lock);

	/* Find first available device id */
	list_for_each(p, &hci_dev_list) {
		if (list_entry(p, struct hci_dev, list)->id != id)
			break;
		head = p; id++;
	}

	sprintf(hdev->name, "hci%d", id);
	hdev->id = id;
	/* Insert at the position found so ids stay ordered */
	list_add_tail(&hdev->list, head);

	atomic_set(&hdev->refcnt, 1);
	mutex_init(&hdev->lock);

	/* Default controller settings */
	hdev->flags = 0;
	hdev->dev_flags = 0;
	hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
	hdev->esco_type = (ESCO_HV1);
	hdev->link_mode = (HCI_LM_ACCEPT);
	hdev->io_capability = 0x03; /* No Input No Output */

	hdev->idle_timeout = 0;
	hdev->sniff_max_interval = 800;
	hdev->sniff_min_interval = 80;

	/* RX/TX/command processing runs from these work items */
	INIT_WORK(&hdev->rx_work, hci_rx_work);
	INIT_WORK(&hdev->cmd_work, hci_cmd_work);
	INIT_WORK(&hdev->tx_work, hci_tx_work);


	skb_queue_head_init(&hdev->rx_q);
	skb_queue_head_init(&hdev->cmd_q);
	skb_queue_head_init(&hdev->raw_q);

	/* Detects commands the controller never answers */
	setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

	for (i = 0; i < NUM_REASSEMBLY; i++)
		hdev->reassembly[i] = NULL;

	init_waitqueue_head(&hdev->req_wait_q);
	mutex_init(&hdev->req_lock);

	inquiry_cache_init(hdev);

	hci_conn_hash_init(hdev);

	INIT_LIST_HEAD(&hdev->mgmt_pending);

	INIT_LIST_HEAD(&hdev->blacklist);

	INIT_LIST_HEAD(&hdev->uuids);

	INIT_LIST_HEAD(&hdev->link_keys);

	INIT_LIST_HEAD(&hdev->remote_oob_data);

	INIT_LIST_HEAD(&hdev->adv_entries);

	INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
	INIT_WORK(&hdev->power_on, hci_power_on);
	INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);

	INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

	memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

	atomic_set(&hdev->promisc, 0);

	write_unlock_bh(&hci_dev_list_lock);

	hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
							WQ_MEM_RECLAIM, 1);
	if (!hdev->workqueue) {
		error = -ENOMEM;
		goto err;
	}

	error = hci_add_sysfs(hdev);
	if (error < 0)
		goto err_wqueue;

	/* rfkill is optional: registration failures are tolerated and
	 * simply leave the device without an rfkill switch. */
	hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
				RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
	if (hdev->rfkill) {
		if (rfkill_register(hdev->rfkill) < 0) {
			rfkill_destroy(hdev->rfkill);
			hdev->rfkill = NULL;
		}
	}

	/* Let the power_on work (and later mgmt) finish the setup */
	set_bit(HCI_AUTO_OFF, &hdev->flags);
	set_bit(HCI_SETUP, &hdev->flags);
	schedule_work(&hdev->power_on);

	hci_notify(hdev, HCI_DEV_REG);

	return id;

err_wqueue:
	destroy_workqueue(hdev->workqueue);
err:
	/* Undo the list insertion done above */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	return error;
}
EXPORT_SYMBOL(hci_register_dev);
1542
/* Unregister HCI device.
 *
 * Removes the device from the global list, shuts it down, tears down
 * mgmt/sysfs/rfkill/workqueue state, frees all cached per-device data
 * and drops the registration reference taken in hci_register_dev().
 */
void hci_unregister_dev(struct hci_dev *hdev)
{
	int i;

	BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

	/* Unlink first so no new users can find the device */
	write_lock_bh(&hci_dev_list_lock);
	list_del(&hdev->list);
	write_unlock_bh(&hci_dev_list_lock);

	hci_dev_do_close(hdev);

	/* Drop any partially reassembled packets */
	for (i = 0; i < NUM_REASSEMBLY; i++)
		kfree_skb(hdev->reassembly[i]);

	/* Only announce the removal via mgmt for devices that finished
	 * their setup phase. */
	if (!test_bit(HCI_INIT, &hdev->flags) &&
					!test_bit(HCI_SETUP, &hdev->flags)) {
		hci_dev_lock(hdev);
		mgmt_index_removed(hdev);
		hci_dev_unlock(hdev);
	}

	/* mgmt_index_removed should take care of emptying the
	 * pending list */
	BUG_ON(!list_empty(&hdev->mgmt_pending));

	hci_notify(hdev, HCI_DEV_UNREG);

	if (hdev->rfkill) {
		rfkill_unregister(hdev->rfkill);
		rfkill_destroy(hdev->rfkill);
	}

	hci_del_sysfs(hdev);

	/* Ensure the adv cache expiry work cannot run after teardown */
	cancel_delayed_work_sync(&hdev->adv_work);

	destroy_workqueue(hdev->workqueue);

	/* Free all cached per-device state */
	hci_dev_lock(hdev);
	hci_blacklist_clear(hdev);
	hci_uuids_clear(hdev);
	hci_link_keys_clear(hdev);
	hci_remote_oob_data_clear(hdev);
	hci_adv_entries_clear(hdev);
	hci_dev_unlock(hdev);

	/* Drop the registration reference; presumably the device is
	 * destroyed once the last reference is gone — see __hci_dev_put */
	__hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);
1594
1595 /* Suspend HCI device */
1596 int hci_suspend_dev(struct hci_dev *hdev)
1597 {
1598         hci_notify(hdev, HCI_DEV_SUSPEND);
1599         return 0;
1600 }
1601 EXPORT_SYMBOL(hci_suspend_dev);
1602
1603 /* Resume HCI device */
1604 int hci_resume_dev(struct hci_dev *hdev)
1605 {
1606         hci_notify(hdev, HCI_DEV_RESUME);
1607         return 0;
1608 }
1609 EXPORT_SYMBOL(hci_resume_dev);
1610
/* Receive frame from HCI drivers.
 *
 * Tags and timestamps the skb, then queues it on rx_q for the RX
 * work item. Returns 0 on success or -ENXIO (after freeing the skb)
 * when the device is neither up nor initializing.
 */
int hci_recv_frame(struct sk_buff *skb)
{
	struct hci_dev *hdev = (struct hci_dev *) skb->dev;
	if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
				&& !test_bit(HCI_INIT, &hdev->flags))) {
		kfree_skb(skb);
		return -ENXIO;
	}

	/* Incoming skb */
	bt_cb(skb)->incoming = 1;

	/* Time stamp */
	__net_timestamp(skb);

	/* Hand off to the RX work item */
	skb_queue_tail(&hdev->rx_q, skb);
	queue_work(hdev->workqueue, &hdev->rx_work);

	return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
1633
/* Append up to @count bytes from @data to the partially reassembled
 * packet in hdev->reassembly[@index], allocating a fresh skb when no
 * packet is in progress. Completed frames are passed on via
 * hci_recv_frame().
 *
 * Returns the number of unconsumed bytes (>= 0), -EILSEQ for an
 * invalid type/index, or -ENOMEM on allocation failure or when the
 * advertised payload exceeds the skb's capacity.
 */
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
						  int count, __u8 index)
{
	int len = 0;
	int hlen = 0;
	int remain = count;
	struct sk_buff *skb;
	struct bt_skb_cb *scb;

	if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
				index >= NUM_REASSEMBLY)
		return -EILSEQ;

	skb = hdev->reassembly[index];

	if (!skb) {
		/* Start of a new packet: size the skb for the largest
		 * possible frame of this type and expect the fixed
		 * header first. */
		switch (type) {
		case HCI_ACLDATA_PKT:
			len = HCI_MAX_FRAME_SIZE;
			hlen = HCI_ACL_HDR_SIZE;
			break;
		case HCI_EVENT_PKT:
			len = HCI_MAX_EVENT_SIZE;
			hlen = HCI_EVENT_HDR_SIZE;
			break;
		case HCI_SCODATA_PKT:
			len = HCI_MAX_SCO_SIZE;
			hlen = HCI_SCO_HDR_SIZE;
			break;
		}

		skb = bt_skb_alloc(len, GFP_ATOMIC);
		if (!skb)
			return -ENOMEM;

		/* scb->expect tracks how many bytes are still missing */
		scb = (void *) skb->cb;
		scb->expect = hlen;
		scb->pkt_type = type;

		skb->dev = (void *) hdev;
		hdev->reassembly[index] = skb;
	}

	while (count) {
		scb = (void *) skb->cb;
		len = min(scb->expect, (__u16)count);

		memcpy(skb_put(skb, len), data, len);

		count -= len;
		data += len;
		scb->expect -= len;
		remain = count;

		/* Once the header is complete, read the payload length
		 * from it and sanity-check against the remaining
		 * tailroom. */
		switch (type) {
		case HCI_EVENT_PKT:
			if (skb->len == HCI_EVENT_HDR_SIZE) {
				struct hci_event_hdr *h = hci_event_hdr(skb);
				scb->expect = h->plen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_ACLDATA_PKT:
			if (skb->len  == HCI_ACL_HDR_SIZE) {
				struct hci_acl_hdr *h = hci_acl_hdr(skb);
				scb->expect = __le16_to_cpu(h->dlen);

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;

		case HCI_SCODATA_PKT:
			if (skb->len == HCI_SCO_HDR_SIZE) {
				struct hci_sco_hdr *h = hci_sco_hdr(skb);
				scb->expect = h->dlen;

				if (skb_tailroom(skb) < scb->expect) {
					kfree_skb(skb);
					hdev->reassembly[index] = NULL;
					return -ENOMEM;
				}
			}
			break;
		}

		if (scb->expect == 0) {
			/* Complete frame */

			bt_cb(skb)->pkt_type = type;
			hci_recv_frame(skb);

			/* Any leftover bytes belong to the next packet */
			hdev->reassembly[index] = NULL;
			return remain;
		}
	}

	return remain;
}
1742
1743 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1744 {
1745         int rem = 0;
1746
1747         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1748                 return -EILSEQ;
1749
1750         while (count) {
1751                 rem = hci_reassembly(hdev, type, data, count, type - 1);
1752                 if (rem < 0)
1753                         return rem;
1754
1755                 data += (count - rem);
1756                 count = rem;
1757         }
1758
1759         return rem;
1760 }
1761 EXPORT_SYMBOL(hci_recv_fragment);
1762
#define STREAM_REASSEMBLY 0

/* Reassemble packets from a raw byte stream (e.g. a UART transport)
 * where each frame is preceded by a one-byte packet-type indicator.
 *
 * Returns the number of unconsumed bytes or a negative error.
 */
int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
	int type;
	int rem = 0;

	while (count) {
		struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

		if (!skb) {
			struct { char type; } *pkt;

			/* Start of the frame */
			pkt = data;
			type = pkt->type;

			/* Skip the type byte; arithmetic on void* is a
			 * GCC extension (steps one byte) used throughout
			 * the kernel. */
			data++;
			count--;
		} else
			type = bt_cb(skb)->pkt_type;

		rem = hci_reassembly(hdev, type, data, count,
							STREAM_REASSEMBLY);
		if (rem < 0)
			return rem;

		data += (count - rem);
		count = rem;
	}

	return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
1797
1798 /* ---- Interface to upper protocols ---- */
1799
1800 /* Register/Unregister protocols.
1801  * hci_task_lock is used to ensure that no tasks are running. */
1802 int hci_register_proto(struct hci_proto *hp)
1803 {
1804         int err = 0;
1805
1806         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1807
1808         if (hp->id >= HCI_MAX_PROTO)
1809                 return -EINVAL;
1810
1811         mutex_lock(&hci_task_lock);
1812
1813         if (!hci_proto[hp->id])
1814                 hci_proto[hp->id] = hp;
1815         else
1816                 err = -EEXIST;
1817
1818         mutex_unlock(&hci_task_lock);
1819
1820         return err;
1821 }
1822 EXPORT_SYMBOL(hci_register_proto);
1823
1824 int hci_unregister_proto(struct hci_proto *hp)
1825 {
1826         int err = 0;
1827
1828         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1829
1830         if (hp->id >= HCI_MAX_PROTO)
1831                 return -EINVAL;
1832
1833         mutex_lock(&hci_task_lock);
1834
1835         if (hci_proto[hp->id])
1836                 hci_proto[hp->id] = NULL;
1837         else
1838                 err = -ENOENT;
1839
1840         mutex_unlock(&hci_task_lock);
1841
1842         return err;
1843 }
1844 EXPORT_SYMBOL(hci_unregister_proto);
1845
1846 int hci_register_cb(struct hci_cb *cb)
1847 {
1848         BT_DBG("%p name %s", cb, cb->name);
1849
1850         write_lock_bh(&hci_cb_list_lock);
1851         list_add(&cb->list, &hci_cb_list);
1852         write_unlock_bh(&hci_cb_list_lock);
1853
1854         return 0;
1855 }
1856 EXPORT_SYMBOL(hci_register_cb);
1857
1858 int hci_unregister_cb(struct hci_cb *cb)
1859 {
1860         BT_DBG("%p name %s", cb, cb->name);
1861
1862         write_lock_bh(&hci_cb_list_lock);
1863         list_del(&cb->list);
1864         write_unlock_bh(&hci_cb_list_lock);
1865
1866         return 0;
1867 }
1868 EXPORT_SYMBOL(hci_unregister_cb);
1869
1870 static int hci_send_frame(struct sk_buff *skb)
1871 {
1872         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1873
1874         if (!hdev) {
1875                 kfree_skb(skb);
1876                 return -ENODEV;
1877         }
1878
1879         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1880
1881         if (atomic_read(&hdev->promisc)) {
1882                 /* Time stamp */
1883                 __net_timestamp(skb);
1884
1885                 hci_send_to_sock(hdev, skb, NULL);
1886         }
1887
1888         /* Get rid of skb owner, prior to sending to the driver. */
1889         skb_orphan(skb);
1890
1891         return hdev->send(skb);
1892 }
1893
/* Send HCI command.
 *
 * Builds a command packet (header plus @plen parameter bytes from
 * @param) and queues it on cmd_q; the cmd work item performs the
 * actual transmission. Returns 0 on success or -ENOMEM when the skb
 * cannot be allocated.
 */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
	int len = HCI_COMMAND_HDR_SIZE + plen;
	struct hci_command_hdr *hdr;
	struct sk_buff *skb;

	BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

	skb = bt_skb_alloc(len, GFP_ATOMIC);
	if (!skb) {
		BT_ERR("%s no memory for command", hdev->name);
		return -ENOMEM;
	}

	hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
	hdr->opcode = cpu_to_le16(opcode);
	hdr->plen   = plen;

	if (plen)
		memcpy(skb_put(skb, plen), param, plen);

	BT_DBG("skb len %d", skb->len);

	bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
	skb->dev = (void *) hdev;

	/* Remember the last command issued during init so that its
	 * completion can drive the init sequence. */
	if (test_bit(HCI_INIT, &hdev->flags))
		hdev->init_last_cmd = opcode;

	skb_queue_tail(&hdev->cmd_q, skb);
	queue_work(hdev->workqueue, &hdev->cmd_work);

	return 0;
}
1929
1930 /* Get data from the previously sent command */
1931 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1932 {
1933         struct hci_command_hdr *hdr;
1934
1935         if (!hdev->sent_cmd)
1936                 return NULL;
1937
1938         hdr = (void *) hdev->sent_cmd->data;
1939
1940         if (hdr->opcode != cpu_to_le16(opcode))
1941                 return NULL;
1942
1943         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1944
1945         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1946 }
1947
1948 /* Send ACL data */
1949 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1950 {
1951         struct hci_acl_hdr *hdr;
1952         int len = skb->len;
1953
1954         skb_push(skb, HCI_ACL_HDR_SIZE);
1955         skb_reset_transport_header(skb);
1956         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1957         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1958         hdr->dlen   = cpu_to_le16(len);
1959 }
1960
/* Queue an ACL skb — and any fragments hanging off its frag_list —
 * for transmission. The first fragment keeps @flags; continuation
 * fragments are queued with ACL_CONT instead of ACL_START.
 */
static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
				struct sk_buff *skb, __u16 flags)
{
	struct hci_dev *hdev = conn->hdev;
	struct sk_buff *list;

	list = skb_shinfo(skb)->frag_list;
	if (!list) {
		/* Non fragmented */
		BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

		skb_queue_tail(queue, skb);
	} else {
		/* Fragmented */
		BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

		/* Detach the chain; each fragment becomes its own ACL
		 * packet below. */
		skb_shinfo(skb)->frag_list = NULL;

		/* Queue all fragments atomically */
		spin_lock_bh(&queue->lock);

		__skb_queue_tail(queue, skb);

		/* Every fragment after the first is a continuation */
		flags &= ~ACL_START;
		flags |= ACL_CONT;
		do {
			skb = list; list = list->next;

			skb->dev = (void *) hdev;
			bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
			hci_add_acl_hdr(skb, conn->handle, flags);

			BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

			__skb_queue_tail(queue, skb);
		} while (list);

		spin_unlock_bh(&queue->lock);
	}
}
2001
/*
 * Send ACL data on a channel: tag the skb, stamp the ACL header on the
 * first fragment, queue everything on the channel's data queue and kick
 * the TX work. Ownership of @skb passes to the queue.
 */
void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
{
	struct hci_conn *conn = chan->conn;
	struct hci_dev *hdev = conn->hdev;

	BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);

	skb->dev = (void *) hdev;
	bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
	/* Only the head skb gets its header here; fragments are handled
	 * inside hci_queue_acl() */
	hci_add_acl_hdr(skb, conn->handle, flags);

	hci_queue_acl(conn, &chan->data_q, skb, flags);

	/* Let the TX scheduler actually push it to the driver */
	queue_work(hdev->workqueue, &hdev->tx_work);
}
EXPORT_SYMBOL(hci_send_acl);
2018
2019 /* Send SCO data */
2020 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2021 {
2022         struct hci_dev *hdev = conn->hdev;
2023         struct hci_sco_hdr hdr;
2024
2025         BT_DBG("%s len %d", hdev->name, skb->len);
2026
2027         hdr.handle = cpu_to_le16(conn->handle);
2028         hdr.dlen   = skb->len;
2029
2030         skb_push(skb, HCI_SCO_HDR_SIZE);
2031         skb_reset_transport_header(skb);
2032         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2033
2034         skb->dev = (void *) hdev;
2035         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2036
2037         skb_queue_tail(&conn->data_q, skb);
2038         queue_work(hdev->workqueue, &hdev->tx_work);
2039 }
2040 EXPORT_SYMBOL(hci_send_sco);
2041
2042 /* ---- HCI TX task (outgoing data) ---- */
2043
2044 /* HCI Connection scheduler */
/*
 * Connection scheduler: pick the connection of @type that has data queued
 * and the fewest outstanding (unacked) packets, and compute its fair share
 * of the controller's free buffer slots in *@quote.
 *
 * Returns the chosen connection, or NULL (with *quote = 0) when no
 * eligible connection of this type has anything to send.
 */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_conn *conn = NULL, *c;
	/* min starts at ~0 (all bits set) so any real sent count is
	 * smaller. NOTE(review): assumes c->sent compares as unsigned
	 * here — confirm against the hci_conn definition. */
	int num = 0, min = ~0;

	/* We don't have to lock device here. Connections are always
	 * added and removed with TX task disabled. */

	rcu_read_lock();

	list_for_each_entry_rcu(c, &h->list, list) {
		/* Skip other link types and connections with nothing queued */
		if (c->type != type || skb_queue_empty(&c->data_q))
			continue;

		if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
			continue;

		num++;

		/* Track the least-busy eligible connection */
		if (c->sent < min) {
			min  = c->sent;
			conn = c;
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == num)
			break;
	}

	rcu_read_unlock();

	if (conn) {
		int cnt, q;

		/* Pick the buffer pool that flow-controls this link type */
		switch (conn->type) {
		case ACL_LINK:
			cnt = hdev->acl_cnt;
			break;
		case SCO_LINK:
		case ESCO_LINK:
			cnt = hdev->sco_cnt;
			break;
		case LE_LINK:
			/* Controllers without a dedicated LE pool share
			 * the ACL buffer pool */
			cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
			break;
		default:
			cnt = 0;
			BT_ERR("Unknown link type");
		}

		/* Fair share of the free buffers, at least one packet */
		q = cnt / num;
		*quote = q ? q : 1;
	} else
		*quote = 0;

	BT_DBG("conn %p quote %d", conn, *quote);
	return conn;
}
2103
2104 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2105 {
2106         struct hci_conn_hash *h = &hdev->conn_hash;
2107         struct hci_conn *c;
2108
2109         BT_ERR("%s link tx timeout", hdev->name);
2110
2111         rcu_read_lock();
2112
2113         /* Kill stalled connections */
2114         list_for_each_entry_rcu(c, &h->list, list) {
2115                 if (c->type == type && c->sent) {
2116                         BT_ERR("%s killing stalled connection %s",
2117                                 hdev->name, batostr(&c->dst));
2118                         hci_acl_disconn(c, 0x13);
2119                 }
2120         }
2121
2122         rcu_read_unlock();
2123 }
2124
/*
 * Channel scheduler: among all channels on connections of @type, pick one
 * whose head skb has the highest priority, breaking ties in favour of the
 * connection with the fewest outstanding packets. The winner's fair share
 * of the free controller buffers is returned in *@quote.
 */
static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
						int *quote)
{
	struct hci_conn_hash *h = &hdev->conn_hash;
	struct hci_chan *chan = NULL;
	/* min = ~0 sentinel; num counts channels at the current best
	 * priority level */
	int num = 0, min = ~0, cur_prio = 0;
	struct hci_conn *conn;
	int cnt, q, conn_num = 0;

	BT_DBG("%s", hdev->name);

	rcu_read_lock();

	list_for_each_entry_rcu(conn, &h->list, list) {
		struct hci_chan *tmp;

		if (conn->type != type)
			continue;

		if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
			continue;

		conn_num++;

		list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
			struct sk_buff *skb;

			if (skb_queue_empty(&tmp->data_q))
				continue;

			/* Only the head skb's priority matters; channels
			 * below the current best priority are skipped */
			skb = skb_peek(&tmp->data_q);
			if (skb->priority < cur_prio)
				continue;

			/* Strictly higher priority found: restart the
			 * selection at this new priority level */
			if (skb->priority > cur_prio) {
				num = 0;
				min = ~0;
				cur_prio = skb->priority;
			}

			num++;

			/* Tie-break on the owning connection's outstanding
			 * packet count */
			if (conn->sent < min) {
				min  = conn->sent;
				chan = tmp;
			}
		}

		/* Stop early once every connection of this type was seen */
		if (hci_conn_num(hdev, type) == conn_num)
			break;
	}

	rcu_read_unlock();

	if (!chan)
		return NULL;

	/* Pick the buffer pool that flow-controls this link type */
	switch (chan->conn->type) {
	case ACL_LINK:
		cnt = hdev->acl_cnt;
		break;
	case SCO_LINK:
	case ESCO_LINK:
		cnt = hdev->sco_cnt;
		break;
	case LE_LINK:
		cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
		break;
	default:
		cnt = 0;
		BT_ERR("Unknown link type");
	}

	/* Fair share among same-priority channels, minimum one packet */
	q = cnt / num;
	*quote = q ? q : 1;
	BT_DBG("chan %p quote %d", chan, *quote);
	return chan;
}
2203
2204 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2205 {
2206         struct hci_conn_hash *h = &hdev->conn_hash;
2207         struct hci_conn *conn;
2208         int num = 0;
2209
2210         BT_DBG("%s", hdev->name);
2211
2212         rcu_read_lock();
2213
2214         list_for_each_entry_rcu(conn, &h->list, list) {
2215                 struct hci_chan *chan;
2216
2217                 if (conn->type != type)
2218                         continue;
2219
2220                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2221                         continue;
2222
2223                 num++;
2224
2225                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2226                         struct sk_buff *skb;
2227
2228                         if (chan->sent) {
2229                                 chan->sent = 0;
2230                                 continue;
2231                         }
2232
2233                         if (skb_queue_empty(&chan->data_q))
2234                                 continue;
2235
2236                         skb = skb_peek(&chan->data_q);
2237                         if (skb->priority >= HCI_PRIO_MAX - 1)
2238                                 continue;
2239
2240                         skb->priority = HCI_PRIO_MAX - 1;
2241
2242                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2243                                                                 skb->priority);
2244                 }
2245
2246                 if (hci_conn_num(hdev, type) == num)
2247                         break;
2248         }
2249
2250         rcu_read_unlock();
2251
2252 }
2253
/*
 * ACL scheduler: drain queued ACL packets, channel by channel in priority
 * order, while the controller has free ACL buffers (hdev->acl_cnt).
 */
static inline void hci_sched_acl(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote;
	unsigned int cnt;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, ACL_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* ACL tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
			hci_link_tx_to(hdev, ACL_LINK);
	}

	/* Remember the starting budget to detect whether anything was sent */
	cnt = hdev->acl_cnt;

	while (hdev->acl_cnt &&
			(chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
		/* Priority of the head packet when the channel was chosen */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_conn_enter_active_mode(chan->conn,
						bt_cb(skb)->force_active);

			hci_send_frame(skb);
			hdev->acl_last_tx = jiffies;

			/* One controller buffer consumed, bump per-channel
			 * and per-connection outstanding counts */
			hdev->acl_cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* If anything was sent, rebalance channel priorities */
	if (cnt != hdev->acl_cnt)
		hci_prio_recalculate(hdev, ACL_LINK);
}
2303
2304 /* Schedule SCO */
2305 static inline void hci_sched_sco(struct hci_dev *hdev)
2306 {
2307         struct hci_conn *conn;
2308         struct sk_buff *skb;
2309         int quote;
2310
2311         BT_DBG("%s", hdev->name);
2312
2313         if (!hci_conn_num(hdev, SCO_LINK))
2314                 return;
2315
2316         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2317                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2318                         BT_DBG("skb %p len %d", skb, skb->len);
2319                         hci_send_frame(skb);
2320
2321                         conn->sent++;
2322                         if (conn->sent == ~0)
2323                                 conn->sent = 0;
2324                 }
2325         }
2326 }
2327
2328 static inline void hci_sched_esco(struct hci_dev *hdev)
2329 {
2330         struct hci_conn *conn;
2331         struct sk_buff *skb;
2332         int quote;
2333
2334         BT_DBG("%s", hdev->name);
2335
2336         if (!hci_conn_num(hdev, ESCO_LINK))
2337                 return;
2338
2339         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2340                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2341                         BT_DBG("skb %p len %d", skb, skb->len);
2342                         hci_send_frame(skb);
2343
2344                         conn->sent++;
2345                         if (conn->sent == ~0)
2346                                 conn->sent = 0;
2347                 }
2348         }
2349 }
2350
/*
 * LE scheduler: like hci_sched_acl(), but drawing from the dedicated LE
 * buffer pool when the controller has one (le_pkts != 0) and from the
 * shared ACL pool otherwise.
 */
static inline void hci_sched_le(struct hci_dev *hdev)
{
	struct hci_chan *chan;
	struct sk_buff *skb;
	int quote, cnt, tmp;

	BT_DBG("%s", hdev->name);

	if (!hci_conn_num(hdev, LE_LINK))
		return;

	if (!test_bit(HCI_RAW, &hdev->flags)) {
		/* LE tx timeout must be longer than maximum
		 * link supervision timeout (40.9 seconds) */
		if (!hdev->le_cnt && hdev->le_pkts &&
				time_after(jiffies, hdev->le_last_tx + HZ * 45))
			hci_link_tx_to(hdev, LE_LINK);
	}

	/* Use the LE buffer pool if present, else share the ACL pool */
	cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
	/* Starting budget, to detect whether anything was sent */
	tmp = cnt;
	while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
		/* Priority of the head packet when the channel was chosen */
		u32 priority = (skb_peek(&chan->data_q))->priority;
		while (quote-- && (skb = skb_peek(&chan->data_q))) {
			BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
					skb->len, skb->priority);

			/* Stop if priority has changed */
			if (skb->priority < priority)
				break;

			skb = skb_dequeue(&chan->data_q);

			hci_send_frame(skb);
			hdev->le_last_tx = jiffies;

			cnt--;
			chan->sent++;
			chan->conn->sent++;
		}
	}

	/* Write the remaining budget back to whichever pool was used */
	if (hdev->le_pkts)
		hdev->le_cnt = cnt;
	else
		hdev->acl_cnt = cnt;

	/* If anything was sent, rebalance channel priorities */
	if (cnt != tmp)
		hci_prio_recalculate(hdev, LE_LINK);
}
2401
2402 static void hci_tx_work(struct work_struct *work)
2403 {
2404         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2405         struct sk_buff *skb;
2406
2407         mutex_lock(&hci_task_lock);
2408
2409         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2410                 hdev->sco_cnt, hdev->le_cnt);
2411
2412         /* Schedule queues and send stuff to HCI driver */
2413
2414         hci_sched_acl(hdev);
2415
2416         hci_sched_sco(hdev);
2417
2418         hci_sched_esco(hdev);
2419
2420         hci_sched_le(hdev);
2421
2422         /* Send next queued raw (unknown type) packet */
2423         while ((skb = skb_dequeue(&hdev->raw_q)))
2424                 hci_send_frame(skb);
2425
2426         mutex_unlock(&hci_task_lock);
2427 }
2428
2429 /* ----- HCI RX task (incoming data processing) ----- */
2430
2431 /* ACL data packet */
2432 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2433 {
2434         struct hci_acl_hdr *hdr = (void *) skb->data;
2435         struct hci_conn *conn;
2436         __u16 handle, flags;
2437
2438         skb_pull(skb, HCI_ACL_HDR_SIZE);
2439
2440         handle = __le16_to_cpu(hdr->handle);
2441         flags  = hci_flags(handle);
2442         handle = hci_handle(handle);
2443
2444         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2445
2446         hdev->stat.acl_rx++;
2447
2448         hci_dev_lock(hdev);
2449         conn = hci_conn_hash_lookup_handle(hdev, handle);
2450         hci_dev_unlock(hdev);
2451
2452         if (conn) {
2453                 register struct hci_proto *hp;
2454
2455                 hci_conn_enter_active_mode(conn, bt_cb(skb)->force_active);
2456
2457                 /* Send to upper protocol */
2458                 hp = hci_proto[HCI_PROTO_L2CAP];
2459                 if (hp && hp->recv_acldata) {
2460                         hp->recv_acldata(conn, skb, flags);
2461                         return;
2462                 }
2463         } else {
2464                 BT_ERR("%s ACL packet for unknown connection handle %d",
2465                         hdev->name, handle);
2466         }
2467
2468         kfree_skb(skb);
2469 }
2470
2471 /* SCO data packet */
2472 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2473 {
2474         struct hci_sco_hdr *hdr = (void *) skb->data;
2475         struct hci_conn *conn;
2476         __u16 handle;
2477
2478         skb_pull(skb, HCI_SCO_HDR_SIZE);
2479
2480         handle = __le16_to_cpu(hdr->handle);
2481
2482         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2483
2484         hdev->stat.sco_rx++;
2485
2486         hci_dev_lock(hdev);
2487         conn = hci_conn_hash_lookup_handle(hdev, handle);
2488         hci_dev_unlock(hdev);
2489
2490         if (conn) {
2491                 register struct hci_proto *hp;
2492
2493                 /* Send to upper protocol */
2494                 hp = hci_proto[HCI_PROTO_SCO];
2495                 if (hp && hp->recv_scodata) {
2496                         hp->recv_scodata(conn, skb);
2497                         return;
2498                 }
2499         } else {
2500                 BT_ERR("%s SCO packet for unknown connection handle %d",
2501                         hdev->name, handle);
2502         }
2503
2504         kfree_skb(skb);
2505 }
2506
/*
 * RX work item: drain hdev->rx_q, copying frames to monitoring sockets
 * when in promiscuous mode and dispatching each packet to the event,
 * ACL or SCO handler according to its type.
 */
static void hci_rx_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
	struct sk_buff *skb;

	BT_DBG("%s", hdev->name);

	mutex_lock(&hci_task_lock);

	while ((skb = skb_dequeue(&hdev->rx_q))) {
		if (atomic_read(&hdev->promisc)) {
			/* Send copy to the sockets */
			hci_send_to_sock(hdev, skb, NULL);
		}

		/* In raw mode the stack does not process frames at all */
		if (test_bit(HCI_RAW, &hdev->flags)) {
			kfree_skb(skb);
			continue;
		}

		if (test_bit(HCI_INIT, &hdev->flags)) {
			/* Don't process data packets in these states. */
			switch (bt_cb(skb)->pkt_type) {
			case HCI_ACLDATA_PKT:
			case HCI_SCODATA_PKT:
				kfree_skb(skb);
				continue;
			}
		}

		/* Process frame */
		switch (bt_cb(skb)->pkt_type) {
		case HCI_EVENT_PKT:
			BT_DBG("%s Event packet", hdev->name);
			hci_event_packet(hdev, skb);
			break;

		case HCI_ACLDATA_PKT:
			BT_DBG("%s ACL data packet", hdev->name);
			hci_acldata_packet(hdev, skb);
			break;

		case HCI_SCODATA_PKT:
			BT_DBG("%s SCO data packet", hdev->name);
			hci_scodata_packet(hdev, skb);
			break;

		default:
			/* Unknown packet type: drop it */
			kfree_skb(skb);
			break;
		}
	}

	mutex_unlock(&hci_task_lock);
}
2562
/*
 * Command work item: send the next queued HCI command when the controller
 * has a free command slot (cmd_cnt), keeping a clone in hdev->sent_cmd so
 * completion handling can match the reply against the request.
 */
static void hci_cmd_work(struct work_struct *work)
{
	struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
	struct sk_buff *skb;

	BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

	/* Send queued commands */
	if (atomic_read(&hdev->cmd_cnt)) {
		skb = skb_dequeue(&hdev->cmd_q);
		if (!skb)
			return;

		/* Drop the previously saved command, if any */
		kfree_skb(hdev->sent_cmd);

		hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
		if (hdev->sent_cmd) {
			atomic_dec(&hdev->cmd_cnt);
			hci_send_frame(skb);
			/* While resetting, no command timeout is armed;
			 * otherwise (re)start the command watchdog */
			if (test_bit(HCI_RESET, &hdev->flags))
				del_timer(&hdev->cmd_timer);
			else
				mod_timer(&hdev->cmd_timer,
				  jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
		} else {
			/* Clone failed (out of memory): put the command
			 * back and retry from the workqueue later */
			skb_queue_head(&hdev->cmd_q, skb);
			queue_work(hdev->workqueue, &hdev->cmd_work);
		}
	}
}
2593
2594 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2595 {
2596         /* General inquiry access code (GIAC) */
2597         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2598         struct hci_cp_inquiry cp;
2599
2600         BT_DBG("%s", hdev->name);
2601
2602         if (test_bit(HCI_INQUIRY, &hdev->flags))
2603                 return -EINPROGRESS;
2604
2605         memset(&cp, 0, sizeof(cp));
2606         memcpy(&cp.lap, lap, sizeof(cp.lap));
2607         cp.length  = length;
2608
2609         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2610 }
2611
2612 int hci_cancel_inquiry(struct hci_dev *hdev)
2613 {
2614         BT_DBG("%s", hdev->name);
2615
2616         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2617                 return -EPERM;
2618
2619         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2620 }
2621
/* Expose the High Speed feature toggle as a writable (0644) module
 * parameter. NOTE(review): enable_hs itself is defined earlier in this
 * file, outside this view. */
module_param(enable_hs, bool, 0644);
MODULE_PARM_DESC(enable_hs, "Enable High Speed");