Bluetooth: Initialize default flow control mode
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated
   Copyright (C) 2011 ProFUSION Embedded Systems

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <linux/crypto.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

int enable_hs;

static void hci_rx_work(struct work_struct *work);
static void hci_cmd_work(struct work_struct *work);
static void hci_tx_work(struct work_struct *work);

static DEFINE_MUTEX(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

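/* Example (not part of the original file): a driver or module could
 * watch device lifecycle events through the notifier API above. A
 * minimal sketch; the names my_hci_event and my_nb are hypothetical:
 *
 *      static int my_hci_event(struct notifier_block *nb,
 *                              unsigned long event, void *ptr)
 *      {
 *              struct hci_dev *hdev = ptr;
 *
 *              if (event == HCI_DEV_REG)
 *                      BT_DBG("hci device %s registered", hdev->name);
 *
 *              return NOTIFY_DONE;
 *      }
 *
 *      static struct notifier_block my_nb = {
 *              .notifier_call = my_hci_event,
 *      };
 *
 *      hci_register_notifier(&my_nb);
 */
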
/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase, check whether the completed command
         * matches the last init command; if it doesn't, just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
                return;

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_to_errno(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

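/* Example (illustrative only): a "request" is a small callback that
 * queues HCI commands; hci_request() runs one synchronously and waits
 * for hci_req_complete(). E.g. enabling page scan via the hci_scan_req
 * helper defined below:
 *
 *      err = hci_request(hdev, hci_scan_req, SCAN_PAGE,
 *                              msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */
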
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        set_bit(HCI_RESET, &hdev->flags);
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void bredr_init(struct hci_dev *hdev)
{
        struct hci_cp_delete_stored_link_key cp;
        __le16 param;
        __u8 flt_type;

        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_PACKET_BASED;

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
                set_bit(HCI_RESET, &hdev->flags);
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
        }

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void amp_init(struct hci_dev *hdev)
{
        hdev->flow_ctl_mode = HCI_FLOW_CTL_MODE_BLOCK_BASED;

        /* Reset */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
}

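/* Note on the defaults above: the flow control mode is initialized per
 * transport, since BR/EDR controllers report packet-based flow control
 * (Read Buffer Size) while AMP controllers use block-based flow
 * control. */
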
static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                queue_work(hdev->workqueue, &hdev->cmd_work);
        }
        skb_queue_purge(&hdev->driver_init);

        switch (hdev->dev_type) {
        case HCI_BREDR:
                bredr_init(hdev);
                break;

        case HCI_AMP:
                amp_init(hdev);
                break;

        default:
                BT_ERR("Unknown device type %d", hdev->dev_type);
                break;
        }
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL, *d;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each_entry(d, &hci_dev_list, list) {
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

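/* Example (illustrative only): hci_dev_get() returns the device with a
 * held reference, so every successful lookup must be paired with
 * hci_dev_put():
 *
 *      struct hci_dev *hdev = hci_dev_get(0);
 *      if (hdev) {
 *              BT_DBG("found %s", hdev->name);
 *              hci_dev_put(hdev);
 *      }
 */
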
/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (!ie) {
                /* Entry not in the cache. Add new one. */
                ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
                if (!ie)
                        return;

                ie->next = cache->list;
                cache->list = ie;
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses, use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and then
         * copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

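/* Example (userspace, illustrative only): hci_inquiry() backs the
 * HCIINQUIRY ioctl. A caller passes one buffer holding the request
 * followed by room for the responses; hci_sock_fd is a hypothetical
 * HCI socket descriptor and the LAP below is the GIAC (0x9e8b33):
 *
 *      struct {
 *              struct hci_inquiry_req ir;
 *              struct inquiry_info info[8];
 *      } req = { .ir = { .dev_id = 0, .lap = { 0x33, 0x8b, 0x9e },
 *                        .length = 8, .num_rsp = 8 } };
 *
 *      ioctl(hci_sock_fd, HCIINQUIRY, &req);
 */
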
/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non-BR/EDR controllers as raw devices if
         * enable_hs is not set */
        if (hdev->dev_type != HCI_BREDR && !enable_hs)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (lmp_host_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->flags)) {
                        hci_dev_lock(hdev);
                        mgmt_powered(hdev, 1);
                        hci_dev_unlock(hdev);
                }
        } else {
                /* Init failed, cleanup */
                flush_work(&hdev->tx_work);
                flush_work(&hdev->cmd_work);
                flush_work(&hdev->rx_work);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                del_timer_sync(&hdev->cmd_timer);
                hci_req_unlock(hdev);
                return 0;
        }

        /* Flush RX and TX works */
        flush_work(&hdev->tx_work);
        flush_work(&hdev->rx_work);

        if (hdev->discov_timeout > 0) {
                cancel_delayed_work(&hdev->discov_off);
                hdev->discov_timeout = 0;
        }

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
                cancel_delayed_work(&hdev->power_off);

        if (test_and_clear_bit(HCI_SERVICE_CACHE, &hdev->flags))
                cancel_delayed_work(&hdev->service_cache);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Flush cmd work */
        flush_work(&hdev->cmd_work);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        hci_dev_lock(hdev);
        mgmt_powered(hdev, 0);
        hci_dev_unlock(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

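/* Example (illustrative only): hci_dev_open() and hci_dev_close() back
 * the HCIDEVUP and HCIDEVDOWN ioctls on HCI sockets; e.g. bringing up
 * hci0 from userspace (hci_sock_fd is a hypothetical socket):
 *
 *      ioctl(hci_sock_fd, HCIDEVUP, 0);
 *      ...
 *      ioctl(hci_sock_fd, HCIDEVDOWN, 0);
 */
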
int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each_entry(hdev, &hci_dev_list, list) {
                if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
                        cancel_delayed_work(&hdev->power_off);

                if (!test_bit(HCI_MGMT, &hdev->flags))
                        set_bit(HCI_PAIRABLE, &hdev->flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        if (test_and_clear_bit(HCI_AUTO_OFF, &hdev->flags))
                cancel_delayed_work_sync(&hdev->power_off);

        if (!test_bit(HCI_MGMT, &hdev->flags))
                set_bit(HCI_PAIRABLE, &hdev->flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        hci_init_sysfs(hdev);
        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->flags))
                schedule_delayed_work(&hdev->power_off,
                                        msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
                mgmt_index_added(hdev);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                                        power_off.work);

        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);

        hci_dev_close(hdev->id);
}

static void hci_discov_off(struct work_struct *work)
{
        struct hci_dev *hdev;
        u8 scan = SCAN_PAGE;

        hdev = container_of(work, struct hci_dev, discov_off.work);

        BT_DBG("%s", hdev->name);

        hci_dev_lock(hdev);

        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, sizeof(scan), &scan);

        hdev->discov_timeout = 0;

        hci_dev_unlock(hdev);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->uuids) {
                struct bt_uuid *uuid;

                uuid = list_entry(p, struct bt_uuid, list);

                list_del(p);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}

static int hci_persistent_key(struct hci_dev *hdev, struct hci_conn *conn,
                                                u8 key_type, u8 old_key_type)
{
        /* Legacy key */
        if (key_type < 0x03)
                return 1;

        /* Debug keys are insecure so don't store them persistently */
        if (key_type == HCI_LK_DEBUG_COMBINATION)
                return 0;

        /* Changed combination key and there's no previous one */
        if (key_type == HCI_LK_CHANGED_COMBINATION && old_key_type == 0xff)
                return 0;

        /* Security mode 3 case */
        if (!conn)
                return 1;

        /* Neither the local nor the remote side had no-bonding as a
         * requirement */
        if (conn->auth_type > 0x01 && conn->remote_auth > 0x01)
                return 1;

        /* Local side had dedicated bonding as requirement */
        if (conn->auth_type == 0x02 || conn->auth_type == 0x03)
                return 1;

        /* Remote side had dedicated bonding as requirement */
        if (conn->remote_auth == 0x02 || conn->remote_auth == 0x03)
                return 1;

        /* If none of the above criteria match, then don't store the key
         * persistently */
        return 0;
}

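/* Summary of hci_persistent_key() (derived from the checks above):
 * legacy keys are always stored, debug combination keys never are, and
 * for the remaining key types the decision depends on whether either
 * side required bonding. */
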
struct link_key *hci_find_ltk(struct hci_dev *hdev, __le16 ediv, u8 rand[8])
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list) {
                struct key_master_id *id;

                if (k->type != HCI_LK_SMP_LTK)
                        continue;

                if (k->dlen != sizeof(*id))
                        continue;

                id = (void *) &k->data;
                if (id->ediv == ediv &&
                                (memcmp(rand, id->rand, sizeof(id->rand)) == 0))
                        return k;
        }

        return NULL;
}
EXPORT_SYMBOL(hci_find_ltk);

struct link_key *hci_find_link_key_type(struct hci_dev *hdev,
                                        bdaddr_t *bdaddr, u8 type)
{
        struct link_key *k;

        list_for_each_entry(k, &hdev->link_keys, list)
                if (k->type == type && bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;

        return NULL;
}
EXPORT_SYMBOL(hci_find_link_key_type);

int hci_add_link_key(struct hci_dev *hdev, struct hci_conn *conn, int new_key,
                                bdaddr_t *bdaddr, u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type, persistent;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = conn ? conn->key_type : 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

        /* Some buggy controller combinations generate a changed
         * combination key for legacy pairing even when there's no
         * previous key */
        if (type == HCI_LK_CHANGED_COMBINATION &&
                                        (!conn || conn->remote_auth == 0xff) &&
                                        old_key_type == 0xff) {
                type = HCI_LK_COMBINATION;
                if (conn)
                        conn->key_type = type;
        }

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, 16);
        key->pin_len = pin_len;

        if (type == HCI_LK_CHANGED_COMBINATION)
                key->type = old_key_type;
        else
                key->type = type;

        if (!new_key)
                return 0;

        persistent = hci_persistent_key(hdev, conn, type, old_key_type);

        mgmt_new_link_key(hdev, key, persistent);

        if (!persistent) {
                list_del(&key->list);
                kfree(key);
        }

        return 0;
}

int hci_add_ltk(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
                        u8 key_size, __le16 ediv, u8 rand[8], u8 ltk[16])
{
        struct link_key *key, *old_key;
        struct key_master_id *id;
        u8 old_key_type;

        BT_DBG("%s addr %s", hdev->name, batostr(bdaddr));

        old_key = hci_find_link_key_type(hdev, bdaddr, HCI_LK_SMP_LTK);
        if (old_key) {
                key = old_key;
                old_key_type = old_key->type;
        } else {
                key = kzalloc(sizeof(*key) + sizeof(*id), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
                old_key_type = 0xff;
        }

        key->dlen = sizeof(*id);

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, ltk, sizeof(key->val));
        key->type = HCI_LK_SMP_LTK;
        key->pin_len = key_size;

        id = (void *) &key->data;
        id->ediv = ediv;
        memcpy(id->rand, rand, sizeof(id->rand));

        if (new_key)
                mgmt_new_link_key(hdev, key, old_key_type);

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&key->list);
        kfree(key);

        return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        BT_ERR("%s command tx timeout", hdev->name);
        atomic_set(&hdev->cmd_cnt, 1);
        queue_work(hdev->workqueue, &hdev->cmd_work);
}

struct oob_data *hci_find_remote_oob_data(struct hci_dev *hdev,
                                                        bdaddr_t *bdaddr)
{
        struct oob_data *data;

        list_for_each_entry(data, &hdev->remote_oob_data, list)
                if (bacmp(bdaddr, &data->bdaddr) == 0)
                        return data;

        return NULL;
}

int hci_remove_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);
        if (!data)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&data->list);
        kfree(data);

        return 0;
}

int hci_remote_oob_data_clear(struct hci_dev *hdev)
{
        struct oob_data *data, *n;

        list_for_each_entry_safe(data, n, &hdev->remote_oob_data, list) {
                list_del(&data->list);
                kfree(data);
        }

        return 0;
}

int hci_add_remote_oob_data(struct hci_dev *hdev, bdaddr_t *bdaddr, u8 *hash,
                                                                u8 *randomizer)
{
        struct oob_data *data;

        data = hci_find_remote_oob_data(hdev, bdaddr);

        if (!data) {
                data = kmalloc(sizeof(*data), GFP_ATOMIC);
                if (!data)
                        return -ENOMEM;

                bacpy(&data->bdaddr, bdaddr);
                list_add(&data->list, &hdev->remote_oob_data);
        }

        memcpy(data->hash, hash, sizeof(data->hash));
        memcpy(data->randomizer, randomizer, sizeof(data->randomizer));

        BT_DBG("%s for %s", hdev->name, batostr(bdaddr));

        return 0;
}

struct bdaddr_list *hci_blacklist_lookup(struct hci_dev *hdev,
                                                bdaddr_t *bdaddr)
{
        struct bdaddr_list *b;

        list_for_each_entry(b, &hdev->blacklist, list)
                if (bacmp(bdaddr, &b->bdaddr) == 0)
                        return b;

        return NULL;
}

int hci_blacklist_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->blacklist) {
                struct bdaddr_list *b;

                b = list_entry(p, struct bdaddr_list, list);

                list_del(p);
                kfree(b);
        }

        return 0;
}

int hci_blacklist_add(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return -EBADF;

        if (hci_blacklist_lookup(hdev, bdaddr))
                return -EEXIST;

        entry = kzalloc(sizeof(struct bdaddr_list), GFP_KERNEL);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, bdaddr);

        list_add(&entry->list, &hdev->blacklist);

        return mgmt_device_blocked(hdev, bdaddr);
}

int hci_blacklist_del(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct bdaddr_list *entry;

        if (bacmp(bdaddr, BDADDR_ANY) == 0)
                return hci_blacklist_clear(hdev);

        entry = hci_blacklist_lookup(hdev, bdaddr);
        if (!entry)
                return -ENOENT;

        list_del(&entry->list);
        kfree(entry);

        return mgmt_device_unblocked(hdev, bdaddr);
}

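/* Example (illustrative only): blocking and later unblocking a remote
 * device, assuming ba already holds its address:
 *
 *      bdaddr_t ba = ...;
 *
 *      err = hci_blacklist_add(hdev, &ba);
 *      ...
 *      err = hci_blacklist_del(hdev, &ba);
 */
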
static void hci_clear_adv_cache(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev,
                                                        adv_work.work);

        hci_dev_lock(hdev);

        hci_adv_entries_clear(hdev);

        hci_dev_unlock(hdev);
}

int hci_adv_entries_clear(struct hci_dev *hdev)
{
        struct adv_entry *entry, *tmp;

        list_for_each_entry_safe(entry, tmp, &hdev->adv_entries, list) {
                list_del(&entry->list);
                kfree(entry);
        }

        BT_DBG("%s adv cache cleared", hdev->name);

        return 0;
}

struct adv_entry *hci_find_adv_entry(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct adv_entry *entry;

        list_for_each_entry(entry, &hdev->adv_entries, list)
                if (bacmp(bdaddr, &entry->bdaddr) == 0)
                        return entry;

        return NULL;
}

static inline int is_connectable_adv(u8 evt_type)
{
        if (evt_type == ADV_IND || evt_type == ADV_DIRECT_IND)
                return 1;

        return 0;
}

int hci_add_adv_entry(struct hci_dev *hdev,
                                        struct hci_ev_le_advertising_info *ev)
{
        struct adv_entry *entry;

        if (!is_connectable_adv(ev->evt_type))
                return -EINVAL;

        /* Only new entries should be added to adv_entries. So, if
         * bdaddr was found, don't add it. */
        if (hci_find_adv_entry(hdev, &ev->bdaddr))
                return 0;

        entry = kzalloc(sizeof(*entry), GFP_ATOMIC);
        if (!entry)
                return -ENOMEM;

        bacpy(&entry->bdaddr, &ev->bdaddr);
        entry->bdaddr_type = ev->bdaddr_type;

        list_add(&entry->list, &hdev->adv_entries);

        BT_DBG("%s adv entry added: address %s type %u", hdev->name,
                                batostr(&entry->bdaddr), entry->bdaddr_type);

        return 0;
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id, error;

        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        /* Do not allow HCI_AMP devices to register at index 0,
         * so the index can be used as the AMP controller ID.
         */
        id = (hdev->dev_type == HCI_BREDR) ? 0 : 1;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add_tail(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        mutex_init(&hdev->lock);

        hdev->flags = 0;
        hdev->dev_flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->io_capability = 0x03; /* No Input No Output */

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        INIT_WORK(&hdev->rx_work, hci_rx_work);
        INIT_WORK(&hdev->cmd_work, hci_cmd_work);
        INIT_WORK(&hdev->tx_work, hci_tx_work);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->mgmt_pending);
        INIT_LIST_HEAD(&hdev->blacklist);
        INIT_LIST_HEAD(&hdev->uuids);
        INIT_LIST_HEAD(&hdev->link_keys);
        INIT_LIST_HEAD(&hdev->remote_oob_data);
        INIT_LIST_HEAD(&hdev->adv_entries);

        INIT_DELAYED_WORK(&hdev->adv_work, hci_clear_adv_cache);
        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_DELAYED_WORK(&hdev->power_off, hci_power_off);
        INIT_DELAYED_WORK(&hdev->discov_off, hci_discov_off);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hdev->workqueue = alloc_workqueue(hdev->name, WQ_HIGHPRI | WQ_UNBOUND |
                                                        WQ_MEM_RECLAIM, 1);
        if (!hdev->workqueue) {
                error = -ENOMEM;
                goto err;
        }

        error = hci_add_sysfs(hdev);
        if (error < 0)
                goto err_wqueue;

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        set_bit(HCI_AUTO_OFF, &hdev->flags);
        set_bit(HCI_SETUP, &hdev->flags);
        schedule_work(&hdev->power_on);

        hci_notify(hdev, HCI_DEV_REG);

        return id;

err_wqueue:
        destroy_workqueue(hdev->workqueue);
err:
        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        return error;
}
EXPORT_SYMBOL(hci_register_dev);

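/* Example (illustrative only): the usual driver-side registration
 * sequence, assuming my_open, my_close, my_flush, my_send and
 * my_destruct are the driver's own callbacks:
 *
 *      struct hci_dev *hdev = hci_alloc_dev();
 *      if (!hdev)
 *              return -ENOMEM;
 *
 *      hdev->bus = HCI_USB;
 *      hdev->open = my_open;
 *      hdev->close = my_close;
 *      hdev->flush = my_flush;
 *      hdev->send = my_send;
 *      hdev->destruct = my_destruct;
 *
 *      err = hci_register_dev(hdev);
 *      if (err < 0)
 *              hci_free_dev(hdev);
 */
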
/* Unregister HCI device */
void hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        if (!test_bit(HCI_INIT, &hdev->flags) &&
                                        !test_bit(HCI_SETUP, &hdev->flags)) {
                hci_dev_lock(hdev);
                mgmt_index_removed(hdev);
                hci_dev_unlock(hdev);
        }

        /* mgmt_index_removed should take care of emptying the
         * pending list */
        BUG_ON(!list_empty(&hdev->mgmt_pending));

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_del_sysfs(hdev);

        cancel_delayed_work_sync(&hdev->adv_work);

        destroy_workqueue(hdev->workqueue);

        hci_dev_lock(hdev);
        hci_blacklist_clear(hdev);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_remote_oob_data_clear(hdev);
        hci_adv_entries_clear(hdev);
        hci_dev_unlock(hdev);

        __hci_dev_put(hdev);
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        skb_queue_tail(&hdev->rx_q, skb);
        queue_work(hdev->workqueue, &hdev->rx_work);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

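/* Example (illustrative only): a transport driver delivers a received
 * packet by tagging its type and owner before calling hci_recv_frame():
 *
 *      bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *      skb->dev = (void *) hdev;
 *      hci_recv_frame(skb);
 */
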
1669 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1670                                                   int count, __u8 index)
1671 {
1672         int len = 0;
1673         int hlen = 0;
1674         int remain = count;
1675         struct sk_buff *skb;
1676         struct bt_skb_cb *scb;
1677
1678         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1679                                 index >= NUM_REASSEMBLY)
1680                 return -EILSEQ;
1681
1682         skb = hdev->reassembly[index];
1683
1684         if (!skb) {
1685                 switch (type) {
1686                 case HCI_ACLDATA_PKT:
1687                         len = HCI_MAX_FRAME_SIZE;
1688                         hlen = HCI_ACL_HDR_SIZE;
1689                         break;
1690                 case HCI_EVENT_PKT:
1691                         len = HCI_MAX_EVENT_SIZE;
1692                         hlen = HCI_EVENT_HDR_SIZE;
1693                         break;
1694                 case HCI_SCODATA_PKT:
1695                         len = HCI_MAX_SCO_SIZE;
1696                         hlen = HCI_SCO_HDR_SIZE;
1697                         break;
1698                 }
1699
1700                 skb = bt_skb_alloc(len, GFP_ATOMIC);
1701                 if (!skb)
1702                         return -ENOMEM;
1703
1704                 scb = (void *) skb->cb;
1705                 scb->expect = hlen;
1706                 scb->pkt_type = type;
1707
1708                 skb->dev = (void *) hdev;
1709                 hdev->reassembly[index] = skb;
1710         }
1711
1712         while (count) {
1713                 scb = (void *) skb->cb;
1714                 len = min(scb->expect, (__u16)count);
1715
1716                 memcpy(skb_put(skb, len), data, len);
1717
1718                 count -= len;
1719                 data += len;
1720                 scb->expect -= len;
1721                 remain = count;
1722
1723                 switch (type) {
1724                 case HCI_EVENT_PKT:
1725                         if (skb->len == HCI_EVENT_HDR_SIZE) {
1726                                 struct hci_event_hdr *h = hci_event_hdr(skb);
1727                                 scb->expect = h->plen;
1728
1729                                 if (skb_tailroom(skb) < scb->expect) {
1730                                         kfree_skb(skb);
1731                                         hdev->reassembly[index] = NULL;
1732                                         return -ENOMEM;
1733                                 }
1734                         }
1735                         break;
1736
1737                 case HCI_ACLDATA_PKT:
1738                         if (skb->len  == HCI_ACL_HDR_SIZE) {
1739                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1740                                 scb->expect = __le16_to_cpu(h->dlen);
1741
1742                                 if (skb_tailroom(skb) < scb->expect) {
1743                                         kfree_skb(skb);
1744                                         hdev->reassembly[index] = NULL;
1745                                         return -ENOMEM;
1746                                 }
1747                         }
1748                         break;
1749
1750                 case HCI_SCODATA_PKT:
1751                         if (skb->len == HCI_SCO_HDR_SIZE) {
1752                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1753                                 scb->expect = h->dlen;
1754
1755                                 if (skb_tailroom(skb) < scb->expect) {
1756                                         kfree_skb(skb);
1757                                         hdev->reassembly[index] = NULL;
1758                                         return -ENOMEM;
1759                                 }
1760                         }
1761                         break;
1762                 }
1763
1764                 if (scb->expect == 0) {
1765                         /* Complete frame */
1766
1767                         bt_cb(skb)->pkt_type = type;
1768                         hci_recv_frame(skb);
1769
1770                         hdev->reassembly[index] = NULL;
1771                         return remain;
1772                 }
1773         }
1774
1775         return remain;
1776 }
1777
1778 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1779 {
1780         int rem = 0;
1781
1782         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1783                 return -EILSEQ;
1784
1785         while (count) {
1786                 rem = hci_reassembly(hdev, type, data, count, type - 1);
1787                 if (rem < 0)
1788                         return rem;
1789
1790                 data += (count - rem);
1791                 count = rem;
1792         }
1793
1794         return rem;
1795 }
1796 EXPORT_SYMBOL(hci_recv_fragment);
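
/* Example (hypothetical driver code, for illustration only): a transport
 * that already knows the type of each received chunk would feed it in as
 *
 *	err = hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len);
 *	if (err < 0)
 *		BT_ERR("Corrupted event packet");
 *
 * A negative return means the fragment could not be consumed.
 */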
1797
1798 #define STREAM_REASSEMBLY 0
1799
1800 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1801 {
1802         int type;
1803         int rem = 0;
1804
1805         while (count) {
1806                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1807
1808                 if (!skb) {
1809                         struct { char type; } *pkt;
1810
1811                         /* Start of the frame */
1812                         pkt = data;
1813                         type = pkt->type;
1814
1815                         data++;
1816                         count--;
1817                 } else
1818                         type = bt_cb(skb)->pkt_type;
1819
1820                 rem = hci_reassembly(hdev, type, data, count,
1821                                                         STREAM_REASSEMBLY);
1822                 if (rem < 0)
1823                         return rem;
1824
1825                 data += (count - rem);
1826                 count = rem;
1827         }
1828
1829         return rem;
1830 }
1831 EXPORT_SYMBOL(hci_recv_stream_fragment);
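
/* Example (sketch, for illustration): a byte-stream transport such as a
 * UART running the H:4 protocol can hand its buffers over unparsed and
 * let the core peel off the leading packet-indicator byte itself:
 *
 *	hci_recv_stream_fragment(hdev, buf, len);
 */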
1832
1833 /* ---- Interface to upper protocols ---- */
1834
1835 /* Register/Unregister protocols.
1836  * hci_task_lock is used to ensure that no tasks are running. */
1837 int hci_register_proto(struct hci_proto *hp)
1838 {
1839         int err = 0;
1840
1841         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1842
1843         if (hp->id >= HCI_MAX_PROTO)
1844                 return -EINVAL;
1845
1846         mutex_lock(&hci_task_lock);
1847
1848         if (!hci_proto[hp->id])
1849                 hci_proto[hp->id] = hp;
1850         else
1851                 err = -EEXIST;
1852
1853         mutex_unlock(&hci_task_lock);
1854
1855         return err;
1856 }
1857 EXPORT_SYMBOL(hci_register_proto);
1858
1859 int hci_unregister_proto(struct hci_proto *hp)
1860 {
1861         int err = 0;
1862
1863         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1864
1865         if (hp->id >= HCI_MAX_PROTO)
1866                 return -EINVAL;
1867
1868         mutex_lock(&hci_task_lock);
1869
1870         if (hci_proto[hp->id])
1871                 hci_proto[hp->id] = NULL;
1872         else
1873                 err = -ENOENT;
1874
1875         mutex_unlock(&hci_task_lock);
1876
1877         return err;
1878 }
1879 EXPORT_SYMBOL(hci_unregister_proto);
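
/* Example (sketch, for illustration; the callback name is a placeholder):
 * L2CAP hooks itself up roughly like this:
 *
 *	static struct hci_proto hp = {
 *		.name		= "L2CAP",
 *		.id		= HCI_PROTO_L2CAP,
 *		.recv_acldata	= l2cap_recv_acldata,
 *	};
 *
 *	err = hci_register_proto(&hp);
 */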
1880
1881 int hci_register_cb(struct hci_cb *cb)
1882 {
1883         BT_DBG("%p name %s", cb, cb->name);
1884
1885         write_lock_bh(&hci_cb_list_lock);
1886         list_add(&cb->list, &hci_cb_list);
1887         write_unlock_bh(&hci_cb_list_lock);
1888
1889         return 0;
1890 }
1891 EXPORT_SYMBOL(hci_register_cb);
1892
1893 int hci_unregister_cb(struct hci_cb *cb)
1894 {
1895         BT_DBG("%p name %s", cb, cb->name);
1896
1897         write_lock_bh(&hci_cb_list_lock);
1898         list_del(&cb->list);
1899         write_unlock_bh(&hci_cb_list_lock);
1900
1901         return 0;
1902 }
1903 EXPORT_SYMBOL(hci_unregister_cb);
1904
1905 static int hci_send_frame(struct sk_buff *skb)
1906 {
1907         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1908
1909         if (!hdev) {
1910                 kfree_skb(skb);
1911                 return -ENODEV;
1912         }
1913
1914         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1915
1916         if (atomic_read(&hdev->promisc)) {
1917                 /* Time stamp */
1918                 __net_timestamp(skb);
1919
1920                 hci_send_to_sock(hdev, skb, NULL);
1921         }
1922
1923         /* Get rid of skb owner, prior to sending to the driver. */
1924         skb_orphan(skb);
1925
1926         return hdev->send(skb);
1927 }
1928
1929 /* Send HCI command */
1930 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1931 {
1932         int len = HCI_COMMAND_HDR_SIZE + plen;
1933         struct hci_command_hdr *hdr;
1934         struct sk_buff *skb;
1935
1936         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1937
1938         skb = bt_skb_alloc(len, GFP_ATOMIC);
1939         if (!skb) {
1940                 BT_ERR("%s no memory for command", hdev->name);
1941                 return -ENOMEM;
1942         }
1943
1944         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1945         hdr->opcode = cpu_to_le16(opcode);
1946         hdr->plen   = plen;
1947
1948         if (plen)
1949                 memcpy(skb_put(skb, plen), param, plen);
1950
1951         BT_DBG("skb len %d", skb->len);
1952
1953         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1954         skb->dev = (void *) hdev;
1955
1956         if (test_bit(HCI_INIT, &hdev->flags))
1957                 hdev->init_last_cmd = opcode;
1958
1959         skb_queue_tail(&hdev->cmd_q, skb);
1960         queue_work(hdev->workqueue, &hdev->cmd_work);
1961
1962         return 0;
1963 }
1964
1965 /* Get data from the previously sent command */
1966 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1967 {
1968         struct hci_command_hdr *hdr;
1969
1970         if (!hdev->sent_cmd)
1971                 return NULL;
1972
1973         hdr = (void *) hdev->sent_cmd->data;
1974
1975         if (hdr->opcode != cpu_to_le16(opcode))
1976                 return NULL;
1977
1978         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1979
1980         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1981 }
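
/* Example (sketch, for illustration): a completion handler can recover the
 * parameters of the command it is completing, e.g. for an inquiry:
 *
 *	struct hci_cp_inquiry *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_INQUIRY);
 *	if (!cp)
 *		return;
 */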
1982
1983 /* Send ACL data */
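/* The 16-bit handle field carries the 12-bit connection handle packed
 * together with the 4-bit packet-boundary/broadcast flags:
 * hci_handle_pack() combines them here, and hci_handle()/hci_flags()
 * split them again on the RX path below.
 */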
1984 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1985 {
1986         struct hci_acl_hdr *hdr;
1987         int len = skb->len;
1988
1989         skb_push(skb, HCI_ACL_HDR_SIZE);
1990         skb_reset_transport_header(skb);
1991         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1992         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1993         hdr->dlen   = cpu_to_le16(len);
1994 }
1995
1996 static void hci_queue_acl(struct hci_conn *conn, struct sk_buff_head *queue,
1997                                 struct sk_buff *skb, __u16 flags)
1998 {
1999         struct hci_dev *hdev = conn->hdev;
2000         struct sk_buff *list;
2001
2002         list = skb_shinfo(skb)->frag_list;
2003         if (!list) {
2004                 /* Non-fragmented */
2005                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
2006
2007                 skb_queue_tail(queue, skb);
2008         } else {
2009                 /* Fragmented */
2010                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2011
2012                 skb_shinfo(skb)->frag_list = NULL;
2013
2014                 /* Queue all fragments atomically */
2015                 spin_lock_bh(&queue->lock);
2016
2017                 __skb_queue_tail(queue, skb);
2018
2019                 flags &= ~ACL_START;
2020                 flags |= ACL_CONT;
2021                 do {
2022                         skb = list; list = list->next;
2023
2024                         skb->dev = (void *) hdev;
2025                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2026                         hci_add_acl_hdr(skb, conn->handle, flags);
2027
2028                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
2029
2030                         __skb_queue_tail(queue, skb);
2031                 } while (list);
2032
2033                 spin_unlock_bh(&queue->lock);
2034         }
2035 }
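
/* hci_send_acl() below accepts an skb whose frag_list chains continuation
 * fragments; hci_queue_acl() above keeps ACL_START on the head buffer,
 * re-tags every following fragment ACL_CONT and queues them under the
 * queue lock so the frame cannot interleave with other traffic.
 */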
2036
2037 void hci_send_acl(struct hci_chan *chan, struct sk_buff *skb, __u16 flags)
2038 {
2039         struct hci_conn *conn = chan->conn;
2040         struct hci_dev *hdev = conn->hdev;
2041
2042         BT_DBG("%s chan %p flags 0x%x", hdev->name, chan, flags);
2043
2044         skb->dev = (void *) hdev;
2045         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
2046         hci_add_acl_hdr(skb, conn->handle, flags);
2047
2048         hci_queue_acl(conn, &chan->data_q, skb, flags);
2049
2050         queue_work(hdev->workqueue, &hdev->tx_work);
2051 }
2052 EXPORT_SYMBOL(hci_send_acl);
2053
2054 /* Send SCO data */
2055 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
2056 {
2057         struct hci_dev *hdev = conn->hdev;
2058         struct hci_sco_hdr hdr;
2059
2060         BT_DBG("%s len %d", hdev->name, skb->len);
2061
2062         hdr.handle = cpu_to_le16(conn->handle);
2063         hdr.dlen   = skb->len;
2064
2065         skb_push(skb, HCI_SCO_HDR_SIZE);
2066         skb_reset_transport_header(skb);
2067         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
2068
2069         skb->dev = (void *) hdev;
2070         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
2071
2072         skb_queue_tail(&conn->data_q, skb);
2073         queue_work(hdev->workqueue, &hdev->tx_work);
2074 }
2075 EXPORT_SYMBOL(hci_send_sco);
2076
2077 /* ---- HCI TX task (outgoing data) ---- */
2078
2079 /* HCI Connection scheduler */
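/* hci_low_sent() is a simple fair scheduler: among connections of the
 * requested type with queued data it picks the one with the fewest packets
 * in flight and grants it a quota of (free controller buffers / ready
 * connections), but always at least one packet.
 */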
2080 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
2081 {
2082         struct hci_conn_hash *h = &hdev->conn_hash;
2083         struct hci_conn *conn = NULL, *c;
2084         int num = 0, min = ~0;
2085
2086         /* We don't have to lock the device here. Connections are always
2087          * added and removed with the TX task disabled. */
2088
2089         rcu_read_lock();
2090
2091         list_for_each_entry_rcu(c, &h->list, list) {
2092                 if (c->type != type || skb_queue_empty(&c->data_q))
2093                         continue;
2094
2095                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
2096                         continue;
2097
2098                 num++;
2099
2100                 if (c->sent < min) {
2101                         min  = c->sent;
2102                         conn = c;
2103                 }
2104
2105                 if (hci_conn_num(hdev, type) == num)
2106                         break;
2107         }
2108
2109         rcu_read_unlock();
2110
2111         if (conn) {
2112                 int cnt, q;
2113
2114                 switch (conn->type) {
2115                 case ACL_LINK:
2116                         cnt = hdev->acl_cnt;
2117                         break;
2118                 case SCO_LINK:
2119                 case ESCO_LINK:
2120                         cnt = hdev->sco_cnt;
2121                         break;
2122                 case LE_LINK:
2123                         cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2124                         break;
2125                 default:
2126                         cnt = 0;
2127                         BT_ERR("Unknown link type");
2128                 }
2129
2130                 q = cnt / num;
2131                 *quote = q ? q : 1;
2132         } else
2133                 *quote = 0;
2134
2135         BT_DBG("conn %p quote %d", conn, *quote);
2136         return conn;
2137 }
2138
2139 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
2140 {
2141         struct hci_conn_hash *h = &hdev->conn_hash;
2142         struct hci_conn *c;
2143
2144         BT_ERR("%s link tx timeout", hdev->name);
2145
2146         rcu_read_lock();
2147
2148         /* Kill stalled connections */
2149         list_for_each_entry_rcu(c, &h->list, list) {
2150                 if (c->type == type && c->sent) {
2151                         BT_ERR("%s killing stalled connection %s",
2152                                 hdev->name, batostr(&c->dst));
2153                         hci_acl_disconn(c, 0x13); /* Remote User Terminated Connection */
2154                 }
2155         }
2156
2157         rcu_read_unlock();
2158 }
2159
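/* hci_chan_sent() refines the scheme above for per-connection channels:
 * only channels whose head skb carries the highest priority seen in this
 * round compete, and ties go to the connection with the fewest packets
 * outstanding.
 */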
2160 static inline struct hci_chan *hci_chan_sent(struct hci_dev *hdev, __u8 type,
2161                                                 int *quote)
2162 {
2163         struct hci_conn_hash *h = &hdev->conn_hash;
2164         struct hci_chan *chan = NULL;
2165         int num = 0, min = ~0, cur_prio = 0;
2166         struct hci_conn *conn;
2167         int cnt, q, conn_num = 0;
2168
2169         BT_DBG("%s", hdev->name);
2170
2171         rcu_read_lock();
2172
2173         list_for_each_entry_rcu(conn, &h->list, list) {
2174                 struct hci_chan *tmp;
2175
2176                 if (conn->type != type)
2177                         continue;
2178
2179                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2180                         continue;
2181
2182                 conn_num++;
2183
2184                 list_for_each_entry_rcu(tmp, &conn->chan_list, list) {
2185                         struct sk_buff *skb;
2186
2187                         if (skb_queue_empty(&tmp->data_q))
2188                                 continue;
2189
2190                         skb = skb_peek(&tmp->data_q);
2191                         if (skb->priority < cur_prio)
2192                                 continue;
2193
2194                         if (skb->priority > cur_prio) {
2195                                 num = 0;
2196                                 min = ~0;
2197                                 cur_prio = skb->priority;
2198                         }
2199
2200                         num++;
2201
2202                         if (conn->sent < min) {
2203                                 min  = conn->sent;
2204                                 chan = tmp;
2205                         }
2206                 }
2207
2208                 if (hci_conn_num(hdev, type) == conn_num)
2209                         break;
2210         }
2211
2212         rcu_read_unlock();
2213
2214         if (!chan)
2215                 return NULL;
2216
2217         switch (chan->conn->type) {
2218         case ACL_LINK:
2219                 cnt = hdev->acl_cnt;
2220                 break;
2221         case SCO_LINK:
2222         case ESCO_LINK:
2223                 cnt = hdev->sco_cnt;
2224                 break;
2225         case LE_LINK:
2226                 cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
2227                 break;
2228         default:
2229                 cnt = 0;
2230                 BT_ERR("Unknown link type");
2231         }
2232
2233         q = cnt / num;
2234         *quote = q ? q : 1;
2235         BT_DBG("chan %p quote %d", chan, *quote);
2236         return chan;
2237 }
2238
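/* Anti-starvation pass: after a TX round, any channel that sent nothing
 * has the priority of its head skb promoted to HCI_PRIO_MAX - 1, so
 * lower-priority channels cannot be starved indefinitely by busier
 * higher-priority ones.
 */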
2239 static void hci_prio_recalculate(struct hci_dev *hdev, __u8 type)
2240 {
2241         struct hci_conn_hash *h = &hdev->conn_hash;
2242         struct hci_conn *conn;
2243         int num = 0;
2244
2245         BT_DBG("%s", hdev->name);
2246
2247         rcu_read_lock();
2248
2249         list_for_each_entry_rcu(conn, &h->list, list) {
2250                 struct hci_chan *chan;
2251
2252                 if (conn->type != type)
2253                         continue;
2254
2255                 if (conn->state != BT_CONNECTED && conn->state != BT_CONFIG)
2256                         continue;
2257
2258                 num++;
2259
2260                 list_for_each_entry_rcu(chan, &conn->chan_list, list) {
2261                         struct sk_buff *skb;
2262
2263                         if (chan->sent) {
2264                                 chan->sent = 0;
2265                                 continue;
2266                         }
2267
2268                         if (skb_queue_empty(&chan->data_q))
2269                                 continue;
2270
2271                         skb = skb_peek(&chan->data_q);
2272                         if (skb->priority >= HCI_PRIO_MAX - 1)
2273                                 continue;
2274
2275                         skb->priority = HCI_PRIO_MAX - 1;
2276
2277                         BT_DBG("chan %p skb %p promoted to %d", chan, skb,
2278                                                                 skb->priority);
2279                 }
2280
2281                 if (hci_conn_num(hdev, type) == num)
2282                         break;
2283         }
2284
2285         rcu_read_unlock();
2286
2287 }
2288
2289 static inline void hci_sched_acl(struct hci_dev *hdev)
2290 {
2291         struct hci_chan *chan;
2292         struct sk_buff *skb;
2293         int quote;
2294         unsigned int cnt;
2295
2296         BT_DBG("%s", hdev->name);
2297
2298         if (!hci_conn_num(hdev, ACL_LINK))
2299                 return;
2300
2301         if (!test_bit(HCI_RAW, &hdev->flags)) {
2302                 /* ACL tx timeout must be longer than the maximum
2303                  * link supervision timeout (40.9 seconds) */
2304                 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
2305                         hci_link_tx_to(hdev, ACL_LINK);
2306         }
2307
2308         cnt = hdev->acl_cnt;
2309
2310         while (hdev->acl_cnt &&
2311                         (chan = hci_chan_sent(hdev, ACL_LINK, &quote))) {
2312                 u32 priority = (skb_peek(&chan->data_q))->priority;
2313                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2314                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2315                                         skb->len, skb->priority);
2316
2317                         /* Stop if priority has changed */
2318                         if (skb->priority < priority)
2319                                 break;
2320
2321                         skb = skb_dequeue(&chan->data_q);
2322
2323                         hci_conn_enter_active_mode(chan->conn,
2324                                                 bt_cb(skb)->force_active);
2325
2326                         hci_send_frame(skb);
2327                         hdev->acl_last_tx = jiffies;
2328
2329                         hdev->acl_cnt--;
2330                         chan->sent++;
2331                         chan->conn->sent++;
2332                 }
2333         }
2334
2335         if (cnt != hdev->acl_cnt)
2336                 hci_prio_recalculate(hdev, ACL_LINK);
2337 }
2338
2339 /* Schedule SCO */
2340 static inline void hci_sched_sco(struct hci_dev *hdev)
2341 {
2342         struct hci_conn *conn;
2343         struct sk_buff *skb;
2344         int quote;
2345
2346         BT_DBG("%s", hdev->name);
2347
2348         if (!hci_conn_num(hdev, SCO_LINK))
2349                 return;
2350
2351         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
2352                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2353                         BT_DBG("skb %p len %d", skb, skb->len);
2354                         hci_send_frame(skb);
2355
2356                         conn->sent++;
2357                         if (conn->sent == ~0)
2358                                 conn->sent = 0;
2359                 }
2360         }
2361 }
2362
2363 static inline void hci_sched_esco(struct hci_dev *hdev)
2364 {
2365         struct hci_conn *conn;
2366         struct sk_buff *skb;
2367         int quote;
2368
2369         BT_DBG("%s", hdev->name);
2370
2371         if (!hci_conn_num(hdev, ESCO_LINK))
2372                 return;
2373
2374         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
2375                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
2376                         BT_DBG("skb %p len %d", skb, skb->len);
2377                         hci_send_frame(skb);
2378
2379                         conn->sent++;
2380                         if (conn->sent == ~0)
2381                                 conn->sent = 0;
2382                 }
2383         }
2384 }
2385
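/* LE may share the ACL buffer pool: controllers that report no dedicated
 * LE buffers (le_pkts == 0) have LE traffic accounted against acl_cnt
 * instead of le_cnt, which is why the remaining credits are written back
 * to the matching counter at the end.
 */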
2386 static inline void hci_sched_le(struct hci_dev *hdev)
2387 {
2388         struct hci_chan *chan;
2389         struct sk_buff *skb;
2390         int quote, cnt, tmp;
2391
2392         BT_DBG("%s", hdev->name);
2393
2394         if (!hci_conn_num(hdev, LE_LINK))
2395                 return;
2396
2397         if (!test_bit(HCI_RAW, &hdev->flags)) {
2398                 /* LE tx timeout must be longer than the maximum
2399                  * link supervision timeout (40.9 seconds) */
2400                 if (!hdev->le_cnt && hdev->le_pkts &&
2401                                 time_after(jiffies, hdev->le_last_tx + HZ * 45))
2402                         hci_link_tx_to(hdev, LE_LINK);
2403         }
2404
2405         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
2406         tmp = cnt;
2407         while (cnt && (chan = hci_chan_sent(hdev, LE_LINK, &quote))) {
2408                 u32 priority = (skb_peek(&chan->data_q))->priority;
2409                 while (quote-- && (skb = skb_peek(&chan->data_q))) {
2410                         BT_DBG("chan %p skb %p len %d priority %u", chan, skb,
2411                                         skb->len, skb->priority);
2412
2413                         /* Stop if priority has changed */
2414                         if (skb->priority < priority)
2415                                 break;
2416
2417                         skb = skb_dequeue(&chan->data_q);
2418
2419                         hci_send_frame(skb);
2420                         hdev->le_last_tx = jiffies;
2421
2422                         cnt--;
2423                         chan->sent++;
2424                         chan->conn->sent++;
2425                 }
2426         }
2427
2428         if (hdev->le_pkts)
2429                 hdev->le_cnt = cnt;
2430         else
2431                 hdev->acl_cnt = cnt;
2432
2433         if (cnt != tmp)
2434                 hci_prio_recalculate(hdev, LE_LINK);
2435 }
2436
2437 static void hci_tx_work(struct work_struct *work)
2438 {
2439         struct hci_dev *hdev = container_of(work, struct hci_dev, tx_work);
2440         struct sk_buff *skb;
2441
2442         mutex_lock(&hci_task_lock);
2443
2444         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
2445                 hdev->sco_cnt, hdev->le_cnt);
2446
2447         /* Schedule queues and send pending frames to the HCI driver */
2448
2449         hci_sched_acl(hdev);
2450
2451         hci_sched_sco(hdev);
2452
2453         hci_sched_esco(hdev);
2454
2455         hci_sched_le(hdev);
2456
2457         /* Send next queued raw (unknown type) packet */
2458         while ((skb = skb_dequeue(&hdev->raw_q)))
2459                 hci_send_frame(skb);
2460
2461         mutex_unlock(&hci_task_lock);
2462 }
2463
2464 /* ----- HCI RX task (incoming data processing) ----- */
2465
2466 /* ACL data packet */
2467 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2468 {
2469         struct hci_acl_hdr *hdr = (void *) skb->data;
2470         struct hci_conn *conn;
2471         __u16 handle, flags;
2472
2473         skb_pull(skb, HCI_ACL_HDR_SIZE);
2474
2475         handle = __le16_to_cpu(hdr->handle);
2476         flags  = hci_flags(handle);
2477         handle = hci_handle(handle);
2478
2479         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
2480
2481         hdev->stat.acl_rx++;
2482
2483         hci_dev_lock(hdev);
2484         conn = hci_conn_hash_lookup_handle(hdev, handle);
2485         hci_dev_unlock(hdev);
2486
2487         if (conn) {
2488                 register struct hci_proto *hp;
2489
2490                 hci_conn_enter_active_mode(conn, BT_POWER_FORCE_ACTIVE_OFF);
2491
2492                 /* Send to upper protocol */
2493                 hp = hci_proto[HCI_PROTO_L2CAP];
2494                 if (hp && hp->recv_acldata) {
2495                         hp->recv_acldata(conn, skb, flags);
2496                         return;
2497                 }
2498         } else {
2499                 BT_ERR("%s ACL packet for unknown connection handle %d",
2500                         hdev->name, handle);
2501         }
2502
2503         kfree_skb(skb);
2504 }
2505
2506 /* SCO data packet */
2507 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
2508 {
2509         struct hci_sco_hdr *hdr = (void *) skb->data;
2510         struct hci_conn *conn;
2511         __u16 handle;
2512
2513         skb_pull(skb, HCI_SCO_HDR_SIZE);
2514
2515         handle = __le16_to_cpu(hdr->handle);
2516
2517         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
2518
2519         hdev->stat.sco_rx++;
2520
2521         hci_dev_lock(hdev);
2522         conn = hci_conn_hash_lookup_handle(hdev, handle);
2523         hci_dev_unlock(hdev);
2524
2525         if (conn) {
2526                 register struct hci_proto *hp;
2527
2528                 /* Send to upper protocol */
2529                 hp = hci_proto[HCI_PROTO_SCO];
2530                 if (hp && hp->recv_scodata) {
2531                         hp->recv_scodata(conn, skb);
2532                         return;
2533                 }
2534         } else {
2535                 BT_ERR("%s SCO packet for unknown connection handle %d",
2536                         hdev->name, handle);
2537         }
2538
2539         kfree_skb(skb);
2540 }
2541
2542 static void hci_rx_work(struct work_struct *work)
2543 {
2544         struct hci_dev *hdev = container_of(work, struct hci_dev, rx_work);
2545         struct sk_buff *skb;
2546
2547         BT_DBG("%s", hdev->name);
2548
2549         mutex_lock(&hci_task_lock);
2550
2551         while ((skb = skb_dequeue(&hdev->rx_q))) {
2552                 if (atomic_read(&hdev->promisc)) {
2553                         /* Send copy to the sockets */
2554                         hci_send_to_sock(hdev, skb, NULL);
2555                 }
2556
2557                 if (test_bit(HCI_RAW, &hdev->flags)) {
2558                         kfree_skb(skb);
2559                         continue;
2560                 }
2561
2562                 if (test_bit(HCI_INIT, &hdev->flags)) {
2563                         /* Don't process data packets in this state. */
2564                         switch (bt_cb(skb)->pkt_type) {
2565                         case HCI_ACLDATA_PKT:
2566                         case HCI_SCODATA_PKT:
2567                                 kfree_skb(skb);
2568                                 continue;
2569                         }
2570                 }
2571
2572                 /* Process frame */
2573                 switch (bt_cb(skb)->pkt_type) {
2574                 case HCI_EVENT_PKT:
2575                         BT_DBG("%s Event packet", hdev->name);
2576                         hci_event_packet(hdev, skb);
2577                         break;
2578
2579                 case HCI_ACLDATA_PKT:
2580                         BT_DBG("%s ACL data packet", hdev->name);
2581                         hci_acldata_packet(hdev, skb);
2582                         break;
2583
2584                 case HCI_SCODATA_PKT:
2585                         BT_DBG("%s SCO data packet", hdev->name);
2586                         hci_scodata_packet(hdev, skb);
2587                         break;
2588
2589                 default:
2590                         kfree_skb(skb);
2591                         break;
2592                 }
2593         }
2594
2595         mutex_unlock(&hci_task_lock);
2596 }
2597
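/* Command flow control: cmd_cnt is the number of commands the controller
 * will currently accept. A clone of each submitted command is kept in
 * sent_cmd so hci_sent_cmd_data() can retrieve its parameters once the
 * completion event arrives; if cloning fails, the command is requeued and
 * the work item rescheduled.
 */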
2598 static void hci_cmd_work(struct work_struct *work)
2599 {
2600         struct hci_dev *hdev = container_of(work, struct hci_dev, cmd_work);
2601         struct sk_buff *skb;
2602
2603         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2604
2605         /* Send queued commands */
2606         if (atomic_read(&hdev->cmd_cnt)) {
2607                 skb = skb_dequeue(&hdev->cmd_q);
2608                 if (!skb)
2609                         return;
2610
2611                 kfree_skb(hdev->sent_cmd);
2612
2613                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2614                 if (hdev->sent_cmd) {
2615                         atomic_dec(&hdev->cmd_cnt);
2616                         hci_send_frame(skb);
2617                         if (test_bit(HCI_RESET, &hdev->flags))
2618                                 del_timer(&hdev->cmd_timer);
2619                         else
2620                                 mod_timer(&hdev->cmd_timer,
2621                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2622                 } else {
2623                         skb_queue_head(&hdev->cmd_q, skb);
2624                         queue_work(hdev->workqueue, &hdev->cmd_work);
2625                 }
2626         }
2627 }
2628
2629 int hci_do_inquiry(struct hci_dev *hdev, u8 length)
2630 {
2631         /* General inquiry access code (GIAC) */
2632         u8 lap[3] = { 0x33, 0x8b, 0x9e };
2633         struct hci_cp_inquiry cp;
2634
2635         BT_DBG("%s", hdev->name);
2636
2637         if (test_bit(HCI_INQUIRY, &hdev->flags))
2638                 return -EINPROGRESS;
2639
2640         memset(&cp, 0, sizeof(cp));
2641         memcpy(&cp.lap, lap, sizeof(cp.lap));
2642         cp.length  = length;
2643
2644         return hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
2645 }
2646
2647 int hci_cancel_inquiry(struct hci_dev *hdev)
2648 {
2649         BT_DBG("%s", hdev->name);
2650
2651         if (!test_bit(HCI_INQUIRY, &hdev->flags))
2652                 return -EPERM;
2653
2654         return hci_send_cmd(hdev, HCI_OP_INQUIRY_CANCEL, 0, NULL);
2655 }
2656
2657 module_param(enable_hs, bool, 0644);
2658 MODULE_PARM_DESC(enable_hs, "Enable High Speed");