Bluetooth: clean up hci code
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

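/*
 * hci_task_lock serializes the RX and TX tasklets (readers) against
 * protocol registration and unregistration (writers); see
 * hci_register_proto() and hci_unregister_proto() below.
 */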
static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

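/*
 * Request state machine: __hci_request() marks the request HCI_REQ_PEND
 * and sleeps on req_wait_q; the event handler completes it through
 * hci_req_complete() (HCI_REQ_DONE) and device teardown cancels it
 * through hci_req_cancel() (HCI_REQ_CANCELED).
 */
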
/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
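
/*
 * A request function just queues the needed commands; completion is
 * signalled by the event handler. For example, hci_dev_cmd() below
 * enables scanning with
 *
 *	hci_request(hdev, hci_scan_req, dr.dev_opt,
 *			msecs_to_jiffies(HCI_INIT_TIMEOUT));
 *
 * where dev_opt holds the scan mode to write.
 */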

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Page timeout ~20 secs */
        param = cpu_to_le16(0x8000);
        hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}
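
/*
 * A device returned by hci_dev_get() is reference-counted; every
 * successful lookup must be balanced with hci_dev_put(), as the ioctl
 * helpers below do.
 */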

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (!ie) {
                /* Entry not in the cache. Add new one. */
                ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
                if (!ie)
                        return;

                ie->next = cache->list;
                cache->list = ie;
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }

        /* For an unlimited number of responses, use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and then
         * copy it to user space.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
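
/*
 * hci_inquiry() backs the HCIINQUIRY ioctl. A minimal user-space sketch
 * (assuming the usual <net/bluetooth/hci.h> definitions; error handling
 * omitted):
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = { .ir = { .dev_id = 0, .length = 8, .num_rsp = 8 } };
 *
 *	ioctl(dd, HCIINQUIRY, &buf);
 *
 * where dd is an open HCI socket and num_rsp bounds the result count.
 */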

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices for now */
        if (hdev->dev_type != HCI_BREDR)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                /* __hci_request(hdev, hci_reset_req, 0, HZ); */
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}
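
/*
 * Teardown order matters: the RX and TX tasklets are killed first so no
 * new work is queued, the controller is reset with a short timeout, and
 * only then is the command tasklet killed and the queues dropped.
 */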

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->blacklist);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hdev->workqueue = create_singlethread_workqueue(hdev->name);
        if (!hdev->workqueue)
                goto nomem;

        hci_register_sysfs(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        hci_notify(hdev, HCI_DEV_REG);

        return id;

nomem:
        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
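
/*
 * A transport driver registers itself roughly along these lines (a
 * sketch; the my_* callbacks are placeholders and error handling is
 * omitted):
 *
 *	struct hci_dev *hdev = hci_alloc_dev();
 *
 *	hdev->bus      = HCI_USB;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *
 *	err = hci_register_dev(hdev);
 *
 * open, close and destruct are mandatory (checked above); send receives
 * complete frames from hci_send_frame().
 */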

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_unregister_sysfs(hdev);

        destroy_workqueue(hdev->workqueue);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        tasklet_schedule(&hdev->rx_task);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index, gfp_t gfp_mask)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
                                index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, gfp_mask);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                skb->dev = (void *) hdev;
                hdev->reassembly[index] = skb;
        }

        while (count) {
                scb = (void *) skb->cb;
                len = min(scb->expect, (__u16)count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}
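
/*
 * hci_reassembly() consumes up to 'count' bytes and returns how many
 * bytes are left over, or a negative error. A completed frame is handed
 * to hci_recv_frame() as soon as scb->expect drops to zero, so callers
 * loop until the remainder is zero, as the two helpers below do.
 */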

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        int rem = 0;

        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                rem = hci_reassembly(hdev, type, data, count,
                                                type - 1, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
        int type;
        int rem = 0;

        while (count) {
                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

                if (!skb) {
                        struct { char type; } *pkt;

                        /* Start of the frame */
                        pkt = data;
                        type = pkt->type;

                        data++;
                        count--;
                } else
                        type = bt_cb(skb)->pkt_type;

                rem = hci_reassembly(hdev, type, data,
                                        count, STREAM_REASSEMBLY, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);
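
/*
 * The stream variant is meant for transports that deliver a byte stream
 * with a one-byte packet-type indicator in front of each frame (e.g.
 * UART/H4-style drivers): the type byte is peeled off here and the rest
 * is fed through the same per-type reassembly slot.
 */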

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);

        return 0;
}
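
/*
 * For example, hci_init_req() above clears the event filter with
 *
 *	__u8 flt_type = HCI_FLT_CLEAR_ALL;
 *	hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
 *
 * The command is only queued here; hci_cmd_task() sends it once the
 * controller has a free command credit (cmd_cnt).
 */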

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *) skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        list = skb_shinfo(skb)->frag_list;
        if (!list) {
                /* Non-fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);
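
/*
 * Fragmented ACL writes arrive as one skb with the remaining fragments
 * chained on its frag_list: the head is tagged ACL_START, each fragment
 * ACL_CONT, and all of them are queued atomically under data_q.lock so
 * the TX task never interleaves another frame into the sequence.
 */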

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
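
/*
 * The scheduler picks, per link type, the connection with the least
 * unacknowledged data (smallest 'sent') and grants it a quota of
 * cnt / num buffers (at least one), which keeps busy links from
 * starving quiet ones.
 */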

static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_conn_enter_active_mode(conn);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                hci_conn_enter_active_mode(conn);

                /* Send to upper protocol */
                hp = hci_proto[HCI_PROTO_L2CAP];
                if (hp && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                hp = hci_proto[HCI_PROTO_SCO];
                if (hp && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                kfree_skb(hdev->sent_cmd);

                hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
                if (hdev->sent_cmd) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        tasklet_schedule(&hdev->cmd_task);
                }
        }
}
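
/*
 * Command flow control: a cmd_cnt credit is consumed when a command goes
 * out and is replenished by the event handler (hci_event.c) when the
 * controller reports Command Complete/Status; the one-second watchdog
 * above restores a credit if the controller stops responding.
 */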