Bluetooth: Implement hci_reassembly helper to reassemble RX packets
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <net/sock.h>

#include <asm/system.h>
#include <asm/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);
static void hci_notify(struct hci_dev *hdev, int event);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, int result)
{
        BT_DBG("%s result 0x%2.2x", hdev->name, result);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}

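/* Typical use of the request machinery (e.g. from the ioctl helpers
 * below): the req callback queues HCI commands, and the caller sleeps
 * until hci_req_complete() is invoked from event processing or the
 * timeout expires:
 *
 *	err = hci_request(hdev, hci_scan_req, dr.dev_opt,
 *				msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */
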
static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Page timeout ~20 secs */
        param = cpu_to_le16(0x8000);
        hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        if (!(e = hci_inquiry_cache_lookup(hdev, &data->bdaddr))) {
                /* Entry not in the cache. Add new one. */
                if (!(e = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC)))
                        return;
                e->next     = cache->list;
                cache->list = e;
        }

        memcpy(&e->data, data, sizeof(*data));
        e->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(ir.dev_id)))
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                        inquiry_cache_empty(hdev) ||
                                        ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);
        if (do_inquiry && (err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo)) < 0)
                goto done;

        /* for unlimited number of responses we will use buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep. Therefore we allocate temp buffer and then
         * copy it to the user space.
         */
        if (!(buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL))) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices for now */
        if (hdev->dev_type != HCI_BREDR)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);

                //__hci_request(hdev, hci_reset_req, 0, HZ);
                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_blacklist_clear(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        if (!(hdev = hci_dev_get(dev)))
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(dr.dev_id)))
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        if (!(dl = kzalloc(size, GFP_KERNEL)))
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;
                hdev = list_entry(p, struct hci_dev, list);
                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;
                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        if (!(hdev = hci_dev_get(di.dev_id)))
                return -ENODEV;

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
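        /* The list is kept sorted by id, so this scan stops at the first
         * gap; e.g. with hci0, hci1 and hci3 registered, the new device
         * becomes hci2 and is linked in after hci1, keeping the list
         * sorted. */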
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->blacklist.list);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hdev->workqueue = create_singlethread_workqueue(hdev->name);
        if (!hdev->workqueue)
                goto nomem;

        hci_register_sysfs(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        hci_notify(hdev, HCI_DEV_REG);

        return id;

nomem:
        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_unregister_sysfs(hdev);

        destroy_workqueue(hdev->workqueue);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        tasklet_schedule(&hdev->rx_task);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);

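/* Reassemble one HCI packet of the given type from a byte stream that
 * may arrive in arbitrarily sized chunks.
 *
 * Partial packets are parked in hdev->reassembly[index] between calls.
 * The helper first gathers the packet header, takes the payload length
 * from it (plen/dlen), then gathers the payload. A completed frame is
 * passed to hci_recv_frame() and the number of unconsumed input bytes
 * is returned so the caller can feed them back in; 0 means everything
 * was consumed. Returns -EILSEQ for a bad type or index, and -ENOMEM on
 * allocation failure or payload overflow.
 */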
static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index, gfp_t gfp_mask)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
                                index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, gfp_mask);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                skb->dev = (void *) hdev;
                hdev->reassembly[index] = skb;
        }

        while (count) {
                scb = (void *) skb->cb;
                len = min(scb->expect, (__u16)count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}

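/* A minimal sketch of how a streaming transport driver could drive
 * hci_reassembly(); hci_uart_recv() and the STREAM_REASSEMBLY slot
 * index are hypothetical names for illustration, not APIs defined in
 * this file:
 *
 *	static int hci_uart_recv(struct hci_dev *hdev, void *data, int count)
 *	{
 *		while (count) {
 *			int rem, type;
 *			struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
 *
 *			if (!skb) {
 *				// Start of a packet: the first octet is the
 *				// HCI packet indicator
 *				type = *((__u8 *) data);
 *				data++; count--;
 *			} else {
 *				// Continuation: the type was remembered in
 *				// the parked skb's control block
 *				type = bt_cb(skb)->pkt_type;
 *			}
 *
 *			rem = hci_reassembly(hdev, type, data, count,
 *						STREAM_REASSEMBLY, GFP_ATOMIC);
 *			if (rem < 0)
 *				return rem;
 *
 *			data += count - rem;	// skip the consumed bytes
 *			count = rem;
 *		}
 *
 *		return 0;
 *	}
 */
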
/* Receive packet type fragment */
#define __reassembly(hdev, type)  ((hdev)->reassembly[(type) - 1])

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                struct sk_buff *skb = __reassembly(hdev, type);
                struct { int expect; } *scb;
                int len = 0;

                if (!skb) {
                        /* Start of the frame */

                        switch (type) {
                        case HCI_EVENT_PKT:
                                if (count >= HCI_EVENT_HDR_SIZE) {
                                        struct hci_event_hdr *h = data;
                                        len = HCI_EVENT_HDR_SIZE + h->plen;
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_ACLDATA_PKT:
                                if (count >= HCI_ACL_HDR_SIZE) {
                                        struct hci_acl_hdr *h = data;
                                        len = HCI_ACL_HDR_SIZE + __le16_to_cpu(h->dlen);
                                } else
                                        return -EILSEQ;
                                break;

                        case HCI_SCODATA_PKT:
                                if (count >= HCI_SCO_HDR_SIZE) {
                                        struct hci_sco_hdr *h = data;
                                        len = HCI_SCO_HDR_SIZE + h->dlen;
                                } else
                                        return -EILSEQ;
                                break;
                        }

                        skb = bt_skb_alloc(len, GFP_ATOMIC);
                        if (!skb) {
                                BT_ERR("%s no memory for packet", hdev->name);
                                return -ENOMEM;
                        }

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = type;

                        __reassembly(hdev, type) = skb;

                        scb = (void *) skb->cb;
                        scb->expect = len;
                } else {
                        /* Continuation */

                        scb = (void *) skb->cb;
                        len = scb->expect;
                }

                len = min(len, count);

                memcpy(skb_put(skb, len), data, len);

                scb->expect -= len;

                if (scb->expect == 0) {
                        /* Complete frame */

                        __reassembly(hdev, type) = NULL;

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);
                }

                count -= len; data += len;
        }

        return 0;
}
EXPORT_SYMBOL(hci_recv_fragment);

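/* Note that hci_recv_fragment() requires the first chunk of each packet
 * to contain at least the full packet header (it returns -EILSEQ
 * otherwise), since it sizes the allocation from the length field. The
 * hci_reassembly() helper above instead allocates a maximum-size buffer
 * up front and can therefore accept the stream one byte at a time. */
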
/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);

        return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

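/* The ACL handle field carries the packet boundary and broadcast flags
 * in its upper bits next to the 12-bit connection handle. Assuming the
 * usual hci_handle_pack() encoding of (handle | flags << 12), a first
 * fragment (ACL_START) on handle 0x002a goes out as 0x202a and is split
 * apart again on RX by hci_handle()/hci_flags() below. */
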
void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);

        if (!(list = skb_shinfo(skb)->frag_list)) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
                int q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}

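/* Example of the quota computed above: with hdev->acl_cnt = 8 free
 * controller buffers and three ACL connections that have queued data,
 * the connection with the fewest packets in flight (lowest c->sent) is
 * picked and granted q = 8 / 3 = 2 sends this round, so a busy link
 * cannot starve the others. */
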
static inline void hci_acl_tx_to(struct hci_dev *hdev)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct list_head *p;
        struct hci_conn  *c;

        BT_ERR("%s ACL tx timeout", hdev->name);

        /* Kill stalled connections */
        list_for_each(p, &h->list) {
                c = list_entry(p, struct hci_conn, list);
                if (c->type == ACL_LINK && c->sent) {
                        BT_ERR("%s killing stalled ACL connection %s",
                                hdev->name, batostr(&c->dst));
                        hci_acl_disconn(c, 0x13);
                }
        }
}

static inline void hci_sched_acl(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                /* ACL tx timeout must be longer than maximum
                 * link supervision timeout (40.9 seconds) */
                if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
                        hci_acl_tx_to(hdev);
        }

        while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);

                        hci_conn_enter_active_mode(conn);

                        hci_send_frame(skb);
                        hdev->acl_last_tx = jiffies;

                        hdev->acl_cnt--;
                        conn->sent++;
                }
        }
}

/* Schedule SCO */
static inline void hci_sched_sco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static inline void hci_sched_esco(struct hci_dev *hdev)
{
        struct hci_conn *conn;
        struct sk_buff *skb;
        int quote;

        BT_DBG("%s", hdev->name);

        while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
                while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
                        BT_DBG("skb %p len %d", skb, skb->len);
                        hci_send_frame(skb);

                        conn->sent++;
                        if (conn->sent == ~0)
                                conn->sent = 0;
                }
        }
}

static void hci_tx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        read_lock(&hci_task_lock);

        BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);

        /* Schedule queues and send stuff to HCI driver */

        hci_sched_acl(hdev);

        hci_sched_sco(hdev);

        hci_sched_esco(hdev);

        /* Send next queued raw (unknown type) packet */
        while ((skb = skb_dequeue(&hdev->raw_q)))
                hci_send_frame(skb);

        read_unlock(&hci_task_lock);
}

/* ----- HCI RX task (incoming data processing) ----- */

/* ACL data packet */
static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_acl_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle, flags;

        skb_pull(skb, HCI_ACL_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);
        flags  = hci_flags(handle);
        handle = hci_handle(handle);

        BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);

        hdev->stat.acl_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                hci_conn_enter_active_mode(conn);

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_L2CAP]) && hp->recv_acldata) {
                        hp->recv_acldata(conn, skb, flags);
                        return;
                }
        } else {
                BT_ERR("%s ACL packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

/* SCO data packet */
static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
{
        struct hci_sco_hdr *hdr = (void *) skb->data;
        struct hci_conn *conn;
        __u16 handle;

        skb_pull(skb, HCI_SCO_HDR_SIZE);

        handle = __le16_to_cpu(hdr->handle);

        BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);

        hdev->stat.sco_rx++;

        hci_dev_lock(hdev);
        conn = hci_conn_hash_lookup_handle(hdev, handle);
        hci_dev_unlock(hdev);

        if (conn) {
                register struct hci_proto *hp;

                /* Send to upper protocol */
                if ((hp = hci_proto[HCI_PROTO_SCO]) && hp->recv_scodata) {
                        hp->recv_scodata(conn, skb);
                        return;
                }
        } else {
                BT_ERR("%s SCO packet for unknown connection handle %d",
                        hdev->name, handle);
        }

        kfree_skb(skb);
}

static void hci_rx_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s", hdev->name);

        read_lock(&hci_task_lock);

        while ((skb = skb_dequeue(&hdev->rx_q))) {
                if (atomic_read(&hdev->promisc)) {
                        /* Send copy to the sockets */
                        hci_send_to_sock(hdev, skb);
                }

                if (test_bit(HCI_RAW, &hdev->flags)) {
                        kfree_skb(skb);
                        continue;
                }

                if (test_bit(HCI_INIT, &hdev->flags)) {
                        /* Don't process data packets in this state. */
                        switch (bt_cb(skb)->pkt_type) {
                        case HCI_ACLDATA_PKT:
                        case HCI_SCODATA_PKT:
                                kfree_skb(skb);
                                continue;
                        }
                }

                /* Process frame */
                switch (bt_cb(skb)->pkt_type) {
                case HCI_EVENT_PKT:
                        hci_event_packet(hdev, skb);
                        break;

                case HCI_ACLDATA_PKT:
                        BT_DBG("%s ACL data packet", hdev->name);
                        hci_acldata_packet(hdev, skb);
                        break;

                case HCI_SCODATA_PKT:
                        BT_DBG("%s SCO data packet", hdev->name);
                        hci_scodata_packet(hdev, skb);
                        break;

                default:
                        kfree_skb(skb);
                        break;
                }
        }

        read_unlock(&hci_task_lock);
}

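/* Command flow control: cmd_cnt holds the number of command packets the
 * controller is currently willing to accept. It is decremented for each
 * command handed to the driver below and credited back by the event
 * handling code (hci_event.c) from the Num_HCI_Command_Packets field of
 * Command Complete/Command Status events. If no credit arrives within a
 * second of the last command, the controller is assumed stuck and the
 * counter is forced back to 1. */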
static void hci_cmd_task(unsigned long arg)
{
        struct hci_dev *hdev = (struct hci_dev *) arg;
        struct sk_buff *skb;

        BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));

        if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
                BT_ERR("%s command tx timeout", hdev->name);
                atomic_set(&hdev->cmd_cnt, 1);
        }

        /* Send queued commands */
        if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
                kfree_skb(hdev->sent_cmd);

                if ((hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC))) {
                        atomic_dec(&hdev->cmd_cnt);
                        hci_send_frame(skb);
                        hdev->cmd_last_tx = jiffies;
                } else {
                        skb_queue_head(&hdev->cmd_q, skb);
                        tasklet_schedule(&hdev->cmd_task);
                }
        }
}