Bluetooth: Fix HCI_RESET command synchronization
net/bluetooth/hci_core.c
1 /*
2    BlueZ - Bluetooth protocol stack for Linux
3    Copyright (C) 2000-2001 Qualcomm Incorporated
4
5    Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>
6
7    This program is free software; you can redistribute it and/or modify
8    it under the terms of the GNU General Public License version 2 as
9    published by the Free Software Foundation;
10
11    THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
12    OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
13    FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
14    IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
15    CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
16    WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
17    ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
18    OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
19
20    ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
21    COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
22    SOFTWARE IS DISCLAIMED.
23 */
24
25 /* Bluetooth HCI core. */
26
27 #include <linux/jiffies.h>
28 #include <linux/module.h>
29 #include <linux/kmod.h>
30
31 #include <linux/types.h>
32 #include <linux/errno.h>
33 #include <linux/kernel.h>
34 #include <linux/sched.h>
35 #include <linux/slab.h>
36 #include <linux/poll.h>
37 #include <linux/fcntl.h>
38 #include <linux/init.h>
39 #include <linux/skbuff.h>
40 #include <linux/workqueue.h>
41 #include <linux/interrupt.h>
42 #include <linux/notifier.h>
43 #include <linux/rfkill.h>
44 #include <net/sock.h>
45
46 #include <asm/system.h>
47 #include <linux/uaccess.h>
48 #include <asm/unaligned.h>
49
50 #include <net/bluetooth/bluetooth.h>
51 #include <net/bluetooth/hci_core.h>
52
53 static void hci_cmd_task(unsigned long arg);
54 static void hci_rx_task(unsigned long arg);
55 static void hci_tx_task(unsigned long arg);
56 static void hci_notify(struct hci_dev *hdev, int event);
57
58 static DEFINE_RWLOCK(hci_task_lock);
59
60 /* HCI device list */
61 LIST_HEAD(hci_dev_list);
62 DEFINE_RWLOCK(hci_dev_list_lock);
63
64 /* HCI callback list */
65 LIST_HEAD(hci_cb_list);
66 DEFINE_RWLOCK(hci_cb_list_lock);
67
68 /* HCI protocols */
69 #define HCI_MAX_PROTO   2
70 struct hci_proto *hci_proto[HCI_MAX_PROTO];
71
72 /* HCI notifiers list */
73 static ATOMIC_NOTIFIER_HEAD(hci_notifier);
74
75 /* ---- HCI notifications ---- */
76
77 int hci_register_notifier(struct notifier_block *nb)
78 {
79         return atomic_notifier_chain_register(&hci_notifier, nb);
80 }
81
82 int hci_unregister_notifier(struct notifier_block *nb)
83 {
84         return atomic_notifier_chain_unregister(&hci_notifier, nb);
85 }
86
87 static void hci_notify(struct hci_dev *hdev, int event)
88 {
89         atomic_notifier_call_chain(&hci_notifier, event, hdev);
90 }
91
92 /* ---- HCI requests ---- */
93
94 void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
95 {
96         BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);
97
98         /* If the request has set req_last_cmd (typical for multi-HCI
99          * command requests), check whether the completed command matches
100          * it, and if not just return. Single HCI command requests
101          * typically leave req_last_cmd as 0. */
102         if (hdev->req_last_cmd && cmd != hdev->req_last_cmd)
103                 return;
104
105         if (hdev->req_status == HCI_REQ_PEND) {
106                 hdev->req_result = result;
107                 hdev->req_status = HCI_REQ_DONE;
108                 wake_up_interruptible(&hdev->req_wait_q);
109         }
110 }
111
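/*
 * Illustrative only: a multi-command request such as hci_init_req()
 * below sets hdev->req_last_cmd to the opcode of its final command
 * (HCI_OP_WRITE_CA_TIMEOUT), so completions of the intermediate
 * commands fall through the check above and only the final command
 * wakes the waiter in __hci_request().
 */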
112 static void hci_req_cancel(struct hci_dev *hdev, int err)
113 {
114         BT_DBG("%s err 0x%2.2x", hdev->name, err);
115
116         if (hdev->req_status == HCI_REQ_PEND) {
117                 hdev->req_result = err;
118                 hdev->req_status = HCI_REQ_CANCELED;
119                 wake_up_interruptible(&hdev->req_wait_q);
120         }
121 }
122
123 /* Execute request and wait for completion. */
124 static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
125                                 unsigned long opt, __u32 timeout)
126 {
127         DECLARE_WAITQUEUE(wait, current);
128         int err = 0;
129
130         BT_DBG("%s start", hdev->name);
131
132         hdev->req_status = HCI_REQ_PEND;
133
134         add_wait_queue(&hdev->req_wait_q, &wait);
135         set_current_state(TASK_INTERRUPTIBLE);
136
137         req(hdev, opt);
138         schedule_timeout(timeout);
139
140         remove_wait_queue(&hdev->req_wait_q, &wait);
141
142         if (signal_pending(current))
143                 return -EINTR;
144
145         switch (hdev->req_status) {
146         case HCI_REQ_DONE:
147                 err = -bt_err(hdev->req_result);
148                 break;
149
150         case HCI_REQ_CANCELED:
151                 err = -hdev->req_result;
152                 break;
153
154         default:
155                 err = -ETIMEDOUT;
156                 break;
157         }
158
159         hdev->req_last_cmd = hdev->req_status = hdev->req_result = 0;
160
161         BT_DBG("%s end: err %d", hdev->name, err);
162
163         return err;
164 }
165
166 static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
167                                 unsigned long opt, __u32 timeout)
168 {
169         int ret;
170
171         if (!test_bit(HCI_UP, &hdev->flags))
172                 return -ENETDOWN;
173
174         /* Serialize all requests */
175         hci_req_lock(hdev);
176         ret = __hci_request(hdev, req, opt, timeout);
177         hci_req_unlock(hdev);
178
179         return ret;
180 }
181
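/*
 * Illustrative caller pattern (a sketch; see hci_inquiry() below for a
 * real user): build a request callback that queues one or more HCI
 * commands, then run it synchronously with a timeout:
 *
 *	static void my_scan_req(struct hci_dev *hdev, unsigned long opt)
 *	{
 *		__u8 scan = opt;
 *		hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
 *	}
 *
 *	err = hci_request(hdev, my_scan_req, SCAN_PAGE | SCAN_INQUIRY,
 *			  msecs_to_jiffies(HCI_INIT_TIMEOUT));
 */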
182 static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
183 {
184         BT_DBG("%s %ld", hdev->name, opt);
185
186         /* Reset device */
187         set_bit(HCI_RESET, &hdev->flags);
188         hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
189 }
190
191 static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
192 {
193         struct sk_buff *skb;
194         __le16 param;
195         __u8 flt_type;
196
197         BT_DBG("%s %ld", hdev->name, opt);
198
199         /* Driver initialization */
200
201         /* Special commands */
202         while ((skb = skb_dequeue(&hdev->driver_init))) {
203                 bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
204                 skb->dev = (void *) hdev;
205
206                 skb_queue_tail(&hdev->cmd_q, skb);
207                 tasklet_schedule(&hdev->cmd_task);
208         }
209         skb_queue_purge(&hdev->driver_init);
210
211         /* Mandatory initialization */
212
213         /* Reset */
214         if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks)) {
215                 set_bit(HCI_RESET, &hdev->flags);
216                 hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
217         }
218
219         /* Read Local Supported Features */
220         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);
221
222         /* Read Local Version */
223         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);
224
225         /* Read Buffer Size (ACL mtu, max pkt, etc.) */
226         hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);
227
228 #if 0
229         /* Host buffer size */
230         {
231                 struct hci_cp_host_buffer_size cp;
232                 cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
233                 cp.sco_mtu = HCI_MAX_SCO_SIZE;
234                 cp.acl_max_pkt = cpu_to_le16(0xffff);
235                 cp.sco_max_pkt = cpu_to_le16(0xffff);
236                 hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
237         }
238 #endif
239
240         /* Read BD Address */
241         hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);
242
243         /* Read Class of Device */
244         hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);
245
246         /* Read Local Name */
247         hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);
248
249         /* Read Voice Setting */
250         hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);
251
252         /* Optional initialization */
253
254         /* Clear Event Filters */
255         flt_type = HCI_FLT_CLEAR_ALL;
256         hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);
257
258         /* Page timeout: 0x8000 slots * 0.625 ms ~= 20.5 secs */
259         param = cpu_to_le16(0x8000);
260         hci_send_cmd(hdev, HCI_OP_WRITE_PG_TIMEOUT, 2, &param);
261
262         /* Connection accept timeout: 0x7d00 slots * 0.625 ms = 20 secs */
263         param = cpu_to_le16(0x7d00);
264         hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);
265
266         hdev->req_last_cmd = HCI_OP_WRITE_CA_TIMEOUT;
267 }
268
269 static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
270 {
271         __u8 scan = opt;
272
273         BT_DBG("%s %x", hdev->name, scan);
274
275         /* Inquiry and Page scans */
276         hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
277 }
278
279 static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
280 {
281         __u8 auth = opt;
282
283         BT_DBG("%s %x", hdev->name, auth);
284
285         /* Authentication */
286         hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
287 }
288
289 static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
290 {
291         __u8 encrypt = opt;
292
293         BT_DBG("%s %x", hdev->name, encrypt);
294
295         /* Encryption */
296         hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
297 }
298
299 static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
300 {
301         __le16 policy = cpu_to_le16(opt);
302
303         BT_DBG("%s %x", hdev->name, policy);
304
305         /* Default link policy */
306         hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
307 }
308
309 /* Get HCI device by index.
310  * Device is held on return. */
311 struct hci_dev *hci_dev_get(int index)
312 {
313         struct hci_dev *hdev = NULL;
314         struct list_head *p;
315
316         BT_DBG("%d", index);
317
318         if (index < 0)
319                 return NULL;
320
321         read_lock(&hci_dev_list_lock);
322         list_for_each(p, &hci_dev_list) {
323                 struct hci_dev *d = list_entry(p, struct hci_dev, list);
324                 if (d->id == index) {
325                         hdev = hci_dev_hold(d);
326                         break;
327                 }
328         }
329         read_unlock(&hci_dev_list_lock);
330         return hdev;
331 }
332
333 /* ---- Inquiry support ---- */
334 static void inquiry_cache_flush(struct hci_dev *hdev)
335 {
336         struct inquiry_cache *cache = &hdev->inq_cache;
337         struct inquiry_entry *next  = cache->list, *e;
338
339         BT_DBG("cache %p", cache);
340
341         cache->list = NULL;
342         while ((e = next)) {
343                 next = e->next;
344                 kfree(e);
345         }
346 }
347
348 struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
349 {
350         struct inquiry_cache *cache = &hdev->inq_cache;
351         struct inquiry_entry *e;
352
353         BT_DBG("cache %p, %s", cache, batostr(bdaddr));
354
355         for (e = cache->list; e; e = e->next)
356                 if (!bacmp(&e->data.bdaddr, bdaddr))
357                         break;
358         return e;
359 }
360
361 void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
362 {
363         struct inquiry_cache *cache = &hdev->inq_cache;
364         struct inquiry_entry *ie;
365
366         BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));
367
368         ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
369         if (!ie) {
370                 /* Entry not in the cache. Add new one. */
371                 ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
372                 if (!ie)
373                         return;
374
375                 ie->next = cache->list;
376                 cache->list = ie;
377         }
378
379         memcpy(&ie->data, data, sizeof(*data));
380         ie->timestamp = jiffies;
381         cache->timestamp = jiffies;
382 }
383
384 static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
385 {
386         struct inquiry_cache *cache = &hdev->inq_cache;
387         struct inquiry_info *info = (struct inquiry_info *) buf;
388         struct inquiry_entry *e;
389         int copied = 0;
390
391         for (e = cache->list; e && copied < num; e = e->next, copied++) {
392                 struct inquiry_data *data = &e->data;
393                 bacpy(&info->bdaddr, &data->bdaddr);
394                 info->pscan_rep_mode    = data->pscan_rep_mode;
395                 info->pscan_period_mode = data->pscan_period_mode;
396                 info->pscan_mode        = data->pscan_mode;
397                 memcpy(info->dev_class, data->dev_class, 3);
398                 info->clock_offset      = data->clock_offset;
399                 info++;
400         }
401
402         BT_DBG("cache %p, copied %d", cache, copied);
403         return copied;
404 }
405
406 static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
407 {
408         struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
409         struct hci_cp_inquiry cp;
410
411         BT_DBG("%s", hdev->name);
412
413         if (test_bit(HCI_INQUIRY, &hdev->flags))
414                 return;
415
416         /* Start Inquiry */
417         memcpy(&cp.lap, &ir->lap, 3);
418         cp.length  = ir->length;
419         cp.num_rsp = ir->num_rsp;
420         hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
421 }
422
423 int hci_inquiry(void __user *arg)
424 {
425         __u8 __user *ptr = arg;
426         struct hci_inquiry_req ir;
427         struct hci_dev *hdev;
428         int err = 0, do_inquiry = 0, max_rsp;
429         long timeo;
430         __u8 *buf;
431
432         if (copy_from_user(&ir, ptr, sizeof(ir)))
433                 return -EFAULT;
434
435         if (!(hdev = hci_dev_get(ir.dev_id)))
436                 return -ENODEV;
437
438         hci_dev_lock_bh(hdev);
439         if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
440                                 inquiry_cache_empty(hdev) ||
441                                 ir.flags & IREQ_CACHE_FLUSH) {
442                 inquiry_cache_flush(hdev);
443                 do_inquiry = 1;
444         }
445         hci_dev_unlock_bh(hdev);
446
447         timeo = ir.length * msecs_to_jiffies(2000);
448
449         if (do_inquiry) {
450                 err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
451                 if (err < 0)
452                         goto done;
453         }
454
455         /* for an unlimited number of responses, use a buffer with 255 entries */
456         max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;
457
458         /* cache_dump can't sleep, so allocate a temporary buffer here and
459          * copy it to user space afterwards.
460          */
461         buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
462         if (!buf) {
463                 err = -ENOMEM;
464                 goto done;
465         }
466
467         hci_dev_lock_bh(hdev);
468         ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
469         hci_dev_unlock_bh(hdev);
470
471         BT_DBG("num_rsp %d", ir.num_rsp);
472
473         if (!copy_to_user(ptr, &ir, sizeof(ir))) {
474                 ptr += sizeof(ir);
475                 if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
476                                         ir.num_rsp))
477                         err = -EFAULT;
478         } else
479                 err = -EFAULT;
480
481         kfree(buf);
482
483 done:
484         hci_dev_put(hdev);
485         return err;
486 }
487
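/*
 * Illustrative userspace counterpart (an assumption about the caller,
 * not part of this file): HCIINQUIRY is normally issued on a raw HCI
 * socket with a hci_inquiry_req immediately followed by room for the
 * inquiry_info results, matching the two copy_to_user() calls above:
 *
 *	struct {
 *		struct hci_inquiry_req ir;
 *		struct inquiry_info info[8];
 *	} buf = {
 *		.ir = {
 *			.dev_id  = 0,
 *			.flags   = IREQ_CACHE_FLUSH,
 *			.lap     = { 0x33, 0x8b, 0x9e },	/* GIAC */
 *			.length  = 8,
 *			.num_rsp = 8,
 *		},
 *	};
 *
 *	if (ioctl(dd, HCIINQUIRY, &buf) < 0)	/* dd: HCI socket fd */
 *		perror("HCIINQUIRY");
 */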
488 /* ---- HCI ioctl helpers ---- */
489
490 int hci_dev_open(__u16 dev)
491 {
492         struct hci_dev *hdev;
493         int ret = 0;
494
495         if (!(hdev = hci_dev_get(dev)))
496                 return -ENODEV;
497
498         BT_DBG("%s %p", hdev->name, hdev);
499
500         hci_req_lock(hdev);
501
502         if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
503                 ret = -ERFKILL;
504                 goto done;
505         }
506
507         if (test_bit(HCI_UP, &hdev->flags)) {
508                 ret = -EALREADY;
509                 goto done;
510         }
511
512         if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
513                 set_bit(HCI_RAW, &hdev->flags);
514
515         /* Treat all non-BR/EDR controllers as raw devices for now */
516         if (hdev->dev_type != HCI_BREDR)
517                 set_bit(HCI_RAW, &hdev->flags);
518
519         if (hdev->open(hdev)) {
520                 ret = -EIO;
521                 goto done;
522         }
523
524         if (!test_bit(HCI_RAW, &hdev->flags)) {
525                 atomic_set(&hdev->cmd_cnt, 1);
526                 set_bit(HCI_INIT, &hdev->flags);
527
528                 //__hci_request(hdev, hci_reset_req, 0, HZ);
529                 ret = __hci_request(hdev, hci_init_req, 0,
530                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
531
532                 clear_bit(HCI_INIT, &hdev->flags);
533         }
534
535         if (!ret) {
536                 hci_dev_hold(hdev);
537                 set_bit(HCI_UP, &hdev->flags);
538                 hci_notify(hdev, HCI_DEV_UP);
539         } else {
540                 /* Init failed, cleanup */
541                 tasklet_kill(&hdev->rx_task);
542                 tasklet_kill(&hdev->tx_task);
543                 tasklet_kill(&hdev->cmd_task);
544
545                 skb_queue_purge(&hdev->cmd_q);
546                 skb_queue_purge(&hdev->rx_q);
547
548                 if (hdev->flush)
549                         hdev->flush(hdev);
550
551                 if (hdev->sent_cmd) {
552                         kfree_skb(hdev->sent_cmd);
553                         hdev->sent_cmd = NULL;
554                 }
555
556                 hdev->close(hdev);
557                 hdev->flags = 0;
558         }
559
560 done:
561         hci_req_unlock(hdev);
562         hci_dev_put(hdev);
563         return ret;
564 }
565
566 static int hci_dev_do_close(struct hci_dev *hdev)
567 {
568         BT_DBG("%s %p", hdev->name, hdev);
569
570         hci_req_cancel(hdev, ENODEV);
571         hci_req_lock(hdev);
572
573         if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
574                 hci_req_unlock(hdev);
575                 return 0;
576         }
577
578         /* Kill RX and TX tasks */
579         tasklet_kill(&hdev->rx_task);
580         tasklet_kill(&hdev->tx_task);
581
582         hci_dev_lock_bh(hdev);
583         inquiry_cache_flush(hdev);
584         hci_conn_hash_flush(hdev);
585         hci_dev_unlock_bh(hdev);
586
587         hci_notify(hdev, HCI_DEV_DOWN);
588
589         if (hdev->flush)
590                 hdev->flush(hdev);
591
592         /* Reset device */
593         skb_queue_purge(&hdev->cmd_q);
594         atomic_set(&hdev->cmd_cnt, 1);
595         if (!test_bit(HCI_RAW, &hdev->flags)) {
596                 set_bit(HCI_INIT, &hdev->flags);
597                 __hci_request(hdev, hci_reset_req, 0,
598                                         msecs_to_jiffies(250));
599                 clear_bit(HCI_INIT, &hdev->flags);
600         }
601
602         /* Kill cmd task */
603         tasklet_kill(&hdev->cmd_task);
604
605         /* Drop queues */
606         skb_queue_purge(&hdev->rx_q);
607         skb_queue_purge(&hdev->cmd_q);
608         skb_queue_purge(&hdev->raw_q);
609
610         /* Drop last sent command */
611         if (hdev->sent_cmd) {
612                 kfree_skb(hdev->sent_cmd);
613                 hdev->sent_cmd = NULL;
614         }
615
616         /* After this point our queues are empty
617          * and no tasks are scheduled. */
618         hdev->close(hdev);
619
620         /* Clear flags */
621         hdev->flags = 0;
622
623         hci_req_unlock(hdev);
624
625         hci_dev_put(hdev);
626         return 0;
627 }
628
629 int hci_dev_close(__u16 dev)
630 {
631         struct hci_dev *hdev;
632         int err;
633
634         hdev = hci_dev_get(dev);
635         if (!hdev)
636                 return -ENODEV;
637         err = hci_dev_do_close(hdev);
638         hci_dev_put(hdev);
639         return err;
640 }
641
642 int hci_dev_reset(__u16 dev)
643 {
644         struct hci_dev *hdev;
645         int ret = 0;
646
647         hdev = hci_dev_get(dev);
648         if (!hdev)
649                 return -ENODEV;
650
651         hci_req_lock(hdev);
652         tasklet_disable(&hdev->tx_task);
653
654         if (!test_bit(HCI_UP, &hdev->flags))
655                 goto done;
656
657         /* Drop queues */
658         skb_queue_purge(&hdev->rx_q);
659         skb_queue_purge(&hdev->cmd_q);
660
661         hci_dev_lock_bh(hdev);
662         inquiry_cache_flush(hdev);
663         hci_conn_hash_flush(hdev);
664         hci_dev_unlock_bh(hdev);
665
666         if (hdev->flush)
667                 hdev->flush(hdev);
668
669         atomic_set(&hdev->cmd_cnt, 1);
670         hdev->acl_cnt = 0; hdev->sco_cnt = 0;
671
672         if (!test_bit(HCI_RAW, &hdev->flags))
673                 ret = __hci_request(hdev, hci_reset_req, 0,
674                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
675
676 done:
677         tasklet_enable(&hdev->tx_task);
678         hci_req_unlock(hdev);
679         hci_dev_put(hdev);
680         return ret;
681 }
682
683 int hci_dev_reset_stat(__u16 dev)
684 {
685         struct hci_dev *hdev;
686         int ret = 0;
687
688         hdev = hci_dev_get(dev);
689         if (!hdev)
690                 return -ENODEV;
691
692         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
693
694         hci_dev_put(hdev);
695
696         return ret;
697 }
698
699 int hci_dev_cmd(unsigned int cmd, void __user *arg)
700 {
701         struct hci_dev *hdev;
702         struct hci_dev_req dr;
703         int err = 0;
704
705         if (copy_from_user(&dr, arg, sizeof(dr)))
706                 return -EFAULT;
707
708         hdev = hci_dev_get(dr.dev_id);
709         if (!hdev)
710                 return -ENODEV;
711
712         switch (cmd) {
713         case HCISETAUTH:
714                 err = hci_request(hdev, hci_auth_req, dr.dev_opt,
715                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
716                 break;
717
718         case HCISETENCRYPT:
719                 if (!lmp_encrypt_capable(hdev)) {
720                         err = -EOPNOTSUPP;
721                         break;
722                 }
723
724                 if (!test_bit(HCI_AUTH, &hdev->flags)) {
725                         /* Auth must be enabled first */
726                         err = hci_request(hdev, hci_auth_req, dr.dev_opt,
727                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
728                         if (err)
729                                 break;
730                 }
731
732                 err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
733                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
734                 break;
735
736         case HCISETSCAN:
737                 err = hci_request(hdev, hci_scan_req, dr.dev_opt,
738                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
739                 break;
740
741         case HCISETLINKPOL:
742                 err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
743                                         msecs_to_jiffies(HCI_INIT_TIMEOUT));
744                 break;
745
746         case HCISETLINKMODE:
747                 hdev->link_mode = ((__u16) dr.dev_opt) &
748                                         (HCI_LM_MASTER | HCI_LM_ACCEPT);
749                 break;
750
751         case HCISETPTYPE:
752                 hdev->pkt_type = (__u16) dr.dev_opt;
753                 break;
754
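        /*
         * For the two MTU ioctls below, dev_opt packs two 16-bit values
         * side by side: the first __u16 carries the packet count and the
         * second the MTU, hence the __u16 pointer arithmetic.
         */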
755         case HCISETACLMTU:
756                 hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
757                 hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
758                 break;
759
760         case HCISETSCOMTU:
761                 hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
762                 hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
763                 break;
764
765         default:
766                 err = -EINVAL;
767                 break;
768         }
769
770         hci_dev_put(hdev);
771         return err;
772 }
773
774 int hci_get_dev_list(void __user *arg)
775 {
776         struct hci_dev_list_req *dl;
777         struct hci_dev_req *dr;
778         struct list_head *p;
779         int n = 0, size, err;
780         __u16 dev_num;
781
782         if (get_user(dev_num, (__u16 __user *) arg))
783                 return -EFAULT;
784
785         if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
786                 return -EINVAL;
787
788         size = sizeof(*dl) + dev_num * sizeof(*dr);
789
790         dl = kzalloc(size, GFP_KERNEL);
791         if (!dl)
792                 return -ENOMEM;
793
794         dr = dl->dev_req;
795
796         read_lock_bh(&hci_dev_list_lock);
797         list_for_each(p, &hci_dev_list) {
798                 struct hci_dev *hdev;
799                 hdev = list_entry(p, struct hci_dev, list);
800                 (dr + n)->dev_id  = hdev->id;
801                 (dr + n)->dev_opt = hdev->flags;
802                 if (++n >= dev_num)
803                         break;
804         }
805         read_unlock_bh(&hci_dev_list_lock);
806
807         dl->dev_num = n;
808         size = sizeof(*dl) + n * sizeof(*dr);
809
810         err = copy_to_user(arg, dl, size);
811         kfree(dl);
812
813         return err ? -EFAULT : 0;
814 }
815
816 int hci_get_dev_info(void __user *arg)
817 {
818         struct hci_dev *hdev;
819         struct hci_dev_info di;
820         int err = 0;
821
822         if (copy_from_user(&di, arg, sizeof(di)))
823                 return -EFAULT;
824
825         hdev = hci_dev_get(di.dev_id);
826         if (!hdev)
827                 return -ENODEV;
828
829         strcpy(di.name, hdev->name);
830         di.bdaddr   = hdev->bdaddr;
831         di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
832         di.flags    = hdev->flags;
833         di.pkt_type = hdev->pkt_type;
834         di.acl_mtu  = hdev->acl_mtu;
835         di.acl_pkts = hdev->acl_pkts;
836         di.sco_mtu  = hdev->sco_mtu;
837         di.sco_pkts = hdev->sco_pkts;
838         di.link_policy = hdev->link_policy;
839         di.link_mode   = hdev->link_mode;
840
841         memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
842         memcpy(&di.features, &hdev->features, sizeof(di.features));
843
844         if (copy_to_user(arg, &di, sizeof(di)))
845                 err = -EFAULT;
846
847         hci_dev_put(hdev);
848
849         return err;
850 }
851
852 /* ---- Interface to HCI drivers ---- */
853
854 static int hci_rfkill_set_block(void *data, bool blocked)
855 {
856         struct hci_dev *hdev = data;
857
858         BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);
859
860         if (!blocked)
861                 return 0;
862
863         hci_dev_do_close(hdev);
864
865         return 0;
866 }
867
868 static const struct rfkill_ops hci_rfkill_ops = {
869         .set_block = hci_rfkill_set_block,
870 };
871
872 /* Alloc HCI device */
873 struct hci_dev *hci_alloc_dev(void)
874 {
875         struct hci_dev *hdev;
876
877         hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
878         if (!hdev)
879                 return NULL;
880
881         skb_queue_head_init(&hdev->driver_init);
882
883         return hdev;
884 }
885 EXPORT_SYMBOL(hci_alloc_dev);
886
887 /* Free HCI device */
888 void hci_free_dev(struct hci_dev *hdev)
889 {
890         skb_queue_purge(&hdev->driver_init);
891
892         /* will be freed via the device release callback */
893         put_device(&hdev->dev);
894 }
895 EXPORT_SYMBOL(hci_free_dev);
896
897 /* Register HCI device */
898 int hci_register_dev(struct hci_dev *hdev)
899 {
900         struct list_head *head = &hci_dev_list, *p;
901         int i, id = 0;
902
903         BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
904                                                 hdev->bus, hdev->owner);
905
906         if (!hdev->open || !hdev->close || !hdev->destruct)
907                 return -EINVAL;
908
909         write_lock_bh(&hci_dev_list_lock);
910
911         /* Find first available device id */
912         list_for_each(p, &hci_dev_list) {
913                 if (list_entry(p, struct hci_dev, list)->id != id)
914                         break;
915                 head = p; id++;
916         }
917
918         sprintf(hdev->name, "hci%d", id);
919         hdev->id = id;
920         list_add(&hdev->list, head);
921
922         atomic_set(&hdev->refcnt, 1);
923         spin_lock_init(&hdev->lock);
924
925         hdev->flags = 0;
926         hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
927         hdev->esco_type = (ESCO_HV1);
928         hdev->link_mode = (HCI_LM_ACCEPT);
929
930         hdev->idle_timeout = 0;
931         hdev->sniff_max_interval = 800;
932         hdev->sniff_min_interval = 80;
933
934         tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
935         tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
936         tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);
937
938         skb_queue_head_init(&hdev->rx_q);
939         skb_queue_head_init(&hdev->cmd_q);
940         skb_queue_head_init(&hdev->raw_q);
941
942         for (i = 0; i < NUM_REASSEMBLY; i++)
943                 hdev->reassembly[i] = NULL;
944
945         init_waitqueue_head(&hdev->req_wait_q);
946         mutex_init(&hdev->req_lock);
947
948         inquiry_cache_init(hdev);
949
950         hci_conn_hash_init(hdev);
951
952         INIT_LIST_HEAD(&hdev->blacklist);
953
954         memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));
955
956         atomic_set(&hdev->promisc, 0);
957
958         write_unlock_bh(&hci_dev_list_lock);
959
960         hdev->workqueue = create_singlethread_workqueue(hdev->name);
961         if (!hdev->workqueue)
962                 goto nomem;
963
964         hci_register_sysfs(hdev);
965
966         hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
967                                 RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
968         if (hdev->rfkill) {
969                 if (rfkill_register(hdev->rfkill) < 0) {
970                         rfkill_destroy(hdev->rfkill);
971                         hdev->rfkill = NULL;
972                 }
973         }
974
975         mgmt_index_added(hdev->id);
976         hci_notify(hdev, HCI_DEV_REG);
977
978         return id;
979
980 nomem:
981         write_lock_bh(&hci_dev_list_lock);
982         list_del(&hdev->list);
983         write_unlock_bh(&hci_dev_list_lock);
984
985         return -ENOMEM;
986 }
987 EXPORT_SYMBOL(hci_register_dev);
988
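/*
 * Illustrative driver-side usage (a minimal sketch with hypothetical
 * callbacks; real drivers also set up their transport before this):
 *
 *	hdev = hci_alloc_dev();
 *	if (!hdev)
 *		return -ENOMEM;
 *
 *	hdev->bus      = HCI_VIRTUAL;
 *	hdev->owner    = THIS_MODULE;
 *	hdev->open     = my_open;
 *	hdev->close    = my_close;
 *	hdev->send     = my_send;
 *	hdev->destruct = my_destruct;
 *
 *	if (hci_register_dev(hdev) < 0)
 *		hci_free_dev(hdev);
 */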
989 /* Unregister HCI device */
990 int hci_unregister_dev(struct hci_dev *hdev)
991 {
992         int i;
993
994         BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);
995
996         write_lock_bh(&hci_dev_list_lock);
997         list_del(&hdev->list);
998         write_unlock_bh(&hci_dev_list_lock);
999
1000         hci_dev_do_close(hdev);
1001
1002         for (i = 0; i < NUM_REASSEMBLY; i++)
1003                 kfree_skb(hdev->reassembly[i]);
1004
1005         mgmt_index_removed(hdev->id);
1006         hci_notify(hdev, HCI_DEV_UNREG);
1007
1008         if (hdev->rfkill) {
1009                 rfkill_unregister(hdev->rfkill);
1010                 rfkill_destroy(hdev->rfkill);
1011         }
1012
1013         hci_unregister_sysfs(hdev);
1014
1015         destroy_workqueue(hdev->workqueue);
1016
1017         hci_dev_lock_bh(hdev);
1018         hci_blacklist_clear(hdev);
1019         hci_dev_unlock_bh(hdev);
1020
1021         __hci_dev_put(hdev);
1022
1023         return 0;
1024 }
1025 EXPORT_SYMBOL(hci_unregister_dev);
1026
1027 /* Suspend HCI device */
1028 int hci_suspend_dev(struct hci_dev *hdev)
1029 {
1030         hci_notify(hdev, HCI_DEV_SUSPEND);
1031         return 0;
1032 }
1033 EXPORT_SYMBOL(hci_suspend_dev);
1034
1035 /* Resume HCI device */
1036 int hci_resume_dev(struct hci_dev *hdev)
1037 {
1038         hci_notify(hdev, HCI_DEV_RESUME);
1039         return 0;
1040 }
1041 EXPORT_SYMBOL(hci_resume_dev);
1042
1043 /* Receive frame from HCI drivers */
1044 int hci_recv_frame(struct sk_buff *skb)
1045 {
1046         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1047         if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
1048                                 && !test_bit(HCI_INIT, &hdev->flags))) {
1049                 kfree_skb(skb);
1050                 return -ENXIO;
1051         }
1052
1053         /* Incoming skb */
1054         bt_cb(skb)->incoming = 1;
1055
1056         /* Time stamp */
1057         __net_timestamp(skb);
1058
1059         /* Queue frame for rx task */
1060         skb_queue_tail(&hdev->rx_q, skb);
1061         tasklet_schedule(&hdev->rx_task);
1062
1063         return 0;
1064 }
1065 EXPORT_SYMBOL(hci_recv_frame);
1066
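/*
 * Illustrative driver-side usage (a sketch): a driver that already has
 * a complete frame tags it and hands it over exactly as the reassembly
 * code below does:
 *
 *	bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
 *	skb->dev = (void *) hdev;
 *	hci_recv_frame(skb);
 *
 * Drivers that only see a byte stream use hci_recv_fragment() or
 * hci_recv_stream_fragment() below instead.
 */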
1067 static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
1068                           int count, __u8 index, gfp_t gfp_mask)
1069 {
1070         int len = 0;
1071         int hlen = 0;
1072         int remain = count;
1073         struct sk_buff *skb;
1074         struct bt_skb_cb *scb;
1075
1076         if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
1077                                 index >= NUM_REASSEMBLY)
1078                 return -EILSEQ;
1079
1080         skb = hdev->reassembly[index];
1081
1082         if (!skb) {
1083                 switch (type) {
1084                 case HCI_ACLDATA_PKT:
1085                         len = HCI_MAX_FRAME_SIZE;
1086                         hlen = HCI_ACL_HDR_SIZE;
1087                         break;
1088                 case HCI_EVENT_PKT:
1089                         len = HCI_MAX_EVENT_SIZE;
1090                         hlen = HCI_EVENT_HDR_SIZE;
1091                         break;
1092                 case HCI_SCODATA_PKT:
1093                         len = HCI_MAX_SCO_SIZE;
1094                         hlen = HCI_SCO_HDR_SIZE;
1095                         break;
1096                 }
1097
1098                 skb = bt_skb_alloc(len, gfp_mask);
1099                 if (!skb)
1100                         return -ENOMEM;
1101
1102                 scb = (void *) skb->cb;
1103                 scb->expect = hlen;
1104                 scb->pkt_type = type;
1105
1106                 skb->dev = (void *) hdev;
1107                 hdev->reassembly[index] = skb;
1108         }
1109
1110         while (count) {
1111                 scb = (void *) skb->cb;
1112                 len = min(scb->expect, (__u16)count);
1113
1114                 memcpy(skb_put(skb, len), data, len);
1115
1116                 count -= len;
1117                 data += len;
1118                 scb->expect -= len;
1119                 remain = count;
1120
1121                 switch (type) {
1122                 case HCI_EVENT_PKT:
1123                         if (skb->len == HCI_EVENT_HDR_SIZE) {
1124                                 struct hci_event_hdr *h = hci_event_hdr(skb);
1125                                 scb->expect = h->plen;
1126
1127                                 if (skb_tailroom(skb) < scb->expect) {
1128                                         kfree_skb(skb);
1129                                         hdev->reassembly[index] = NULL;
1130                                         return -ENOMEM;
1131                                 }
1132                         }
1133                         break;
1134
1135                 case HCI_ACLDATA_PKT:
1136                         if (skb->len  == HCI_ACL_HDR_SIZE) {
1137                                 struct hci_acl_hdr *h = hci_acl_hdr(skb);
1138                                 scb->expect = __le16_to_cpu(h->dlen);
1139
1140                                 if (skb_tailroom(skb) < scb->expect) {
1141                                         kfree_skb(skb);
1142                                         hdev->reassembly[index] = NULL;
1143                                         return -ENOMEM;
1144                                 }
1145                         }
1146                         break;
1147
1148                 case HCI_SCODATA_PKT:
1149                         if (skb->len == HCI_SCO_HDR_SIZE) {
1150                                 struct hci_sco_hdr *h = hci_sco_hdr(skb);
1151                                 scb->expect = h->dlen;
1152
1153                                 if (skb_tailroom(skb) < scb->expect) {
1154                                         kfree_skb(skb);
1155                                         hdev->reassembly[index] = NULL;
1156                                         return -ENOMEM;
1157                                 }
1158                         }
1159                         break;
1160                 }
1161
1162                 if (scb->expect == 0) {
1163                         /* Complete frame */
1164
1165                         bt_cb(skb)->pkt_type = type;
1166                         hci_recv_frame(skb);
1167
1168                         hdev->reassembly[index] = NULL;
1169                         return remain;
1170                 }
1171         }
1172
1173         return remain;
1174 }
1175
1176 int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
1177 {
1178         int rem = 0;
1179
1180         if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
1181                 return -EILSEQ;
1182
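        /*
         * Packet types ACL (0x02), SCO (0x03) and EVENT (0x04) map to
         * reassembly slots 1..3 via "type - 1"; slot 0 is reserved for
         * stream reassembly (see STREAM_REASSEMBLY below).
         */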
1183         while (count) {
1184                 rem = hci_reassembly(hdev, type, data, count,
1185                                                 type - 1, GFP_ATOMIC);
1186                 if (rem < 0)
1187                         return rem;
1188
1189                 data += (count - rem);
1190                 count = rem;
1191         }
1192
1193         return rem;
1194 }
1195 EXPORT_SYMBOL(hci_recv_fragment);
1196
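/*
 * Illustrative UART-style usage (a sketch with hypothetical buf/len):
 * a driver that knows the packet type of the bytes it just read feeds
 * them in as they arrive and lets the core reassemble full frames:
 *
 *	if (hci_recv_fragment(hdev, HCI_EVENT_PKT, buf, len) < 0)
 *		BT_ERR("%s frame reassembly failed", hdev->name);
 */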
1197 #define STREAM_REASSEMBLY 0
1198
1199 int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
1200 {
1201         int type;
1202         int rem = 0;
1203
1204         while (count) {
1205                 struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];
1206
1207                 if (!skb) {
1208                         struct { char type; } *pkt;
1209
1210                         /* Start of the frame */
1211                         pkt = data;
1212                         type = pkt->type;
1213
1214                         data++;
1215                         count--;
1216                 } else
1217                         type = bt_cb(skb)->pkt_type;
1218
1219                 rem = hci_reassembly(hdev, type, data,
1220                                         count, STREAM_REASSEMBLY, GFP_ATOMIC);
1221                 if (rem < 0)
1222                         return rem;
1223
1224                 data += (count - rem);
1225                 count = rem;
1226         }
1227
1228         return rem;
1229 }
1230 EXPORT_SYMBOL(hci_recv_stream_fragment);
1231
1232 /* ---- Interface to upper protocols ---- */
1233
1234 /* Register/Unregister protocols.
1235  * hci_task_lock is used to ensure that no tasks are running. */
1236 int hci_register_proto(struct hci_proto *hp)
1237 {
1238         int err = 0;
1239
1240         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1241
1242         if (hp->id >= HCI_MAX_PROTO)
1243                 return -EINVAL;
1244
1245         write_lock_bh(&hci_task_lock);
1246
1247         if (!hci_proto[hp->id])
1248                 hci_proto[hp->id] = hp;
1249         else
1250                 err = -EEXIST;
1251
1252         write_unlock_bh(&hci_task_lock);
1253
1254         return err;
1255 }
1256 EXPORT_SYMBOL(hci_register_proto);
1257
1258 int hci_unregister_proto(struct hci_proto *hp)
1259 {
1260         int err = 0;
1261
1262         BT_DBG("%p name %s id %d", hp, hp->name, hp->id);
1263
1264         if (hp->id >= HCI_MAX_PROTO)
1265                 return -EINVAL;
1266
1267         write_lock_bh(&hci_task_lock);
1268
1269         if (hci_proto[hp->id])
1270                 hci_proto[hp->id] = NULL;
1271         else
1272                 err = -ENOENT;
1273
1274         write_unlock_bh(&hci_task_lock);
1275
1276         return err;
1277 }
1278 EXPORT_SYMBOL(hci_unregister_proto);
1279
1280 int hci_register_cb(struct hci_cb *cb)
1281 {
1282         BT_DBG("%p name %s", cb, cb->name);
1283
1284         write_lock_bh(&hci_cb_list_lock);
1285         list_add(&cb->list, &hci_cb_list);
1286         write_unlock_bh(&hci_cb_list_lock);
1287
1288         return 0;
1289 }
1290 EXPORT_SYMBOL(hci_register_cb);
1291
1292 int hci_unregister_cb(struct hci_cb *cb)
1293 {
1294         BT_DBG("%p name %s", cb, cb->name);
1295
1296         write_lock_bh(&hci_cb_list_lock);
1297         list_del(&cb->list);
1298         write_unlock_bh(&hci_cb_list_lock);
1299
1300         return 0;
1301 }
1302 EXPORT_SYMBOL(hci_unregister_cb);
1303
1304 static int hci_send_frame(struct sk_buff *skb)
1305 {
1306         struct hci_dev *hdev = (struct hci_dev *) skb->dev;
1307
1308         if (!hdev) {
1309                 kfree_skb(skb);
1310                 return -ENODEV;
1311         }
1312
1313         BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);
1314
1315         if (atomic_read(&hdev->promisc)) {
1316                 /* Time stamp */
1317                 __net_timestamp(skb);
1318
1319                 hci_send_to_sock(hdev, skb);
1320         }
1321
1322         /* Get rid of skb owner, prior to sending to the driver. */
1323         skb_orphan(skb);
1324
1325         return hdev->send(skb);
1326 }
1327
1328 /* Send HCI command */
1329 int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
1330 {
1331         int len = HCI_COMMAND_HDR_SIZE + plen;
1332         struct hci_command_hdr *hdr;
1333         struct sk_buff *skb;
1334
1335         BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);
1336
1337         skb = bt_skb_alloc(len, GFP_ATOMIC);
1338         if (!skb) {
1339                 BT_ERR("%s no memory for command", hdev->name);
1340                 return -ENOMEM;
1341         }
1342
1343         hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
1344         hdr->opcode = cpu_to_le16(opcode);
1345         hdr->plen   = plen;
1346
1347         if (plen)
1348                 memcpy(skb_put(skb, plen), param, plen);
1349
1350         BT_DBG("skb len %d", skb->len);
1351
1352         bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
1353         skb->dev = (void *) hdev;
1354
1355         skb_queue_tail(&hdev->cmd_q, skb);
1356         tasklet_schedule(&hdev->cmd_task);
1357
1358         return 0;
1359 }
1360
1361 /* Get data from the previously sent command */
1362 void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
1363 {
1364         struct hci_command_hdr *hdr;
1365
1366         if (!hdev->sent_cmd)
1367                 return NULL;
1368
1369         hdr = (void *) hdev->sent_cmd->data;
1370
1371         if (hdr->opcode != cpu_to_le16(opcode))
1372                 return NULL;
1373
1374         BT_DBG("%s opcode 0x%x", hdev->name, opcode);
1375
1376         return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
1377 }
1378
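/*
 * Illustrative only: event handlers typically use this to recover the
 * parameters of the command that a status or completion event refers
 * to, e.g.
 *
 *	struct hci_cp_inquiry *cp;
 *
 *	cp = hci_sent_cmd_data(hdev, HCI_OP_INQUIRY);
 *	if (!cp)
 *		return;
 */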
1379 /* Send ACL data */
1380 static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
1381 {
1382         struct hci_acl_hdr *hdr;
1383         int len = skb->len;
1384
1385         skb_push(skb, HCI_ACL_HDR_SIZE);
1386         skb_reset_transport_header(skb);
1387         hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
1388         hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
1389         hdr->dlen   = cpu_to_le16(len);
1390 }
1391
1392 void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
1393 {
1394         struct hci_dev *hdev = conn->hdev;
1395         struct sk_buff *list;
1396
1397         BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);
1398
1399         skb->dev = (void *) hdev;
1400         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1401         hci_add_acl_hdr(skb, conn->handle, flags | ACL_START);
1402
1403         list = skb_shinfo(skb)->frag_list;
1404         if (!list) {
1405                 /* Non fragmented */
1406                 BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);
1407
1408                 skb_queue_tail(&conn->data_q, skb);
1409         } else {
1410                 /* Fragmented */
1411                 BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1412
1413                 skb_shinfo(skb)->frag_list = NULL;
1414
1415                 /* Queue all fragments atomically */
1416                 spin_lock_bh(&conn->data_q.lock);
1417
1418                 __skb_queue_tail(&conn->data_q, skb);
1419                 do {
1420                         skb = list; list = list->next;
1421
1422                         skb->dev = (void *) hdev;
1423                         bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
1424                         hci_add_acl_hdr(skb, conn->handle, flags | ACL_CONT);
1425
1426                         BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);
1427
1428                         __skb_queue_tail(&conn->data_q, skb);
1429                 } while (list);
1430
1431                 spin_unlock_bh(&conn->data_q.lock);
1432         }
1433
1434         tasklet_schedule(&hdev->tx_task);
1435 }
1436 EXPORT_SYMBOL(hci_send_acl);
1437
1438 /* Send SCO data */
1439 void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
1440 {
1441         struct hci_dev *hdev = conn->hdev;
1442         struct hci_sco_hdr hdr;
1443
1444         BT_DBG("%s len %d", hdev->name, skb->len);
1445
1446         hdr.handle = cpu_to_le16(conn->handle);
1447         hdr.dlen   = skb->len;
1448
1449         skb_push(skb, HCI_SCO_HDR_SIZE);
1450         skb_reset_transport_header(skb);
1451         memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);
1452
1453         skb->dev = (void *) hdev;
1454         bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;
1455
1456         skb_queue_tail(&conn->data_q, skb);
1457         tasklet_schedule(&hdev->tx_task);
1458 }
1459 EXPORT_SYMBOL(hci_send_sco);
1460
1461 /* ---- HCI TX task (outgoing data) ---- */
1462
1463 /* HCI Connection scheduler */
1464 static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
1465 {
1466         struct hci_conn_hash *h = &hdev->conn_hash;
1467         struct hci_conn *conn = NULL;
1468         int num = 0, min = ~0;
1469         struct list_head *p;
1470
1471         /* We don't have to lock device here. Connections are always
1472          * added and removed with TX task disabled. */
1473         list_for_each(p, &h->list) {
1474                 struct hci_conn *c;
1475                 c = list_entry(p, struct hci_conn, list);
1476
1477                 if (c->type != type || skb_queue_empty(&c->data_q))
1478                         continue;
1479
1480                 if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
1481                         continue;
1482
1483                 num++;
1484
1485                 if (c->sent < min) {
1486                         min  = c->sent;
1487                         conn = c;
1488                 }
1489         }
1490
1491         if (conn) {
1492                 int cnt = (type == ACL_LINK ? hdev->acl_cnt : hdev->sco_cnt);
1493                 int q = cnt / num;
1494                 *quote = q ? q : 1;
1495         } else
1496                 *quote = 0;
1497
1498         BT_DBG("conn %p quote %d", conn, *quote);
1499         return conn;
1500 }
1501
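/*
 * Worked example for the quota above: with hdev->acl_cnt == 8 and
 * three ACL connections holding queued data, the connection with the
 * fewest packets in flight is picked and gets a quote of 8 / 3 = 2
 * frames this round, sharing the controller buffers fairly.
 */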
1502 static inline void hci_acl_tx_to(struct hci_dev *hdev)
1503 {
1504         struct hci_conn_hash *h = &hdev->conn_hash;
1505         struct list_head *p;
1506         struct hci_conn  *c;
1507
1508         BT_ERR("%s ACL tx timeout", hdev->name);
1509
1510         /* Kill stalled connections */
1511         list_for_each(p, &h->list) {
1512                 c = list_entry(p, struct hci_conn, list);
1513                 if (c->type == ACL_LINK && c->sent) {
1514                         BT_ERR("%s killing stalled ACL connection %s",
1515                                 hdev->name, batostr(&c->dst));
1516                         hci_acl_disconn(c, 0x13); /* Remote User Terminated Connection */
1517                 }
1518         }
1519 }
1520
1521 static inline void hci_sched_acl(struct hci_dev *hdev)
1522 {
1523         struct hci_conn *conn;
1524         struct sk_buff *skb;
1525         int quote;
1526
1527         BT_DBG("%s", hdev->name);
1528
1529         if (!test_bit(HCI_RAW, &hdev->flags)) {
1530                 /* ACL tx timeout must be longer than maximum
1531                  * link supervision timeout (40.9 seconds) */
1532                 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1533                         hci_acl_tx_to(hdev);
1534         }
1535
1536         while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1537                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1538                         BT_DBG("skb %p len %d", skb, skb->len);
1539
1540                         hci_conn_enter_active_mode(conn);
1541
1542                         hci_send_frame(skb);
1543                         hdev->acl_last_tx = jiffies;
1544
1545                         hdev->acl_cnt--;
1546                         conn->sent++;
1547                 }
1548         }
1549 }
1550
1551 /* Schedule SCO */
1552 static inline void hci_sched_sco(struct hci_dev *hdev)
1553 {
1554         struct hci_conn *conn;
1555         struct sk_buff *skb;
1556         int quote;
1557
1558         BT_DBG("%s", hdev->name);
1559
1560         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1561                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1562                         BT_DBG("skb %p len %d", skb, skb->len);
1563                         hci_send_frame(skb);
1564
1565                         conn->sent++;
1566                         if (conn->sent == ~0)
1567                                 conn->sent = 0;
1568                 }
1569         }
1570 }
1571
1572 static inline void hci_sched_esco(struct hci_dev *hdev)
1573 {
1574         struct hci_conn *conn;
1575         struct sk_buff *skb;
1576         int quote;
1577
1578         BT_DBG("%s", hdev->name);
1579
1580         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1581                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1582                         BT_DBG("skb %p len %d", skb, skb->len);
1583                         hci_send_frame(skb);
1584
1585                         conn->sent++;
1586                         if (conn->sent == ~0)
1587                                 conn->sent = 0;
1588                 }
1589         }
1590 }
1591
1592 static void hci_tx_task(unsigned long arg)
1593 {
1594         struct hci_dev *hdev = (struct hci_dev *) arg;
1595         struct sk_buff *skb;
1596
1597         read_lock(&hci_task_lock);
1598
1599         BT_DBG("%s acl %d sco %d", hdev->name, hdev->acl_cnt, hdev->sco_cnt);
1600
1601         /* Schedule queues and send stuff to HCI driver */
1602
1603         hci_sched_acl(hdev);
1604
1605         hci_sched_sco(hdev);
1606
1607         hci_sched_esco(hdev);
1608
1609         /* Send next queued raw (unknown type) packet */
1610         while ((skb = skb_dequeue(&hdev->raw_q)))
1611                 hci_send_frame(skb);
1612
1613         read_unlock(&hci_task_lock);
1614 }
1615
1616 /* ----- HCI RX task (incoming data processing) ----- */
1617
1618 /* ACL data packet */
1619 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1620 {
1621         struct hci_acl_hdr *hdr = (void *) skb->data;
1622         struct hci_conn *conn;
1623         __u16 handle, flags;
1624
1625         skb_pull(skb, HCI_ACL_HDR_SIZE);
1626
1627         handle = __le16_to_cpu(hdr->handle);
1628         flags  = hci_flags(handle);
1629         handle = hci_handle(handle);
1630
1631         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1632
1633         hdev->stat.acl_rx++;
1634
1635         hci_dev_lock(hdev);
1636         conn = hci_conn_hash_lookup_handle(hdev, handle);
1637         hci_dev_unlock(hdev);
1638
1639         if (conn) {
1640                 register struct hci_proto *hp;
1641
1642                 hci_conn_enter_active_mode(conn);
1643
1644                 /* Send to upper protocol */
1645                 hp = hci_proto[HCI_PROTO_L2CAP];
1646                 if (hp && hp->recv_acldata) {
1647                         hp->recv_acldata(conn, skb, flags);
1648                         return;
1649                 }
1650         } else {
1651                 BT_ERR("%s ACL packet for unknown connection handle %d",
1652                         hdev->name, handle);
1653         }
1654
1655         kfree_skb(skb);
1656 }
1657
1658 /* SCO data packet */
1659 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1660 {
1661         struct hci_sco_hdr *hdr = (void *) skb->data;
1662         struct hci_conn *conn;
1663         __u16 handle;
1664
1665         skb_pull(skb, HCI_SCO_HDR_SIZE);
1666
1667         handle = __le16_to_cpu(hdr->handle);
1668
1669         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1670
1671         hdev->stat.sco_rx++;
1672
1673         hci_dev_lock(hdev);
1674         conn = hci_conn_hash_lookup_handle(hdev, handle);
1675         hci_dev_unlock(hdev);
1676
1677         if (conn) {
1678                 register struct hci_proto *hp;
1679
1680                 /* Send to upper protocol */
1681                 hp = hci_proto[HCI_PROTO_SCO];
1682                 if (hp && hp->recv_scodata) {
1683                         hp->recv_scodata(conn, skb);
1684                         return;
1685                 }
1686         } else {
1687                 BT_ERR("%s SCO packet for unknown connection handle %d",
1688                         hdev->name, handle);
1689         }
1690
1691         kfree_skb(skb);
1692 }
1693
1694 static void hci_rx_task(unsigned long arg)
1695 {
1696         struct hci_dev *hdev = (struct hci_dev *) arg;
1697         struct sk_buff *skb;
1698
1699         BT_DBG("%s", hdev->name);
1700
1701         read_lock(&hci_task_lock);
1702
1703         while ((skb = skb_dequeue(&hdev->rx_q))) {
1704                 if (atomic_read(&hdev->promisc)) {
1705                         /* Send copy to the sockets */
1706                         hci_send_to_sock(hdev, skb);
1707                 }
1708
1709                 if (test_bit(HCI_RAW, &hdev->flags)) {
1710                         kfree_skb(skb);
1711                         continue;
1712                 }
1713
1714                 if (test_bit(HCI_INIT, &hdev->flags)) {
1715                         /* Don't process data packets in this state. */
1716                         switch (bt_cb(skb)->pkt_type) {
1717                         case HCI_ACLDATA_PKT:
1718                         case HCI_SCODATA_PKT:
1719                                 kfree_skb(skb);
1720                                 continue;
1721                         }
1722                 }
1723
1724                 /* Process frame */
1725                 switch (bt_cb(skb)->pkt_type) {
1726                 case HCI_EVENT_PKT:
1727                         hci_event_packet(hdev, skb);
1728                         break;
1729
1730                 case HCI_ACLDATA_PKT:
1731                         BT_DBG("%s ACL data packet", hdev->name);
1732                         hci_acldata_packet(hdev, skb);
1733                         break;
1734
1735                 case HCI_SCODATA_PKT:
1736                         BT_DBG("%s SCO data packet", hdev->name);
1737                         hci_scodata_packet(hdev, skb);
1738                         break;
1739
1740                 default:
1741                         kfree_skb(skb);
1742                         break;
1743                 }
1744         }
1745
1746         read_unlock(&hci_task_lock);
1747 }
1748
1749 static void hci_cmd_task(unsigned long arg)
1750 {
1751         struct hci_dev *hdev = (struct hci_dev *) arg;
1752         struct sk_buff *skb;
1753
1754         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
1755
1756         if (!atomic_read(&hdev->cmd_cnt) && time_after(jiffies, hdev->cmd_last_tx + HZ)) {
1757                 BT_ERR("%s command tx timeout", hdev->name);
1758                 atomic_set(&hdev->cmd_cnt, 1);
1759         }
1760
1761         /* Send queued commands */
1762         if (atomic_read(&hdev->cmd_cnt) && (skb = skb_dequeue(&hdev->cmd_q))) {
1763                 kfree_skb(hdev->sent_cmd);
1764
1765                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
1766                 if (hdev->sent_cmd) {
1767                         atomic_dec(&hdev->cmd_cnt);
1768                         hci_send_frame(skb);
1769                         hdev->cmd_last_tx = jiffies;
1770                 } else {
1771                         skb_queue_head(&hdev->cmd_q, skb);
1772                         tasklet_schedule(&hdev->cmd_task);
1773                 }
1774         }
1775 }