Bluetooth: remove unnecessary function declaration
net/bluetooth/hci_core.c
/*
   BlueZ - Bluetooth protocol stack for Linux
   Copyright (C) 2000-2001 Qualcomm Incorporated

   Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com>

   This program is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License version 2 as
   published by the Free Software Foundation;

   THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
   OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
   FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT OF THIRD PARTY RIGHTS.
   IN NO EVENT SHALL THE COPYRIGHT HOLDER(S) AND AUTHOR(S) BE LIABLE FOR ANY
   CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES
   WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
   ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
   OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.

   ALL LIABILITY, INCLUDING LIABILITY FOR INFRINGEMENT OF ANY PATENTS,
   COPYRIGHTS, TRADEMARKS OR OTHER RIGHTS, RELATING TO USE OF THIS
   SOFTWARE IS DISCLAIMED.
*/

/* Bluetooth HCI core. */

#include <linux/jiffies.h>
#include <linux/module.h>
#include <linux/kmod.h>

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/fcntl.h>
#include <linux/init.h>
#include <linux/skbuff.h>
#include <linux/workqueue.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rfkill.h>
#include <linux/timer.h>
#include <net/sock.h>

#include <asm/system.h>
#include <linux/uaccess.h>
#include <asm/unaligned.h>

#include <net/bluetooth/bluetooth.h>
#include <net/bluetooth/hci_core.h>

#define AUTO_OFF_TIMEOUT 2000

static void hci_cmd_task(unsigned long arg);
static void hci_rx_task(unsigned long arg);
static void hci_tx_task(unsigned long arg);

static DEFINE_RWLOCK(hci_task_lock);

/* HCI device list */
LIST_HEAD(hci_dev_list);
DEFINE_RWLOCK(hci_dev_list_lock);

/* HCI callback list */
LIST_HEAD(hci_cb_list);
DEFINE_RWLOCK(hci_cb_list_lock);

/* HCI protocols */
#define HCI_MAX_PROTO   2
struct hci_proto *hci_proto[HCI_MAX_PROTO];

/* HCI notifiers list */
static ATOMIC_NOTIFIER_HEAD(hci_notifier);

/* ---- HCI notifications ---- */

int hci_register_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_register(&hci_notifier, nb);
}

int hci_unregister_notifier(struct notifier_block *nb)
{
        return atomic_notifier_chain_unregister(&hci_notifier, nb);
}

static void hci_notify(struct hci_dev *hdev, int event)
{
        atomic_notifier_call_chain(&hci_notifier, event, hdev);
}
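
/*
 * Illustrative sketch (not part of the original file, kept out of the
 * build): how a module might hook these notifications. The example_*
 * names are hypothetical; the callback receives the HCI_DEV_* event
 * and the hci_dev pointer that hci_notify() passes along.
 */
#if 0
static int example_hci_event(struct notifier_block *nb,
                                        unsigned long event, void *ptr)
{
        struct hci_dev *hdev = ptr;

        if (event == HCI_DEV_REG)
                BT_DBG("%s registered", hdev->name);

        return NOTIFY_DONE;
}

static struct notifier_block example_hci_nb = {
        .notifier_call = example_hci_event,
};

/* hci_register_notifier(&example_hci_nb); */
#endif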

/* ---- HCI requests ---- */

void hci_req_complete(struct hci_dev *hdev, __u16 cmd, int result)
{
        BT_DBG("%s command 0x%04x result 0x%2.2x", hdev->name, cmd, result);

        /* If this is the init phase check if the completed command matches
         * the last init command, and if not just return.
         */
        if (test_bit(HCI_INIT, &hdev->flags) && hdev->init_last_cmd != cmd)
                return;

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = result;
                hdev->req_status = HCI_REQ_DONE;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

static void hci_req_cancel(struct hci_dev *hdev, int err)
{
        BT_DBG("%s err 0x%2.2x", hdev->name, err);

        if (hdev->req_status == HCI_REQ_PEND) {
                hdev->req_result = err;
                hdev->req_status = HCI_REQ_CANCELED;
                wake_up_interruptible(&hdev->req_wait_q);
        }
}

/* Execute request and wait for completion. */
static int __hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        DECLARE_WAITQUEUE(wait, current);
        int err = 0;

        BT_DBG("%s start", hdev->name);

        hdev->req_status = HCI_REQ_PEND;

        add_wait_queue(&hdev->req_wait_q, &wait);
        set_current_state(TASK_INTERRUPTIBLE);

        req(hdev, opt);
        schedule_timeout(timeout);

        remove_wait_queue(&hdev->req_wait_q, &wait);

        if (signal_pending(current))
                return -EINTR;

        switch (hdev->req_status) {
        case HCI_REQ_DONE:
                err = -bt_err(hdev->req_result);
                break;

        case HCI_REQ_CANCELED:
                err = -hdev->req_result;
                break;

        default:
                err = -ETIMEDOUT;
                break;
        }

        hdev->req_status = hdev->req_result = 0;

        BT_DBG("%s end: err %d", hdev->name, err);

        return err;
}

static inline int hci_request(struct hci_dev *hdev, void (*req)(struct hci_dev *hdev, unsigned long opt),
                                        unsigned long opt, __u32 timeout)
{
        int ret;

        if (!test_bit(HCI_UP, &hdev->flags))
                return -ENETDOWN;

        /* Serialize all requests */
        hci_req_lock(hdev);
        ret = __hci_request(hdev, req, opt, timeout);
        hci_req_unlock(hdev);

        return ret;
}
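
/*
 * Illustrative call-site fragment (hypothetical, kept out of the
 * build): a sketch of how the request helpers below are driven
 * through hci_request(), here enabling page and inquiry scan.
 */
#if 0
        err = hci_request(hdev, hci_scan_req, SCAN_PAGE | SCAN_INQUIRY,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
#endif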

static void hci_reset_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s %ld", hdev->name, opt);

        /* Reset device */
        hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);
}

static void hci_init_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_cp_delete_stored_link_key cp;
        struct sk_buff *skb;
        __le16 param;
        __u8 flt_type;

        BT_DBG("%s %ld", hdev->name, opt);

        /* Driver initialization */

        /* Special commands */
        while ((skb = skb_dequeue(&hdev->driver_init))) {
                bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
                skb->dev = (void *) hdev;

                skb_queue_tail(&hdev->cmd_q, skb);
                tasklet_schedule(&hdev->cmd_task);
        }
        skb_queue_purge(&hdev->driver_init);

        /* Mandatory initialization */

        /* Reset */
        if (!test_bit(HCI_QUIRK_NO_RESET, &hdev->quirks))
                hci_send_cmd(hdev, HCI_OP_RESET, 0, NULL);

        /* Read Local Supported Features */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_FEATURES, 0, NULL);

        /* Read Local Version */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_VERSION, 0, NULL);

        /* Read Buffer Size (ACL mtu, max pkt, etc.) */
        hci_send_cmd(hdev, HCI_OP_READ_BUFFER_SIZE, 0, NULL);

#if 0
        /* Host buffer size */
        {
                struct hci_cp_host_buffer_size cp;
                cp.acl_mtu = cpu_to_le16(HCI_MAX_ACL_SIZE);
                cp.sco_mtu = HCI_MAX_SCO_SIZE;
                cp.acl_max_pkt = cpu_to_le16(0xffff);
                cp.sco_max_pkt = cpu_to_le16(0xffff);
                hci_send_cmd(hdev, HCI_OP_HOST_BUFFER_SIZE, sizeof(cp), &cp);
        }
#endif

        /* Read BD Address */
        hci_send_cmd(hdev, HCI_OP_READ_BD_ADDR, 0, NULL);

        /* Read Class of Device */
        hci_send_cmd(hdev, HCI_OP_READ_CLASS_OF_DEV, 0, NULL);

        /* Read Local Name */
        hci_send_cmd(hdev, HCI_OP_READ_LOCAL_NAME, 0, NULL);

        /* Read Voice Setting */
        hci_send_cmd(hdev, HCI_OP_READ_VOICE_SETTING, 0, NULL);

        /* Optional initialization */

        /* Clear Event Filters */
        flt_type = HCI_FLT_CLEAR_ALL;
        hci_send_cmd(hdev, HCI_OP_SET_EVENT_FLT, 1, &flt_type);

        /* Connection accept timeout ~20 secs */
        param = cpu_to_le16(0x7d00);
        hci_send_cmd(hdev, HCI_OP_WRITE_CA_TIMEOUT, 2, &param);

        bacpy(&cp.bdaddr, BDADDR_ANY);
        cp.delete_all = 1;
        hci_send_cmd(hdev, HCI_OP_DELETE_STORED_LINK_KEY, sizeof(cp), &cp);
}

static void hci_le_init_req(struct hci_dev *hdev, unsigned long opt)
{
        BT_DBG("%s", hdev->name);

        /* Read LE buffer size */
        hci_send_cmd(hdev, HCI_OP_LE_READ_BUFFER_SIZE, 0, NULL);
}

static void hci_scan_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 scan = opt;

        BT_DBG("%s %x", hdev->name, scan);

        /* Inquiry and Page scans */
        hci_send_cmd(hdev, HCI_OP_WRITE_SCAN_ENABLE, 1, &scan);
}

static void hci_auth_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 auth = opt;

        BT_DBG("%s %x", hdev->name, auth);

        /* Authentication */
        hci_send_cmd(hdev, HCI_OP_WRITE_AUTH_ENABLE, 1, &auth);
}

static void hci_encrypt_req(struct hci_dev *hdev, unsigned long opt)
{
        __u8 encrypt = opt;

        BT_DBG("%s %x", hdev->name, encrypt);

        /* Encryption */
        hci_send_cmd(hdev, HCI_OP_WRITE_ENCRYPT_MODE, 1, &encrypt);
}

static void hci_linkpol_req(struct hci_dev *hdev, unsigned long opt)
{
        __le16 policy = cpu_to_le16(opt);

        BT_DBG("%s %x", hdev->name, policy);

        /* Default link policy */
        hci_send_cmd(hdev, HCI_OP_WRITE_DEF_LINK_POLICY, 2, &policy);
}

/* Get HCI device by index.
 * Device is held on return. */
struct hci_dev *hci_dev_get(int index)
{
        struct hci_dev *hdev = NULL;
        struct list_head *p;

        BT_DBG("%d", index);

        if (index < 0)
                return NULL;

        read_lock(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *d = list_entry(p, struct hci_dev, list);
                if (d->id == index) {
                        hdev = hci_dev_hold(d);
                        break;
                }
        }
        read_unlock(&hci_dev_list_lock);
        return hdev;
}

/* ---- Inquiry support ---- */
static void inquiry_cache_flush(struct hci_dev *hdev)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *next  = cache->list, *e;

        BT_DBG("cache %p", cache);

        cache->list = NULL;
        while ((e = next)) {
                next = e->next;
                kfree(e);
        }
}

struct inquiry_entry *hci_inquiry_cache_lookup(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *e;

        BT_DBG("cache %p, %s", cache, batostr(bdaddr));

        for (e = cache->list; e; e = e->next)
                if (!bacmp(&e->data.bdaddr, bdaddr))
                        break;
        return e;
}

void hci_inquiry_cache_update(struct hci_dev *hdev, struct inquiry_data *data)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_entry *ie;

        BT_DBG("cache %p, %s", cache, batostr(&data->bdaddr));

        ie = hci_inquiry_cache_lookup(hdev, &data->bdaddr);
        if (!ie) {
                /* Entry not in the cache. Add new one. */
                ie = kzalloc(sizeof(struct inquiry_entry), GFP_ATOMIC);
                if (!ie)
                        return;

                ie->next = cache->list;
                cache->list = ie;
        }

        memcpy(&ie->data, data, sizeof(*data));
        ie->timestamp = jiffies;
        cache->timestamp = jiffies;
}

static int inquiry_cache_dump(struct hci_dev *hdev, int num, __u8 *buf)
{
        struct inquiry_cache *cache = &hdev->inq_cache;
        struct inquiry_info *info = (struct inquiry_info *) buf;
        struct inquiry_entry *e;
        int copied = 0;

        for (e = cache->list; e && copied < num; e = e->next, copied++) {
                struct inquiry_data *data = &e->data;
                bacpy(&info->bdaddr, &data->bdaddr);
                info->pscan_rep_mode    = data->pscan_rep_mode;
                info->pscan_period_mode = data->pscan_period_mode;
                info->pscan_mode        = data->pscan_mode;
                memcpy(info->dev_class, data->dev_class, 3);
                info->clock_offset      = data->clock_offset;
                info++;
        }

        BT_DBG("cache %p, copied %d", cache, copied);
        return copied;
}

static void hci_inq_req(struct hci_dev *hdev, unsigned long opt)
{
        struct hci_inquiry_req *ir = (struct hci_inquiry_req *) opt;
        struct hci_cp_inquiry cp;

        BT_DBG("%s", hdev->name);

        if (test_bit(HCI_INQUIRY, &hdev->flags))
                return;

        /* Start Inquiry */
        memcpy(&cp.lap, &ir->lap, 3);
        cp.length  = ir->length;
        cp.num_rsp = ir->num_rsp;
        hci_send_cmd(hdev, HCI_OP_INQUIRY, sizeof(cp), &cp);
}

int hci_inquiry(void __user *arg)
{
        __u8 __user *ptr = arg;
        struct hci_inquiry_req ir;
        struct hci_dev *hdev;
        int err = 0, do_inquiry = 0, max_rsp;
        long timeo;
        __u8 *buf;

        if (copy_from_user(&ir, ptr, sizeof(ir)))
                return -EFAULT;

        hdev = hci_dev_get(ir.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_dev_lock_bh(hdev);
        if (inquiry_cache_age(hdev) > INQUIRY_CACHE_AGE_MAX ||
                                inquiry_cache_empty(hdev) ||
                                ir.flags & IREQ_CACHE_FLUSH) {
                inquiry_cache_flush(hdev);
                do_inquiry = 1;
        }
        hci_dev_unlock_bh(hdev);

        timeo = ir.length * msecs_to_jiffies(2000);

        if (do_inquiry) {
                err = hci_request(hdev, hci_inq_req, (unsigned long)&ir, timeo);
                if (err < 0)
                        goto done;
        }
        /* For an unlimited number of responses, use a buffer with 255 entries */
        max_rsp = (ir.num_rsp == 0) ? 255 : ir.num_rsp;

        /* cache_dump can't sleep, so allocate a temporary buffer and copy it
         * to user space afterwards.
         */
        buf = kmalloc(sizeof(struct inquiry_info) * max_rsp, GFP_KERNEL);
        if (!buf) {
                err = -ENOMEM;
                goto done;
        }

        hci_dev_lock_bh(hdev);
        ir.num_rsp = inquiry_cache_dump(hdev, max_rsp, buf);
        hci_dev_unlock_bh(hdev);

        BT_DBG("num_rsp %d", ir.num_rsp);

        if (!copy_to_user(ptr, &ir, sizeof(ir))) {
                ptr += sizeof(ir);
                if (copy_to_user(ptr, buf, sizeof(struct inquiry_info) *
                                        ir.num_rsp))
                        err = -EFAULT;
        } else
                err = -EFAULT;

        kfree(buf);

done:
        hci_dev_put(hdev);
        return err;
}
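
/*
 * Userspace view of this ioctl, as a hedged sketch: judging from the
 * copy_to_user() calls above, the caller passes a hci_inquiry_req
 * immediately followed by room for num_rsp inquiry_info entries.
 * Illustrative only; not kernel code.
 */
#if 0
        struct {
                struct hci_inquiry_req ir;
                struct inquiry_info info[8];
        } buf = { .ir = { .dev_id  = 0,
                          .lap     = { 0x33, 0x8b, 0x9e }, /* GIAC */
                          .length  = 8, /* 8 * 1.28s inquiry */
                          .num_rsp = 8 } };

        int sk = socket(AF_BLUETOOTH, SOCK_RAW, BTPROTO_HCI);
        if (sk >= 0 && ioctl(sk, HCIINQUIRY, &buf) == 0)
                /* buf.ir.num_rsp entries of buf.info[] are now valid */;
#endif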

/* ---- HCI ioctl helpers ---- */

int hci_dev_open(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_lock(hdev);

        if (hdev->rfkill && rfkill_blocked(hdev->rfkill)) {
                ret = -ERFKILL;
                goto done;
        }

        if (test_bit(HCI_UP, &hdev->flags)) {
                ret = -EALREADY;
                goto done;
        }

        if (test_bit(HCI_QUIRK_RAW_DEVICE, &hdev->quirks))
                set_bit(HCI_RAW, &hdev->flags);

        /* Treat all non BR/EDR controllers as raw devices for now */
        if (hdev->dev_type != HCI_BREDR)
                set_bit(HCI_RAW, &hdev->flags);

        if (hdev->open(hdev)) {
                ret = -EIO;
                goto done;
        }

        if (!test_bit(HCI_RAW, &hdev->flags)) {
                atomic_set(&hdev->cmd_cnt, 1);
                set_bit(HCI_INIT, &hdev->flags);
                hdev->init_last_cmd = 0;

                ret = __hci_request(hdev, hci_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                if (lmp_le_capable(hdev))
                        ret = __hci_request(hdev, hci_le_init_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

                clear_bit(HCI_INIT, &hdev->flags);
        }

        if (!ret) {
                hci_dev_hold(hdev);
                set_bit(HCI_UP, &hdev->flags);
                hci_notify(hdev, HCI_DEV_UP);
                if (!test_bit(HCI_SETUP, &hdev->flags))
                        mgmt_powered(hdev->id, 1);
        } else {
                /* Init failed, cleanup */
                tasklet_kill(&hdev->rx_task);
                tasklet_kill(&hdev->tx_task);
                tasklet_kill(&hdev->cmd_task);

                skb_queue_purge(&hdev->cmd_q);
                skb_queue_purge(&hdev->rx_q);

                if (hdev->flush)
                        hdev->flush(hdev);

                if (hdev->sent_cmd) {
                        kfree_skb(hdev->sent_cmd);
                        hdev->sent_cmd = NULL;
                }

                hdev->close(hdev);
                hdev->flags = 0;
        }

done:
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

static int hci_dev_do_close(struct hci_dev *hdev)
{
        BT_DBG("%s %p", hdev->name, hdev);

        hci_req_cancel(hdev, ENODEV);
        hci_req_lock(hdev);

        if (!test_and_clear_bit(HCI_UP, &hdev->flags)) {
                hci_req_unlock(hdev);
                return 0;
        }

        /* Kill RX and TX tasks */
        tasklet_kill(&hdev->rx_task);
        tasklet_kill(&hdev->tx_task);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        hci_notify(hdev, HCI_DEV_DOWN);

        if (hdev->flush)
                hdev->flush(hdev);

        /* Reset device */
        skb_queue_purge(&hdev->cmd_q);
        atomic_set(&hdev->cmd_cnt, 1);
        if (!test_bit(HCI_RAW, &hdev->flags)) {
                set_bit(HCI_INIT, &hdev->flags);
                __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(250));
                clear_bit(HCI_INIT, &hdev->flags);
        }

        /* Kill cmd task */
        tasklet_kill(&hdev->cmd_task);

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);
        skb_queue_purge(&hdev->raw_q);

        /* Drop last sent command */
        if (hdev->sent_cmd) {
                del_timer_sync(&hdev->cmd_timer);
                kfree_skb(hdev->sent_cmd);
                hdev->sent_cmd = NULL;
        }

        /* After this point our queues are empty
         * and no tasks are scheduled. */
        hdev->close(hdev);

        mgmt_powered(hdev->id, 0);

        /* Clear flags */
        hdev->flags = 0;

        hci_req_unlock(hdev);

        hci_dev_put(hdev);
        return 0;
}

int hci_dev_close(__u16 dev)
{
        struct hci_dev *hdev;
        int err;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;
        err = hci_dev_do_close(hdev);
        hci_dev_put(hdev);
        return err;
}

int hci_dev_reset(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        hci_req_lock(hdev);
        tasklet_disable(&hdev->tx_task);

        if (!test_bit(HCI_UP, &hdev->flags))
                goto done;

        /* Drop queues */
        skb_queue_purge(&hdev->rx_q);
        skb_queue_purge(&hdev->cmd_q);

        hci_dev_lock_bh(hdev);
        inquiry_cache_flush(hdev);
        hci_conn_hash_flush(hdev);
        hci_dev_unlock_bh(hdev);

        if (hdev->flush)
                hdev->flush(hdev);

        atomic_set(&hdev->cmd_cnt, 1);
        hdev->acl_cnt = 0; hdev->sco_cnt = 0; hdev->le_cnt = 0;

        if (!test_bit(HCI_RAW, &hdev->flags))
                ret = __hci_request(hdev, hci_reset_req, 0,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));

done:
        tasklet_enable(&hdev->tx_task);
        hci_req_unlock(hdev);
        hci_dev_put(hdev);
        return ret;
}

int hci_dev_reset_stat(__u16 dev)
{
        struct hci_dev *hdev;
        int ret = 0;

        hdev = hci_dev_get(dev);
        if (!hdev)
                return -ENODEV;

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        hci_dev_put(hdev);

        return ret;
}

int hci_dev_cmd(unsigned int cmd, void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_req dr;
        int err = 0;

        if (copy_from_user(&dr, arg, sizeof(dr)))
                return -EFAULT;

        hdev = hci_dev_get(dr.dev_id);
        if (!hdev)
                return -ENODEV;

        switch (cmd) {
        case HCISETAUTH:
                err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETENCRYPT:
                if (!lmp_encrypt_capable(hdev)) {
                        err = -EOPNOTSUPP;
                        break;
                }

                if (!test_bit(HCI_AUTH, &hdev->flags)) {
                        /* Auth must be enabled first */
                        err = hci_request(hdev, hci_auth_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                        if (err)
                                break;
                }

                err = hci_request(hdev, hci_encrypt_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETSCAN:
                err = hci_request(hdev, hci_scan_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKPOL:
                err = hci_request(hdev, hci_linkpol_req, dr.dev_opt,
                                        msecs_to_jiffies(HCI_INIT_TIMEOUT));
                break;

        case HCISETLINKMODE:
                hdev->link_mode = ((__u16) dr.dev_opt) &
                                        (HCI_LM_MASTER | HCI_LM_ACCEPT);
                break;

        case HCISETPTYPE:
                hdev->pkt_type = (__u16) dr.dev_opt;
                break;

        case HCISETACLMTU:
                hdev->acl_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->acl_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        case HCISETSCOMTU:
                hdev->sco_mtu  = *((__u16 *) &dr.dev_opt + 1);
                hdev->sco_pkts = *((__u16 *) &dr.dev_opt + 0);
                break;

        default:
                err = -EINVAL;
                break;
        }

        hci_dev_put(hdev);
        return err;
}

int hci_get_dev_list(void __user *arg)
{
        struct hci_dev_list_req *dl;
        struct hci_dev_req *dr;
        struct list_head *p;
        int n = 0, size, err;
        __u16 dev_num;

        if (get_user(dev_num, (__u16 __user *) arg))
                return -EFAULT;

        if (!dev_num || dev_num > (PAGE_SIZE * 2) / sizeof(*dr))
                return -EINVAL;

        size = sizeof(*dl) + dev_num * sizeof(*dr);

        dl = kzalloc(size, GFP_KERNEL);
        if (!dl)
                return -ENOMEM;

        dr = dl->dev_req;

        read_lock_bh(&hci_dev_list_lock);
        list_for_each(p, &hci_dev_list) {
                struct hci_dev *hdev;

                hdev = list_entry(p, struct hci_dev, list);

                hci_del_off_timer(hdev);

                if (!test_bit(HCI_MGMT, &hdev->flags))
                        set_bit(HCI_PAIRABLE, &hdev->flags);

                (dr + n)->dev_id  = hdev->id;
                (dr + n)->dev_opt = hdev->flags;

                if (++n >= dev_num)
                        break;
        }
        read_unlock_bh(&hci_dev_list_lock);

        dl->dev_num = n;
        size = sizeof(*dl) + n * sizeof(*dr);

        err = copy_to_user(arg, dl, size);
        kfree(dl);

        return err ? -EFAULT : 0;
}

int hci_get_dev_info(void __user *arg)
{
        struct hci_dev *hdev;
        struct hci_dev_info di;
        int err = 0;

        if (copy_from_user(&di, arg, sizeof(di)))
                return -EFAULT;

        hdev = hci_dev_get(di.dev_id);
        if (!hdev)
                return -ENODEV;

        hci_del_off_timer(hdev);

        if (!test_bit(HCI_MGMT, &hdev->flags))
                set_bit(HCI_PAIRABLE, &hdev->flags);

        strcpy(di.name, hdev->name);
        di.bdaddr   = hdev->bdaddr;
        di.type     = (hdev->bus & 0x0f) | (hdev->dev_type << 4);
        di.flags    = hdev->flags;
        di.pkt_type = hdev->pkt_type;
        di.acl_mtu  = hdev->acl_mtu;
        di.acl_pkts = hdev->acl_pkts;
        di.sco_mtu  = hdev->sco_mtu;
        di.sco_pkts = hdev->sco_pkts;
        di.link_policy = hdev->link_policy;
        di.link_mode   = hdev->link_mode;

        memcpy(&di.stat, &hdev->stat, sizeof(di.stat));
        memcpy(&di.features, &hdev->features, sizeof(di.features));

        if (copy_to_user(arg, &di, sizeof(di)))
                err = -EFAULT;

        hci_dev_put(hdev);

        return err;
}

/* ---- Interface to HCI drivers ---- */

static int hci_rfkill_set_block(void *data, bool blocked)
{
        struct hci_dev *hdev = data;

        BT_DBG("%p name %s blocked %d", hdev, hdev->name, blocked);

        if (!blocked)
                return 0;

        hci_dev_do_close(hdev);

        return 0;
}

static const struct rfkill_ops hci_rfkill_ops = {
        .set_block = hci_rfkill_set_block,
};

/* Alloc HCI device */
struct hci_dev *hci_alloc_dev(void)
{
        struct hci_dev *hdev;

        hdev = kzalloc(sizeof(struct hci_dev), GFP_KERNEL);
        if (!hdev)
                return NULL;

        skb_queue_head_init(&hdev->driver_init);

        return hdev;
}
EXPORT_SYMBOL(hci_alloc_dev);

/* Free HCI device */
void hci_free_dev(struct hci_dev *hdev)
{
        skb_queue_purge(&hdev->driver_init);

        /* will free via device release */
        put_device(&hdev->dev);
}
EXPORT_SYMBOL(hci_free_dev);

static void hci_power_on(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_on);

        BT_DBG("%s", hdev->name);

        if (hci_dev_open(hdev->id) < 0)
                return;

        if (test_bit(HCI_AUTO_OFF, &hdev->flags))
                mod_timer(&hdev->off_timer,
                                jiffies + msecs_to_jiffies(AUTO_OFF_TIMEOUT));

        if (test_and_clear_bit(HCI_SETUP, &hdev->flags))
                mgmt_index_added(hdev->id);
}

static void hci_power_off(struct work_struct *work)
{
        struct hci_dev *hdev = container_of(work, struct hci_dev, power_off);

        BT_DBG("%s", hdev->name);

        hci_dev_close(hdev->id);
}

static void hci_auto_off(unsigned long data)
{
        struct hci_dev *hdev = (struct hci_dev *) data;

        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);

        queue_work(hdev->workqueue, &hdev->power_off);
}

void hci_del_off_timer(struct hci_dev *hdev)
{
        BT_DBG("%s", hdev->name);

        clear_bit(HCI_AUTO_OFF, &hdev->flags);
        del_timer(&hdev->off_timer);
}

int hci_uuids_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->uuids) {
                struct bt_uuid *uuid;

                uuid = list_entry(p, struct bt_uuid, list);

                list_del(p);
                kfree(uuid);
        }

        return 0;
}

int hci_link_keys_clear(struct hci_dev *hdev)
{
        struct list_head *p, *n;

        list_for_each_safe(p, n, &hdev->link_keys) {
                struct link_key *key;

                key = list_entry(p, struct link_key, list);

                list_del(p);
                kfree(key);
        }

        return 0;
}

struct link_key *hci_find_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct list_head *p;

        list_for_each(p, &hdev->link_keys) {
                struct link_key *k;

                k = list_entry(p, struct link_key, list);

                if (bacmp(bdaddr, &k->bdaddr) == 0)
                        return k;
        }

        return NULL;
}

int hci_add_link_key(struct hci_dev *hdev, int new_key, bdaddr_t *bdaddr,
                                                u8 *val, u8 type, u8 pin_len)
{
        struct link_key *key, *old_key;
        u8 old_key_type;

        old_key = hci_find_link_key(hdev, bdaddr);
        if (old_key) {
                old_key_type = old_key->type;
                key = old_key;
        } else {
                old_key_type = 0xff;
                key = kzalloc(sizeof(*key), GFP_ATOMIC);
                if (!key)
                        return -ENOMEM;
                list_add(&key->list, &hdev->link_keys);
        }

        BT_DBG("%s key for %s type %u", hdev->name, batostr(bdaddr), type);

        bacpy(&key->bdaddr, bdaddr);
        memcpy(key->val, val, 16);
        key->type = type;
        key->pin_len = pin_len;

        if (new_key)
                mgmt_new_key(hdev->id, key, old_key_type);

        if (type == 0x06)
                key->type = old_key_type;

        return 0;
}

int hci_remove_link_key(struct hci_dev *hdev, bdaddr_t *bdaddr)
{
        struct link_key *key;

        key = hci_find_link_key(hdev, bdaddr);
        if (!key)
                return -ENOENT;

        BT_DBG("%s removing %s", hdev->name, batostr(bdaddr));

        list_del(&key->list);
        kfree(key);

        return 0;
}

/* HCI command timer function */
static void hci_cmd_timer(unsigned long arg)
{
        struct hci_dev *hdev = (void *) arg;

        BT_ERR("%s command tx timeout", hdev->name);
        atomic_set(&hdev->cmd_cnt, 1);
        tasklet_schedule(&hdev->cmd_task);
}

/* Register HCI device */
int hci_register_dev(struct hci_dev *hdev)
{
        struct list_head *head = &hci_dev_list, *p;
        int i, id = 0;

        BT_DBG("%p name %s bus %d owner %p", hdev, hdev->name,
                                                hdev->bus, hdev->owner);

        if (!hdev->open || !hdev->close || !hdev->destruct)
                return -EINVAL;

        write_lock_bh(&hci_dev_list_lock);

        /* Find first available device id */
        list_for_each(p, &hci_dev_list) {
                if (list_entry(p, struct hci_dev, list)->id != id)
                        break;
                head = p; id++;
        }

        sprintf(hdev->name, "hci%d", id);
        hdev->id = id;
        list_add(&hdev->list, head);

        atomic_set(&hdev->refcnt, 1);
        spin_lock_init(&hdev->lock);

        hdev->flags = 0;
        hdev->pkt_type  = (HCI_DM1 | HCI_DH1 | HCI_HV1);
        hdev->esco_type = (ESCO_HV1);
        hdev->link_mode = (HCI_LM_ACCEPT);
        hdev->io_capability = 0x03; /* No Input No Output */

        hdev->idle_timeout = 0;
        hdev->sniff_max_interval = 800;
        hdev->sniff_min_interval = 80;

        tasklet_init(&hdev->cmd_task, hci_cmd_task, (unsigned long) hdev);
        tasklet_init(&hdev->rx_task, hci_rx_task, (unsigned long) hdev);
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        skb_queue_head_init(&hdev->rx_q);
        skb_queue_head_init(&hdev->cmd_q);
        skb_queue_head_init(&hdev->raw_q);

        setup_timer(&hdev->cmd_timer, hci_cmd_timer, (unsigned long) hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                hdev->reassembly[i] = NULL;

        init_waitqueue_head(&hdev->req_wait_q);
        mutex_init(&hdev->req_lock);

        inquiry_cache_init(hdev);

        hci_conn_hash_init(hdev);

        INIT_LIST_HEAD(&hdev->blacklist);

        INIT_LIST_HEAD(&hdev->uuids);

        INIT_LIST_HEAD(&hdev->link_keys);

        INIT_WORK(&hdev->power_on, hci_power_on);
        INIT_WORK(&hdev->power_off, hci_power_off);
        setup_timer(&hdev->off_timer, hci_auto_off, (unsigned long) hdev);

        memset(&hdev->stat, 0, sizeof(struct hci_dev_stats));

        atomic_set(&hdev->promisc, 0);

        write_unlock_bh(&hci_dev_list_lock);

        hdev->workqueue = create_singlethread_workqueue(hdev->name);
        if (!hdev->workqueue)
                goto nomem;

        hci_register_sysfs(hdev);

        hdev->rfkill = rfkill_alloc(hdev->name, &hdev->dev,
                                RFKILL_TYPE_BLUETOOTH, &hci_rfkill_ops, hdev);
        if (hdev->rfkill) {
                if (rfkill_register(hdev->rfkill) < 0) {
                        rfkill_destroy(hdev->rfkill);
                        hdev->rfkill = NULL;
                }
        }

        set_bit(HCI_AUTO_OFF, &hdev->flags);
        set_bit(HCI_SETUP, &hdev->flags);
        queue_work(hdev->workqueue, &hdev->power_on);

        hci_notify(hdev, HCI_DEV_REG);

        return id;

nomem:
        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        return -ENOMEM;
}
EXPORT_SYMBOL(hci_register_dev);
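
/*
 * Driver-side sketch (hypothetical example_* callbacks, kept out of
 * the build): a transport driver allocates the device, fills in at
 * least the open/close/destruct hooks checked above (plus send,
 * which hci_send_frame() relies on), and registers it.
 */
#if 0
        struct hci_dev *hdev = hci_alloc_dev();
        if (!hdev)
                return -ENOMEM;

        hdev->bus      = HCI_USB;
        hdev->open     = example_open;
        hdev->close    = example_close;
        hdev->send     = example_send;
        hdev->destruct = example_destruct;

        if (hci_register_dev(hdev) < 0)
                hci_free_dev(hdev);
#endif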

/* Unregister HCI device */
int hci_unregister_dev(struct hci_dev *hdev)
{
        int i;

        BT_DBG("%p name %s bus %d", hdev, hdev->name, hdev->bus);

        write_lock_bh(&hci_dev_list_lock);
        list_del(&hdev->list);
        write_unlock_bh(&hci_dev_list_lock);

        hci_dev_do_close(hdev);

        for (i = 0; i < NUM_REASSEMBLY; i++)
                kfree_skb(hdev->reassembly[i]);

        if (!test_bit(HCI_INIT, &hdev->flags) &&
                                        !test_bit(HCI_SETUP, &hdev->flags))
                mgmt_index_removed(hdev->id);

        hci_notify(hdev, HCI_DEV_UNREG);

        if (hdev->rfkill) {
                rfkill_unregister(hdev->rfkill);
                rfkill_destroy(hdev->rfkill);
        }

        hci_unregister_sysfs(hdev);

        hci_del_off_timer(hdev);

        destroy_workqueue(hdev->workqueue);

        hci_dev_lock_bh(hdev);
        hci_blacklist_clear(hdev);
        hci_uuids_clear(hdev);
        hci_link_keys_clear(hdev);
        hci_dev_unlock_bh(hdev);

        __hci_dev_put(hdev);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_dev);

/* Suspend HCI device */
int hci_suspend_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_SUSPEND);
        return 0;
}
EXPORT_SYMBOL(hci_suspend_dev);

/* Resume HCI device */
int hci_resume_dev(struct hci_dev *hdev)
{
        hci_notify(hdev, HCI_DEV_RESUME);
        return 0;
}
EXPORT_SYMBOL(hci_resume_dev);

/* Receive frame from HCI drivers */
int hci_recv_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;
        if (!hdev || (!test_bit(HCI_UP, &hdev->flags)
                                && !test_bit(HCI_INIT, &hdev->flags))) {
                kfree_skb(skb);
                return -ENXIO;
        }

        /* Incoming skb */
        bt_cb(skb)->incoming = 1;

        /* Time stamp */
        __net_timestamp(skb);

        /* Queue frame for rx task */
        skb_queue_tail(&hdev->rx_q, skb);
        tasklet_schedule(&hdev->rx_task);

        return 0;
}
EXPORT_SYMBOL(hci_recv_frame);
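
/*
 * Sketch of the driver side of this interface (assumed usage, kept
 * out of the build; buf, len and hdev come from the transport):
 * build an skb for a complete frame, tag it, and hand it to the core.
 */
#if 0
        struct sk_buff *skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb)
                return -ENOMEM;

        memcpy(skb_put(skb, len), buf, len);
        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_EVENT_PKT;
        hci_recv_frame(skb);
#endif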

static int hci_reassembly(struct hci_dev *hdev, int type, void *data,
                          int count, __u8 index, gfp_t gfp_mask)
{
        int len = 0;
        int hlen = 0;
        int remain = count;
        struct sk_buff *skb;
        struct bt_skb_cb *scb;

        if ((type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT) ||
                                index >= NUM_REASSEMBLY)
                return -EILSEQ;

        skb = hdev->reassembly[index];

        if (!skb) {
                switch (type) {
                case HCI_ACLDATA_PKT:
                        len = HCI_MAX_FRAME_SIZE;
                        hlen = HCI_ACL_HDR_SIZE;
                        break;
                case HCI_EVENT_PKT:
                        len = HCI_MAX_EVENT_SIZE;
                        hlen = HCI_EVENT_HDR_SIZE;
                        break;
                case HCI_SCODATA_PKT:
                        len = HCI_MAX_SCO_SIZE;
                        hlen = HCI_SCO_HDR_SIZE;
                        break;
                }

                skb = bt_skb_alloc(len, gfp_mask);
                if (!skb)
                        return -ENOMEM;

                scb = (void *) skb->cb;
                scb->expect = hlen;
                scb->pkt_type = type;

                skb->dev = (void *) hdev;
                hdev->reassembly[index] = skb;
        }

        while (count) {
                scb = (void *) skb->cb;
                len = min(scb->expect, (__u16)count);

                memcpy(skb_put(skb, len), data, len);

                count -= len;
                data += len;
                scb->expect -= len;
                remain = count;

                switch (type) {
                case HCI_EVENT_PKT:
                        if (skb->len == HCI_EVENT_HDR_SIZE) {
                                struct hci_event_hdr *h = hci_event_hdr(skb);
                                scb->expect = h->plen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_ACLDATA_PKT:
                        if (skb->len == HCI_ACL_HDR_SIZE) {
                                struct hci_acl_hdr *h = hci_acl_hdr(skb);
                                scb->expect = __le16_to_cpu(h->dlen);

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;

                case HCI_SCODATA_PKT:
                        if (skb->len == HCI_SCO_HDR_SIZE) {
                                struct hci_sco_hdr *h = hci_sco_hdr(skb);
                                scb->expect = h->dlen;

                                if (skb_tailroom(skb) < scb->expect) {
                                        kfree_skb(skb);
                                        hdev->reassembly[index] = NULL;
                                        return -ENOMEM;
                                }
                        }
                        break;
                }

                if (scb->expect == 0) {
                        /* Complete frame */

                        bt_cb(skb)->pkt_type = type;
                        hci_recv_frame(skb);

                        hdev->reassembly[index] = NULL;
                        return remain;
                }
        }

        return remain;
}

int hci_recv_fragment(struct hci_dev *hdev, int type, void *data, int count)
{
        int rem = 0;

        if (type < HCI_ACLDATA_PKT || type > HCI_EVENT_PKT)
                return -EILSEQ;

        while (count) {
                rem = hci_reassembly(hdev, type, data, count,
                                                type - 1, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_fragment);

#define STREAM_REASSEMBLY 0

int hci_recv_stream_fragment(struct hci_dev *hdev, void *data, int count)
{
        int type;
        int rem = 0;

        while (count) {
                struct sk_buff *skb = hdev->reassembly[STREAM_REASSEMBLY];

                if (!skb) {
                        struct { char type; } *pkt;

                        /* Start of the frame */
                        pkt = data;
                        type = pkt->type;

                        data++;
                        count--;
                } else
                        type = bt_cb(skb)->pkt_type;

                rem = hci_reassembly(hdev, type, data,
                                        count, STREAM_REASSEMBLY, GFP_ATOMIC);
                if (rem < 0)
                        return rem;

                data += (count - rem);
                count = rem;
        }

        return rem;
}
EXPORT_SYMBOL(hci_recv_stream_fragment);

/* ---- Interface to upper protocols ---- */

/* Register/Unregister protocols.
 * hci_task_lock is used to ensure that no tasks are running. */
int hci_register_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (!hci_proto[hp->id])
                hci_proto[hp->id] = hp;
        else
                err = -EEXIST;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_register_proto);

int hci_unregister_proto(struct hci_proto *hp)
{
        int err = 0;

        BT_DBG("%p name %s id %d", hp, hp->name, hp->id);

        if (hp->id >= HCI_MAX_PROTO)
                return -EINVAL;

        write_lock_bh(&hci_task_lock);

        if (hci_proto[hp->id])
                hci_proto[hp->id] = NULL;
        else
                err = -ENOENT;

        write_unlock_bh(&hci_task_lock);

        return err;
}
EXPORT_SYMBOL(hci_unregister_proto);

int hci_register_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_add(&cb->list, &hci_cb_list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_register_cb);

int hci_unregister_cb(struct hci_cb *cb)
{
        BT_DBG("%p name %s", cb, cb->name);

        write_lock_bh(&hci_cb_list_lock);
        list_del(&cb->list);
        write_unlock_bh(&hci_cb_list_lock);

        return 0;
}
EXPORT_SYMBOL(hci_unregister_cb);

static int hci_send_frame(struct sk_buff *skb)
{
        struct hci_dev *hdev = (struct hci_dev *) skb->dev;

        if (!hdev) {
                kfree_skb(skb);
                return -ENODEV;
        }

        BT_DBG("%s type %d len %d", hdev->name, bt_cb(skb)->pkt_type, skb->len);

        if (atomic_read(&hdev->promisc)) {
                /* Time stamp */
                __net_timestamp(skb);

                hci_send_to_sock(hdev, skb, NULL);
        }

        /* Get rid of skb owner, prior to sending to the driver. */
        skb_orphan(skb);

        return hdev->send(skb);
}

/* Send HCI command */
int hci_send_cmd(struct hci_dev *hdev, __u16 opcode, __u32 plen, void *param)
{
        int len = HCI_COMMAND_HDR_SIZE + plen;
        struct hci_command_hdr *hdr;
        struct sk_buff *skb;

        BT_DBG("%s opcode 0x%x plen %d", hdev->name, opcode, plen);

        skb = bt_skb_alloc(len, GFP_ATOMIC);
        if (!skb) {
                BT_ERR("%s no memory for command", hdev->name);
                return -ENOMEM;
        }

        hdr = (struct hci_command_hdr *) skb_put(skb, HCI_COMMAND_HDR_SIZE);
        hdr->opcode = cpu_to_le16(opcode);
        hdr->plen   = plen;

        if (plen)
                memcpy(skb_put(skb, plen), param, plen);

        BT_DBG("skb len %d", skb->len);

        bt_cb(skb)->pkt_type = HCI_COMMAND_PKT;
        skb->dev = (void *) hdev;

        if (test_bit(HCI_INIT, &hdev->flags))
                hdev->init_last_cmd = opcode;

        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);

        return 0;
}

/* Get data from the previously sent command */
void *hci_sent_cmd_data(struct hci_dev *hdev, __u16 opcode)
{
        struct hci_command_hdr *hdr;

        if (!hdev->sent_cmd)
                return NULL;

        hdr = (void *) hdev->sent_cmd->data;

        if (hdr->opcode != cpu_to_le16(opcode))
                return NULL;

        BT_DBG("%s opcode 0x%x", hdev->name, opcode);

        return hdev->sent_cmd->data + HCI_COMMAND_HDR_SIZE;
}

/* Send ACL data */
static void hci_add_acl_hdr(struct sk_buff *skb, __u16 handle, __u16 flags)
{
        struct hci_acl_hdr *hdr;
        int len = skb->len;

        skb_push(skb, HCI_ACL_HDR_SIZE);
        skb_reset_transport_header(skb);
        hdr = (struct hci_acl_hdr *)skb_transport_header(skb);
        hdr->handle = cpu_to_le16(hci_handle_pack(handle, flags));
        hdr->dlen   = cpu_to_le16(len);
}

void hci_send_acl(struct hci_conn *conn, struct sk_buff *skb, __u16 flags)
{
        struct hci_dev *hdev = conn->hdev;
        struct sk_buff *list;

        BT_DBG("%s conn %p flags 0x%x", hdev->name, conn, flags);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
        hci_add_acl_hdr(skb, conn->handle, flags);

        list = skb_shinfo(skb)->frag_list;
        if (!list) {
                /* Non fragmented */
                BT_DBG("%s nonfrag skb %p len %d", hdev->name, skb, skb->len);

                skb_queue_tail(&conn->data_q, skb);
        } else {
                /* Fragmented */
                BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                skb_shinfo(skb)->frag_list = NULL;

                /* Queue all fragments atomically */
                spin_lock_bh(&conn->data_q.lock);

                __skb_queue_tail(&conn->data_q, skb);

                flags &= ~ACL_START;
                flags |= ACL_CONT;
                do {
                        skb = list; list = list->next;

                        skb->dev = (void *) hdev;
                        bt_cb(skb)->pkt_type = HCI_ACLDATA_PKT;
                        hci_add_acl_hdr(skb, conn->handle, flags);

                        BT_DBG("%s frag %p len %d", hdev->name, skb, skb->len);

                        __skb_queue_tail(&conn->data_q, skb);
                } while (list);

                spin_unlock_bh(&conn->data_q.lock);
        }

        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_acl);

/* Send SCO data */
void hci_send_sco(struct hci_conn *conn, struct sk_buff *skb)
{
        struct hci_dev *hdev = conn->hdev;
        struct hci_sco_hdr hdr;

        BT_DBG("%s len %d", hdev->name, skb->len);

        hdr.handle = cpu_to_le16(conn->handle);
        hdr.dlen   = skb->len;

        skb_push(skb, HCI_SCO_HDR_SIZE);
        skb_reset_transport_header(skb);
        memcpy(skb_transport_header(skb), &hdr, HCI_SCO_HDR_SIZE);

        skb->dev = (void *) hdev;
        bt_cb(skb)->pkt_type = HCI_SCODATA_PKT;

        skb_queue_tail(&conn->data_q, skb);
        tasklet_schedule(&hdev->tx_task);
}
EXPORT_SYMBOL(hci_send_sco);

/* ---- HCI TX task (outgoing data) ---- */

/* HCI Connection scheduler */
static inline struct hci_conn *hci_low_sent(struct hci_dev *hdev, __u8 type, int *quote)
{
        struct hci_conn_hash *h = &hdev->conn_hash;
        struct hci_conn *conn = NULL;
        int num = 0, min = ~0;
        struct list_head *p;

        /* We don't have to lock device here. Connections are always
         * added and removed with TX task disabled. */
        list_for_each(p, &h->list) {
                struct hci_conn *c;
                c = list_entry(p, struct hci_conn, list);

                if (c->type != type || skb_queue_empty(&c->data_q))
                        continue;

                if (c->state != BT_CONNECTED && c->state != BT_CONFIG)
                        continue;

                num++;

                if (c->sent < min) {
                        min  = c->sent;
                        conn = c;
                }
        }

        if (conn) {
                int cnt, q;

                switch (conn->type) {
                case ACL_LINK:
                        cnt = hdev->acl_cnt;
                        break;
                case SCO_LINK:
                case ESCO_LINK:
                        cnt = hdev->sco_cnt;
                        break;
                case LE_LINK:
                        cnt = hdev->le_mtu ? hdev->le_cnt : hdev->acl_cnt;
                        break;
                default:
                        cnt = 0;
                        BT_ERR("Unknown link type");
                }

                q = cnt / num;
                *quote = q ? q : 1;
        } else
                *quote = 0;

        BT_DBG("conn %p quote %d", conn, *quote);
        return conn;
}
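
/*
 * Worked example of the quota above: with 8 free ACL slots
 * (hdev->acl_cnt == 8) and num == 3 eligible connections,
 * q = 8 / 3 = 2, so the least recently served connection may send
 * two frames this pass; when cnt / num rounds down to zero the
 * connection still gets a quote of one.
 */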
1727
1728 static inline void hci_link_tx_to(struct hci_dev *hdev, __u8 type)
1729 {
1730         struct hci_conn_hash *h = &hdev->conn_hash;
1731         struct list_head *p;
1732         struct hci_conn  *c;
1733
1734         BT_ERR("%s link tx timeout", hdev->name);
1735
1736         /* Kill stalled connections */
1737         list_for_each(p, &h->list) {
1738                 c = list_entry(p, struct hci_conn, list);
1739                 if (c->type == type && c->sent) {
1740                         BT_ERR("%s killing stalled connection %s",
1741                                 hdev->name, batostr(&c->dst));
1742                         hci_acl_disconn(c, 0x13);
1743                 }
1744         }
1745 }
1746
1747 static inline void hci_sched_acl(struct hci_dev *hdev)
1748 {
1749         struct hci_conn *conn;
1750         struct sk_buff *skb;
1751         int quote;
1752
1753         BT_DBG("%s", hdev->name);
1754
1755         if (!test_bit(HCI_RAW, &hdev->flags)) {
1756                 /* ACL tx timeout must be longer than maximum
1757                  * link supervision timeout (40.9 seconds) */
1758                 if (!hdev->acl_cnt && time_after(jiffies, hdev->acl_last_tx + HZ * 45))
1759                         hci_link_tx_to(hdev, ACL_LINK);
1760         }
1761
1762         while (hdev->acl_cnt && (conn = hci_low_sent(hdev, ACL_LINK, &quote))) {
1763                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1764                         BT_DBG("skb %p len %d", skb, skb->len);
1765
1766                         hci_conn_enter_active_mode(conn);
1767
1768                         hci_send_frame(skb);
1769                         hdev->acl_last_tx = jiffies;
1770
1771                         hdev->acl_cnt--;
1772                         conn->sent++;
1773                 }
1774         }
1775 }
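/*
 * Editor's note: the 45-second deadline above is checked with time_after(),
 * which stays correct even when the jiffies counter wraps. The idiom from
 * <linux/jiffies.h> reduces to a signed subtraction (sketch only; the real
 * macro also typechecks its arguments):
 */
/* true when a is later than b, wrap-safe on the free-running counter */
#define sketch_time_after(a, b)         ((long)((b) - (a)) < 0)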
1776
1777 /* Schedule SCO */
1778 static inline void hci_sched_sco(struct hci_dev *hdev)
1779 {
1780         struct hci_conn *conn;
1781         struct sk_buff *skb;
1782         int quote;
1783
1784         BT_DBG("%s", hdev->name);
1785
1786         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, SCO_LINK, &quote))) {
1787                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1788                         BT_DBG("skb %p len %d", skb, skb->len);
1789                         hci_send_frame(skb);
1790
1791                         conn->sent++;
1792                         if (conn->sent == ~0)
1793                                 conn->sent = 0;
1794                 }
1795         }
1796 }
1797
1798 static inline void hci_sched_esco(struct hci_dev *hdev)
1799 {
1800         struct hci_conn *conn;
1801         struct sk_buff *skb;
1802         int quote;
1803
1804         BT_DBG("%s", hdev->name);
1805
1806         while (hdev->sco_cnt && (conn = hci_low_sent(hdev, ESCO_LINK, &quote))) {
1807                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1808                         BT_DBG("skb %p len %d", skb, skb->len);
1809                         hci_send_frame(skb);
1810
1811                         conn->sent++;
1812                         if (conn->sent == ~0)
1813                                 conn->sent = 0;
1814                 }
1815         }
1816 }
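/*
 * Editor's note: the conn->sent clamp at ~0 in both SCO schedulers above
 * appears to be overflow protection: SCO completions are not reliably
 * reported by every controller, so the per-connection counter is reset
 * rather than allowed to wrap and distort the least-sent comparison in
 * hci_low_sent().
 */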
1817
1818 static inline void hci_sched_le(struct hci_dev *hdev)
1819 {
1820         struct hci_conn *conn;
1821         struct sk_buff *skb;
1822         int quote, cnt;
1823
1824         BT_DBG("%s", hdev->name);
1825
1826         if (!test_bit(HCI_RAW, &hdev->flags)) {
1827                 /* LE tx timeout must be longer than maximum
1828                  * link supervision timeout (40.9 seconds) */
1829                 if (!hdev->le_cnt && hdev->le_pkts &&
1830                                 time_after(jiffies, hdev->le_last_tx + HZ * 45))
1831                         hci_link_tx_to(hdev, LE_LINK);
1832         }
1833
1834         cnt = hdev->le_pkts ? hdev->le_cnt : hdev->acl_cnt;
1835         while (cnt && (conn = hci_low_sent(hdev, LE_LINK, &quote))) {
1836                 while (quote-- && (skb = skb_dequeue(&conn->data_q))) {
1837                         BT_DBG("skb %p len %d", skb, skb->len);
1838
1839                         hci_send_frame(skb);
1840                         hdev->le_last_tx = jiffies;
1841
1842                         cnt--;
1843                         conn->sent++;
1844                 }
1845         }
1846         if (hdev->le_pkts)
1847                 hdev->le_cnt = cnt;
1848         else
1849                 hdev->acl_cnt = cnt;
1850 }
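/*
 * Editor's note: a controller may answer LE Read Buffer Size with zero
 * dedicated LE buffers, in which case LE ACL data shares the BR/EDR ACL
 * pool; that is why cnt is seeded from acl_cnt when le_pkts is 0 and the
 * remainder is written back to whichever pool was drawn from. A toy model
 * of the pool selection, independent of kernel types (hypothetical helper):
 */
static unsigned int *le_pool(unsigned int le_pkts,
                             unsigned int *le_cnt, unsigned int *acl_cnt)
{
        /* dedicated LE buffers if the controller reports any, else ACL */
        return le_pkts ? le_cnt : acl_cnt;
}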
1851
1852 static void hci_tx_task(unsigned long arg)
1853 {
1854         struct hci_dev *hdev = (struct hci_dev *) arg;
1855         struct sk_buff *skb;
1856
1857         read_lock(&hci_task_lock);
1858
1859         BT_DBG("%s acl %d sco %d le %d", hdev->name, hdev->acl_cnt,
1860                 hdev->sco_cnt, hdev->le_cnt);
1861
1862         /* Schedule queues and send stuff to HCI driver */
1863
1864         hci_sched_acl(hdev);
1865
1866         hci_sched_sco(hdev);
1867
1868         hci_sched_esco(hdev);
1869
1870         hci_sched_le(hdev);
1871
1872         /* Send next queued raw (unknown type) packet */
1873         while ((skb = skb_dequeue(&hdev->raw_q)))
1874                 hci_send_frame(skb);
1875
1876         read_unlock(&hci_task_lock);
1877 }
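/*
 * Editor's note: hci_tx_task() runs as a tasklet, as do the RX and command
 * handlers below. It is bound to the device during registration
 * (hci_register_dev() earlier in this file) in essentially this shape,
 * after which the ACL/SCO send paths and event handlers kick the queue:
 */
        tasklet_init(&hdev->tx_task, hci_tx_task, (unsigned long) hdev);

        /* later, whenever new data is queued or buffers free up: */
        tasklet_schedule(&hdev->tx_task);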
1878
1879 /* ----- HCI RX task (incoming data processing) ----- */
1880
1881 /* ACL data packet */
1882 static inline void hci_acldata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1883 {
1884         struct hci_acl_hdr *hdr = (void *) skb->data;
1885         struct hci_conn *conn;
1886         __u16 handle, flags;
1887
1888         skb_pull(skb, HCI_ACL_HDR_SIZE);
1889
1890         handle = __le16_to_cpu(hdr->handle);
1891         flags  = hci_flags(handle);
1892         handle = hci_handle(handle);
1893
1894         BT_DBG("%s len %d handle 0x%x flags 0x%x", hdev->name, skb->len, handle, flags);
1895
1896         hdev->stat.acl_rx++;
1897
1898         hci_dev_lock(hdev);
1899         conn = hci_conn_hash_lookup_handle(hdev, handle);
1900         hci_dev_unlock(hdev);
1901
1902         if (conn) {
1903                 register struct hci_proto *hp;
1904
1905                 hci_conn_enter_active_mode(conn);
1906
1907                 /* Send to upper protocol */
1908                 hp = hci_proto[HCI_PROTO_L2CAP];
1909                 if (hp && hp->recv_acldata) {
1910                         hp->recv_acldata(conn, skb, flags);
1911                         return;
1912                 }
1913         } else {
1914                 BT_ERR("%s ACL packet for unknown connection handle %d",
1915                         hdev->name, handle);
1916         }
1917
1918         kfree_skb(skb);
1919 }
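/*
 * Editor's note: the 16-bit field at the start of the ACL header packs a
 * 12-bit connection handle with the Packet Boundary and Broadcast flags in
 * the top four bits. The hci_handle()/hci_flags() helpers used above come
 * from <net/bluetooth/hci.h> and reduce to:
 */
#define sketch_hci_handle(h)    ((h) & 0x0fff)  /* low 12 bits: handle  */
#define sketch_hci_flags(h)     ((h) >> 12)     /* top 4 bits: PB | BC  */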
1920
1921 /* SCO data packet */
1922 static inline void hci_scodata_packet(struct hci_dev *hdev, struct sk_buff *skb)
1923 {
1924         struct hci_sco_hdr *hdr = (void *) skb->data;
1925         struct hci_conn *conn;
1926         __u16 handle;
1927
1928         skb_pull(skb, HCI_SCO_HDR_SIZE);
1929
1930         handle = __le16_to_cpu(hdr->handle);
1931
1932         BT_DBG("%s len %d handle 0x%x", hdev->name, skb->len, handle);
1933
1934         hdev->stat.sco_rx++;
1935
1936         hci_dev_lock(hdev);
1937         conn = hci_conn_hash_lookup_handle(hdev, handle);
1938         hci_dev_unlock(hdev);
1939
1940         if (conn) {
1941                 register struct hci_proto *hp;
1942
1943                 /* Send to upper protocol */
1944                 hp = hci_proto[HCI_PROTO_SCO];
1945                 if (hp && hp->recv_scodata) {
1946                         hp->recv_scodata(conn, skb);
1947                         return;
1948                 }
1949         } else {
1950                 BT_ERR("%s SCO packet for unknown connection handle %d",
1951                         hdev->name, handle);
1952         }
1953
1954         kfree_skb(skb);
1955 }
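/*
 * Editor's note: unlike ACL, the SCO path takes the handle as-is and has
 * no flag bits to strip. For reference, the header consumed by skb_pull()
 * above is declared in <net/bluetooth/hci.h> roughly as:
 */
struct sketch_hci_sco_hdr {
        __le16  handle;
        __u8    dlen;
} __attribute__ ((packed));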
1956
1957 static void hci_rx_task(unsigned long arg)
1958 {
1959         struct hci_dev *hdev = (struct hci_dev *) arg;
1960         struct sk_buff *skb;
1961
1962         BT_DBG("%s", hdev->name);
1963
1964         read_lock(&hci_task_lock);
1965
1966         while ((skb = skb_dequeue(&hdev->rx_q))) {
1967                 if (atomic_read(&hdev->promisc)) {
1968                         /* Send copy to the sockets */
1969                         hci_send_to_sock(hdev, skb, NULL);
1970                 }
1971
1972                 if (test_bit(HCI_RAW, &hdev->flags)) {
1973                         kfree_skb(skb);
1974                         continue;
1975                 }
1976
1977                 if (test_bit(HCI_INIT, &hdev->flags)) {
1978                         /* Don't process data packets in this state. */
1979                         switch (bt_cb(skb)->pkt_type) {
1980                         case HCI_ACLDATA_PKT:
1981                         case HCI_SCODATA_PKT:
1982                                 kfree_skb(skb);
1983                                 continue;
1984                         }
1985                 }
1986
1987                 /* Process frame */
1988                 switch (bt_cb(skb)->pkt_type) {
1989                 case HCI_EVENT_PKT:
1990                         hci_event_packet(hdev, skb);
1991                         break;
1992
1993                 case HCI_ACLDATA_PKT:
1994                         BT_DBG("%s ACL data packet", hdev->name);
1995                         hci_acldata_packet(hdev, skb);
1996                         break;
1997
1998                 case HCI_SCODATA_PKT:
1999                         BT_DBG("%s SCO data packet", hdev->name);
2000                         hci_scodata_packet(hdev, skb);
2001                         break;
2002
2003                 default:
2004                         kfree_skb(skb);
2005                         break;
2006                 }
2007         }
2008
2009         read_unlock(&hci_task_lock);
2010 }
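/*
 * Editor's note: the rx_q drained here is filled from driver interrupt
 * context via hci_recv_frame() (defined earlier in this file), whose core
 * is essentially the following two steps:
 */
        /* queue the frame and defer processing to the RX tasklet */
        skb_queue_tail(&hdev->rx_q, skb);
        tasklet_schedule(&hdev->rx_task);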
2011
2012 static void hci_cmd_task(unsigned long arg)
2013 {
2014         struct hci_dev *hdev = (struct hci_dev *) arg;
2015         struct sk_buff *skb;
2016
2017         BT_DBG("%s cmd %d", hdev->name, atomic_read(&hdev->cmd_cnt));
2018
2019         /* Send queued commands */
2020         if (atomic_read(&hdev->cmd_cnt)) {
2021                 skb = skb_dequeue(&hdev->cmd_q);
2022                 if (!skb)
2023                         return;
2024
2025                 kfree_skb(hdev->sent_cmd);
2026
2027                 hdev->sent_cmd = skb_clone(skb, GFP_ATOMIC);
2028                 if (hdev->sent_cmd) {
2029                         atomic_dec(&hdev->cmd_cnt);
2030                         hci_send_frame(skb);
2031                         mod_timer(&hdev->cmd_timer,
2032                                   jiffies + msecs_to_jiffies(HCI_CMD_TIMEOUT));
2033                 } else {
2034                         skb_queue_head(&hdev->cmd_q, skb);
2035                         tasklet_schedule(&hdev->cmd_task);
2036                 }
2037         }
2038 }
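/*
 * Editor's note: hdev->cmd_cnt mirrors the controller's command window: it
 * is decremented here when a command is handed to the driver and refilled
 * by the event handler from the Num_HCI_Command_Packets field of Command
 * Complete / Command Status events, while cmd_timer catches controllers
 * that stop responding. The producer side, hci_send_cmd() earlier in this
 * file, ends in essence with:
 */
        skb_queue_tail(&hdev->cmd_q, skb);
        tasklet_schedule(&hdev->cmd_task);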