- Update Xen patches to 3.3-rc5 and c/s 1157.
[linux-flexiantxendom0-3.2.10.git] / drivers / xen / usbback / usbback.c
1 /*
2  * usbback.c
3  *
4  * Xen USB backend driver
5  *
6  * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
7  * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
8  *
9  * This program is free software; you can redistribute it and/or modify
10  * it under the terms of the GNU General Public License as published by
11  * the Free Software Foundation; either version 2 of the License, or
12  * (at your option) any later version.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, see <http://www.gnu.org/licenses/>.
21  *
22  * or, by your choice,
23  *
24  * When distributed separately from the Linux kernel or incorporated into
25  * other software packages, subject to the following license:
26  *
27  * Permission is hereby granted, free of charge, to any person obtaining a copy
28  * of this software and associated documentation files (the "Software"), to
29  * deal in the Software without restriction, including without limitation the
30  * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
31  * sell copies of the Software, and to permit persons to whom the Software is
32  * furnished to do so, subject to the following conditions:
33  *
34  * The above copyright notice and this permission notice shall be included in
35  * all copies or substantial portions of the Software.
36  *
37  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
38  * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
39  * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
40  * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
41  * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
42  * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
43  * DEALINGS IN THE SOFTWARE.
44  */
45
46 #include <linux/mm.h>
47 #include <xen/balloon.h>
48 #include <xen/evtchn.h>
49 #include <xen/gnttab.h>
50 #include "usbback.h"
51
52 #if 0
53 #include "../../usb/core/hub.h"
54 #endif
55
/* Number of pending_req_t slots to allocate; tunable via the "reqs"
 * module parameter at load time. */
int usbif_reqs = USBIF_BACK_MAX_PENDING_REQS;
module_param_named(reqs, usbif_reqs, int, 0);
MODULE_PARM_DESC(reqs, "Number of usbback requests to allocate");
59
/* One grant-mapped page segment of a request's transfer buffer. */
struct pending_req_segment {
	uint16_t offset;	/* byte offset of the data within the page */
	uint16_t length;	/* number of data bytes in this segment */
};
64
/*
 * Bookkeeping for one in-flight frontend URB request, from the moment
 * it is taken off the ring until its response has been sent.
 */
typedef struct {
	usbif_t *usbif;			/* owning backend interface */

	uint16_t id; /* request id */

	struct usbstub *stub;		/* device stub the request targets */
	struct list_head urb_list;	/* entry in stub->submitting_list */

	/* urb */
	struct urb *urb;		/* URB built from this request */
	void *buffer;			/* coherent transfer buffer, or NULL */
	dma_addr_t transfer_dma;	/* DMA handle of @buffer */
	struct usb_ctrlrequest *setup;	/* control setup packet, or NULL */

	/* request segments */
	uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */
	uint16_t nr_extra_segs; /* number of iso_frame_desc segments (ISO) */
	struct pending_req_segment *seg; /* per-segment offset/length array */

	struct list_head free_list;	/* entry in the pending_free pool */
} pending_req_t;
86
/* Pool of pre-allocated request descriptors (usbif_reqs entries). */
static pending_req_t *pending_reqs;
static struct list_head pending_free;		/* free descriptors */
static DEFINE_SPINLOCK(pending_free_lock);	/* protects pending_free */
static LIST_HEAD(pending_urb_free);		/* URBs awaiting deferred release */
static DEFINE_SPINLOCK(urb_free_lock);		/* protects pending_urb_free */
static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq); /* waiters for a free descriptor */

/* Marks a grant handle slot that is unmapped or failed to map. */
#define USBBACK_INVALID_HANDLE (~0)

/* One page and one grant handle per possible segment of every request. */
static struct page **pending_pages;
static grant_handle_t *pending_grant_handles;
98
99 static inline int vaddr_pagenr(pending_req_t *req, int seg)
100 {
101         return (req - pending_reqs) * USBIF_MAX_SEGMENTS_PER_REQUEST + seg;
102 }
103
104 static inline unsigned long vaddr(pending_req_t *req, int seg)
105 {
106         unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
107         return (unsigned long)pfn_to_kaddr(pfn);
108 }
109
/* Grant handle recorded for segment @_seg of request @_req. */
#define pending_handle(_req, _seg) \
	(pending_grant_handles[vaddr_pagenr(_req, _seg)])
112
113 static pending_req_t *alloc_req(void)
114 {
115         pending_req_t *req = NULL;
116         unsigned long flags;
117
118         spin_lock_irqsave(&pending_free_lock, flags);
119         if (!list_empty(&pending_free)) {
120                 req = list_entry(pending_free.next, pending_req_t, free_list);
121                 list_del(&req->free_list);
122         }
123         spin_unlock_irqrestore(&pending_free_lock, flags);
124         return req;
125 }
126
127 static void free_req(pending_req_t *req)
128 {
129         unsigned long flags;
130         int was_empty;
131
132         spin_lock_irqsave(&pending_free_lock, flags);
133         was_empty = list_empty(&pending_free);
134         list_add(&req->free_list, &pending_free);
135         spin_unlock_irqrestore(&pending_free_lock, flags);
136         if (was_empty)
137                 wake_up(&pending_free_wq);
138 }
139
140 static inline void add_req_to_submitting_list(struct usbstub *stub, pending_req_t *pending_req)
141 {
142         unsigned long flags;
143
144         spin_lock_irqsave(&stub->submitting_lock, flags);
145         list_add_tail(&pending_req->urb_list, &stub->submitting_list);
146         spin_unlock_irqrestore(&stub->submitting_lock, flags);
147 }
148
149 static inline void remove_req_from_submitting_list(struct usbstub *stub, pending_req_t *pending_req)
150 {
151         unsigned long flags;
152
153         spin_lock_irqsave(&stub->submitting_lock, flags);
154         list_del_init(&pending_req->urb_list);
155         spin_unlock_irqrestore(&stub->submitting_lock, flags);
156 }
157
158 void usbbk_unlink_urbs(struct usbstub *stub)
159 {
160         pending_req_t *req, *tmp;
161         unsigned long flags;
162
163         spin_lock_irqsave(&stub->submitting_lock, flags);
164         list_for_each_entry_safe(req, tmp, &stub->submitting_list, urb_list) {
165                 usb_unlink_urb(req->urb);
166         }
167         spin_unlock_irqrestore(&stub->submitting_lock, flags);
168 }
169
170 static void fast_flush_area(pending_req_t *pending_req)
171 {
172         struct gnttab_unmap_grant_ref unmap[USBIF_MAX_SEGMENTS_PER_REQUEST];
173         unsigned int i, nr_segs, invcount = 0;
174         grant_handle_t handle;
175         int ret;
176
177         nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs;
178
179         if (nr_segs) {
180                 for (i = 0; i < nr_segs; i++) {
181                         handle = pending_handle(pending_req, i);
182                         if (handle == USBBACK_INVALID_HANDLE)
183                                 continue;
184                         gnttab_set_unmap_op(&unmap[invcount], vaddr(pending_req, i),
185                                             GNTMAP_host_map, handle);
186                         pending_handle(pending_req, i) = USBBACK_INVALID_HANDLE;
187                         invcount++;
188                 }
189
190                 ret = HYPERVISOR_grant_table_op(
191                         GNTTABOP_unmap_grant_ref, unmap, invcount);
192                 BUG_ON(ret);
193
194                 kfree(pending_req->seg);
195         }
196
197         return;
198 }
199
200 static void copy_buff_to_pages(void *buff, pending_req_t *pending_req,
201                 int start, int nr_pages)
202 {
203         unsigned long copied = 0;
204         int i;
205
206         for (i = start; i < start + nr_pages; i++) {
207                 memcpy((void *) vaddr(pending_req, i) + pending_req->seg[i].offset,
208                         buff + copied,
209                         pending_req->seg[i].length);
210                 copied += pending_req->seg[i].length;
211         }
212 }
213
214 static void copy_pages_to_buff(void *buff, pending_req_t *pending_req,
215                 int start, int nr_pages)
216 {
217         unsigned long copied = 0;
218         int i;
219
220         for (i = start; i < start + nr_pages; i++) {
221                 memcpy(buff + copied,
222                         (void *) vaddr(pending_req, i) + pending_req->seg[i].offset,
223                         pending_req->seg[i].length);
224                 copied += pending_req->seg[i].length;
225         }
226 }
227
228 static int usbbk_alloc_urb(usbif_urb_request_t *req, pending_req_t *pending_req)
229 {
230         int ret;
231
232         if (usb_pipeisoc(req->pipe))
233                 pending_req->urb = usb_alloc_urb(req->u.isoc.number_of_packets, GFP_KERNEL);
234         else
235                 pending_req->urb = usb_alloc_urb(0, GFP_KERNEL);
236         if (!pending_req->urb) {
237                 pr_err("usbback: can't alloc urb\n");
238                 ret = -ENOMEM;
239                 goto fail;
240         }
241
242         if (req->buffer_length) {
243                 pending_req->buffer = usb_alloc_coherent(pending_req->stub->udev,
244                                 req->buffer_length, GFP_KERNEL,
245                                 &pending_req->transfer_dma);
246                 if (!pending_req->buffer) {
247                         pr_err("usbback: can't alloc urb buffer\n");
248                         ret = -ENOMEM;
249                         goto fail_free_urb;
250                 }
251         }
252
253         if (usb_pipecontrol(req->pipe)) {
254                 pending_req->setup = kmalloc(sizeof(struct usb_ctrlrequest),
255                                              GFP_KERNEL);
256                 if (!pending_req->setup) {
257                         pr_err("usbback: can't alloc usb_ctrlrequest\n");
258                         ret = -ENOMEM;
259                         goto fail_free_buffer;
260                 }
261         }
262
263         return 0;
264
265 fail_free_buffer:
266         if (req->buffer_length)
267                 usb_free_coherent(pending_req->stub->udev,
268                                   req->buffer_length,
269                                   pending_req->buffer,
270                                   pending_req->transfer_dma);
271 fail_free_urb:
272         usb_free_urb(pending_req->urb);
273 fail:
274         return ret;
275 }
276
277 static void usbbk_free_urb(struct urb *urb)
278 {
279         unsigned long flags;
280
281         spin_lock_irqsave(&urb_free_lock, flags);
282         list_add(&urb->urb_list, &pending_urb_free);
283         spin_unlock_irqrestore(&urb_free_lock, flags);
284 }
285
/*
 * Actually release a deferred URB: the setup packet (allocated only
 * for control pipes), the coherent transfer buffer (allocated only
 * when there was data to transfer), and finally the urb itself.
 */
static void _usbbk_free_urb(struct urb *urb)
{
	if (usb_pipecontrol(urb->pipe))
		kfree(urb->setup_packet);
	if (urb->transfer_buffer_length)
		usb_free_coherent(urb->dev, urb->transfer_buffer_length,
				  urb->transfer_buffer, urb->transfer_dma);
	/* Keep the buffer releases ordered before dropping the urb. */
	barrier();
	usb_free_urb(urb);
}
296
297 static void usbbk_free_urbs(void)
298 {
299         unsigned long flags;
300         struct list_head tmp_list;
301
302         if (list_empty(&pending_urb_free))
303                 return;
304
305         INIT_LIST_HEAD(&tmp_list);
306
307         spin_lock_irqsave(&urb_free_lock, flags);
308         list_splice_init(&pending_urb_free, &tmp_list);
309         spin_unlock_irqrestore(&urb_free_lock, flags);
310
311         while (!list_empty(&tmp_list)) {
312                 struct urb *next_urb = list_first_entry(&tmp_list, struct urb,
313                                                         urb_list);
314
315                 list_del(&next_urb->urb_list);
316                 _usbbk_free_urb(next_urb);
317         }
318 }
319
/* Flag that ring work is pending and wake the backend's wait queue. */
static void usbbk_notify_work(usbif_t *usbif)
{
	usbif->waiting_reqs = 1;
	wake_up(&usbif->wq);
}
325
/*
 * Event-channel interrupt handler: kick the backend to process the
 * ring.  @dev_id is the usbif_t registered with the irq.
 */
irqreturn_t usbbk_be_int(int irq, void *dev_id)
{
	usbbk_notify_work(dev_id);
	return IRQ_HANDLED;
}
331
/*
 * Push one URB response onto the shared ring and notify the frontend
 * via its event channel if the ring macros say an event is needed.
 */
static void usbbk_do_response(pending_req_t *pending_req, int32_t status,
					int32_t actual_length, int32_t error_count, uint16_t start_frame)
{
	usbif_t *usbif = pending_req->usbif;
	usbif_urb_response_t *res;
	unsigned long flags;
	int notify;

	spin_lock_irqsave(&usbif->urb_ring_lock, flags);
	res = RING_GET_RESPONSE(&usbif->urb_ring, usbif->urb_ring.rsp_prod_pvt);
	res->id = pending_req->id;
	res->status = status;
	res->actual_length = actual_length;
	res->error_count = error_count;
	res->start_frame = start_frame;
	usbif->urb_ring.rsp_prod_pvt++;
	/* Response fields must be visible before the producer index moves. */
	barrier();
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&usbif->urb_ring, notify);
	spin_unlock_irqrestore(&usbif->urb_ring_lock, flags);

	if (notify)
		notify_remote_via_irq(usbif->irq);
}
355
/*
 * URB completion callback: copy received data back into the frontend's
 * grant-mapped pages, unmap the grants, send the response, and hand
 * the URB to the deferred-free list.
 */
static void usbbk_urb_complete(struct urb *urb)
{
	pending_req_t *pending_req = (pending_req_t *)urb->context;

	/* Successful IN transfer: hand the payload back to the guest. */
	if (usb_pipein(urb->pipe) && urb->status == 0 && urb->actual_length > 0)
		copy_buff_to_pages(pending_req->buffer, pending_req,
					0, pending_req->nr_buffer_segs);

	/* ISO: also copy back the per-frame descriptors (extra segments). */
	if (usb_pipeisoc(urb->pipe))
		copy_buff_to_pages(&urb->iso_frame_desc[0], pending_req,
					pending_req->nr_buffer_segs, pending_req->nr_extra_segs);

	barrier();

	fast_flush_area(pending_req);

	usbbk_do_response(pending_req, urb->status, urb->actual_length,
					urb->error_count, urb->start_frame);

	remove_req_from_submitting_list(pending_req->stub, pending_req);

	barrier();
	/* Free is deferred via the urb_free list — presumably because this
	 * callback may run in atomic context; confirm against caller. */
	usbbk_free_urb(urb);
	usbif_put(pending_req->usbif);
	free_req(pending_req);
}
382
383 static int usbbk_gnttab_map(usbif_t *usbif,
384                         usbif_urb_request_t *req, pending_req_t *pending_req)
385 {
386         int i, ret;
387         unsigned int nr_segs;
388         uint32_t flags;
389         struct gnttab_map_grant_ref map[USBIF_MAX_SEGMENTS_PER_REQUEST];
390
391         nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs;
392
393         if (nr_segs > USBIF_MAX_SEGMENTS_PER_REQUEST) {
394                 pr_err("Bad number of segments in request\n");
395                 ret = -EINVAL;
396                 goto fail;
397         }
398
399         if (nr_segs) {
400                 pending_req->seg = kmalloc(sizeof(struct pending_req_segment)
401                                 * nr_segs, GFP_KERNEL);
402                 if (!pending_req->seg) {
403                         ret = -ENOMEM;
404                         goto fail;
405                 }
406
407                 if (pending_req->nr_buffer_segs) {
408                         flags = GNTMAP_host_map;
409                         if (usb_pipeout(req->pipe))
410                                 flags |= GNTMAP_readonly;
411                         for (i = 0; i < pending_req->nr_buffer_segs; i++)
412                                 gnttab_set_map_op(&map[i], vaddr(
413                                                 pending_req, i), flags,
414                                                 req->seg[i].gref,
415                                                 usbif->domid);
416                 }
417
418                 if (pending_req->nr_extra_segs) {
419                         flags = GNTMAP_host_map;
420                         for (i = req->nr_buffer_segs; i < nr_segs; i++)
421                                 gnttab_set_map_op(&map[i], vaddr(
422                                                 pending_req, i), flags,
423                                                 req->seg[i].gref,
424                                                 usbif->domid);
425                 }
426
427                 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
428                                         map, nr_segs);
429                 BUG_ON(ret);
430
431                 for (i = 0; i < nr_segs; i++) {
432                         /* Make sure than none of the map ops failed with GNTST_eagain */
433                         if (unlikely(map[i].status == GNTST_eagain))
434                                 gnttab_check_GNTST_eagain_while(GNTTABOP_map_grant_ref, &map[i]);
435
436                         if (unlikely(map[i].status != GNTST_okay)) {
437                                 pr_err("usbback: invalid buffer -- could not remap it\n");
438                                 map[i].handle = USBBACK_INVALID_HANDLE;
439                                 ret |= 1;
440                         }
441
442                         pending_handle(pending_req, i) = map[i].handle;
443
444                         if (ret)
445                                 continue;
446
447                         set_phys_to_machine(__pa(vaddr(
448                                 pending_req, i)) >> PAGE_SHIFT,
449                                 FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
450
451                         pending_req->seg[i].offset = req->seg[i].offset;
452                         pending_req->seg[i].length = req->seg[i].length;
453
454                         barrier();
455
456                         if (pending_req->seg[i].offset >= PAGE_SIZE ||
457                                         pending_req->seg[i].length > PAGE_SIZE ||
458                                         pending_req->seg[i].offset + pending_req->seg[i].length > PAGE_SIZE)
459                                         ret |= 1;
460                 }
461
462                 if (ret)
463                         goto fail_flush;
464         }
465
466         return 0;
467
468 fail_flush:
469         fast_flush_area(pending_req);
470         ret = -ENOMEM;
471
472 fail:
473         return ret;
474 }
475
/*
 * Translate the frontend's request into a fully initialized host URB:
 * rebuild the pipe against the real usb_device, fill type-specific
 * fields, and wire up the completion handler and transfer buffer.
 */
static void usbbk_init_urb(usbif_urb_request_t *req, pending_req_t *pending_req)
{
	unsigned int pipe;
	struct usb_device *udev = pending_req->stub->udev;
	struct urb *urb = pending_req->urb;

	switch (usb_pipetype(req->pipe)) {
	case PIPE_ISOCHRONOUS:
		if (usb_pipein(req->pipe))
			pipe = usb_rcvisocpipe(udev, usb_pipeendpoint(req->pipe));
		else
			pipe = usb_sndisocpipe(udev, usb_pipeendpoint(req->pipe));

		/* No usb_fill_*_urb() helper exists for ISO; fill by hand. */
		urb->dev = udev;
		urb->pipe = pipe;
		urb->transfer_flags = req->transfer_flags;
		urb->transfer_flags |= URB_ISO_ASAP;
		urb->transfer_buffer = pending_req->buffer;
		urb->transfer_buffer_length = req->buffer_length;
		urb->complete = usbbk_urb_complete;
		urb->context = pending_req;
		urb->interval = req->u.isoc.interval;
		urb->start_frame = req->u.isoc.start_frame;
		urb->number_of_packets = req->u.isoc.number_of_packets;

		break;
	case PIPE_INTERRUPT:
		if (usb_pipein(req->pipe))
			pipe = usb_rcvintpipe(udev, usb_pipeendpoint(req->pipe));
		else
			pipe = usb_sndintpipe(udev, usb_pipeendpoint(req->pipe));

		usb_fill_int_urb(urb, udev, pipe,
				pending_req->buffer, req->buffer_length,
				usbbk_urb_complete,
				pending_req, req->u.intr.interval);
		/*
		 * high speed interrupt endpoints use a logarithmic encoding of
		 * the endpoint interval, and usb_fill_int_urb() initializes a
		 * interrupt urb with the encoded interval value.
		 *
		 * req->u.intr.interval is the interval value that already
		 * encoded in the frontend part, and the above usb_fill_int_urb()
		 * initializes the urb->interval with double encoded value.
		 *
		 * so, simply overwrite the urb->interval with original value.
		 */
		urb->interval = req->u.intr.interval;
		urb->transfer_flags = req->transfer_flags;

		break;
	case PIPE_CONTROL:
		if (usb_pipein(req->pipe))
			pipe = usb_rcvctrlpipe(udev, 0);
		else
			pipe = usb_sndctrlpipe(udev, 0);

		usb_fill_control_urb(urb, udev, pipe,
				(unsigned char *) pending_req->setup,
				pending_req->buffer, req->buffer_length,
				usbbk_urb_complete, pending_req);
		/* The 8-byte SETUP packet comes verbatim from the frontend. */
		memcpy(pending_req->setup, req->u.ctrl, 8);
		urb->transfer_flags = req->transfer_flags;

		break;
	case PIPE_BULK:
		if (usb_pipein(req->pipe))
			pipe = usb_rcvbulkpipe(udev, usb_pipeendpoint(req->pipe));
		else
			pipe = usb_sndbulkpipe(udev, usb_pipeendpoint(req->pipe));

		usb_fill_bulk_urb(urb, udev, pipe,
				pending_req->buffer, req->buffer_length,
				usbbk_urb_complete, pending_req);
		urb->transfer_flags = req->transfer_flags;

		break;
	default:
		break;
	}

	/* The buffer was allocated with usb_alloc_coherent(); tell USB core
	 * not to DMA-map it again. */
	if (req->buffer_length) {
		urb->transfer_dma = pending_req->transfer_dma;
		urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
	}
}
562
/* Work item carrying a deferred SET_INTERFACE request. */
struct set_interface_request {
	pending_req_t *pending_req;	/* request to answer when done */
	int interface;			/* interface number */
	int alternate;			/* alternate setting to select */
	struct work_struct work;
};
569
570 static void usbbk_set_interface_work(struct work_struct *arg)
571 {
572         struct set_interface_request *req
573                 = container_of(arg, struct set_interface_request, work);
574         pending_req_t *pending_req = req->pending_req;
575         struct usb_device *udev = req->pending_req->stub->udev;
576
577         int ret;
578
579         usb_lock_device(udev);
580         ret = usb_set_interface(udev, req->interface, req->alternate);
581         usb_unlock_device(udev);
582         usb_put_dev(udev);
583
584         usbbk_do_response(pending_req, ret, 0, 0, 0);
585         usbif_put(pending_req->usbif);
586         free_req(pending_req);
587         kfree(req);
588 }
589
590 static int usbbk_set_interface(pending_req_t *pending_req, int interface, int alternate)
591 {
592         struct set_interface_request *req;
593         struct usb_device *udev = pending_req->stub->udev;
594
595         req = kmalloc(sizeof(*req), GFP_KERNEL);
596         if (!req)
597                 return -ENOMEM;
598         req->pending_req = pending_req;
599         req->interface = interface;
600         req->alternate = alternate;
601         INIT_WORK(&req->work, usbbk_set_interface_work);
602         usb_get_dev(udev);
603         schedule_work(&req->work);
604         return 0;
605 }
606
/* Work item carrying a deferred CLEAR_FEATURE(ENDPOINT_HALT) request. */
struct clear_halt_request {
	pending_req_t *pending_req;	/* request to answer when done */
	int pipe;			/* pipe of the halted endpoint */
	struct work_struct work;
};
612
613 static void usbbk_clear_halt_work(struct work_struct *arg)
614 {
615         struct clear_halt_request *req
616                 = container_of(arg, struct clear_halt_request, work);
617         pending_req_t *pending_req = req->pending_req;
618         struct usb_device *udev = req->pending_req->stub->udev;
619         int ret;
620
621         usb_lock_device(udev);
622         ret = usb_clear_halt(req->pending_req->stub->udev, req->pipe);
623         usb_unlock_device(udev);
624         usb_put_dev(udev);
625
626         usbbk_do_response(pending_req, ret, 0, 0, 0);
627         usbif_put(pending_req->usbif);
628         free_req(pending_req);
629         kfree(req);
630 }
631
632 static int usbbk_clear_halt(pending_req_t *pending_req, int pipe)
633 {
634         struct clear_halt_request *req;
635         struct usb_device *udev = pending_req->stub->udev;
636
637         req = kmalloc(sizeof(*req), GFP_KERNEL);
638         if (!req)
639                 return -ENOMEM;
640         req->pending_req = pending_req;
641         req->pipe = pipe;
642         INIT_WORK(&req->work, usbbk_clear_halt_work);
643
644         usb_get_dev(udev);
645         schedule_work(&req->work);
646         return 0;
647 }
648
/* Port reset support — currently compiled out. */
#if 0
/* Work item carrying a deferred port-reset request. */
struct port_reset_request {
	pending_req_t *pending_req;
	struct work_struct work;
};

/*
 * Deferred port-reset handler: resets the device in process context
 * (when the lock-for-reset succeeds), responds to the frontend, and
 * drops the references taken when the work was queued.
 */
static void usbbk_port_reset_work(struct work_struct *arg)
{
	struct port_reset_request *req
		= container_of(arg, struct port_reset_request, work);
	pending_req_t *pending_req = req->pending_req;
	struct usb_device *udev = pending_req->stub->udev;
	int ret, ret_lock;

	ret = ret_lock = usb_lock_device_for_reset(udev, NULL);
	if (ret_lock >= 0) {
		ret = usb_reset_device(udev);
		/* A positive ret_lock means the lock was taken here. */
		if (ret_lock)
			usb_unlock_device(udev);
	}
	usb_put_dev(udev);

	usbbk_do_response(pending_req, ret, 0, 0, 0);
	usbif_put(pending_req->usbif);
	free_req(pending_req);
	kfree(req);
}

/*
 * Queue a port-reset request for execution in process context.
 * Takes a device reference that the work handler drops.
 */
static int usbbk_port_reset(pending_req_t *pending_req)
{
	struct port_reset_request *req;
	struct usb_device *udev = pending_req->stub->udev;

	req = kmalloc(sizeof(*req), GFP_KERNEL);
	if (!req)
		return -ENOMEM;

	req->pending_req = pending_req;
	INIT_WORK(&req->work, usbbk_port_reset_work);

	usb_get_dev(udev);
	schedule_work(&req->work);
	return 0;
}
#endif
694
695 static void usbbk_set_address(usbif_t *usbif, struct usbstub *stub, int cur_addr, int new_addr)
696 {
697         unsigned long flags;
698
699         spin_lock_irqsave(&usbif->addr_lock, flags);
700         if (cur_addr)
701                 usbif->addr_table[cur_addr] = NULL;
702         if (new_addr)
703                 usbif->addr_table[new_addr] = stub;
704         stub->addr = new_addr;
705         spin_unlock_irqrestore(&usbif->addr_lock, flags);
706 }
707
708 struct usbstub *find_attached_device(usbif_t *usbif, int portnum)
709 {
710         struct usbstub *stub;
711         int found = 0;
712         unsigned long flags;
713
714         spin_lock_irqsave(&usbif->stub_lock, flags);
715         list_for_each_entry(stub, &usbif->stub_list, dev_list) {
716                 if (stub->portid->portnum == portnum) {
717                         found = 1;
718                         break;
719                 }
720         }
721         spin_unlock_irqrestore(&usbif->stub_lock, flags);
722
723         if (found)
724                 return stub;
725
726         return NULL;
727 }
728
729 static void process_unlink_req(usbif_t *usbif,
730                 usbif_urb_request_t *req, pending_req_t *pending_req)
731 {
732         pending_req_t *unlink_req = NULL;
733         int devnum;
734         int ret = 0;
735         unsigned long flags;
736
737         devnum = usb_pipedevice(req->pipe);
738         if (unlikely(devnum == 0)) {
739                 pending_req->stub = find_attached_device(usbif, usbif_pipeportnum(req->pipe));
740                 if (unlikely(!pending_req->stub)) {
741                         ret = -ENODEV;
742                         goto fail_response;
743                 }
744         } else {
745                 if (unlikely(!usbif->addr_table[devnum])) {
746                         ret = -ENODEV;
747                         goto fail_response;
748                 }
749                 pending_req->stub = usbif->addr_table[devnum];
750         }
751
752         spin_lock_irqsave(&pending_req->stub->submitting_lock, flags);
753         list_for_each_entry(unlink_req, &pending_req->stub->submitting_list, urb_list) {
754                 if (unlink_req->id == req->u.unlink.unlink_id) {
755                         ret = usb_unlink_urb(unlink_req->urb);
756                         break;
757                 }
758         }
759         spin_unlock_irqrestore(&pending_req->stub->submitting_lock, flags);
760
761 fail_response:
762         usbbk_do_response(pending_req, ret, 0, 0, 0);
763         usbif_put(usbif);
764         free_req(pending_req);
765         return;
766 }
767
768 static int check_and_submit_special_ctrlreq(usbif_t *usbif,
769                 usbif_urb_request_t *req, pending_req_t *pending_req)
770 {
771         int devnum;
772         struct usbstub *stub = NULL;
773         struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *) req->u.ctrl;
774         int ret;
775         int done = 0;
776
777         devnum = usb_pipedevice(req->pipe);
778
779         /*
780          * When the device is first connected or reseted, USB device has no address.
781          * In this initial state, following requests are send to device address (#0),
782          *
783          *  1. GET_DESCRIPTOR (with Descriptor Type is "DEVICE") is send,
784          *     and OS knows what device is connected to.
785          *
786          *  2. SET_ADDRESS is send, and then, device has its address.
787          *
788          * In the next step, SET_CONFIGURATION is send to addressed device, and then,
789          * the device is finally ready to use.
790          */
791         if (unlikely(devnum == 0)) {
792                 stub = find_attached_device(usbif, usbif_pipeportnum(req->pipe));
793                 if (unlikely(!stub)) {
794                         ret = -ENODEV;
795                         goto fail_response;
796                 }
797
798                 switch (ctrl->bRequest) {
799                 case USB_REQ_GET_DESCRIPTOR:
800                         /*
801                          * GET_DESCRIPTOR request to device #0.
802                          * through to normal urb transfer.
803                          */
804                         pending_req->stub = stub;
805                         return 0;
806                         break;
807                 case USB_REQ_SET_ADDRESS:
808                         /*
809                          * SET_ADDRESS request to device #0.
810                          * add attached device to addr_table.
811                          */
812                         {
813                                 __u16 addr = le16_to_cpu(ctrl->wValue);
814                                 usbbk_set_address(usbif, stub, 0, addr);
815                         }
816                         ret = 0;
817                         goto fail_response;
818                         break;
819                 default:
820                         ret = -EINVAL;
821                         goto fail_response;
822                 }
823         } else {
824                 if (unlikely(!usbif->addr_table[devnum])) {
825                         ret = -ENODEV;
826                         goto fail_response;
827                 }
828                 pending_req->stub = usbif->addr_table[devnum];
829         }
830
831         /*
832          * Check special request
833          */
834         switch (ctrl->bRequest) {
835         case USB_REQ_SET_ADDRESS:
836                 /*
837                  * SET_ADDRESS request to addressed device.
838                  * change addr or remove from addr_table.
839                  */
840                 {
841                         __u16 addr = le16_to_cpu(ctrl->wValue);
842                         usbbk_set_address(usbif, stub, devnum, addr);
843                 }
844                 ret = 0;
845                 goto fail_response;
846                 break;
847 #if 0
848         case USB_REQ_SET_CONFIGURATION:
849                 /*
850                  * linux 2.6.27 or later version only!
851                  */
852                 if (ctrl->RequestType == USB_RECIP_DEVICE) {
853                         __u16 config = le16_to_cpu(ctrl->wValue);
854                         usb_driver_set_configuration(pending_req->stub->udev, config);
855                         done = 1;
856                 }
857                 break;
858 #endif
859         case USB_REQ_SET_INTERFACE:
860                 if (ctrl->bRequestType == USB_RECIP_INTERFACE) {
861                         __u16 alt = le16_to_cpu(ctrl->wValue);
862                         __u16 intf = le16_to_cpu(ctrl->wIndex);
863                         usbbk_set_interface(pending_req, intf, alt);
864                         done = 1;
865                 }
866                 break;
867         case USB_REQ_CLEAR_FEATURE:
868                 if (ctrl->bRequestType == USB_RECIP_ENDPOINT
869                         && ctrl->wValue == USB_ENDPOINT_HALT) {
870                         int pipe;
871                         int ep = le16_to_cpu(ctrl->wIndex) & 0x0f;
872                         int dir = le16_to_cpu(ctrl->wIndex)
873                                         & USB_DIR_IN;
874                         if (dir)
875                                 pipe = usb_rcvctrlpipe(pending_req->stub->udev, ep);
876                         else
877                                 pipe = usb_sndctrlpipe(pending_req->stub->udev, ep);
878                         usbbk_clear_halt(pending_req, pipe);
879                         done = 1;
880                 }
881                 break;
882 #if 0 /* not tested yet */
883         case USB_REQ_SET_FEATURE:
884                 if (ctrl->bRequestType == USB_RT_PORT) {
885                         __u16 feat = le16_to_cpu(ctrl->wValue);
886                         if (feat == USB_PORT_FEAT_RESET) {
887                                 usbbk_port_reset(pending_req);
888                                 done = 1;
889                         }
890                 }
891                 break;
892 #endif
893         default:
894                 break;
895         }
896
897         return done;
898
899 fail_response:
900         usbbk_do_response(pending_req, ret, 0, 0, 0);
901         usbif_put(usbif);
902         free_req(pending_req);
903         return 1;
904 }
905
/*
 * Turn one frontend ring request into a URB and hand it to the USB core.
 *
 * Takes a reference on @usbif for the lifetime of the in-flight request.
 * On any failure the error code is pushed back to the frontend via
 * usbbk_do_response() and both the usbif reference and @pending_req are
 * released; on success the URB completion handler owns the cleanup.
 *
 * NOTE(review): the barrier() calls are compiler barriers placed between
 * each setup stage; presumably they guard against reordering relative to
 * the shared-ring request fields — confirm against the frontend protocol.
 */
static void dispatch_request_to_pending_reqs(usbif_t *usbif,
		usbif_urb_request_t *req,
		pending_req_t *pending_req)
{
	int ret;

	pending_req->id = req->id;
	pending_req->usbif = usbif;

	barrier();

	/* Held until the response is written (usbif_put() on all exits). */
	usbif_get(usbif);

	/* unlink request */
	if (unlikely(usbif_pipeunlink(req->pipe))) {
		process_unlink_req(usbif, req, pending_req);
		return;
	}

	/*
	 * Control transfers may carry special requests (SET_ADDRESS etc.).
	 * A non-zero return means check_and_submit_special_ctrlreq()
	 * consumed the request (and already responded on failure).
	 */
	if (usb_pipecontrol(req->pipe)) {
		if (check_and_submit_special_ctrlreq(usbif, req, pending_req))
			return;
	} else {
		/* Non-control transfers must target an addressed device. */
		int devnum = usb_pipedevice(req->pipe);
		if (unlikely(!usbif->addr_table[devnum])) {
			ret = -ENODEV;
			goto fail_response;
		}
		pending_req->stub = usbif->addr_table[devnum];
	}

	barrier();

	/* Allocation failures are reported to the frontend as -ESHUTDOWN. */
	ret = usbbk_alloc_urb(req, pending_req);
	if (ret) {
		ret = -ESHUTDOWN;
		goto fail_response;
	}

	/* Make the request visible for unlink/cancel handling. */
	add_req_to_submitting_list(pending_req->stub, pending_req);

	barrier();

	usbbk_init_urb(req, pending_req);

	barrier();

	/* Isochronous URBs carry extra segments for the frame descriptors. */
	pending_req->nr_buffer_segs = req->nr_buffer_segs;
	if (usb_pipeisoc(req->pipe))
		pending_req->nr_extra_segs = req->u.isoc.nr_frame_desc_segs;
	else
		pending_req->nr_extra_segs = 0;

	barrier();

	/* Map the frontend's grant references into our address space. */
	ret = usbbk_gnttab_map(usbif, req, pending_req);
	if (ret) {
		pr_err("usbback: invalid buffer\n");
		ret = -ESHUTDOWN;
		goto fail_free_urb;
	}

	barrier();

	/* OUT data travels guest -> backend: copy it before submission. */
	if (usb_pipeout(req->pipe) && req->buffer_length)
		copy_pages_to_buff(pending_req->buffer,
					pending_req,
					0,
					pending_req->nr_buffer_segs);
	/* Frame descriptors follow the data segments for isoc transfers. */
	if (usb_pipeisoc(req->pipe)) {
		copy_pages_to_buff(&pending_req->urb->iso_frame_desc[0],
			pending_req,
			pending_req->nr_buffer_segs,
			pending_req->nr_extra_segs);
	}

	barrier();

	ret = usb_submit_urb(pending_req->urb, GFP_KERNEL);
	if (ret) {
		pr_err("usbback: failed submitting urb, error %d\n", ret);
		ret = -ESHUTDOWN;
		goto fail_flush_area;
	}
	return;

	/* Unwind in reverse order of setup. */
fail_flush_area:
	fast_flush_area(pending_req);
fail_free_urb:
	remove_req_from_submitting_list(pending_req->stub, pending_req);
	barrier();
	usbbk_free_urb(pending_req->urb);
fail_response:
	usbbk_do_response(pending_req, ret, 0, 0, 0);
	usbif_put(usbif);
	free_req(pending_req);
}
1003
/*
 * Drain pending requests from the URB shared ring and dispatch each one.
 *
 * Returns non-zero when there is (or may be) more work to do, so the
 * caller (usbbk_schedule()) re-arms its wakeup flag: either the ring
 * still has requests, or we ran out of free pending_req slots.
 */
static int usbbk_start_submit_urb(usbif_t *usbif)
{
	usbif_urb_back_ring_t *urb_ring = &usbif->urb_ring;
	usbif_urb_request_t *req;
	pending_req_t *pending_req;
	RING_IDX rc, rp;
	int more_to_do = 0;

	rc = urb_ring->req_cons;
	rp = urb_ring->sring->req_prod;
	/* Ensure we see the request contents published before req_prod. */
	rmb();

	while (rc != rp) {
		/* A malicious/buggy frontend could advance req_prod too far. */
		if (RING_REQUEST_CONS_OVERFLOW(urb_ring, rc)) {
			pr_warning("RING_REQUEST_CONS_OVERFLOW\n");
			break;
		}

		/* Out of pending slots: retry after some complete. */
		pending_req = alloc_req();
		if (NULL == pending_req) {
			more_to_do = 1;
			break;
		}

		/* Consume the request before dispatching it. */
		req = RING_GET_REQUEST(urb_ring, rc);
		urb_ring->req_cons = ++rc;

		dispatch_request_to_pending_reqs(usbif, req,
							pending_req);
	}

	/* Re-check for requests that raced with our final consumption. */
	RING_FINAL_CHECK_FOR_REQUESTS(&usbif->urb_ring, more_to_do);

	cond_resched();

	return more_to_do;
}
1041
1042 void usbbk_hotplug_notify(usbif_t *usbif, int portnum, int speed)
1043 {
1044         usbif_conn_back_ring_t *ring = &usbif->conn_ring;
1045         usbif_conn_request_t *req;
1046         usbif_conn_response_t *res;
1047         unsigned long flags;
1048         u16 id;
1049         int notify;
1050
1051         spin_lock_irqsave(&usbif->conn_ring_lock, flags);
1052
1053         req = RING_GET_REQUEST(ring, ring->req_cons);;
1054         id = req->id;
1055         ring->req_cons++;
1056         ring->sring->req_event = ring->req_cons + 1;
1057
1058         res = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
1059         res->id = id;
1060         res->portnum = portnum;
1061         res->speed = speed;
1062         ring->rsp_prod_pvt++;
1063         RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
1064
1065         spin_unlock_irqrestore(&usbif->conn_ring_lock, flags);
1066
1067         if (notify)
1068                 notify_remote_via_irq(usbif->irq);
1069 }
1070
/*
 * Per-usbif kernel thread main loop (started as usbif->xenusbd).
 *
 * Sleeps until either the interface has queued work (usbif->waiting_reqs)
 * or a free pending_req slot exists, then drains the URB ring.  Holds a
 * usbif reference for the thread's whole lifetime.
 */
int usbbk_schedule(void *arg)
{
	usbif_t *usbif = (usbif_t *) arg;

	usbif_get(usbif);

	while (!kthread_should_stop()) {
		/* Wait for work on this interface... */
		wait_event_interruptible(
			usbif->wq,
			usbif->waiting_reqs || kthread_should_stop());
		/* ...and for at least one free pending_req slot. */
		wait_event_interruptible(
			pending_free_wq,
			!list_empty(&pending_free) || kthread_should_stop());
		/*
		 * Clear the flag before processing so a wakeup that races
		 * with usbbk_start_submit_urb() is not lost.
		 */
		usbif->waiting_reqs = 0;
		smp_mb();

		/* Non-zero return: ring not fully drained, go again. */
		if (usbbk_start_submit_urb(usbif))
			usbif->waiting_reqs = 1;

		usbbk_free_urbs();
	}

	/* Final cleanup before the thread exits. */
	usbbk_free_urbs();
	usbif->xenusbd = NULL;
	usbif_put(usbif);

	return 0;
}
1099
1100 /*
1101  * attach usbstub device to usbif.
1102  */
1103 void usbbk_attach_device(usbif_t *usbif, struct usbstub *stub)
1104 {
1105         unsigned long flags;
1106
1107         spin_lock_irqsave(&usbif->stub_lock, flags);
1108         list_add(&stub->dev_list, &usbif->stub_list);
1109         spin_unlock_irqrestore(&usbif->stub_lock, flags);
1110         stub->usbif = usbif;
1111 }
1112
1113 /*
1114  * detach usbstub device from usbif.
1115  */
1116 void usbbk_detach_device(usbif_t *usbif, struct usbstub *stub)
1117 {
1118         unsigned long flags;
1119
1120         if (stub->addr)
1121                 usbbk_set_address(usbif, stub, stub->addr, 0);
1122         spin_lock_irqsave(&usbif->stub_lock, flags);
1123         list_del(&stub->dev_list);
1124         spin_unlock_irqrestore(&usbif->stub_lock, flags);
1125         stub->usbif = NULL;
1126 }
1127
/*
 * Same as usbbk_detach_device() but without taking usbif->stub_lock —
 * for callers that already hold it (NOTE(review): assumed from the
 * name; verify the callers actually hold stub_lock here).
 */
void detach_device_without_lock(usbif_t *usbif, struct usbstub *stub)
{
	if (stub->addr)
		usbbk_set_address(usbif, stub, stub->addr, 0);
	list_del(&stub->dev_list);
	stub->usbif = NULL;
}
1135
1136 static int __init usbback_init(void)
1137 {
1138         int i, mmap_pages;
1139         int err = 0;
1140
1141         if (!is_running_on_xen())
1142                 return -ENODEV;
1143
1144         mmap_pages = usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST;
1145         pending_reqs = kzalloc(sizeof(pending_reqs[0]) *
1146                         usbif_reqs, GFP_KERNEL);
1147         pending_grant_handles = kmalloc(sizeof(pending_grant_handles[0]) *
1148                         mmap_pages, GFP_KERNEL);
1149         pending_pages = alloc_empty_pages_and_pagevec(mmap_pages);
1150
1151         if (!pending_reqs || !pending_grant_handles || !pending_pages) {
1152                 err = -ENOMEM;
1153                 goto out_mem;
1154         }
1155
1156         for (i = 0; i < mmap_pages; i++)
1157                 pending_grant_handles[i] = USBBACK_INVALID_HANDLE;
1158
1159         INIT_LIST_HEAD(&pending_free);
1160
1161         for (i = 0; i < usbif_reqs; i++)
1162                 list_add_tail(&pending_reqs[i].free_list, &pending_free);
1163
1164         err = usbstub_init();
1165         if (err)
1166                 goto out_mem;
1167
1168         err = usbback_xenbus_init();
1169         if (err)
1170                 goto out_xenbus;
1171
1172         return 0;
1173
1174 out_xenbus:
1175         usbstub_exit();
1176 out_mem:
1177         kfree(pending_reqs);
1178         kfree(pending_grant_handles);
1179         free_empty_pages_and_pagevec(pending_pages, mmap_pages);
1180         return err;
1181 }
1182
1183 static void __exit usbback_exit(void)
1184 {
1185         usbback_xenbus_exit();
1186         usbstub_exit();
1187         kfree(pending_reqs);
1188         kfree(pending_grant_handles);
1189         free_empty_pages_and_pagevec(pending_pages, usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST);
1190 }
1191
1192 module_init(usbback_init);
1193 module_exit(usbback_exit);
1194
1195 MODULE_AUTHOR("");
1196 MODULE_DESCRIPTION("Xen USB backend driver (usbback)");
1197 MODULE_LICENSE("Dual BSD/GPL");
1198 MODULE_ALIAS("xen-backend:vusb");