4 * Xen USB backend driver
6 * Copyright (C) 2009, FUJITSU LABORATORIES LTD.
7 * Author: Noboru Iwamatsu <n_iwamatsu@jp.fujitsu.com>
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of the GNU General Public License as published by
11 * the Free Software Foundation; either version 2 of the License, or
12 * (at your option) any later version.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, see <http://www.gnu.org/licenses/>.
24 * When distributed separately from the Linux kernel or incorporated into
25 * other software packages, subject to the following license:
27 * Permission is hereby granted, free of charge, to any person obtaining a copy
28 * of this software and associated documentation files (the "Software"), to
29 * deal in the Software without restriction, including without limitation the
30 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
31 * sell copies of the Software, and to permit persons to whom the Software is
32 * furnished to do so, subject to the following conditions:
34 * The above copyright notice and this permission notice shall be included in
35 * all copies or substantial portions of the Software.
37 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
38 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
39 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
40 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
41 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
42 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
43 * DEALINGS IN THE SOFTWARE.
47 #include <xen/balloon.h>
48 #include <xen/evtchn.h>
49 #include <xen/gnttab.h>
53 #include "../../usb/core/hub.h"
/* Size of the pending-request pool; tunable at module load via "reqs". */
56 int usbif_reqs = USBIF_BACK_MAX_PENDING_REQS;
57 module_param_named(reqs, usbif_reqs, int, 0);
58 MODULE_PARM_DESC(reqs, "Number of usbback requests to allocate");
/* One granted guest page backing part of a transfer buffer. */
60 struct pending_req_segment {
/*
 * NOTE(review): the fields below appear to belong to pending_req_t;
 * the struct boundary lines are not visible in this dump — confirm
 * against the full source.
 */
68 uint16_t id; /* request id */
/* link on the owning usbstub's submitting_list while the URB is in flight */
71 struct list_head urb_list;
/* DMA handle for the coherent transfer buffer (usb_alloc_coherent) */
76 dma_addr_t transfer_dma;
/* setup packet for control transfers (kmalloc'd in usbbk_alloc_urb) */
77 struct usb_ctrlrequest *setup;
79 /* request segments */
80 uint16_t nr_buffer_segs; /* number of urb->transfer_buffer segments */
81 uint16_t nr_extra_segs; /* number of iso_frame_desc segments (ISO) */
82 struct pending_req_segment *seg;
/* link on the global pending_free pool when idle */
84 struct list_head free_list;
/*
 * Global pools: preallocated request array, its free list (guarded by
 * pending_free_lock), a deferred-URB-free list (guarded by urb_free_lock),
 * and per-segment grant-mapping bookkeeping.
 */
87 static pending_req_t *pending_reqs;
88 static struct list_head pending_free;
89 static DEFINE_SPINLOCK(pending_free_lock);
90 static LIST_HEAD(pending_urb_free);
91 static DEFINE_SPINLOCK(urb_free_lock);
/* waiters in usbbk_schedule() block here until a free request appears */
92 static DECLARE_WAIT_QUEUE_HEAD(pending_free_wq);
/* sentinel meaning "this segment slot holds no grant mapping" */
94 #define USBBACK_INVALID_HANDLE (~0)
/* one page and one grant handle per possible segment of every request */
96 static struct page **pending_pages;
97 static grant_handle_t *pending_grant_handles;
/* Flat index of segment 'seg' of request 'req' into the global page arrays. */
99 static inline int vaddr_pagenr(pending_req_t *req, int seg)
101 return (req - pending_reqs) * USBIF_MAX_SEGMENTS_PER_REQUEST + seg;
/* Kernel virtual address of the page backing segment 'seg' of 'req'. */
104 static inline unsigned long vaddr(pending_req_t *req, int seg)
106 unsigned long pfn = page_to_pfn(pending_pages[vaddr_pagenr(req, seg)]);
107 return (unsigned long)pfn_to_kaddr(pfn);
/* Grant handle slot for segment _seg of request _req (lvalue). */
110 #define pending_handle(_req, _seg) \
111 (pending_grant_handles[vaddr_pagenr(_req, _seg)])
/*
 * Pop a request from the free pool.
 * Returns NULL when the pool is exhausted; caller retries later
 * (see usbbk_start_submit_urb / pending_free_wq).
 */
113 static pending_req_t *alloc_req(void)
115 pending_req_t *req = NULL;
118 spin_lock_irqsave(&pending_free_lock, flags);
119 if (!list_empty(&pending_free)) {
120 req = list_entry(pending_free.next, pending_req_t, free_list);
121 list_del(&req->free_list);
123 spin_unlock_irqrestore(&pending_free_lock, flags);
/*
 * Return a request to the free pool and wake anyone waiting for one.
 * was_empty records whether the pool transitioned from empty so the
 * wakeup (visible at the end) fires when work can resume.
 */
127 static void free_req(pending_req_t *req)
132 spin_lock_irqsave(&pending_free_lock, flags);
133 was_empty = list_empty(&pending_free);
134 list_add(&req->free_list, &pending_free);
135 spin_unlock_irqrestore(&pending_free_lock, flags);
137 wake_up(&pending_free_wq);
/* Track an in-flight request on its stub so it can be unlinked on detach. */
140 static inline void add_req_to_submitting_list(struct usbstub *stub, pending_req_t *pending_req)
144 spin_lock_irqsave(&stub->submitting_lock, flags);
145 list_add_tail(&pending_req->urb_list, &stub->submitting_list);
146 spin_unlock_irqrestore(&stub->submitting_lock, flags);
/* Drop a completed/cancelled request from its stub's in-flight list. */
149 static inline void remove_req_from_submitting_list(struct usbstub *stub, pending_req_t *pending_req)
153 spin_lock_irqsave(&stub->submitting_lock, flags);
154 list_del_init(&pending_req->urb_list);
155 spin_unlock_irqrestore(&stub->submitting_lock, flags);
/*
 * Asynchronously unlink every URB still in flight on this stub
 * (e.g. when the device is being detached). usb_unlink_urb() is the
 * non-blocking variant, safe to call under the spinlock.
 */
158 void usbbk_unlink_urbs(struct usbstub *stub)
160 pending_req_t *req, *tmp;
163 spin_lock_irqsave(&stub->submitting_lock, flags);
164 list_for_each_entry_safe(req, tmp, &stub->submitting_list, urb_list) {
165 usb_unlink_urb(req->urb);
167 spin_unlock_irqrestore(&stub->submitting_lock, flags);
/*
 * Tear down all grant mappings of a request's segments (buffer segments
 * plus ISO frame-descriptor segments) and free the segment array.
 * Slots holding USBBACK_INVALID_HANDLE were never mapped and are skipped.
 */
170 static void fast_flush_area(pending_req_t *pending_req)
172 struct gnttab_unmap_grant_ref unmap[USBIF_MAX_SEGMENTS_PER_REQUEST];
173 unsigned int i, nr_segs, invcount = 0;
174 grant_handle_t handle;
177 nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs;
180 for (i = 0; i < nr_segs; i++) {
181 handle = pending_handle(pending_req, i);
182 if (handle == USBBACK_INVALID_HANDLE)
184 gnttab_set_unmap_op(&unmap[invcount], vaddr(pending_req, i),
185 GNTMAP_host_map, handle);
/* mark the slot unmapped so a second flush is a no-op for it */
186 pending_handle(pending_req, i) = USBBACK_INVALID_HANDLE;
190 ret = HYPERVISOR_grant_table_op(
191 GNTTABOP_unmap_grant_ref, unmap, invcount);
194 kfree(pending_req->seg);
/*
 * Scatter a contiguous local buffer into the guest's granted pages,
 * honoring each segment's (offset, length) within its page.
 */
200 static void copy_buff_to_pages(void *buff, pending_req_t *pending_req,
201 int start, int nr_pages)
203 unsigned long copied = 0;
206 for (i = start; i < start + nr_pages; i++) {
207 memcpy((void *) vaddr(pending_req, i) + pending_req->seg[i].offset,
209 pending_req->seg[i].length);
210 copied += pending_req->seg[i].length;
/*
 * Gather the guest's granted pages into a contiguous local buffer —
 * the inverse of copy_buff_to_pages().
 */
214 static void copy_pages_to_buff(void *buff, pending_req_t *pending_req,
215 int start, int nr_pages)
217 unsigned long copied = 0;
220 for (i = start; i < start + nr_pages; i++) {
221 memcpy(buff + copied,
222 (void *) vaddr(pending_req, i) + pending_req->seg[i].offset,
223 pending_req->seg[i].length);
224 copied += pending_req->seg[i].length;
/*
 * Allocate the URB and its backing resources for one frontend request:
 * the urb itself (with ISO packet slots when needed), a coherent DMA
 * transfer buffer when the request carries data, and a setup packet
 * for control transfers. On failure, everything allocated so far is
 * released (see the fail path at the end).
 */
228 static int usbbk_alloc_urb(usbif_urb_request_t *req, pending_req_t *pending_req)
232 if (usb_pipeisoc(req->pipe))
233 pending_req->urb = usb_alloc_urb(req->u.isoc.number_of_packets, GFP_KERNEL);
235 pending_req->urb = usb_alloc_urb(0, GFP_KERNEL);
236 if (!pending_req->urb) {
237 pr_err("usbback: can't alloc urb\n");
242 if (req->buffer_length) {
243 pending_req->buffer = usb_alloc_coherent(pending_req->stub->udev,
244 req->buffer_length, GFP_KERNEL,
245 &pending_req->transfer_dma);
246 if (!pending_req->buffer) {
247 pr_err("usbback: can't alloc urb buffer\n");
253 if (usb_pipecontrol(req->pipe)) {
254 pending_req->setup = kmalloc(sizeof(struct usb_ctrlrequest),
256 if (!pending_req->setup) {
257 pr_err("usbback: can't alloc usb_ctrlrequest\n");
259 goto fail_free_buffer;
/* error unwind: free the coherent buffer (if any), then the urb */
266 if (req->buffer_length)
267 usb_free_coherent(pending_req->stub->udev,
270 pending_req->transfer_dma);
272 usb_free_urb(pending_req->urb);
/*
 * Defer freeing of a URB: queue it on pending_urb_free for later
 * release by usbbk_free_urbs() (freeing may not be safe in the
 * caller's context, e.g. completion handlers).
 */
277 static void usbbk_free_urb(struct urb *urb)
281 spin_lock_irqsave(&urb_free_lock, flags);
282 list_add(&urb->urb_list, &pending_urb_free);
283 spin_unlock_irqrestore(&urb_free_lock, flags);
/*
 * Actually release a URB's resources: the control setup packet (only
 * allocated for control pipes) and the coherent transfer buffer (only
 * allocated when there was data to transfer).
 */
286 static void _usbbk_free_urb(struct urb *urb)
288 if (usb_pipecontrol(urb->pipe))
289 kfree(urb->setup_packet);
290 if (urb->transfer_buffer_length)
291 usb_free_coherent(urb->dev, urb->transfer_buffer_length,
292 urb->transfer_buffer, urb->transfer_dma);
/*
 * Drain the deferred-free list: splice it out under the lock, then
 * free each queued URB outside the lock via _usbbk_free_urb().
 * The unlocked emptiness pre-check is a cheap fast path; the
 * authoritative splice happens under urb_free_lock.
 */
297 static void usbbk_free_urbs(void)
300 struct list_head tmp_list;
302 if (list_empty(&pending_urb_free))
305 INIT_LIST_HEAD(&tmp_list);
307 spin_lock_irqsave(&urb_free_lock, flags);
308 list_splice_init(&pending_urb_free, &tmp_list);
309 spin_unlock_irqrestore(&urb_free_lock, flags);
311 while (!list_empty(&tmp_list)) {
312 struct urb *next_urb = list_first_entry(&tmp_list, struct urb,
315 list_del(&next_urb->urb_list);
316 _usbbk_free_urb(next_urb);
/* Flag the interface as having pending work (picked up by usbbk_schedule). */
320 static void usbbk_notify_work(usbif_t *usbif)
322 usbif->waiting_reqs = 1;
/* Event-channel interrupt handler: kick the per-interface worker. */
326 irqreturn_t usbbk_be_int(int irq, void *dev_id)
328 usbbk_notify_work(dev_id);
/*
 * Place a response on the URB ring for a completed request and notify
 * the frontend via its event channel when the ring macro says so.
 * All ring manipulation is serialized by urb_ring_lock.
 */
332 static void usbbk_do_response(pending_req_t *pending_req, int32_t status,
333 int32_t actual_length, int32_t error_count, uint16_t start_frame)
335 usbif_t *usbif = pending_req->usbif;
336 usbif_urb_response_t *res;
340 spin_lock_irqsave(&usbif->urb_ring_lock, flags);
341 res = RING_GET_RESPONSE(&usbif->urb_ring, usbif->urb_ring.rsp_prod_pvt);
342 res->id = pending_req->id;
343 res->status = status;
344 res->actual_length = actual_length;
345 res->error_count = error_count;
346 res->start_frame = start_frame;
347 usbif->urb_ring.rsp_prod_pvt++;
349 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&usbif->urb_ring, notify);
350 spin_unlock_irqrestore(&usbif->urb_ring_lock, flags);
/* only ring the event channel when the frontend actually needs waking */
353 notify_remote_via_irq(usbif->irq);
/*
 * URB completion callback. Copies IN data (and ISO frame descriptors)
 * back into the guest's granted pages, unmaps the grants, pushes the
 * response, and releases all per-request state.
 */
356 static void usbbk_urb_complete(struct urb *urb)
358 pending_req_t *pending_req = (pending_req_t *)urb->context;
/* IN transfer with data: scatter device data back to guest pages */
360 if (usb_pipein(urb->pipe) && urb->status == 0 && urb->actual_length > 0)
361 copy_buff_to_pages(pending_req->buffer, pending_req,
362 0, pending_req->nr_buffer_segs);
/* ISO: per-frame descriptors live in the extra segments after the buffer */
364 if (usb_pipeisoc(urb->pipe))
365 copy_buff_to_pages(&urb->iso_frame_desc[0], pending_req,
366 pending_req->nr_buffer_segs, pending_req->nr_extra_segs);
370 fast_flush_area(pending_req);
372 usbbk_do_response(pending_req, urb->status, urb->actual_length,
373 urb->error_count, urb->start_frame);
375 remove_req_from_submitting_list(pending_req->stub, pending_req);
/* drop the interface reference taken at dispatch, recycle the request */
379 usbif_put(pending_req->usbif);
380 free_req(pending_req);
/*
 * Map the guest's granted pages for one request: build the map ops
 * (buffer segments are mapped read-only for OUT transfers since the
 * backend only reads them; ISO extra segments are always writable),
 * issue the hypercall, validate each result, record grant handles and
 * per-segment offset/length, and bounds-check every segment against
 * PAGE_SIZE. On failure the mapped area is flushed.
 */
383 static int usbbk_gnttab_map(usbif_t *usbif,
384 usbif_urb_request_t *req, pending_req_t *pending_req)
387 unsigned int nr_segs;
389 struct gnttab_map_grant_ref map[USBIF_MAX_SEGMENTS_PER_REQUEST];
391 nr_segs = pending_req->nr_buffer_segs + pending_req->nr_extra_segs;
393 if (nr_segs > USBIF_MAX_SEGMENTS_PER_REQUEST) {
394 pr_err("Bad number of segments in request\n");
400 pending_req->seg = kmalloc(sizeof(struct pending_req_segment)
401 * nr_segs, GFP_KERNEL);
402 if (!pending_req->seg) {
407 if (pending_req->nr_buffer_segs) {
408 flags = GNTMAP_host_map;
/* OUT data is only read by the backend, so map it read-only */
409 if (usb_pipeout(req->pipe))
410 flags |= GNTMAP_readonly;
411 for (i = 0; i < pending_req->nr_buffer_segs; i++)
412 gnttab_set_map_op(&map[i], vaddr(
413 pending_req, i), flags,
418 if (pending_req->nr_extra_segs) {
419 flags = GNTMAP_host_map;
/*
 * NOTE(review): this loop bound uses req->nr_buffer_segs while the
 * previous one uses pending_req->nr_buffer_segs; they are equal when
 * dispatch copied the field first — confirm against the caller.
 */
420 for (i = req->nr_buffer_segs; i < nr_segs; i++)
421 gnttab_set_map_op(&map[i], vaddr(
422 pending_req, i), flags,
427 ret = HYPERVISOR_grant_table_op(GNTTABOP_map_grant_ref,
431 for (i = 0; i < nr_segs; i++) {
432 /* Make sure than none of the map ops failed with GNTST_eagain */
433 if (unlikely(map[i].status == GNTST_eagain))
434 gnttab_check_GNTST_eagain_while(GNTTABOP_map_grant_ref, &map[i]);
436 if (unlikely(map[i].status != GNTST_okay)) {
437 pr_err("usbback: invalid buffer -- could not remap it\n");
438 map[i].handle = USBBACK_INVALID_HANDLE;
442 pending_handle(pending_req, i) = map[i].handle;
/* record the foreign machine frame behind this pseudo-physical page */
447 set_phys_to_machine(__pa(vaddr(
448 pending_req, i)) >> PAGE_SHIFT,
449 FOREIGN_FRAME(map[i].dev_bus_addr >> PAGE_SHIFT));
451 pending_req->seg[i].offset = req->seg[i].offset;
452 pending_req->seg[i].length = req->seg[i].length;
/* reject segments that would read/write outside their page */
456 if (pending_req->seg[i].offset >= PAGE_SIZE ||
457 pending_req->seg[i].length > PAGE_SIZE ||
458 pending_req->seg[i].offset + pending_req->seg[i].length > PAGE_SIZE)
469 fast_flush_area(pending_req);
/*
 * Fill in the URB from the frontend request according to pipe type
 * (ISO / interrupt / control / bulk), pointing it at the coherent
 * buffer allocated in usbbk_alloc_urb() and at usbbk_urb_complete().
 */
476 static void usbbk_init_urb(usbif_urb_request_t *req, pending_req_t *pending_req)
479 struct usb_device *udev = pending_req->stub->udev;
480 struct urb *urb = pending_req->urb;
482 switch (usb_pipetype(req->pipe)) {
483 case PIPE_ISOCHRONOUS:
484 if (usb_pipein(req->pipe))
485 pipe = usb_rcvisocpipe(udev, usb_pipeendpoint(req->pipe));
487 pipe = usb_sndisocpipe(udev, usb_pipeendpoint(req->pipe));
/* ISO urbs are filled by hand; no usb_fill_*_urb helper exists */
491 urb->transfer_flags = req->transfer_flags;
492 urb->transfer_flags |= URB_ISO_ASAP;
493 urb->transfer_buffer = pending_req->buffer;
494 urb->transfer_buffer_length = req->buffer_length;
495 urb->complete = usbbk_urb_complete;
496 urb->context = pending_req;
497 urb->interval = req->u.isoc.interval;
498 urb->start_frame = req->u.isoc.start_frame;
499 urb->number_of_packets = req->u.isoc.number_of_packets;
503 if (usb_pipein(req->pipe))
504 pipe = usb_rcvintpipe(udev, usb_pipeendpoint(req->pipe));
506 pipe = usb_sndintpipe(udev, usb_pipeendpoint(req->pipe));
508 usb_fill_int_urb(urb, udev, pipe,
509 pending_req->buffer, req->buffer_length,
511 pending_req, req->u.intr.interval);
513 * high speed interrupt endpoints use a logarithmic encoding of
514 * the endpoint interval, and usb_fill_int_urb() initializes a
515 * interrupt urb with the encoded interval value.
517 * req->u.intr.interval is the interval value that already
518 * encoded in the frontend part, and the above usb_fill_int_urb()
519 * initializes the urb->interval with double encoded value.
521 * so, simply overwrite the urb->interval with original value.
523 urb->interval = req->u.intr.interval;
524 urb->transfer_flags = req->transfer_flags;
528 if (usb_pipein(req->pipe))
529 pipe = usb_rcvctrlpipe(udev, 0);
531 pipe = usb_sndctrlpipe(udev, 0);
533 usb_fill_control_urb(urb, udev, pipe,
534 (unsigned char *) pending_req->setup,
535 pending_req->buffer, req->buffer_length,
536 usbbk_urb_complete, pending_req);
/* the 8-byte setup packet comes verbatim from the frontend request */
537 memcpy(pending_req->setup, req->u.ctrl, 8);
538 urb->transfer_flags = req->transfer_flags;
542 if (usb_pipein(req->pipe))
543 pipe = usb_rcvbulkpipe(udev, usb_pipeendpoint(req->pipe));
545 pipe = usb_sndbulkpipe(udev, usb_pipeendpoint(req->pipe));
547 usb_fill_bulk_urb(urb, udev, pipe,
548 pending_req->buffer, req->buffer_length,
549 usbbk_urb_complete, pending_req);
550 urb->transfer_flags = req->transfer_flags;
/* buffer was usb_alloc_coherent'd: hand the core a ready DMA mapping */
557 if (req->buffer_length) {
558 urb->transfer_dma = pending_req->transfer_dma;
559 urb->transfer_flags |= URB_NO_TRANSFER_DMA_MAP;
/* Deferred SET_INTERFACE: carries the request into process context. */
563 struct set_interface_request {
564 pending_req_t *pending_req;
567 struct work_struct work;
/*
 * Workqueue handler: perform usb_set_interface() (which may sleep)
 * under the device lock, then complete the original frontend request.
 */
570 static void usbbk_set_interface_work(struct work_struct *arg)
572 struct set_interface_request *req
573 = container_of(arg, struct set_interface_request, work);
574 pending_req_t *pending_req = req->pending_req;
575 struct usb_device *udev = req->pending_req->stub->udev;
579 usb_lock_device(udev);
580 ret = usb_set_interface(udev, req->interface, req->alternate);
581 usb_unlock_device(udev);
584 usbbk_do_response(pending_req, ret, 0, 0, 0);
585 usbif_put(pending_req->usbif);
586 free_req(pending_req);
/*
 * Queue a SET_INTERFACE to be executed in process context; the
 * response is sent from usbbk_set_interface_work().
 */
590 static int usbbk_set_interface(pending_req_t *pending_req, int interface, int alternate)
592 struct set_interface_request *req;
593 struct usb_device *udev = pending_req->stub->udev;
595 req = kmalloc(sizeof(*req), GFP_KERNEL);
598 req->pending_req = pending_req;
599 req->interface = interface;
600 req->alternate = alternate;
601 INIT_WORK(&req->work, usbbk_set_interface_work);
603 schedule_work(&req->work);
/* Deferred CLEAR_FEATURE(ENDPOINT_HALT): carried into process context. */
607 struct clear_halt_request {
608 pending_req_t *pending_req;
610 struct work_struct work;
/*
 * Workqueue handler: clear the endpoint halt under the device lock,
 * then complete the original frontend request.
 */
613 static void usbbk_clear_halt_work(struct work_struct *arg)
615 struct clear_halt_request *req
616 = container_of(arg, struct clear_halt_request, work);
617 pending_req_t *pending_req = req->pending_req;
618 struct usb_device *udev = req->pending_req->stub->udev;
621 usb_lock_device(udev);
622 ret = usb_clear_halt(req->pending_req->stub->udev, req->pipe);
623 usb_unlock_device(udev);
626 usbbk_do_response(pending_req, ret, 0, 0, 0);
627 usbif_put(pending_req->usbif);
628 free_req(pending_req);
/* Queue a clear-halt on the given pipe for execution in process context. */
632 static int usbbk_clear_halt(pending_req_t *pending_req, int pipe)
634 struct clear_halt_request *req;
635 struct usb_device *udev = pending_req->stub->udev;
637 req = kmalloc(sizeof(*req), GFP_KERNEL);
640 req->pending_req = pending_req;
642 INIT_WORK(&req->work, usbbk_clear_halt_work);
645 schedule_work(&req->work);
/* Deferred port reset: carried into process context via a workqueue. */
650 struct port_reset_request {
651 pending_req_t *pending_req;
652 struct work_struct work;
/*
 * Workqueue handler: reset the device. usb_lock_device_for_reset()
 * both locks and validates that a reset is currently permissible;
 * usb_reset_device() only runs when that lock was obtained.
 */
655 static void usbbk_port_reset_work(struct work_struct *arg)
657 struct port_reset_request *req
658 = container_of(arg, struct port_reset_request, work);
659 pending_req_t *pending_req = req->pending_req;
660 struct usb_device *udev = pending_req->stub->udev;
663 ret = ret_lock = usb_lock_device_for_reset(udev, NULL);
665 ret = usb_reset_device(udev);
667 usb_unlock_device(udev);
671 usbbk_do_response(pending_req, ret, 0, 0, 0);
672 usbif_put(pending_req->usbif);
673 free_req(pending_req);
/* Queue a device reset for execution in process context. */
677 static int usbbk_port_reset(pending_req_t *pending_req)
679 struct port_reset_request *req;
680 struct usb_device *udev = pending_req->stub->udev;
682 req = kmalloc(sizeof(*req), GFP_KERNEL);
686 req->pending_req = pending_req;
687 INIT_WORK(&req->work, usbbk_port_reset_work);
690 schedule_work(&req->work);
/*
 * Move a stub between device-address slots in the interface's address
 * table: clear the old slot, install the new one, record the address
 * on the stub. Guarded by addr_lock.
 */
695 static void usbbk_set_address(usbif_t *usbif, struct usbstub *stub, int cur_addr, int new_addr)
699 spin_lock_irqsave(&usbif->addr_lock, flags);
701 usbif->addr_table[cur_addr] = NULL;
703 usbif->addr_table[new_addr] = stub;
704 stub->addr = new_addr;
705 spin_unlock_irqrestore(&usbif->addr_lock, flags);
/*
 * Look up the stub attached at the given virtual port number.
 * Returns the match found while walking stub_list under stub_lock
 * (NULL when no device is attached at that port).
 */
708 struct usbstub *find_attached_device(usbif_t *usbif, int portnum)
710 struct usbstub *stub;
714 spin_lock_irqsave(&usbif->stub_lock, flags);
715 list_for_each_entry(stub, &usbif->stub_list, dev_list) {
716 if (stub->portid->portnum == portnum) {
721 spin_unlock_irqrestore(&usbif->stub_lock, flags);
/*
 * Handle a frontend "unlink" request: locate the target device (by
 * port when still unaddressed, else by device address), find the
 * in-flight request whose id matches, and unlink its URB. A response
 * is always sent and the request recycled.
 */
729 static void process_unlink_req(usbif_t *usbif,
730 usbif_urb_request_t *req, pending_req_t *pending_req)
732 pending_req_t *unlink_req = NULL;
737 devnum = usb_pipedevice(req->pipe);
/* address 0: device not yet addressed, resolve via the port number */
738 if (unlikely(devnum == 0)) {
739 pending_req->stub = find_attached_device(usbif, usbif_pipeportnum(req->pipe));
740 if (unlikely(!pending_req->stub)) {
745 if (unlikely(!usbif->addr_table[devnum])) {
749 pending_req->stub = usbif->addr_table[devnum];
752 spin_lock_irqsave(&pending_req->stub->submitting_lock, flags);
753 list_for_each_entry(unlink_req, &pending_req->stub->submitting_list, urb_list) {
754 if (unlink_req->id == req->u.unlink.unlink_id) {
755 ret = usb_unlink_urb(unlink_req->urb);
759 spin_unlock_irqrestore(&pending_req->stub->submitting_lock, flags);
762 usbbk_do_response(pending_req, ret, 0, 0, 0);
764 free_req(pending_req);
/*
 * Inspect a control request before submission and intercept the ones
 * the backend must emulate or act on itself (SET_ADDRESS bookkeeping,
 * SET_CONFIGURATION / SET_INTERFACE / CLEAR_FEATURE(HALT) deferred to
 * process context). Requests addressed to device #0 are resolved via
 * the port number, since the device has no address yet.
 *
 * Fix: the SET_CONFIGURATION branch compared "ctrl->RequestType",
 * which is not a member of struct usb_ctrlrequest — the field is
 * bRequestType, as used by every other branch here.
 */
768 static int check_and_submit_special_ctrlreq(usbif_t *usbif,
769 usbif_urb_request_t *req, pending_req_t *pending_req)
772 struct usbstub *stub = NULL;
773 struct usb_ctrlrequest *ctrl = (struct usb_ctrlrequest *) req->u.ctrl;
777 devnum = usb_pipedevice(req->pipe);
780 * When the device is first connected or reseted, USB device has no address.
781 * In this initial state, following requests are send to device address (#0),
783 * 1. GET_DESCRIPTOR (with Descriptor Type is "DEVICE") is send,
784 * and OS knows what device is connected to.
786 * 2. SET_ADDRESS is send, and then, device has its address.
788 * In the next step, SET_CONFIGURATION is send to addressed device, and then,
789 * the device is finally ready to use.
791 if (unlikely(devnum == 0)) {
792 stub = find_attached_device(usbif, usbif_pipeportnum(req->pipe));
793 if (unlikely(!stub)) {
798 switch (ctrl->bRequest) {
799 case USB_REQ_GET_DESCRIPTOR:
801 * GET_DESCRIPTOR request to device #0.
802 * through to normal urb transfer.
804 pending_req->stub = stub;
807 case USB_REQ_SET_ADDRESS:
809 * SET_ADDRESS request to device #0.
810 * add attached device to addr_table.
813 __u16 addr = le16_to_cpu(ctrl->wValue);
814 usbbk_set_address(usbif, stub, 0, addr);
824 if (unlikely(!usbif->addr_table[devnum])) {
828 pending_req->stub = usbif->addr_table[devnum];
832 * Check special request
834 switch (ctrl->bRequest) {
835 case USB_REQ_SET_ADDRESS:
837 * SET_ADDRESS request to addressed device.
838 * change addr or remove from addr_table.
/*
 * NOTE(review): in this addressed-device path, 'stub' is only visibly
 * assigned in the devnum == 0 branch above — verify against the full
 * source that it is set (e.g. from addr_table) before this call.
 */
841 __u16 addr = le16_to_cpu(ctrl->wValue);
842 usbbk_set_address(usbif, stub, devnum, addr);
848 case USB_REQ_SET_CONFIGURATION:
850 * linux 2.6.27 or later version only!
852 if (ctrl->bRequestType == USB_RECIP_DEVICE) {
853 __u16 config = le16_to_cpu(ctrl->wValue);
854 usb_driver_set_configuration(pending_req->stub->udev, config);
859 case USB_REQ_SET_INTERFACE:
860 if (ctrl->bRequestType == USB_RECIP_INTERFACE) {
861 __u16 alt = le16_to_cpu(ctrl->wValue);
862 __u16 intf = le16_to_cpu(ctrl->wIndex);
863 usbbk_set_interface(pending_req, intf, alt);
867 case USB_REQ_CLEAR_FEATURE:
868 if (ctrl->bRequestType == USB_RECIP_ENDPOINT
869 && ctrl->wValue == USB_ENDPOINT_HALT) {
871 int ep = le16_to_cpu(ctrl->wIndex) & 0x0f;
872 int dir = le16_to_cpu(ctrl->wIndex)
875 pipe = usb_rcvctrlpipe(pending_req->stub->udev, ep);
877 pipe = usb_sndctrlpipe(pending_req->stub->udev, ep);
878 usbbk_clear_halt(pending_req, pipe);
882 #if 0 /* not tested yet */
883 case USB_REQ_SET_FEATURE:
884 if (ctrl->bRequestType == USB_RT_PORT) {
885 __u16 feat = le16_to_cpu(ctrl->wValue);
886 if (feat == USB_PORT_FEAT_RESET) {
887 usbbk_port_reset(pending_req);
900 usbbk_do_response(pending_req, ret, 0, 0, 0);
902 free_req(pending_req);
/*
 * Main per-request dispatcher: route unlink requests, intercept
 * special control requests, resolve the target stub, allocate and
 * initialize the URB, map the guest's pages, copy OUT data in, and
 * submit. Error paths unwind in reverse order and always send a
 * response before recycling the request.
 */
906 static void dispatch_request_to_pending_reqs(usbif_t *usbif,
907 usbif_urb_request_t *req,
908 pending_req_t *pending_req)
912 pending_req->id = req->id;
913 pending_req->usbif = usbif;
/* unlink requests never reach URB submission */
920 if (unlikely(usbif_pipeunlink(req->pipe))) {
921 process_unlink_req(usbif, req, pending_req);
/* control requests may be handled entirely by the backend */
925 if (usb_pipecontrol(req->pipe)) {
926 if (check_and_submit_special_ctrlreq(usbif, req, pending_req))
929 int devnum = usb_pipedevice(req->pipe);
930 if (unlikely(!usbif->addr_table[devnum])) {
934 pending_req->stub = usbif->addr_table[devnum];
939 ret = usbbk_alloc_urb(req, pending_req);
945 add_req_to_submitting_list(pending_req->stub, pending_req);
949 usbbk_init_urb(req, pending_req);
953 pending_req->nr_buffer_segs = req->nr_buffer_segs;
954 if (usb_pipeisoc(req->pipe))
955 pending_req->nr_extra_segs = req->u.isoc.nr_frame_desc_segs;
957 pending_req->nr_extra_segs = 0;
961 ret = usbbk_gnttab_map(usbif, req, pending_req);
963 pr_err("usbback: invalid buffer\n");
/* OUT transfer: gather guest pages into the coherent buffer first */
970 if (usb_pipeout(req->pipe) && req->buffer_length)
971 copy_pages_to_buff(pending_req->buffer,
974 pending_req->nr_buffer_segs);
975 if (usb_pipeisoc(req->pipe)) {
976 copy_pages_to_buff(&pending_req->urb->iso_frame_desc[0],
978 pending_req->nr_buffer_segs,
979 pending_req->nr_extra_segs);
984 ret = usb_submit_urb(pending_req->urb, GFP_KERNEL);
986 pr_err("usbback: failed submitting urb, error %d\n", ret);
988 goto fail_flush_area;
/* error unwind: unmap grants, unlist, free urb, respond, recycle */
993 fast_flush_area(pending_req);
995 remove_req_from_submitting_list(pending_req->stub, pending_req);
997 usbbk_free_urb(pending_req->urb);
999 usbbk_do_response(pending_req, ret, 0, 0, 0);
1001 free_req(pending_req);
/*
 * Consume requests from the URB ring: for each request up to the
 * frontend's producer index, allocate a pending_req and dispatch it.
 * Stops (and reports) when the consumer would overflow the ring or
 * the request pool is exhausted; returns whether more work remains.
 */
1004 static int usbbk_start_submit_urb(usbif_t *usbif)
1006 usbif_urb_back_ring_t *urb_ring = &usbif->urb_ring;
1007 usbif_urb_request_t *req;
1008 pending_req_t *pending_req;
1012 rc = urb_ring->req_cons;
1013 rp = urb_ring->sring->req_prod;
1017 if (RING_REQUEST_CONS_OVERFLOW(urb_ring, rc)) {
1018 pr_warning("RING_REQUEST_CONS_OVERFLOW\n");
1022 pending_req = alloc_req();
1023 if (NULL == pending_req) {
1028 req = RING_GET_REQUEST(urb_ring, rc);
1029 urb_ring->req_cons = ++rc;
1031 dispatch_request_to_pending_reqs(usbif, req,
1035 RING_FINAL_CHECK_FOR_REQUESTS(&usbif->urb_ring, more_to_do);
/*
 * Report a port connection change to the frontend over the conn ring:
 * consume the frontend's pre-posted request slot, fill a response with
 * the port number (and related fields), and notify via the event
 * channel when the ring macro indicates the frontend needs waking.
 *
 * Fix: removed a stray double semicolon (empty statement) after the
 * RING_GET_REQUEST() assignment.
 */
1042 void usbbk_hotplug_notify(usbif_t *usbif, int portnum, int speed)
1044 usbif_conn_back_ring_t *ring = &usbif->conn_ring;
1045 usbif_conn_request_t *req;
1046 usbif_conn_response_t *res;
1047 unsigned long flags;
1051 spin_lock_irqsave(&usbif->conn_ring_lock, flags);
1053 req = RING_GET_REQUEST(ring, ring->req_cons);
/* re-arm the frontend's request event for the next slot */
1056 ring->sring->req_event = ring->req_cons + 1;
1058 res = RING_GET_RESPONSE(ring, ring->rsp_prod_pvt);
1060 res->portnum = portnum;
1062 ring->rsp_prod_pvt++;
1063 RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(ring, notify);
1065 spin_unlock_irqrestore(&usbif->conn_ring_lock, flags);
1068 notify_remote_via_irq(usbif->irq);
/*
 * Per-interface kernel thread main loop: sleep until work is flagged
 * (usbbk_notify_work) AND a free pending_req exists, then drain the
 * ring; if the ring still had requests, re-flag work. Clears its
 * usbif->xenusbd back-pointer on exit.
 */
1071 int usbbk_schedule(void *arg)
1073 usbif_t *usbif = (usbif_t *) arg;
1077 while (!kthread_should_stop()) {
1078 wait_event_interruptible(
1080 usbif->waiting_reqs || kthread_should_stop());
1081 wait_event_interruptible(
1083 !list_empty(&pending_free) || kthread_should_stop());
1084 usbif->waiting_reqs = 0;
1087 if (usbbk_start_submit_urb(usbif))
1088 usbif->waiting_reqs = 1;
1094 usbif->xenusbd = NULL;
1101 * attach usbstub device to usbif.
/* Adds the stub to the interface's stub_list (under stub_lock). */
1103 void usbbk_attach_device(usbif_t *usbif, struct usbstub *stub)
1105 unsigned long flags;
1107 spin_lock_irqsave(&usbif->stub_lock, flags);
1108 list_add(&stub->dev_list, &usbif->stub_list);
1109 spin_unlock_irqrestore(&usbif->stub_lock, flags);
1110 stub->usbif = usbif;
1114 * detach usbstub device from usbif.
/* Clears the stub's address-table entry and unlinks it (under stub_lock). */
1116 void usbbk_detach_device(usbif_t *usbif, struct usbstub *stub)
1118 unsigned long flags;
1121 usbbk_set_address(usbif, stub, stub->addr, 0);
1122 spin_lock_irqsave(&usbif->stub_lock, flags);
1123 list_del(&stub->dev_list);
1124 spin_unlock_irqrestore(&usbif->stub_lock, flags);
/* As usbbk_detach_device(), but caller already holds stub_lock. */
1128 void detach_device_without_lock(usbif_t *usbif, struct usbstub *stub)
1131 usbbk_set_address(usbif, stub, stub->addr, 0);
1132 list_del(&stub->dev_list);
/*
 * Module init: allocate the request pool, per-segment grant-handle
 * array, and ballooned page vector sized by the usbif_reqs module
 * parameter; seed the free list; then bring up the stub driver and
 * xenbus backend. On failure all allocations are released.
 *
 * Fix: the pool allocations multiplied element size by a module-
 * parameter-derived count with no overflow check; kcalloc() performs
 * the count*size multiplication with overflow checking (and zeroing,
 * which pending_reqs already required and is harmless for the handle
 * array, since every entry is overwritten below).
 */
1136 static int __init usbback_init(void)
1141 if (!is_running_on_xen())
/*
 * NOTE(review): usbif_reqs is an unvalidated int module parameter;
 * a non-positive or huge value would still misbehave here — consider
 * range-checking it. TODO confirm against the full source.
 */
1144 mmap_pages = usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST;
1145 pending_reqs = kcalloc(usbif_reqs, sizeof(pending_reqs[0]),
1146 GFP_KERNEL);
1147 pending_grant_handles = kcalloc(mmap_pages,
1148 sizeof(pending_grant_handles[0]), GFP_KERNEL);
1149 pending_pages = alloc_empty_pages_and_pagevec(mmap_pages);
1151 if (!pending_reqs || !pending_grant_handles || !pending_pages) {
/* every grant-handle slot starts out "not mapped" */
1156 for (i = 0; i < mmap_pages; i++)
1157 pending_grant_handles[i] = USBBACK_INVALID_HANDLE;
1159 INIT_LIST_HEAD(&pending_free);
1161 for (i = 0; i < usbif_reqs; i++)
1162 list_add_tail(&pending_reqs[i].free_list, &pending_free);
1164 err = usbstub_init();
1168 err = usbback_xenbus_init();
/* error unwind: release everything allocated above */
1177 kfree(pending_reqs);
1178 kfree(pending_grant_handles);
1179 free_empty_pages_and_pagevec(pending_pages, mmap_pages);
/*
 * Module exit: tear down the xenbus backend, then free the pools
 * allocated in usbback_init(). The page-vector size is recomputed
 * from the same usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST product.
 */
1183 static void __exit usbback_exit(void)
1185 usbback_xenbus_exit();
1187 kfree(pending_reqs);
1188 kfree(pending_grant_handles);
1189 free_empty_pages_and_pagevec(pending_pages, usbif_reqs * USBIF_MAX_SEGMENTS_PER_REQUEST);
/* Module entry/exit hooks and metadata. */
1192 module_init(usbback_init);
1193 module_exit(usbback_exit);
1196 MODULE_DESCRIPTION("Xen USB backend driver (usbback)");
1197 MODULE_LICENSE("Dual BSD/GPL");
1198 MODULE_ALIAS("xen-backend:vusb");