/*
 * xHCI host controller driver
 *
 * Copyright (C) 2008 Intel Corp.
 *
 * Author: Sarah Sharp
 * Some code borrowed from the Linux EHCI driver.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
 * for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
 */
#include <linux/pci.h>
#include <linux/irq.h>
#include <linux/log2.h>
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/slab.h>

#include "xhci.h"

#define DRIVER_AUTHOR "Sarah Sharp"
#define DRIVER_DESC "'eXtensible' Host Controller (xHC) Driver"
/* Some 0.95 hardware can't handle the chain bit on a Link TRB being cleared */
static int link_quirk;
module_param(link_quirk, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(link_quirk, "Don't clear the chain bit on a link TRB");
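/*
 * Illustrative usage, not part of this file: assuming the driver is built
 * as a module named xhci-hcd, the quirk can be forced at load time with
 *
 *	modprobe xhci-hcd link_quirk=1
 *
 * and, since the parameter is S_IRUGO | S_IWUSR, inspected or toggled later
 * under /sys/module/xhci_hcd/parameters/link_quirk.
 */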
/* TODO: copied from ehci-hcd.c - can this be refactored? */
/*
 * handshake - spin reading hc until handshake completes or fails
 * @ptr: address of hc register to be read
 * @mask: bits to look at in result of read
 * @done: value of those bits when handshake succeeds
 * @usec: timeout in microseconds
 *
 * Returns negative errno, or zero on success
 *
 * Success happens when the "mask" bits have the specified value (hardware
 * handshake done).  There are two failure modes: "usec" has passed (major
 * hardware flakeout), or the register reads as all-ones (hardware removed).
 */
static int handshake(struct xhci_hcd *xhci, void __iomem *ptr,
		u32 mask, u32 done, int usec)
{
	u32 result;

	do {
		result = xhci_readl(xhci, ptr);
		if (result == ~(u32)0)		/* card removed */
			return -ENODEV;
		result &= mask;
		if (result == done)
			return 0;
		udelay(1);
		usec--;
	} while (usec > 0);
	return -ETIMEDOUT;
}
/*
 * Disable interrupts and begin the xHCI halting process.
 */
void xhci_quiesce(struct xhci_hcd *xhci)
{
	u32 halted;
	u32 cmd;
	u32 mask;

	mask = ~(XHCI_IRQS);
	halted = xhci_readl(xhci, &xhci->op_regs->status) & STS_HALT;
	if (!halted)
		mask &= ~CMD_RUN;

	cmd = xhci_readl(xhci, &xhci->op_regs->command);
	cmd &= mask;
	xhci_writel(xhci, cmd, &xhci->op_regs->command);
}
/*
 * Force HC into halt state.
 *
 * Disable any IRQs and clear the run/stop bit.
 * HC will complete any current and actively pipelined transactions, and
 * should halt within 16 microframes of the run/stop bit being cleared.
 * Read HC Halted bit in the status register to see when the HC is finished.
 * XXX: shouldn't we set HC_STATE_HALT here somewhere?
 */
int xhci_halt(struct xhci_hcd *xhci)
{
	xhci_dbg(xhci, "// Halt the HC\n");
	xhci_quiesce(xhci);

	return handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, XHCI_MAX_HALT_USEC);
}
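/*
 * For reference: a USB microframe is 125 us, so the 16-microframe halt
 * budget mentioned above comes to 2 ms; XHCI_MAX_HALT_USEC is presumably
 * sized to cover that window (illustrative math from the spec, not from
 * this excerpt).
 */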
/*
 * Set the run bit and wait for the host to be running.
 */
static int xhci_start(struct xhci_hcd *xhci)
{
	u32 temp;
	int ret;

	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_RUN);
	xhci_dbg(xhci, "// Turn on HC, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	/*
	 * Wait for the HCHalted Status bit to be 0 to indicate the host is
	 * running.
	 */
	ret = handshake(xhci, &xhci->op_regs->status,
			STS_HALT, 0, XHCI_MAX_HALT_USEC);
	if (ret == -ETIMEDOUT)
		xhci_err(xhci, "Host took too long to start, "
				"waited %u microseconds.\n",
				XHCI_MAX_HALT_USEC);
	return ret;
}
/*
 * Reset a halted HC, and set the internal HC state to HC_STATE_HALT.
 *
 * This resets pipelines, timers, counters, state machines, etc.
 * Transactions will be terminated immediately, and operational registers
 * will be set to their defaults.
 */
int xhci_reset(struct xhci_hcd *xhci)
{
	u32 command;
	u32 state;
	int ret;

	state = xhci_readl(xhci, &xhci->op_regs->status);
	if ((state & STS_HALT) == 0) {
		xhci_warn(xhci, "Host controller not halted, aborting reset.\n");
		return 0;
	}

	xhci_dbg(xhci, "// Reset the HC\n");
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RESET;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	/* XXX: Why does EHCI set this here?  Shouldn't other code do this? */
	xhci_to_hcd(xhci)->state = HC_STATE_HALT;

	ret = handshake(xhci, &xhci->op_regs->command,
			CMD_RESET, 0, 250 * 1000);
	if (ret)
		return ret;

	xhci_dbg(xhci, "Wait for controller to be ready for doorbell rings\n");
	/*
	 * xHCI cannot write to any doorbells or operational registers other
	 * than status until the "Controller Not Ready" flag is cleared.
	 */
	return handshake(xhci, &xhci->op_regs->status, STS_CNR, 0, 250 * 1000);
}
/*
 * Free all requested IRQs.
 */
static void xhci_free_irq(struct xhci_hcd *xhci)
{
	int i;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	/* return if using legacy interrupt */
	if (xhci_to_hcd(xhci)->irq >= 0)
		return;

	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			if (xhci->msix_entries[i].vector)
				free_irq(xhci->msix_entries[i].vector,
						xhci_to_hcd(xhci));
	} else if (pdev->irq >= 0)
		free_irq(pdev->irq, xhci_to_hcd(xhci));
}
static int xhci_setup_msi(struct xhci_hcd *xhci)
{
	int ret;
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);

	ret = pci_enable_msi(pdev);
	if (ret) {
		xhci_err(xhci, "failed to allocate MSI entry\n");
		return ret;
	}

	ret = request_irq(pdev->irq, (irq_handler_t)xhci_msi_irq,
			0, "xhci_hcd", xhci_to_hcd(xhci));
	if (ret) {
		xhci_err(xhci, "disable MSI interrupt\n");
		pci_disable_msi(pdev);
	}

	return ret;
}
static int xhci_setup_msix(struct xhci_hcd *xhci)
{
	int i, ret = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	/*
	 * Calculate the number of MSI-X vectors supported.
	 * - HCS_MAX_INTRS: the maximum number of interrupts the host can
	 *   handle, based on the number of interrupters in HCSPARAMS1.
	 * - num_online_cpus: one MSI-X vector per online CPU core.
	 *   Add one extra vector so an interrupt is always available.
	 */
	xhci->msix_count = min(num_online_cpus() + 1,
				HCS_MAX_INTRS(xhci->hcs_params1));
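	/*
	 * Worked example (illustrative): on a 4-core machine whose
	 * HCSPARAMS1 reports 8 interrupters, msix_count = min(4 + 1, 8) = 5.
	 */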
	xhci->msix_entries =
		kmalloc((sizeof(struct msix_entry))*xhci->msix_count,
			GFP_KERNEL);
	if (!xhci->msix_entries) {
		xhci_err(xhci, "Failed to allocate MSI-X entries\n");
		return -ENOMEM;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		xhci->msix_entries[i].entry = i;
		xhci->msix_entries[i].vector = 0;
	}

	ret = pci_enable_msix(pdev, xhci->msix_entries, xhci->msix_count);
	if (ret) {
		xhci_err(xhci, "Failed to enable MSI-X\n");
		goto free_entries;
	}

	for (i = 0; i < xhci->msix_count; i++) {
		ret = request_irq(xhci->msix_entries[i].vector,
				(irq_handler_t)xhci_msi_irq,
				0, "xhci_hcd", xhci_to_hcd(xhci));
		if (ret)
			goto disable_msix;
	}

	hcd->msix_enabled = 1;
	return ret;

disable_msix:
	xhci_err(xhci, "disable MSI-X interrupt\n");
	xhci_free_irq(xhci);
	pci_disable_msix(pdev);
free_entries:
	kfree(xhci->msix_entries);
	xhci->msix_entries = NULL;
	return ret;
}
/* Free any IRQs and disable MSI-X */
static void xhci_cleanup_msix(struct xhci_hcd *xhci)
{
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	struct pci_dev *pdev = to_pci_dev(hcd->self.controller);

	xhci_free_irq(xhci);

	if (xhci->msix_entries) {
		pci_disable_msix(pdev);
		kfree(xhci->msix_entries);
		xhci->msix_entries = NULL;
	} else {
		pci_disable_msi(pdev);
	}

	hcd->msix_enabled = 0;
}
/*
 * Initialize memory for HCD and xHC (one-time init).
 *
 * Program the PAGESIZE register, initialize the device context array, create
 * device contexts (?), set up a command ring segment (or two?), create event
 * ring (one for now).
 */
int xhci_init(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	int retval = 0;

	xhci_dbg(xhci, "xhci_init\n");
	spin_lock_init(&xhci->lock);
	if (link_quirk) {
		xhci_dbg(xhci, "QUIRK: Not clearing Link TRB chain bits.\n");
		xhci->quirks |= XHCI_LINK_TRB_QUIRK;
	} else {
		xhci_dbg(xhci, "xHCI doesn't need link TRB QUIRK\n");
	}
	retval = xhci_mem_init(xhci, GFP_KERNEL);
	xhci_dbg(xhci, "Finished xhci_init\n");

	return retval;
}
/*-------------------------------------------------------------------------*/

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
static void xhci_event_ring_work(unsigned long arg)
{
	unsigned long flags;
	int temp;
	u64 temp_64;
	struct xhci_hcd *xhci = (struct xhci_hcd *) arg;
	int i, j;

	xhci_dbg(xhci, "Poll event ring: %lu\n", jiffies);

	spin_lock_irqsave(&xhci->lock, flags);
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_dbg(xhci, "op reg status = 0x%x\n", temp);
	if (temp == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci_dbg(xhci, "HW died, polling stopped.\n");
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "ir_set 0 pending = 0x%x\n", temp);
	xhci_dbg(xhci, "No-op commands handled = %d\n", xhci->noops_handled);
	xhci_dbg(xhci, "HC error bitmask = 0x%x\n", xhci->error_bitmask);
	xhci->error_bitmask = 0;
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_segment(xhci, xhci->event_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (unsigned long) temp_64);
	xhci_dbg(xhci, "Command ring:\n");
	xhci_debug_segment(xhci, xhci->cmd_ring->deq_seg);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);
	for (i = 0; i < MAX_HC_SLOTS; ++i) {
		if (!xhci->devs[i])
			continue;
		for (j = 0; j < 31; ++j) {
			xhci_dbg_ep_rings(xhci, i, j, &xhci->devs[i]->eps[j]);
		}
	}

	if (xhci->noops_submitted != NUM_TEST_NOOPS)
		if (xhci_setup_one_noop(xhci))
			xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (!xhci->zombie)
		mod_timer(&xhci->event_ring_timer, jiffies + POLL_TIMEOUT * HZ);
	else
		xhci_dbg(xhci, "Quit polling the event ring.\n");
}
#endif
/*
 * Start the HC after it was halted.
 *
 * This function is called by the USB core when the HC driver is added.
 * Its opposite is xhci_stop().
 *
 * xhci_init() must be called once before this function can be called.
 * Reset the HC, enable device slot contexts, program DCBAAP, and
 * set command ring pointer and event ring pointer.
 *
 * Set up MSI-X vectors and enable interrupts.
 */
int xhci_run(struct usb_hcd *hcd)
{
	u32 temp;
	u64 temp_64;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct pci_dev *pdev = to_pci_dev(xhci_to_hcd(xhci)->self.controller);
	void (*doorbell)(struct xhci_hcd *) = NULL;
	hcd->uses_new_polling = 1;

	xhci_dbg(xhci, "xhci_run\n");
	/* unregister the legacy interrupt */
	if (hcd->irq)
		free_irq(hcd->irq, hcd);
	hcd->irq = -1;

	ret = xhci_setup_msix(xhci);
	if (ret)
		/* fall back to MSI */
		ret = xhci_setup_msi(xhci);

	if (ret) {
		/* fall back to legacy interrupt */
		ret = request_irq(pdev->irq, &usb_hcd_irq, IRQF_SHARED,
				hcd->irq_descr, hcd);
		if (ret) {
			xhci_err(xhci, "request interrupt %d failed\n",
					pdev->irq);
			return ret;
		}
		hcd->irq = pdev->irq;
	}

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	init_timer(&xhci->event_ring_timer);
	xhci->event_ring_timer.data = (unsigned long) xhci;
	xhci->event_ring_timer.function = xhci_event_ring_work;
	/* Poll the event ring */
	xhci->event_ring_timer.expires = jiffies + POLL_TIMEOUT * HZ;
	xhci->zombie = 0;
	xhci_dbg(xhci, "Setting event ring polling timer\n");
	add_timer(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "Command ring memory map follows:\n");
	xhci_debug_ring(xhci, xhci->cmd_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->cmd_ring);
	xhci_dbg_cmd_ptrs(xhci);

	xhci_dbg(xhci, "ERST memory map follows:\n");
	xhci_dbg_erst(xhci, &xhci->erst);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	xhci_dbg_ring_ptrs(xhci, xhci->event_ring);
	temp_64 = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
	temp_64 &= ~ERST_PTR_MASK;
	xhci_dbg(xhci, "ERST deq = 64'h%0lx\n", (unsigned long) temp_64);

	xhci_dbg(xhci, "// Set the interrupt modulation register\n");
	temp = xhci_readl(xhci, &xhci->ir_set->irq_control);
	temp &= ~ER_IRQ_INTERVAL_MASK;
	temp |= (u32) 160;
	xhci_writel(xhci, temp, &xhci->ir_set->irq_control);
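	/*
	 * Illustrative math: per the xHCI spec the IMOD interval field
	 * counts in 250 ns units, so the value 160 written above yields a
	 * 40 us minimum gap, i.e. at most ~25,000 interrupts per second.
	 */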
	/* Set the HCD state before we enable the irqs */
	hcd->state = HC_STATE_RUNNING;
	temp = xhci_readl(xhci, &xhci->op_regs->command);
	temp |= (CMD_EIE);
	xhci_dbg(xhci, "// Enable interrupts, cmd = 0x%x.\n",
			temp);
	xhci_writel(xhci, temp, &xhci->op_regs->command);

	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_dbg(xhci, "// Enabling event ring interrupter %p by writing 0x%x to irq_pending\n",
			xhci->ir_set, (unsigned int) ER_IRQ_ENABLE(temp));
	xhci_writel(xhci, ER_IRQ_ENABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	if (NUM_TEST_NOOPS > 0)
		doorbell = xhci_setup_one_noop(xhci);
	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_queue_vendor_command(xhci, 0, 0, 0,
				TRB_TYPE(TRB_NEC_GET_FW));

	if (xhci_start(xhci)) {
		xhci_halt(xhci);
		return -ENODEV;
	}

	if (doorbell)
		(*doorbell)(xhci);
	if (xhci->quirks & XHCI_NEC_HOST)
		xhci_ring_cmd_db(xhci);

	xhci_dbg(xhci, "Finished xhci_run\n");
	return 0;
}
/*
 * Stop xHCI driver.
 *
 * This function is called by the USB core when the HC driver is removed.
 * Its opposite is xhci_run().
 *
 * Disable device contexts, disable IRQs, and quiesce the HC.
 * Reset the HC, finish any completed transactions, and cleanup memory.
 */
void xhci_stop(struct usb_hcd *hcd)
{
	u32 temp;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	xhci_reset(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
	/* Tell the event ring poll function not to reschedule */
	xhci->zombie = 1;
	del_timer_sync(&xhci->event_ring_timer);
#endif

	xhci_dbg(xhci, "// Disabling event ring interrupts\n");
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
	temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, ER_IRQ_DISABLE(temp),
			&xhci->ir_set->irq_pending);
	xhci_print_ir_set(xhci, 0);

	xhci_dbg(xhci, "cleaning up memory\n");
	xhci_mem_cleanup(xhci);
	xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}
/*
 * Shutdown HC (not bus-specific)
 *
 * This is called when the machine is rebooting or halting.  We assume that the
 * machine will be powered off, and the HC's internal state will be reset.
 * Don't bother to free memory.
 */
void xhci_shutdown(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);

	spin_lock_irq(&xhci->lock);
	xhci_halt(xhci);
	spin_unlock_irq(&xhci->lock);

	xhci_cleanup_msix(xhci);

	xhci_dbg(xhci, "xhci_shutdown completed - status = %x\n",
			xhci_readl(xhci, &xhci->op_regs->status));
}
#ifdef CONFIG_PM
static void xhci_save_registers(struct xhci_hcd *xhci)
{
	xhci->s3.command = xhci_readl(xhci, &xhci->op_regs->command);
	xhci->s3.dev_nt = xhci_readl(xhci, &xhci->op_regs->dev_notification);
	xhci->s3.dcbaa_ptr = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci->s3.config_reg = xhci_readl(xhci, &xhci->op_regs->config_reg);
	xhci->s3.irq_pending = xhci_readl(xhci, &xhci->ir_set->irq_pending);
	xhci->s3.irq_control = xhci_readl(xhci, &xhci->ir_set->irq_control);
	xhci->s3.erst_size = xhci_readl(xhci, &xhci->ir_set->erst_size);
	xhci->s3.erst_base = xhci_read_64(xhci, &xhci->ir_set->erst_base);
	xhci->s3.erst_dequeue = xhci_read_64(xhci, &xhci->ir_set->erst_dequeue);
}

static void xhci_restore_registers(struct xhci_hcd *xhci)
{
	xhci_writel(xhci, xhci->s3.command, &xhci->op_regs->command);
	xhci_writel(xhci, xhci->s3.dev_nt, &xhci->op_regs->dev_notification);
	xhci_write_64(xhci, xhci->s3.dcbaa_ptr, &xhci->op_regs->dcbaa_ptr);
	xhci_writel(xhci, xhci->s3.config_reg, &xhci->op_regs->config_reg);
	xhci_writel(xhci, xhci->s3.irq_pending, &xhci->ir_set->irq_pending);
	xhci_writel(xhci, xhci->s3.irq_control, &xhci->ir_set->irq_control);
	xhci_writel(xhci, xhci->s3.erst_size, &xhci->ir_set->erst_size);
	xhci_write_64(xhci, xhci->s3.erst_base, &xhci->ir_set->erst_base);
}
static void xhci_set_cmd_ring_deq(struct xhci_hcd *xhci)
{
	u64 val_64;

	/* step 2: initialize command ring buffer */
	val_64 = xhci_read_64(xhci, &xhci->op_regs->cmd_ring);
	val_64 = (val_64 & (u64) CMD_RING_RSVD_BITS) |
		(xhci_trb_virt_to_dma(xhci->cmd_ring->deq_seg,
				xhci->cmd_ring->dequeue) &
		(u64) ~CMD_RING_RSVD_BITS) |
		xhci->cmd_ring->cycle_state;
	xhci_dbg(xhci, "// Setting command ring address to 0x%llx\n",
			(unsigned long long) val_64);
	xhci_write_64(xhci, val_64, &xhci->op_regs->cmd_ring);
}
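/*
 * Illustrative layout, taken from the xHCI command ring control register
 * (CRCR) definition rather than this excerpt: the low 6 bits are
 * reserved/control, which is why the dequeue pointer must be 64-byte
 * aligned, and bit 0 of the written value carries the ring cycle state.
 * For example, a 0x1000-aligned dequeue pointer with cycle state 1 is
 * written as 0x1001.
 */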
/*
 * The whole command ring must be cleared to zero when we suspend the host.
 *
 * The host doesn't save the command ring pointer in the suspend well, so we
 * need to re-program it on resume.  Unfortunately, the pointer must be 64-byte
 * aligned, because of the reserved bits in the command ring dequeue pointer
 * register.  Therefore, we can't just set the dequeue pointer back in the
 * middle of the ring (TRBs are 16-byte aligned).
 */
static void xhci_clear_command_ring(struct xhci_hcd *xhci)
{
	struct xhci_ring *ring;
	struct xhci_segment *seg;

	ring = xhci->cmd_ring;
	seg = ring->deq_seg;
	do {
		memset(seg->trbs, 0, SEGMENT_SIZE);
		seg = seg->next;
	} while (seg != ring->deq_seg);
	/* Reset the software enqueue and dequeue pointers */
	ring->deq_seg = ring->first_seg;
	ring->dequeue = ring->first_seg->trbs;
	ring->enq_seg = ring->deq_seg;
	ring->enqueue = ring->dequeue;

	/*
	 * Ring is now zeroed, so the HW should look for change of ownership
	 * when the cycle bit is set to 1.
	 */
	ring->cycle_state = 1;

	/*
	 * Reset the hardware dequeue pointer.
	 * Yes, this will need to be re-written after resume, but we're paranoid
	 * and want to make sure the hardware doesn't access bogus memory
	 * because, say, the BIOS or an SMI started the host without changing
	 * the command ring pointers.
	 */
	xhci_set_cmd_ring_deq(xhci);
}
/*
 * Stop HC (not bus-specific)
 *
 * This is called when the machine transitions into S3/S4 mode.
 */
int xhci_suspend(struct xhci_hcd *xhci)
{
	int rc = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	u32 command;
	int i;

	spin_lock_irq(&xhci->lock);
	clear_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	/* step 1: stop endpoint */
	/* skipped; assume port suspend has already been done */

	/* step 2: clear Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command &= ~CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status,
			STS_HALT, STS_HALT, 100*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_RUN timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	xhci_clear_command_ring(xhci);

	/* step 3: save registers */
	xhci_save_registers(xhci);

	/* step 4: set CSS flag */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_CSS;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	if (handshake(xhci, &xhci->op_regs->status, STS_SAVE, 0, 10*100)) {
		xhci_warn(xhci, "WARN: xHC CMD_CSS timeout\n");
		spin_unlock_irq(&xhci->lock);
		return -ETIMEDOUT;
	}
	spin_unlock_irq(&xhci->lock);

	/* step 5: remove core well power */
	/* synchronize irq when using MSI-X */
	if (xhci->msix_entries) {
		for (i = 0; i < xhci->msix_count; i++)
			synchronize_irq(xhci->msix_entries[i].vector);
	}

	return rc;
}
/*
 * Start xHC (not bus-specific)
 *
 * This is called when the machine transitions out of S3/S4 mode.
 */
int xhci_resume(struct xhci_hcd *xhci, bool hibernated)
{
	u32 command, temp = 0;
	struct usb_hcd *hcd = xhci_to_hcd(xhci);
	int old_state, retval;

	old_state = hcd->state;
	if (time_before(jiffies, xhci->next_statechange))
		msleep(100);

	spin_lock_irq(&xhci->lock);

	if (!hibernated) {
		/* step 1: restore register */
		xhci_restore_registers(xhci);
		/* step 2: initialize command ring buffer */
		xhci_set_cmd_ring_deq(xhci);
		/* step 3: restore state and start state */
		/* step 3: set CRS flag */
		command = xhci_readl(xhci, &xhci->op_regs->command);
		command |= CMD_CRS;
		xhci_writel(xhci, command, &xhci->op_regs->command);
		if (handshake(xhci, &xhci->op_regs->status,
				STS_RESTORE, 0, 10*100)) {
			xhci_dbg(xhci, "WARN: xHC CMD_CRS timeout\n");
			spin_unlock_irq(&xhci->lock);
			return -ETIMEDOUT;
		}
		temp = xhci_readl(xhci, &xhci->op_regs->status);
	}

	/* If restore operation fails, re-initialize the HC during resume */
	if ((temp & STS_SRE) || hibernated) {
		usb_root_hub_lost_power(hcd->self.root_hub);

		xhci_dbg(xhci, "Stop HCD\n");
		xhci_halt(xhci);
		xhci_reset(xhci);
		spin_unlock_irq(&xhci->lock);
		xhci_cleanup_msix(xhci);
#ifdef CONFIG_USB_XHCI_HCD_DEBUGGING
		/* Tell the event ring poll function not to reschedule */
		xhci->zombie = 1;
		del_timer_sync(&xhci->event_ring_timer);
#endif

		xhci_dbg(xhci, "// Disabling event ring interrupts\n");
		temp = xhci_readl(xhci, &xhci->op_regs->status);
		xhci_writel(xhci, temp & ~STS_EINT, &xhci->op_regs->status);
		temp = xhci_readl(xhci, &xhci->ir_set->irq_pending);
		xhci_writel(xhci, ER_IRQ_DISABLE(temp),
				&xhci->ir_set->irq_pending);
		xhci_print_ir_set(xhci, 0);

		xhci_dbg(xhci, "cleaning up memory\n");
		xhci_mem_cleanup(xhci);
		xhci_dbg(xhci, "xhci_stop completed - status = %x\n",
				xhci_readl(xhci, &xhci->op_regs->status));

		xhci_dbg(xhci, "Initialize the HCD\n");
		retval = xhci_init(hcd);
		if (retval)
			return retval;

		xhci_dbg(xhci, "Start the HCD\n");
		retval = xhci_run(hcd);
		if (!retval)
			set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
		hcd->state = HC_STATE_SUSPENDED;
		return retval;
	}

	/* step 4: set Run/Stop bit */
	command = xhci_readl(xhci, &xhci->op_regs->command);
	command |= CMD_RUN;
	xhci_writel(xhci, command, &xhci->op_regs->command);
	handshake(xhci, &xhci->op_regs->status, STS_HALT,
			0, 250 * 1000);

	/* step 5: walk topology and initialize portsc,
	 * portpmsc and portli
	 */
	/* this is done in bus_resume */

	/* step 6: restart each of the previously
	 * running endpoints by ringing their doorbells
	 */

	set_bit(HCD_FLAG_HW_ACCESSIBLE, &hcd->flags);
	if (!hibernated)
		hcd->state = old_state;
	else
		hcd->state = HC_STATE_SUSPENDED;

	spin_unlock_irq(&xhci->lock);
	return 0;
}
#endif	/* CONFIG_PM */
/*-------------------------------------------------------------------------*/

/**
 * xhci_get_endpoint_index - Used for passing endpoint bitmasks between the core and
 * HCDs.  Find the index for an endpoint given its descriptor.  The return
 * value is used as the shift amount when building the endpoint bitmask
 * (see xhci_get_endpoint_flag()).
 *
 * Index = (epnum * 2) + direction - 1,
 * where direction = 0 for OUT, 1 for IN.
 *
 * For control endpoints, the IN index is used (OUT index is unused), so
 * index = (epnum * 2) + direction - 1 = (epnum * 2) + 1 - 1 = (epnum * 2)
 */
unsigned int xhci_get_endpoint_index(struct usb_endpoint_descriptor *desc)
{
	unsigned int index;

	if (usb_endpoint_xfer_control(desc))
		index = (unsigned int) (usb_endpoint_num(desc)*2);
	else
		index = (unsigned int) (usb_endpoint_num(desc)*2) +
			(usb_endpoint_dir_in(desc) ? 1 : 0) - 1;
	return index;
}
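/*
 * Worked examples (illustrative): bulk IN ep 0x81 (epnum 1, IN) gives
 * index (1 * 2) + 1 - 1 = 2; bulk OUT ep 0x02 (epnum 2, OUT) gives
 * index (2 * 2) + 0 - 1 = 3; the default control endpoint gives index 0.
 */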
/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
{
	return 1 << (xhci_get_endpoint_index(desc) + 1);
}

/* Find the flag for this endpoint (for use in the control context).  Use the
 * endpoint index to create a bitmask.  The slot context is bit 0, endpoint 0 is
 * bit 1, etc.
 */
unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
{
	return 1 << (ep_index + 1);
}
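/*
 * Illustrative example: ep 0x81 has endpoint index 2 (see above), so its
 * flag is 1 << (2 + 1) = 0b1000, leaving bit 0 for the slot context and
 * bit 1 for endpoint 0.
 */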
/* Compute the last valid endpoint context index.  Basically, this is the
 * endpoint index plus one.  For slot contexts with more than one valid endpoint,
 * we find the most significant bit set in the added contexts flags.
 * e.g. ep 1 IN (with endpoint address 0x81) => added_ctxs = 0b1000
 * fls(0b1000) = 4, but the endpoint context index is 3, so subtract one.
 */
unsigned int xhci_last_valid_endpoint(u32 added_ctxs)
{
	return fls(added_ctxs) - 1;
}

/* Returns 1 if the arguments are OK;
 * returns 0 if this is a root hub; returns -EINVAL for NULL pointers.
 */
static int xhci_check_args(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep, int check_ep, bool check_virt_dev,
		const char *func) {
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;

	if (!hcd || (check_ep && !ep) || !udev) {
		printk(KERN_DEBUG "xHCI %s called with invalid args\n",
				func);
		return -EINVAL;
	}
	if (!udev->parent) {
		printk(KERN_DEBUG "xHCI %s called for root hub\n",
				func);
		return 0;
	}

	if (check_virt_dev) {
		xhci = hcd_to_xhci(hcd);
		if (!udev->slot_id || !xhci->devs
				|| !xhci->devs[udev->slot_id]) {
			printk(KERN_DEBUG "xHCI %s called with unaddressed "
					"device\n", func);
			return -EINVAL;
		}

		virt_dev = xhci->devs[udev->slot_id];
		if (virt_dev->udev != udev) {
			printk(KERN_DEBUG "xHCI %s called when udev and "
					"virt_dev do not match\n", func);
			return -EINVAL;
		}
	}

	return 1;
}
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct xhci_command *command,
		bool ctx_change, bool must_succeed);

/*
 * Full speed devices may have a max packet size greater than 8 bytes, but the
 * USB core doesn't know that until it reads the first 8 bytes of the
 * descriptor.  If the usb_device's max packet size changes after that point,
 * we need to issue an evaluate context command and wait on it.
 */
static int xhci_check_maxpacket(struct xhci_hcd *xhci, unsigned int slot_id,
		unsigned int ep_index, struct urb *urb)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_container_ctx *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	int max_packet_size;
	int hw_max_packet_size;
	int ret = 0;

	out_ctx = xhci->devs[slot_id]->out_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	hw_max_packet_size = MAX_PACKET_DECODED(ep_ctx->ep_info2);
	max_packet_size = urb->dev->ep0.desc.wMaxPacketSize;
	if (hw_max_packet_size != max_packet_size) {
		xhci_dbg(xhci, "Max Packet Size for ep 0 changed.\n");
		xhci_dbg(xhci, "Max packet size in usb_device = %d\n",
				max_packet_size);
		xhci_dbg(xhci, "Max packet size in xHCI HW = %d\n",
				hw_max_packet_size);
		xhci_dbg(xhci, "Issuing evaluate context command.\n");

		/* Set up the modified control endpoint 0 */
		xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
				xhci->devs[slot_id]->out_ctx, ep_index);
		in_ctx = xhci->devs[slot_id]->in_ctx;
		ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
		ep_ctx->ep_info2 &= ~MAX_PACKET_MASK;
		ep_ctx->ep_info2 |= MAX_PACKET(max_packet_size);

		/* Set up the input context flags for the command */
		/* FIXME: This won't work if a non-default control endpoint
		 * changes max packet sizes.
		 */
		ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
		ctrl_ctx->add_flags = EP0_FLAG;
		ctrl_ctx->drop_flags = 0;

		xhci_dbg(xhci, "Slot %d input context\n", slot_id);
		xhci_dbg_ctx(xhci, in_ctx, ep_index);
		xhci_dbg(xhci, "Slot %d output context\n", slot_id);
		xhci_dbg_ctx(xhci, out_ctx, ep_index);

		ret = xhci_configure_endpoint(xhci, urb->dev, NULL,
				true, false);

		/* Clean up the input context for later use by bandwidth
		 * functions.
		 */
		ctrl_ctx->add_flags = SLOT_FLAG;
	}
	return ret;
}
/*
 * non-error returns are a promise to giveback() the urb later
 * we drop ownership so next owner (or urb unlink) can get it
 */
int xhci_urb_enqueue(struct usb_hcd *hcd, struct urb *urb, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int ret = 0;
	unsigned int slot_id, ep_index;
	struct urb_priv *urb_priv;
	int size, i;

	if (!urb || xhci_check_args(hcd, urb->dev, urb->ep,
					true, true, __func__) <= 0)
		return -EINVAL;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);

	if (!HCD_HW_ACCESSIBLE(hcd)) {
		if (!in_interrupt())
			xhci_dbg(xhci, "urb submitted during PCI suspend\n");
		ret = -ESHUTDOWN;
		goto exit;
	}

	if (usb_endpoint_xfer_isoc(&urb->ep->desc))
		size = urb->number_of_packets;
	else
		size = 1;

	urb_priv = kzalloc(sizeof(struct urb_priv) +
			size * sizeof(struct xhci_td *), mem_flags);
	if (!urb_priv)
		return -ENOMEM;

	for (i = 0; i < size; i++) {
		urb_priv->td[i] = kzalloc(sizeof(struct xhci_td), mem_flags);
		if (!urb_priv->td[i]) {
			urb_priv->length = i;
			xhci_urb_free_priv(xhci, urb_priv);
			return -ENOMEM;
		}
	}

	urb_priv->length = size;
	urb_priv->td_cnt = 0;
	urb->hcpriv = urb_priv;
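	/*
	 * Illustrative sizing: a control or bulk URB gets a single TD
	 * pointer here, while an isochronous URB with
	 * urb->number_of_packets == 8 gets eight, all hanging off the
	 * urb_priv allocated above.
	 */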
	if (usb_endpoint_xfer_control(&urb->ep->desc)) {
		/* Check to see if the max packet size for the default control
		 * endpoint changed during FS device enumeration
		 */
		if (urb->dev->speed == USB_SPEED_FULL) {
			ret = xhci_check_maxpacket(xhci, slot_id,
					ep_index, urb);
			if (ret < 0)
				return ret;
		}

		/* We have a spinlock and interrupts disabled, so we must pass
		 * atomic context to this function, which may allocate memory.
		 */
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_ctrl_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_bulk(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to using streams.\n");
			ret = -EINVAL;
		} else if (xhci->devs[slot_id]->eps[ep_index].ep_state &
				EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN: Can't enqueue URB while bulk ep "
					"is transitioning to "
					"not having streams.\n");
			ret = -EINVAL;
		} else {
			ret = xhci_queue_bulk_tx(xhci, GFP_ATOMIC, urb,
					slot_id, ep_index);
		}
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else if (usb_endpoint_xfer_int(&urb->ep->desc)) {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_intr_tx(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	} else {
		spin_lock_irqsave(&xhci->lock, flags);
		if (xhci->xhc_state & XHCI_STATE_DYING)
			goto dying;
		ret = xhci_queue_isoc_tx_prepare(xhci, GFP_ATOMIC, urb,
				slot_id, ep_index);
		spin_unlock_irqrestore(&xhci->lock, flags);
	}
exit:
	return ret;
dying:
	xhci_urb_free_priv(xhci, urb_priv);
	urb->hcpriv = NULL;
	xhci_dbg(xhci, "Ep 0x%x: URB %p submitted for "
			"non-responsive xHCI host.\n",
			urb->ep->desc.bEndpointAddress, urb);
	spin_unlock_irqrestore(&xhci->lock, flags);
	return -ESHUTDOWN;
}
/* Get the right ring for the given URB.
 * If the endpoint supports streams, boundary check the URB's stream ID.
 * If the endpoint doesn't support streams, return the singular endpoint ring.
 */
static struct xhci_ring *xhci_urb_to_transfer_ring(struct xhci_hcd *xhci,
		struct urb *urb)
{
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int stream_id;
	struct xhci_virt_ep *ep;

	slot_id = urb->dev->slot_id;
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	stream_id = urb->stream_id;
	ep = &xhci->devs[slot_id]->eps[ep_index];
	/* Common case: no streams */
	if (!(ep->ep_state & EP_HAS_STREAMS))
		return ep->ring;

	if (stream_id == 0) {
		xhci_warn(xhci,
				"WARN: Slot ID %u, ep index %u has streams, "
				"but URB has no stream ID.\n",
				slot_id, ep_index);
		return NULL;
	}

	if (stream_id < ep->stream_info->num_streams)
		return ep->stream_info->stream_rings[stream_id];

	xhci_warn(xhci,
			"WARN: Slot ID %u, ep index %u has "
			"stream IDs 1 to %u allocated, "
			"but stream ID %u is requested.\n",
			slot_id, ep_index,
			ep->stream_info->num_streams - 1,
			stream_id);
	return NULL;
}
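/*
 * Boundary-check example (illustrative): an endpoint set up with
 * num_streams == 4 has valid stream IDs 1 through 3; stream ID 0 is
 * reserved for the xHC, and a request for stream ID 4 falls through to
 * the warning above and returns NULL.
 */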
/*
 * Remove the URB's TD from the endpoint ring.  This may cause the HC to stop
 * USB transfers, potentially stopping in the middle of a TRB buffer.  The HC
 * should pick up where it left off in the TD, unless a Set Transfer Ring
 * Dequeue Pointer is issued.
 *
 * The TRBs that make up the buffers for the canceled URB will be "removed" from
 * the ring.  Since the ring is a contiguous structure, they can't be physically
 * removed.  Instead, there are two options:
 *
 *  1) If the HC is in the middle of processing the URB to be canceled, we
 *     simply move the ring's dequeue pointer past those TRBs using the Set
 *     Transfer Ring Dequeue Pointer command.  This will be the common case,
 *     when drivers timeout on the last submitted URB and attempt to cancel.
 *
 *  2) If the HC is in the middle of a different TD, we turn the TRBs into a
 *     series of 1-TRB transfer no-op TDs.  (No-ops shouldn't be chained.)  The
 *     HC will need to invalidate any TRBs it has cached after the stop
 *     endpoint command, as noted in the xHCI 0.95 errata.
 *
 *  3) The TD may have completed by the time the Stop Endpoint Command
 *     completes, so software needs to handle that case too.
 *
 * This function should protect against the TD enqueueing code ringing the
 * doorbell while this code is waiting for a Stop Endpoint command to complete.
 * It also needs to account for multiple cancellations happening at the same
 * time for the same endpoint.
 *
 * Note that this function can be called in any context, or so says
 * usb_hcd_unlink_urb()
 */
int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
{
	unsigned long flags;
	int ret, i;
	u32 temp;
	struct xhci_hcd *xhci;
	struct urb_priv *urb_priv;
	struct xhci_td *td;
	unsigned int ep_index;
	struct xhci_ring *ep_ring;
	struct xhci_virt_ep *ep;

	xhci = hcd_to_xhci(hcd);
	spin_lock_irqsave(&xhci->lock, flags);
	/* Make sure the URB hasn't completed or been unlinked already */
	ret = usb_hcd_check_unlink_urb(hcd, urb, status);
	if (ret || !urb->hcpriv)
		goto done;
	temp = xhci_readl(xhci, &xhci->op_regs->status);
	if (temp == 0xffffffff) {
		xhci_dbg(xhci, "HW died, freeing TD.\n");
		urb_priv = urb->hcpriv;

		usb_hcd_unlink_urb_from_ep(hcd, urb);
		spin_unlock_irqrestore(&xhci->lock, flags);
		usb_hcd_giveback_urb(xhci_to_hcd(xhci), urb, -ESHUTDOWN);
		xhci_urb_free_priv(xhci, urb_priv);
		return ret;
	}
	if (xhci->xhc_state & XHCI_STATE_DYING) {
		xhci_dbg(xhci, "Ep 0x%x: URB %p to be canceled on "
				"non-responsive xHCI host.\n",
				urb->ep->desc.bEndpointAddress, urb);
		/* Let the stop endpoint command watchdog timer (which set this
		 * state) finish cleaning up the endpoint TD lists.  We must
		 * have caught it in the middle of dropping a lock and giving
		 * back an URB.
		 */
		goto done;
	}

	xhci_dbg(xhci, "Cancel URB %p\n", urb);
	xhci_dbg(xhci, "Event ring:\n");
	xhci_debug_ring(xhci, xhci->event_ring);
	ep_index = xhci_get_endpoint_index(&urb->ep->desc);
	ep = &xhci->devs[urb->dev->slot_id]->eps[ep_index];
	ep_ring = xhci_urb_to_transfer_ring(xhci, urb);
	if (!ep_ring) {
		ret = -EINVAL;
		goto done;
	}

	xhci_dbg(xhci, "Endpoint ring:\n");
	xhci_debug_ring(xhci, ep_ring);

	urb_priv = urb->hcpriv;

	for (i = urb_priv->td_cnt; i < urb_priv->length; i++) {
		td = urb_priv->td[i];
		list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
	}

	/* Queue a stop endpoint command, but only if this is
	 * the first cancellation to be handled.
	 */
	if (!(ep->ep_state & EP_HALT_PENDING)) {
		ep->ep_state |= EP_HALT_PENDING;
		ep->stop_cmds_pending++;
		ep->stop_cmd_timer.expires = jiffies +
			XHCI_STOP_EP_CMD_TIMEOUT * HZ;
		add_timer(&ep->stop_cmd_timer);
		xhci_queue_stop_endpoint(xhci, urb->dev->slot_id, ep_index, 0);
		xhci_ring_cmd_db(xhci);
	}
done:
	spin_unlock_irqrestore(&xhci->lock, flags);
	return ret;
}
/* Drop an endpoint from a new bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint that is being
 * disabled, so there's no need for mutual exclusion to protect
 * the xhci->devs[slot_id] structure.
 */
int xhci_drop_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned int last_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	u32 drop_flag;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);

	drop_flag = xhci_get_endpoint_flag(&ep->desc);
	if (drop_flag == SLOT_FLAG || drop_flag == EP0_FLAG) {
		xhci_dbg(xhci, "xHCI %s - can't drop slot or ep 0 %#x\n",
				__func__, drop_flag);
		return 0;
	}
	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HC already knows the endpoint is disabled,
	 * or the HCD has noted it is disabled, ignore this request
	 */
	if ((ep_ctx->ep_info & EP_STATE_MASK) == EP_STATE_DISABLED ||
			ctrl_ctx->drop_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with disabled ep %p\n",
				__func__, ep);
		return 0;
	}

	ctrl_ctx->drop_flags |= drop_flag;
	new_drop_flags = ctrl_ctx->drop_flags;

	ctrl_ctx->add_flags &= ~drop_flag;
	new_add_flags = ctrl_ctx->add_flags;

	last_ctx = xhci_last_valid_endpoint(ctrl_ctx->add_flags);
	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we deleted the last one */
	if ((slot_ctx->dev_info & LAST_CTX_MASK) > LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = slot_ctx->dev_info;

	xhci_endpoint_zero(xhci, xhci->devs[udev->slot_id], ep);

	xhci_dbg(xhci, "drop ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
/* Add an endpoint to a new possible bandwidth configuration for this device.
 * Only one call to this function is allowed per endpoint before
 * check_bandwidth() or reset_bandwidth() must be called.
 * A call to xhci_drop_endpoint() followed by a call to xhci_add_endpoint() will
 * add the endpoint to the schedule with possibly new parameters denoted by a
 * different endpoint descriptor in usb_host_endpoint.
 * A call to xhci_add_endpoint() followed by a call to xhci_drop_endpoint() is
 * not allowed.
 *
 * The USB core will not allow URBs to be queued to an endpoint until the
 * configuration or alt setting is installed in the device, so there's no need
 * for mutual exclusion to protect the xhci->devs[slot_id] structure.
 */
int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct xhci_container_ctx *in_ctx, *out_ctx;
	unsigned int ep_index;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u32 added_ctxs;
	unsigned int last_ctx;
	u32 new_add_flags, new_drop_flags, new_slot_info;
	int ret = 0;

	ret = xhci_check_args(hcd, udev, ep, 1, true, __func__);
	if (ret <= 0) {
		/* So we won't queue a reset ep command for a root hub */
		ep->hcpriv = NULL;
		return ret;
	}
	xhci = hcd_to_xhci(hcd);

	added_ctxs = xhci_get_endpoint_flag(&ep->desc);
	last_ctx = xhci_last_valid_endpoint(added_ctxs);
	if (added_ctxs == SLOT_FLAG || added_ctxs == EP0_FLAG) {
		/* FIXME when we have to issue an evaluate endpoint command to
		 * deal with ep0 max packet size changing once we get the
		 * descriptors
		 */
		xhci_dbg(xhci, "xHCI %s - can't add slot or ep 0 %#x\n",
				__func__, added_ctxs);
		return 0;
	}
	in_ctx = xhci->devs[udev->slot_id]->in_ctx;
	out_ctx = xhci->devs[udev->slot_id]->out_ctx;
	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_ctx = xhci_get_ep_ctx(xhci, out_ctx, ep_index);
	/* If the HCD has already noted the endpoint is enabled,
	 * ignore this request.
	 */
	if (ctrl_ctx->add_flags & xhci_get_endpoint_flag(&ep->desc)) {
		xhci_warn(xhci, "xHCI %s called with enabled ep %p\n",
				__func__, ep);
		return 0;
	}

	/*
	 * Configuration and alternate setting changes must be done in
	 * process context, not interrupt context (or so the documentation
	 * for usb_set_interface() and usb_set_configuration() claims).
	 */
	if (xhci_endpoint_init(xhci, xhci->devs[udev->slot_id],
				udev, ep, GFP_NOIO) < 0) {
		dev_dbg(&udev->dev, "%s - could not initialize ep %#x\n",
				__func__, ep->desc.bEndpointAddress);
		return -ENOMEM;
	}

	ctrl_ctx->add_flags |= added_ctxs;
	new_add_flags = ctrl_ctx->add_flags;

	/* If xhci_endpoint_disable() was called for this endpoint, but the
	 * xHC hasn't been notified yet through the check_bandwidth() call,
	 * this re-adds a new state for the endpoint from the new endpoint
	 * descriptors.  We must drop and re-add this endpoint, so we leave the
	 * drop flags alone.
	 */
	new_drop_flags = ctrl_ctx->drop_flags;

	slot_ctx = xhci_get_slot_ctx(xhci, in_ctx);
	/* Update the last valid endpoint context, if we just added one past */
	if ((slot_ctx->dev_info & LAST_CTX_MASK) < LAST_CTX(last_ctx)) {
		slot_ctx->dev_info &= ~LAST_CTX_MASK;
		slot_ctx->dev_info |= LAST_CTX(last_ctx);
	}
	new_slot_info = slot_ctx->dev_info;

	/* Store the usb_device pointer for later use */
	ep->hcpriv = udev;

	xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x, new slot info = %#x\n",
			(unsigned int) ep->desc.bEndpointAddress,
			udev->slot_id,
			(unsigned int) new_drop_flags,
			(unsigned int) new_add_flags,
			(unsigned int) new_slot_info);
	return 0;
}
static void xhci_zero_in_ctx(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev)
{
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_ep_ctx *ep_ctx;
	struct xhci_slot_ctx *slot_ctx;
	int i;

	/* When a device's add flag and drop flag are zero, any subsequent
	 * configure endpoint command will leave that endpoint's state
	 * untouched.  Make sure we don't leave any old state in the input
	 * endpoint contexts.
	 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->drop_flags = 0;
	ctrl_ctx->add_flags = 0;
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	slot_ctx->dev_info &= ~LAST_CTX_MASK;
	/* Endpoint 0 is always valid */
	slot_ctx->dev_info |= LAST_CTX(1);
	for (i = 1; i < 31; ++i) {
		ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, i);
		ep_ctx->ep_info = 0;
		ep_ctx->ep_info2 = 0;
		ep_ctx->deq = 0;
		ep_ctx->tx_info = 0;
	}
}
static int xhci_configure_endpoint_result(struct xhci_hcd *xhci,
		struct usb_device *udev, int *cmd_status)
{
	int ret;

	switch (*cmd_status) {
	case COMP_ENOMEM:
		dev_warn(&udev->dev, "Not enough host controller resources "
				"for new device state.\n");
		ret = -ENOMEM;
		/* FIXME: can we allocate more resources for the HC? */
		break;
	case COMP_BW_ERR:
		dev_warn(&udev->dev, "Not enough bandwidth "
				"for new device state.\n");
		ret = -ENOSPC;
		/* FIXME: can we go back to the old state? */
		break;
	case COMP_TRB_ERR:
		/* the HCD set up something wrong */
		dev_warn(&udev->dev, "ERROR: Endpoint drop flag = 0, "
				"add flag = 1, "
				"and endpoint is not disabled.\n");
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful Endpoint Configure command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}

static int xhci_evaluate_context_result(struct xhci_hcd *xhci,
		struct usb_device *udev, int *cmd_status)
{
	int ret;
	struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];

	switch (*cmd_status) {
	case COMP_EINVAL:
		dev_warn(&udev->dev, "WARN: xHCI driver setup invalid evaluate "
				"context command.\n");
		ret = -EINVAL;
		break;
	case COMP_EBADSLT:
		dev_warn(&udev->dev, "WARN: slot not enabled for "
				"evaluate context command.\n");
	case COMP_CTX_STATE:
		dev_warn(&udev->dev, "WARN: invalid context state for "
				"evaluate context command.\n");
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 1);
		ret = -EINVAL;
		break;
	case COMP_SUCCESS:
		dev_dbg(&udev->dev, "Successful evaluate context command\n");
		ret = 0;
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", *cmd_status);
		ret = -EINVAL;
		break;
	}
	return ret;
}
/* Issue a configure endpoint command or evaluate context command
 * and wait for it to finish.
 */
static int xhci_configure_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct xhci_command *command,
		bool ctx_change, bool must_succeed)
{
	int ret;
	int timeleft;
	unsigned long flags;
	struct xhci_container_ctx *in_ctx;
	struct completion *cmd_completion;
	int *cmd_status;
	struct xhci_virt_device *virt_dev;

	spin_lock_irqsave(&xhci->lock, flags);
	virt_dev = xhci->devs[udev->slot_id];
	if (command) {
		in_ctx = command->in_ctx;
		cmd_completion = command->completion;
		cmd_status = &command->status;
		command->command_trb = xhci->cmd_ring->enqueue;

		/* Enqueue pointer can be left pointing to the link TRB,
		 * we must handle that
		 */
		if ((command->command_trb->link.control & TRB_TYPE_BITMASK)
				== TRB_TYPE(TRB_LINK))
			command->command_trb =
				xhci->cmd_ring->enq_seg->next->trbs;

		list_add_tail(&command->cmd_list, &virt_dev->cmd_list);
	} else {
		in_ctx = virt_dev->in_ctx;
		cmd_completion = &virt_dev->cmd_completion;
		cmd_status = &virt_dev->cmd_status;
	}
	init_completion(cmd_completion);

	if (!ctx_change)
		ret = xhci_queue_configure_endpoint(xhci, in_ctx->dma,
				udev->slot_id, must_succeed);
	else
		ret = xhci_queue_evaluate_context(xhci, in_ctx->dma,
				udev->slot_id);
	if (ret < 0) {
		if (command)
			list_del(&command->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME allocate a new ring segment\n");
		return -ENOMEM;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the configure endpoint command to complete */
	timeleft = wait_for_completion_interruptible_timeout(
			cmd_completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for %s command\n",
				timeleft == 0 ? "Timeout" : "Signal",
				ctx_change == 0 ?
					"configure endpoint" :
					"evaluate context");
		/* FIXME cancel the configure endpoint command */
		return -ETIME;
	}

	if (!ctx_change)
		return xhci_configure_endpoint_result(xhci, udev, cmd_status);
	return xhci_evaluate_context_result(xhci, udev, cmd_status);
}
/* Called after one or more calls to xhci_add_endpoint() or
 * xhci_drop_endpoint().  If this call fails, the USB core is expected
 * to call xhci_reset_bandwidth().
 *
 * Since we are in the middle of changing either configuration or
 * installing a new alt setting, the USB core won't allow URBs to be
 * enqueued for any endpoint on the old config or interface.  Nothing
 * else should be touching the xhci->devs[slot_id] structure, so we
 * don't need to take the xhci->lock for manipulating that.
 */
int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	int i;
	int ret = 0;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];

	/* See section 4.6.6 - A0 = 1; A1 = D0 = D1 = 0 */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;
	ctrl_ctx->add_flags &= ~EP0_FLAG;
	ctrl_ctx->drop_flags &= ~SLOT_FLAG;
	ctrl_ctx->drop_flags &= ~EP0_FLAG;
	xhci_dbg(xhci, "New Input Control Context:\n");
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

	ret = xhci_configure_endpoint(xhci, udev, NULL,
			false, false);
	if (ret) {
		/* Caller should call reset_bandwidth() */
		return ret;
	}

	xhci_dbg(xhci, "Output context after successful config ep cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx,
			LAST_CTX_TO_EP_NUM(slot_ctx->dev_info));

	/* Free any rings that were dropped, but not changed. */
	for (i = 1; i < 31; ++i) {
		if ((ctrl_ctx->drop_flags & (1 << (i + 1))) &&
				!(ctrl_ctx->add_flags & (1 << (i + 1))))
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
	}
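	/*
	 * Flag-to-index example (illustrative): endpoint index i owns bit
	 * (i + 1) of the add/drop masks, so for i == 2 the test above looks
	 * at bit 3 (0b1000); "dropped but not re-added" means the old ring
	 * can safely be freed or cached.
	 */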
	xhci_zero_in_ctx(xhci, virt_dev);
	/*
	 * Install any rings for completely new endpoints or changed endpoints,
	 * and free or cache any old rings from changed endpoints.
	 */
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].new_ring)
			continue;
		/* Only cache or free the old ring if it exists.
		 * It may not if this is the first add of an endpoint.
		 */
		if (virt_dev->eps[i].ring) {
			xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		}
		virt_dev->eps[i].ring = virt_dev->eps[i].new_ring;
		virt_dev->eps[i].new_ring = NULL;
	}

	return ret;
}
void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci;
	struct xhci_virt_device *virt_dev;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return;
	xhci = hcd_to_xhci(hcd);

	xhci_dbg(xhci, "%s called for udev %p\n", __func__, udev);
	virt_dev = xhci->devs[udev->slot_id];
	/* Free any rings allocated for added endpoints */
	for (i = 0; i < 31; ++i) {
		if (virt_dev->eps[i].new_ring) {
			xhci_ring_free(xhci, virt_dev->eps[i].new_ring);
			virt_dev->eps[i].new_ring = NULL;
		}
	}
	xhci_zero_in_ctx(xhci, virt_dev);
}
static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
		struct xhci_container_ctx *in_ctx,
		struct xhci_container_ctx *out_ctx,
		u32 add_flags, u32 drop_flags)
{
	struct xhci_input_control_ctx *ctrl_ctx;

	ctrl_ctx = xhci_get_input_control_ctx(xhci, in_ctx);
	ctrl_ctx->add_flags = add_flags;
	ctrl_ctx->drop_flags = drop_flags;
	xhci_slot_copy(xhci, in_ctx, out_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;

	xhci_dbg(xhci, "Input Context:\n");
	xhci_dbg_ctx(xhci, in_ctx, xhci_last_valid_endpoint(add_flags));
}
static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
		unsigned int slot_id, unsigned int ep_index,
		struct xhci_dequeue_state *deq_state)
{
	struct xhci_container_ctx *in_ctx;
	struct xhci_ep_ctx *ep_ctx;
	u32 added_ctxs;
	dma_addr_t addr;

	xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, ep_index);
	in_ctx = xhci->devs[slot_id]->in_ctx;
	ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
	addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
			deq_state->new_deq_ptr);
	if (addr == 0) {
		xhci_warn(xhci, "WARN Cannot submit config ep after "
				"reset ep command\n");
		xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
				deq_state->new_deq_seg,
				deq_state->new_deq_ptr);
		return;
	}
	ep_ctx->deq = addr | deq_state->new_cycle_state;

	added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
	xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
			xhci->devs[slot_id]->out_ctx, added_ctxs, added_ctxs);
}
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci,
		struct usb_device *udev, unsigned int ep_index)
{
	struct xhci_dequeue_state deq_state;
	struct xhci_virt_ep *ep;

	xhci_dbg(xhci, "Cleaning up stalled endpoint ring\n");
	ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	/* We need to move the HW's dequeue pointer past this TD,
	 * or it will attempt to resend it on the next doorbell ring.
	 */
	xhci_find_new_dequeue_state(xhci, udev->slot_id,
			ep_index, ep->stopped_stream, ep->stopped_td,
			&deq_state);

	/* HW with the reset endpoint quirk will use the saved dequeue state to
	 * issue a configure endpoint command later.
	 */
	if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
		xhci_dbg(xhci, "Queueing new dequeue state\n");
		xhci_queue_new_dequeue_state(xhci, udev->slot_id,
				ep_index, ep->stopped_stream, &deq_state);
	} else {
		/* Better hope no one uses the input context between now and the
		 * reset endpoint completion!
		 * XXX: No idea how this hardware will react when stream rings
		 * are cached.
		 */
		xhci_dbg(xhci, "Setting up input context for "
				"configure endpoint command\n");
		xhci_setup_input_ctx_for_quirk(xhci, udev->slot_id,
				ep_index, &deq_state);
	}
}
/* Deal with stalled endpoints.  The core should have sent the control message
 * to clear the halt condition.  However, we need to make the xHCI hardware
 * reset its sequence number, since a device will expect a sequence number of
 * zero after the halt condition is cleared.
 * Context: in_interrupt
 */
void xhci_endpoint_reset(struct usb_hcd *hcd,
		struct usb_host_endpoint *ep)
{
	struct xhci_hcd *xhci;
	struct usb_device *udev;
	unsigned int ep_index;
	unsigned long flags;
	int ret;
	struct xhci_virt_ep *virt_ep;

	xhci = hcd_to_xhci(hcd);
	udev = (struct usb_device *) ep->hcpriv;
	/* Called with a root hub endpoint (or an endpoint that wasn't added
	 * with xhci_add_endpoint())
	 */
	if (!udev)
		return;
	ep_index = xhci_get_endpoint_index(&ep->desc);
	virt_ep = &xhci->devs[udev->slot_id]->eps[ep_index];
	if (!virt_ep->stopped_td) {
		xhci_dbg(xhci, "Endpoint 0x%x not halted, refusing to reset.\n",
				ep->desc.bEndpointAddress);
		return;
	}
	if (usb_endpoint_xfer_control(&ep->desc)) {
		xhci_dbg(xhci, "Control endpoint stall already handled.\n");
		return;
	}

	xhci_dbg(xhci, "Queueing reset endpoint command\n");
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_reset_ep(xhci, udev->slot_id, ep_index);
	/*
	 * Can't change the ring dequeue pointer until it's transitioned to the
	 * stopped state, which is only upon a successful reset endpoint
	 * command.  Better hope that last command worked!
	 */
	if (!ret) {
		xhci_cleanup_stalled_ring(xhci, udev, ep_index);
		kfree(virt_ep->stopped_td);
		xhci_ring_cmd_db(xhci);
	}
	virt_ep->stopped_td = NULL;
	virt_ep->stopped_trb = NULL;
	virt_ep->stopped_stream = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	if (ret)
		xhci_warn(xhci, "FIXME allocate a new ring segment\n");
}
static int xhci_check_streams_endpoint(struct xhci_hcd *xhci,
		struct usb_device *udev, struct usb_host_endpoint *ep,
		unsigned int slot_id)
{
	int ret;
	unsigned int ep_index;
	unsigned int ep_state;

	if (!ep)
		return -EINVAL;
	ret = xhci_check_args(xhci_to_hcd(xhci), udev, ep, 1, true, __func__);
	if (ret <= 0)
		return -EINVAL;
	if (ep->ss_ep_comp.bmAttributes == 0) {
		xhci_warn(xhci, "WARN: SuperSpeed Endpoint Companion"
				" descriptor for ep 0x%x does not support streams\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}

	ep_index = xhci_get_endpoint_index(&ep->desc);
	ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
	if (ep_state & EP_HAS_STREAMS ||
			ep_state & EP_GETTING_STREAMS) {
		xhci_warn(xhci, "WARN: SuperSpeed bulk endpoint 0x%x "
				"already has streams set up.\n",
				ep->desc.bEndpointAddress);
		xhci_warn(xhci, "Send email to xHCI maintainer and ask for "
				"dynamic stream context array reallocation.\n");
		return -EINVAL;
	}
	if (!list_empty(&xhci->devs[slot_id]->eps[ep_index].ring->td_list)) {
		xhci_warn(xhci, "Cannot setup streams for SuperSpeed bulk "
				"endpoint 0x%x; URBs are pending.\n",
				ep->desc.bEndpointAddress);
		return -EINVAL;
	}
	return 0;
}
static void xhci_calculate_streams_entries(struct xhci_hcd *xhci,
		unsigned int *num_streams, unsigned int *num_stream_ctxs)
{
	unsigned int max_streams;

	/* The stream context array size must be a power of two */
	*num_stream_ctxs = roundup_pow_of_two(*num_streams);
	/*
	 * Find out how many primary stream array entries the host controller
	 * supports.  Later we may use secondary stream arrays (similar to 2nd
	 * level page entries), but that's an optional feature for xHCI host
	 * controllers.  xHCs must support at least 4 stream IDs.
	 */
	max_streams = HCC_MAX_PSA(xhci->hcc_params);
	if (*num_stream_ctxs > max_streams) {
		xhci_dbg(xhci, "xHCI HW only supports %u stream ctx entries.\n",
				max_streams);
		*num_stream_ctxs = max_streams;
		*num_streams = max_streams;
	}
}
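/*
 * Worked example (illustrative): a request for 5 streams rounds up to
 * 8 stream context entries; if HCC_MAX_PSA only reports 4, both the entry
 * count and the usable stream count are clamped to 4.
 */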
/* Returns an error code if one of the endpoints already has streams.
 * This does not change any data structures, it only checks and gathers
 * information.
 */
static int xhci_calculate_streams_and_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int *num_streams, u32 *changed_ep_bitmask)
{
	unsigned int max_streams;
	unsigned int endpoint_flag;
	int i;
	int ret;

	for (i = 0; i < num_eps; i++) {
		ret = xhci_check_streams_endpoint(xhci, udev,
				eps[i], udev->slot_id);
		if (ret < 0)
			return ret;

		max_streams = USB_SS_MAX_STREAMS(
				eps[i]->ss_ep_comp.bmAttributes);
		if (max_streams < (*num_streams - 1)) {
			xhci_dbg(xhci, "Ep 0x%x only supports %u stream IDs.\n",
					eps[i]->desc.bEndpointAddress,
					max_streams);
			*num_streams = max_streams+1;
		}

		endpoint_flag = xhci_get_endpoint_flag(&eps[i]->desc);
		if (*changed_ep_bitmask & endpoint_flag)
			return -EINVAL;
		*changed_ep_bitmask |= endpoint_flag;
	}
	return 0;
}
static u32 xhci_calculate_no_streams_bitmask(struct xhci_hcd *xhci,
		struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps)
{
	u32 changed_ep_bitmask = 0;
	unsigned int slot_id;
	unsigned int ep_index;
	unsigned int ep_state;
	int i;

	slot_id = udev->slot_id;
	if (!xhci->devs[slot_id])
		return 0;

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_state = xhci->devs[slot_id]->eps[ep_index].ep_state;
		/* Are streams already being freed for the endpoint? */
		if (ep_state & EP_GETTING_NO_STREAMS) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are being disabled already\n",
					eps[i]->desc.bEndpointAddress);
			return 0;
		}
		/* Are there actually any streams to free? */
		if (!(ep_state & EP_HAS_STREAMS) &&
				!(ep_state & EP_GETTING_STREAMS)) {
			xhci_warn(xhci, "WARN Can't disable streams for "
					"endpoint 0x%x, "
					"streams are already disabled!\n",
					eps[i]->desc.bEndpointAddress);
			xhci_warn(xhci, "WARN xhci_free_streams() called "
					"with non-streams endpoint\n");
			return 0;
		}
		changed_ep_bitmask |= xhci_get_endpoint_flag(&eps[i]->desc);
	}
	return changed_ep_bitmask;
}

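/*
 * Note (illustrative): the bitmask built above uses the input context
 * add/drop flag encoding from xhci_get_endpoint_flag(), where bit 0 is the
 * slot context and bit 1 is endpoint 0, so e.g. EP 1 OUT maps to bit 2.
 */
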
/*
 * The USB device drivers use this function (through the HCD interface in USB
 * core) to prepare a set of bulk endpoints to use streams.  Streams are used
 * to coordinate mass storage command queueing across multiple endpoints
 * (basically a stream ID == a task ID).
 *
 * Setting up streams involves allocating the same size stream context array
 * for each endpoint and issuing a configure endpoint command for all endpoints.
 *
 * Don't allow the call to succeed if one endpoint only supports one stream
 * (which means it doesn't support streams at all).
 *
 * Drivers may get fewer stream IDs than they asked for, if the host controller
 * hardware or endpoints claim they can't support the number of requested
 * streams.
 */
int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		unsigned int num_streams, gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	unsigned int ep_index;
	unsigned int num_stream_ctxs;
	unsigned long flags;
	u32 changed_ep_bitmask = 0;

	if (!eps)
		return -EINVAL;

	/* Add one to the number of streams requested to account for
	 * stream 0 that is reserved for xHCI usage.
	 */
	num_streams += 1;
	xhci = hcd_to_xhci(hcd);
	xhci_dbg(xhci, "Driver wants %u stream IDs (including stream 0).\n",
			num_streams);

	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	/* Check to make sure all endpoints are not already configured for
	 * streams.  While we're at it, find the maximum number of streams that
	 * all the endpoints will support and check for duplicate endpoints.
	 */
	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_calculate_streams_and_bitmask(xhci, udev, eps,
			num_eps, &num_streams, &changed_ep_bitmask);
	if (ret < 0) {
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return ret;
	}
	if (num_streams <= 1) {
		xhci_warn(xhci, "WARN: endpoints can't handle "
				"more than one stream.\n");
		xhci_free_command(xhci, config_cmd);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}
	vdev = xhci->devs[udev->slot_id];
	/* Mark each endpoint as being in transition, so
	 * xhci_urb_enqueue() will reject all URBs.
	 */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state |= EP_GETTING_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Setup internal data structures and allocate HW data structures for
	 * streams (but don't install the HW structures in the input context
	 * until we're sure all memory allocation succeeded).
	 */
	xhci_calculate_streams_entries(xhci, &num_streams, &num_stream_ctxs);
	xhci_dbg(xhci, "Need %u stream ctx entries for %u stream IDs.\n",
			num_stream_ctxs, num_streams);

	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].stream_info = xhci_alloc_stream_info(xhci,
				num_stream_ctxs,
				num_streams, mem_flags);
		if (!vdev->eps[ep_index].stream_info)
			goto cleanup;
		/* Set maxPstreams in endpoint context and update deq ptr to
		 * point to stream context array. FIXME
		 */
	}

	/* Set up the input context for a configure endpoint command. */
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, config_cmd->in_ctx, ep_index);

		xhci_endpoint_copy(xhci, config_cmd->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_streams_ep_input_ctx(xhci, ep_ctx,
				vdev->eps[ep_index].stream_info);
	}
	/* Tell the HW to drop its old copy of the endpoint context info
	 * and add the updated copy from the input context.
	 */
	xhci_setup_input_ctx_for_config_ep(xhci, config_cmd->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);

	/* Issue and wait for the configure endpoint command */
	ret = xhci_configure_endpoint(xhci, udev, config_cmd,
			false, false);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the old ring intact and free our internal streams data
	 * structure.
	 */
	if (ret < 0)
		goto cleanup;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n",
				udev->slot_id, ep_index);
		vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS;
	}
	xhci_free_command(xhci, config_cmd);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Subtract 1 for stream 0, which drivers can't use */
	return num_streams - 1;

cleanup:
	/* If it didn't work, free the streams! */
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
		xhci_endpoint_zero(xhci, vdev, eps[i]);
	}
	xhci_free_command(xhci, config_cmd);
	return -ENOMEM;
}

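/*
 * Illustrative usage sketch (not part of this driver): class drivers
 * normally reach xhci_alloc_streams() through the USB core wrapper
 * usb_alloc_streams().  Assuming "intf" is the driver's usb_interface and
 * "eps" holds two of its SuperSpeed bulk endpoints, a caller might do:
 *
 *	int streams = usb_alloc_streams(intf, eps, 2, 16, GFP_NOIO);
 *	if (streams <= 0)
 *		dev_dbg(&intf->dev, "no streams; queue one command at a time\n");
 *
 * and must be prepared to get fewer than the 16 requested stream IDs back.
 */
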
/* Transition the endpoint from using streams to being a "normal" endpoint
 * without streams.
 *
 * Modify the endpoint context state, submit a configure endpoint command,
 * and free all endpoint rings for streams if that completes successfully.
 */
int xhci_free_streams(struct usb_hcd *hcd, struct usb_device *udev,
		struct usb_host_endpoint **eps, unsigned int num_eps,
		gfp_t mem_flags)
{
	int i, ret;
	struct xhci_hcd *xhci;
	struct xhci_virt_device *vdev;
	struct xhci_command *command;
	unsigned int ep_index;
	unsigned long flags;
	u32 changed_ep_bitmask;

	xhci = hcd_to_xhci(hcd);
	vdev = xhci->devs[udev->slot_id];

	/* Set up a configure endpoint command to remove the streams rings */
	spin_lock_irqsave(&xhci->lock, flags);
	changed_ep_bitmask = xhci_calculate_no_streams_bitmask(xhci,
			udev, eps, num_eps);
	if (changed_ep_bitmask == 0) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		return -EINVAL;
	}

	/* Use the xhci_command structure from the first endpoint.  We may have
	 * allocated too many, but the driver may call xhci_free_streams() for
	 * each endpoint it grouped into one call to xhci_alloc_streams().
	 */
	ep_index = xhci_get_endpoint_index(&eps[0]->desc);
	command = vdev->eps[ep_index].stream_info->free_streams_command;
	for (i = 0; i < num_eps; i++) {
		struct xhci_ep_ctx *ep_ctx;

		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		ep_ctx = xhci_get_ep_ctx(xhci, command->in_ctx, ep_index);
		xhci->devs[udev->slot_id]->eps[ep_index].ep_state |=
			EP_GETTING_NO_STREAMS;

		xhci_endpoint_copy(xhci, command->in_ctx,
				vdev->out_ctx, ep_index);
		xhci_setup_no_streams_ep_input_ctx(xhci, ep_ctx,
				&vdev->eps[ep_index]);
	}
	xhci_setup_input_ctx_for_config_ep(xhci, command->in_ctx,
			vdev->out_ctx, changed_ep_bitmask, changed_ep_bitmask);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Issue and wait for the configure endpoint command,
	 * which must succeed.
	 */
	ret = xhci_configure_endpoint(xhci, udev, command,
			false, true);

	/* xHC rejected the configure endpoint command for some reason, so we
	 * leave the streams rings intact.
	 */
	if (ret < 0)
		return ret;

	spin_lock_irqsave(&xhci->lock, flags);
	for (i = 0; i < num_eps; i++) {
		ep_index = xhci_get_endpoint_index(&eps[i]->desc);
		xhci_free_stream_info(xhci, vdev->eps[ep_index].stream_info);
		vdev->eps[ep_index].stream_info = NULL;
		/* FIXME Unset maxPstreams in endpoint context and
		 * update deq ptr to point to normal stream ring.
		 */
		vdev->eps[ep_index].ep_state &= ~EP_GETTING_NO_STREAMS;
		vdev->eps[ep_index].ep_state &= ~EP_HAS_STREAMS;
	}
	spin_unlock_irqrestore(&xhci->lock, flags);

	return 0;
}

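/*
 * Illustrative note: this is the inverse of xhci_alloc_streams() and is
 * normally reached through the USB core wrapper usb_free_streams(intf,
 * eps, num_eps, GFP_NOIO), with "intf" being the driver's usb_interface.
 */
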
/*
 * This submits a Reset Device Command, which will set the device state to 0,
 * set the device address to 0, and disable all the endpoints except the
 * default control endpoint.  The USB core should come back and call
 * xhci_address_device(), and then re-set up the configuration.  If this is
 * called because of a usb_reset_and_verify_device(), then the old alternate
 * settings will be re-installed through the normal bandwidth allocation
 * functions.
 *
 * Wait for the Reset Device command to finish.  Remove all structures
 * associated with the endpoints that were disabled.  Clear the input device
 * structure?  Cache the rings?  Reset the control endpoint 0 max packet size?
 *
 * If the virt_dev to be reset does not exist or does not match the udev,
 * it means the device is lost, possibly due to an xHC restore error and
 * re-initialization during S3/S4.  In this case, call xhci_alloc_dev() to
 * re-allocate the device.
 */
int xhci_discover_or_reset_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	int ret, i;
	unsigned long flags;
	struct xhci_hcd *xhci;
	unsigned int slot_id;
	struct xhci_virt_device *virt_dev;
	struct xhci_command *reset_device_cmd;
	int timeleft;
	int last_freed_endpoint;
	struct xhci_slot_ctx *slot_ctx;

	ret = xhci_check_args(hcd, udev, NULL, 0, false, __func__);
	if (ret <= 0)
		return ret;
	xhci = hcd_to_xhci(hcd);
	slot_id = udev->slot_id;
	virt_dev = xhci->devs[slot_id];
	if (!virt_dev) {
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not exist. Re-allocate the device\n", slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	if (virt_dev->udev != udev) {
		/* If the virt_dev and the udev do not match, this virt_dev
		 * may belong to another udev.
		 * Re-allocate the device.
		 */
		xhci_dbg(xhci, "The device to be reset with slot ID %u does "
				"not match the udev. Re-allocate the device\n",
				slot_id);
		ret = xhci_alloc_dev(hcd, udev);
		if (ret == 1)
			return 0;
		else
			return -EINVAL;
	}

	/* If device is not setup, there is no point in resetting it */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	if (GET_SLOT_STATE(le32_to_cpu(slot_ctx->dev_state)) ==
			SLOT_STATE_DISABLED)
		return 0;

	xhci_dbg(xhci, "Resetting device with slot ID %u\n", slot_id);
	/* Allocate the command structure that holds the struct completion.
	 * Assume we're in process context, since the normal device reset
	 * process has to wait for the device anyway.  Storage devices are
	 * reset as part of error handling, so use GFP_NOIO instead of
	 * GFP_KERNEL.
	 */
	reset_device_cmd = xhci_alloc_command(xhci, false, true, GFP_NOIO);
	if (!reset_device_cmd) {
		xhci_dbg(xhci, "Couldn't allocate command structure.\n");
		return -ENOMEM;
	}

	/* Attempt to submit the Reset Device command to the command ring */
	spin_lock_irqsave(&xhci->lock, flags);
	reset_device_cmd->command_trb = xhci->cmd_ring->enqueue;

	/* Enqueue pointer can be left pointing to the link TRB,
	 * we must handle that
	 */
	if ((le32_to_cpu(reset_device_cmd->command_trb->link.control)
			& TRB_TYPE_BITMASK) == TRB_TYPE(TRB_LINK))
		reset_device_cmd->command_trb =
			xhci->cmd_ring->enq_seg->next->trbs;

	list_add_tail(&reset_device_cmd->cmd_list, &virt_dev->cmd_list);
	ret = xhci_queue_reset_device(xhci, slot_id);
	if (ret) {
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		goto command_cleanup;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* Wait for the Reset Device command to finish */
	timeleft = wait_for_completion_interruptible_timeout(
			reset_device_cmd->completion,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for reset device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		spin_lock_irqsave(&xhci->lock, flags);
		/* The timeout might have raced with the event ring handler, so
		 * only delete from the list if the item isn't poisoned.
		 */
		if (reset_device_cmd->cmd_list.next != LIST_POISON1)
			list_del(&reset_device_cmd->cmd_list);
		spin_unlock_irqrestore(&xhci->lock, flags);
		ret = -ETIME;
		goto command_cleanup;
	}

	/* The Reset Device command can't fail, according to the 0.95/0.96 spec,
	 * unless we tried to reset a slot ID that wasn't enabled,
	 * or the device wasn't in the addressed or configured state.
	 */
	ret = reset_device_cmd->status;
	switch (ret) {
	case COMP_EBADSLT: /* 0.95 completion code for bad slot ID */
	case COMP_CTX_STATE: /* 0.96 completion code for same thing */
		xhci_info(xhci, "Can't reset device (slot ID %u) in %s state\n",
				slot_id,
				xhci_get_slot_state(xhci, virt_dev->out_ctx));
		xhci_info(xhci, "Not freeing device rings.\n");
		/* Don't treat this as an error.  May change my mind later. */
		ret = 0;
		goto command_cleanup;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful reset device command.\n");
		break;
	default:
		if (xhci_is_vendor_info_code(xhci, ret))
			break;
		xhci_warn(xhci, "Unknown completion code %u for "
				"reset device command.\n", ret);
		ret = -EINVAL;
		goto command_cleanup;
	}

	/* Everything but endpoint 0 is disabled, so free or cache the rings. */
	last_freed_endpoint = 1;
	for (i = 1; i < 31; ++i) {
		if (!virt_dev->eps[i].ring)
			continue;
		xhci_free_or_cache_endpoint_ring(xhci, virt_dev, i);
		last_freed_endpoint = i;
	}
	xhci_dbg(xhci, "Output context after successful reset device cmd:\n");
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, last_freed_endpoint);
	ret = 0;

command_cleanup:
	xhci_free_command(xhci, reset_device_cmd);
	return ret;
}

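/*
 * Illustrative summary of the command pattern used above (and by
 * xhci_alloc_dev() and xhci_address_device() below): while holding
 * xhci->lock, queue the command TRB and note its address, ring the host
 * controller's command doorbell, then drop the lock and sleep on the
 * command's completion with a timeout; the command-completion event handler
 * records the status before waking the waiter.
 */
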
/*
 * At this point, the struct usb_device is about to go away, the device has
 * disconnected, and all traffic has been stopped and the endpoints have been
 * disabled.  Free any HC data structures associated with that device.
 */
void xhci_free_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *virt_dev;
	unsigned long flags;
	u32 state;
	int i, ret;

	ret = xhci_check_args(hcd, udev, NULL, 0, true, __func__);
	if (ret <= 0)
		return;

	virt_dev = xhci->devs[udev->slot_id];

	/* Stop any wayward timer functions (which may grab the lock) */
	for (i = 0; i < 31; ++i) {
		virt_dev->eps[i].ep_state &= ~EP_HALT_PENDING;
		del_timer_sync(&virt_dev->eps[i].stop_cmd_timer);
	}

	spin_lock_irqsave(&xhci->lock, flags);
	/* Don't disable the slot if the host controller is dead. */
	state = xhci_readl(xhci, &xhci->op_regs->status);
	if (state == 0xffffffff || (xhci->xhc_state & XHCI_STATE_DYING)) {
		xhci_free_virt_device(xhci, udev->slot_id);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return;
	}

	if (xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id)) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);
	/*
	 * Event command completion handler will free any data structures
	 * associated with the slot.  XXX Can free sleep?
	 */
}

/*
 * Returns 0 if the xHC ran out of device slots, the Enable Slot command
 * timed out, or allocating memory failed.  Returns 1 on success.
 */
int xhci_alloc_dev(struct usb_hcd *hcd, struct usb_device *udev)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	unsigned long flags;
	int timeleft;
	int ret;

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_slot_control(xhci, TRB_ENABLE_SLOT, 0);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return 0;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* XXX: how much time for xHC slot assignment? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for a slot\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the enable slot request */
		return 0;
	}

	if (!xhci->slot_id) {
		xhci_err(xhci, "Error while assigning device slot ID\n");
		return 0;
	}
	/* xhci_alloc_virt_device() does not touch rings; no need to lock.
	 * Use GFP_NOIO, since this function can be called from
	 * xhci_discover_or_reset_device(), which may be called as part of
	 * mass storage driver error handling.
	 */
	if (!xhci_alloc_virt_device(xhci, xhci->slot_id, udev, GFP_NOIO)) {
		/* Disable slot, if we can do it without mem alloc */
		xhci_warn(xhci, "Could not allocate xHCI USB device data structures\n");
		spin_lock_irqsave(&xhci->lock, flags);
		if (!xhci_queue_slot_control(xhci, TRB_DISABLE_SLOT, udev->slot_id))
			xhci_ring_cmd_db(xhci);
		spin_unlock_irqrestore(&xhci->lock, flags);
		return 0;
	}
	udev->slot_id = xhci->slot_id;
	/* Is this a LS or FS device under a HS hub? */
	/* Hub or peripheral? */

	return 1;
}

/*
 * Issue an Address Device command (which will issue a SetAddress request to
 * the device).
 * We should be protected by the usb_address0_mutex in khubd's hub_port_init,
 * so we should only issue and wait on one address command at the same time.
 *
 * We add one to the device address issued by the hardware because the USB core
 * uses address 1 for the root hubs (even though they're not really devices).
 */
int xhci_address_device(struct usb_hcd *hcd, struct usb_device *udev)
{
	unsigned long flags;
	int timeleft;
	struct xhci_virt_device *virt_dev;
	int ret = 0;
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_slot_ctx *slot_ctx;
	struct xhci_input_control_ctx *ctrl_ctx;
	u64 temp_64;

	if (!udev->slot_id) {
		xhci_dbg(xhci, "Bad Slot ID %d\n", udev->slot_id);
		return -EINVAL;
	}

	virt_dev = xhci->devs[udev->slot_id];

	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
	/*
	 * If this is the first Set Address since device plug-in or
	 * virt_device reallocation after a resume with an xHCI power loss,
	 * then set up the slot context.
	 */
	if (!slot_ctx->dev_info)
		xhci_setup_addressable_virt_dev(xhci, udev);
	/* Otherwise, update the control endpoint ring enqueue pointer. */
	else
		xhci_copy_ep0_dequeue_into_input_ctx(xhci, udev);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);

	spin_lock_irqsave(&xhci->lock, flags);
	ret = xhci_queue_address_device(xhci, virt_dev->in_ctx->dma,
			udev->slot_id);
	if (ret) {
		spin_unlock_irqrestore(&xhci->lock, flags);
		xhci_dbg(xhci, "FIXME: allocate a command ring segment\n");
		return ret;
	}
	xhci_ring_cmd_db(xhci);
	spin_unlock_irqrestore(&xhci->lock, flags);

	/* ctrl tx can take up to 5 sec; XXX: need more time for xHC? */
	timeleft = wait_for_completion_interruptible_timeout(&xhci->addr_dev,
			USB_CTRL_SET_TIMEOUT);
	/* FIXME: From section 4.3.4: "Software shall be responsible for timing
	 * the SetAddress() "recovery interval" required by USB and aborting the
	 * command on a timeout.
	 */
	if (timeleft <= 0) {
		xhci_warn(xhci, "%s while waiting for address device command\n",
				timeleft == 0 ? "Timeout" : "Signal");
		/* FIXME cancel the address device command */
		return -ETIME;
	}

	switch (virt_dev->cmd_status) {
	case COMP_CTX_STATE:
	case COMP_EBADSLT:
		xhci_err(xhci, "Setup ERROR: address device command for slot %d.\n",
				udev->slot_id);
		ret = -EINVAL;
		break;
	case COMP_TX_ERR:
		dev_warn(&udev->dev, "Device not responding to set address.\n");
		ret = -EPROTO;
		break;
	case COMP_SUCCESS:
		xhci_dbg(xhci, "Successful Address Device command\n");
		break;
	default:
		xhci_err(xhci, "ERROR: unexpected command completion "
				"code 0x%x.\n", virt_dev->cmd_status);
		xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
		xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
		ret = -EINVAL;
		break;
	}
	if (ret)
		return ret;
	temp_64 = xhci_read_64(xhci, &xhci->op_regs->dcbaa_ptr);
	xhci_dbg(xhci, "Op regs DCBAA ptr = %#016llx\n", temp_64);
	xhci_dbg(xhci, "Slot ID %d dcbaa entry @%p = %#016llx\n",
			udev->slot_id,
			&xhci->dcbaa->dev_context_ptrs[udev->slot_id],
			(unsigned long long)
			xhci->dcbaa->dev_context_ptrs[udev->slot_id]);
	xhci_dbg(xhci, "Output Context DMA address = %#08llx\n",
			(unsigned long long)virt_dev->out_ctx->dma);
	xhci_dbg(xhci, "Slot ID %d Input Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->in_ctx, 2);
	xhci_dbg(xhci, "Slot ID %d Output Context:\n", udev->slot_id);
	xhci_dbg_ctx(xhci, virt_dev->out_ctx, 2);
	/*
	 * USB core uses address 1 for the roothubs, so we add one to the
	 * address given back to us by the HC.
	 */
	slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->out_ctx);
	/* Use kernel assigned address for devices; store xHC assigned
	 * address locally. */
	virt_dev->address = (le32_to_cpu(slot_ctx->dev_state) & DEV_ADDR_MASK) + 1;
	/* Zero the input context control for later use */
	ctrl_ctx = xhci_get_input_control_ctx(xhci, virt_dev->in_ctx);
	ctrl_ctx->add_flags = 0;
	ctrl_ctx->drop_flags = 0;

	xhci_dbg(xhci, "Internal device address = %d\n", virt_dev->address);

	return 0;
}

/* Once a hub descriptor is fetched for a device, we need to update the xHC's
 * internal data structures for the device.
 */
int xhci_update_hub_device(struct usb_hcd *hcd, struct usb_device *hdev,
			struct usb_tt *tt, gfp_t mem_flags)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	struct xhci_virt_device *vdev;
	struct xhci_command *config_cmd;
	struct xhci_input_control_ctx *ctrl_ctx;
	struct xhci_slot_ctx *slot_ctx;
	unsigned long flags;
	unsigned think_time;
	int ret;

	/* Ignore root hubs */
	if (!hdev->parent)
		return 0;

	vdev = xhci->devs[hdev->slot_id];
	if (!vdev) {
		xhci_warn(xhci, "Cannot update hub desc for unknown device.\n");
		return -EINVAL;
	}
	config_cmd = xhci_alloc_command(xhci, true, true, mem_flags);
	if (!config_cmd) {
		xhci_dbg(xhci, "Could not allocate xHCI command structure.\n");
		return -ENOMEM;
	}

	spin_lock_irqsave(&xhci->lock, flags);
	xhci_slot_copy(xhci, config_cmd->in_ctx, vdev->out_ctx);
	ctrl_ctx = xhci_get_input_control_ctx(xhci, config_cmd->in_ctx);
	ctrl_ctx->add_flags |= SLOT_FLAG;
	slot_ctx = xhci_get_slot_ctx(xhci, config_cmd->in_ctx);
	slot_ctx->dev_info |= DEV_HUB;
	if (tt->multi)
		slot_ctx->dev_info |= DEV_MTT;
	if (xhci->hci_version > 0x95) {
		xhci_dbg(xhci, "xHCI version %x needs hub "
				"TT think time and number of ports\n",
				(unsigned int) xhci->hci_version);
		slot_ctx->dev_info2 |= XHCI_MAX_PORTS(hdev->maxchild);
		/* Set TT think time - convert from ns to FS bit times.
		 * 0 = 8 FS bit times, 1 = 16 FS bit times,
		 * 2 = 24 FS bit times, 3 = 32 FS bit times.
		 */
		think_time = tt->think_time;
		if (think_time != 0)
			think_time = (think_time / 666) - 1;
		slot_ctx->tt_info |= TT_THINK_TIME(think_time);
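		/*
		 * Worked example (illustrative): the USB core stores the TT
		 * think time in nanoseconds, so 666 ns (8 FS bit times)
		 * encodes as 666/666 - 1 = 0, and the maximum of 2664 ns
		 * (32 FS bit times) encodes as 2664/666 - 1 = 3.
		 */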
	} else {
		xhci_dbg(xhci, "xHCI version %x doesn't need hub "
				"TT think time or number of ports\n",
				(unsigned int) xhci->hci_version);
	}
	slot_ctx->dev_state = 0;
	spin_unlock_irqrestore(&xhci->lock, flags);

	xhci_dbg(xhci, "Set up %s for hub device.\n",
			(xhci->hci_version > 0x95) ?
			"configure endpoint" : "evaluate context");
	xhci_dbg(xhci, "Slot %u Input Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, config_cmd->in_ctx, 0);

	/* Issue and wait for the configure endpoint or
	 * evaluate context command.
	 */
	if (xhci->hci_version > 0x95)
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				false, false);
	else
		ret = xhci_configure_endpoint(xhci, hdev, config_cmd,
				true, false);

	xhci_dbg(xhci, "Slot %u Output Context:\n", hdev->slot_id);
	xhci_dbg_ctx(xhci, vdev->out_ctx, 0);

	xhci_free_command(xhci, config_cmd);
	return ret;
}

int xhci_get_frame(struct usb_hcd *hcd)
{
	struct xhci_hcd *xhci = hcd_to_xhci(hcd);
	/* EHCI mods by the periodic size.  Why? */
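	/* Each 1 ms frame holds eight 125 us microframes, so shifting the
	 * microframe index right by 3 yields the current frame number.
	 */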
	return xhci_readl(xhci, &xhci->run_regs->microframe_index) >> 3;
}

MODULE_DESCRIPTION(DRIVER_DESC);
MODULE_AUTHOR(DRIVER_AUTHOR);
MODULE_LICENSE("GPL");

static int __init xhci_hcd_init(void)
{
	int retval;

	retval = xhci_register_pci();
	if (retval < 0) {
		printk(KERN_DEBUG "Problem registering PCI driver.\n");
		return retval;
	}

	/*
	 * Check the compiler generated sizes of structures that must be laid
	 * out in specific ways for hardware access.
	 */
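	/*
	 * Illustrative arithmetic for the checks below: each expression is
	 * "number of 32-bit words * 32 bits / 8 bits per byte", so the
	 * doorbell array of 256 registers must be exactly 1024 bytes.
	 */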
	BUILD_BUG_ON(sizeof(struct xhci_doorbell_array) != 256*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_slot_ctx) != 8*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_ep_ctx) != 8*32/8);
	/* xhci_device_control has eight fields, and also
	 * embeds one xhci_slot_ctx and 31 xhci_ep_ctx
	 */
	BUILD_BUG_ON(sizeof(struct xhci_stream_ctx) != 4*32/8);
	BUILD_BUG_ON(sizeof(union xhci_trb) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_erst_entry) != 4*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_cap_regs) != 7*32/8);
	BUILD_BUG_ON(sizeof(struct xhci_intr_reg) != 8*32/8);
	/* xhci_run_regs has eight fields and embeds 128 xhci_intr_regs */
	BUILD_BUG_ON(sizeof(struct xhci_run_regs) != (8+8*128)*32/8);
	return 0;
}
module_init(xhci_hcd_init);

static void __exit xhci_hcd_cleanup(void)
{
	xhci_unregister_pci();
}
module_exit(xhci_hcd_cleanup);