2 * Copyright (c) 2001-2002 by David Brownell
4 * This program is free software; you can redistribute it and/or modify it
5 * under the terms of the GNU General Public License as published by the
6 * Free Software Foundation; either version 2 of the License, or (at your
7 * option) any later version.
9 * This program is distributed in the hope that it will be useful, but
10 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
11 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
14 * You should have received a copy of the GNU General Public License
15 * along with this program; if not, write to the Free Software Foundation,
16 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
19 /* this file is part of ehci-hcd.c */
21 /*-------------------------------------------------------------------------*/
24 * EHCI scheduled transaction support: interrupt, iso, split iso
25 * These are called "periodic" transactions in the EHCI spec.
27 * Note that for interrupt transfers, the QH/QTD manipulation is shared
28 * with the "asynchronous" transaction support (control/bulk transfers).
29 * The only real difference is in how interrupt transfers are scheduled.
30 * We get some funky API restrictions from the current URB model, which
31 * works notably better for reading transfers than for writing. (And
32 * which accordingly needs to change before it'll work inside devices,
33 * or with "USB On The Go" additions to USB 2.0 ...)
36 static int ehci_get_frame (struct usb_hcd *hcd);
38 /*-------------------------------------------------------------------------*/
41 * periodic_next_shadow - return "next" pointer on shadow list
42 * @periodic: host pointer to qh/itd/sitd
43 * @tag: hardware tag for type of this record
/* Returns the address of the "next" shadow-list pointer inside whichever
 * union member @tag selects (QH, FSTN, ITD, or SITD).
 * NOTE(review): the switch/case scaffolding is missing from this view;
 * comments below describe only the visible return statements.
 */
45 static union ehci_shadow *
46 periodic_next_shadow (union ehci_shadow *periodic, int tag)
/* interrupt queue head */
50 return &periodic->qh->qh_next;
/* frame span traversal node */
52 return &periodic->fstn->fstn_next;
/* high speed iso transfer descriptor */
54 return &periodic->itd->itd_next;
/* split iso descriptor (only when split-iso support is compiled in) */
57 return &periodic->sitd->sitd_next;
58 #endif /* have_split_iso */
/* unrecognized hardware tag: log it; fall-through behavior not visible here */
60 dbg ("BAD shadow %p tag %d", periodic->ptr, tag);
65 /* returns true after successful unlink */
66 /* caller must hold ehci->lock */
/* Unlink @ptr (a qh/itd/sitd) from the periodic schedule at @frame,
 * updating both the hardware list and the software shadow list.
 * Caller must hold ehci->lock.  Returns nonzero on success per the
 * comment above; the actual return statements are not visible here.
 */
67 static int periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
69 union ehci_shadow *prev_p = &ehci->pshadow [frame];
70 u32 *hw_p = &ehci->periodic [frame];
71 union ehci_shadow here = *prev_p;
72 union ehci_shadow *next_p;
74 /* find predecessor of "ptr"; hw and shadow lists are in sync */
75 while (here.ptr && here.ptr != ptr) {
76 prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
/* NOTE(review): advances via the qh member; presumably safe because
 * hw_next is at the same offset in every schedule structure — confirm */
77 hw_p = &here.qh->hw_next;
80 /* an interrupt entry (at list end) could have been shared */
82 dbg ("entry %p no longer on frame [%d]", ptr, frame);
85 // vdbg ("periodic unlink %p from frame %d", ptr, frame);
87 /* update hardware list ... HC may still know the old structure, so
88 * don't change hw_next until it'll have purged its cache
90 next_p = periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
91 *hw_p = here.qh->hw_next;
93 /* unlink from shadow list; HCD won't see old structure again */
100 /* how many of the uframe's 125 usecs are allocated? */
/* Walk frame's schedule list and total the microseconds already claimed
 * in the given uframe (each uframe is 125 usecs).  Used by the bandwidth
 * checks before new periodic transfers are scheduled.
 * NOTE(review): switch cases, loop scaffolding, and the return statement
 * are missing from this view.
 */
101 static unsigned short
102 periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
104 u32 *hw_p = &ehci->periodic [frame];
105 union ehci_shadow *q = &ehci->pshadow [frame];
109 switch (Q_NEXT_TYPE (*hw_p)) {
/* --- QH: interrupt transfer --- */
111 /* is it in the S-mask? */
112 if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe))
113 usecs += q->qh->usecs;
/* ... or in the C-mask? (split-transaction completion) */
115 if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe)))
116 usecs += q->qh->c_usecs;
120 /* for "save place" FSTNs, count the relevant INTR
121 * bandwidth from the previous frame
123 if (q->fstn->hw_prev != EHCI_LIST_END) {
124 dbg ("not counting FSTN bandwidth yet ...");
126 q = &q->fstn->fstn_next;
129 /* NOTE the "one uframe per itd" policy */
130 if (q->itd->hw_transaction [uframe] != 0)
131 usecs += q->itd->usecs;
132 q = &q->itd->itd_next;
134 #ifdef have_split_iso
/* bit 31 of hw_fullspeed_ep distinguishes direction; exact use of
 * "temp" below is not visible here */
136 temp = q->sitd->hw_fullspeed_ep &
137 __constant_cpu_to_le32 (1 << 31);
139 // FIXME: this doesn't count data bytes right...
141 /* is it in the S-mask? (count SPLIT, DATA) */
142 if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) {
144 usecs += HS_USECS (188);
146 usecs += HS_USECS (1);
149 /* ... C-mask? (count CSPLIT, DATA) */
150 if (q->sitd->hw_uframe &
151 cpu_to_le32 (1 << (8 + uframe))) {
153 usecs += HS_USECS (0);
155 usecs += HS_USECS (188);
157 q = &q->sitd->sitd_next;
159 #endif /* have_split_iso */
/* sanity check: > 100 usecs/uframe would exceed the 80% periodic budget */
166 err ("overallocated uframe %d, periodic is %d usecs",
167 frame * 8 + uframe, usecs);
172 /*-------------------------------------------------------------------------*/
/* Turn on the controller's periodic schedule processing (CMD_PSE) after
 * waiting for any previous disable to take effect.  Returns nonzero and
 * halts the HCD state on handshake failure (error path not fully visible).
 */
174 static int enable_periodic (struct ehci_hcd *ehci)
179 /* did clearing PSE did take effect yet?
180 * takes effect only at frame boundaries...
/* wait up to 9 frames (9 * 125 usec polling budget) for PSS to clear */
182 status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125);
184 ehci->hcd.state = USB_STATE_HALT;
188 cmd = readl (&ehci->regs->command) | CMD_PSE;
189 writel (cmd, &ehci->regs->command);
190 /* posted write ... PSS happens later */
191 ehci->hcd.state = USB_STATE_RUNNING;
193 /* make sure ehci_work scans these */
194 ehci->next_uframe = readl (&ehci->regs->frame_index)
195 % (ehci->periodic_size << 3);
/* Turn off periodic schedule processing (clear CMD_PSE) after waiting for
 * a prior enable to take effect; mirror image of enable_periodic().
 */
199 static int disable_periodic (struct ehci_hcd *ehci)
204 /* did setting PSE not take effect yet?
205 * takes effect only at frame boundaries...
/* wait for PSS to be asserted before we clear PSE */
207 status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125);
209 ehci->hcd.state = USB_STATE_HALT;
213 cmd = readl (&ehci->regs->command) & ~CMD_PSE;
214 writel (cmd, &ehci->regs->command);
215 /* posted write ... */
/* -1 means "no periodic work pending" for the scanner */
217 ehci->next_uframe = -1;
221 /*-------------------------------------------------------------------------*/
223 // FIXME microframe periods not yet handled
/* Remove an interrupt QH from every frame it occupies in the periodic
 * schedule, update accounting, and move the qh to the IDLE state.
 * NOTE(review): parameter list and several statements (including the
 * "wait" path selection) are missing from this view.
 */
225 static void intr_deschedule (
226 struct ehci_hcd *ehci,
231 unsigned frame = qh->start;
/* unlink from each frame this period touches */
234 periodic_unlink (ehci, frame, qh);
237 } while (frame < ehci->periodic_size);
239 qh->qh_state = QH_STATE_UNLINK;
241 ehci->periodic_sched--;
243 /* maybe turn off periodic schedule */
244 if (!ehci->periodic_sched)
245 status = disable_periodic (ehci);
248 vdbg ("periodic schedule still enabled");
252 * If the hc may be looking at this qh, then delay a uframe
253 * (yeech!) to be sure it's done.
254 * No other threads may be mucking with this qh.
256 if (((ehci_get_frame (&ehci->hcd) - frame) % qh->period) == 0) {
259 qh->hw_next = EHCI_LIST_END;
261 /* we may not be IDLE yet, but if the qh is empty
262 * the race is very short. then if qh also isn't
263 * rescheduled soon, it won't matter. otherwise...
265 vdbg ("intr_deschedule...");
268 qh->hw_next = EHCI_LIST_END;
270 qh->qh_state = QH_STATE_IDLE;
272 /* update per-qh bandwidth utilization (for usbfs) */
273 hcd_to_bus (&ehci->hcd)->bandwidth_allocated -=
274 (qh->usecs + qh->c_usecs) / qh->period;
276 dbg ("descheduled qh %p, period = %d frame = %d count = %d, urbs = %d",
277 qh, qh->period, frame,
278 atomic_read (&qh->refcount), ehci->periodic_sched);
/* Check whether "usecs" of bandwidth can be claimed in the given uframe
 * of every frame this period touches.  Returns nonzero when the slot
 * fits (exact return statements not visible in this view).
 */
281 static int check_period (
282 struct ehci_hcd *ehci,
288 /* complete split running into next frame?
289 * given FSTN support, we could sometimes check...
295 * 80% periodic == 100 usec/uframe available
296 * convert "usecs we need" to "max already claimed"
303 // FIXME delete when intr_submit handles non-empty queues
304 // this gives us a one intr/frame limit (vs N/uframe)
305 // ... and also lets us avoid tracking split transactions
306 // that might collide at a given TT/hub.
307 if (ehci->pshadow [frame].ptr)
/* sum what's already claimed in this uframe */
310 claimed = periodic_usecs (ehci, frame, uframe);
314 // FIXME update to handle sub-frame periods
315 } while ((frame += period) < ehci->periodic_size);
/* Check bandwidth for an interrupt QH at (frame, uframe); for split
 * transactions also verify the completion-split uframes and fill in
 * *c_maskp with the C-mask bits.  Returns 0 on success, -ENOSPC when
 * the slot doesn't fit (other paths not fully visible).
 */
321 static int check_intr_schedule (
322 struct ehci_hcd *ehci,
325 const struct ehci_qh *qh,
329 int retval = -ENOSPC;
/* first check the S-phase (start-split / high speed) bandwidth */
331 if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
/* high speed (non-split) transfers need no C-mask */
335 *c_maskp = cpu_to_le32 (0);
339 /* This is a split transaction; check the bandwidth available for
340 * the completion too. Check both worst and best case gaps: worst
341 * case is SPLIT near uframe end, and CSPLIT near start ... best is
342 * vice versa. Difference can be almost two uframe times, but we
343 * reserve unnecessary bandwidth (waste it) this way. (Actually
344 * even better cases exist, like immediate device NAK.)
346 * FIXME don't even bother unless we know this TT is idle in that
347 * range of uframes ... for now, check_period() allows only one
348 * interrupt transfer per frame, so needn't check "TT busy" status
349 * when scheduling a split (QH, SITD, or FSTN).
351 * FIXME ehci 0.96 and above can use FSTNs
/* worst case CSPLIT placement */
353 if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
354 qh->period, qh->c_usecs))
/* best case CSPLIT placement */
356 if (!check_period (ehci, frame, uframe + qh->gap_uf,
357 qh->period, qh->c_usecs))
/* two CSPLIT opportunities, starting gap_uf after the SSPLIT */
360 *c_maskp = cpu_to_le32 (0x03 << (8 + uframe + qh->gap_uf));
/* Find a (frame, uframe) slot with enough bandwidth for this interrupt
 * QH, program its S/C masks, and link it into the periodic schedule.
 * Reuses the qh's previous slot when possible.  Returns 0 on success
 * (scheduling-failure paths are not fully visible in this view).
 */
366 static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
371 unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
373 qh->hw_next = EHCI_LIST_END;
376 /* reuse the previous schedule slots, if we can */
377 if (frame < qh->period) {
/* recover the previously-programmed uframe from the S-mask bits */
378 uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
379 status = check_intr_schedule (ehci, frame, --uframe,
387 /* else scan the schedule to find a group of slots such that all
388 * uframes have enough periodic bandwidth available.
391 frame = qh->period - 1;
393 for (uframe = 0; uframe < 8; uframe++) {
394 status = check_intr_schedule (ehci,
400 } while (status && --frame);
405 /* reset S-frame and (maybe) C-frame masks */
406 qh->hw_info2 &= ~0xffff;
407 qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
409 dbg ("reused previous qh %p schedule", qh);
411 /* stuff into the periodic schedule */
412 qh->qh_state = QH_STATE_LINKED;
413 dbg ("scheduled qh %p usecs %d/%d period %d.0 starting %d.%d (gap %d)",
414 qh, qh->usecs, qh->c_usecs,
415 qh->period, frame, uframe, qh->gap_uf);
417 if (unlikely (ehci->pshadow [frame].ptr != 0)) {
419 // FIXME -- just link toward the end, before any qh with a shorter period,
420 // AND accommodate it already having been linked here (after some other qh)
421 // AS WELL AS updating the schedule checking logic
/* link at the head of each frame the period covers; qh_get takes
 * a reference for the schedule's pointer */
425 ehci->pshadow [frame].qh = qh_get (qh);
426 ehci->periodic [frame] =
427 QH_NEXT (qh->qh_dma);
431 } while (frame < ehci->periodic_size);
433 /* update per-qh bandwidth for usbfs */
434 hcd_to_bus (&ehci->hcd)->bandwidth_allocated +=
435 (qh->usecs + qh->c_usecs) / qh->period;
437 /* maybe enable periodic schedule processing */
438 if (!ehci->periodic_sched++)
439 status = enable_periodic (ehci);
/* Submit an interrupt URB: find/schedule its QH under ehci->lock, then
 * append the URB's qtds to that QH.  On error, frees the qtd list.
 * NOTE(review): full parameter list and return paths are missing from
 * this view.
 */
444 static int intr_submit (
445 struct ehci_hcd *ehci,
447 struct list_head *qtd_list,
456 struct list_head empty;
458 /* get endpoint and transfer/schedule data */
459 epnum = usb_pipeendpoint (urb->pipe);
460 is_input = usb_pipein (urb->pipe);
464 spin_lock_irqsave (&ehci->lock, flags);
465 dev = (struct hcd_dev *)urb->dev->hcpriv;
467 /* get qh and force any scheduling errors */
468 INIT_LIST_HEAD (&empty);
/* appending an empty list just creates/finds the qh */
469 qh = qh_append_tds (ehci, urb, &empty, epnum, &dev->ep [epnum]);
474 if (qh->qh_state == QH_STATE_IDLE) {
475 if ((status = qh_schedule (ehci, qh)) != 0)
479 /* then queue the urb's tds to the qh */
480 qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
483 /* ... update usbfs periodic stats */
484 hcd_to_bus (&ehci->hcd)->bandwidth_int_reqs++;
487 spin_unlock_irqrestore (&ehci->lock, flags);
/* error path: release the caller's qtds */
489 qtd_list_free (ehci, urb, qtd_list);
/* NOTE(review): this is the tail of intr_complete()'s parameter list and
 * body; the function name/signature line is missing from this view.
 * Processes completions on an interrupt QH and deschedules it when its
 * qtd list becomes empty.
 */
496 struct ehci_hcd *ehci,
503 /* nothing to report? */
504 if (likely ((qh->hw_token & __constant_cpu_to_le32 (QTD_STS_ACTIVE))
507 if (unlikely (list_empty (&qh->qtd_list))) {
508 dbg ("intr qh %p no TDs?", qh);
512 /* handle any completions */
513 count = qh_completions (ehci, qh, regs);
/* no more queued transfers: take the qh off the periodic schedule */
515 if (unlikely (list_empty (&qh->qtd_list)))
516 intr_deschedule (ehci, qh, 0);
521 /*-------------------------------------------------------------------------*/
/* Free every ITD allocated for this iso URB back to the itd pci_pool.
 * urb->hcpriv points at the first ITD, whose itd_list anchors the rest.
 * NOTE(review): the return-type line of this definition is missing from
 * this view.
 */
524 itd_free_list (struct ehci_hcd *ehci, struct urb *urb)
526 struct ehci_itd *first_itd = urb->hcpriv;
528 while (!list_empty (&first_itd->itd_list)) {
529 struct ehci_itd *itd;
532 first_itd->itd_list.next,
533 struct ehci_itd, itd_list);
534 list_del (&itd->itd_list);
535 pci_pool_free (ehci->itd_pool, itd, itd->itd_dma);
/* finally free the list anchor itself */
537 pci_pool_free (ehci->itd_pool, first_itd, first_itd->itd_dma);
/* NOTE(review): this is the parameter list and body of itd_fill(); the
 * name/return-type line is missing from this view.  Fills one ITD for
 * urb->iso_frame_desc[index]: buffer pointers (max 2 pages), endpoint
 * info, and the hw_transaction template scheduled later.
 */
543 struct ehci_hcd *ehci,
544 struct ehci_itd *itd,
546 unsigned index, // urb->iso_frame_desc [index]
547 dma_addr_t dma // mapped transfer buffer
551 unsigned i, epnum, maxp, multi;
555 itd->hw_next = EHCI_LIST_END;
559 /* tell itd about its transfer buffer, max 2 pages */
560 length = urb->iso_frame_desc [index].length;
561 dma += urb->iso_frame_desc [index].offset;
/* page-align; the sub-page offset goes into the transaction word below */
562 temp = dma & ~0x0fff;
563 for (i = 0; i < 2; i++) {
564 itd->hw_bufp [i] = cpu_to_le32 ((u32) temp);
/* 64-bit DMA: high dword of the buffer pointer */
565 itd->hw_bufp_hi [i] = cpu_to_le32 ((u32)(temp >> 32));
571 * this might be a "high bandwidth" highspeed endpoint,
572 * as encoded in the ep descriptor's maxpacket field
574 epnum = usb_pipeendpoint (urb->pipe);
575 is_input = usb_pipein (urb->pipe);
577 maxp = urb->dev->epmaxpacketin [epnum];
580 maxp = urb->dev->epmaxpacketout [epnum];
/* low 11 bits are max packet size; bits 11-12 encode extra
 * transactions per uframe (high bandwidth) */
583 buf1 |= (maxp & 0x03ff);
585 multi += (maxp >> 11) & 0x03;
589 /* transfer can't fit in any uframe? */
590 if (length < 0 || maxp < length) {
591 dbg ("BAD iso packet: %d bytes, max %d, urb %p [%d] (of %d)",
592 length, maxp, urb, index,
593 urb->iso_frame_desc [index].length);
596 itd->usecs = usb_calc_bus_time (USB_SPEED_HIGH, is_input, 1, length);
598 /* "plus" info in low order bits of buffer pointers */
599 itd->hw_bufp [0] |= cpu_to_le32 ((epnum << 8) | urb->dev->devnum);
600 itd->hw_bufp [1] |= cpu_to_le32 (buf1);
601 itd->hw_bufp [2] |= cpu_to_le32 (multi);
603 /* figure hw_transaction[] value (it's scheduled later) */
604 itd->transaction = EHCI_ISOC_ACTIVE;
605 itd->transaction |= dma & 0x0fff; /* offset; buffer=0 */
606 if ((index + 1) == urb->number_of_packets)
607 itd->transaction |= EHCI_ITD_IOC; /* end-of-urb irq */
608 itd->transaction |= length << 16;
/* store in bus (little-endian) byte order, in place */
609 cpu_to_le32s (&itd->transaction);
/* Allocate and initialize one ITD per iso packet for this URB, chaining
 * them on the first ITD's itd_list; the first ITD is stored in
 * urb->hcpriv.  On failure, frees everything allocated so far.
 * NOTE(review): the return-type line is missing from this view.
 */
615 itd_urb_transaction (
616 struct ehci_hcd *ehci,
621 struct ehci_itd *first_itd, *itd;
625 /* allocate/init ITDs */
626 for (frame_index = 0, first_itd = 0;
627 frame_index < urb->number_of_packets;
629 itd = pci_pool_alloc (ehci->itd_pool, mem_flags, &itd_dma);
634 memset (itd, 0, sizeof *itd);
635 itd->itd_dma = itd_dma;
637 status = itd_fill (ehci, itd, urb, frame_index,
/* subsequent ITDs chain onto the first one's list */
643 list_add_tail (&itd->itd_list,
644 &first_itd->itd_list);
/* first ITD anchors the list and is published via hcpriv */
646 INIT_LIST_HEAD (&itd->itd_list);
647 urb->hcpriv = first_itd = itd;
650 urb->error_count = 0;
/* error path: undo all allocations */
655 itd_free_list (ehci, urb);
659 /*-------------------------------------------------------------------------*/
/* Prepend one ITD to the schedule for @frame, in both the software
 * shadow list and the hardware periodic list.
 * NOTE(review): the return-type line is missing from this view.
 */
662 itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
664 /* always prepend ITD/SITD ... only QH tree is order-sensitive */
665 itd->itd_next = ehci->pshadow [frame];
666 itd->hw_next = ehci->periodic [frame];
667 ehci->pshadow [frame].itd = itd;
668 ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD;
672 * return zero on success, else -errno
673 * - start holds first uframe to start scheduling into
674 * - max is the first uframe it's NOT (!) OK to start scheduling into
675 * math to be done modulo "mod" (ehci->periodic_size << 3)
/* Compute the legal scheduling window [*start, *max) in uframes for an
 * iso URB, modulo @mod; honors URB_ISO_ASAP vs an explicit start frame
 * and chains after any already-queued URB on the same endpoint.
 * NOTE(review): several statements (including returns) are missing from
 * this view.
 */
677 static int get_iso_range (
678 struct ehci_hcd *ehci,
684 struct list_head *lh;
685 struct hcd_dev *dev = urb->dev->hcpriv;
687 unsigned now, span, end;
/* total uframes this URB will cover */
689 span = urb->interval * urb->number_of_packets;
691 /* first see if we know when the next transfer SHOULD happen */
692 list_for_each (lh, &dev->urb_list) {
694 struct ehci_itd *itd;
697 u = list_entry (lh, struct urb, urb_list);
698 if (u == urb || u->pipe != urb->pipe)
700 if (u->interval != urb->interval) { /* must not change! */
701 dbg ("urb %p interval %d ... != %p interval %d",
702 u, u->interval, urb, urb->interval);
706 /* URB for this endpoint... covers through when? */
708 s = itd->uframe + u->interval * u->number_of_packets;
713 * So far we can only queue two ISO URBs...
715 * FIXME do interval math, figure out whether
716 * this URB is "before" or not ... also, handle
717 * the case where the URB might have completed,
718 * but hasn't yet been processed.
720 dbg ("NYET: queue >2 URBs per ISO endpoint");
725 /* calculate the legal range [start,max) */
726 now = readl (&ehci->regs->frame_index) + 1; /* next uframe */
727 if (!ehci->periodic_sched)
728 now += 8; /* startup delay */
/* i_thresh: HC's isochronous scheduling threshold, plus slack */
732 *start = now + ehci->i_thresh + /* paranoia */ 1;
734 if (*max < *start + 1)
738 *max = (last + 1) % mod;
741 /* explicit start frame? */
742 if (!(urb->transfer_flags & URB_ISO_ASAP)) {
745 /* sanity check: must be in range */
746 urb->start_frame %= ehci->periodic_size;
747 temp = urb->start_frame << 3;
753 /* use that explicit start frame */
754 *start = urb->start_frame << 3;
760 // FIXME minimize wraparound to "now" ... insist max+span
761 // (and start+span) remains a few frames short of "end"
763 *max %= ehci->periodic_size;
764 if ((*start + span) < end)
/* Try each candidate start uframe in [start, max): verify every packet's
 * uframe has bandwidth, then link all ITDs into the schedule, update
 * usbfs bandwidth accounting, and enable the periodic schedule if it
 * was idle.  Fails with "no room" if the whole window is exhausted.
 * NOTE(review): the return-type line and some statements are missing
 * from this view.
 */
770 itd_schedule (struct ehci_hcd *ehci, struct urb *urb)
772 unsigned start, max, i;
774 unsigned mod = ehci->periodic_size << 3;
/* mark all packets in-flight before scheduling */
776 for (i = 0; i < urb->number_of_packets; i++) {
777 urb->iso_frame_desc [i].status = -EINPROGRESS;
778 urb->iso_frame_desc [i].actual_length = 0;
781 if ((status = get_iso_range (ehci, urb, &start, &max, mod)) != 0)
787 struct ehci_itd *itd;
789 /* check schedule: enough space? */
792 for (i = 0, uframe = start;
793 i < urb->number_of_packets;
794 i++, uframe += urb->interval) {
797 /* can't commit more than 80% periodic == 100 usec */
798 if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
799 > (100 - itd->usecs)) {
/* walk to the next ITD for the next packet */
803 itd = list_entry (itd->itd_list.next,
804 struct ehci_itd, itd_list);
809 /* that's where we'll schedule this! */
811 urb->start_frame = start >> 3;
812 vdbg ("ISO urb %p (%d packets period %d) starting %d.%d",
813 urb, urb->number_of_packets, urb->interval,
814 urb->start_frame, start & 0x7);
/* second pass: commit each ITD into its frame */
815 for (i = 0, uframe = start, usecs = 0;
816 i < urb->number_of_packets;
817 i++, uframe += urb->interval) {
820 itd->uframe = uframe;
821 itd->hw_transaction [uframe & 0x07] = itd->transaction;
822 itd_link (ehci, (uframe >> 3) % ehci->periodic_size,
827 itd = list_entry (itd->itd_list.next,
828 struct ehci_itd, itd_list);
831 /* update bandwidth utilization records (for usbfs)
833 * FIXME This claims each URB queued to an endpoint, as if
834 * transfers were concurrent, not sequential. So bandwidth
835 * typically gets double-billed ... comes from tying it to
836 * URBs rather than endpoints in the schedule. Luckily we
837 * don't use this usbfs data for serious decision making.
839 usecs /= urb->number_of_packets;
840 usecs /= urb->interval;
844 usb_claim_bandwidth (urb->dev, urb, usecs, 1);
846 /* maybe enable periodic schedule processing */
847 if (!ehci->periodic_sched++) {
848 if ((status = enable_periodic (ehci)) != 0) {
849 // FIXME deschedule right away
850 err ("itd_schedule, enable = %d", status);
/* slide the whole window forward one uframe and retry */
856 } while ((start = ++start % mod) != max);
858 /* no room in the schedule */
859 dbg ("urb %p, CAN'T SCHEDULE", urb);
863 /*-------------------------------------------------------------------------*/
865 #define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
/* NOTE(review): this is the parameter list and body of itd_complete();
 * the name/return-type line is missing from this view.  Records one
 * uframe's transfer status into the URB's iso_frame_desc, and on the
 * last packet gives the URB back and drops the schedule refcount.
 */
869 struct ehci_hcd *ehci,
870 struct ehci_itd *itd,
874 struct urb *urb = itd->urb;
875 struct usb_iso_packet_descriptor *desc;
878 /* update status for this uframe's transfers */
879 desc = &urb->iso_frame_desc [itd->index];
/* consume the transaction word; zero it so it isn't processed twice */
881 t = itd->hw_transaction [uframe];
882 itd->hw_transaction [uframe] = 0;
/* still ACTIVE at completion time means the HC never ran it */
883 if (t & EHCI_ISOC_ACTIVE)
884 desc->status = -EXDEV;
885 else if (t & ISO_ERRS) {
887 if (t & EHCI_ISOC_BUF_ERR)
888 desc->status = usb_pipein (urb->pipe)
889 ? -ENOSR /* couldn't read */
890 : -ECOMM; /* couldn't write */
891 else if (t & EHCI_ISOC_BABBLE)
892 desc->status = -EOVERFLOW;
893 else /* (t & EHCI_ISOC_XACTERR) */
894 desc->status = -EPROTO;
896 /* HC need not update length with this error */
897 if (!(t & EHCI_ISOC_BABBLE))
898 desc->actual_length += EHCI_ITD_LENGTH (t);
/* success: accumulate transferred length */
901 desc->actual_length += EHCI_ITD_LENGTH (t);
904 vdbg ("itd %p urb %p packet %d/%d trans %x status %d len %d",
905 itd, urb, itd->index + 1, urb->number_of_packets,
906 t, desc->status, desc->actual_length);
908 /* handle completion now? */
909 if ((itd->index + 1) != urb->number_of_packets)
913 * Always give the urb back to the driver ... expect it to submit
914 * a new urb (or resubmit this), and to have another already queued
915 * when un-interrupted transfers are needed.
917 * NOTE that for now we don't accelerate ISO unlinks; they just
918 * happen according to the current schedule. Means a delay of
919 * up to about a second (max).
921 itd_free_list (ehci, urb);
922 if (urb->status == -EINPROGRESS)
925 /* complete() can reenter this HCD */
926 spin_unlock (&ehci->lock);
927 usb_hcd_giveback_urb (&ehci->hcd, urb, regs);
928 spin_lock (&ehci->lock);
930 /* defer stopping schedule; completion can submit */
931 ehci->periodic_sched--;
932 if (!ehci->periodic_sched)
933 (void) disable_periodic (ehci);
938 /*-------------------------------------------------------------------------*/
/* Submit an iso URB: allocate/fill its ITDs without the lock, then
 * schedule them under ehci->lock; frees the ITDs on scheduling failure.
 */
940 static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
945 dbg ("itd_submit urb %p", urb);
947 /* allocate ITDs w/o locking anything */
948 status = itd_urb_transaction (ehci, urb, mem_flags);
952 /* schedule ... need to lock */
953 spin_lock_irqsave (&ehci->lock, flags);
954 status = itd_schedule (ehci, urb);
955 spin_unlock_irqrestore (&ehci->lock, flags);
/* scheduling failed: release everything itd_urb_transaction built */
957 itd_free_list (ehci, urb);
962 #ifdef have_split_iso
964 /*-------------------------------------------------------------------------*/
967 * "Split ISO TDs" ... used for USB 1.1 devices going through
968 * the TTs in USB 2.0 hubs.
970 * FIXME not yet implemented
973 #endif /* have_split_iso */
975 /*-------------------------------------------------------------------------*/
978 scan_periodic (struct ehci_hcd *ehci, struct pt_regs *regs)
980 unsigned frame, clock, now_uframe, mod;
983 mod = ehci->periodic_size << 3;
986 * When running, scan from last scan point up to "now"
987 * else clean up by scanning everything that's left.
988 * Touches as few pages as possible: cache-friendly.
989 * Don't scan ISO entries more than once, though.
991 frame = ehci->next_uframe >> 3;
992 if (HCD_IS_RUNNING (ehci->hcd.state))
993 now_uframe = readl (&ehci->regs->frame_index);
995 now_uframe = (frame << 3) - 1;
997 clock = now_uframe >> 3;
1000 union ehci_shadow q, *q_p;
1005 /* scan schedule to _before_ current frame index */
1007 uframes = now_uframe & 0x07;
1011 q_p = &ehci->pshadow [frame];
1012 hw_p = &ehci->periodic [frame];
1014 type = Q_NEXT_TYPE (*hw_p);
1016 /* scan each element in frame's queue for completions */
1017 while (q.ptr != 0) {
1020 union ehci_shadow temp;
1024 last = (q.qh->hw_next == EHCI_LIST_END);
1025 temp = q.qh->qh_next;
1026 type = Q_NEXT_TYPE (q.qh->hw_next);
1027 count += intr_complete (ehci, frame,
1028 qh_get (q.qh), regs);
1029 qh_put (ehci, q.qh);
1033 last = (q.fstn->hw_next == EHCI_LIST_END);
1034 /* for "save place" FSTNs, look at QH entries
1035 * in the previous frame for completions.
1037 if (q.fstn->hw_prev != EHCI_LIST_END) {
1038 dbg ("ignoring completions from FSTNs");
1040 type = Q_NEXT_TYPE (q.fstn->hw_next);
1041 q = q.fstn->fstn_next;
1044 last = (q.itd->hw_next == EHCI_LIST_END);
1046 /* Unlink each (S)ITD we see, since the ISO
1047 * URB model forces constant rescheduling.
1048 * That complicates sharing uframes in ITDs,
1049 * and means we need to skip uframes the HC
1050 * hasn't yet processed.
1052 for (uf = 0; uf < uframes; uf++) {
1053 if (q.itd->hw_transaction [uf] != 0) {
1055 *q_p = q.itd->itd_next;
1056 *hw_p = q.itd->hw_next;
1057 type = Q_NEXT_TYPE (*hw_p);
1059 /* might free q.itd ... */
1060 count += itd_complete (ehci,
1061 temp.itd, uf, regs);
1065 /* we might skip this ITD's uframe ... */
1066 if (uf == uframes) {
1067 q_p = &q.itd->itd_next;
1068 hw_p = &q.itd->hw_next;
1069 type = Q_NEXT_TYPE (q.itd->hw_next);
1074 #ifdef have_split_iso
1076 last = (q.sitd->hw_next == EHCI_LIST_END);
1077 sitd_complete (ehci, q.sitd);
1078 type = Q_NEXT_TYPE (q.sitd->hw_next);
1080 // FIXME unlink SITD after split completes
1081 q = q.sitd->sitd_next;
1083 #endif /* have_split_iso */
1085 dbg ("corrupt type %d frame %d shadow %p",
1086 type, frame, q.ptr);
1092 /* did completion remove an interior q entry? */
1093 if (unlikely (q.ptr == 0 && !last))
1097 /* stop when we catch up to the HC */
1099 // FIXME: this assumes we won't get lapped when
1100 // latencies climb; that should be rare, but...
1101 // detect it, and just go all the way around.
1102 // FLR might help detect this case, so long as latencies
1103 // don't exceed periodic_size msec (default 1.024 sec).
1105 // FIXME: likewise assumes HC doesn't halt mid-scan
1107 if (frame == clock) {
1110 if (!HCD_IS_RUNNING (ehci->hcd.state))
1112 ehci->next_uframe = now_uframe;
1113 now = readl (&ehci->regs->frame_index) % mod;
1114 if (now_uframe == now)
1117 /* rescan the rest of this frame, then ... */
1119 clock = now_uframe >> 3;
1121 frame = (frame + 1) % ehci->periodic_size;