2 * Copyright (c) 2001-2003 by David Brownell
3 * Copyright (c) 2003 Michal Sojka, for high-speed iso transfers
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms of the GNU General Public License as published by the
7 * Free Software Foundation; either version 2 of the License, or (at your
8 * option) any later version.
10 * This program is distributed in the hope that it will be useful, but
11 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
12 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
15 * You should have received a copy of the GNU General Public License
16 * along with this program; if not, write to the Free Software Foundation,
17 * Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
20 /* this file is part of ehci-hcd.c */
22 /*-------------------------------------------------------------------------*/
25 * EHCI scheduled transaction support: interrupt, iso, split iso
26 * These are called "periodic" transactions in the EHCI spec.
28 * Note that for interrupt transfers, the QH/QTD manipulation is shared
29 * with the "asynchronous" transaction support (control/bulk transfers).
30 * The only real difference is in how interrupt transfers are scheduled.
32 * For ISO, we make an "iso_stream" head to serve the same role as a QH.
33 * It keeps track of every ITD (or SITD) that's linked, and holds enough
34 * pre-calculated schedule data to make appending to the queue be quick.
37 static int ehci_get_frame (struct usb_hcd *hcd);
39 /*-------------------------------------------------------------------------*/
42 * periodic_next_shadow - return "next" pointer on shadow list
43 * @periodic: host pointer to qh/itd/sitd
44 * @tag: hardware tag for type of this record
/* NOTE(review): excerpt — the switch statement's case labels and braces are
 * elided in this extraction; code kept byte-identical.
 * Returns the address of the "next" shadow pointer inside a periodic-list
 * element, selected by the element's hardware type tag (QH/FSTN/ITD/SITD).
 */
46 static union ehci_shadow *
47 periodic_next_shadow (union ehci_shadow *periodic, int tag)
51 return &periodic->qh->qh_next;
53 return &periodic->fstn->fstn_next;
55 return &periodic->itd->itd_next;
/* SITD arm is conditionally compiled; guarded by have_split_iso */
58 return &periodic->sitd->sitd_next;
59 #endif /* have_split_iso */
/* fall-through for an unrecognized tag — logs and (presumably) returns an error value; TODO confirm against full source */
61 dbg ("BAD shadow %p tag %d", periodic->ptr, tag);
66 /* returns true after successful unlink */
67 /* caller must hold ehci->lock */
/* NOTE(review): excerpt — loop/branch closing braces and the final shadow-list
 * update are elided here; code kept byte-identical.
 * Removes entry "ptr" from frame's periodic lists: advances along the shadow
 * list (kept in sync with the hardware list) to find the predecessor, then
 * patches the hardware next pointer past the entry.
 */
68 static int periodic_unlink (struct ehci_hcd *ehci, unsigned frame, void *ptr)
70 union ehci_shadow *prev_p = &ehci->pshadow [frame];
71 u32 *hw_p = &ehci->periodic [frame];
72 union ehci_shadow here = *prev_p;
73 union ehci_shadow *next_p;
75 /* find predecessor of "ptr"; hw and shadow lists are in sync */
76 while (here.ptr && here.ptr != ptr) {
77 prev_p = periodic_next_shadow (prev_p, Q_NEXT_TYPE (*hw_p));
/* hw_next is at the same offset in all union members, so the qh view works for any type */
78 hw_p = &here.qh->hw_next;
81 /* an interrupt entry (at list end) could have been shared */
83 dbg ("entry %p no longer on frame [%d]", ptr, frame);
86 // vdbg ("periodic unlink %p from frame %d", ptr, frame);
88 /* update hardware list ... HC may still know the old structure, so
89 * don't change hw_next until it'll have purged its cache
91 next_p = periodic_next_shadow (&here, Q_NEXT_TYPE (*hw_p));
92 *hw_p = here.qh->hw_next;
94 /* unlink from shadow list; HCD won't see old structure again */
101 /* how many of the uframe's 125 usecs are allocated? */
/* NOTE(review): excerpt — switch case labels, loop structure, and the
 * accumulator declaration are elided; code kept byte-identical.
 * Walks the shadow list for "frame", summing the microseconds each entry
 * (QH / FSTN / ITD / SITD) claims in the given microframe.
 */
102 static unsigned short
103 periodic_usecs (struct ehci_hcd *ehci, unsigned frame, unsigned uframe)
105 u32 *hw_p = &ehci->periodic [frame];
106 union ehci_shadow *q = &ehci->pshadow [frame];
110 switch (Q_NEXT_TYPE (*hw_p)) {
112 /* is it in the S-mask? */
113 if (q->qh->hw_info2 & cpu_to_le32 (1 << uframe))
114 usecs += q->qh->usecs;
/* ... and C-mask (bits 8..15 of hw_info2) for split completions */
116 if (q->qh->hw_info2 & cpu_to_le32 (1 << (8 + uframe)))
117 usecs += q->qh->c_usecs;
121 /* for "save place" FSTNs, count the relevant INTR
122 * bandwidth from the previous frame
124 if (q->fstn->hw_prev != EHCI_LIST_END) {
125 dbg ("not counting FSTN bandwidth yet ...");
127 q = &q->fstn->fstn_next;
130 usecs += q->itd->usecs [uframe];
131 q = &q->itd->itd_next;
133 #ifdef have_split_iso
/* bit 31 of hw_fullspeed_ep — presumably the direction flag; TODO confirm */
135 temp = q->sitd->hw_fullspeed_ep &
136 __constant_cpu_to_le32 (1 << 31);
138 // FIXME: this doesn't count data bytes right...
140 /* is it in the S-mask? (count SPLIT, DATA) */
141 if (q->sitd->hw_uframe & cpu_to_le32 (1 << uframe)) {
143 usecs += HS_USECS (188);
145 usecs += HS_USECS (1);
148 /* ... C-mask? (count CSPLIT, DATA) */
149 if (q->sitd->hw_uframe &
150 cpu_to_le32 (1 << (8 + uframe))) {
152 usecs += HS_USECS (0);
154 usecs += HS_USECS (188);
156 q = &q->sitd->sitd_next;
158 #endif /* have_split_iso */
/* debug-only sanity check: the EHCI 80% rule means >100us/uframe is a scheduler bug */
165 err ("overallocated uframe %d, periodic is %d usecs",
166 frame * 8 + uframe, usecs);
171 /*-------------------------------------------------------------------------*/
/* NOTE(review): excerpt — local declarations, error-return path, and the
 * closing of the handshake-failure branch are elided; code kept byte-identical.
 * Turns on the periodic schedule: waits for any pending PSE-clear to settle
 * (PSS status tracks frame boundaries), then sets CMD_PSE and primes
 * next_uframe so ehci_work starts scanning from the current frame index.
 */
173 static int enable_periodic (struct ehci_hcd *ehci)
178 /* did clearing PSE take effect yet?
179 * takes effect only at frame boundaries...
181 status = handshake (&ehci->regs->status, STS_PSS, 0, 9 * 125);
/* handshake failed: controller is wedged — mark HC halted */
183 ehci->hcd.state = USB_STATE_HALT;
187 cmd = readl (&ehci->regs->command) | CMD_PSE;
188 writel (cmd, &ehci->regs->command);
189 /* posted write ... PSS happens later */
190 ehci->hcd.state = USB_STATE_RUNNING;
192 /* make sure ehci_work scans these */
193 ehci->next_uframe = readl (&ehci->regs->frame_index)
194 % (ehci->periodic_size << 3);
/* NOTE(review): excerpt — local declarations and the return statement are
 * elided; code kept byte-identical.
 * Mirror of enable_periodic(): waits for a pending PSE-set to take effect,
 * then clears CMD_PSE and marks the periodic scan position invalid.
 */
198 static int disable_periodic (struct ehci_hcd *ehci)
203 /* did setting PSE not take effect yet?
204 * takes effect only at frame boundaries...
206 status = handshake (&ehci->regs->status, STS_PSS, STS_PSS, 9 * 125);
/* handshake failed: controller is wedged — mark HC halted */
208 ehci->hcd.state = USB_STATE_HALT;
212 cmd = readl (&ehci->regs->command) & ~CMD_PSE;
213 writel (cmd, &ehci->regs->command);
214 /* posted write ... */
/* -1 flags "no periodic scanning position" to ehci_work */
216 ehci->next_uframe = -1;
220 /*-------------------------------------------------------------------------*/
222 // FIXME microframe periods not yet handled
/* NOTE(review): excerpt — remaining parameters, the unlink do/while head, and
 * several branch bodies are elided; code kept byte-identical.
 * Removes an interrupt QH from every frame of the periodic schedule it was
 * linked into (stride qh->period), drops the schedule's usage count, and
 * possibly disables the periodic schedule entirely.
 */
224 static void intr_deschedule (
225 struct ehci_hcd *ehci,
230 unsigned frame = qh->start;
233 periodic_unlink (ehci, frame, qh);
236 } while (frame < ehci->periodic_size);
238 qh->qh_state = QH_STATE_UNLINK;
240 ehci->periodic_sched--;
242 /* maybe turn off periodic schedule */
243 if (!ehci->periodic_sched)
244 status = disable_periodic (ehci);
247 vdbg ("periodic schedule still enabled");
251 * If the hc may be looking at this qh, then delay a uframe
252 * (yeech!) to be sure it's done.
253 * No other threads may be mucking with this qh.
255 if (((ehci_get_frame (&ehci->hcd) - frame) % qh->period) == 0) {
258 qh->hw_next = EHCI_LIST_END;
260 /* we may not be IDLE yet, but if the qh is empty
261 * the race is very short. then if qh also isn't
262 * rescheduled soon, it won't matter. otherwise...
264 vdbg ("intr_deschedule...");
267 qh->hw_next = EHCI_LIST_END;
269 qh->qh_state = QH_STATE_IDLE;
271 /* update per-qh bandwidth utilization (for usbfs) */
272 hcd_to_bus (&ehci->hcd)->bandwidth_allocated -=
273 (qh->usecs + qh->c_usecs) / qh->period;
275 dbg ("descheduled qh %p, period = %d frame = %d count = %d, urbs = %d",
276 qh, qh->period, frame,
277 atomic_read (&qh->refcount), ehci->periodic_sched);
/* NOTE(review): excerpt — remaining parameters (frame, uframe, period, usecs —
 * inferred from callers; TODO confirm), the do-loop head, the usecs-vs-claimed
 * comparison, and the return statements are elided; code kept byte-identical.
 * Checks whether "usecs" of bandwidth fit in the given uframe, for every
 * frame occupied at the given period.
 */
280 static int check_period (
281 struct ehci_hcd *ehci,
287 /* complete split running into next frame?
288 * given FSTN support, we could sometimes check...
294 * 80% periodic == 100 usec/uframe available
295 * convert "usecs we need" to "max already claimed"
302 // FIXME delete when intr_submit handles non-empty queues
303 // this gives us a one intr/frame limit (vs N/uframe)
304 // ... and also lets us avoid tracking split transactions
305 // that might collide at a given TT/hub.
306 if (ehci->pshadow [frame].ptr)
309 claimed = periodic_usecs (ehci, frame, uframe);
313 // FIXME update to handle sub-frame periods
314 } while ((frame += period) < ehci->periodic_size);
/* NOTE(review): excerpt — parameters frame/uframe/c_maskp, the "done" labels
 * and the non-split early-out logic are elided; code kept byte-identical.
 * Verifies bandwidth for an interrupt QH at a candidate (frame, uframe): the
 * S-transaction first, and for split transactions both plausible CSPLIT
 * placements (gap_uf and gap_uf+1 later), writing the C-mask on success.
 */
320 static int check_intr_schedule (
321 struct ehci_hcd *ehci,
324 const struct ehci_qh *qh,
328 int retval = -ENOSPC;
330 if (!check_period (ehci, frame, uframe, qh->period, qh->usecs))
/* high-speed (non-split) path needs no C-mask */
334 *c_maskp = cpu_to_le32 (0);
338 /* This is a split transaction; check the bandwidth available for
339 * the completion too. Check both worst and best case gaps: worst
340 * case is SPLIT near uframe end, and CSPLIT near start ... best is
341 * vice versa. Difference can be almost two uframe times, but we
342 * reserve unnecessary bandwidth (waste it) this way. (Actually
343 * even better cases exist, like immediate device NAK.)
345 * FIXME don't even bother unless we know this TT is idle in that
346 * range of uframes ... for now, check_period() allows only one
347 * interrupt transfer per frame, so needn't check "TT busy" status
348 * when scheduling a split (QH, SITD, or FSTN).
350 * FIXME ehci 0.96 and above can use FSTNs
352 if (!check_period (ehci, frame, uframe + qh->gap_uf + 1,
353 qh->period, qh->c_usecs))
355 if (!check_period (ehci, frame, uframe + qh->gap_uf,
356 qh->period, qh->c_usecs))
/* two CSPLIT uframes starting at uframe + gap_uf, encoded in bits 8..15 */
359 *c_maskp = cpu_to_le32 (0x03 << (8 + uframe + qh->gap_uf));
/* NOTE(review): excerpt — several locals (status, uframe, c_mask), the frame
 * initialization from qh->start, loop heads and closing braces are elided;
 * code kept byte-identical.
 * Picks a (frame, uframe) slot for an interrupt QH — reusing its previous
 * slot when possible, otherwise scanning the whole schedule — then links it
 * into every frame it occupies and enables the periodic schedule if this is
 * the first periodic entry.
 */
365 static int qh_schedule (struct ehci_hcd *ehci, struct ehci_qh *qh)
370 unsigned frame; /* 0..(qh->period - 1), or NO_FRAME */
372 qh->hw_next = EHCI_LIST_END;
375 /* reuse the previous schedule slots, if we can */
376 if (frame < qh->period) {
/* recover the previous uframe from the S-mask; ffs() is 1-based, hence --uframe */
377 uframe = ffs (le32_to_cpup (&qh->hw_info2) & 0x00ff);
378 status = check_intr_schedule (ehci, frame, --uframe,
386 /* else scan the schedule to find a group of slots such that all
387 * uframes have enough periodic bandwidth available.
390 frame = qh->period - 1;
392 for (uframe = 0; uframe < 8; uframe++) {
393 status = check_intr_schedule (ehci,
399 } while (status && frame--);
404 /* reset S-frame and (maybe) C-frame masks */
405 qh->hw_info2 &= ~0xffff;
406 qh->hw_info2 |= cpu_to_le32 (1 << uframe) | c_mask;
408 dbg ("reused previous qh %p schedule", qh);
410 /* stuff into the periodic schedule */
411 qh->qh_state = QH_STATE_LINKED;
412 dbg ("scheduled qh %p usecs %d/%d period %d.0 starting %d.%d (gap %d)",
413 qh, qh->usecs, qh->c_usecs,
414 qh->period, frame, uframe, qh->gap_uf);
/* collision handling is unimplemented — see FIXME below */
416 if (unlikely (ehci->pshadow [frame].ptr != 0)) {
418 // FIXME -- just link toward the end, before any qh with a shorter period,
419 // AND accommodate it already having been linked here (after some other qh)
420 // AS WELL AS updating the schedule checking logic
424 ehci->pshadow [frame].qh = qh_get (qh);
425 ehci->periodic [frame] =
426 QH_NEXT (qh->qh_dma);
430 } while (frame < ehci->periodic_size);
432 /* update per-qh bandwidth for usbfs */
433 hcd_to_bus (&ehci->hcd)->bandwidth_allocated +=
434 (qh->usecs + qh->c_usecs) / qh->period;
436 /* maybe enable periodic schedule processing */
437 if (!ehci->periodic_sched++)
438 status = enable_periodic (ehci);
/* NOTE(review): excerpt — parameters urb/mem_flags, local declarations
 * (status, epnum, is_input, dev, qh, flags), error labels and the return are
 * elided; code kept byte-identical.
 * Submission path for interrupt URBs: under ehci->lock, gets (or schedules)
 * the endpoint's QH via an empty-list probe, then queues the URB's qtds.
 * On error the caller's qtd_list is freed here.
 */
443 static int intr_submit (
444 struct ehci_hcd *ehci,
446 struct list_head *qtd_list,
455 struct list_head empty;
457 /* get endpoint and transfer/schedule data */
458 epnum = usb_pipeendpoint (urb->pipe);
459 is_input = usb_pipein (urb->pipe);
463 spin_lock_irqsave (&ehci->lock, flags);
464 dev = (struct hcd_dev *)urb->dev->hcpriv;
466 /* get qh and force any scheduling errors */
467 INIT_LIST_HEAD (&empty);
468 qh = qh_append_tds (ehci, urb, &empty, epnum, &dev->ep [epnum]);
473 if (qh->qh_state == QH_STATE_IDLE) {
474 if ((status = qh_schedule (ehci, qh)) != 0)
478 /* then queue the urb's tds to the qh */
479 qh = qh_append_tds (ehci, urb, qtd_list, epnum, &dev->ep [epnum]);
482 /* ... update usbfs periodic stats */
483 hcd_to_bus (&ehci->hcd)->bandwidth_int_reqs++;
486 spin_unlock_irqrestore (&ehci->lock, flags);
/* error path: discard the caller's qtds (presumably only reached on failure — TODO confirm) */
488 qtd_list_free (ehci, urb, qtd_list);
/* NOTE(review): excerpt — the function's name and return type are elided;
 * from the body (qh_completions + intr_deschedule) this is presumably the
 * interrupt-QH completion handler (intr_complete) — TODO confirm against
 * full source. Code kept byte-identical.
 * Processes finished qtds on an interrupt QH and deschedules it once its
 * qtd list is empty.
 */
495 struct ehci_hcd *ehci,
502 /* nothing to report? */
503 if (likely ((qh->hw_token & __constant_cpu_to_le32 (QTD_STS_ACTIVE))
506 if (unlikely (list_empty (&qh->qtd_list))) {
507 dbg ("intr qh %p no TDs?", qh);
511 /* handle any completions */
512 count = qh_completions (ehci, qh, regs);
514 if (unlikely (list_empty (&qh->qtd_list)))
515 intr_deschedule (ehci, qh, 0);
520 /*-------------------------------------------------------------------------*/
/* NOTE(review): excerpt — the closing brace and return statement are elided;
 * code kept byte-identical.
 * Allocates and zero-initializes an iso_stream head (the iso analogue of a
 * QH): empty itd lists, invalid next_uframe, initial refcount of 1.
 */
522 static inline struct ehci_iso_stream *
523 iso_stream_alloc (int mem_flags)
525 struct ehci_iso_stream *stream;
527 stream = kmalloc(sizeof *stream, mem_flags);
528 if (likely (stream != 0)) {
529 memset (stream, 0, sizeof(*stream));
530 INIT_LIST_HEAD(&stream->itd_list);
531 INIT_LIST_HEAD(&stream->free_itd_list);
532 stream->next_uframe = -1;
533 stream->refcount = 1;
/* NOTE(review): excerpt — the function name (presumably iso_stream_init,
 * given the iso_stream_find() caller — TODO confirm), remaining parameters
 * (pipe, interval — inferred from the body), and the in/out branch structure
 * are elided; code kept byte-identical.
 * Precomputes per-stream iTD fields (buf0/buf1/buf2), endpoint address, and
 * the usbfs bandwidth figure from wMaxPacket / high-bandwidth multiplier.
 */
540 struct ehci_iso_stream *stream,
541 struct usb_device *dev,
547 unsigned epnum, maxp, multi;
552 * this might be a "high bandwidth" highspeed endpoint,
553 * as encoded in the ep descriptor's wMaxPacket field
555 epnum = usb_pipeendpoint (pipe);
556 is_input = usb_pipein (pipe) ? USB_DIR_IN : 0;
558 maxp = dev->epmaxpacketin [epnum];
561 maxp = dev->epmaxpacketout [epnum];
/* split wMaxPacket into transactions-per-uframe (multi) and packet size */
565 multi = hb_mult(maxp);
566 maxp = max_packet(maxp);
570 stream->dev = (struct hcd_dev *)dev->hcpriv;
572 stream->bEndpointAddress = is_input | epnum;
573 stream->interval = interval;
576 stream->buf0 = cpu_to_le32 ((epnum << 8) | dev->devnum);
577 stream->buf1 = cpu_to_le32 (buf1);
578 stream->buf2 = cpu_to_le32 (multi);
580 /* usbfs wants to report the average usecs per frame tied up
581 * when transfers on this endpoint are scheduled ...
583 stream->usecs = HS_USECS_ISO (maxp);
/* average usecs/frame = per-uframe cost * 8 uframes, divided by the 2^(interval-1) uframe period */
584 bandwidth = stream->usecs * 8;
585 bandwidth /= 1 << (interval - 1);
586 stream->bandwidth = bandwidth;
/* NOTE(review): excerpt — return type, the refcount decrement itself, and
 * several closing braces are elided; code kept byte-identical.
 * Drops a reference on an iso_stream; when only the dev->ep reference
 * remains, frees all cached iTDs back to the pci pool, clears the dev->ep
 * slot, and logs rescheduling statistics.
 */
590 iso_stream_put(struct ehci_hcd *ehci, struct ehci_iso_stream *stream)
594 /* free whenever just a dev->ep reference remains.
595 * not like a QH -- no persistent state (toggle, halt)
597 if (stream->refcount == 1) {
600 // BUG_ON (!list_empty(&stream->itd_list));
602 while (!list_empty (&stream->free_itd_list)) {
603 struct ehci_itd *itd;
605 itd = list_entry (stream->free_itd_list.next,
606 struct ehci_itd, itd_list);
607 list_del (&itd->itd_list);
608 pci_pool_free (ehci->itd_pool, itd, itd->itd_dma);
/* dev->ep[] indexes IN endpoints at offset 0x10 */
611 is_in = (stream->bEndpointAddress & USB_DIR_IN) ? 0x10 : 0;
612 stream->bEndpointAddress &= 0x0f;
613 stream->dev->ep [is_in + stream->bEndpointAddress] = 0;
615 if (stream->rescheduled) {
616 ehci_info (ehci, "ep%d%s-iso rescheduled "
617 "%lu times in %lu seconds\n",
618 stream->bEndpointAddress, is_in ? "in" : "out",
620 ((jiffies - stream->start)/HZ)
/* NOTE(review): excerpt — the refcount increment and return are elided;
 * code kept byte-identical.
 * Takes an additional reference on a (possibly NULL) iso_stream.
 */
628 static inline struct ehci_iso_stream *
629 iso_stream_get (struct ehci_iso_stream *stream)
631 if (likely (stream != 0))
/* NOTE(review): excerpt — local declarations (epnum, dev, flags), the
 * IN-endpoint index adjustment body, trailing iso_stream_init() arguments,
 * the not-iso error handling, and the return are elided; code kept
 * byte-identical.
 * Looks up (or lazily creates) the iso_stream for a URB's endpoint under
 * ehci->lock, rejecting endpoints whose dev->ep slot already holds a QH.
 */
636 static struct ehci_iso_stream *
637 iso_stream_find (struct ehci_hcd *ehci, struct urb *urb)
641 struct ehci_iso_stream *stream;
644 epnum = usb_pipeendpoint (urb->pipe);
645 if (usb_pipein(urb->pipe))
648 spin_lock_irqsave (&ehci->lock, flags);
650 dev = (struct hcd_dev *)urb->dev->hcpriv;
651 stream = dev->ep [epnum];
653 if (unlikely (stream == 0)) {
654 stream = iso_stream_alloc(GFP_ATOMIC);
655 if (likely (stream != 0)) {
656 /* dev->ep owns the initial refcount */
657 dev->ep[epnum] = stream;
658 iso_stream_init(stream, urb->dev, urb->pipe,
662 /* if dev->ep [epnum] is a QH, info1.maxpacket is nonzero */
663 } else if (unlikely (stream->hw_info1 != 0)) {
664 ehci_dbg (ehci, "dev %s ep%d%s, not iso??\n",
665 urb->dev->devpath, epnum & 0x0f,
666 (epnum & 0x10) ? "in" : "out");
670 /* caller guarantees an eventual matching iso_stream_put */
671 stream = iso_stream_get (stream);
673 spin_unlock_irqrestore (&ehci->lock, flags);
677 /*-------------------------------------------------------------------------*/
/* NOTE(review): excerpt — the closing brace and return are elided; code kept
 * byte-identical.
 * Allocates the per-URB scheduling scratchpad: an itd_sched header plus one
 * ehci_iso_uframe slot per iso packet, zeroed.
 */
679 static inline struct ehci_itd_sched *
680 itd_sched_alloc (unsigned packets, int mem_flags)
682 struct ehci_itd_sched *itd_sched;
683 int size = sizeof *itd_sched;
685 size += packets * sizeof (struct ehci_iso_uframe);
686 itd_sched = kmalloc (size, mem_flags);
687 if (likely (itd_sched != 0)) {
688 memset(itd_sched, 0, size);
689 INIT_LIST_HEAD (&itd_sched->itd_list);
/* NOTE(review): excerpt — the function name (presumably itd_sched_init, per
 * the itd_urb_transaction() caller — TODO confirm), the urb parameter, local
 * declarations (i, length, buf, trans), the page-cross handling after the
 * final if, and the return are elided; code kept byte-identical.
 * Precomputes the per-packet iTD transaction words and buffer-page bases
 * from the URB's iso_frame_desc[] entries; IOC is set on the last packet.
 */
696 struct ehci_itd_sched *itd_sched,
697 struct ehci_iso_stream *stream,
702 dma_addr_t dma = urb->transfer_dma;
704 /* how many uframes are needed for these transfers */
705 itd_sched->span = urb->number_of_packets * stream->interval;
707 /* figure out per-uframe itd fields that we'll need later
708 * when we fit new itds into the schedule.
710 for (i = 0; i < urb->number_of_packets; i++) {
711 struct ehci_iso_uframe *uframe = &itd_sched->packet [i];
716 length = urb->iso_frame_desc [i].length;
717 buf = dma + urb->iso_frame_desc [i].offset;
719 trans = EHCI_ISOC_ACTIVE;
/* low 12 bits carry the within-page offset */
720 trans |= buf & 0x0fff;
721 if (unlikely ((i + 1) == urb->number_of_packets))
722 trans |= EHCI_ITD_IOC;
723 trans |= length << 16;
724 uframe->transaction = cpu_to_le32 (trans);
726 /* might need to cross a buffer page within a td */
727 uframe->bufp = (buf & ~(u64)0x0fff);
/* flag packets whose end falls on a different 4K page than their start — TODO confirm (rhs elided) */
729 if (unlikely ((uframe->bufp != (buf & ~(u64)0x0fff))))
/* NOTE(review): excerpt — the function name (presumably itd_sched_free, per
 * its callers — TODO confirm) and the kfree of itd_sched are elided; code
 * kept byte-identical.
 * Returns the itd_sched's iTDs to the stream's free list for reuse.
 */
737 struct ehci_iso_stream *stream,
738 struct ehci_itd_sched *itd_sched
741 list_splice (&itd_sched->itd_list, &stream->free_itd_list);
/* NOTE(review): excerpt — return type, parameters urb/mem_flags, locals
 * (status, i, num_itds, itd_dma), error returns, and closing braces are
 * elided; code kept byte-identical.
 * Prepares everything for an iso URB that doesn't need the schedule lock:
 * builds the itd_sched, then allocates enough iTDs (reusing the stream's
 * free list first, else the pci pool) and parks the lot in urb->hcpriv.
 */
746 itd_urb_transaction (
747 struct ehci_iso_stream *stream,
748 struct ehci_hcd *ehci,
753 struct ehci_itd *itd;
758 struct ehci_itd_sched *itd_sched;
760 itd_sched = itd_sched_alloc (urb->number_of_packets, mem_flags);
761 if (unlikely (itd_sched == 0))
764 status = itd_sched_init (itd_sched, stream, urb);
765 if (unlikely (status != 0)) {
766 itd_sched_free (stream, itd_sched);
/* interval < 8 uframes: packets share iTDs, so size by span; else one iTD per packet */
770 if (urb->interval < 8)
771 num_itds = 1 + (itd_sched->span + 7) / 8;
773 num_itds = urb->number_of_packets;
775 /* allocate/init ITDs */
776 for (i = 0; i < num_itds; i++) {
778 /* free_itd_list.next might be cache-hot ... but maybe
779 * the HC caches it too. avoid that issue for now.
782 /* prefer previously-allocated itds */
783 if (likely (!list_empty(&stream->free_itd_list))) {
784 itd = list_entry (stream->free_itd_list.prev,
785 struct ehci_itd, itd_list);
786 list_del (&itd->itd_list);
787 itd_dma = itd->itd_dma;
789 itd = pci_pool_alloc (ehci->itd_pool, mem_flags,
792 if (unlikely (0 == itd)) {
793 itd_sched_free (stream, itd_sched);
796 memset (itd, 0, sizeof *itd);
797 itd->itd_dma = itd_dma;
798 list_add (&itd->itd_list, &itd_sched->itd_list);
801 /* temporarily store schedule info in hcpriv */
802 urb->hcpriv = itd_sched;
803 urb->error_count = 0;
808 * This scheduler plans almost as far into the future as it has actual
809 * periodic schedule slots. (Affected by TUNE_FLS, which defaults to
810 * "as small as possible" to be cache-friendlier.) That limits the size
811 * transfers you can stream reliably; avoid more than 64 msec per urb.
812 * Also avoid queue depths of less than the system's worst irq latency.
815 #define SCHEDULE_SLOP 10 /* frames */
/* NOTE(review): excerpt — return type, the urb parameter, the initialization
 * of max/end, the bandwidth-check loop head, goto labels, and the error
 * return are elided; code kept byte-identical.
 * Picks the starting uframe for an iso URB. Fast path: append at
 * stream->next_uframe when the stream is active and hasn't fallen behind.
 * Slow path: scan [start..max] for a slot where every occupied uframe stays
 * within the 80% (100us) periodic budget, counting restarts of an active
 * stream as "rescheduled".
 */
818 itd_stream_schedule (
819 struct ehci_hcd *ehci,
821 struct ehci_iso_stream *stream
824 u32 now, start, end, max;
826 unsigned mod = ehci->periodic_size << 3;
827 struct ehci_itd_sched *itd_sched = urb->hcpriv;
829 if (unlikely (itd_sched->span > (mod - 8 * SCHEDULE_SLOP))) {
830 ehci_dbg (ehci, "iso request %p too long\n", urb);
835 now = readl (&ehci->regs->frame_index) % mod;
837 /* when's the last uframe this urb could start? */
839 max -= itd_sched->span;
840 max -= 8 * SCHEDULE_SLOP;
842 /* typical case: reuse current schedule. stream is still active,
843 * and no gaps from host falling behind (irq delays etc)
845 if (likely (!list_empty (&stream->itd_list))) {
847 start = stream->next_uframe;
850 if (likely (start < max))
854 * (a) we missed some uframes ... can reschedule
855 * (b) trying to overcommit the schedule
856 * FIXME (b) should be a hard failure
860 /* need to schedule; when's the next (u)frame we could start?
861 * this is bigger than ehci->i_thresh allows; scheduling itself
862 * isn't free, the slop should handle reasonably slow cpus. it
863 * can also help high bandwidth if the dma and irq loads don't
864 * jump until after the queue is primed.
866 start = SCHEDULE_SLOP * 8 + (now & ~0x07);
869 ehci_vdbg (ehci, "%s schedule from %d (%d..%d), was %d\n",
870 __FUNCTION__, now, start, max,
871 stream->next_uframe);
873 /* NOTE: assumes URB_ISO_ASAP, to limit complexity/bugs */
875 if (likely (max > (start + urb->interval)))
876 max = start + urb->interval;
878 /* hack: account for itds already scheduled to this endpoint */
879 if (unlikely (list_empty (&stream->itd_list)))
882 /* within [start..max] find a uframe slot with enough bandwidth */
886 int enough_space = 1;
888 /* check schedule: enough space? */
893 /* can't commit more than 80% periodic == 100 usec */
894 if (periodic_usecs (ehci, uframe >> 3, uframe & 0x7)
895 > (100 - stream->usecs)) {
900 /* we know urb->interval is 2^N uframes */
901 uframe += urb->interval;
902 } while (uframe != end);
904 /* (re)schedule it here if there's enough bandwidth */
907 if (unlikely (!list_empty (&stream->itd_list))) {
908 /* host fell behind ... maybe irq latencies
909 * delayed this request queue for too long.
911 stream->rescheduled++;
912 dev_dbg (&urb->dev->dev,
913 "iso%d%s %d.%d skip %d.%d\n",
914 stream->bEndpointAddress & 0x0f,
915 (stream->bEndpointAddress & USB_DIR_IN)
917 stream->next_uframe >> 3,
918 stream->next_uframe & 0x7,
919 start >> 3, start & 0x7);
921 stream->next_uframe = start;
925 } while (++start < max);
927 /* no room in the schedule */
928 ehci_dbg (ehci, "iso %ssched full %p (now %d end %d max %d)\n",
929 list_empty (&stream->itd_list) ? "" : "re",
/* failure path: discard prepared iTDs back to the stream */
934 itd_sched_free (stream, itd_sched);
939 urb->start_frame = stream->next_uframe;
943 /*-------------------------------------------------------------------------*/
/* NOTE(review): excerpt — return type, the local i declaration, and the
 * per-uframe clearing inside the for loop are elided; code kept
 * byte-identical.
 * Seeds a freshly-assigned iTD with the stream's precomputed buffer-pointer
 * words and an end-of-list hw_next.
 */
946 itd_init (struct ehci_iso_stream *stream, struct ehci_itd *itd)
950 itd->hw_next = EHCI_LIST_END;
951 itd->hw_bufp [0] = stream->buf0;
952 itd->hw_bufp [1] = stream->buf1;
953 itd->hw_bufp [2] = stream->buf2;
955 for (i = 0; i < 8; i++)
958 /* All other fields are filled when scheduling */
/* NOTE(review): excerpt — the function name (presumably itd_patch, per the
 * itd_link_urb() caller — TODO confirm), parameters index/uframe/first, and
 * the page-increment bookkeeping after a cross are elided; code kept
 * byte-identical.
 * Fills one microframe slot of an iTD from itd_sched->packet[index]:
 * transaction word plus page-select bits, advancing to the next buffer page
 * when the packet crosses a 4K boundary.
 */
963 struct ehci_itd *itd,
964 struct ehci_itd_sched *itd_sched,
970 struct ehci_iso_uframe *uf = &itd_sched->packet [index];
971 unsigned pg = itd->pg;
973 // BUG_ON (pg == 6 && uf->cross);
976 itd->index [uframe] = index;
978 itd->hw_transaction [uframe] = uf->transaction;
/* PG field (bits 12..14) selects which hw_bufp page this uframe uses */
979 itd->hw_transaction [uframe] |= cpu_to_le32 (pg << 12);
980 itd->hw_bufp [pg] |= cpu_to_le32 (uf->bufp & ~(u32)0);
981 itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(uf->bufp >> 32));
983 /* iso_frame_desc[].offset must be strictly increasing */
984 if (unlikely (!first && uf->cross)) {
985 u64 bufp = uf->bufp + 4096;
987 itd->hw_bufp [pg] |= cpu_to_le32 (bufp & ~(u32)0);
988 itd->hw_bufp_hi [pg] |= cpu_to_le32 ((u32)(bufp >> 32));
/* NOTE(review): excerpt — return type and any intermediate statements
 * (e.g. a write barrier before publishing to hardware — TODO confirm) are
 * elided; code kept byte-identical.
 * Prepends an iTD to a frame's shadow and hardware lists; the hardware
 * pointer is published last.
 */
993 itd_link (struct ehci_hcd *ehci, unsigned frame, struct ehci_itd *itd)
995 /* always prepend ITD/SITD ... only QH tree is order-sensitive */
996 itd->itd_next = ehci->pshadow [frame];
997 itd->hw_next = ehci->periodic [frame];
998 ehci->pshadow [frame].itd = itd;
1001 ehci->periodic [frame] = cpu_to_le32 (itd->itd_dma) | Q_TYPE_ITD;
1004 /* fit urb's itds into the selected schedule slot; activate as needed */
/* NOTE(review): excerpt — the function name (presumably itd_link_urb, per
 * itd_submit() — TODO confirm), the urb/mod parameters, packet-advance
 * statements, and closing braces are elided; code kept byte-identical.
 * Distributes the URB's packets across its iTDs microframe by microframe,
 * linking each completed iTD into the periodic schedule frame it serves;
 * first-time streams also account usbfs bandwidth. May be the first
 * periodic entry, in which case the periodic schedule is enabled.
 */
1007 struct ehci_hcd *ehci,
1010 struct ehci_iso_stream *stream
1013 int packet, first = 1;
1014 unsigned next_uframe, uframe, frame;
1015 struct ehci_itd_sched *itd_sched = urb->hcpriv;
1016 struct ehci_itd *itd;
1018 next_uframe = stream->next_uframe % mod;
1020 if (unlikely (list_empty(&stream->itd_list))) {
1021 hcd_to_bus (&ehci->hcd)->bandwidth_allocated
1022 += stream->bandwidth;
1024 "schedule devp %s ep%d%s-iso period %d start %d.%d\n",
1025 urb->dev->devpath, stream->bEndpointAddress & 0x0f,
1026 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out",
1028 next_uframe >> 3, next_uframe & 0x7);
1029 stream->start = jiffies;
1031 hcd_to_bus (&ehci->hcd)->bandwidth_isoc_reqs++;
1033 /* fill iTDs uframe by uframe */
1034 for (packet = 0, itd = 0; packet < urb->number_of_packets; ) {
1036 /* ASSERT: we have all necessary itds */
1037 // BUG_ON (list_empty (&itd_sched->itd_list));
1039 /* ASSERT: no itds for this endpoint in this uframe */
1041 itd = list_entry (itd_sched->itd_list.next,
1042 struct ehci_itd, itd_list);
/* hand ownership from the scratch list to the live stream list */
1043 list_move_tail (&itd->itd_list, &stream->itd_list);
1044 itd->stream = iso_stream_get (stream);
1045 itd->urb = usb_get_urb (urb);
1047 itd_init (stream, itd);
1050 uframe = next_uframe & 0x07;
1051 frame = next_uframe >> 3;
1053 itd->usecs [uframe] = stream->usecs;
1054 itd_patch (itd, itd_sched, packet, uframe, first);
1057 next_uframe += stream->interval;
1061 /* link completed itds into the schedule */
1062 if (((next_uframe >> 3) != frame)
1063 || packet == urb->number_of_packets) {
1064 itd_link (ehci, frame % ehci->periodic_size, itd);
1068 stream->next_uframe = next_uframe;
1070 /* don't need that schedule data any more */
1071 itd_sched_free (stream, itd_sched);
1074 if (unlikely (!ehci->periodic_sched++))
1075 return enable_periodic (ehci);
1079 #define ISO_ERRS (EHCI_ISOC_BUF_ERR | EHCI_ISOC_BABBLE | EHCI_ISOC_XACTERR)
/* NOTE(review): excerpt — the function name and return type (presumably
 * "static unsigned itd_complete", per the scan_periodic() caller — TODO
 * confirm), local declarations (uframe, t, urb_index, count), continue
 * statements, goto labels, the usb_put_dev/return tail, and closing braces
 * are elided; code kept byte-identical.
 * Per-iTD completion: translates each finished microframe's status bits
 * into iso_frame_desc status/actual_length, recycles the iTD, and — on the
 * URB's final packet — gives the URB back and updates schedule/bandwidth
 * accounting.
 */
1083 struct ehci_hcd *ehci,
1084 struct ehci_itd *itd,
1085 struct pt_regs *regs
1087 struct urb *urb = itd->urb;
1088 struct usb_iso_packet_descriptor *desc;
1092 struct ehci_iso_stream *stream = itd->stream;
1093 struct usb_device *dev;
1095 /* for each uframe with a packet */
1096 for (uframe = 0; uframe < 8; uframe++) {
/* index == -1 marks an unused microframe slot */
1097 if (likely (itd->index[uframe] == -1))
1099 urb_index = itd->index[uframe];
1100 desc = &urb->iso_frame_desc [urb_index];
1102 t = le32_to_cpup (&itd->hw_transaction [uframe]);
1103 itd->hw_transaction [uframe] = 0;
1105 /* report transfer status */
1106 if (unlikely (t & ISO_ERRS)) {
1108 if (t & EHCI_ISOC_BUF_ERR)
1109 desc->status = usb_pipein (urb->pipe)
1110 ? -ENOSR /* hc couldn't read */
1111 : -ECOMM; /* hc couldn't write */
1112 else if (t & EHCI_ISOC_BABBLE)
1113 desc->status = -EOVERFLOW;
1114 else /* (t & EHCI_ISOC_XACTERR) */
1115 desc->status = -EPROTO;
1117 /* HC need not update length with this error */
1118 if (!(t & EHCI_ISOC_BABBLE))
1119 desc->actual_length = EHCI_ITD_LENGTH (t);
1120 } else if (likely ((t & EHCI_ISOC_ACTIVE) == 0)) {
1122 desc->actual_length = EHCI_ITD_LENGTH (t);
/* recycle this iTD; drop the reference it held on the stream */
1129 list_move (&itd->itd_list, &stream->free_itd_list);
1130 iso_stream_put (ehci, stream);
1132 /* handle completion now? */
1133 if (likely ((urb_index + 1) != urb->number_of_packets))
1136 /* ASSERT: it's really the last itd for this urb
1137 list_for_each_entry (itd, &stream->itd_list, itd_list)
1138 BUG_ON (itd->urb == urb);
1141 /* give urb back to the driver ... can be out-of-order */
1142 dev = usb_get_dev (urb->dev);
1143 ehci_urb_done (ehci, urb, regs);
1146 /* defer stopping schedule; completion can submit */
1147 ehci->periodic_sched--;
1148 if (unlikely (!ehci->periodic_sched))
1149 (void) disable_periodic (ehci);
1150 hcd_to_bus (&ehci->hcd)->bandwidth_isoc_reqs--;
1152 if (unlikely (list_empty (&stream->itd_list))) {
1153 hcd_to_bus (&ehci->hcd)->bandwidth_allocated
1154 -= stream->bandwidth;
1156 "deschedule devp %s ep%d%s-iso\n",
1157 dev->devpath, stream->bEndpointAddress & 0x0f,
1158 (stream->bEndpointAddress & USB_DIR_IN) ? "in" : "out");
1160 iso_stream_put (ehci, stream);
1166 /*-------------------------------------------------------------------------*/
/* NOTE(review): excerpt — goto labels/early returns after the error dbg
 * messages, parts of the EHCI_URB_TRACE dbg call, and the final return are
 * elided; code kept byte-identical.
 * Top-level iso submission: find/take the endpoint's iso_stream, reject
 * interval changes, allocate iTDs lock-free, then schedule and link under
 * ehci->lock; drops the stream reference on any failure.
 */
1168 static int itd_submit (struct ehci_hcd *ehci, struct urb *urb, int mem_flags)
1170 int status = -EINVAL;
1171 unsigned long flags;
1172 struct ehci_iso_stream *stream;
1174 /* Get iso_stream head */
1175 stream = iso_stream_find (ehci, urb);
1176 if (unlikely (stream == 0)) {
1177 ehci_dbg (ehci, "can't get iso stream\n");
1180 if (unlikely (urb->interval != stream->interval)) {
1181 ehci_dbg (ehci, "can't change iso interval %d --> %d\n",
1182 stream->interval, urb->interval);
1186 #ifdef EHCI_URB_TRACE
1188 "%s %s urb %p ep%d%s len %d, %d pkts %d uframes [%p]\n",
1189 __FUNCTION__, urb->dev->devpath, urb,
1190 usb_pipeendpoint (urb->pipe),
1191 usb_pipein (urb->pipe) ? "in" : "out",
1192 urb->transfer_buffer_length,
1193 urb->number_of_packets, urb->interval,
1197 /* allocate ITDs w/o locking anything */
1198 status = itd_urb_transaction (stream, ehci, urb, mem_flags);
1199 if (unlikely (status < 0)) {
1200 ehci_dbg (ehci, "can't init itds\n");
1204 /* schedule ... need to lock */
1205 spin_lock_irqsave (&ehci->lock, flags);
1206 status = itd_stream_schedule (ehci, urb, stream);
1207 if (likely (status == 0))
1208 itd_link_urb (ehci, urb, ehci->periodic_size << 3, stream);
1209 spin_unlock_irqrestore (&ehci->lock, flags);
1212 if (unlikely (status < 0))
1213 iso_stream_put (ehci, stream);
1217 #ifdef have_split_iso
1219 /*-------------------------------------------------------------------------*/
1222 * "Split ISO TDs" ... used for USB 1.1 devices going through
1223 * the TTs in USB 2.0 hubs.
1225 * FIXME not yet implemented
1228 #endif /* have_split_iso */
1230 /*-------------------------------------------------------------------------*/
1233 scan_periodic (struct ehci_hcd *ehci, struct pt_regs *regs)
1235 unsigned frame, clock, now_uframe, mod;
1238 mod = ehci->periodic_size << 3;
1241 * When running, scan from last scan point up to "now"
1242 * else clean up by scanning everything that's left.
1243 * Touches as few pages as possible: cache-friendly.
1245 now_uframe = ehci->next_uframe;
1246 if (HCD_IS_RUNNING (ehci->hcd.state))
1247 clock = readl (&ehci->regs->frame_index) % mod;
1249 clock = now_uframe + mod - 1;
1252 union ehci_shadow q, *q_p;
1256 frame = now_uframe >> 3;
1258 /* scan schedule to _before_ current frame index */
1259 if ((frame == (clock >> 3))
1260 && HCD_IS_RUNNING (ehci->hcd.state))
1261 uframes = now_uframe & 0x07;
1265 q_p = &ehci->pshadow [frame];
1266 hw_p = &ehci->periodic [frame];
1268 type = Q_NEXT_TYPE (*hw_p);
1270 /* scan each element in frame's queue for completions */
1271 while (q.ptr != 0) {
1274 union ehci_shadow temp;
1278 last = (q.qh->hw_next == EHCI_LIST_END);
1279 temp = q.qh->qh_next;
1280 type = Q_NEXT_TYPE (q.qh->hw_next);
1281 count += intr_complete (ehci, frame,
1282 qh_get (q.qh), regs);
1283 qh_put (ehci, q.qh);
1287 last = (q.fstn->hw_next == EHCI_LIST_END);
1288 /* for "save place" FSTNs, look at QH entries
1289 * in the previous frame for completions.
1291 if (q.fstn->hw_prev != EHCI_LIST_END) {
1292 dbg ("ignoring completions from FSTNs");
1294 type = Q_NEXT_TYPE (q.fstn->hw_next);
1295 q = q.fstn->fstn_next;
1298 last = (q.itd->hw_next == EHCI_LIST_END);
1300 /* skip itds for later in the frame */
1302 for (uf = uframes; uf < 8; uf++) {
1303 if (0 == (q.itd->hw_transaction [uf]
1306 q_p = &q.itd->itd_next;
1307 hw_p = &q.itd->hw_next;
1308 type = Q_NEXT_TYPE (q.itd->hw_next);
1315 /* this one's ready ... HC won't cache the
1316 * pointer for much longer, if at all.
1318 *q_p = q.itd->itd_next;
1319 *hw_p = q.itd->hw_next;
1322 /* always rescan here; simpler */
1323 count += itd_complete (ehci, q.itd, regs);
1325 #ifdef have_split_iso
1327 last = (q.sitd->hw_next == EHCI_LIST_END);
1328 sitd_complete (ehci, q.sitd);
1329 type = Q_NEXT_TYPE (q.sitd->hw_next);
1331 // FIXME unlink SITD after split completes
1332 q = q.sitd->sitd_next;
1334 #endif /* have_split_iso */
1336 dbg ("corrupt type %d frame %d shadow %p",
1337 type, frame, q.ptr);
1343 /* did completion remove an interior q entry? */
1344 if (unlikely (q.ptr == 0 && !last))
1348 /* stop when we catch up to the HC */
1350 // FIXME: this assumes we won't get lapped when
1351 // latencies climb; that should be rare, but...
1352 // detect it, and just go all the way around.
1353 // FLR might help detect this case, so long as latencies
1354 // don't exceed periodic_size msec (default 1.024 sec).
1356 // FIXME: likewise assumes HC doesn't halt mid-scan
1358 if (now_uframe == clock) {
1361 if (!HCD_IS_RUNNING (ehci->hcd.state))
1363 ehci->next_uframe = now_uframe;
1364 now = readl (&ehci->regs->frame_index) % mod;
1365 if (now_uframe == now)
1368 /* rescan the rest of this frame, then ... */
1371 /* FIXME sometimes we can scan the next frame
1372 * right away, not always inching up on it ...