2 drivers/net/tulip/interrupt.c
4 Maintained by Jeff Garzik <jgarzik@pobox.com>
5 Copyright 2000,2001 The Linux Kernel Team
6 Written/copyright 1994-2001 by Donald Becker.
8 This software may be used and distributed according to the terms
9 of the GNU General Public License, incorporated herein by reference.
11 Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
12 for more information on this driver, or visit the project
13 Web page at http://sourceforge.net/projects/tulip/
18 #include <linux/config.h>
19 #include <linux/etherdevice.h>
20 #include <linux/pci.h>
/* Driver-wide tunables: Rx copy-vs-flip threshold (packets shorter than
 * this are copied into a fresh small skb instead of handing up the ring
 * buffer) and the per-interrupt work bound.  Presumably initialized
 * elsewhere in the driver (e.g. module parameters) -- not set here. */
22 int tulip_rx_copybreak;
23 unsigned int tulip_max_interrupt_work;
25 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
27 #define MIT_TABLE 15 /* We use 0 or max */
/* Interrupt-mitigation values written to CSR11.  Only the two extremes
 * are actually used: entry 0 (mitigation off) and entry MIT_TABLE (max).
 * NOTE(review): several intermediate table entries are elided in this
 * excerpt. */
29 unsigned int mit_table[MIT_SIZE+1] =
31 /* CRS11 21143 hardware Mitigation Control Interrupt
32 We use only RX mitigation we other techniques for
35 31 Cycle Size (timer control)
36 30:27 TX timer in 16 * Cycle size
37 26:24 TX No pkts before Int.
38 23:20 RX timer in Cycle size
39 19:17 RX No pkts before Int.
40 16 Continuous Mode (CM)
43 0x0, /* IM disabled */
44 0x80150000, /* RX time = 1, RX pkts = 2, CM = 1 */
58 // 0x80FF0000 /* RX time = 16, RX pkts = 7, CM = 1 */
59 0x80F10000 /* RX time = 16, RX pkts = 0, CM = 1 */
/*
 * tulip_refill_rx() - repopulate empty Rx ring slots with fresh skbs.
 *
 * Walks the ring from dirty_rx up to cur_rx; for every slot whose skb is
 * NULL it allocates a PKT_BUF_SZ skb, creates a PCI DMA mapping for it,
 * and only then hands the descriptor back to the NIC via DescOwned.
 * NOTE(review): the function's locals, return value and closing braces
 * are elided in this excerpt -- behavior of the missing lines is not
 * documented here.
 */
64 int tulip_refill_rx(struct net_device *dev)
66 struct tulip_private *tp = (struct tulip_private *)dev->priv;
70 /* Refill the Rx ring buffers. */
71 for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
72 entry = tp->dirty_rx % RX_RING_SIZE;
73 if (tp->rx_buffers[entry].skb == NULL) {
77 skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
81 mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
83 tp->rx_buffers[entry].mapping = mapping;
85 skb->dev = dev; /* Mark as being used by this device. */
86 tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
/* Give the descriptor back to the chip only after buffer1 is valid. */
89 tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
91 if(tp->chip_id == LC82C168) {
/* CSR5 bits 19:17 hold the Rx process state; 4 == stopped. */
92 if(((inl(dev->base_addr + CSR5)>>17)&0x07) == 4) {
93 /* Rx stopped due to out of buffers,
/* Presumably the Rx poll-demand register -- kick the PNIC's
 * receiver back into life.  TODO(review): confirm CSR2 semantics
 * against the LC82C168 datasheet. */
96 outl(0x01, dev->base_addr + CSR2);
102 #ifdef CONFIG_TULIP_NAPI
/*
 * oom_timer() - Rx-buffer-starvation retry timer (NAPI build only).
 *
 * Armed by tulip_poll()'s oom path when the Rx ring could not be
 * refilled; simply re-enters NAPI polling so the refill can be retried.
 * NOTE(review): the closing brace is elided in this excerpt.
 */
104 void oom_timer(unsigned long data)
106 struct net_device *dev = (struct net_device *)data;
107 netif_rx_schedule(dev);
/*
 * tulip_poll() - NAPI poll callback: drain the Rx ring within *budget.
 *
 * Walks descriptors the chip has released (DescOwned clear), validates
 * status bits, delivers good frames via netif_receive_skb(), refills the
 * ring, and either re-enables Rx interrupts (done) or jumps to the oom
 * path (ring could not be refilled).  Optionally toggles hardware
 * interrupt mitigation via CSR11.
 * NOTE(review): many lines (locals, braces, gotos, return statements)
 * are elided in this excerpt.
 */
110 int tulip_poll(struct net_device *dev, int *budget)
112 struct tulip_private *tp = (struct tulip_private *)dev->priv;
113 int entry = tp->cur_rx % RX_RING_SIZE;
114 int rx_work_limit = *budget;
117 if (!netif_running(dev))
120 if (rx_work_limit > dev->quota)
121 rx_work_limit = dev->quota;
123 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
125 /* that one buffer is needed for mit activation; or might be a
126 bug in the ring buffer code; check later -- JHS*/
128 if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--;
132 printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
133 tp->rx_ring[entry].status);
136 /* Acknowledge current RX interrupt sources. */
137 outl((RxIntr | RxNoBuf), dev->base_addr + CSR5);
140 /* If we own the next entry, it is a new packet. Send it up. */
141 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
142 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
145 if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
149 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
150 dev->name, entry, status);
151 if (--rx_work_limit < 0)
/* Status sanity: error-summary / first+last-descriptor bit pattern. */
154 if ((status & 0x38008300) != 0x0300) {
155 if ((status & 0x38000300) != 0x0300) {
156 /* Ignore earlier buffers. */
157 if ((status & 0xffff) != 0x7fff) {
159 printk(KERN_WARNING "%s: Oversized Ethernet frame "
160 "spanned multiple buffers, status %8.8x!\n",
162 tp->stats.rx_length_errors++;
164 } else if (status & RxDescFatalErr) {
165 /* There was a fatal error. */
167 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
169 tp->stats.rx_errors++; /* end of a packet.*/
170 if (status & 0x0890) tp->stats.rx_length_errors++;
171 if (status & 0x0004) tp->stats.rx_frame_errors++;
172 if (status & 0x0002) tp->stats.rx_crc_errors++;
173 if (status & 0x0001) tp->stats.rx_fifo_errors++;
176 /* Omit the four octet CRC from the length. */
177 short pkt_len = ((status >> 16) & 0x7ff) - 4;
180 #ifndef final_version
181 if (pkt_len > 1518) {
182 printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
183 dev->name, pkt_len, pkt_len);
185 tp->stats.rx_length_errors++;
188 /* Check if the packet is long enough to accept without copying
189 to a minimally-sized skbuff. */
190 if (pkt_len < tulip_rx_copybreak
191 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
193 skb_reserve(skb, 2); /* 16 byte align the IP header */
194 pci_dma_sync_single(tp->pdev,
195 tp->rx_buffers[entry].mapping,
196 pkt_len, PCI_DMA_FROMDEVICE);
197 #if ! defined(__alpha__)
198 eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
200 skb_put(skb, pkt_len);
202 memcpy(skb_put(skb, pkt_len),
203 tp->rx_buffers[entry].skb->tail,
206 } else { /* Pass up the skb already on the Rx ring. */
207 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
210 #ifndef final_version
211 if (tp->rx_buffers[entry].mapping !=
212 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
213 printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
214 "do not match in tulip_rx: %08x vs. %llx %p / %p.\n",
216 le32_to_cpu(tp->rx_ring[entry].buffer1),
217 (unsigned long long)tp->rx_buffers[entry].mapping,
222 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
223 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
225 tp->rx_buffers[entry].skb = NULL;
226 tp->rx_buffers[entry].mapping = 0;
228 skb->protocol = eth_type_trans(skb, dev);
230 netif_receive_skb(skb);
232 dev->last_rx = jiffies;
233 tp->stats.rx_packets++;
234 tp->stats.rx_bytes += pkt_len;
238 entry = (++tp->cur_rx) % RX_RING_SIZE;
/* Opportunistic refill once a quarter of the ring has drained. */
239 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
240 tulip_refill_rx(dev);
244 /* New ack strategy... irq does not ack Rx any longer
245 hopefully this helps */
247 /* Really bad things can happen here... If new packet arrives
248 * and an irq arrives (tx or just due to occasionally unset
249 * mask), it will be acked by irq handler, but new thread
250 * is not scheduled. It is major hole in design.
251 * No idea how to fix this if "playing with fire" will fail
252 * tomorrow (night 011029). If it will not fail, we won
253 * finally: amount of IO did not increase at all. */
254 } while ((inl(dev->base_addr + CSR5) & RxIntr));
258 #ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
260 /* We use this simplistic scheme for IM. It's proven by
261 real life installations. We can have IM enabled
262 continuously but this would cause unnecessary latency.
263 Unfortunately we can't use all the NET_RX_* feedback here.
264 This would turn on IM for devices that is not contributing
265 to backlog congestion with unnecessary latency.
267 We monitor the device RX-ring and have:
269 HW Interrupt Mitigation either ON or OFF.
271 ON: More than 1 pkt received (per intr.) OR we are dropping
272 OFF: Only 1 pkt received
274 Note. We only use min and max (0, 15) settings from mit_table */
277 if( tp->flags & HAS_INTR_MITIGATION) {
281 outl(mit_table[MIT_TABLE], dev->base_addr + CSR11);
287 outl(0, dev->base_addr + CSR11);
292 #endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */
294 dev->quota -= received;
297 tulip_refill_rx(dev);
299 /* If RX ring is not full we are out of memory. */
300 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
302 /* Remove us from polling list and enable RX intr. */
304 netif_rx_complete(dev);
305 outl(tulip_tbl[tp->chip_id].valid_intrs, dev->base_addr+CSR7);
307 /* The last op happens after poll completion. Which means the following:
308 * 1. it can race with disabling irqs in irq handler
309 * 2. it can race with dise/enabling irqs in other poll threads
310 * 3. if an irq raised after beginning loop, it will be immediately
313 * Summarizing: the logic results in some redundant irqs both
314 * due to races in masking and due to too late acking of already
315 * processed irqs. But it must not result in losing events.
323 received = dev->quota; /* Not to happen */
325 dev->quota -= received;
328 if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
329 tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
330 tulip_refill_rx(dev);
332 if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;
337 oom: /* Executed with RX ints disabled */
340 /* Start timer, stop polling, but do not enable rx interrupts. */
341 mod_timer(&tp->oom_timer, jiffies+1);
343 /* Think: timer_pending() was an explicit signature of bug.
344 * Timer can be pending now but fired and completed
345 * before we did netif_rx_complete(). See? We would lose it. */
347 /* remove ourselves from the polling list */
348 netif_rx_complete(dev);
353 #else /* CONFIG_TULIP_NAPI */
/*
 * tulip_rx() - non-NAPI Rx path, called from the interrupt handler.
 *
 * Same descriptor walk as tulip_poll(): drain chip-released descriptors
 * (DescOwned clear), account errors, deliver good frames, bounded by the
 * number of free ring slots.  NOTE(review): many lines (locals, braces,
 * netif_rx() call, return statement) are elided in this excerpt.
 */
355 static int tulip_rx(struct net_device *dev)
357 struct tulip_private *tp = (struct tulip_private *)dev->priv;
358 int entry = tp->cur_rx % RX_RING_SIZE;
/* Work bound = number of not-yet-dirty slots available in the ring. */
359 int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
363 printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
364 tp->rx_ring[entry].status);
365 /* If we own the next entry, it is a new packet. Send it up. */
366 while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
367 s32 status = le32_to_cpu(tp->rx_ring[entry].status);
370 printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
371 dev->name, entry, status);
372 if (--rx_work_limit < 0)
374 if ((status & 0x38008300) != 0x0300) {
375 if ((status & 0x38000300) != 0x0300) {
376 /* Ignore earlier buffers. */
377 if ((status & 0xffff) != 0x7fff) {
379 printk(KERN_WARNING "%s: Oversized Ethernet frame "
380 "spanned multiple buffers, status %8.8x!\n",
382 tp->stats.rx_length_errors++;
384 } else if (status & RxDescFatalErr) {
385 /* There was a fatal error. */
387 printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
389 tp->stats.rx_errors++; /* end of a packet.*/
390 if (status & 0x0890) tp->stats.rx_length_errors++;
391 if (status & 0x0004) tp->stats.rx_frame_errors++;
392 if (status & 0x0002) tp->stats.rx_crc_errors++;
393 if (status & 0x0001) tp->stats.rx_fifo_errors++;
396 /* Omit the four octet CRC from the length. */
397 short pkt_len = ((status >> 16) & 0x7ff) - 4;
400 #ifndef final_version
401 if (pkt_len > 1518) {
402 printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
403 dev->name, pkt_len, pkt_len);
405 tp->stats.rx_length_errors++;
409 /* Check if the packet is long enough to accept without copying
410 to a minimally-sized skbuff. */
411 if (pkt_len < tulip_rx_copybreak
412 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
414 skb_reserve(skb, 2); /* 16 byte align the IP header */
415 pci_dma_sync_single(tp->pdev,
416 tp->rx_buffers[entry].mapping,
417 pkt_len, PCI_DMA_FROMDEVICE);
418 #if ! defined(__alpha__)
419 eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
421 skb_put(skb, pkt_len);
423 memcpy(skb_put(skb, pkt_len),
424 tp->rx_buffers[entry].skb->tail,
427 } else { /* Pass up the skb already on the Rx ring. */
428 char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
431 #ifndef final_version
432 if (tp->rx_buffers[entry].mapping !=
433 le32_to_cpu(tp->rx_ring[entry].buffer1)) {
434 printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
435 "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
437 le32_to_cpu(tp->rx_ring[entry].buffer1),
438 (long long)tp->rx_buffers[entry].mapping,
443 pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
444 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
446 tp->rx_buffers[entry].skb = NULL;
447 tp->rx_buffers[entry].mapping = 0;
449 skb->protocol = eth_type_trans(skb, dev);
453 dev->last_rx = jiffies;
454 tp->stats.rx_packets++;
455 tp->stats.rx_bytes += pkt_len;
458 entry = (++tp->cur_rx) % RX_RING_SIZE;
462 #endif /* CONFIG_TULIP_NAPI */
/*
 * phy_interrupt() - service a PHY status-change interrupt via CSR12.
 *
 * Compares the low byte of CSR12 against the cached shadow copy; on a
 * change it writes bit 0x02 (presumably the irq-ack bit, per the inline
 * comments), updates the shadow, re-evaluates duplex under tp->lock,
 * then clears the ack bit again.  NOTE(review): the return statements
 * and closing brace are elided in this excerpt.
 */
464 static inline unsigned int phy_interrupt (struct net_device *dev)
467 int csr12 = inl(dev->base_addr + CSR12) & 0xff;
468 struct tulip_private *tp = (struct tulip_private *)dev->priv;
470 if (csr12 != tp->csr12_shadow) {
472 outl(csr12 | 0x02, dev->base_addr + CSR12);
473 tp->csr12_shadow = csr12;
474 /* do link change stuff */
475 spin_lock(&tp->lock);
476 tulip_check_duplex(dev);
477 spin_unlock(&tp->lock);
478 /* clear irq ack bit */
479 outl(csr12 & ~0x02, dev->base_addr + CSR12);
488 /* The interrupt handler does all of the Rx thread work and cleans up
489 after the Tx thread. */
490 irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
492 struct net_device *dev = (struct net_device *)dev_instance;
493 struct tulip_private *tp = (struct tulip_private *)dev->priv;
494 long ioaddr = dev->base_addr;
500 int maxrx = RX_RING_SIZE;
501 int maxtx = TX_RING_SIZE;
502 int maxoi = TX_RING_SIZE;
503 #ifdef CONFIG_TULIP_NAPI
508 unsigned int work_count = tulip_max_interrupt_work;
509 unsigned int handled = 0;
511 /* Let's see whether the interrupt really is for us */
512 csr5 = inl(ioaddr + CSR5);
514 if (tp->flags & HAS_PHY_IRQ)
515 handled = phy_interrupt (dev);
517 if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
518 return IRQ_RETVAL(handled);
524 #ifdef CONFIG_TULIP_NAPI
526 if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
528 /* Mask RX intrs and add the device to poll list. */
529 outl(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
530 netif_rx_schedule(dev);
532 if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
536 /* Acknowledge the interrupt sources we handle here ASAP
537 the poll function does Rx and RxNoBuf acking */
539 outl(csr5 & 0x0001ff3f, ioaddr + CSR5);
542 /* Acknowledge all of the current interrupt sources ASAP. */
543 outl(csr5 & 0x0001ffff, ioaddr + CSR5);
546 if (csr5 & (RxIntr | RxNoBuf)) {
548 tulip_refill_rx(dev);
551 #endif /* CONFIG_TULIP_NAPI */
554 printk(KERN_DEBUG "%s: interrupt csr5=%#8.8x new csr5=%#8.8x.\n",
555 dev->name, csr5, inl(dev->base_addr + CSR5));
558 if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
559 unsigned int dirty_tx;
561 spin_lock(&tp->lock);
563 for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
565 int entry = dirty_tx % TX_RING_SIZE;
566 int status = le32_to_cpu(tp->tx_ring[entry].status);
569 break; /* It still has not been Txed */
571 /* Check for Rx filter setup frames. */
572 if (tp->tx_buffers[entry].skb == NULL) {
573 /* test because dummy frames not mapped */
574 if (tp->tx_buffers[entry].mapping)
575 pci_unmap_single(tp->pdev,
576 tp->tx_buffers[entry].mapping,
577 sizeof(tp->setup_frame),
582 if (status & 0x8000) {
583 /* There was an major error, log it. */
584 #ifndef final_version
586 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
589 tp->stats.tx_errors++;
590 if (status & 0x4104) tp->stats.tx_aborted_errors++;
591 if (status & 0x0C00) tp->stats.tx_carrier_errors++;
592 if (status & 0x0200) tp->stats.tx_window_errors++;
593 if (status & 0x0002) tp->stats.tx_fifo_errors++;
594 if ((status & 0x0080) && tp->full_duplex == 0)
595 tp->stats.tx_heartbeat_errors++;
597 tp->stats.tx_bytes +=
598 tp->tx_buffers[entry].skb->len;
599 tp->stats.collisions += (status >> 3) & 15;
600 tp->stats.tx_packets++;
603 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
604 tp->tx_buffers[entry].skb->len,
607 /* Free the original skb. */
608 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
609 tp->tx_buffers[entry].skb = NULL;
610 tp->tx_buffers[entry].mapping = 0;
614 #ifndef final_version
615 if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
616 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
617 dev->name, dirty_tx, tp->cur_tx);
618 dirty_tx += TX_RING_SIZE;
622 if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
623 netif_wake_queue(dev);
625 tp->dirty_tx = dirty_tx;
628 printk(KERN_WARNING "%s: The transmitter stopped."
629 " CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
630 dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
631 tulip_restart_rxtx(tp);
633 spin_unlock(&tp->lock);
637 if (csr5 & AbnormalIntr) { /* Abnormal error summary bit. */
638 if (csr5 == 0xffffffff)
640 if (csr5 & TxJabber) tp->stats.tx_errors++;
641 if (csr5 & TxFIFOUnderflow) {
642 if ((tp->csr6 & 0xC000) != 0xC000)
643 tp->csr6 += 0x4000; /* Bump up the Tx threshold */
645 tp->csr6 |= 0x00200000; /* Store-n-forward. */
646 /* Restart the transmit process. */
647 tulip_restart_rxtx(tp);
648 outl(0, ioaddr + CSR1);
650 if (csr5 & (RxDied | RxNoBuf)) {
651 if (tp->flags & COMET_MAC_ADDR) {
652 outl(tp->mc_filter[0], ioaddr + 0xAC);
653 outl(tp->mc_filter[1], ioaddr + 0xB0);
656 if (csr5 & RxDied) { /* Missed a Rx frame. */
657 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
658 tp->stats.rx_errors++;
659 tulip_start_rxtx(tp);
662 * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
663 * call is ever done under the spinlock
665 if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
667 (tp->link_change)(dev, csr5);
669 if (csr5 & SytemError) {
670 int error = (csr5 >> 23) & 7;
671 /* oops, we hit a PCI error. The code produced corresponds
676 * Note that on parity error, we should do a software reset
677 * of the chip to get it back into a sane state (according
678 * to the 21142/3 docs that is).
681 printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
682 dev->name, tp->nir, error);
684 /* Clear all error sources, included undocumented ones! */
685 outl(0x0800f7ba, ioaddr + CSR5);
688 if (csr5 & TimerInt) {
691 printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
693 outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
697 if (tx > maxtx || rx > maxrx || oi > maxoi) {
699 printk(KERN_WARNING "%s: Too much work during an interrupt, "
700 "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);
702 /* Acknowledge all interrupt sources. */
703 outl(0x8001ffff, ioaddr + CSR5);
704 if (tp->flags & HAS_INTR_MITIGATION) {
705 /* Josip Loncaric at ICASE did extensive experimentation
706 to develop a good interrupt mitigation setting.*/
707 outl(0x8b240000, ioaddr + CSR11);
708 } else if (tp->chip_id == LC82C168) {
709 /* the LC82C168 doesn't have a hw timer.*/
710 outl(0x00, ioaddr + CSR7);
711 mod_timer(&tp->timer, RUN_AT(HZ/50));
713 /* Mask all interrupting sources, set timer to
715 outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
716 outl(0x0012, ioaddr + CSR11);
725 csr5 = inl(ioaddr + CSR5);
727 #ifdef CONFIG_TULIP_NAPI
730 } while ((csr5 & (TxNoBuf |
741 } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
743 tulip_refill_rx(dev);
745 /* check if the card is in suspend mode */
746 entry = tp->dirty_rx % RX_RING_SIZE;
747 if (tp->rx_buffers[entry].skb == NULL) {
749 printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
750 if (tp->chip_id == LC82C168) {
751 outl(0x00, ioaddr + CSR7);
752 mod_timer(&tp->timer, RUN_AT(HZ/50));
754 if (tp->ttimer == 0 || (inl(ioaddr + CSR11) & 0xffff) == 0) {
756 printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
757 outl(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
759 outl(TimerInt, ioaddr + CSR5);
760 outl(12, ioaddr + CSR11);
765 #endif /* CONFIG_TULIP_NAPI */
767 if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
768 tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
772 printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
773 dev->name, inl(ioaddr + CSR5));