+- add patches.fixes/linux-post-2.6.3-20040220
[linux-flexiantxendom0-3.2.10.git] / drivers / net / tulip / interrupt.c
1 /*
2         drivers/net/tulip/interrupt.c
3
4         Maintained by Jeff Garzik <jgarzik@pobox.com>
5         Copyright 2000,2001  The Linux Kernel Team
6         Written/copyright 1994-2001 by Donald Becker.
7
8         This software may be used and distributed according to the terms
9         of the GNU General Public License, incorporated herein by reference.
10
11         Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
12         for more information on this driver, or visit the project
13         Web page at http://sourceforge.net/projects/tulip/
14
15 */
16
17 #include "tulip.h"
18 #include <linux/config.h>
19 #include <linux/etherdevice.h>
20 #include <linux/pci.h>
21
/* Rx frames shorter than this are copied into a freshly allocated skb
 * instead of passing the (large) ring buffer skb up the stack; see the
 * copybreak test in tulip_rx()/tulip_poll().  NOTE(review): presumably
 * initialized from a module parameter elsewhere in the driver — confirm. */
int tulip_rx_copybreak;
/* Upper bound on event-processing iterations per hardware interrupt;
 * loaded into work_count in tulip_interrupt(). */
unsigned int tulip_max_interrupt_work;
24
#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION
/* mit_table has MIT_SIZE+1 slots; index 0 disables mitigation and index
 * MIT_TABLE is the maximum setting.  tulip_poll() only ever programs
 * CSR11 with mit_table[MIT_TABLE] (on) or 0 (off). */
#define MIT_SIZE 15
#define MIT_TABLE 15 /* We use 0 or max */

/* CSR11 values for 21143 hardware interrupt mitigation. */
unsigned int mit_table[MIT_SIZE+1] =
{
        /*  CSR11 21143 hardware Mitigation Control Interrupt
            We use only RX mitigation; other techniques are used for
            TX intr. mitigation.

           31    Cycle Size (timer control)
           30:27 TX timer in 16 * Cycle size
           26:24 TX No pkts before Int.
           23:20 RX timer in Cycle size
           19:17 RX No pkts before Int.
           16    Continuous Mode (CM)
        */

        0x0,             /* IM disabled */
        0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
        0x80150000,
        0x80270000,
        0x80370000,
        0x80490000,
        0x80590000,
        0x80690000,
        0x807B0000,
        0x808B0000,
        0x809D0000,
        0x80AD0000,
        0x80BD0000,
        0x80CF0000,
        0x80DF0000,
//       0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
        0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
};
#endif
62
63
64 int tulip_refill_rx(struct net_device *dev)
65 {
66         struct tulip_private *tp = (struct tulip_private *)dev->priv;
67         int entry;
68         int refilled = 0;
69
70         /* Refill the Rx ring buffers. */
71         for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
72                 entry = tp->dirty_rx % RX_RING_SIZE;
73                 if (tp->rx_buffers[entry].skb == NULL) {
74                         struct sk_buff *skb;
75                         dma_addr_t mapping;
76
77                         skb = tp->rx_buffers[entry].skb = dev_alloc_skb(PKT_BUF_SZ);
78                         if (skb == NULL)
79                                 break;
80
81                         mapping = pci_map_single(tp->pdev, skb->tail, PKT_BUF_SZ,
82                                                  PCI_DMA_FROMDEVICE);
83                         tp->rx_buffers[entry].mapping = mapping;
84
85                         skb->dev = dev;                 /* Mark as being used by this device. */
86                         tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
87                         refilled++;
88                 }
89                 tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
90         }
91         if(tp->chip_id == LC82C168) {
92                 if(((inl(dev->base_addr + CSR5)>>17)&0x07) == 4) {
93                         /* Rx stopped due to out of buffers,
94                          * restart it
95                          */
96                         outl(0x01, dev->base_addr + CSR2);
97                 }
98         }
99         return refilled;
100 }
101
102 #ifdef CONFIG_TULIP_NAPI
103
/* Timer callback armed by tulip_poll() when Rx buffer allocation failed:
 * puts the device back on the NAPI poll list so the refill is retried. */
void oom_timer(unsigned long data)
{
	struct net_device *netdev = (struct net_device *)data;

	netif_rx_schedule(netdev);
}
109
/* NAPI poll handler: drain the Rx ring with Rx interrupts masked,
 * handing packets to the stack with netif_receive_skb().
 *
 * Returns 0 when all pending Rx work is done (the device is removed from
 * the poll list and Rx interrupts re-enabled via CSR7) or when an skb
 * allocation failure forces a retry via oom_timer; returns 1 when the
 * budget/quota was exhausted and polling must continue.
 */
int tulip_poll(struct net_device *dev, int *budget)
{
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	int entry = tp->cur_rx % RX_RING_SIZE;
	int rx_work_limit = *budget;
	int received = 0;

	if (!netif_running(dev))
		goto done;

	/* Never process more than the device quota in one pass. */
	if (rx_work_limit > dev->quota)
		rx_work_limit = dev->quota;

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

/* that one buffer is needed for mit activation; or might be a
   bug in the ring buffer code; check later -- JHS*/

	if (rx_work_limit >=RX_RING_SIZE) rx_work_limit--;
#endif

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);

	/* Keep draining until CSR5 stops reporting pending Rx interrupts. */
	do {
		/* Acknowledge current RX interrupt sources. */
		outl((RxIntr | RxNoBuf), dev->base_addr + CSR5);


		/* If we own the next entry, it is a new packet. Send it up. */
		while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
			s32 status = le32_to_cpu(tp->rx_ring[entry].status);


			/* Ring fully consumed — nothing left to hand back. */
			if (tp->dirty_rx + RX_RING_SIZE == tp->cur_rx)
				break;

			if (tulip_debug > 5)
				printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				       dev->name, entry, status);
			if (--rx_work_limit < 0)
				goto not_done;

			/* NOTE(review): masks select the error-summary and
			 * first/last-descriptor status bits — confirm against
			 * the 21143 Rx descriptor layout. */
			if ((status & 0x38008300) != 0x0300) {
				if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
					if ((status & 0xffff) != 0x7fff) {
						if (tulip_debug > 1)
							printk(KERN_WARNING "%s: Oversized Ethernet frame "
							       "spanned multiple buffers, status %8.8x!\n",
							       dev->name, status);
						tp->stats.rx_length_errors++;
					}
				} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
					if (tulip_debug > 2)
						printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						       dev->name, status);
					tp->stats.rx_errors++; /* end of a packet.*/
					if (status & 0x0890) tp->stats.rx_length_errors++;
					if (status & 0x0004) tp->stats.rx_frame_errors++;
					if (status & 0x0002) tp->stats.rx_crc_errors++;
					if (status & 0x0001) tp->stats.rx_fifo_errors++;
				}
			} else {
				/* Omit the four octet CRC from the length. */
				short pkt_len = ((status >> 16) & 0x7ff) - 4;
				struct sk_buff *skb;

#ifndef final_version
				if (pkt_len > 1518) {
					printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					       dev->name, pkt_len, pkt_len);
					pkt_len = 1518;
					tp->stats.rx_length_errors++;
				}
#endif
				/* Check if the packet is long enough to accept without copying
				   to a minimally-sized skbuff. */
				if (pkt_len < tulip_rx_copybreak
				    && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
					skb->dev = dev;
					skb_reserve(skb, 2);	/* 16 byte align the IP header */
					pci_dma_sync_single(tp->pdev,
							    tp->rx_buffers[entry].mapping,
							    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
					eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
							 pkt_len, 0);
					skb_put(skb, pkt_len);
#else
					memcpy(skb_put(skb, pkt_len),
					       tp->rx_buffers[entry].skb->tail,
					       pkt_len);
#endif
				} else {	/* Pass up the skb already on the Rx ring. */
					char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
							     pkt_len);

#ifndef final_version
					if (tp->rx_buffers[entry].mapping !=
					    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
						printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
						       "do not match in tulip_rx: %08x vs. %llx %p / %p.\n",
						       dev->name,
						       le32_to_cpu(tp->rx_ring[entry].buffer1),
						       (unsigned long long)tp->rx_buffers[entry].mapping,
						       skb->head, temp);
					}
#endif

					pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
							 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

					tp->rx_buffers[entry].skb = NULL;
					tp->rx_buffers[entry].mapping = 0;
				}
				skb->protocol = eth_type_trans(skb, dev);

				netif_receive_skb(skb);

				dev->last_rx = jiffies;
				tp->stats.rx_packets++;
				tp->stats.rx_bytes += pkt_len;
			}
			received++;

			entry = (++tp->cur_rx) % RX_RING_SIZE;
			/* Refill eagerly once a quarter of the ring is consumed. */
			if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/4)
				tulip_refill_rx(dev);

		}

		/* New ack strategy... irq does not ack Rx any longer
		   hopefully this helps */

		/* Really bad things can happen here... If new packet arrives
		 * and an irq arrives (tx or just due to occasionally unset
		 * mask), it will be acked by irq handler, but new thread
		 * is not scheduled. It is major hole in design.
		 * No idea how to fix this if "playing with fire" will fail
		 * tomorrow (night 011029). If it will not fail, we won
		 * finally: amount of IO did not increase at all. */
	} while ((inl(dev->base_addr + CSR5) & RxIntr));

done:

#ifdef CONFIG_TULIP_NAPI_HW_MITIGATION

	/* We use this simplistic scheme for IM. It's proven by
	   real life installations. We could have IM enabled
	   continuously but this would cause unnecessary latency.
	   Unfortunately we can't use all the NET_RX_* feedback here.
	   That would turn on IM for devices that are not contributing
	   to backlog congestion, with unnecessary latency.

	   We monitor the device RX-ring and have:

	   HW Interrupt Mitigation either ON or OFF.

	   ON:  More than 1 pkt received (per intr.) OR we are dropping
	   OFF: Only 1 pkt received

	   Note. We only use min and max (0, 15) settings from mit_table */


	if( tp->flags &  HAS_INTR_MITIGATION) {
		if( received > 1 ) {
			if( ! tp->mit_on ) {
				tp->mit_on = 1;
				outl(mit_table[MIT_TABLE], dev->base_addr + CSR11);
			}
		}
		else {
			if( tp->mit_on ) {
				tp->mit_on = 0;
				outl(0, dev->base_addr + CSR11);
			}
		}
	}

#endif /* CONFIG_TULIP_NAPI_HW_MITIGATION */

	dev->quota -= received;
	*budget -= received;

	tulip_refill_rx(dev);

	/* If RX ring is not full we are out of memory. */
	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;

	/* Remove us from polling list and enable RX intr. */

	netif_rx_complete(dev);
	outl(tulip_tbl[tp->chip_id].valid_intrs, dev->base_addr+CSR7);

	/* The last op happens after poll completion. Which means the following:
	 * 1. it can race with disabling irqs in irq handler
	 * 2. it can race with dis/enabling irqs in other poll threads
	 * 3. if an irq raised after beginning loop, it will be immediately
	 *    triggered here.
	 *
	 * Summarizing: the logic results in some redundant irqs both
	 * due to races in masking and due to too late acking of already
	 * processed irqs. But it must not result in losing events.
	 */

	return 0;

not_done:
	if (!received) {

		received = dev->quota; /* Not to happen */
	}
	dev->quota -= received;
	*budget -= received;

	if (tp->cur_rx - tp->dirty_rx > RX_RING_SIZE/2 ||
	    tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL)
		tulip_refill_rx(dev);

	if (tp->rx_buffers[tp->dirty_rx % RX_RING_SIZE].skb == NULL) goto oom;

	return 1;


oom:	/* Executed with RX ints disabled */


	/* Start timer, stop polling, but do not enable rx interrupts. */
	mod_timer(&tp->oom_timer, jiffies+1);

	/* Think: timer_pending() was an explicit signature of bug.
	 * Timer can be pending now but fired and completed
	 * before we did netif_rx_complete(). See? We would lose it. */

	/* remove ourselves from the polling list */
	netif_rx_complete(dev);

	return 0;
}
352
353 #else /* CONFIG_TULIP_NAPI */
354
/* Non-NAPI receive path, called from tulip_interrupt(): drain completed
 * Rx descriptors and hand each packet to the stack with netif_rx().
 * Processes at most one full ring's worth of descriptors per call and
 * returns the number of packets received.  Descriptor ownership is NOT
 * returned to the chip here — tulip_refill_rx() does that.
 */
static int tulip_rx(struct net_device *dev)
{
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	int entry = tp->cur_rx % RX_RING_SIZE;
	/* Bound the walk to the descriptors not yet refilled. */
	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
	int received = 0;

	if (tulip_debug > 4)
		printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
			   tp->rx_ring[entry].status);
	/* If we own the next entry, it is a new packet. Send it up. */
	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
		s32 status = le32_to_cpu(tp->rx_ring[entry].status);

		if (tulip_debug > 5)
			printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
				   dev->name, entry, status);
		if (--rx_work_limit < 0)
			break;
		/* NOTE(review): masks select the error-summary and
		 * first/last-descriptor status bits — confirm against the
		 * 21143 Rx descriptor layout. */
		if ((status & 0x38008300) != 0x0300) {
			if ((status & 0x38000300) != 0x0300) {
				/* Ignore earlier buffers. */
				if ((status & 0xffff) != 0x7fff) {
					if (tulip_debug > 1)
						printk(KERN_WARNING "%s: Oversized Ethernet frame "
							   "spanned multiple buffers, status %8.8x!\n",
							   dev->name, status);
					tp->stats.rx_length_errors++;
				}
			} else if (status & RxDescFatalErr) {
				/* There was a fatal error. */
				if (tulip_debug > 2)
					printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
						   dev->name, status);
				tp->stats.rx_errors++; /* end of a packet.*/
				if (status & 0x0890) tp->stats.rx_length_errors++;
				if (status & 0x0004) tp->stats.rx_frame_errors++;
				if (status & 0x0002) tp->stats.rx_crc_errors++;
				if (status & 0x0001) tp->stats.rx_fifo_errors++;
			}
		} else {
			/* Omit the four octet CRC from the length. */
			short pkt_len = ((status >> 16) & 0x7ff) - 4;
			struct sk_buff *skb;

#ifndef final_version
			if (pkt_len > 1518) {
				printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
					   dev->name, pkt_len, pkt_len);
				pkt_len = 1518;
				tp->stats.rx_length_errors++;
			}
#endif

			/* Check if the packet is long enough to accept without copying
			   to a minimally-sized skbuff. */
			if (pkt_len < tulip_rx_copybreak
				&& (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
				skb->dev = dev;
				skb_reserve(skb, 2);	/* 16 byte align the IP header */
				pci_dma_sync_single(tp->pdev,
						    tp->rx_buffers[entry].mapping,
						    pkt_len, PCI_DMA_FROMDEVICE);
#if ! defined(__alpha__)
				eth_copy_and_sum(skb, tp->rx_buffers[entry].skb->tail,
						 pkt_len, 0);
				skb_put(skb, pkt_len);
#else
				memcpy(skb_put(skb, pkt_len),
				       tp->rx_buffers[entry].skb->tail,
				       pkt_len);
#endif
			} else {	/* Pass up the skb already on the Rx ring. */
				char *temp = skb_put(skb = tp->rx_buffers[entry].skb,
						     pkt_len);

#ifndef final_version
				if (tp->rx_buffers[entry].mapping !=
				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
					printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
					       "do not match in tulip_rx: %08x vs. %Lx %p / %p.\n",
					       dev->name,
					       le32_to_cpu(tp->rx_ring[entry].buffer1),
					       (long long)tp->rx_buffers[entry].mapping,
					       skb->head, temp);
				}
#endif

				pci_unmap_single(tp->pdev, tp->rx_buffers[entry].mapping,
						 PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

				tp->rx_buffers[entry].skb = NULL;
				tp->rx_buffers[entry].mapping = 0;
			}
			skb->protocol = eth_type_trans(skb, dev);

			netif_rx(skb);

			dev->last_rx = jiffies;
			tp->stats.rx_packets++;
			tp->stats.rx_bytes += pkt_len;
		}
		received++;
		entry = (++tp->cur_rx) % RX_RING_SIZE;
	}
	return received;
}
462 #endif  /* CONFIG_TULIP_NAPI */
463
/* Handle a PHY-originated interrupt (HPPA platforms only).
 *
 * Reads the low byte of CSR12 and, if it changed since the last look,
 * acknowledges the interrupt, rechecks the duplex setting under the
 * driver lock, and clears the ack bit again.  Returns 1 if a PHY event
 * was handled, 0 otherwise.  On non-HPPA builds this is always 0.
 */
static inline unsigned int phy_interrupt (struct net_device *dev)
{
#ifdef __hppa__
	struct tulip_private *tp = (struct tulip_private *)dev->priv;
	int csr12 = inl(dev->base_addr + CSR12) & 0xff;

	if (csr12 == tp->csr12_shadow)
		return 0;	/* no change — not our interrupt */

	/* ack interrupt */
	outl(csr12 | 0x02, dev->base_addr + CSR12);
	tp->csr12_shadow = csr12;

	/* do link change stuff */
	spin_lock(&tp->lock);
	tulip_check_duplex(dev);
	spin_unlock(&tp->lock);

	/* clear irq ack bit */
	outl(csr12 & ~0x02, dev->base_addr + CSR12);

	return 1;
#else
	return 0;
#endif
}
487
488 /* The interrupt handler does all of the Rx thread work and cleans up
489    after the Tx thread. */
490 irqreturn_t tulip_interrupt(int irq, void *dev_instance, struct pt_regs *regs)
491 {
492         struct net_device *dev = (struct net_device *)dev_instance;
493         struct tulip_private *tp = (struct tulip_private *)dev->priv;
494         long ioaddr = dev->base_addr;
495         int csr5;
496         int missed;
497         int rx = 0;
498         int tx = 0;
499         int oi = 0;
500         int maxrx = RX_RING_SIZE;
501         int maxtx = TX_RING_SIZE;
502         int maxoi = TX_RING_SIZE;
503 #ifdef CONFIG_TULIP_NAPI
504         int rxd = 0;
505 #else
506         int entry;
507 #endif
508         unsigned int work_count = tulip_max_interrupt_work;
509         unsigned int handled = 0;
510
511         /* Let's see whether the interrupt really is for us */
512         csr5 = inl(ioaddr + CSR5);
513
514         if (tp->flags & HAS_PHY_IRQ) 
515                 handled = phy_interrupt (dev);
516     
517         if ((csr5 & (NormalIntr|AbnormalIntr)) == 0)
518                 return IRQ_RETVAL(handled);
519
520         tp->nir++;
521
522         do {
523
524 #ifdef CONFIG_TULIP_NAPI
525
526                 if (!rxd && (csr5 & (RxIntr | RxNoBuf))) {
527                         rxd++;
528                         /* Mask RX intrs and add the device to poll list. */
529                         outl(tulip_tbl[tp->chip_id].valid_intrs&~RxPollInt, ioaddr + CSR7);
530                         netif_rx_schedule(dev);
531                         
532                         if (!(csr5&~(AbnormalIntr|NormalIntr|RxPollInt|TPLnkPass)))
533                                break;
534                 }
535                 
536                /* Acknowledge the interrupt sources we handle here ASAP
537                   the poll function does Rx and RxNoBuf acking */
538                 
539                 outl(csr5 & 0x0001ff3f, ioaddr + CSR5);
540
541 #else 
542                 /* Acknowledge all of the current interrupt sources ASAP. */
543                 outl(csr5 & 0x0001ffff, ioaddr + CSR5);
544
545
546                 if (csr5 & (RxIntr | RxNoBuf)) {
547                                 rx += tulip_rx(dev);
548                         tulip_refill_rx(dev);
549                 }
550
551 #endif /*  CONFIG_TULIP_NAPI */
552                 
553                 if (tulip_debug > 4)
554                         printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
555                                dev->name, csr5, inl(dev->base_addr + CSR5));
556                 
557
558                 if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
559                         unsigned int dirty_tx;
560
561                         spin_lock(&tp->lock);
562
563                         for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
564                                  dirty_tx++) {
565                                 int entry = dirty_tx % TX_RING_SIZE;
566                                 int status = le32_to_cpu(tp->tx_ring[entry].status);
567
568                                 if (status < 0)
569                                         break;                  /* It still has not been Txed */
570
571                                 /* Check for Rx filter setup frames. */
572                                 if (tp->tx_buffers[entry].skb == NULL) {
573                                         /* test because dummy frames not mapped */
574                                         if (tp->tx_buffers[entry].mapping)
575                                                 pci_unmap_single(tp->pdev,
576                                                          tp->tx_buffers[entry].mapping,
577                                                          sizeof(tp->setup_frame),
578                                                          PCI_DMA_TODEVICE);
579                                         continue;
580                                 }
581
582                                 if (status & 0x8000) {
583                                         /* There was an major error, log it. */
584 #ifndef final_version
585                                         if (tulip_debug > 1)
586                                                 printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
587                                                            dev->name, status);
588 #endif
589                                         tp->stats.tx_errors++;
590                                         if (status & 0x4104) tp->stats.tx_aborted_errors++;
591                                         if (status & 0x0C00) tp->stats.tx_carrier_errors++;
592                                         if (status & 0x0200) tp->stats.tx_window_errors++;
593                                         if (status & 0x0002) tp->stats.tx_fifo_errors++;
594                                         if ((status & 0x0080) && tp->full_duplex == 0)
595                                                 tp->stats.tx_heartbeat_errors++;
596                                 } else {
597                                         tp->stats.tx_bytes +=
598                                                 tp->tx_buffers[entry].skb->len;
599                                         tp->stats.collisions += (status >> 3) & 15;
600                                         tp->stats.tx_packets++;
601                                 }
602
603                                 pci_unmap_single(tp->pdev, tp->tx_buffers[entry].mapping,
604                                                  tp->tx_buffers[entry].skb->len,
605                                                  PCI_DMA_TODEVICE);
606
607                                 /* Free the original skb. */
608                                 dev_kfree_skb_irq(tp->tx_buffers[entry].skb);
609                                 tp->tx_buffers[entry].skb = NULL;
610                                 tp->tx_buffers[entry].mapping = 0;
611                                 tx++;
612                         }
613
614 #ifndef final_version
615                         if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
616                                 printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
617                                            dev->name, dirty_tx, tp->cur_tx);
618                                 dirty_tx += TX_RING_SIZE;
619                         }
620 #endif
621
622                         if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
623                                 netif_wake_queue(dev);
624
625                         tp->dirty_tx = dirty_tx;
626                         if (csr5 & TxDied) {
627                                 if (tulip_debug > 2)
628                                         printk(KERN_WARNING "%s: The transmitter stopped."
629                                                    "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
630                                                    dev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
631                                 tulip_restart_rxtx(tp);
632                         }
633                         spin_unlock(&tp->lock);
634                 }
635
636                 /* Log errors. */
637                 if (csr5 & AbnormalIntr) {      /* Abnormal error summary bit. */
638                         if (csr5 == 0xffffffff)
639                                 break;
640                         if (csr5 & TxJabber) tp->stats.tx_errors++;
641                         if (csr5 & TxFIFOUnderflow) {
642                                 if ((tp->csr6 & 0xC000) != 0xC000)
643                                         tp->csr6 += 0x4000;     /* Bump up the Tx threshold */
644                                 else
645                                         tp->csr6 |= 0x00200000;  /* Store-n-forward. */
646                                 /* Restart the transmit process. */
647                                 tulip_restart_rxtx(tp);
648                                 outl(0, ioaddr + CSR1);
649                         }
650                         if (csr5 & (RxDied | RxNoBuf)) {
651                                 if (tp->flags & COMET_MAC_ADDR) {
652                                         outl(tp->mc_filter[0], ioaddr + 0xAC);
653                                         outl(tp->mc_filter[1], ioaddr + 0xB0);
654                                 }
655                         }
656                         if (csr5 & RxDied) {            /* Missed a Rx frame. */
657                                 tp->stats.rx_missed_errors += inl(ioaddr + CSR8) & 0xffff;
658                                 tp->stats.rx_errors++;
659                                 tulip_start_rxtx(tp);
660                         }
661                         /*
662                          * NB: t21142_lnk_change() does a del_timer_sync(), so be careful if this
663                          * call is ever done under the spinlock
664                          */
665                         if (csr5 & (TPLnkPass | TPLnkFail | 0x08000000)) {
666                                 if (tp->link_change)
667                                         (tp->link_change)(dev, csr5);
668                         }
669                         if (csr5 & SytemError) {
670                                 int error = (csr5 >> 23) & 7;
671                                 /* oops, we hit a PCI error.  The code produced corresponds
672                                  * to the reason:
673                                  *  0 - parity error
674                                  *  1 - master abort
675                                  *  2 - target abort
676                                  * Note that on parity error, we should do a software reset
677                                  * of the chip to get it back into a sane state (according
678                                  * to the 21142/3 docs that is).
679                                  *   -- rmk
680                                  */
681                                 printk(KERN_ERR "%s: (%lu) System Error occurred (%d)\n",
682                                         dev->name, tp->nir, error);
683                         }
684                         /* Clear all error sources, included undocumented ones! */
685                         outl(0x0800f7ba, ioaddr + CSR5);
686                         oi++;
687                 }
688                 if (csr5 & TimerInt) {
689
690                         if (tulip_debug > 2)
691                                 printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
692                                            dev->name, csr5);
693                         outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
694                         tp->ttimer = 0;
695                         oi++;
696                 }
697                 if (tx > maxtx || rx > maxrx || oi > maxoi) {
698                         if (tulip_debug > 1)
699                                 printk(KERN_WARNING "%s: Too much work during an interrupt, "
700                                            "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", dev->name, csr5, tp->nir, tx, rx, oi);
701
702                        /* Acknowledge all interrupt sources. */
703                         outl(0x8001ffff, ioaddr + CSR5);
704                         if (tp->flags & HAS_INTR_MITIGATION) {
705                      /* Josip Loncaric at ICASE did extensive experimentation
706                         to develop a good interrupt mitigation setting.*/
707                                 outl(0x8b240000, ioaddr + CSR11);
708                         } else if (tp->chip_id == LC82C168) {
709                                 /* the LC82C168 doesn't have a hw timer.*/
710                                 outl(0x00, ioaddr + CSR7);
711                                 mod_timer(&tp->timer, RUN_AT(HZ/50));
712                         } else {
713                           /* Mask all interrupting sources, set timer to
714                                 re-enable. */
715                                 outl(((~csr5) & 0x0001ebef) | AbnormalIntr | TimerInt, ioaddr + CSR7);
716                                 outl(0x0012, ioaddr + CSR11);
717                         }
718                         break;
719                 }
720
721                 work_count--;
722                 if (work_count == 0)
723                         break;
724
725                 csr5 = inl(ioaddr + CSR5);
726
727 #ifdef CONFIG_TULIP_NAPI
728                 if (rxd)
729                         csr5 &= ~RxPollInt;
730         } while ((csr5 & (TxNoBuf | 
731                           TxDied | 
732                           TxIntr | 
733                           TimerInt |
734                           /* Abnormal intr. */
735                           RxDied | 
736                           TxFIFOUnderflow | 
737                           TxJabber | 
738                           TPLnkFail |  
739                           SytemError )) != 0);
740 #else 
741         } while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
742
743         tulip_refill_rx(dev);
744
745         /* check if the card is in suspend mode */
746         entry = tp->dirty_rx % RX_RING_SIZE;
747         if (tp->rx_buffers[entry].skb == NULL) {
748                 if (tulip_debug > 1)
749                         printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", dev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
750                 if (tp->chip_id == LC82C168) {
751                         outl(0x00, ioaddr + CSR7);
752                         mod_timer(&tp->timer, RUN_AT(HZ/50));
753                 } else {
754                         if (tp->ttimer == 0 || (inl(ioaddr + CSR11) & 0xffff) == 0) {
755                                 if (tulip_debug > 1)
756                                         printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", dev->name, tp->nir);
757                                 outl(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
758                                         ioaddr + CSR7);
759                                 outl(TimerInt, ioaddr + CSR5);
760                                 outl(12, ioaddr + CSR11);
761                                 tp->ttimer = 1;
762                         }
763                 }
764         }
765 #endif /* CONFIG_TULIP_NAPI */
766
767         if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
768                 tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
769         }
770
771         if (tulip_debug > 4)
772                 printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
773                            dev->name, inl(ioaddr + CSR5));
774
775         return IRQ_HANDLED;
776 }