1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
3 Written 1998-2001 by Donald Becker.
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
30 Linux kernel version history:
33 - Jeff Garzik: softnet 'n stuff
36 - Justin Guyett: softnet and locking fixes
37 - Jeff Garzik: use PCI interface
40 - Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
43 - Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
44 code) update "Theory of Operation" with
45 softnet/locking changes
46 - Dave Miller: PCI DMA and endian fixups
47 - Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
50 - Urban Widmark: fix gcc 2.95.2 problem and
51 remove writel's to fixed address 0x7c
54 - Urban Widmark: mdio locking, bounce buffer changes
55 merges from Beckers 1.05 version
56 added netif_running_on/off support
59 - Urban Widmark: merges from Beckers 1.08b version (VT6102 + mdio)
60 set netif_running_on/off on startup, del_timer_sync
63 - Manfred Spraul: added reset into tx_timeout
66 - Urban Widmark: merges from Beckers 1.10 version
67 (media selection + eeprom reload)
68 - David Vrabel: merges from D-Link "1.11" version
69 (disable WOL and PME on startup)
72 - Manfred Spraul: use "singlecopy" for unaligned buffers
73 don't allocate bounce buffers for !ReqTxAlign cards
76 - David Woodhouse: Set dev->base_addr before the first time we call
77 wait_for_reset(). It's a lot happier that way.
78 Free np->tx_bufs only if we actually allocated it.
81 - Martin Eriksson: Allow Memory-Mapped IO to be enabled.
85 - Replace some MII-related magic numbers with constants
88 - fixes comments for Rhine-III
89 - removes W_MAX_TIMEOUT (unused)
90 - adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
91 is R-I and has Davicom chip, flag is referenced in kernel driver)
92 - sends chip_id as a parameter to wait_for_reset since np is not
93 initialized on first call
94 - changes mmio "else if (chip_id==VT6102)" to "else" so it will work
95 for Rhine-III's (documentation says same bit is correct)
96 - transmit frame queue message is off by one - fixed
97 - adds IntrNormalSummary to "Something Wicked" exclusion list
98 so normal interrupts will not trigger the message (src: Donald Becker)
100 - show confused chip where to continue after Tx error
101 - location of collision counter is chip specific
102 - allow selecting backoff algorithm (module parameter)
105 - Use new MII lib helper generic_mii_ioctl
107 LK1.1.16 (Roger Luethi)
109 - Handle Tx buffer underrun
110 - Fix bugs in full duplex handling
111 - New reset code uses "force reset" cmd on Rhine-II
114 LK1.1.17 (Roger Luethi)
115 - Fix race in via_rhine_start_tx()
116 - On errors, wait for Tx engine to turn off before scavenging
117 - Handle Tx descriptor write-back race on Rhine-II
118 - Force flushing for PCI posted writes
119 - More reset code changes
121 LK1.1.18 (Roger Luethi)
122 - No filtering multicast in promisc mode (Edward Peng)
123 - Fix for Rhine-I Tx timeouts
125 LK1.1.19 (Roger Luethi)
126 - Increase Tx threshold for unspecified errors
128 LK1.2.0-2.6 (Roger Luethi)
130 - Rewrite PHY, media handling (remove options, full_duplex, backoff)
131 - Fix Tx engine race for good
/* Driver identity strings, used in the version banner and log messages. */
135 #define DRV_NAME "via-rhine"
136 #define DRV_VERSION "1.2.0-2.6"
137 #define DRV_RELDATE "June-10-2004"
140 /* A few user-configurable values.
141 These may be modified when a driver module is loaded. */
143 static int debug = 1; /* 1 normal messages, 0 quiet .. 7 verbose. */
144 static int max_interrupt_work = 20; /* Events handled per interrupt (module parameter). */
146 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
147 Setting to > 1518 effectively disables this feature. */
148 static int rx_copybreak; /* Default 0: never copy received frames into fresh skbs. */
151 * In case you are looking for 'options[]' or 'full_duplex[]', they
152 * are gone. Use ethtool(8) instead.
155 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
156 The Rhine has a 64 element 8390-like hash table. */
157 static const int multicast_filter_limit = 32;
160 /* Operational parameters that are set at compile time. */
162 /* Keep the ring sizes a power of two for compile efficiency.
163 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
164 Making the Tx ring too large decreases the effectiveness of channel
165 bonding and packet priority.
166 There are no ill effects from too-large receive rings. */
167 #define TX_RING_SIZE 16
168 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used; must stay < TX_RING_SIZE. */
169 #define RX_RING_SIZE 16
172 /* Operational parameters that usually are not changed. */
174 /* Time in jiffies before concluding the transmitter is hung. */
175 #define TX_TIMEOUT (2*HZ)
177 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
179 #include <linux/module.h>
180 #include <linux/moduleparam.h>
181 #include <linux/kernel.h>
182 #include <linux/string.h>
183 #include <linux/timer.h>
184 #include <linux/errno.h>
185 #include <linux/ioport.h>
186 #include <linux/slab.h>
187 #include <linux/interrupt.h>
188 #include <linux/pci.h>
189 #include <linux/netdevice.h>
190 #include <linux/etherdevice.h>
191 #include <linux/skbuff.h>
192 #include <linux/init.h>
193 #include <linux/delay.h>
194 #include <linux/mii.h>
195 #include <linux/ethtool.h>
196 #include <linux/crc32.h>
197 #include <linux/bitops.h>
198 #include <asm/processor.h> /* Processor type for cache alignment. */
201 #include <asm/uaccess.h>
203 /* These identify the driver base version and may not be removed. */
204 static char version[] __devinitdata =
205 KERN_INFO DRV_NAME ".c:v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker\n";
207 /* This driver was written to use PCI memory space. Some early versions
208 of the Rhine may only work correctly with I/O space accesses. */
209 #ifdef CONFIG_VIA_RHINE_MMIO
/* NOTE(review): the #ifdef body and its matching #endif are elided in this chunk. */
214 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
215 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
216 MODULE_LICENSE("GPL");
/* Expose the tunables above as module parameters (no sysfs permissions). */
218 module_param(max_interrupt_work, int, 0);
219 module_param(debug, int, 0);
220 module_param(rx_copybreak, int, 0);
221 MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
222 MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
223 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
228 I. Board Compatibility
230 This driver is designed for the VIA 86c100A Rhine-I PCI Fast Ethernet
233 II. Board-specific settings
235 Boards with this chip are functional only in a bus-master PCI slot.
237 Many operational settings are loaded from the EEPROM to the Config word at
238 offset 0x78. For most of these settings, this driver assumes that they are
240 If this driver is compiled to use PCI memory space operations the EEPROM
241 must be configured to enable memory ops.
243 III. Driver operation
247 This driver uses two statically allocated fixed-size descriptor lists
248 formed into rings by a branch from the final descriptor to the beginning of
249 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
251 IIIb/c. Transmit/Receive Structure
253 This driver attempts to use a zero-copy receive and transmit scheme.
255 Alas, all data buffers are required to start on a 32 bit boundary, so
256 the driver must often copy transmit packets into bounce buffers.
258 The driver allocates full frame size skbuffs for the Rx ring buffers at
259 open() time and passes the skb->data field to the chip as receive data
260 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
261 a fresh skbuff is allocated and the frame is copied to the new skbuff.
262 When the incoming frame is larger, the skbuff is passed directly up the
263 protocol stack. Buffers consumed this way are replaced by newly allocated
264 skbuffs in the last phase of rhine_rx().
266 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
267 using a full-sized skbuff for small frames vs. the copying costs of larger
268 frames. New boards are typically used in generously configured machines
269 and the underfilled buffers have negligible impact compared to the benefit of
270 a single allocation size, so the default value of zero results in never
271 copying packets. When copying is done, the cost is usually mitigated by using
272 a combined copy/checksum routine. Copying also preloads the cache, which is
273 most useful with small frames.
275 Since the VIA chips are only able to transfer data to buffers on 32 bit
276 boundaries, the IP header at offset 14 in an ethernet frame isn't
277 longword aligned for further processing. Copying these unaligned buffers
278 has the beneficial effect of 16-byte aligning the IP header.
280 IIId. Synchronization
282 The driver runs as two independent, single-threaded flows of control. One
283 is the send-packet routine, which enforces single-threaded use by the
284 dev->priv->lock spinlock. The other thread is the interrupt handler, which
285 is single threaded by the hardware and interrupt handling software.
287 The send packet thread has partial control over the Tx ring. It locks the
288 dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
289 is not available it stops the transmit queue by calling netif_stop_queue.
291 The interrupt handler has exclusive control over the Rx ring and records stats
292 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
293 empty by incrementing the dirty_tx mark. If at least half of the entries in
294 the Rx ring are available the transmit queue is woken up if it was stopped.
300 Preliminary VT86C100A manual from http://www.via.com.tw/
301 http://www.scyld.com/expert/100mbps.html
302 http://www.scyld.com/expert/NWay.html
303 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
304 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
309 The VT86C100A manual is not reliable information.
310 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
311 in significant performance degradation for bounce buffer copies on transmit
312 and unaligned IP headers on receive.
313 The chip does not pad to minimum transmit length.
318 /* This table drives the PCI probe routines. It's mostly boilerplate in all
319 of the drivers, and will likely be provided by some future kernel.
320 Note the matching code -- the first table entry matches all 56** cards but
321 second only the 1234 card.
/* Chip revision codes (PCI revision ID) used below to derive per-chip quirks. */
328 VT8231 = 0x50, /* Integrated MAC */
329 VT8233 = 0x60, /* Integrated MAC */
330 VT8235 = 0x74, /* Integrated MAC */
331 VT8237 = 0x78, /* Integrated MAC */
338 VT6105M = 0x90, /* Management adapter */
/* Quirk flags; a mask of these is kept in rhine_private and tested at runtime. */
342 rqWOL = 0x0001, /* Wake-On-LAN support */
343 rqForceReset = 0x0002, /* Chip may need the MiscCmd force-reset path (see rhine_chip_reset). */
344 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
345 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
346 rqRhineI = 0x0100, /* See comment below */
349 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
350 * MMIO as well as for the collision counter and the Tx FIFO underflow
351 * indicator. In addition, Tx and Rx buffers need to be 4 byte aligned.
354 /* Beware of PCI posted writes */
355 #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
/* PCI IDs this driver binds to; all are VIA (vendor 0x1106) parts. */
357 static struct pci_device_id rhine_pci_tbl[] =
359 {0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT86C100A */
360 {0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6102 */
361 {0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* 6105{,L,LOM} */
362 {0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, }, /* VT6105M */
363 { } /* terminate list */
365 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
368 /* Offsets to the device registers. */
369 enum register_offsets {
370 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
372 IntrStatus=0x0C, IntrEnable=0x0E,
373 MulticastFilter0=0x10, MulticastFilter1=0x14,
374 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
375 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
376 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
377 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
378 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
379 StickyHW=0x83, IntrStatus2=0x84,
380 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
381 WOLcrClr1=0xA6, WOLcgClr=0xA7,
382 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
385 /* Bits in ConfigD */
387 BackOptional=0x01, BackModify=0x02,
388 BackCaptureEffect=0x04, BackRandom=0x08
392 /* Registers we check that mmio and reg are the same. */
/* NOTE(review): not declared static/const -- pollutes the global namespace; make it
   static const when the complete file can be edited. */
393 int mmio_verify_registers[] = {
394 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
399 /* Bits in the interrupt status/mask registers. */
400 enum intr_status_bits {
401 IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
402 IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
404 IntrStatsMax=0x0080, IntrRxEarly=0x0100,
405 IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
406 IntrTxAborted=0x2000, IntrLinkChange=0x4000,
408 IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
/* Values above 0xFFFF are synthesized: get_intr_status() shifts IntrStatus2 left 16. */
409 IntrTxDescRace=0x080000, /* mapped from IntrStatus2 */
410 IntrTxErrSummary=0x082218,
413 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
422 /* The Rx and Tx buffer descriptors. */
/* NOTE(review): the rx_desc/tx_desc struct bodies are partially elided in this chunk;
   only the desc_length members are visible below. */
425 u32 desc_length; /* Chain flag, Buffer/frame length */
431 u32 desc_length; /* Chain flag, Tx Config, Frame length */
436 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
437 #define TXDESC 0x00e08000
439 enum rx_status_bits {
440 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
443 /* Bits in *_desc.*_status */
444 enum desc_status_bits {
448 /* Bits in ChipCmd. */
450 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
451 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
/* Cmd1* values live in the second command byte (ChipCmd1 = ChipCmd + 1). */
452 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
453 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
/* Per-device private state, stored via netdev_priv(). */
456 struct rhine_private {
457 /* Descriptor rings */
458 struct rx_desc *rx_ring;
459 struct tx_desc *tx_ring;
460 dma_addr_t rx_ring_dma;
461 dma_addr_t tx_ring_dma;
463 /* The addresses of receive-in-place skbuffs. */
464 struct sk_buff *rx_skbuff[RX_RING_SIZE];
465 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
467 /* The saved address of a sent-in-place packet/buffer, for later free(). */
468 struct sk_buff *tx_skbuff[TX_RING_SIZE];
469 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
471 /* Tx bounce buffers */
472 unsigned char *tx_buf[TX_RING_SIZE]; /* Per-entry pointers into tx_bufs (see alloc_tbufs). */
473 unsigned char *tx_bufs; /* One contiguous DMA area of TX_RING_SIZE * PKT_BUF_SZ bytes. */
474 dma_addr_t tx_bufs_dma;
476 struct pci_dev *pdev;
478 struct net_device_stats stats;
481 /* Frequently used values: keep some adjacent for cache effect. */
483 struct rx_desc *rx_head_desc;
484 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
485 unsigned int cur_tx, dirty_tx;
486 unsigned int rx_buf_sz; /* Based on MTU+slack. */
489 u8 tx_thresh, rx_thresh; /* FIFO thresholds; initial values set in init_registers(). */
491 struct mii_if_info mii_if;
/* Forward declarations for the driver's internal entry points. */
495 static int mdio_read(struct net_device *dev, int phy_id, int location);
496 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
497 static int rhine_open(struct net_device *dev);
498 static void rhine_tx_timeout(struct net_device *dev);
499 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev);
500 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *regs);
501 static void rhine_tx(struct net_device *dev);
502 static void rhine_rx(struct net_device *dev);
503 static void rhine_error(struct net_device *dev, int intr_status);
504 static void rhine_set_rx_mode(struct net_device *dev);
505 static struct net_device_stats *rhine_get_stats(struct net_device *dev);
506 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
507 static struct ethtool_ops netdev_ethtool_ops;
508 static int rhine_close(struct net_device *dev);
509 static void rhine_shutdown (struct device *gdev);
/* Bounded busy-wait until `condition` becomes true; logs the cycle count when
   more than half of the budget was consumed (the "1024-i" report implies the
   counter starts at 1024 -- its initialization line is elided in this chunk). */
511 #define RHINE_WAIT_FOR(condition) do { \
513 while (!(condition) && --i) \
515 if (debug > 1 && i < 512) \
516 printk(KERN_INFO "%s: %4d cycles used @ %s:%d\n", \
517 DRV_NAME, 1024-i, __func__, __LINE__); \
/* Read the 16-bit IntrStatus register; on chips with the Tx descriptor
   write-back race quirk (Rhine-II), fold IntrStatus2 into bits 16-23 so
   callers can test IntrTxDescRace in the same word. */
520 static inline u32 get_intr_status(struct net_device *dev)
522 struct rhine_private *rp = netdev_priv(dev);
523 void __iomem *ioaddr = rp->base;
526 intr_status = ioread16(ioaddr + IntrStatus);
527 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
528 if (rp->quirks & rqStatusWBRace)
529 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
534 * Get power related registers into sane state.
535 * Notify user about past WOL event.
537 static void rhine_power_init(struct net_device *dev)
539 struct rhine_private *rp = netdev_priv(dev);
540 void __iomem *ioaddr = rp->base;
543 if (rp->quirks & rqWOL) {
544 /* Make sure chip is in power state D0 */
545 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
547 /* Disable "force PME-enable" */
548 iowrite8(0x80, ioaddr + WOLcgClr);
550 /* Clear power-event config bits (WOL) */
551 iowrite8(0xFF, ioaddr + WOLcrClr);
552 /* More recent cards can manage two additional patterns */
553 if (rp->quirks & rq6patterns)
554 iowrite8(0x03, ioaddr + WOLcrClr1);
556 /* Save power-event status bits */
557 wolstat = ioread8(ioaddr + PwrcsrSet);
558 if (rp->quirks & rq6patterns)
559 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
561 /* Clear power-event status bits */
562 iowrite8(0xFF, ioaddr + PwrcsrClr);
563 if (rp->quirks & rq6patterns)
564 iowrite8(0x03, ioaddr + PwrcsrClr1);
/* Decode the saved wolstat bits into a human-readable wake-up reason
   (the switch statement itself is elided in this chunk). */
570 reason = "Magic packet";
573 reason = "Link went up";
576 reason = "Link went down";
579 reason = "Unicast packet";
582 reason = "Multicast/broadcast packet";
587 printk(KERN_INFO "%s: Woke system up. Reason: %s.\n",
/* Soft-reset the chip via Cmd1Reset; if the reset has not completed,
   escalate with the MiscCmd force-reset on chips that support it and
   wait (bounded) for completion, logging the final outcome. */
593 static void rhine_chip_reset(struct net_device *dev)
595 struct rhine_private *rp = netdev_priv(dev);
596 void __iomem *ioaddr = rp->base;
598 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
601 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
602 printk(KERN_INFO "%s: Reset not complete yet. "
603 "Trying harder.\n", DRV_NAME);
606 if (rp->quirks & rqForceReset)
607 iowrite8(0x40, ioaddr + MiscCmd);
609 /* Reset can take somewhat longer (rare) */
610 RHINE_WAIT_FOR(!(ioread8(ioaddr + ChipCmd1) & Cmd1Reset));
614 printk(KERN_INFO "%s: Reset %s.\n", dev->name,
615 (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) ?
616 "failed" : "succeeded");
/* Enable memory-mapped I/O access via programmed I/O writes to the config
   registers. Rhine-I uses ConfigA bit 0x20; other chips use ConfigD bit 0x80
   (the else branch is elided in this chunk). Must use PIO since MMIO is not
   usable until this completes. */
620 static void enable_mmio(long pioaddr, u32 quirks)
623 if (quirks & rqRhineI) {
624 /* More recent docs say that this bit is reserved ... */
625 n = inb(pioaddr + ConfigA) | 0x20;
626 outb(n, pioaddr + ConfigA);
628 n = inb(pioaddr + ConfigD) | 0x80;
629 outb(n, pioaddr + ConfigD);
635 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
636 * (plus 0x6C for Rhine-I/II)
638 static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
640 struct rhine_private *rp = netdev_priv(dev);
641 void __iomem *ioaddr = rp->base;
/* Trigger the EEPROM autoload and wait for the chip to clear the bit. */
643 outb(0x20, pioaddr + MACRegEEcsr);
644 RHINE_WAIT_FOR(!(inb(pioaddr + MACRegEEcsr) & 0x20));
648 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
649 * MMIO. If reloading EEPROM was done first this could be avoided, but
650 * it is not known if that still works with the "win98-reboot" problem.
652 enable_mmio(pioaddr, rp->quirks);
655 /* Turn off EEPROM-controlled wake-up (magic packet) */
656 if (rp->quirks & rqWOL)
657 iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
661 #ifdef CONFIG_NET_POLL_CONTROLLER
/* Netpoll hook: run the interrupt handler with the device IRQ masked,
   so netconsole/kgdb can receive packets without interrupt delivery. */
662 static void rhine_poll(struct net_device *dev)
664 disable_irq(dev->irq);
665 rhine_interrupt(dev->irq, (void *)dev, NULL);
666 enable_irq(dev->irq);
/* One-shot hardware bring-up: reset the chip, give Rhine-I its extra
   settle time, then reload the EEPROM-controlled bytes the reset cleared. */
670 static void rhine_hw_init(struct net_device *dev, long pioaddr)
672 struct rhine_private *rp = netdev_priv(dev);
674 /* Reset the chip to erase previous misconfiguration. */
675 rhine_chip_reset(dev);
677 /* Rhine-I needs extra time to recuperate before EEPROM reload */
678 if (rp->quirks & rqRhineI)
681 /* Reload EEPROM controlled bytes cleared by soft reset */
682 rhine_reload_eeprom(pioaddr, dev);
/* PCI probe: identify the chip revision, derive quirks/phy_id, map the I/O
   region, bring up the hardware, read the station address, wire net_device
   callbacks, register the interface and locate the MII PHY. Returns 0 or a
   negative errno; error paths unwind via the goto labels at the bottom.
   NOTE(review): many statements (declarations, braces, several branches) are
   elided in this chunk -- comments below describe only the visible code. */
685 static int __devinit rhine_init_one(struct pci_dev *pdev,
686 const struct pci_device_id *ent)
688 struct net_device *dev;
689 struct rhine_private *rp;
695 void __iomem *ioaddr;
704 /* when built into the kernel, we only print version if device is found */
706 static int printed_version;
707 if (!printed_version++)
/* Map PCI revision ID to chip generation, quirks and PHY addressing. */
711 pci_read_config_byte(pdev, PCI_REVISION_ID, &pci_rev);
717 if (pci_rev < VTunknown0) {
721 else if (pci_rev >= VT6102) {
722 quirks = rqWOL | rqForceReset;
723 if (pci_rev < VT6105) {
725 quirks |= rqStatusWBRace; /* Rhine-II exclusive */
728 phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
729 if (pci_rev >= VT6105_B0)
730 quirks |= rq6patterns;
731 if (pci_rev < VT6105M)
734 name = "Rhine III (Management Adapter)";
738 rc = pci_enable_device(pdev);
742 /* this should always be supported */
743 rc = pci_set_dma_mask(pdev, 0xffffffff);
745 printk(KERN_ERR "32-bit PCI DMA addresses not supported by "
/* Sanity-check that both the PIO (BAR 0) and MMIO (BAR 1) regions exist. */
751 if ((pci_resource_len(pdev, 0) < io_size) ||
752 (pci_resource_len(pdev, 1) < io_size)) {
754 printk(KERN_ERR "Insufficient PCI resources, aborting\n");
758 pioaddr = pci_resource_start(pdev, 0);
759 memaddr = pci_resource_start(pdev, 1);
761 pci_set_master(pdev);
763 dev = alloc_etherdev(sizeof(struct rhine_private));
766 printk(KERN_ERR "alloc_etherdev failed\n");
769 SET_MODULE_OWNER(dev);
770 SET_NETDEV_DEV(dev, &pdev->dev);
772 rp = netdev_priv(dev);
774 rp->pioaddr = pioaddr;
777 rc = pci_request_regions(pdev, DRV_NAME);
779 goto err_out_free_netdev;
781 ioaddr = pci_iomap(pdev, bar, io_size);
784 printk(KERN_ERR "ioremap failed for device %s, region 0x%X "
785 "@ 0x%lX\n", pci_name(pdev), io_size, memaddr);
786 goto err_out_free_res;
790 enable_mmio(pioaddr, quirks);
792 /* Check that selected MMIO registers match the PIO ones */
794 while (mmio_verify_registers[i]) {
795 int reg = mmio_verify_registers[i++];
796 unsigned char a = inb(pioaddr+reg);
797 unsigned char b = readb(ioaddr+reg);
800 printk(KERN_ERR "MMIO do not match PIO [%02x] "
801 "(%02x != %02x)\n", reg, a, b);
805 #endif /* USE_MMIO */
807 dev->base_addr = (unsigned long)ioaddr;
810 /* Get chip registers into a sane state */
811 rhine_power_init(dev);
812 rhine_hw_init(dev, pioaddr);
/* Station address was reloaded from EEPROM by rhine_hw_init(); copy it out. */
814 for (i = 0; i < 6; i++)
815 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
817 if (!is_valid_ether_addr(dev->dev_addr)) {
819 printk(KERN_ERR "Invalid MAC address\n");
823 /* For Rhine-I/II, phy_id is loaded from EEPROM */
825 phy_id = ioread8(ioaddr + 0x6C);
827 dev->irq = pdev->irq;
829 spin_lock_init(&rp->lock);
830 rp->mii_if.dev = dev;
831 rp->mii_if.mdio_read = mdio_read;
832 rp->mii_if.mdio_write = mdio_write;
833 rp->mii_if.phy_id_mask = 0x1f;
834 rp->mii_if.reg_num_mask = 0x1f;
836 /* The chip-specific entries in the device structure. */
837 dev->open = rhine_open;
838 dev->hard_start_xmit = rhine_start_tx;
839 dev->stop = rhine_close;
840 dev->get_stats = rhine_get_stats;
841 dev->set_multicast_list = rhine_set_rx_mode;
842 dev->do_ioctl = netdev_ioctl;
843 dev->ethtool_ops = &netdev_ethtool_ops;
844 dev->tx_timeout = rhine_tx_timeout;
845 dev->watchdog_timeo = TX_TIMEOUT;
846 #ifdef CONFIG_NET_POLL_CONTROLLER
847 dev->poll_controller = rhine_poll;
849 if (rp->quirks & rqRhineI)
850 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
852 /* dev->name not defined before register_netdev()! */
853 rc = register_netdev(dev);
857 printk(KERN_INFO "%s: VIA %s at 0x%lx, ",
866 for (i = 0; i < 5; i++)
867 printk("%2.2x:", dev->dev_addr[i]);
868 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);
870 pci_set_drvdata(pdev, dev);
/* Probe the MII PHY: un-isolate it and report status/advertising. */
874 int mii_status = mdio_read(dev, phy_id, 1);
875 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
876 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
877 if (mii_status != 0xffff && mii_status != 0x0000) {
878 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
879 printk(KERN_INFO "%s: MII PHY found at address "
880 "%d, status 0x%4.4x advertising %4.4x "
881 "Link %4.4x.\n", dev->name, phy_id,
882 mii_status, rp->mii_if.advertising,
883 mdio_read(dev, phy_id, 5));
885 /* set IFF_RUNNING */
886 if (mii_status & BMSR_LSTATUS)
887 netif_carrier_on(dev);
889 netif_carrier_off(dev);
893 rp->mii_if.phy_id = phy_id;
/* Error unwinding labels (intervening labels elided in this chunk). */
898 pci_iounmap(pdev, ioaddr);
900 pci_release_regions(pdev);
/* Allocate one DMA-coherent area holding both the Rx and Tx descriptor
   rings (Rx first, Tx immediately after), plus -- on Rhine-I only -- the
   Tx bounce-buffer area needed for its 4-byte alignment requirement.
   Returns 0 on success; frees the ring area again if the bounce-buffer
   allocation fails. */
907 static int alloc_ring(struct net_device* dev)
909 struct rhine_private *rp = netdev_priv(dev);
913 ring = pci_alloc_consistent(rp->pdev,
914 RX_RING_SIZE * sizeof(struct rx_desc) +
915 TX_RING_SIZE * sizeof(struct tx_desc),
918 printk(KERN_ERR "Could not allocate DMA memory.\n");
921 if (rp->quirks & rqRhineI) {
922 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
923 PKT_BUF_SZ * TX_RING_SIZE,
925 if (rp->tx_bufs == NULL) {
926 pci_free_consistent(rp->pdev,
927 RX_RING_SIZE * sizeof(struct rx_desc) +
928 TX_RING_SIZE * sizeof(struct tx_desc),
/* Tx ring starts right after the Rx ring in the shared allocation. */
935 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
936 rp->rx_ring_dma = ring_dma;
937 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
/* Release the combined Rx/Tx descriptor allocation and, when present,
   the Rhine-I Tx bounce-buffer area (counterpart of alloc_ring). */
942 static void free_ring(struct net_device* dev)
944 struct rhine_private *rp = netdev_priv(dev);
946 pci_free_consistent(rp->pdev,
947 RX_RING_SIZE * sizeof(struct rx_desc) +
948 TX_RING_SIZE * sizeof(struct tx_desc),
949 rp->rx_ring, rp->rx_ring_dma);
953 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
954 rp->tx_bufs, rp->tx_bufs_dma);
/* Initialize the Rx descriptor ring: chain all descriptors (last wraps to
   first), then allocate and DMA-map one full-size skb per slot, handing
   ownership of each filled descriptor to the chip via DescOwn. Allocation
   failure mid-loop is tolerated; dirty_rx records how far we got. */
960 static void alloc_rbufs(struct net_device *dev)
962 struct rhine_private *rp = netdev_priv(dev);
966 rp->dirty_rx = rp->cur_rx = 0;
968 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
969 rp->rx_head_desc = &rp->rx_ring[0];
970 next = rp->rx_ring_dma;
972 /* Init the ring entries */
973 for (i = 0; i < RX_RING_SIZE; i++) {
974 rp->rx_ring[i].rx_status = 0;
975 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
976 next += sizeof(struct rx_desc);
977 rp->rx_ring[i].next_desc = cpu_to_le32(next);
978 rp->rx_skbuff[i] = NULL;
980 /* Mark the last entry as wrapping the ring. */
981 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
983 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
984 for (i = 0; i < RX_RING_SIZE; i++) {
985 struct sk_buff *skb = dev_alloc_skb(rp->rx_buf_sz);
986 rp->rx_skbuff[i] = skb;
989 skb->dev = dev; /* Mark as being used by this device. */
991 rp->rx_skbuff_dma[i] =
992 pci_map_single(rp->pdev, skb->tail, rp->rx_buf_sz,
995 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
996 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn); /* Hand slot to the chip. */
998 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
/* Unmap and free every Rx skb, poisoning each descriptor's address field
   so stale use is detectable (counterpart of alloc_rbufs). */
1001 static void free_rbufs(struct net_device* dev)
1003 struct rhine_private *rp = netdev_priv(dev);
1006 /* Free all the skbuffs in the Rx queue. */
1007 for (i = 0; i < RX_RING_SIZE; i++) {
1008 rp->rx_ring[i].rx_status = 0;
1009 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1010 if (rp->rx_skbuff[i]) {
1011 pci_unmap_single(rp->pdev,
1012 rp->rx_skbuff_dma[i],
1013 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1014 dev_kfree_skb(rp->rx_skbuff[i]);
1016 rp->rx_skbuff[i] = NULL;
/* Initialize the Tx descriptor ring: clear status, preset desc_length to
   TXDESC, chain the descriptors (last wraps to first), and point each
   slot's bounce-buffer pointer into the contiguous tx_bufs area. */
1020 static void alloc_tbufs(struct net_device* dev)
1022 struct rhine_private *rp = netdev_priv(dev);
1026 rp->dirty_tx = rp->cur_tx = 0;
1027 next = rp->tx_ring_dma;
1028 for (i = 0; i < TX_RING_SIZE; i++) {
1029 rp->tx_skbuff[i] = NULL;
1030 rp->tx_ring[i].tx_status = 0;
1031 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1032 next += sizeof(struct tx_desc);
1033 rp->tx_ring[i].next_desc = cpu_to_le32(next);
1034 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1036 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
/* Reset every Tx descriptor and release any in-flight skbs, unmapping
   their DMA only when a mapping exists (Rhine-I bounce-buffer slots have
   tx_skbuff_dma[i] == 0 and were never mapped individually). */
1040 static void free_tbufs(struct net_device* dev)
1042 struct rhine_private *rp = netdev_priv(dev);
1045 for (i = 0; i < TX_RING_SIZE; i++) {
1046 rp->tx_ring[i].tx_status = 0;
1047 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1048 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1049 if (rp->tx_skbuff[i]) {
1050 if (rp->tx_skbuff_dma[i]) {
1051 pci_unmap_single(rp->pdev,
1052 rp->tx_skbuff_dma[i],
1053 rp->tx_skbuff[i]->len,
1056 dev_kfree_skb(rp->tx_skbuff[i]);
1058 rp->tx_skbuff[i] = NULL;
1059 rp->tx_buf[i] = NULL;
/* Refresh link/duplex state via the generic MII helper, then mirror the
   negotiated duplex into the chip's Cmd1FDuplex bit. */
1063 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1065 struct rhine_private *rp = netdev_priv(dev);
1066 void __iomem *ioaddr = rp->base;
1068 mii_check_media(&rp->mii_if, debug, init_media);
1070 if (rp->mii_if.full_duplex)
1071 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1074 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
/* Program the chip for operation: station address, bus config, FIFO
   thresholds, ring base pointers, Rx mode, interrupt mask -- then start
   the Tx/Rx engines and sync the duplex setting with the PHY. */
1078 static void init_registers(struct net_device *dev)
1080 struct rhine_private *rp = netdev_priv(dev);
1081 void __iomem *ioaddr = rp->base;
1084 for (i = 0; i < 6; i++)
1085 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1087 /* Initialize other registers. */
1088 iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1089 /* Configure initial FIFO thresholds. */
1090 iowrite8(0x20, ioaddr + TxConfig);
1091 rp->tx_thresh = 0x20;
1092 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1094 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1095 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1097 rhine_set_rx_mode(dev);
1099 /* Enable interrupts by setting the interrupt mask. */
1100 iowrite16(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
1101 IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
1102 IntrTxDone | IntrTxError | IntrTxUnderrun |
1103 IntrPCIErr | IntrStatsMax | IntrLinkChange,
1104 ioaddr + IntrEnable);
/* 16-bit write covers both ChipCmd and ChipCmd1; Cmd1NoTxPoll lands in the high byte. */
1106 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1108 rhine_check_media(dev, 1);
1111 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1112 static void rhine_enable_linkmon(void __iomem *ioaddr)
/* Stop any command in flight, point the MII unit at BMSR, trigger one read. */
1114 iowrite8(0, ioaddr + MIICmd);
1115 iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1116 iowrite8(0x80, ioaddr + MIICmd);
1118 RHINE_WAIT_FOR((ioread8(ioaddr + MIIRegAddr) & 0x20));
/* Bit 0x40 in MIIRegAddr switches the unit into continuous auto-poll mode. */
1120 iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1123 /* Disable MII link status auto-polling (required for MDIO access) */
1124 static void rhine_disable_linkmon(void __iomem *ioaddr, u32 quirks)
1126 iowrite8(0, ioaddr + MIICmd);
1128 if (quirks & rqRhineI) {
1129 iowrite8(0x01, ioaddr + MIIRegAddr); // MII_BMSR
1131 /* Can be called from ISR. Evil. */
1134 /* 0x80 must be set immediately before turning it off */
1135 iowrite8(0x80, ioaddr + MIICmd);
1137 RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x20);
1139 /* Heh. Now clear 0x80 again. */
1140 iowrite8(0, ioaddr + MIICmd);
/* Non-Rhine-I path (else branch elided here): wait for auto-poll idle via bit 0x80. */
1143 RHINE_WAIT_FOR(ioread8(ioaddr + MIIRegAddr) & 0x80);
1146 /* Read and write over the MII Management Data I/O (MDIO) interface. */
/* Read one 16-bit PHY register. Auto-polling must be suspended around the
   access and is restored before returning. */
1148 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1150 struct rhine_private *rp = netdev_priv(dev);
1151 void __iomem *ioaddr = rp->base;
1154 rhine_disable_linkmon(ioaddr, rp->quirks);
1156 /* rhine_disable_linkmon already cleared MIICmd */
1157 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1158 iowrite8(regnum, ioaddr + MIIRegAddr);
1159 iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
1160 RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x40));
1161 result = ioread16(ioaddr + MIIData);
1163 rhine_enable_linkmon(ioaddr);
/* Write one 16-bit PHY register; mirror image of mdio_read (auto-polling
   suspended for the duration, restored before returning). */
1167 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1169 struct rhine_private *rp = netdev_priv(dev);
1170 void __iomem *ioaddr = rp->base;
1172 rhine_disable_linkmon(ioaddr, rp->quirks);
1174 /* rhine_disable_linkmon already cleared MIICmd */
1175 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1176 iowrite8(regnum, ioaddr + MIIRegAddr);
1177 iowrite16(value, ioaddr + MIIData);
1178 iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
1179 RHINE_WAIT_FOR(!(ioread8(ioaddr + MIICmd) & 0x20));
1181 rhine_enable_linkmon(ioaddr);
/*
 * rhine_open - net_device open() callback.
 * Requests the (shared) PCI interrupt line, allocates the descriptor
 * rings, resets and programs the chip, then starts the Tx queue.
 * Returns 0 on success or a negative errno (error paths elided here).
 * NOTE(review): interior lines are elided in this excerpt.
 */
1184 static int rhine_open(struct net_device *dev)
1186 struct rhine_private *rp = netdev_priv(dev);
1187 void __iomem *ioaddr = rp->base;
/* SA_SHIRQ: the PCI IRQ line may be shared with other devices. */
1190 rc = request_irq(rp->pdev->irq, &rhine_interrupt, SA_SHIRQ, dev->name,
1196 printk(KERN_DEBUG "%s: rhine_open() irq %d.\n",
1197 dev->name, rp->pdev->irq);
1199 rc = alloc_ring(dev);
/* Full hardware (re)initialization before accepting traffic. */
1204 rhine_chip_reset(dev);
1205 init_registers(dev);
1207 printk(KERN_DEBUG "%s: Done rhine_open(), status %4.4x "
1208 "MII status: %4.4x.\n",
1209 dev->name, ioread16(ioaddr + ChipCmd),
1210 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1212 netif_start_queue(dev);
/*
 * rhine_tx_timeout - net_device tx_timeout() callback.
 * Called by the network stack when transmission stalls: logs chip and
 * PHY status, resets and reprograms the hardware under the private lock
 * (with the IRQ disabled to keep the ISR out), then restarts the queue.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1219 static void rhine_tx_timeout(struct net_device *dev)
1220 struct rhine_private *rp = netdev_priv(dev);
1222 void __iomem *ioaddr = rp->base;
1222 printk(KERN_WARNING "%s: Transmit timed out, status %4.4x, PHY status "
1223 "%4.4x, resetting...\n",
1224 dev->name, ioread16(ioaddr + IntrStatus),
1225 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1227 /* protect against concurrent rx interrupts */
1228 disable_irq(rp->pdev->irq);
1230 spin_lock(&rp->lock);
1232 /* clear all descriptors */
1238 /* Reinitialize the hardware. */
1239 rhine_chip_reset(dev);
1240 init_registers(dev);
1242 spin_unlock(&rp->lock);
1243 enable_irq(rp->pdev->irq);
/* Reset the watchdog timestamp, account the error, resume the queue. */
1245 dev->trans_start = jiffies;
1246 rp->stats.tx_errors++;
1247 netif_wake_queue(dev);
/*
 * rhine_start_tx - net_device hard_start_xmit() callback.
 * Queues one skb on the Tx descriptor ring.  Rhine-I chips cannot DMA
 * from unaligned/fragmented/checksum-offloaded skbs, so those are copied
 * into a per-slot aligned bounce buffer first; all other cases are
 * pci_map_single()'d in place.  Ownership (DescOwn) is handed to the
 * chip last, under the private spinlock.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1250 static int rhine_start_tx(struct sk_buff *skb, struct net_device *dev)
1252 struct rhine_private *rp = netdev_priv(dev);
1253 void __iomem *ioaddr = rp->base;
1256 /* Caution: the write order is important here, set the field
1257 with the "ownership" bits last. */
1259 /* Calculate the next Tx descriptor entry. */
1260 entry = rp->cur_tx % TX_RING_SIZE;
/* Pad runt frames up to the Ethernet minimum (NULL check elided). */
1262 if (skb->len < ETH_ZLEN) {
1263 skb = skb_padto(skb, ETH_ZLEN);
1268 rp->tx_skbuff[entry] = skb;
/* Rhine-I quirk: needs 4-byte-aligned, linear, pre-checksummed data. */
1270 if ((rp->quirks & rqRhineI) &&
1271 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_HW)) {
1272 /* Must use alignment buffer. */
1273 if (skb->len > PKT_BUF_SZ) {
1274 /* packet too long, drop it */
1276 rp->tx_skbuff[entry] = NULL;
1277 rp->stats.tx_dropped++;
/* Copy (and finish the checksum) into the bounce buffer; dma handle 0
 * marks "bounce buffer used, nothing to unmap" for rhine_tx(). */
1280 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1281 rp->tx_skbuff_dma[entry] = 0;
1282 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1283 (rp->tx_buf[entry] -
/* Normal path: map the skb data directly for DMA. */
1286 rp->tx_skbuff_dma[entry] =
1287 pci_map_single(rp->pdev, skb->data, skb->len,
1289 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1292 rp->tx_ring[entry].desc_length =
1293 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1296 spin_lock_irq(&rp->lock);
/* Hand the descriptor to the chip -- must be the last descriptor write. */
1298 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1303 /* Non-x86 Todo: explicitly flush cache lines here. */
1305 /* Wake the potentially-idle transmit channel */
1306 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
/* Throttle the stack when the ring is full. */
1310 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1311 netif_stop_queue(dev);
1313 dev->trans_start = jiffies;
1315 spin_unlock_irq(&rp->lock);
1318 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
1319 dev->name, rp->cur_tx-1, entry);
1324 /* The interrupt handler does all of the Rx thread work and cleans up
1325 after the Tx thread. */
/*
 * rhine_interrupt - top-level interrupt service routine.
 * Loops while the chip reports pending interrupt sources, acking each
 * batch first, then dispatching: Rx events to rhine_rx(), Tx completion
 * to rhine_tx() (after waiting for the Tx engine to stop on Tx errors),
 * and all abnormal conditions to rhine_error().  Bails out after
 * max_interrupt_work iterations to avoid starving the system.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1326 static irqreturn_t rhine_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
1328 struct net_device *dev = dev_instance;
1329 struct rhine_private *rp = netdev_priv(dev);
1330 void __iomem *ioaddr = rp->base;
1332 int boguscnt = max_interrupt_work;
1335 while ((intr_status = get_intr_status(dev))) {
1338 /* Acknowledge all of the current interrupt sources ASAP. */
1339 if (intr_status & IntrTxDescRace)
1340 iowrite8(0x08, ioaddr + IntrStatus2);
1341 iowrite16(intr_status & 0xffff, ioaddr + IntrStatus);
1345 printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n",
1346 dev->name, intr_status);
1348 if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
1349 IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
1352 if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
1353 if (intr_status & IntrTxErrSummary) {
1354 /* Avoid scavenging before Tx engine turned off */
1355 RHINE_WAIT_FOR(!(ioread8(ioaddr+ChipCmd) & CmdTxOn));
1357 ioread8(ioaddr+ChipCmd) & CmdTxOn)
1358 printk(KERN_WARNING "%s: "
1359 "rhine_interrupt() Tx engine"
1360 "still on.\n", dev->name);
1365 /* Abnormal error summary/uncommon events handlers. */
1366 if (intr_status & (IntrPCIErr | IntrLinkChange |
1367 IntrStatsMax | IntrTxError | IntrTxAborted |
1368 IntrTxUnderrun | IntrTxDescRace))
1369 rhine_error(dev, intr_status);
/* Safety valve: don't loop forever if interrupts keep arriving. */
1371 if (--boguscnt < 0) {
1372 printk(KERN_WARNING "%s: Too much work at interrupt, "
1374 dev->name, intr_status);
1380 printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n",
1381 dev->name, ioread16(ioaddr + IntrStatus));
1382 return IRQ_RETVAL(handled);
1385 /* This routine is logically part of the interrupt handler, but isolated
/*
 * rhine_tx - scavenge completed Tx descriptors (runs in IRQ context).
 * Walks the ring from dirty_tx to cur_tx, stopping at the first
 * descriptor still owned by the chip.  Accounts errors/collisions,
 * unmaps DMA (unless the Rhine-I bounce buffer was used, marked by a 0
 * dma handle), frees the skbs and wakes the queue when space frees up.
 * On Tx FIFO errors the descriptor is handed back to the chip for retry.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1387 static void rhine_tx(struct net_device *dev)
1389 struct rhine_private *rp = netdev_priv(dev);
1390 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1392 spin_lock(&rp->lock);
1394 /* find and cleanup dirty tx descriptors */
1395 while (rp->dirty_tx != rp->cur_tx) {
1396 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1398 printk(KERN_DEBUG " Tx scavenge %d status %8.8x.\n",
/* Chip still owns this descriptor: nothing more to scavenge. */
1400 if (txstatus & DescOwn)
/* Bit 15 set = transmit error; decode the individual error bits. */
1402 if (txstatus & 0x8000) {
1404 printk(KERN_DEBUG "%s: Transmit error, "
1405 "Tx status %8.8x.\n",
1406 dev->name, txstatus);
1407 rp->stats.tx_errors++;
1408 if (txstatus & 0x0400) rp->stats.tx_carrier_errors++;
1409 if (txstatus & 0x0200) rp->stats.tx_window_errors++;
1410 if (txstatus & 0x0100) rp->stats.tx_aborted_errors++;
1411 if (txstatus & 0x0080) rp->stats.tx_heartbeat_errors++;
1412 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1413 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1414 rp->stats.tx_fifo_errors++;
/* Give the descriptor back to the chip and retry this skb later. */
1415 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1416 break; /* Keep the skb - we try again */
1418 /* Transmitter restarted in 'abnormal' handler. */
/* Rhine-I reports the collision count in bits 3..6, later chips in 0..3. */
1420 if (rp->quirks & rqRhineI)
1421 rp->stats.collisions += (txstatus >> 3) & 0x0F;
1423 rp->stats.collisions += txstatus & 0x0F;
1425 printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n",
1426 (txstatus >> 3) & 0xF,
1428 rp->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1429 rp->stats.tx_packets++;
1431 /* Free the original skb. */
1432 if (rp->tx_skbuff_dma[entry]) {
1433 pci_unmap_single(rp->pdev,
1434 rp->tx_skbuff_dma[entry],
1435 rp->tx_skbuff[entry]->len,
1438 dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1439 rp->tx_skbuff[entry] = NULL;
1440 entry = (++rp->dirty_tx) % TX_RING_SIZE;
/* Re-open the queue once there's comfortable headroom in the ring. */
1442 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1443 netif_wake_queue(dev);
1445 spin_unlock(&rp->lock);
1448 /* This routine is logically part of the interrupt handler, but isolated
1449 for clarity and better register allocation. */
/*
 * rhine_rx - receive path (runs from the interrupt handler).
 * Processes every descriptor the chip has handed back (DescOwn clear):
 * accounts errors, and for good frames either copies small packets
 * (< rx_copybreak) into a freshly allocated skb -- leaving the ring
 * buffer in place -- or unmaps and passes the ring skb up directly.
 * Finally refills any ring slots whose skbs were consumed and returns
 * ownership of those descriptors to the chip.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1450 static void rhine_rx(struct net_device *dev)
1452 struct rhine_private *rp = netdev_priv(dev);
1453 int entry = rp->cur_rx % RX_RING_SIZE;
1454 int boguscnt = rp->dirty_rx + RX_RING_SIZE - rp->cur_rx;
1457 printk(KERN_DEBUG "%s: rhine_rx(), entry %d status %8.8x.\n",
1459 le32_to_cpu(rp->rx_head_desc->rx_status));
1462 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1463 while (!(rp->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
1464 struct rx_desc *desc = rp->rx_head_desc;
1465 u32 desc_status = le32_to_cpu(desc->rx_status);
/* Frame length lives in the upper 16 bits of the status word. */
1466 int data_size = desc_status >> 16;
1469 printk(KERN_DEBUG " rhine_rx() status is %8.8x.\n",
/* Anything other than a whole, error-free packet is accounted and dropped. */
1473 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1474 if ((desc_status & RxWholePkt) != RxWholePkt) {
1475 printk(KERN_WARNING "%s: Oversized Ethernet "
1476 "frame spanned multiple buffers, entry "
1477 "%#x length %d status %8.8x!\n",
1478 dev->name, entry, data_size,
1480 printk(KERN_WARNING "%s: Oversized Ethernet "
1481 "frame %p vs %p.\n", dev->name,
1482 rp->rx_head_desc, &rp->rx_ring[entry]);
1483 rp->stats.rx_length_errors++;
1484 } else if (desc_status & RxErr) {
1485 /* There was an error. */
1487 printk(KERN_DEBUG " rhine_rx() Rx "
1488 "error was %8.8x.\n",
1490 rp->stats.rx_errors++;
1491 if (desc_status & 0x0030) rp->stats.rx_length_errors++;
1492 if (desc_status & 0x0048) rp->stats.rx_fifo_errors++;
1493 if (desc_status & 0x0004) rp->stats.rx_frame_errors++;
1494 if (desc_status & 0x0002) {
1495 /* this can also be updated outside the interrupt handler */
1496 spin_lock(&rp->lock);
1497 rp->stats.rx_crc_errors++;
1498 spin_unlock(&rp->lock);
1502 struct sk_buff *skb;
1503 /* Length should omit the CRC */
1504 int pkt_len = data_size - 4;
1506 /* Check if the packet is long enough to accept without
1507 copying to a minimally-sized skbuff. */
1508 if (pkt_len < rx_copybreak &&
1509 (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
1511 skb_reserve(skb, 2); /* 16 byte align the IP header */
/* Copybreak path: sync for CPU, copy out, hand buffer back to device. */
1512 pci_dma_sync_single_for_cpu(rp->pdev,
1513 rp->rx_skbuff_dma[entry],
1515 PCI_DMA_FROMDEVICE);
1517 eth_copy_and_sum(skb,
1518 rp->rx_skbuff[entry]->tail,
1520 skb_put(skb, pkt_len);
1521 pci_dma_sync_single_for_device(rp->pdev,
1522 rp->rx_skbuff_dma[entry],
1524 PCI_DMA_FROMDEVICE);
/* Large-packet path: consume the ring skb itself. */
1526 skb = rp->rx_skbuff[entry];
1528 printk(KERN_ERR "%s: Inconsistent Rx "
1529 "descriptor chain.\n",
1533 rp->rx_skbuff[entry] = NULL;
1534 skb_put(skb, pkt_len);
1535 pci_unmap_single(rp->pdev,
1536 rp->rx_skbuff_dma[entry],
1538 PCI_DMA_FROMDEVICE);
1540 skb->protocol = eth_type_trans(skb, dev);
1542 dev->last_rx = jiffies;
1543 rp->stats.rx_bytes += pkt_len;
1544 rp->stats.rx_packets++;
1546 entry = (++rp->cur_rx) % RX_RING_SIZE;
1547 rp->rx_head_desc = &rp->rx_ring[entry];
1550 /* Refill the Rx ring buffers. */
1551 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1552 struct sk_buff *skb;
1553 entry = rp->dirty_rx % RX_RING_SIZE;
1554 if (rp->rx_skbuff[entry] == NULL) {
1555 skb = dev_alloc_skb(rp->rx_buf_sz);
1556 rp->rx_skbuff[entry] = skb;
1558 break; /* Better luck next round. */
1559 skb->dev = dev; /* Mark as being used by this device. */
1560 rp->rx_skbuff_dma[entry] =
1561 pci_map_single(rp->pdev, skb->tail,
1563 PCI_DMA_FROMDEVICE);
1564 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
/* Hand the refilled descriptor back to the chip last. */
1566 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1571 * Clears the "tally counters" for CRC errors and missed frames(?).
1572 * It has been reported that some chips need a write of 0 to clear
1573 * these, for others the counters are set to 1 when written to and
1574 * instead cleared when read. So we clear them both ways ...
/*
 * clear_tally_counters - reset the hardware CRC-error and missed-frame
 * tallies.  Per the comment above, some chips clear on write-of-zero
 * and others clear on read, so both are done.
 */
1576 static inline void clear_tally_counters(void __iomem *ioaddr)
1578 iowrite32(0, ioaddr + RxMissed);
1579 ioread16(ioaddr + RxCRCErrs);
1580 ioread16(ioaddr + RxMissed);
/*
 * rhine_restart_tx - restart the Tx engine after an abnormal stop.
 * Called from rhine_error(); only restarts when no *new* Tx error has
 * been raised since, otherwise it defers to the next ISR pass.  Points
 * the chip at the descriptor we know is next (dirty_tx) rather than
 * trusting its internal pointer.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1583 static void rhine_restart_tx(struct net_device *dev) {
1584 struct rhine_private *rp = netdev_priv(dev);
1585 void __iomem *ioaddr = rp->base;
1586 int entry = rp->dirty_tx % TX_RING_SIZE;
1590 * If new errors occurred, we need to sort them out before doing Tx.
1591 * In that case the ISR will be back here RSN anyway.
1593 intr_status = get_intr_status(dev);
1595 if ((intr_status & IntrTxErrSummary) == 0) {
1597 /* We know better than the chip where it should continue. */
1598 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1599 ioaddr + TxRingPtr);
/* Turn the Tx engine back on and demand-poll the ring. */
1601 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1603 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1608 /* This should never happen */
1610 printk(KERN_WARNING "%s: rhine_restart_tx() "
1611 "Another error occured %8.8x.\n",
1612 dev->name, intr_status);
/*
 * rhine_error - handle abnormal/uncommon interrupt sources.
 * Runs under the private lock from the ISR: services link changes,
 * folds the hardware tally counters into the stats, bumps the Tx FIFO
 * threshold on underrun/unspecified Tx errors, and restarts the Tx
 * engine for the error classes that stop it.  Anything unrecognized is
 * logged as "Something Wicked".
 * NOTE(review): interior lines are elided in this excerpt.
 */
1617 static void rhine_error(struct net_device *dev, int intr_status)
1619 struct rhine_private *rp = netdev_priv(dev);
1620 void __iomem *ioaddr = rp->base;
1622 spin_lock(&rp->lock);
1624 if (intr_status & IntrLinkChange)
1625 rhine_check_media(dev, 0);
1626 if (intr_status & IntrStatsMax) {
1627 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1628 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1629 clear_tally_counters(ioaddr);
1631 if (intr_status & IntrTxAborted) {
1633 printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n",
1634 dev->name, intr_status);
1636 if (intr_status & IntrTxUnderrun) {
/* Raise the Tx FIFO threshold (0x20 steps, capped at 0xE0) to avoid
 * repeated underruns. */
1637 if (rp->tx_thresh < 0xE0)
1638 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1640 printk(KERN_INFO "%s: Transmitter underrun, Tx "
1641 "threshold now %2.2x.\n",
1642 dev->name, rp->tx_thresh);
1644 if (intr_status & IntrTxDescRace) {
1646 printk(KERN_INFO "%s: Tx descriptor write-back race.\n",
/* Unspecified Tx error (none of the known bits set): same threshold bump. */
1649 if ((intr_status & IntrTxError) &&
1650 (intr_status & (IntrTxAborted |
1651 IntrTxUnderrun | IntrTxDescRace)) == 0) {
1652 if (rp->tx_thresh < 0xE0) {
1653 iowrite8(rp->tx_thresh += 0x20, ioaddr + TxConfig);
1656 printk(KERN_INFO "%s: Unspecified error. Tx "
1657 "threshold now %2.2x.\n",
1658 dev->name, rp->tx_thresh);
1660 if (intr_status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
1662 rhine_restart_tx(dev);
1664 if (intr_status & ~(IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
1665 IntrTxError | IntrTxAborted | IntrNormalSummary |
1668 printk(KERN_ERR "%s: Something Wicked happened! "
1669 "%8.8x.\n", dev->name, intr_status);
1672 spin_unlock(&rp->lock);
/*
 * rhine_get_stats - net_device get_stats() callback.
 * Folds the live hardware tally counters into the software stats under
 * the private lock (IRQ-safe, since the ISR also updates them), clears
 * the hardware counters, and returns the accumulated stats.
 * NOTE(review): the return statement is elided in this excerpt.
 */
1675 static struct net_device_stats *rhine_get_stats(struct net_device *dev)
1677 struct rhine_private *rp = netdev_priv(dev);
1678 void __iomem *ioaddr = rp->base;
1679 unsigned long flags;
1681 spin_lock_irqsave(&rp->lock, flags);
1682 rp->stats.rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
1683 rp->stats.rx_missed_errors += ioread16(ioaddr + RxMissed);
1684 clear_tally_counters(ioaddr);
1685 spin_unlock_irqrestore(&rp->lock, flags);
/*
 * rhine_set_rx_mode - net_device set_multicast_list() callback.
 * Programs the receive filter: promiscuous mode opens the multicast
 * filter completely, too-many/ALLMULTI accepts all multicasts, and
 * otherwise a 64-bit CRC-based hash filter is built from the device's
 * multicast list.  The resulting mode bits are written to RxConfig
 * together with the current Rx FIFO threshold.
 * NOTE(review): interior lines (rx_mode assignments) are elided here.
 */
1690 static void rhine_set_rx_mode(struct net_device *dev)
1692 struct rhine_private *rp = netdev_priv(dev);
1693 void __iomem *ioaddr = rp->base;
1694 u32 mc_filter[2]; /* Multicast hash filter */
1695 u8 rx_mode; /* Note: 0x02=accept runt, 0x01=accept errs */
1697 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1698 /* Unconditionally log net taps. */
1699 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
1702 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1703 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1704 } else if ((dev->mc_count > multicast_filter_limit)
1705 || (dev->flags & IFF_ALLMULTI)) {
1706 /* Too many to match, or accept all multicasts. */
1707 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
1708 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
1711 struct dev_mc_list *mclist;
1713 memset(mc_filter, 0, sizeof(mc_filter));
1714 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
1715 i++, mclist = mclist->next) {
/* Top 6 bits of the Ethernet CRC select one of 64 hash-filter bits. */
1716 int bit_nr = ether_crc(ETH_ALEN, mclist->dmi_addr) >> 26;
1718 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
1720 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
1721 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
1724 iowrite8(rp->rx_thresh | rx_mode, ioaddr + RxConfig);
/*
 * netdev_get_drvinfo - ethtool get_drvinfo: report driver name, version
 * and the PCI bus address of the device.
 */
1727 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
1729 struct rhine_private *rp = netdev_priv(dev);
1731 strcpy(info->driver, DRV_NAME);
1732 strcpy(info->version, DRV_VERSION);
1733 strcpy(info->bus_info, pci_name(rp->pdev));
/*
 * netdev_get_settings - ethtool get_settings: delegate to the generic
 * MII helper under the private lock (it issues MDIO reads).
 */
1736 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1738 struct rhine_private *rp = netdev_priv(dev);
1741 spin_lock_irq(&rp->lock);
1742 rc = mii_ethtool_gset(&rp->mii_if, cmd);
1743 spin_unlock_irq(&rp->lock);
/*
 * netdev_set_settings - ethtool set_settings: delegate to the generic
 * MII helper under the private lock (it issues MDIO writes).
 */
1750 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
1750 struct rhine_private *rp = netdev_priv(dev);
1753 spin_lock_irq(&rp->lock);
1754 rc = mii_ethtool_sset(&rp->mii_if, cmd);
1755 spin_unlock_irq(&rp->lock);
/* netdev_nway_reset - ethtool nway_reset: restart PHY autonegotiation. */
1762 static int netdev_nway_reset(struct net_device *dev)
1762 struct rhine_private *rp = netdev_priv(dev);
1764 return mii_nway_restart(&rp->mii_if);
/* netdev_get_link - ethtool get_link: report link state via the MII helper. */
1769 static u32 netdev_get_link(struct net_device *dev)
1769 struct rhine_private *rp = netdev_priv(dev);
1771 return mii_link_ok(&rp->mii_if);
/* netdev_get_msglevel - ethtool get_msglevel (body elided in this excerpt;
 * presumably returns the driver's debug level -- confirm in full source). */
1774 static u32 netdev_get_msglevel(struct net_device *dev)
/* netdev_set_msglevel - ethtool set_msglevel (body elided in this excerpt;
 * presumably stores the driver's debug level -- confirm in full source). */
1779 static void netdev_set_msglevel(struct net_device *dev, u32 value)
/*
 * rhine_get_wol - ethtool get_wol: report supported and currently
 * enabled Wake-on-LAN modes.  No-op (early return, elided) on chips
 * without the rqWOL quirk capability.
 */
1784 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1786 struct rhine_private *rp = netdev_priv(dev);
1788 if (!(rp->quirks & rqWOL))
1791 spin_lock_irq(&rp->lock);
1792 wol->supported = WAKE_PHY | WAKE_MAGIC |
1793 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
1794 wol->wolopts = rp->wolopts;
1795 spin_unlock_irq(&rp->lock);
/*
 * rhine_set_wol - ethtool set_wol: validate and store the requested
 * Wake-on-LAN modes (applied to hardware later, in rhine_shutdown()).
 * Rejects chips without rqWOL and any unsupported option bits (error
 * returns elided in this excerpt).
 */
1800 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
1800 struct rhine_private *rp = netdev_priv(dev);
1801 u32 support = WAKE_PHY | WAKE_MAGIC |
1802 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
1804 if (!(rp->quirks & rqWOL))
1807 if (wol->wolopts & ~support)
1810 spin_lock_irq(&rp->lock);
1811 rp->wolopts = wol->wolopts;
1812 spin_unlock_irq(&rp->lock);
/* ethtool operations table wiring the handlers defined above. */
1817 static struct ethtool_ops netdev_ethtool_ops = {
1818 .get_drvinfo = netdev_get_drvinfo,
1819 .get_settings = netdev_get_settings,
1820 .set_settings = netdev_set_settings,
1821 .nway_reset = netdev_nway_reset,
1822 .get_link = netdev_get_link,
1823 .get_msglevel = netdev_get_msglevel,
1824 .set_msglevel = netdev_set_msglevel,
1825 .get_wol = rhine_get_wol,
1826 .set_wol = rhine_set_wol,
1827 .get_sg = ethtool_op_get_sg,
1828 .get_tx_csum = ethtool_op_get_tx_csum,
/*
 * netdev_ioctl - net_device do_ioctl() callback: forward MII ioctls to
 * the generic handler under the private lock.  Rejected (return elided)
 * when the interface is down.
 */
1831 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
1833 struct rhine_private *rp = netdev_priv(dev);
1836 if (!netif_running(dev))
1839 spin_lock_irq(&rp->lock);
1840 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
1841 spin_unlock_irq(&rp->lock);
/*
 * rhine_close - net_device stop() callback.
 * Stops the queue, quiesces the chip (loopback mode, interrupts masked,
 * Tx/Rx engines stopped) under the lock, then releases the IRQ.  Ring
 * teardown is elided in this excerpt.
 */
1848 static int rhine_close(struct net_device *dev)
1848 struct rhine_private *rp = netdev_priv(dev);
1849 void __iomem *ioaddr = rp->base;
1851 spin_lock_irq(&rp->lock);
1853 netif_stop_queue(dev);
1856 printk(KERN_DEBUG "%s: Shutting down ethercard, "
1857 "status was %4.4x.\n",
1858 dev->name, ioread16(ioaddr + ChipCmd));
1860 /* Switch to loopback mode to avoid hardware races. */
1861 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
1863 /* Disable interrupts by clearing the interrupt mask. */
1864 iowrite16(0x0000, ioaddr + IntrEnable);
1866 /* Stop the chip's Tx and Rx processes. */
1867 iowrite16(CmdStop, ioaddr + ChipCmd);
1869 spin_unlock_irq(&rp->lock);
1871 free_irq(rp->pdev->irq, dev);
/*
 * rhine_remove_one - PCI remove() callback: unregister the netdev,
 * unmap the register window, release PCI resources and drop drvdata.
 */
1880 static void __devexit rhine_remove_one(struct pci_dev *pdev)
1882 struct net_device *dev = pci_get_drvdata(pdev);
1883 struct rhine_private *rp = netdev_priv(dev);
1885 unregister_netdev(dev);
1887 pci_iounmap(pdev, rp->base);
1888 pci_release_regions(pdev);
1891 pci_disable_device(pdev);
1892 pci_set_drvdata(pdev, NULL);
/*
 * rhine_shutdown - driver-model shutdown hook (also used on suspend).
 * Arms the Wake-on-LAN events that were selected via rhine_set_wol()
 * (magic packet, broadcast/multicast, link change, unicast), enables
 * legacy WOL for old boards, and puts the chip into power state D3.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1895 static void rhine_shutdown (struct device *gendev)
1897 struct pci_dev *pdev = to_pci_dev(gendev);
1898 struct net_device *dev = pci_get_drvdata(pdev);
1899 struct rhine_private *rp = netdev_priv(dev);
1900 void __iomem *ioaddr = rp->base;
1902 rhine_power_init(dev);
1904 /* Make sure we use pattern 0, 1 and not 4, 5 */
1905 if (rp->quirks & rq6patterns)
1906 iowrite8(0x04, ioaddr + 0xA7);
1908 if (rp->wolopts & WAKE_MAGIC) {
1909 iowrite8(WOLmagic, ioaddr + WOLcrSet);
1911 * Turn EEPROM-controlled wake-up back on -- some hardware may
1912 * not cooperate otherwise.
1914 iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
1917 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
1918 iowrite8(WOLbmcast, ioaddr + WOLcgSet);
1920 if (rp->wolopts & WAKE_PHY)
1921 iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
1923 if (rp->wolopts & WAKE_UCAST)
1924 iowrite8(WOLucast, ioaddr + WOLcrSet);
1927 /* Enable legacy WOL (for old motherboards) */
1928 iowrite8(0x01, ioaddr + PwcfgSet);
1929 iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
1932 /* Hit power state D3 (sleep) */
1933 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
1935 /* TODO: Check use of pci_enable_wake() */
/*
 * rhine_suspend - PCI suspend() callback.
 * Detaches the interface, saves PCI config space, performs the WOL/D3
 * shutdown sequence under the lock, and releases the IRQ.  A no-op
 * (early return, elided) when the interface is down.
 */
1940 static int rhine_suspend(struct pci_dev *pdev, u32 state)
1942 struct net_device *dev = pci_get_drvdata(pdev);
1943 struct rhine_private *rp = netdev_priv(dev);
1944 unsigned long flags;
1946 if (!netif_running(dev))
1949 netif_device_detach(dev);
1950 pci_save_state(pdev);
1952 spin_lock_irqsave(&rp->lock, flags);
1953 rhine_shutdown(&pdev->dev);
1954 spin_unlock_irqrestore(&rp->lock, flags);
1956 free_irq(dev->irq, dev);
/*
 * rhine_resume - PCI resume() callback.
 * Re-requests the shared IRQ, restores power state (D0) and PCI config
 * space, re-enables MMIO, reruns power/register init under the lock,
 * and reattaches the interface.  A no-op (early return, elided) when
 * the interface was down at suspend time.
 * NOTE(review): interior lines are elided in this excerpt.
 */
1962 static int rhine_resume(struct pci_dev *pdev)
1962 struct net_device *dev = pci_get_drvdata(pdev);
1963 struct rhine_private *rp = netdev_priv(dev);
1964 unsigned long flags;
1967 if (!netif_running(dev))
1970 if (request_irq(dev->irq, rhine_interrupt, SA_SHIRQ, dev->name, dev))
1971 printk(KERN_ERR "via-rhine %s: request_irq failed\n", dev->name);
1973 ret = pci_set_power_state(pdev, PCI_D0);
1975 printk(KERN_INFO "%s: Entering power state D0 %s (%d).\n",
1976 dev->name, ret ? "failed" : "succeeded", ret);
1978 pci_restore_state(pdev);
1980 spin_lock_irqsave(&rp->lock, flags);
1982 enable_mmio(rp->pioaddr, rp->quirks);
1984 rhine_power_init(dev);
1989 init_registers(dev);
1990 spin_unlock_irqrestore(&rp->lock, flags);
1992 netif_device_attach(dev);
1996 #endif /* CONFIG_PM */
/* PCI driver registration table; suspend/resume only under CONFIG_PM. */
1998 static struct pci_driver rhine_driver = {
2000 .id_table = rhine_pci_tbl,
2001 .probe = rhine_init_one,
2002 .remove = __devexit_p(rhine_remove_one),
2004 .suspend = rhine_suspend,
2005 .resume = rhine_resume,
2006 #endif /* CONFIG_PM */
2008 .shutdown = rhine_shutdown,
/* Module init: register the PCI driver (probe prints per-device info). */
2013 static int __init rhine_init(void)
2015 /* when a module, this is printed whether or not devices are found in probe */
2019 return pci_module_init(&rhine_driver);
/* Module exit: unregister the PCI driver. */
2023 static void __exit rhine_cleanup(void)
2025 pci_unregister_driver(&rhine_driver);
/* Module entry/exit points. */
2029 module_init(rhine_init);
2030 module_exit(rhine_cleanup);