1 /* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
3 Written 1998-2001 by Donald Becker.
5 Current Maintainer: Roger Luethi <rl@hellgate.ch>
7 This software may be used and distributed according to the terms of
8 the GNU General Public License (GPL), incorporated herein by reference.
9 Drivers based on or derived from this code fall under the GPL and must
10 retain the authorship, copyright and license notice. This file is not
11 a complete program and may only be used when the entire operating
12 system is licensed under the GPL.
14 This driver is designed for the VIA VT86C100A Rhine-I.
15 It also works with the Rhine-II (6102) and Rhine-III (6105/6105L/6105LOM
16 and management NIC 6105M).
18 The author may be reached as becker@scyld.com, or C/O
19 Scyld Computing Corporation
20 410 Severn Ave., Suite 210
24 This driver contains some changes from the original Donald Becker
25 version. He may or may not be interested in bug reports on this
26 code. You can find his versions at:
27 http://www.scyld.com/network/via-rhine.html
28 [link no longer provides useful info -jgarzik]
32 #define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
34 #define DRV_NAME "via-rhine"
35 #define DRV_VERSION "1.5.0"
36 #define DRV_RELDATE "2010-10-09"
38 #include <linux/types.h>
40 /* A few user-configurable values.
41 These may be modified when a driver module is loaded. */
43 #define RHINE_MSG_DEFAULT \
46 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
47 Setting to > 1518 effectively disables this feature. */
48 #if defined(__alpha__) || defined(__arm__) || defined(__hppa__) || \
49 defined(CONFIG_SPARC) || defined(__ia64__) || \
50 defined(__sh__) || defined(__mips__)
51 static int rx_copybreak = 1518;
53 static int rx_copybreak;
56 /* Work-around for broken BIOSes: they are unable to get the chip back out of
57 power state D3, so PXE booting fails. bootparam(7): via-rhine.avoid_D3=1 */
61 * In case you are looking for 'options[]' or 'full_duplex[]', they
62 * are gone. Use ethtool(8) instead.
65 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
66 The Rhine has a 64 element 8390-like hash table. */
67 static const int multicast_filter_limit = 32;
70 /* Operational parameters that are set at compile time. */
72 /* Keep the ring sizes a power of two for compile efficiency.
73 The compiler will convert <unsigned>'%'<2^N> into a bit mask.
74 Making the Tx ring too large decreases the effectiveness of channel
75 bonding and packet priority.
76 There are no ill effects from too-large receive rings. */
77 #define TX_RING_SIZE 16
78 #define TX_QUEUE_LEN 10 /* Limit ring entries actually used. */
79 #define RX_RING_SIZE 64
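/*
 * Illustrative sketch (not part of the driver): because the ring sizes
 * above are powers of two, the compiler turns the modulo in the hot
 * paths into a simple bitwise AND instead of a division.
 */
#if 0
static unsigned int tx_slot(unsigned int cur_tx)
{
	return cur_tx % TX_RING_SIZE;	/* == cur_tx & (TX_RING_SIZE - 1) */
}
#endif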
81 /* Operational parameters that usually are not changed. */
83 /* Time in jiffies before concluding the transmitter is hung. */
84 #define TX_TIMEOUT (2*HZ)
86 #define PKT_BUF_SZ 1536 /* Size of each temporary Rx buffer. */
88 #include <linux/module.h>
89 #include <linux/moduleparam.h>
90 #include <linux/kernel.h>
91 #include <linux/string.h>
92 #include <linux/timer.h>
93 #include <linux/errno.h>
94 #include <linux/ioport.h>
95 #include <linux/interrupt.h>
96 #include <linux/pci.h>
97 #include <linux/dma-mapping.h>
98 #include <linux/netdevice.h>
99 #include <linux/etherdevice.h>
100 #include <linux/skbuff.h>
101 #include <linux/init.h>
102 #include <linux/delay.h>
103 #include <linux/mii.h>
104 #include <linux/ethtool.h>
105 #include <linux/crc32.h>
106 #include <linux/if_vlan.h>
107 #include <linux/bitops.h>
108 #include <linux/workqueue.h>
109 #include <asm/processor.h> /* Processor type for cache alignment. */
112 #include <asm/uaccess.h>
113 #include <linux/dmi.h>
115 /* These identify the driver base version and may not be removed. */
116 static const char version[] __devinitconst =
117 "v1.10-LK" DRV_VERSION " " DRV_RELDATE " Written by Donald Becker";
119 /* This driver was written to use PCI memory space. Some early versions
120 of the Rhine may only work correctly with I/O space accesses. */
121 #ifdef CONFIG_VIA_RHINE_MMIO
126 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
127 MODULE_DESCRIPTION("VIA Rhine PCI Fast Ethernet driver");
128 MODULE_LICENSE("GPL");
130 module_param(debug, int, 0);
131 module_param(rx_copybreak, int, 0);
132 module_param(avoid_D3, bool, 0);
133 MODULE_PARM_DESC(debug, "VIA Rhine debug message flags");
134 MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
135 MODULE_PARM_DESC(avoid_D3, "Avoid power state D3 (work-around for broken BIOSes)");
143 I. Board Compatibility
145 This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
148 II. Board-specific settings
150 Boards with this chip are functional only in a bus-master PCI slot.
152 Many operational settings are loaded from the EEPROM to the Config word at
153 offset 0x78. For most of these settings, this driver assumes that they are correct.
155 If this driver is compiled to use PCI memory space operations the EEPROM
156 must be configured to enable memory ops.
158 III. Driver operation
162 This driver uses two statically allocated fixed-size descriptor lists
163 formed into rings by a branch from the final descriptor to the beginning of
164 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
166 IIIb/c. Transmit/Receive Structure
168 This driver attempts to use a zero-copy receive and transmit scheme.
170 Alas, all data buffers are required to start on a 32 bit boundary, so
171 the driver must often copy transmit packets into bounce buffers.
173 The driver allocates full frame size skbuffs for the Rx ring buffers at
174 open() time and passes the skb->data field to the chip as receive data
175 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
176 a fresh skbuff is allocated and the frame is copied to the new skbuff.
177 When the incoming frame is larger, the skbuff is passed directly up the
178 protocol stack. Buffers consumed this way are replaced by newly allocated
179 skbuffs in the last phase of rhine_rx().
181 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
182 using a full-sized skbuff for small frames vs. the copying costs of larger
183 frames. New boards are typically used in generously configured machines
184 and the underfilled buffers have negligible impact compared to the benefit of
185 a single allocation size, so the default value of zero results in never
186 copying packets. When copying is done, the cost is usually mitigated by using
187 a combined copy/checksum routine. Copying also preloads the cache, which is
188 most useful with small frames.
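A sketch of the decision (illustrative, not verbatim driver code):

	if (pkt_len < rx_copybreak) {
		skb = netdev_alloc_skb_ip_align(dev, pkt_len);	// copy path
		skb_copy_to_linear_data(skb, rp->rx_skbuff[entry]->data,
					pkt_len);
	} else {
		skb = rp->rx_skbuff[entry];	// pass the ring skb up as-is
		rp->rx_skbuff[entry] = NULL;
	}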
190 Since the VIA chips are only able to transfer data to buffers on 32 bit
191 boundaries, the IP header at offset 14 in an ethernet frame isn't
192 longword aligned for further processing. Copying these unaligned buffers
193 has the beneficial effect of 16-byte aligning the IP header.
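Illustratively, reserving NET_IP_ALIGN (typically 2) bytes before the
copy shifts the 14-byte Ethernet header so the IP header lands on a
16-byte boundary; netdev_alloc_skb_ip_align() does exactly that:

	skb = netdev_alloc_skb(dev, pkt_len + NET_IP_ALIGN);
	skb_reserve(skb, NET_IP_ALIGN);	// 2 + 14 = 16-byte aligned payload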
195 IIId. Synchronization
197 The driver runs as two independent, single-threaded flows of control. One
198 is the send-packet routine, which enforces single-threaded use by the
199 netdev_priv(dev)->lock spinlock. The other thread is the interrupt handler,
200 which is single threaded by the hardware and interrupt handling software.
202 The send packet thread has partial control over the Tx ring. It locks the
203 netdev_priv(dev)->lock whenever it's queuing a Tx packet. If the next slot in
204 the ring is not available it stops the transmit queue by
205 calling netif_stop_queue.
207 The interrupt handler has exclusive control over the Rx ring and records stats
208 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
209 empty by incrementing the dirty_tx mark. If at least half of the entries in
210 the Tx ring are available, the transmit queue is woken up if it was stopped.
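In outline (illustrative, not verbatim driver code):

	// producer, rhine_start_tx()
	entry = rp->cur_tx % TX_RING_SIZE;
	rp->cur_tx++;
	if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
		netif_stop_queue(dev);

	// consumer, rhine_tx()
	while (rp->dirty_tx != rp->cur_tx) {
		// reap stats and free the skb for this slot, then:
		rp->dirty_tx++;
	}
	if (rp->cur_tx - rp->dirty_tx < TX_QUEUE_LEN - 4)
		netif_wake_queue(dev);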
216 Preliminary VT86C100A manual from http://www.via.com.tw/
217 http://www.scyld.com/expert/100mbps.html
218 http://www.scyld.com/expert/NWay.html
219 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
220 ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
225 The VT86C100A manual is not a reliable source of information.
226 The 3043 chip does not handle unaligned transmit or receive buffers, resulting
227 in significant performance degradation for bounce buffer copies on transmit
228 and unaligned IP headers on receive.
229 The chip does not pad to minimum transmit length.
234 /* This table drives the PCI probe routines. It's mostly boilerplate in all
235 of the drivers, and will likely be provided by some future kernel.
236 Note the matching code -- the first table entry matches all 56** cards but
237 the second matches only the 1234 card.
244 VT8231 = 0x50, /* Integrated MAC */
245 VT8233 = 0x60, /* Integrated MAC */
246 VT8235 = 0x74, /* Integrated MAC */
247 VT8237 = 0x78, /* Integrated MAC */
254 VT6105M = 0x90, /* Management adapter */
258 rqWOL = 0x0001, /* Wake-On-LAN support */
259 rqForceReset = 0x0002,
260 rq6patterns = 0x0040, /* 6 instead of 4 patterns for WOL */
261 rqStatusWBRace = 0x0080, /* Tx Status Writeback Error possible */
262 rqRhineI = 0x0100, /* See comment below */
265 * rqRhineI: VT86C100A (aka Rhine-I) uses different bits to enable
266 * MMIO as well as for the collision counter and the Tx FIFO underflow
267 indicator. In addition, Tx and Rx buffers need to be 4-byte aligned.
270 /* Beware of PCI posted writes */
271 #define IOSYNC do { ioread8(ioaddr + StationAddr); } while (0)
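/*
 * Illustrative usage sketch (not part of the driver): a register write
 * may be posted (buffered) by the PCI bridge; reading any register of
 * the same device forces the write to complete before we proceed.
 */
#if 0
	iowrite8(Cmd1Reset, ioaddr + ChipCmd1);	/* may be posted */
	IOSYNC;					/* read-back flushes it */
#endif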
273 static DEFINE_PCI_DEVICE_TABLE(rhine_pci_tbl) = {
274 { 0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, }, /* VT86C100A */
275 { 0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6102 */
276 { 0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, }, /* 6105{,L,LOM} */
277 { 0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, }, /* VT6105M */
278 { } /* terminate list */
280 MODULE_DEVICE_TABLE(pci, rhine_pci_tbl);
283 /* Offsets to the device registers. */
284 enum register_offsets {
285 StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
286 ChipCmd1=0x09, TQWake=0x0A,
287 IntrStatus=0x0C, IntrEnable=0x0E,
288 MulticastFilter0=0x10, MulticastFilter1=0x14,
289 RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
290 MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E, PCIBusConfig1=0x6F,
291 MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
292 ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
293 RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
294 StickyHW=0x83, IntrStatus2=0x84,
295 CamMask=0x88, CamCon=0x92, CamAddr=0x93,
296 WOLcrSet=0xA0, PwcfgSet=0xA1, WOLcgSet=0xA3, WOLcrClr=0xA4,
297 WOLcrClr1=0xA6, WOLcgClr=0xA7,
298 PwrcsrSet=0xA8, PwrcsrSet1=0xA9, PwrcsrClr=0xAC, PwrcsrClr1=0xAD,
301 /* Bits in ConfigD */
303 BackOptional=0x01, BackModify=0x02,
304 BackCaptureEffect=0x04, BackRandom=0x08
307 /* Bits in the TxConfig (TCR) register */
310 TCR_LB0=0x02, /* loopback[0] */
311 TCR_LB1=0x04, /* loopback[1] */
319 /* Bits in the CamCon (CAMC) register */
327 /* Bits in the PCIBusConfig1 (BCR1) register */
335 BCR1_TXQNOBK=0x40, /* for VT6105 */
336 BCR1_VIDFR=0x80, /* for VT6105 */
337 BCR1_MED0=0x40, /* for VT6102 */
338 BCR1_MED1=0x80, /* for VT6102 */
342 /* Registers we check to verify that MMIO and PIO reads agree. */
343 static const int mmio_verify_registers[] = {
344 RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
349 /* Bits in the interrupt status/mask registers. */
350 enum intr_status_bits {
354 IntrTxError = 0x0008,
355 IntrRxEmpty = 0x0020,
357 IntrStatsMax = 0x0080,
358 IntrRxEarly = 0x0100,
359 IntrTxUnderrun = 0x0210,
360 IntrRxOverflow = 0x0400,
361 IntrRxDropped = 0x0800,
362 IntrRxNoBuf = 0x1000,
363 IntrTxAborted = 0x2000,
364 IntrLinkChange = 0x4000,
365 IntrRxWakeUp = 0x8000,
366 IntrTxDescRace = 0x080000, /* mapped from IntrStatus2 */
367 IntrNormalSummary = IntrRxDone | IntrTxDone,
368 IntrTxErrSummary = IntrTxDescRace | IntrTxAborted | IntrTxError |
372 /* Bits in WOLcrSet/WOLcrClr and PwrcsrSet/PwrcsrClr */
381 /* The Rx and Tx buffer descriptors. */
384 __le32 desc_length; /* Chain flag, Buffer/frame length */
390 __le32 desc_length; /* Chain flag, Tx Config, Frame length */
395 /* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
396 #define TXDESC 0x00e08000
398 enum rx_status_bits {
399 RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
402 /* Bits in *_desc.*_status */
403 enum desc_status_bits {
407 /* Bits in *_desc.*_length */
408 enum desc_length_bits {
412 /* Bits in ChipCmd. */
414 CmdInit=0x01, CmdStart=0x02, CmdStop=0x04, CmdRxOn=0x08,
415 CmdTxOn=0x10, Cmd1TxDemand=0x20, CmdRxDemand=0x40,
416 Cmd1EarlyRx=0x01, Cmd1EarlyTx=0x02, Cmd1FDuplex=0x04,
417 Cmd1NoTxPoll=0x08, Cmd1Reset=0x80,
420 struct rhine_private {
421 /* Bit mask for configured VLAN ids */
422 unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
424 /* Descriptor rings */
425 struct rx_desc *rx_ring;
426 struct tx_desc *tx_ring;
427 dma_addr_t rx_ring_dma;
428 dma_addr_t tx_ring_dma;
430 /* The addresses of receive-in-place skbuffs. */
431 struct sk_buff *rx_skbuff[RX_RING_SIZE];
432 dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
434 /* The saved address of a sent-in-place packet/buffer, for later free(). */
435 struct sk_buff *tx_skbuff[TX_RING_SIZE];
436 dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
438 /* Tx bounce buffers (Rhine-I only) */
439 unsigned char *tx_buf[TX_RING_SIZE];
440 unsigned char *tx_bufs;
441 dma_addr_t tx_bufs_dma;
443 struct pci_dev *pdev;
445 struct net_device *dev;
446 struct napi_struct napi;
448 struct mutex task_lock;
450 struct work_struct slow_event_task;
451 struct work_struct reset_task;
455 /* Frequently used values: keep some adjacent for cache effect. */
457 struct rx_desc *rx_head_desc;
458 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
459 unsigned int cur_tx, dirty_tx;
460 unsigned int rx_buf_sz; /* Based on MTU+slack. */
463 u8 tx_thresh, rx_thresh;
465 struct mii_if_info mii_if;
469 #define BYTE_REG_BITS_ON(x, p) do { iowrite8((ioread8((p))|(x)), (p)); } while (0)
470 #define WORD_REG_BITS_ON(x, p) do { iowrite16((ioread16((p))|(x)), (p)); } while (0)
471 #define DWORD_REG_BITS_ON(x, p) do { iowrite32((ioread32((p))|(x)), (p)); } while (0)
473 #define BYTE_REG_BITS_IS_ON(x, p) (ioread8((p)) & (x))
474 #define WORD_REG_BITS_IS_ON(x, p) (ioread16((p)) & (x))
475 #define DWORD_REG_BITS_IS_ON(x, p) (ioread32((p)) & (x))
477 #define BYTE_REG_BITS_OFF(x, p) do { iowrite8(ioread8((p)) & (~(x)), (p)); } while (0)
478 #define WORD_REG_BITS_OFF(x, p) do { iowrite16(ioread16((p)) & (~(x)), (p)); } while (0)
479 #define DWORD_REG_BITS_OFF(x, p) do { iowrite32(ioread32((p)) & (~(x)), (p)); } while (0)
481 #define BYTE_REG_BITS_SET(x, m, p) do { iowrite8((ioread8((p)) & (~(m)))|(x), (p)); } while (0)
482 #define WORD_REG_BITS_SET(x, m, p) do { iowrite16((ioread16((p)) & (~(m)))|(x), (p)); } while (0)
483 #define DWORD_REG_BITS_SET(x, m, p) do { iowrite32((ioread32((p)) & (~(m)))|(x), (p)); } while (0)
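/*
 * Illustrative sketch (not part of the driver): the read-modify-write
 * helpers above. _ON ORs bits in, _OFF masks bits out, and _SET
 * replaces a masked field: new = (old & ~mask) | x.
 */
#if 0
	BYTE_REG_BITS_ON(0x40, ioaddr + TxConfig);	  /* set bit 6 */
	BYTE_REG_BITS_OFF(0x40, ioaddr + TxConfig);	  /* clear bit 6 */
	BYTE_REG_BITS_SET(0x20, 0xe0, ioaddr + TxConfig); /* replace field */
#endif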
486 static int mdio_read(struct net_device *dev, int phy_id, int location);
487 static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
488 static int rhine_open(struct net_device *dev);
489 static void rhine_reset_task(struct work_struct *work);
490 static void rhine_slow_event_task(struct work_struct *work);
491 static void rhine_tx_timeout(struct net_device *dev);
492 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
493 struct net_device *dev);
494 static irqreturn_t rhine_interrupt(int irq, void *dev_instance);
495 static void rhine_tx(struct net_device *dev);
496 static int rhine_rx(struct net_device *dev, int limit);
497 static void rhine_set_rx_mode(struct net_device *dev);
498 static struct net_device_stats *rhine_get_stats(struct net_device *dev);
499 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
500 static const struct ethtool_ops netdev_ethtool_ops;
501 static int rhine_close(struct net_device *dev);
502 static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid);
503 static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid);
504 static void rhine_restart_tx(struct net_device *dev);
506 static void rhine_wait_bit(struct rhine_private *rp, u8 reg, u8 mask, bool high)
508 void __iomem *ioaddr = rp->base;
511 for (i = 0; i < 1024; i++) {
512 if (high ^ !!(ioread8(ioaddr + reg) & mask))
517 netif_dbg(rp, hw, rp->dev, "%s bit wait (%02x/%02x) cycle count: %04d\n",
518 high ? "high" : "low", reg, mask, i);
522 static void rhine_wait_bit_high(struct rhine_private *rp, u8 reg, u8 mask)
524 rhine_wait_bit(rp, reg, mask, true);
527 static void rhine_wait_bit_low(struct rhine_private *rp, u8 reg, u8 mask)
529 rhine_wait_bit(rp, reg, mask, false);
532 static u32 rhine_get_events(struct rhine_private *rp)
534 void __iomem *ioaddr = rp->base;
537 intr_status = ioread16(ioaddr + IntrStatus);
538 /* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
539 if (rp->quirks & rqStatusWBRace)
540 intr_status |= ioread8(ioaddr + IntrStatus2) << 16;
544 static void rhine_ack_events(struct rhine_private *rp, u32 mask)
546 void __iomem *ioaddr = rp->base;
548 if (rp->quirks & rqStatusWBRace)
549 iowrite8(mask >> 16, ioaddr + IntrStatus2);
550 iowrite16(mask, ioaddr + IntrStatus);
555 * Get power related registers into sane state.
556 * Notify user about past WOL event.
558 static void rhine_power_init(struct net_device *dev)
560 struct rhine_private *rp = netdev_priv(dev);
561 void __iomem *ioaddr = rp->base;
564 if (rp->quirks & rqWOL) {
565 /* Make sure chip is in power state D0 */
566 iowrite8(ioread8(ioaddr + StickyHW) & 0xFC, ioaddr + StickyHW);
568 /* Disable "force PME-enable" */
569 iowrite8(0x80, ioaddr + WOLcgClr);
571 /* Clear power-event config bits (WOL) */
572 iowrite8(0xFF, ioaddr + WOLcrClr);
573 /* More recent cards can manage two additional patterns */
574 if (rp->quirks & rq6patterns)
575 iowrite8(0x03, ioaddr + WOLcrClr1);
577 /* Save power-event status bits */
578 wolstat = ioread8(ioaddr + PwrcsrSet);
579 if (rp->quirks & rq6patterns)
580 wolstat |= (ioread8(ioaddr + PwrcsrSet1) & 0x03) << 8;
582 /* Clear power-event status bits */
583 iowrite8(0xFF, ioaddr + PwrcsrClr);
584 if (rp->quirks & rq6patterns)
585 iowrite8(0x03, ioaddr + PwrcsrClr1);
591 reason = "Magic packet";
594 reason = "Link went up";
597 reason = "Link went down";
600 reason = "Unicast packet";
603 reason = "Multicast/broadcast packet";
608 netdev_info(dev, "Woke system up. Reason: %s\n",
614 static void rhine_chip_reset(struct net_device *dev)
616 struct rhine_private *rp = netdev_priv(dev);
617 void __iomem *ioaddr = rp->base;
620 iowrite8(Cmd1Reset, ioaddr + ChipCmd1);
623 if (ioread8(ioaddr + ChipCmd1) & Cmd1Reset) {
624 netdev_info(dev, "Reset not complete yet. Trying harder.\n");
627 if (rp->quirks & rqForceReset)
628 iowrite8(0x40, ioaddr + MiscCmd);
630 /* Reset can take somewhat longer (rare) */
631 rhine_wait_bit_low(rp, ChipCmd1, Cmd1Reset);
634 cmd1 = ioread8(ioaddr + ChipCmd1);
635 netif_info(rp, hw, dev, "Reset %s\n", (cmd1 & Cmd1Reset) ?
636 "failed" : "succeeded");
640 static void enable_mmio(long pioaddr, u32 quirks)
643 if (quirks & rqRhineI) {
644 /* More recent docs say that this bit is reserved ... */
645 n = inb(pioaddr + ConfigA) | 0x20;
646 outb(n, pioaddr + ConfigA);
648 n = inb(pioaddr + ConfigD) | 0x80;
649 outb(n, pioaddr + ConfigD);
655 * Loads bytes 0x00-0x05, 0x6E-0x6F, 0x78-0x7B from EEPROM
656 * (plus 0x6C for Rhine-I/II)
658 static void __devinit rhine_reload_eeprom(long pioaddr, struct net_device *dev)
660 struct rhine_private *rp = netdev_priv(dev);
661 void __iomem *ioaddr = rp->base;
664 outb(0x20, pioaddr + MACRegEEcsr);
665 for (i = 0; i < 1024; i++) {
666 if (!(inb(pioaddr + MACRegEEcsr) & 0x20))
670 pr_info("%4d cycles used @ %s:%d\n", i, __func__, __LINE__);
674 * Reloading from EEPROM overwrites ConfigA-D, so we must re-enable
675 * MMIO. If reloading EEPROM was done first this could be avoided, but
676 * it is not known if that still works with the "win98-reboot" problem.
678 enable_mmio(pioaddr, rp->quirks);
681 /* Turn off EEPROM-controlled wake-up (magic packet) */
682 if (rp->quirks & rqWOL)
683 iowrite8(ioread8(ioaddr + ConfigA) & 0xFC, ioaddr + ConfigA);
687 #ifdef CONFIG_NET_POLL_CONTROLLER
688 static void rhine_poll(struct net_device *dev)
690 disable_irq(dev->irq);
691 rhine_interrupt(dev->irq, (void *)dev);
692 enable_irq(dev->irq);
696 static void rhine_kick_tx_threshold(struct rhine_private *rp)
698 if (rp->tx_thresh < 0xe0) {
699 void __iomem *ioaddr = rp->base;
701 rp->tx_thresh += 0x20;
702 BYTE_REG_BITS_SET(rp->tx_thresh, 0x80, ioaddr + TxConfig);
706 static void rhine_tx_err(struct rhine_private *rp, u32 status)
708 struct net_device *dev = rp->dev;
710 if (status & IntrTxAborted) {
711 netif_info(rp, tx_err, dev,
712 "Abort %08x, frame dropped\n", status);
715 if (status & IntrTxUnderrun) {
716 rhine_kick_tx_threshold(rp);
717 netif_info(rp, tx_err, dev,
718 "Transmitter underrun, Tx threshold now %02x\n", rp->tx_thresh);
721 if (status & IntrTxDescRace)
722 netif_info(rp, tx_err, dev, "Tx descriptor write-back race\n");
724 if ((status & IntrTxError) &&
725 (status & (IntrTxAborted | IntrTxUnderrun | IntrTxDescRace)) == 0) {
726 rhine_kick_tx_threshold(rp);
727 netif_info(rp, tx_err, dev,
728 "Unspecified error. Tx threshold now %02x\n", rp->tx_thresh);
731 rhine_restart_tx(dev);
734 static void rhine_update_rx_crc_and_missed_errors(struct rhine_private *rp)
736 void __iomem *ioaddr = rp->base;
737 struct net_device_stats *stats = &rp->dev->stats;
739 stats->rx_crc_errors += ioread16(ioaddr + RxCRCErrs);
740 stats->rx_missed_errors += ioread16(ioaddr + RxMissed);
743 * Clears the "tally counters" for CRC errors and missed frames(?).
744 * It has been reported that some chips need a write of 0 to clear
745 * these, for others the counters are set to 1 when written to and
746 * instead cleared when read. So we clear them both ways ...
748 iowrite32(0, ioaddr + RxMissed);
749 ioread16(ioaddr + RxCRCErrs);
750 ioread16(ioaddr + RxMissed);
753 #define RHINE_EVENT_NAPI_RX (IntrRxDone | \
761 #define RHINE_EVENT_NAPI_TX_ERR (IntrTxError | \
765 #define RHINE_EVENT_NAPI_TX (IntrTxDone | RHINE_EVENT_NAPI_TX_ERR)
767 #define RHINE_EVENT_NAPI (RHINE_EVENT_NAPI_RX | \
768 RHINE_EVENT_NAPI_TX | \
770 #define RHINE_EVENT_SLOW (IntrPCIErr | IntrLinkChange)
771 #define RHINE_EVENT (RHINE_EVENT_NAPI | RHINE_EVENT_SLOW)
773 static int rhine_napipoll(struct napi_struct *napi, int budget)
775 struct rhine_private *rp = container_of(napi, struct rhine_private, napi);
776 struct net_device *dev = rp->dev;
777 void __iomem *ioaddr = rp->base;
778 u16 enable_mask = RHINE_EVENT & 0xffff;
782 status = rhine_get_events(rp);
783 rhine_ack_events(rp, status & ~RHINE_EVENT_SLOW);
785 if (status & RHINE_EVENT_NAPI_RX)
786 work_done += rhine_rx(dev, budget);
788 if (status & RHINE_EVENT_NAPI_TX) {
789 if (status & RHINE_EVENT_NAPI_TX_ERR) {
790 /* Avoid scavenging before Tx engine turned off */
791 rhine_wait_bit_low(rp, ChipCmd, CmdTxOn);
792 if (ioread8(ioaddr + ChipCmd) & CmdTxOn)
793 netif_warn(rp, tx_err, dev, "Tx still on\n");
798 if (status & RHINE_EVENT_NAPI_TX_ERR)
799 rhine_tx_err(rp, status);
802 if (status & IntrStatsMax) {
803 spin_lock(&rp->lock);
804 rhine_update_rx_crc_and_missed_errors(rp);
805 spin_unlock(&rp->lock);
808 if (status & RHINE_EVENT_SLOW) {
809 enable_mask &= ~RHINE_EVENT_SLOW;
810 schedule_work(&rp->slow_event_task);
813 if (work_done < budget) {
815 iowrite16(enable_mask, ioaddr + IntrEnable);
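/*
 * Illustrative sketch (not part of the driver): the standard NAPI
 * completion contract used above. If less than the full budget was
 * consumed, leave polled mode and re-enable device interrupts;
 * otherwise the core keeps calling the poll routine.
 */
#if 0
	if (work_done < budget) {
		napi_complete(napi);
		iowrite16(enable_mask, ioaddr + IntrEnable);
	}
	return work_done;
#endif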
821 static void __devinit rhine_hw_init(struct net_device *dev, long pioaddr)
823 struct rhine_private *rp = netdev_priv(dev);
825 /* Reset the chip to erase previous misconfiguration. */
826 rhine_chip_reset(dev);
828 /* Rhine-I needs extra time to recuperate before EEPROM reload */
829 if (rp->quirks & rqRhineI)
832 /* Reload EEPROM controlled bytes cleared by soft reset */
833 rhine_reload_eeprom(pioaddr, dev);
836 static const struct net_device_ops rhine_netdev_ops = {
837 .ndo_open = rhine_open,
838 .ndo_stop = rhine_close,
839 .ndo_start_xmit = rhine_start_tx,
840 .ndo_get_stats = rhine_get_stats,
841 .ndo_set_rx_mode = rhine_set_rx_mode,
842 .ndo_change_mtu = eth_change_mtu,
843 .ndo_validate_addr = eth_validate_addr,
844 .ndo_set_mac_address = eth_mac_addr,
845 .ndo_do_ioctl = netdev_ioctl,
846 .ndo_tx_timeout = rhine_tx_timeout,
847 .ndo_vlan_rx_add_vid = rhine_vlan_rx_add_vid,
848 .ndo_vlan_rx_kill_vid = rhine_vlan_rx_kill_vid,
849 #ifdef CONFIG_NET_POLL_CONTROLLER
850 .ndo_poll_controller = rhine_poll,
854 static int __devinit rhine_init_one(struct pci_dev *pdev,
855 const struct pci_device_id *ent)
857 struct net_device *dev;
858 struct rhine_private *rp;
863 void __iomem *ioaddr;
872 /* when built into the kernel, we only print version if device is found */
874 pr_info_once("%s\n", version);
881 if (pdev->revision < VTunknown0) {
885 else if (pdev->revision >= VT6102) {
886 quirks = rqWOL | rqForceReset;
887 if (pdev->revision < VT6105) {
889 quirks |= rqStatusWBRace; /* Rhine-II exclusive */
892 phy_id = 1; /* Integrated PHY, phy_id fixed to 1 */
893 if (pdev->revision >= VT6105_B0)
894 quirks |= rq6patterns;
895 if (pdev->revision < VT6105M)
898 name = "Rhine III (Management Adapter)";
902 rc = pci_enable_device(pdev);
906 /* this should always be supported */
907 rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
910 "32-bit PCI DMA addresses not supported by the card!?\n");
915 if ((pci_resource_len(pdev, 0) < io_size) ||
916 (pci_resource_len(pdev, 1) < io_size)) {
918 dev_err(&pdev->dev, "Insufficient PCI resources, aborting\n");
922 pioaddr = pci_resource_start(pdev, 0);
923 memaddr = pci_resource_start(pdev, 1);
925 pci_set_master(pdev);
927 dev = alloc_etherdev(sizeof(struct rhine_private));
932 SET_NETDEV_DEV(dev, &pdev->dev);
934 rp = netdev_priv(dev);
937 rp->pioaddr = pioaddr;
939 rp->msg_enable = netif_msg_init(debug, RHINE_MSG_DEFAULT);
941 rc = pci_request_regions(pdev, DRV_NAME);
943 goto err_out_free_netdev;
945 ioaddr = pci_iomap(pdev, bar, io_size);
949 "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
950 pci_name(pdev), io_size, memaddr);
951 goto err_out_free_res;
955 enable_mmio(pioaddr, quirks);
957 /* Check that selected MMIO registers match the PIO ones */
959 while (mmio_verify_registers[i]) {
960 int reg = mmio_verify_registers[i++];
961 unsigned char a = inb(pioaddr+reg);
962 unsigned char b = readb(ioaddr+reg);
966 "MMIO do not match PIO [%02x] (%02x != %02x)\n",
971 #endif /* USE_MMIO */
973 dev->base_addr = (unsigned long)ioaddr;
976 /* Get chip registers into a sane state */
977 rhine_power_init(dev);
978 rhine_hw_init(dev, pioaddr);
980 for (i = 0; i < 6; i++)
981 dev->dev_addr[i] = ioread8(ioaddr + StationAddr + i);
983 if (!is_valid_ether_addr(dev->dev_addr)) {
984 /* Report it and use a random ethernet address instead */
985 netdev_err(dev, "Invalid MAC address: %pM\n", dev->dev_addr);
986 eth_hw_addr_random(dev);
987 netdev_info(dev, "Using random MAC address: %pM\n",
990 memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
992 /* For Rhine-I/II, phy_id is loaded from EEPROM */
994 phy_id = ioread8(ioaddr + 0x6C);
996 dev->irq = pdev->irq;
998 spin_lock_init(&rp->lock);
999 mutex_init(&rp->task_lock);
1000 INIT_WORK(&rp->reset_task, rhine_reset_task);
1001 INIT_WORK(&rp->slow_event_task, rhine_slow_event_task);
1003 rp->mii_if.dev = dev;
1004 rp->mii_if.mdio_read = mdio_read;
1005 rp->mii_if.mdio_write = mdio_write;
1006 rp->mii_if.phy_id_mask = 0x1f;
1007 rp->mii_if.reg_num_mask = 0x1f;
1009 /* The chip-specific entries in the device structure. */
1010 dev->netdev_ops = &rhine_netdev_ops;
1011 dev->ethtool_ops = &netdev_ethtool_ops;
1012 dev->watchdog_timeo = TX_TIMEOUT;
1014 netif_napi_add(dev, &rp->napi, rhine_napipoll, 64);
1016 if (rp->quirks & rqRhineI)
1017 dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
1019 if (pdev->revision >= VT6105M)
1020 dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX |
1021 NETIF_F_HW_VLAN_FILTER;
1023 /* dev->name not defined before register_netdev()! */
1024 rc = register_netdev(dev);
1028 netdev_info(dev, "VIA %s at 0x%lx, %pM, IRQ %d\n",
1035 dev->dev_addr, pdev->irq);
1037 pci_set_drvdata(pdev, dev);
1041 int mii_status = mdio_read(dev, phy_id, 1);
1042 mii_cmd = mdio_read(dev, phy_id, MII_BMCR) & ~BMCR_ISOLATE;
1043 mdio_write(dev, phy_id, MII_BMCR, mii_cmd);
1044 if (mii_status != 0xffff && mii_status != 0x0000) {
1045 rp->mii_if.advertising = mdio_read(dev, phy_id, 4);
1047 "MII PHY found at address %d, status 0x%04x advertising %04x Link %04x\n",
1049 mii_status, rp->mii_if.advertising,
1050 mdio_read(dev, phy_id, 5));
1052 /* set IFF_RUNNING */
1053 if (mii_status & BMSR_LSTATUS)
1054 netif_carrier_on(dev);
1056 netif_carrier_off(dev);
1060 rp->mii_if.phy_id = phy_id;
1062 netif_info(rp, probe, dev, "No D3 power state at shutdown\n");
1067 pci_iounmap(pdev, ioaddr);
1069 pci_release_regions(pdev);
1070 err_out_free_netdev:
1076 static int alloc_ring(struct net_device* dev)
1078 struct rhine_private *rp = netdev_priv(dev);
1080 dma_addr_t ring_dma;
1082 ring = pci_alloc_consistent(rp->pdev,
1083 RX_RING_SIZE * sizeof(struct rx_desc) +
1084 TX_RING_SIZE * sizeof(struct tx_desc),
1087 netdev_err(dev, "Could not allocate DMA memory\n");
1090 if (rp->quirks & rqRhineI) {
1091 rp->tx_bufs = pci_alloc_consistent(rp->pdev,
1092 PKT_BUF_SZ * TX_RING_SIZE,
1094 if (rp->tx_bufs == NULL) {
1095 pci_free_consistent(rp->pdev,
1096 RX_RING_SIZE * sizeof(struct rx_desc) +
1097 TX_RING_SIZE * sizeof(struct tx_desc),
1104 rp->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
1105 rp->rx_ring_dma = ring_dma;
1106 rp->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
1111 static void free_ring(struct net_device* dev)
1113 struct rhine_private *rp = netdev_priv(dev);
1115 pci_free_consistent(rp->pdev,
1116 RX_RING_SIZE * sizeof(struct rx_desc) +
1117 TX_RING_SIZE * sizeof(struct tx_desc),
1118 rp->rx_ring, rp->rx_ring_dma);
1122 pci_free_consistent(rp->pdev, PKT_BUF_SZ * TX_RING_SIZE,
1123 rp->tx_bufs, rp->tx_bufs_dma);
1129 static void alloc_rbufs(struct net_device *dev)
1131 struct rhine_private *rp = netdev_priv(dev);
1135 rp->dirty_rx = rp->cur_rx = 0;
1137 rp->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
1138 rp->rx_head_desc = &rp->rx_ring[0];
1139 next = rp->rx_ring_dma;
1141 /* Init the ring entries */
1142 for (i = 0; i < RX_RING_SIZE; i++) {
1143 rp->rx_ring[i].rx_status = 0;
1144 rp->rx_ring[i].desc_length = cpu_to_le32(rp->rx_buf_sz);
1145 next += sizeof(struct rx_desc);
1146 rp->rx_ring[i].next_desc = cpu_to_le32(next);
1147 rp->rx_skbuff[i] = NULL;
1149 /* Mark the last entry as wrapping the ring. */
1150 rp->rx_ring[i-1].next_desc = cpu_to_le32(rp->rx_ring_dma);
1152 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
1153 for (i = 0; i < RX_RING_SIZE; i++) {
1154 struct sk_buff *skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1155 rp->rx_skbuff[i] = skb;
1159 rp->rx_skbuff_dma[i] =
1160 pci_map_single(rp->pdev, skb->data, rp->rx_buf_sz,
1161 PCI_DMA_FROMDEVICE);
1163 rp->rx_ring[i].addr = cpu_to_le32(rp->rx_skbuff_dma[i]);
1164 rp->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
1166 rp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
1169 static void free_rbufs(struct net_device* dev)
1171 struct rhine_private *rp = netdev_priv(dev);
1174 /* Free all the skbuffs in the Rx queue. */
1175 for (i = 0; i < RX_RING_SIZE; i++) {
1176 rp->rx_ring[i].rx_status = 0;
1177 rp->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1178 if (rp->rx_skbuff[i]) {
1179 pci_unmap_single(rp->pdev,
1180 rp->rx_skbuff_dma[i],
1181 rp->rx_buf_sz, PCI_DMA_FROMDEVICE);
1182 dev_kfree_skb(rp->rx_skbuff[i]);
1184 rp->rx_skbuff[i] = NULL;
1188 static void alloc_tbufs(struct net_device* dev)
1190 struct rhine_private *rp = netdev_priv(dev);
1194 rp->dirty_tx = rp->cur_tx = 0;
1195 next = rp->tx_ring_dma;
1196 for (i = 0; i < TX_RING_SIZE; i++) {
1197 rp->tx_skbuff[i] = NULL;
1198 rp->tx_ring[i].tx_status = 0;
1199 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1200 next += sizeof(struct tx_desc);
1201 rp->tx_ring[i].next_desc = cpu_to_le32(next);
1202 if (rp->quirks & rqRhineI)
1203 rp->tx_buf[i] = &rp->tx_bufs[i * PKT_BUF_SZ];
1205 rp->tx_ring[i-1].next_desc = cpu_to_le32(rp->tx_ring_dma);
1209 static void free_tbufs(struct net_device* dev)
1211 struct rhine_private *rp = netdev_priv(dev);
1214 for (i = 0; i < TX_RING_SIZE; i++) {
1215 rp->tx_ring[i].tx_status = 0;
1216 rp->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
1217 rp->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
1218 if (rp->tx_skbuff[i]) {
1219 if (rp->tx_skbuff_dma[i]) {
1220 pci_unmap_single(rp->pdev,
1221 rp->tx_skbuff_dma[i],
1222 rp->tx_skbuff[i]->len,
1225 dev_kfree_skb(rp->tx_skbuff[i]);
1227 rp->tx_skbuff[i] = NULL;
1228 rp->tx_buf[i] = NULL;
1232 static void rhine_check_media(struct net_device *dev, unsigned int init_media)
1234 struct rhine_private *rp = netdev_priv(dev);
1235 void __iomem *ioaddr = rp->base;
1237 mii_check_media(&rp->mii_if, netif_msg_link(rp), init_media);
1239 if (rp->mii_if.full_duplex)
1240 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1FDuplex,
1243 iowrite8(ioread8(ioaddr + ChipCmd1) & ~Cmd1FDuplex,
1246 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1247 rp->mii_if.force_media, netif_carrier_ok(dev));
1250 /* Called after status of force_media possibly changed */
1251 static void rhine_set_carrier(struct mii_if_info *mii)
1253 struct net_device *dev = mii->dev;
1254 struct rhine_private *rp = netdev_priv(dev);
1256 if (mii->force_media) {
1257 /* autoneg is off: Link is always assumed to be up */
1258 if (!netif_carrier_ok(dev))
1259 netif_carrier_on(dev);
1260 } else /* Let MII library update carrier status */
1261 rhine_check_media(dev, 0);
1263 netif_info(rp, link, dev, "force_media %d, carrier %d\n",
1264 mii->force_media, netif_carrier_ok(dev));
1268 * rhine_set_cam - set CAM multicast filters
1269 * @ioaddr: register block of this Rhine
1270 * @idx: multicast CAM index [0..MCAM_SIZE-1]
1271 * @addr: multicast address (6 bytes)
1273 * Load addresses into multicast filters.
1275 static void rhine_set_cam(void __iomem *ioaddr, int idx, u8 *addr)
1279 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1282 /* Paranoid -- idx out of range should never happen */
1283 idx &= (MCAM_SIZE - 1);
1285 iowrite8((u8) idx, ioaddr + CamAddr);
1287 for (i = 0; i < 6; i++, addr++)
1288 iowrite8(*addr, ioaddr + MulticastFilter0 + i);
1292 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1295 iowrite8(0, ioaddr + CamCon);
1299 * rhine_set_vlan_cam - set CAM VLAN filters
1300 * @ioaddr: register block of this Rhine
1301 * @idx: VLAN CAM index [0..VCAM_SIZE-1]
1302 * @addr: VLAN ID (2 bytes)
1304 * Load addresses into VLAN filters.
1306 static void rhine_set_vlan_cam(void __iomem *ioaddr, int idx, u8 *addr)
1308 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1311 /* Paranoid -- idx out of range should never happen */
1312 idx &= (VCAM_SIZE - 1);
1314 iowrite8((u8) idx, ioaddr + CamAddr);
1316 iowrite16(*((u16 *) addr), ioaddr + MulticastFilter0 + 6);
1320 iowrite8(CAMC_CAMWR | CAMC_CAMEN, ioaddr + CamCon);
1323 iowrite8(0, ioaddr + CamCon);
1327 * rhine_set_cam_mask - set multicast CAM mask
1328 * @ioaddr: register block of this Rhine
1329 * @mask: multicast CAM mask
1331 * Mask sets multicast filters active/inactive.
1333 static void rhine_set_cam_mask(void __iomem *ioaddr, u32 mask)
1335 iowrite8(CAMC_CAMEN, ioaddr + CamCon);
1339 iowrite32(mask, ioaddr + CamMask);
1342 iowrite8(0, ioaddr + CamCon);
1346 * rhine_set_vlan_cam_mask - set VLAN CAM mask
1347 * @ioaddr: register block of this Rhine
1348 * @mask: VLAN CAM mask
1350 * Mask sets VLAN filters active/inactive.
1352 static void rhine_set_vlan_cam_mask(void __iomem *ioaddr, u32 mask)
1354 iowrite8(CAMC_CAMEN | CAMC_VCAMSL, ioaddr + CamCon);
1358 iowrite32(mask, ioaddr + CamMask);
1361 iowrite8(0, ioaddr + CamCon);
1365 * rhine_init_cam_filter - initialize CAM filters
1366 * @dev: network device
1368 * Initialize (disable) hardware VLAN and multicast support on this card.
1371 static void rhine_init_cam_filter(struct net_device *dev)
1373 struct rhine_private *rp = netdev_priv(dev);
1374 void __iomem *ioaddr = rp->base;
1376 /* Disable all CAMs */
1377 rhine_set_vlan_cam_mask(ioaddr, 0);
1378 rhine_set_cam_mask(ioaddr, 0);
1380 /* disable hardware VLAN support */
1381 BYTE_REG_BITS_ON(TCR_PQEN, ioaddr + TxConfig);
1382 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
1386 * rhine_update_vcam - update VLAN CAM filters
1387 * @dev: network device
1389 * Update VLAN CAM filters to match configuration change.
1391 static void rhine_update_vcam(struct net_device *dev)
1393 struct rhine_private *rp = netdev_priv(dev);
1394 void __iomem *ioaddr = rp->base;
1396 u32 vCAMmask = 0; /* 32 vCAMs (6105M and better) */
1399 for_each_set_bit(vid, rp->active_vlans, VLAN_N_VID) {
1400 rhine_set_vlan_cam(ioaddr, i, (u8 *)&vid);
1402 if (++i >= VCAM_SIZE)
1405 rhine_set_vlan_cam_mask(ioaddr, vCAMmask);
1408 static int rhine_vlan_rx_add_vid(struct net_device *dev, unsigned short vid)
1410 struct rhine_private *rp = netdev_priv(dev);
1412 spin_lock_bh(&rp->lock);
1413 set_bit(vid, rp->active_vlans);
1414 rhine_update_vcam(dev);
1415 spin_unlock_bh(&rp->lock);
1419 static int rhine_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
1421 struct rhine_private *rp = netdev_priv(dev);
1423 spin_lock_bh(&rp->lock);
1424 clear_bit(vid, rp->active_vlans);
1425 rhine_update_vcam(dev);
1426 spin_unlock_bh(&rp->lock);
1430 static void init_registers(struct net_device *dev)
1432 struct rhine_private *rp = netdev_priv(dev);
1433 void __iomem *ioaddr = rp->base;
1436 for (i = 0; i < 6; i++)
1437 iowrite8(dev->dev_addr[i], ioaddr + StationAddr + i);
1439 /* Initialize other registers. */
1440 iowrite16(0x0006, ioaddr + PCIBusConfig); /* Tune configuration??? */
1441 /* Configure initial FIFO thresholds. */
1442 iowrite8(0x20, ioaddr + TxConfig);
1443 rp->tx_thresh = 0x20;
1444 rp->rx_thresh = 0x60; /* Written in rhine_set_rx_mode(). */
1446 iowrite32(rp->rx_ring_dma, ioaddr + RxRingPtr);
1447 iowrite32(rp->tx_ring_dma, ioaddr + TxRingPtr);
1449 rhine_set_rx_mode(dev);
1451 if (rp->pdev->revision >= VT6105M)
1452 rhine_init_cam_filter(dev);
1454 napi_enable(&rp->napi);
1456 iowrite16(RHINE_EVENT & 0xffff, ioaddr + IntrEnable);
1458 iowrite16(CmdStart | CmdTxOn | CmdRxOn | (Cmd1NoTxPoll << 8),
1460 rhine_check_media(dev, 1);
1463 /* Enable MII link status auto-polling (required for IntrLinkChange) */
1464 static void rhine_enable_linkmon(struct rhine_private *rp)
1466 void __iomem *ioaddr = rp->base;
1468 iowrite8(0, ioaddr + MIICmd);
1469 iowrite8(MII_BMSR, ioaddr + MIIRegAddr);
1470 iowrite8(0x80, ioaddr + MIICmd);
1472 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1474 iowrite8(MII_BMSR | 0x40, ioaddr + MIIRegAddr);
1477 /* Disable MII link status auto-polling (required for MDIO access) */
1478 static void rhine_disable_linkmon(struct rhine_private *rp)
1480 void __iomem *ioaddr = rp->base;
1482 iowrite8(0, ioaddr + MIICmd);
1484 if (rp->quirks & rqRhineI) {
1485 iowrite8(0x01, ioaddr + MIIRegAddr); /* MII_BMSR */
1487 /* Can be called from ISR. Evil. */
1490 /* 0x80 must be set immediately before turning it off */
1491 iowrite8(0x80, ioaddr + MIICmd);
1493 rhine_wait_bit_high(rp, MIIRegAddr, 0x20);
1495 /* Heh. Now clear 0x80 again. */
1496 iowrite8(0, ioaddr + MIICmd);
1499 rhine_wait_bit_high(rp, MIIRegAddr, 0x80);
1502 /* Read and write over the MII Management Data I/O (MDIO) interface. */
1504 static int mdio_read(struct net_device *dev, int phy_id, int regnum)
1506 struct rhine_private *rp = netdev_priv(dev);
1507 void __iomem *ioaddr = rp->base;
1510 rhine_disable_linkmon(rp);
1512 /* rhine_disable_linkmon already cleared MIICmd */
1513 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1514 iowrite8(regnum, ioaddr + MIIRegAddr);
1515 iowrite8(0x40, ioaddr + MIICmd); /* Trigger read */
1516 rhine_wait_bit_low(rp, MIICmd, 0x40);
1517 result = ioread16(ioaddr + MIIData);
1519 rhine_enable_linkmon(rp);
1523 static void mdio_write(struct net_device *dev, int phy_id, int regnum, int value)
1525 struct rhine_private *rp = netdev_priv(dev);
1526 void __iomem *ioaddr = rp->base;
1528 rhine_disable_linkmon(rp);
1530 /* rhine_disable_linkmon already cleared MIICmd */
1531 iowrite8(phy_id, ioaddr + MIIPhyAddr);
1532 iowrite8(regnum, ioaddr + MIIRegAddr);
1533 iowrite16(value, ioaddr + MIIData);
1534 iowrite8(0x20, ioaddr + MIICmd); /* Trigger write */
1535 rhine_wait_bit_low(rp, MIICmd, 0x20);
1537 rhine_enable_linkmon(rp);
1540 static void rhine_task_disable(struct rhine_private *rp)
1542 mutex_lock(&rp->task_lock);
1543 rp->task_enable = false;
1544 mutex_unlock(&rp->task_lock);
1546 cancel_work_sync(&rp->slow_event_task);
1547 cancel_work_sync(&rp->reset_task);
1550 static void rhine_task_enable(struct rhine_private *rp)
1552 mutex_lock(&rp->task_lock);
1553 rp->task_enable = true;
1554 mutex_unlock(&rp->task_lock);
1557 static int rhine_open(struct net_device *dev)
1559 struct rhine_private *rp = netdev_priv(dev);
1560 void __iomem *ioaddr = rp->base;
1563 rc = request_irq(rp->pdev->irq, rhine_interrupt, IRQF_SHARED, dev->name,
1568 netif_dbg(rp, ifup, dev, "%s() irq %d\n", __func__, rp->pdev->irq);
1570 rc = alloc_ring(dev);
1572 free_irq(rp->pdev->irq, dev);
1577 rhine_chip_reset(dev);
1578 rhine_task_enable(rp);
1579 init_registers(dev);
1581 netif_dbg(rp, ifup, dev, "%s() Done - status %04x MII status: %04x\n",
1582 __func__, ioread16(ioaddr + ChipCmd),
1583 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1585 netif_start_queue(dev);
1590 static void rhine_reset_task(struct work_struct *work)
1592 struct rhine_private *rp = container_of(work, struct rhine_private,
1594 struct net_device *dev = rp->dev;
1596 mutex_lock(&rp->task_lock);
1598 if (!rp->task_enable)
1601 napi_disable(&rp->napi);
1602 spin_lock_bh(&rp->lock);
1604 /* clear all descriptors */
1610 /* Reinitialize the hardware. */
1611 rhine_chip_reset(dev);
1612 init_registers(dev);
1614 spin_unlock_bh(&rp->lock);
1616 dev->trans_start = jiffies; /* prevent tx timeout */
1617 dev->stats.tx_errors++;
1618 netif_wake_queue(dev);
1621 mutex_unlock(&rp->task_lock);
1624 static void rhine_tx_timeout(struct net_device *dev)
1626 struct rhine_private *rp = netdev_priv(dev);
1627 void __iomem *ioaddr = rp->base;
1629 netdev_warn(dev, "Transmit timed out, status %04x, PHY status %04x, resetting...\n",
1630 ioread16(ioaddr + IntrStatus),
1631 mdio_read(dev, rp->mii_if.phy_id, MII_BMSR));
1633 schedule_work(&rp->reset_task);
1636 static netdev_tx_t rhine_start_tx(struct sk_buff *skb,
1637 struct net_device *dev)
1639 struct rhine_private *rp = netdev_priv(dev);
1640 void __iomem *ioaddr = rp->base;
1643 /* Caution: the write order is important here, set the field
1644 with the "ownership" bits last. */
1646 /* Calculate the next Tx descriptor entry. */
1647 entry = rp->cur_tx % TX_RING_SIZE;
1649 if (skb_padto(skb, ETH_ZLEN))
1650 return NETDEV_TX_OK;
1652 rp->tx_skbuff[entry] = skb;
1654 if ((rp->quirks & rqRhineI) &&
1655 (((unsigned long)skb->data & 3) || skb_shinfo(skb)->nr_frags != 0 || skb->ip_summed == CHECKSUM_PARTIAL)) {
1656 /* Must use alignment buffer. */
1657 if (skb->len > PKT_BUF_SZ) {
1658 /* packet too long, drop it */
1660 rp->tx_skbuff[entry] = NULL;
1661 dev->stats.tx_dropped++;
1662 return NETDEV_TX_OK;
1665 /* Padding is not copied and so must be redone. */
1666 skb_copy_and_csum_dev(skb, rp->tx_buf[entry]);
1667 if (skb->len < ETH_ZLEN)
1668 memset(rp->tx_buf[entry] + skb->len, 0,
1669 ETH_ZLEN - skb->len);
1670 rp->tx_skbuff_dma[entry] = 0;
1671 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_bufs_dma +
1672 (rp->tx_buf[entry] -
1675 rp->tx_skbuff_dma[entry] =
1676 pci_map_single(rp->pdev, skb->data, skb->len,
1678 rp->tx_ring[entry].addr = cpu_to_le32(rp->tx_skbuff_dma[entry]);
1681 rp->tx_ring[entry].desc_length =
1682 cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
1684 if (unlikely(vlan_tx_tag_present(skb))) {
1685 rp->tx_ring[entry].tx_status = cpu_to_le32((vlan_tx_tag_get(skb)) << 16);
1686 /* request tagging */
1687 rp->tx_ring[entry].desc_length |= cpu_to_le32(0x020000);
1690 rp->tx_ring[entry].tx_status = 0;
1694 rp->tx_ring[entry].tx_status |= cpu_to_le32(DescOwn);
1699 /* Non-x86 Todo: explicitly flush cache lines here. */
1701 if (vlan_tx_tag_present(skb))
1702 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1703 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1705 /* Wake the potentially-idle transmit channel */
1706 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1710 if (rp->cur_tx == rp->dirty_tx + TX_QUEUE_LEN)
1711 netif_stop_queue(dev);
1713 netif_dbg(rp, tx_queued, dev, "Transmit frame #%d queued in slot %d\n",
1714 rp->cur_tx - 1, entry);
1716 return NETDEV_TX_OK;
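/*
 * Illustrative sketch (not part of the driver) of the "ownership bits
 * last" rule from the comment above; 'mapping' stands for the DMA
 * address obtained from pci_map_single(). All other descriptor fields
 * must be globally visible before DescOwn hands the slot to the chip.
 */
#if 0
	rp->tx_ring[entry].addr = cpu_to_le32(mapping);
	rp->tx_ring[entry].desc_length = cpu_to_le32(TXDESC | skb->len);
	wmb();	/* order the writes above before the handoff below */
	rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
#endif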
1719 static void rhine_irq_disable(struct rhine_private *rp)
1721 iowrite16(0x0000, rp->base + IntrEnable);
1725 /* The interrupt handler does all of the Rx thread work and cleans up
1726 after the Tx thread. */
1727 static irqreturn_t rhine_interrupt(int irq, void *dev_instance)
1729 struct net_device *dev = dev_instance;
1730 struct rhine_private *rp = netdev_priv(dev);
1734 status = rhine_get_events(rp);
1736 netif_dbg(rp, intr, dev, "Interrupt, status %08x\n", status);
1738 if (status & RHINE_EVENT) {
1741 rhine_irq_disable(rp);
1742 napi_schedule(&rp->napi);
1745 if (status & ~(IntrLinkChange | IntrStatsMax | RHINE_EVENT_NAPI)) {
1746 netif_err(rp, intr, dev, "Something Wicked happened! %08x\n",
1750 return IRQ_RETVAL(handled);
1753 /* This routine is logically part of the interrupt handler, but isolated for clarity. */
1755 static void rhine_tx(struct net_device *dev)
1757 struct rhine_private *rp = netdev_priv(dev);
1758 int txstatus = 0, entry = rp->dirty_tx % TX_RING_SIZE;
1760 /* find and cleanup dirty tx descriptors */
1761 while (rp->dirty_tx != rp->cur_tx) {
1762 txstatus = le32_to_cpu(rp->tx_ring[entry].tx_status);
1763 netif_dbg(rp, tx_done, dev, "Tx scavenge %d status %08x\n",
1765 if (txstatus & DescOwn)
1767 if (txstatus & 0x8000) {
1768 netif_dbg(rp, tx_done, dev,
1769 "Transmit error, Tx status %08x\n", txstatus);
1770 dev->stats.tx_errors++;
1771 if (txstatus & 0x0400)
1772 dev->stats.tx_carrier_errors++;
1773 if (txstatus & 0x0200)
1774 dev->stats.tx_window_errors++;
1775 if (txstatus & 0x0100)
1776 dev->stats.tx_aborted_errors++;
1777 if (txstatus & 0x0080)
1778 dev->stats.tx_heartbeat_errors++;
1779 if (((rp->quirks & rqRhineI) && txstatus & 0x0002) ||
1780 (txstatus & 0x0800) || (txstatus & 0x1000)) {
1781 dev->stats.tx_fifo_errors++;
1782 rp->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
1783 break; /* Keep the skb - we try again */
1785 /* Transmitter restarted in 'abnormal' handler. */
1787 if (rp->quirks & rqRhineI)
1788 dev->stats.collisions += (txstatus >> 3) & 0x0F;
1790 dev->stats.collisions += txstatus & 0x0F;
1791 netif_dbg(rp, tx_done, dev, "collisions: %1.1x:%1.1x\n",
1792 (txstatus >> 3) & 0xF, txstatus & 0xF);
1793 dev->stats.tx_bytes += rp->tx_skbuff[entry]->len;
1794 dev->stats.tx_packets++;
1796 /* Free the original skb. */
1797 if (rp->tx_skbuff_dma[entry]) {
1798 pci_unmap_single(rp->pdev,
1799 rp->tx_skbuff_dma[entry],
1800 rp->tx_skbuff[entry]->len,
1803 dev_kfree_skb_irq(rp->tx_skbuff[entry]);
1804 rp->tx_skbuff[entry] = NULL;
1805 entry = (++rp->dirty_tx) % TX_RING_SIZE;
1807 if ((rp->cur_tx - rp->dirty_tx) < TX_QUEUE_LEN - 4)
1808 netif_wake_queue(dev);
1812 * rhine_get_vlan_tci - extract TCI from Rx data buffer
1813 * @skb: pointer to sk_buff
1814 * @data_size: used data area of the buffer including CRC
1816 * If hardware VLAN tag extraction is enabled and the chip indicates a 802.1Q
1817 * packet, the extracted 802.1Q header (2 bytes TPID + 2 bytes TCI) is 4-byte
1818 * aligned following the CRC.
1820 static inline u16 rhine_get_vlan_tci(struct sk_buff *skb, int data_size)
1822 u8 *trailer = (u8 *)skb->data + ((data_size + 3) & ~3) + 2;
1823 return be16_to_cpup((__be16 *)trailer);
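/*
 * Illustrative worked example (not part of the driver): for
 * data_size = 64 (frame including CRC), the trailer starts at
 * (64 + 3) & ~3 = 64; skipping the 2-byte TPID puts the TCI at
 * offset 66 into the buffer.
 */
#if 0
	u8 *trailer = skb->data + ((64 + 3) & ~3) + 2;	/* data + 66 */
	u16 tci = be16_to_cpup((__be16 *)trailer);	/* prio/CFI/VID */
#endif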
1826 /* Process up to limit frames from receive ring */
1827 static int rhine_rx(struct net_device *dev, int limit)
1829 struct rhine_private *rp = netdev_priv(dev);
1831 int entry = rp->cur_rx % RX_RING_SIZE;
1833 netif_dbg(rp, rx_status, dev, "%s(), entry %d status %08x\n", __func__,
1834 entry, le32_to_cpu(rp->rx_head_desc->rx_status));
1836 /* If EOP is set on the next entry, it's a new packet. Send it up. */
1837 for (count = 0; count < limit; ++count) {
1838 struct rx_desc *desc = rp->rx_head_desc;
1839 u32 desc_status = le32_to_cpu(desc->rx_status);
1840 u32 desc_length = le32_to_cpu(desc->desc_length);
1841 int data_size = desc_status >> 16;
1843 if (desc_status & DescOwn)
1846 netif_dbg(rp, rx_status, dev, "%s() status %08x\n", __func__,
1849 if ((desc_status & (RxWholePkt | RxErr)) != RxWholePkt) {
1850 if ((desc_status & RxWholePkt) != RxWholePkt) {
1852 "Oversized Ethernet frame spanned multiple buffers, "
1853 "entry %#x length %d status %08x!\n",
1857 "Oversized Ethernet frame %p vs %p\n",
1859 &rp->rx_ring[entry]);
1860 dev->stats.rx_length_errors++;
1861 } else if (desc_status & RxErr) {
1862 /* There was an error. */
1863 netif_dbg(rp, rx_err, dev,
1864 "%s() Rx error %08x\n", __func__,
1866 dev->stats.rx_errors++;
1867 if (desc_status & 0x0030)
1868 dev->stats.rx_length_errors++;
1869 if (desc_status & 0x0048)
1870 dev->stats.rx_fifo_errors++;
1871 if (desc_status & 0x0004)
1872 dev->stats.rx_frame_errors++;
1873 if (desc_status & 0x0002) {
1874 /* this can also be updated outside the interrupt handler */
1875 spin_lock(&rp->lock);
1876 dev->stats.rx_crc_errors++;
1877 spin_unlock(&rp->lock);
1881 struct sk_buff *skb = NULL;
1882 /* Length should omit the CRC */
1883 int pkt_len = data_size - 4;
1886 /* Check if the packet is long enough to accept without
1887 copying to a minimally-sized skbuff. */
1888 if (pkt_len < rx_copybreak)
1889 skb = netdev_alloc_skb_ip_align(dev, pkt_len);
1891 pci_dma_sync_single_for_cpu(rp->pdev,
1892 rp->rx_skbuff_dma[entry],
1894 PCI_DMA_FROMDEVICE);
1896 skb_copy_to_linear_data(skb,
1897 rp->rx_skbuff[entry]->data,
1899 skb_put(skb, pkt_len);
1900 pci_dma_sync_single_for_device(rp->pdev,
1901 rp->rx_skbuff_dma[entry],
1903 PCI_DMA_FROMDEVICE);
1905 skb = rp->rx_skbuff[entry];
1907 netdev_err(dev, "Inconsistent Rx descriptor chain\n");
1910 rp->rx_skbuff[entry] = NULL;
1911 skb_put(skb, pkt_len);
1912 pci_unmap_single(rp->pdev,
1913 rp->rx_skbuff_dma[entry],
1915 PCI_DMA_FROMDEVICE);
1918 if (unlikely(desc_length & DescTag))
1919 vlan_tci = rhine_get_vlan_tci(skb, data_size);
1921 skb->protocol = eth_type_trans(skb, dev);
1923 if (unlikely(desc_length & DescTag))
1924 __vlan_hwaccel_put_tag(skb, vlan_tci);
1925 netif_receive_skb(skb);
1926 dev->stats.rx_bytes += pkt_len;
1927 dev->stats.rx_packets++;
1929 entry = (++rp->cur_rx) % RX_RING_SIZE;
1930 rp->rx_head_desc = &rp->rx_ring[entry];
1933 /* Refill the Rx ring buffers. */
1934 for (; rp->cur_rx - rp->dirty_rx > 0; rp->dirty_rx++) {
1935 struct sk_buff *skb;
1936 entry = rp->dirty_rx % RX_RING_SIZE;
1937 if (rp->rx_skbuff[entry] == NULL) {
1938 skb = netdev_alloc_skb(dev, rp->rx_buf_sz);
1939 rp->rx_skbuff[entry] = skb;
1941 break; /* Better luck next round. */
1942 rp->rx_skbuff_dma[entry] =
1943 pci_map_single(rp->pdev, skb->data,
1945 PCI_DMA_FROMDEVICE);
1946 rp->rx_ring[entry].addr = cpu_to_le32(rp->rx_skbuff_dma[entry]);
1948 rp->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
1954 static void rhine_restart_tx(struct net_device *dev) {
1955 struct rhine_private *rp = netdev_priv(dev);
1956 void __iomem *ioaddr = rp->base;
1957 int entry = rp->dirty_tx % TX_RING_SIZE;
1961 * If new errors occurred, we need to sort them out before doing Tx.
1962 * In that case the ISR will be back here RSN anyway.
1964 intr_status = rhine_get_events(rp);
1966 if ((intr_status & IntrTxErrSummary) == 0) {
1968 /* We know better than the chip where it should continue. */
1969 iowrite32(rp->tx_ring_dma + entry * sizeof(struct tx_desc),
1970 ioaddr + TxRingPtr);
1972 iowrite8(ioread8(ioaddr + ChipCmd) | CmdTxOn,
1975 if (rp->tx_ring[entry].desc_length & cpu_to_le32(0x020000))
1976 /* Tx queues are bits 7-0 (first Tx queue: bit 7) */
1977 BYTE_REG_BITS_ON(1 << 7, ioaddr + TQWake);
1979 iowrite8(ioread8(ioaddr + ChipCmd1) | Cmd1TxDemand,
1984 /* This should never happen */
1985 netif_warn(rp, tx_err, dev, "another error occurred %08x\n",
1991 static void rhine_slow_event_task(struct work_struct *work)
1993 struct rhine_private *rp =
1994 container_of(work, struct rhine_private, slow_event_task);
1995 struct net_device *dev = rp->dev;
1998 mutex_lock(&rp->task_lock);
2000 if (!rp->task_enable)
2003 intr_status = rhine_get_events(rp);
2004 rhine_ack_events(rp, intr_status & RHINE_EVENT_SLOW);
2006 if (intr_status & IntrLinkChange)
2007 rhine_check_media(dev, 0);
2009 if (intr_status & IntrPCIErr)
2010 netif_warn(rp, hw, dev, "PCI error\n");
2012 napi_disable(&rp->napi);
2013 rhine_irq_disable(rp);
2014 /* Slow and safe. Consider __napi_schedule as a replacement? */
2015 napi_enable(&rp->napi);
2016 napi_schedule(&rp->napi);
2019 mutex_unlock(&rp->task_lock);
2022 static struct net_device_stats *rhine_get_stats(struct net_device *dev)
2024 struct rhine_private *rp = netdev_priv(dev);
2026 spin_lock_bh(&rp->lock);
2027 rhine_update_rx_crc_and_missed_errors(rp);
2028 spin_unlock_bh(&rp->lock);
2033 static void rhine_set_rx_mode(struct net_device *dev)
2035 struct rhine_private *rp = netdev_priv(dev);
2036 void __iomem *ioaddr = rp->base;
2037 u32 mc_filter[2]; /* Multicast hash filter */
2038 u8 rx_mode = 0x0C; /* Note: 0x02=accept runt, 0x01=accept errs */
2039 struct netdev_hw_addr *ha;
2041 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
2043 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2044 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2045 } else if ((netdev_mc_count(dev) > multicast_filter_limit) ||
2046 (dev->flags & IFF_ALLMULTI)) {
2047 /* Too many to match, or accept all multicasts. */
2048 iowrite32(0xffffffff, ioaddr + MulticastFilter0);
2049 iowrite32(0xffffffff, ioaddr + MulticastFilter1);
2050 } else if (rp->pdev->revision >= VT6105M) {
2052 u32 mCAMmask = 0; /* 32 mCAMs (6105M and better) */
2053 netdev_for_each_mc_addr(ha, dev) {
2056 rhine_set_cam(ioaddr, i, ha->addr);
2060 rhine_set_cam_mask(ioaddr, mCAMmask);
2062 memset(mc_filter, 0, sizeof(mc_filter));
2063 netdev_for_each_mc_addr(ha, dev) {
2064 int bit_nr = ether_crc(ETH_ALEN, ha->addr) >> 26;
2066 mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);
2068 iowrite32(mc_filter[0], ioaddr + MulticastFilter0);
2069 iowrite32(mc_filter[1], ioaddr + MulticastFilter1);
2071 /* enable/disable VLAN receive filtering */
2072 if (rp->pdev->revision >= VT6105M) {
2073 if (dev->flags & IFF_PROMISC)
2074 BYTE_REG_BITS_OFF(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2076 BYTE_REG_BITS_ON(BCR1_VIDFR, ioaddr + PCIBusConfig1);
2078 BYTE_REG_BITS_ON(rx_mode, ioaddr + RxConfig);
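/*
 * Illustrative worked example (not part of the driver) for the hash
 * filter above: if ether_crc() yields 0x94000000 for an address, then
 * bit_nr = 0x94000000 >> 26 = 37, which selects bit 37 & 31 = 5 in
 * word mc_filter[37 >> 5] = mc_filter[1].
 */
#if 0
	int bit_nr = 0x94000000 >> 26;			/* = 37 */
	mc_filter[bit_nr >> 5] |= 1 << (bit_nr & 31);	/* word 1, bit 5 */
#endif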
2081 static void netdev_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
2083 struct rhine_private *rp = netdev_priv(dev);
2085 strlcpy(info->driver, DRV_NAME, sizeof(info->driver));
2086 strlcpy(info->version, DRV_VERSION, sizeof(info->version));
2087 strlcpy(info->bus_info, pci_name(rp->pdev), sizeof(info->bus_info));
2090 static int netdev_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2092 struct rhine_private *rp = netdev_priv(dev);
2095 mutex_lock(&rp->task_lock);
2096 rc = mii_ethtool_gset(&rp->mii_if, cmd);
2097 mutex_unlock(&rp->task_lock);
2102 static int netdev_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
2104 struct rhine_private *rp = netdev_priv(dev);
2107 mutex_lock(&rp->task_lock);
2108 rc = mii_ethtool_sset(&rp->mii_if, cmd);
2109 rhine_set_carrier(&rp->mii_if);
2110 mutex_unlock(&rp->task_lock);
2115 static int netdev_nway_reset(struct net_device *dev)
2117 struct rhine_private *rp = netdev_priv(dev);
2119 return mii_nway_restart(&rp->mii_if);
2122 static u32 netdev_get_link(struct net_device *dev)
2124 struct rhine_private *rp = netdev_priv(dev);
2126 return mii_link_ok(&rp->mii_if);
2129 static u32 netdev_get_msglevel(struct net_device *dev)
2131 struct rhine_private *rp = netdev_priv(dev);
2133 return rp->msg_enable;
2136 static void netdev_set_msglevel(struct net_device *dev, u32 value)
2138 struct rhine_private *rp = netdev_priv(dev);
2140 rp->msg_enable = value;
2143 static void rhine_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2145 struct rhine_private *rp = netdev_priv(dev);
2147 if (!(rp->quirks & rqWOL))
2150 spin_lock_irq(&rp->lock);
2151 wol->supported = WAKE_PHY | WAKE_MAGIC |
2152 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2153 wol->wolopts = rp->wolopts;
2154 spin_unlock_irq(&rp->lock);
2157 static int rhine_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
2159 struct rhine_private *rp = netdev_priv(dev);
2160 u32 support = WAKE_PHY | WAKE_MAGIC |
2161 WAKE_UCAST | WAKE_MCAST | WAKE_BCAST; /* Untested */
2163 if (!(rp->quirks & rqWOL))
2166 if (wol->wolopts & ~support)
2169 spin_lock_irq(&rp->lock);
2170 rp->wolopts = wol->wolopts;
2171 spin_unlock_irq(&rp->lock);
2176 static const struct ethtool_ops netdev_ethtool_ops = {
2177 .get_drvinfo = netdev_get_drvinfo,
2178 .get_settings = netdev_get_settings,
2179 .set_settings = netdev_set_settings,
2180 .nway_reset = netdev_nway_reset,
2181 .get_link = netdev_get_link,
2182 .get_msglevel = netdev_get_msglevel,
2183 .set_msglevel = netdev_set_msglevel,
2184 .get_wol = rhine_get_wol,
2185 .set_wol = rhine_set_wol,
2188 static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
2190 struct rhine_private *rp = netdev_priv(dev);
2193 if (!netif_running(dev))
2196 mutex_lock(&rp->task_lock);
2197 rc = generic_mii_ioctl(&rp->mii_if, if_mii(rq), cmd, NULL);
2198 rhine_set_carrier(&rp->mii_if);
2199 mutex_unlock(&rp->task_lock);
2204 static int rhine_close(struct net_device *dev)
2206 struct rhine_private *rp = netdev_priv(dev);
2207 void __iomem *ioaddr = rp->base;
2209 rhine_task_disable(rp);
2210 napi_disable(&rp->napi);
2211 netif_stop_queue(dev);
2213 netif_dbg(rp, ifdown, dev, "Shutting down ethercard, status was %04x\n",
2214 ioread16(ioaddr + ChipCmd));
2216 /* Switch to loopback mode to avoid hardware races. */
2217 iowrite8(rp->tx_thresh | 0x02, ioaddr + TxConfig);
2219 rhine_irq_disable(rp);
2221 /* Stop the chip's Tx and Rx processes. */
2222 iowrite16(CmdStop, ioaddr + ChipCmd);
2224 free_irq(rp->pdev->irq, dev);
2233 static void __devexit rhine_remove_one(struct pci_dev *pdev)
2235 struct net_device *dev = pci_get_drvdata(pdev);
2236 struct rhine_private *rp = netdev_priv(dev);
2238 unregister_netdev(dev);
2240 pci_iounmap(pdev, rp->base);
2241 pci_release_regions(pdev);
2244 pci_disable_device(pdev);
2245 pci_set_drvdata(pdev, NULL);
2248 static void rhine_shutdown (struct pci_dev *pdev)
2250 struct net_device *dev = pci_get_drvdata(pdev);
2251 struct rhine_private *rp = netdev_priv(dev);
2252 void __iomem *ioaddr = rp->base;
2254 if (!(rp->quirks & rqWOL))
2255 return; /* Nothing to do for non-WOL adapters */
2257 rhine_power_init(dev);
2259 /* Make sure we use pattern 0, 1 and not 4, 5 */
2260 if (rp->quirks & rq6patterns)
2261 iowrite8(0x04, ioaddr + WOLcgClr);
2263 spin_lock(&rp->lock);
2265 if (rp->wolopts & WAKE_MAGIC) {
2266 iowrite8(WOLmagic, ioaddr + WOLcrSet);
2268 * Turn EEPROM-controlled wake-up back on -- some hardware may
2269 * not cooperate otherwise.
2271 iowrite8(ioread8(ioaddr + ConfigA) | 0x03, ioaddr + ConfigA);
2274 if (rp->wolopts & (WAKE_BCAST|WAKE_MCAST))
2275 iowrite8(WOLbmcast, ioaddr + WOLcgSet);
2277 if (rp->wolopts & WAKE_PHY)
2278 iowrite8(WOLlnkon | WOLlnkoff, ioaddr + WOLcrSet);
2280 if (rp->wolopts & WAKE_UCAST)
2281 iowrite8(WOLucast, ioaddr + WOLcrSet);
2284 /* Enable legacy WOL (for old motherboards) */
2285 iowrite8(0x01, ioaddr + PwcfgSet);
2286 iowrite8(ioread8(ioaddr + StickyHW) | 0x04, ioaddr + StickyHW);
2289 spin_unlock(&rp->lock);
2291 if (system_state == SYSTEM_POWER_OFF && !avoid_D3) {
2292 iowrite8(ioread8(ioaddr + StickyHW) | 0x03, ioaddr + StickyHW);
2294 pci_wake_from_d3(pdev, true);
2295 pci_set_power_state(pdev, PCI_D3hot);
2299 #ifdef CONFIG_PM_SLEEP
2300 static int rhine_suspend(struct device *device)
2302 struct pci_dev *pdev = to_pci_dev(device);
2303 struct net_device *dev = pci_get_drvdata(pdev);
2304 struct rhine_private *rp = netdev_priv(dev);
2306 if (!netif_running(dev))
2309 rhine_task_disable(rp);
2310 rhine_irq_disable(rp);
2311 napi_disable(&rp->napi);
2313 netif_device_detach(dev);
2315 rhine_shutdown(pdev);
2320 static int rhine_resume(struct device *device)
2322 struct pci_dev *pdev = to_pci_dev(device);
2323 struct net_device *dev = pci_get_drvdata(pdev);
2324 struct rhine_private *rp = netdev_priv(dev);
2326 if (!netif_running(dev))
2330 enable_mmio(rp->pioaddr, rp->quirks);
2332 rhine_power_init(dev);
2337 rhine_task_enable(rp);
2338 spin_lock_bh(&rp->lock);
2339 init_registers(dev);
2340 spin_unlock_bh(&rp->lock);
2342 netif_device_attach(dev);
2347 static SIMPLE_DEV_PM_OPS(rhine_pm_ops, rhine_suspend, rhine_resume);
2348 #define RHINE_PM_OPS (&rhine_pm_ops)
2352 #define RHINE_PM_OPS NULL
2354 #endif /* !CONFIG_PM_SLEEP */
2356 static struct pci_driver rhine_driver = {
2358 .id_table = rhine_pci_tbl,
2359 .probe = rhine_init_one,
2360 .remove = __devexit_p(rhine_remove_one),
2361 .shutdown = rhine_shutdown,
2362 .driver.pm = RHINE_PM_OPS,
2365 static struct dmi_system_id __initdata rhine_dmi_table[] = {
2369 DMI_MATCH(DMI_BIOS_VENDOR, "Award Software International, Inc."),
2370 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2376 DMI_MATCH(DMI_BIOS_VENDOR, "Phoenix Technologies, LTD"),
2377 DMI_MATCH(DMI_BIOS_VERSION, "6.00 PG"),
2383 static int __init rhine_init(void)
2385 /* when a module, this is printed whether or not devices are found in probe */
2387 pr_info("%s\n", version);
2389 if (dmi_check_system(rhine_dmi_table)) {
2390 /* these BIOSes fail at PXE boot if chip is in D3 */
2392 pr_warn("Broken BIOS detected, avoid_D3 enabled\n");
2395 pr_info("avoid_D3 set\n");
2397 return pci_register_driver(&rhine_driver);
2401 static void __exit rhine_cleanup(void)
2403 pci_unregister_driver(&rhine_driver);
2407 module_init(rhine_init);
2408 module_exit(rhine_cleanup);