/* sundance.c: A Linux device driver for the Sundance ST201 "Alta". */

    Written 1999-2000 by Donald Becker.

    This software may be used and distributed according to the terms of
    the GNU General Public License (GPL), incorporated herein by reference.
    Drivers based on or derived from this code fall under the GPL and must
    retain the authorship, copyright and license notice. This file is not
    a complete program and may only be used when the entire operating
    system is licensed under the GPL.

    The author may be reached as becker@scyld.com, or C/O
    Scyld Computing Corporation
    410 Severn Ave., Suite 210

    Support and updates available at
    http://www.scyld.com/network/sundance.html
/* These identify the driver base version and may not be removed. */
static const char version1[] =
"sundance.c:v1.01 4/09/00 Written by Donald Becker\n";
static const char version2[] =
" http://www.scyld.com/network/sundance.html\n";

/* The user-configurable values.
   These may be modified when a driver module is loaded. */

static int debug = 1;           /* 1 normal messages, 0 quiet .. 7 verbose. */
/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   Typical is a 64 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 32;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature.
   This chip can receive into offset buffers, so the Alpha does not
   need a copy-align. */
static int rx_copybreak = 0;

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8             /* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
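/* A minimal usage sketch (illustrative only, not taken from the original
   text): when built as a module these values are supplied on the command
   line, for example

       modprobe sundance debug=2 full_duplex=1,0

   Per-card entries in options[] and full_duplex[] follow card probe order;
   a value of -1 leaves a setting at its autonegotiated default. */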
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority, and more than 128 requires modifying the
   Tx error recovery.
   Large receive rings merely waste memory. */
#define TX_RING_SIZE    16
#define TX_QUEUE_LEN    10      /* Limit ring entries actually used. */
#define RX_RING_SIZE    32

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT  (2*HZ)

#define PKT_BUF_SZ      1536    /* Size of each temporary Rx buffer. */

#if !defined(__OPTIMIZE__)
#warning You must compile this file with the correct options!
#warning See the last lines of the source file.
#error You must compile this driver with "-O".
#endif
/* Include files, designed to support most kernel versions 2.0.0 and later. */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/string.h>
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/init.h>
#include <asm/processor.h>      /* Processor type for cache alignment. */
#include <asm/bitops.h>
#include <asm/io.h>             /* readb()/writeb(), ioremap(), virt_to_bus(). */
#include <linux/spinlock.h>

/* Condensed operations for readability. */
#define virt_to_le32desc(addr)  cpu_to_le32(virt_to_bus(addr))
#define le32desc_to_virt(addr)  bus_to_virt(le32_to_cpu(addr))

MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
MODULE_DESCRIPTION("Sundance Alta Ethernet driver");
MODULE_PARM(max_interrupt_work, "i");
MODULE_PARM(mtu, "i");
MODULE_PARM(debug, "i");
MODULE_PARM(rx_copybreak, "i");
MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
I. Board Compatibility

This driver is designed for the Sundance Technologies "Alta" ST201 chip.

II. Board-specific settings

III. Driver operation

This driver uses two statically allocated fixed-size descriptor lists
formed into rings by a branch from the final descriptor to the beginning of
the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
Some chips explicitly use only 2^N sized rings, while others use a
'next descriptor' pointer that the driver forms into rings.

IIIb/c. Transmit/Receive Structure

This driver uses a zero-copy receive and transmit scheme.
The driver allocates full frame size skbuffs for the Rx ring buffers at
open() time and passes the skb->data field to the chip as receive data
buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
a fresh skbuff is allocated and the frame is copied to the new skbuff.
When the incoming frame is larger, the skbuff is passed directly up the
protocol stack. Buffers consumed this way are replaced by newly allocated
skbuffs in a later phase of receives.

The RX_COPYBREAK value is chosen to trade off the memory wasted by
using a full-sized skbuff for small frames vs. the copying costs of larger
frames. New boards are typically used in generously configured machines
and the underfilled buffers have negligible impact compared to the benefit of
a single allocation size, so the default value of zero results in never
copying packets. When copying is done, the cost is usually mitigated by using
a combined copy/checksum routine. Copying also preloads the cache, which is
most useful with small frames.
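As a minimal sketch of that decision (illustrative only; the real code is in
netdev_rx() below):

    if (pkt_len < rx_copybreak
        && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
        skb_reserve(skb, 2);                    small frame: copy into a fresh skbuff
        eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
        skb_put(skb, pkt_len);                  the ring buffer stays in place
    } else {
        skb_put(skb = np->rx_skbuff[entry], pkt_len);   hand the ring skbuff upstream
        np->rx_skbuff[entry] = NULL;            it is replaced in the refill phase
    }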
A subtle aspect of the operation is that the IP header at offset 14 in an
Ethernet frame isn't longword aligned for further processing.
Unaligned buffers are permitted by the Sundance hardware, so
frames are received into the skbuff at an offset of "+2", 16-byte aligning
the IP header.

IIId. Synchronization

The driver runs as two independent, single-threaded flows of control. One
is the send-packet routine, which enforces single-threaded use by the
dev->tbusy flag. The other thread is the interrupt handler, which is single
threaded by the hardware and interrupt handling software.

The send packet thread has partial control over the Tx ring and 'dev->tbusy'
flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
queue slot is empty, it clears the tbusy flag when finished; otherwise it sets
the 'lp->tx_full' flag.

The interrupt handler has exclusive control over the Rx ring and records stats
from the Tx ring. After reaping the stats, it marks the Tx queue entry as
empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
clears both the tx_full and tbusy flags.
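In outline (a sketch only; the actual tests live in start_tx() and the
interrupt handler below), the ring indices drive this handshake.  After
queueing a frame, start_tx() stalls the queue once the ring is nearly full:

    if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
        netif_stop_queue(dev);

and the interrupt handler, after reaping finished descriptors, unstalls it
once enough space has been reclaimed:

    if (np->tx_full  &&  np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4)
        netif_wake_queue(dev);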
The Sundance ST201 datasheet, preliminary version.
http://cesdis.gsfc.nasa.gov/linux/misc/100mbps.html
http://cesdis.gsfc.nasa.gov/linux/misc/NWay.html

enum pci_id_flags_bits {
    /* Set PCI command register bits before calling probe1(). */
    PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
    /* Read and map the single following PCI BAR. */
    PCI_ADDR0=0<<4, PCI_ADDR1=1<<4, PCI_ADDR2=2<<4, PCI_ADDR3=3<<4,
    PCI_ADDR_64BITS=0x100, PCI_NO_ACPI_WAKE=0x200, PCI_NO_MIN_LATENCY=0x400,

enum chip_capability_flags {CanHaveMII=1, };

#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_IO  | PCI_ADDR0)
#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)

static struct pci_device_id sundance_pci_tbl[] __devinitdata = {
    { 0x1186, 0x1002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
    { 0x13F0, 0x0201, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 1 },
MODULE_DEVICE_TABLE(pci, sundance_pci_tbl);

    int pci, pci_mask, subsystem, subsystem_mask;
    int revision, revision_mask;        /* Only 8 bits. */
    enum pci_id_flags_bits pci_flags;
    int io_size;                        /* Needed for I/O region check or ioremap(). */
    int drv_flags;                      /* Driver use, intended as capability flags. */

static struct pci_id_info pci_id_tbl[] = {
    {"OEM Sundance Technology ST201", {0x10021186, 0xffffffff, },
     PCI_IOTYPE, 128, CanHaveMII},
    {"Sundance Technology Alta", {0x020113F0, 0xffffffff, },
     PCI_IOTYPE, 128, CanHaveMII},
    {0,},                               /* 0 terminated list. */
/* This driver was written to use PCI memory space; however, x86-oriented
   hardware often uses I/O space accesses. */

/* Offsets to the device registers.
   Unlike software-only systems, device drivers interact with complex hardware.
   It's not useful to define symbolic names for every register bit in the
   device. The name can only partially document the semantics and make
   the driver longer and more difficult to read.
   In general, only the important configuration values or bits changed
   multiple times should be defined symbolically. */
    DMACtrl=0x00, TxListPtr=0x04, TxDMACtrl=0x08, TxDescPoll=0x0a,
    RxDMAStatus=0x0c, RxListPtr=0x10, RxDMACtrl=0x14, RxDescPoll=0x16,
    LEDCtrl=0x1a, ASICCtrl=0x30,
    EEData=0x34, EECtrl=0x36, TxThreshold=0x3c,
    FlashAddr=0x40, FlashData=0x44, TxStatus=0x46, DownCounter=0x48,
    IntrClear=0x4a, IntrEnable=0x4c, IntrStatus=0x4e,
    MACCtrl0=0x50, MACCtrl1=0x52, StationAddr=0x54,
    MaxTxSize=0x5A, RxMode=0x5c, MIICtrl=0x5e,
    MulticastFilter0=0x60, MulticastFilter1=0x64,
    RxOctetsLow=0x68, RxOctetsHigh=0x6a, TxOctetsLow=0x6c, TxOctetsHigh=0x6e,
    TxFramesOK=0x70, RxFramesOK=0x72, StatsCarrierError=0x74,
    StatsLateColl=0x75, StatsMultiColl=0x76, StatsOneColl=0x77,
    StatsTxDefer=0x78, RxMissed=0x79, StatsTxXSDefer=0x7a, StatsTxAbort=0x7b,
    StatsBcastTx=0x7c, StatsBcastRx=0x7d, StatsMcastTx=0x7e, StatsMcastRx=0x7f,
    /* Aliased and bogus values! */

/* Bits in the interrupt status/mask registers. */
enum intr_status_bits {
    IntrSummary=0x0001, IntrPCIErr=0x0002, IntrMACCtrl=0x0008,
    IntrTxDone=0x0004, IntrRxDone=0x0010, IntrRxStart=0x0020,
    StatsMax=0x0080, LinkChange=0x0100,
    IntrTxDMADone=0x0200, IntrRxDMADone=0x0400,

/* Bits in the RxMode register. */
    AcceptAllIPMulti=0x20, AcceptMultiHash=0x10, AcceptAll=0x08,
    AcceptBroadcast=0x04, AcceptMulticast=0x02, AcceptMyPhys=0x01,

/* Bits in MACCtrl. */
enum mac_ctrl0_bits {
    EnbFullDuplex=0x20, EnbRcvLargeFrame=0x40,
    EnbFlowCtrl=0x100, EnbPassRxCRC=0x200,
enum mac_ctrl1_bits {
    StatsEnable=0x0020, StatsDisable=0x0040, StatsEnabled=0x0080,
    TxEnable=0x0100, TxDisable=0x0200, TxEnabled=0x0400,
    RxEnable=0x0800, RxDisable=0x1000, RxEnabled=0x2000,
/* The Rx and Tx buffer descriptors. */
/* Note that using only 32 bit fields simplifies conversion to big-endian
   architectures. */
    struct desc_frag { u32 addr, length; } frag[1];

/* Bits in netdev_desc.status */
enum desc_status_bits {
    DescOwn=0x8000, DescEndPacket=0x4000, DescEndRing=0x2000,
    LastFrag=0x80000000, DescIntrOnTx=0x8000, DescIntrOnDMADone=0x80000000,

#define PRIV_ALIGN  15          /* Required alignment mask */
/* Use __attribute__((aligned (L1_CACHE_BYTES))) to maintain alignment
   within the structure. */
struct netdev_private {
    /* Descriptor rings first for alignment. */
    struct netdev_desc rx_ring[RX_RING_SIZE];
    struct netdev_desc tx_ring[TX_RING_SIZE];
    /* The addresses of receive-in-place skbuffs. */
    struct sk_buff* rx_skbuff[RX_RING_SIZE];
    /* The saved address of a sent-in-place packet/buffer, for later free(). */
    struct sk_buff* tx_skbuff[TX_RING_SIZE];
    struct net_device_stats stats;
    struct timer_list timer;        /* Media monitoring timer. */
    /* Frequently used values: keep some adjacent for cache effect. */
    int chip_id, drv_flags;
    /* Note: cache-line ("paragraph") grouped variables. */
    struct netdev_desc *rx_head_desc;
    unsigned int cur_rx, dirty_rx;  /* Producer/consumer ring indices */
    unsigned int rx_buf_sz;         /* Based on MTU+slack. */
    spinlock_t txlock;              /* Group with Tx control cache line. */
    struct netdev_desc *last_tx;    /* Last Tx descriptor used. */
    unsigned int cur_tx, dirty_tx;
    unsigned int tx_full:1;         /* The Tx queue is full. */
    /* These values keep track of the transceiver/media in use. */
    unsigned int full_duplex:1;     /* Full-duplex operation requested. */
    unsigned int duplex_lock:1;
    unsigned int medialock:1;       /* Do not sense media. */
    unsigned int default_port:4;    /* Last dev->if_port value. */
    /* Multicast and receive mode. */
    spinlock_t mcastlock;           /* SMP lock for multicast updates. */
    /* MII transceiver section. */
    int mii_cnt;                    /* Number of MII transceivers found. */
    u16 advertising;                /* NWay media advertisement */
    unsigned char phys[MII_CNT];    /* MII device addresses, only the first is used. */

/* The station address location in the EEPROM. */
#define EEPROM_SA_OFFSET    0x10
static int  eeprom_read(long ioaddr, int location);
static int  mdio_read(struct net_device *dev, int phy_id, int location);
static void mdio_write(struct net_device *dev, int phy_id, int location, int value);
static int  netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int  start_tx(struct sk_buff *skb, struct net_device *dev);
static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static void netdev_error(struct net_device *dev, int intr_status);
static int  netdev_rx(struct net_device *dev);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int  mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int  netdev_close(struct net_device *dev);
static int __devinit sundance_probe1 (struct pci_dev *pdev,
                                      const struct pci_device_id *ent)
    struct net_device *dev;
    struct netdev_private *np;
    int chip_idx = ent->driver_data;
    int i, option = card_idx < MAX_UNITS ? options[card_idx] : 0;

    if (pci_enable_device(pdev))
    pci_set_master(pdev);

    dev = init_etherdev(NULL, sizeof(*np));
    SET_MODULE_OWNER(dev);

    if (pci_request_regions(pdev, dev->name))

    ioaddr = pci_resource_start(pdev, 0);
    ioaddr = pci_resource_start(pdev, 1);
    ioaddr = (long) ioremap (ioaddr, pci_id_tbl[chip_idx].io_size);

    printk(KERN_INFO "%s: %s at 0x%lx, ",
           dev->name, pci_id_tbl[chip_idx].name, ioaddr);

    for (i = 0; i < 3; i++)
        ((u16 *)dev->dev_addr)[i] =
            le16_to_cpu(eeprom_read(ioaddr, i + EEPROM_SA_OFFSET));
    for (i = 0; i < 5; i++)
        printk("%2.2x:", dev->dev_addr[i]);
    printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);

    dev->base_addr = ioaddr;

    np->chip_id = chip_idx;
    np->drv_flags = pci_id_tbl[chip_idx].drv_flags;
    spin_lock_init(&np->lock);

    option = dev->mem_start;

    /* The lower four bits are the media type. */
    np->default_port = option & 15;
    if (np->default_port)
    if (card_idx < MAX_UNITS && full_duplex[card_idx] > 0)

    /* The chip-specific entries in the device structure. */
    dev->open = &netdev_open;
    dev->hard_start_xmit = &start_tx;
    dev->stop = &netdev_close;
    dev->get_stats = &get_stats;
    dev->set_multicast_list = &set_rx_mode;
    dev->do_ioctl = &mii_ioctl;
    dev->tx_timeout = &tx_timeout;
    dev->watchdog_timeo = TX_TIMEOUT;

        int phy, phy_idx = 0;
        np->phys[0] = 1;                /* Default setting */
        for (phy = 0; phy < 32 && phy_idx < MII_CNT; phy++) {
            int mii_status = mdio_read(dev, phy, 1);
            if (mii_status != 0xffff && mii_status != 0x0000) {
                np->phys[phy_idx++] = phy;
                np->advertising = mdio_read(dev, phy, 4);
                printk(KERN_INFO "%s: MII PHY found at address %d, status "
                       "0x%4.4x advertising %4.4x.\n",
                       dev->name, phy, mii_status, np->advertising);
        np->mii_cnt = phy_idx;
        printk(KERN_INFO "%s: No MII transceiver found! ASIC status %x\n",
               dev->name, readl(ioaddr + ASICCtrl));

    /* Perhaps move the reset here? */
    /* Reset the chip to erase previous misconfiguration. */
    printk("ASIC Control is %x.\n", readl(ioaddr + ASICCtrl));
    writew(0x007f, ioaddr + ASICCtrl + 2);
    printk("ASIC Control is now %x.\n", readl(ioaddr + ASICCtrl));

    pci_release_regions(pdev);
    unregister_netdev (dev);
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces. */
static int eeprom_read(long ioaddr, int location)
    int boguscnt = 1000;            /* Typical 190 ticks. */
    writew(0x0200 | (location & 0xff), ioaddr + EECtrl);
        if (! (readw(ioaddr + EECtrl) & 0x8000)) {
            return readw(ioaddr + EEData);
    } while (--boguscnt > 0);
/* MII transceiver control section.
   Read and write the MII registers using software-generated serial
   MDIO protocol. See the MII specifications or DP83840A data sheet.

   The maximum data clock rate is 2.5 MHz. The minimum timing is usually
   met by back-to-back 33 MHz PCI cycles. */
#define mdio_delay() readb(mdio_addr)

/* Set iff an MII transceiver on any interface requires mdio preamble.
   This is only set with older transceivers, so the extra
   code size of a per-interface flag is not worthwhile. */
static char mii_preamble_required = 0;
    MDIO_ShiftClk=0x0001, MDIO_Data=0x0002, MDIO_EnbOutput=0x0004,
#define MDIO_EnbIn  (0)
#define MDIO_WRITE0 (MDIO_EnbOutput)
#define MDIO_WRITE1 (MDIO_Data | MDIO_EnbOutput)

/* Generate the preamble required for initial synchronization and
   a few older transceivers. */
static void mdio_sync(long mdio_addr)
    /* Establish sync by sending at least 32 logic ones. */
    while (--bits >= 0) {
        writeb(MDIO_WRITE1, mdio_addr);
        writeb(MDIO_WRITE1 | MDIO_ShiftClk, mdio_addr);

static int mdio_read(struct net_device *dev, int phy_id, int location)
    long mdio_addr = dev->base_addr + MIICtrl;
    int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;

    if (mii_preamble_required)
        mdio_sync(mdio_addr);

    /* Shift the read command bits out. */
    for (i = 15; i >= 0; i--) {
        int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
        writeb(dataval, mdio_addr);
        writeb(dataval | MDIO_ShiftClk, mdio_addr);

    /* Read the two transition, 16 data, and wire-idle bits. */
    for (i = 19; i > 0; i--) {
        writeb(MDIO_EnbIn, mdio_addr);
        retval = (retval << 1) | ((readb(mdio_addr) & MDIO_Data) ? 1 : 0);
        writeb(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);

    return (retval>>1) & 0xffff;
static void mdio_write(struct net_device *dev, int phy_id, int location, int value)
    long mdio_addr = dev->base_addr + MIICtrl;
    int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;

    if (mii_preamble_required)
        mdio_sync(mdio_addr);

    /* Shift the command bits out. */
    for (i = 31; i >= 0; i--) {
        int dataval = (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
        writeb(dataval, mdio_addr);
        writeb(dataval | MDIO_ShiftClk, mdio_addr);

    /* Clear out extra bits. */
    for (i = 2; i > 0; i--) {
        writeb(MDIO_EnbIn, mdio_addr);
        writeb(MDIO_EnbIn | MDIO_ShiftClk, mdio_addr);
static int netdev_open(struct net_device *dev)
    struct netdev_private *np = dev->priv;
    long ioaddr = dev->base_addr;

    /* Do we need to reset the chip??? */

    i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);

    printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
           dev->name, dev->irq);

    writel(virt_to_bus(np->rx_ring), ioaddr + RxListPtr);
    /* The Tx list pointer is written as packets are queued. */

    for (i = 0; i < 6; i++)
        writeb(dev->dev_addr[i], ioaddr + StationAddr + i);

    /* Initialize other registers. */
    /* Configure the PCI bus bursts and FIFO thresholds. */

    if (dev->if_port == 0)
        dev->if_port = np->default_port;

    np->full_duplex = np->duplex_lock;
    np->mcastlock = (spinlock_t) SPIN_LOCK_UNLOCKED;

    writew(0, ioaddr + DownCounter);
    /* Set the chip to poll every N*320nsec. */
    writeb(100, ioaddr + RxDescPoll);
    writeb(127, ioaddr + TxDescPoll);
    netif_start_queue(dev);

    /* Enable interrupts by setting the interrupt mask. */
    writew(IntrRxDone | IntrRxDMADone | IntrPCIErr | IntrDrvRqst | IntrTxDone
           | StatsMax | LinkChange, ioaddr + IntrEnable);

    writew(StatsEnable | RxEnable | TxEnable, ioaddr + MACCtrl1);

    printk(KERN_DEBUG "%s: Done netdev_open(), status: Rx %x Tx %x "
           "MAC Control %x, %4.4x %4.4x.\n",
           dev->name, readl(ioaddr + RxStatus), readb(ioaddr + TxStatus),
           readl(ioaddr + MACCtrl0),
           readw(ioaddr + MACCtrl1), readw(ioaddr + MACCtrl0));

    /* Set the timer to check for link beat. */
    init_timer(&np->timer);
    np->timer.expires = jiffies + 3*HZ;
    np->timer.data = (unsigned long)dev;
    np->timer.function = &netdev_timer;     /* timer handler */
    add_timer(&np->timer);
static void check_duplex(struct net_device *dev)
    struct netdev_private *np = dev->priv;
    long ioaddr = dev->base_addr;
    int mii_reg5 = mdio_read(dev, np->phys[0], 5);
    int negotiated = mii_reg5 & np->advertising;

    if (np->duplex_lock || mii_reg5 == 0xffff)

    duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
    if (np->full_duplex != duplex) {
        np->full_duplex = duplex;
        printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d "
               "negotiated capability %4.4x.\n", dev->name,
               duplex ? "full" : "half", np->phys[0], negotiated);
        writew(duplex ? 0x20 : 0, ioaddr + MACCtrl0);

static void netdev_timer(unsigned long data)
    struct net_device *dev = (struct net_device *)data;
    struct netdev_private *np = dev->priv;
    long ioaddr = dev->base_addr;
    int next_tick = 10*HZ;

    printk(KERN_DEBUG "%s: Media selection timer tick, intr status %4.4x, "
           dev->name, readw(ioaddr + IntrEnable),
           readb(ioaddr + TxStatus), readl(ioaddr + RxStatus));

    np->timer.expires = jiffies + next_tick;
    add_timer(&np->timer);
static void tx_timeout(struct net_device *dev)
    struct netdev_private *np = dev->priv;
    long ioaddr = dev->base_addr;

    printk(KERN_WARNING "%s: Transmit timed out, status %2.2x,"
           " resetting...\n", dev->name, readb(ioaddr + TxStatus));

    printk(KERN_DEBUG " Rx ring %8.8x: ", (int)np->rx_ring);
    for (i = 0; i < RX_RING_SIZE; i++)
        printk(" %8.8x", (unsigned int)np->rx_ring[i].status);
    printk("\n"KERN_DEBUG" Tx ring %8.8x: ", (int)np->tx_ring);
    for (i = 0; i < TX_RING_SIZE; i++)
        printk(" %4.4x", np->tx_ring[i].status);

    /* Perhaps we should reinitialize the hardware here. */

    /* Stop and restart the chip's Tx processes. */

    /* Trigger an immediate transmit demand. */
    writew(IntrRxDone | IntrRxDMADone | IntrPCIErr | IntrDrvRqst | IntrTxDone
           | StatsMax | LinkChange, ioaddr + IntrEnable);

    dev->trans_start = jiffies;
    np->stats.tx_errors++;

    netif_wake_queue(dev);
/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
static void init_ring(struct net_device *dev)
    struct netdev_private *np = dev->priv;

    np->cur_rx = np->cur_tx = 0;
    np->dirty_rx = np->dirty_tx = 0;

    np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
    np->rx_head_desc = &np->rx_ring[0];

    /* Initialize all Rx descriptors. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
        np->rx_ring[i].status = 0;
        np->rx_ring[i].frag[0].length = 0;
        np->rx_skbuff[i] = 0;
    np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);

    /* Fill in the Rx buffers. Handle allocation failure gracefully. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
        np->rx_skbuff[i] = skb;
        skb->dev = dev;         /* Mark as being used by this device. */
        skb_reserve(skb, 2);    /* 16 byte align the IP header. */
        np->rx_ring[i].frag[0].addr = virt_to_le32desc(skb->tail);
        np->rx_ring[i].frag[0].length = cpu_to_le32(np->rx_buf_sz | LastFrag);
    np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);

    for (i = 0; i < TX_RING_SIZE; i++) {
        np->tx_skbuff[i] = 0;
        np->tx_ring[i].status = 0;
static int start_tx(struct sk_buff *skb, struct net_device *dev)
    struct netdev_private *np = dev->priv;
    struct netdev_desc *txdesc;

    /* Note: Ordering is important here, set the field with the
       "ownership" bit last, and only then increment cur_tx. */

    /* Calculate the next Tx descriptor entry. */
    entry = np->cur_tx % TX_RING_SIZE;
    np->tx_skbuff[entry] = skb;
    txdesc = &np->tx_ring[entry];

    txdesc->next_desc = 0;
    /* Note: disable the interrupt generation here before releasing. */
        cpu_to_le32((entry<<2) | DescIntrOnDMADone | DescIntrOnTx);
    txdesc->frag[0].addr = virt_to_le32desc(skb->data);
    txdesc->frag[0].length = cpu_to_le32(skb->len | LastFrag);

    np->last_tx->next_desc = virt_to_le32desc(txdesc);
    np->last_tx = txdesc;

    /* On some architectures: explicitly flush cache lines here. */

    if (np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 1) {
    netif_stop_queue(dev);

    /* Side effect: The read wakes the potentially-idle transmit channel. */
    if (readl(dev->base_addr + TxListPtr) == 0)
        writel(virt_to_bus(&np->tx_ring[entry]), dev->base_addr + TxListPtr);

    dev->trans_start = jiffies;

    printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
           dev->name, np->cur_tx, entry);
/* The interrupt handler does all of the Rx thread work and cleans up
   after the Tx thread. */
static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
    struct net_device *dev = (struct net_device *)dev_instance;
    struct netdev_private *np;
    int boguscnt = max_interrupt_work;

    ioaddr = dev->base_addr;
    spin_lock(&np->lock);

        int intr_status = readw(ioaddr + IntrStatus);
        writew(intr_status & (IntrRxDone | IntrRxDMADone | IntrPCIErr |
                              IntrDrvRqst | IntrTxDone | IntrTxDMADone |
                              StatsMax | LinkChange),
               ioaddr + IntrStatus);

        printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
               dev->name, intr_status);

        if (intr_status == 0)

        if (intr_status & (IntrRxDone|IntrRxDMADone))

        if (intr_status & IntrTxDone) {
            int tx_status = readw(ioaddr + TxStatus);
            while (tx_status & 0x80) {
                printk("%s: Transmit status is %2.2x.\n",
                       dev->name, tx_status);
                if (tx_status & 0x1e) {
                    np->stats.tx_errors++;
                    if (tx_status & 0x10) np->stats.tx_fifo_errors++;
                    if (tx_status & 0x08) np->stats.collisions16++;
                    if (tx_status & 0x08) np->stats.collisions++;
                    if (tx_status & 0x04) np->stats.tx_fifo_errors++;
                    if (tx_status & 0x02) np->stats.tx_window_errors++;
                    /* This reset has not been verified! */
                    if (tx_status & 0x10) {         /* Reset the Tx. */
                        writew(0x001c, ioaddr + ASICCtrl + 2);
#if 0                                               /* Do we need to reset the Tx pointer here? */
                        writel(virt_to_bus(&np->tx_ring[np->dirty_tx]),
                               dev->base_addr + TxListPtr);
                    if (tx_status & 0x1e)           /* Restart the Tx. */
                        writew(TxEnable, ioaddr + MACCtrl1);
                /* Yup, this is a documentation bug. It cost me *hours*. */
                writew(0, ioaddr + TxStatus);
                tx_status = readb(ioaddr + TxStatus);

        for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
            int entry = np->dirty_tx % TX_RING_SIZE;
            if ( ! (np->tx_ring[entry].status & 0x00010000))
            /* Free the original skb. */
            dev_kfree_skb_irq(np->tx_skbuff[entry]);
            np->tx_skbuff[entry] = 0;
            && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
            /* The ring is no longer full, clear tbusy. */
            netif_wake_queue(dev);

        /* Abnormal error summary/uncommon events handlers. */
        if (intr_status & (IntrDrvRqst | IntrPCIErr | LinkChange | StatsMax))
            netdev_error(dev, intr_status);

        if (--boguscnt < 0) {
            printk(KERN_WARNING "%s: Too much work at interrupt, "
                   "status=0x%4.4x / 0x%4.4x.\n",
                   dev->name, intr_status, readw(ioaddr + IntrClear));
            /* Re-enable us in 3.2msec. */
            writew(1000, ioaddr + DownCounter);
            writew(IntrDrvRqst, ioaddr + IntrEnable);

    printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
           dev->name, readw(ioaddr + IntrStatus));

    spin_unlock(&np->lock);
/* This routine is logically part of the interrupt handler, but separated
   for clarity and better register allocation. */
static int netdev_rx(struct net_device *dev)
    struct netdev_private *np = dev->priv;
    int entry = np->cur_rx % RX_RING_SIZE;
    int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;

    printk(KERN_DEBUG " In netdev_rx(), entry %d status %4.4x.\n",
           entry, np->rx_ring[entry].status);

    /* If EOP is set on the next entry, it's a new packet. Send it up. */
    while (np->rx_head_desc->status & DescOwn) {
        struct netdev_desc *desc = np->rx_head_desc;
        u32 frame_status = le32_to_cpu(desc->status);
        int pkt_len = frame_status & 0x1fff;        /* Chip omits the CRC. */

        printk(KERN_DEBUG "  netdev_rx() status was %8.8x.\n",

        if (frame_status & 0x001f4000) {
            /* There was an error. */
            printk(KERN_DEBUG "  netdev_rx() Rx error was %8.8x.\n",
            np->stats.rx_errors++;
            if (frame_status & 0x00100000) np->stats.rx_length_errors++;
            if (frame_status & 0x00010000) np->stats.rx_fifo_errors++;
            if (frame_status & 0x00060000) np->stats.rx_frame_errors++;
            if (frame_status & 0x00080000) np->stats.rx_crc_errors++;
            if (frame_status & 0x00100000) {
                printk(KERN_WARNING "%s: Oversized Ethernet frame,"
                       dev->name, frame_status);
#ifndef final_version
            printk(KERN_DEBUG "  netdev_rx() normal Rx pkt length %d"

            /* Check if the packet is long enough to accept without copying
               to a minimally-sized skbuff. */
            if (pkt_len < rx_copybreak
                && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
                skb_reserve(skb, 2);    /* 16 byte align the IP header */
                eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
                skb_put(skb, pkt_len);
                skb_put(skb = np->rx_skbuff[entry], pkt_len);
                np->rx_skbuff[entry] = NULL;
            skb->protocol = eth_type_trans(skb, dev);
            /* Note: checksum -> skb->ip_summed = CHECKSUM_UNNECESSARY; */
            dev->last_rx = jiffies;
        entry = (++np->cur_rx) % RX_RING_SIZE;
        np->rx_head_desc = &np->rx_ring[entry];

    /* Refill the Rx ring buffers. */
    for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
        struct sk_buff *skb;
        entry = np->dirty_rx % RX_RING_SIZE;
        if (np->rx_skbuff[entry] == NULL) {
            skb = dev_alloc_skb(np->rx_buf_sz);
            np->rx_skbuff[entry] = skb;
                break;              /* Better luck next round. */
            skb->dev = dev;         /* Mark as being used by this device. */
            skb_reserve(skb, 2);    /* Align IP on 16 byte boundaries */
            np->rx_ring[entry].frag[0].addr = virt_to_le32desc(skb->tail);
        /* Perhaps we need not reset this field. */
        np->rx_ring[entry].frag[0].length =
            cpu_to_le32(np->rx_buf_sz | LastFrag);
        np->rx_ring[entry].status = 0;

    /* No need to restart Rx engine, it will poll. */
static void netdev_error(struct net_device *dev, int intr_status)
    long ioaddr = dev->base_addr;
    struct netdev_private *np = dev->priv;

    if (intr_status & IntrDrvRqst) {
        /* Stop the down counter and turn interrupts back on. */
        printk("%s: Turning interrupts back on.\n", dev->name);
        writew(0, ioaddr + DownCounter);
        writew(IntrRxDone | IntrRxDMADone | IntrPCIErr | IntrDrvRqst |
               IntrTxDone | StatsMax | LinkChange, ioaddr + IntrEnable);

    if (intr_status & LinkChange) {
        printk(KERN_ERR "%s: Link changed: Autonegotiation advertising"
               " %4.4x partner %4.4x.\n", dev->name,
               mdio_read(dev, np->phys[0], 4),
               mdio_read(dev, np->phys[0], 5));

    if (intr_status & StatsMax) {

    if (intr_status & IntrPCIErr) {
        printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
               dev->name, intr_status);
        /* We must do a global reset of DMA to continue. */
static struct net_device_stats *get_stats(struct net_device *dev)
    long ioaddr = dev->base_addr;
    struct netdev_private *np = dev->priv;

    /* We should lock this segment of code for SMP eventually, although
       the vulnerability window is very small and statistics are
       non-critical. */
    /* The chip only needs to report frames it silently dropped. */
    np->stats.rx_missed_errors += readb(ioaddr + RxMissed);
    np->stats.tx_packets += readw(ioaddr + TxFramesOK);
    np->stats.rx_packets += readw(ioaddr + RxFramesOK);
    np->stats.collisions += readb(ioaddr + StatsLateColl);
    np->stats.collisions += readb(ioaddr + StatsMultiColl);
    np->stats.collisions += readb(ioaddr + StatsOneColl);
    readb(ioaddr + StatsCarrierError);
    readb(ioaddr + StatsTxDefer);
    for (i = StatsTxDefer; i <= StatsMcastRx; i++)
    np->stats.tx_bytes += readw(ioaddr + TxOctetsLow);
    np->stats.tx_bytes += readw(ioaddr + TxOctetsHigh) << 16;
    np->stats.rx_bytes += readw(ioaddr + RxOctetsLow);
    np->stats.rx_bytes += readw(ioaddr + RxOctetsHigh) << 16;
/* The little-endian AUTODIN II ethernet CRC calculations.
   A big-endian version is also available.
   This is slow but compact code. Do not use this routine for bulk data,
   use a table-based routine instead.
   This is common code and should be moved to net/core/crc.c.
   Chips may use the upper or lower CRC bits, and may reverse and/or invert
   them. Select the endian-ness that results in minimal calculations. */
static unsigned const ethernet_polynomial_le = 0xedb88320U;
static inline unsigned ether_crc_le(int length, unsigned char *data)
    unsigned int crc = 0xffffffff;      /* Initial value. */
    while(--length >= 0) {
        unsigned char current_octet = *data++;
        for (bit = 8; --bit >= 0; current_octet >>= 1) {
            if ((crc ^ current_octet) & 1) {
                crc ^= ethernet_polynomial_le;
static void set_rx_mode(struct net_device *dev)
    long ioaddr = dev->base_addr;
    u16 mc_filter[4];               /* Multicast hash filter */

    if (dev->flags & IFF_PROMISC) {             /* Set promiscuous. */
        /* Unconditionally log net taps. */
        printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
        memset(mc_filter, 0xff, sizeof(mc_filter));
        rx_mode = AcceptBroadcast | AcceptMulticast | AcceptAll | AcceptMyPhys;
    } else if ((dev->mc_count > multicast_filter_limit)
               || (dev->flags & IFF_ALLMULTI)) {
        /* Too many to match, or accept all multicasts. */
        memset(mc_filter, 0xff, sizeof(mc_filter));
        rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
    } else if (dev->mc_count) {
        struct dev_mc_list *mclist;
        memset(mc_filter, 0, sizeof(mc_filter));
        for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
             i++, mclist = mclist->next) {
            set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x3f,
        rx_mode = AcceptBroadcast | AcceptMultiHash | AcceptMyPhys;
        writeb(AcceptBroadcast | AcceptMyPhys, ioaddr + RxMode);

    for (i = 0; i < 4; i++)
        writew(mc_filter[i], ioaddr + MulticastFilter0 + i*2);
    writeb(rx_mode, ioaddr + RxMode);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
    u16 *data = (u16 *)&rq->ifr_data;

    case SIOCDEVPRIVATE:            /* Get the address of the PHY in use. */
        data[0] = ((struct netdev_private *)dev->priv)->phys[0] & 0x1f;
    case SIOCDEVPRIVATE+1:          /* Read the specified MII register. */
        data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
    case SIOCDEVPRIVATE+2:          /* Write the specified MII register */
        if (!capable(CAP_NET_ADMIN))
        mdio_write(dev, data[0] & 0x1f, data[1] & 0x1f, data[2]);
static int netdev_close(struct net_device *dev)
    long ioaddr = dev->base_addr;
    struct netdev_private *np = dev->priv;

    netif_stop_queue(dev);

    printk(KERN_DEBUG "%s: Shutting down ethercard, status was Tx %2.2x "
           "Rx %4.4x Int %2.2x.\n",
           dev->name, readb(ioaddr + TxStatus),
           readl(ioaddr + RxStatus), readw(ioaddr + IntrStatus));
    printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
           dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);

    /* Disable interrupts by clearing the interrupt mask. */
    writew(0x0000, ioaddr + IntrEnable);

    /* Stop the chip's Tx and Rx processes. */
    writew(TxDisable | RxDisable | StatsDisable, ioaddr + MACCtrl1);

    printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
           (int)virt_to_bus(np->tx_ring));
    for (i = 0; i < TX_RING_SIZE; i++)
        printk(" #%d desc. %4.4x %8.8x %8.8x.\n",
               i, np->tx_ring[i].status, np->tx_ring[i].frag[0].addr,
               np->tx_ring[i].frag[0].length);
    printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
           (int)virt_to_bus(np->rx_ring));
    for (i = 0; i < /*RX_RING_SIZE*/4 ; i++) {
        printk(KERN_DEBUG " #%d desc. %4.4x %4.4x %8.8x\n",
               i, np->rx_ring[i].status, np->rx_ring[i].frag[0].addr,
               np->rx_ring[i].frag[0].length);
#endif /* __i386__ debugging only */

    free_irq(dev->irq, dev);

    del_timer_sync(&np->timer);

    /* Free all the skbuffs in the Rx queue. */
    for (i = 0; i < RX_RING_SIZE; i++) {
        np->rx_ring[i].status = 0;
        np->rx_ring[i].frag[0].addr = 0xBADF00D0;   /* An invalid address. */
        if (np->rx_skbuff[i]) {
            dev_kfree_skb(np->rx_skbuff[i]);
        np->rx_skbuff[i] = 0;
    for (i = 0; i < TX_RING_SIZE; i++) {
        if (np->tx_skbuff[i])
            dev_kfree_skb(np->tx_skbuff[i]);
        np->tx_skbuff[i] = 0;
static void __devexit sundance_remove1 (struct pci_dev *pdev)
    struct net_device *dev = pci_get_drvdata(pdev);

    /* No need to check MOD_IN_USE, as sys_delete_module() checks. */
    unregister_netdev(dev);
    pci_release_regions(pdev);
    iounmap((char *)(dev->base_addr));
    pci_set_drvdata(pdev, NULL);

static struct pci_driver sundance_driver = {
    id_table:   sundance_pci_tbl,
    probe:      sundance_probe1,
    remove:     sundance_remove1,

static int __init sundance_init(void)
    return pci_module_init(&sundance_driver);

static void __exit sundance_exit(void)
    pci_unregister_driver(&sundance_driver);

module_init(sundance_init);
module_exit(sundance_exit);
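/*
 * The #warning lines near the top refer to build instructions that normally
 * close this file.  A representative command for 2.2/2.4-era kernels follows;
 * the exact flags are an assumption and should be adapted to the local tree:
 *
 * Local variables:
 *  compile-command: "gcc -DMODULE -D__KERNEL__ -Wall -Wstrict-prototypes -O6 -c sundance.c"
 * End:
 */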