1 /* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */
3 Written/copyright 1999-2001 by Donald Becker.
5 This software may be used and distributed according to the terms of
6 the GNU General Public License (GPL), incorporated herein by reference.
7 Drivers based on or derived from this code fall under the GPL and must
8 retain the authorship, copyright and license notice. This file is not
9 a complete program and may only be used when the entire operating
10 system is licensed under the GPL. License for under other terms may be
11 available. Contact the original author for details.
13 The original author may be reached as becker@scyld.com, or at
14 Scyld Computing Corporation
15 410 Severn Ave., Suite 210
18 Support information and updates available at
19 http://www.scyld.com/network/natsemi.html
22 Linux kernel modifications:
26 - Bug fixes and better intr performance (Tjeerd)
28 - Now reads correct MAC address from eeprom
30 - Eliminate redundant priv->tx_full flag
31 - Call netif_start_queue from dev->tx_timeout
32 - wmb() in start_tx() to flush data
34 - Clean up PCI enable (davej)
36 - Merge Donald Becker's natsemi.c version 1.07
/* These identify the driver base version and may not be removed. */
static const char version1[] =
"natsemi.c:v1.07 1/9/2001 Written by Donald Becker <becker@scyld.com>\n";
static const char version2[] =
" http://www.scyld.com/network/natsemi.html\n";
static const char version3[] =
" (unofficial 2.4.x kernel port, version 1.0.4, February 26, 2001 Jeff Garzik, Tjeerd Mulder)\n";
47 /* Updated to recommendations in pci-skeleton v2.03. */
49 /* Automatically extracted configuration info:
50 probe-func: natsemi_probe
51 config-in: tristate 'National Semiconductor DP8381x series PCI Ethernet support' CONFIG_NATSEMI
53 c-help-name: National Semiconductor DP8381x series PCI Ethernet support
54 c-help-symbol: CONFIG_NATSEMI
55 c-help: This driver is for the National Semiconductor DP8381x series,
56 c-help: including the 83815 chip.
57 c-help: More specific information and updates are available from
58 c-help: http://www.scyld.com/network/natsemi.html
61 /* The user-configurable values.
62 These may be modified when a driver module is loaded.*/
/* Message verbosity: 1 normal messages, 0 quiet .. 7 verbose. */
static int debug = 1;

/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
static int max_interrupt_work = 20;

/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
   This chip uses a 512 element hash table based on the Ethernet CRC. */
static int multicast_filter_limit = 100;

/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
   Setting to > 1518 effectively disables this feature. */
static int rx_copybreak = 0;

/* Used to pass the media type, etc.
   Both 'options[]' and 'full_duplex[]' should exist for driver
   interoperability.
   The media type is usually passed in 'options[]'. */
#define MAX_UNITS 8		/* More are supported, limit only on options */
static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
/* Operational parameters that are set at compile time. */

/* Keep the ring sizes a power of two for compile efficiency.
   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
   Making the Tx ring too large decreases the effectiveness of channel
   bonding and packet priority.
   There are no ill effects from too-large receive rings. */
#define TX_RING_SIZE	16
#define TX_QUEUE_LEN	10	/* Limit ring entries actually used, min 4. */
#define RX_RING_SIZE	32

/* Operational parameters that usually are not changed. */
/* Time in jiffies before concluding the transmitter is hung. */
#define TX_TIMEOUT	(2*HZ)

#define PKT_BUF_SZ	1536	/* Size of each temporary Rx buffer. */
102 #if !defined(__OPTIMIZE__)
103 #warning You must compile this file with the correct options!
104 #warning See the last lines of the source file.
105 #error You must compile this driver with "-O".
108 /* Include files, designed to support most kernel versions 2.0.0 and later. */
109 #include <linux/version.h>
110 #include <linux/module.h>
111 #include <linux/kernel.h>
112 #include <linux/string.h>
113 #include <linux/timer.h>
114 #include <linux/errno.h>
115 #include <linux/ioport.h>
116 #include <linux/slab.h>
117 #include <linux/interrupt.h>
118 #include <linux/pci.h>
119 #include <linux/netdevice.h>
120 #include <linux/etherdevice.h>
121 #include <linux/skbuff.h>
122 #include <linux/init.h>
123 #include <linux/spinlock.h>
124 #include <asm/processor.h> /* Processor type for cache alignment. */
125 #include <asm/bitops.h>
128 /* Condensed operations for readability. */
129 #define virt_to_le32desc(addr) cpu_to_le32(virt_to_bus(addr))
130 #define le32desc_to_virt(addr) bus_to_virt(le32_to_cpu(addr))
132 MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
133 MODULE_DESCRIPTION("National Semiconductor DP8381x series PCI Ethernet driver");
134 MODULE_PARM(max_interrupt_work, "i");
135 MODULE_PARM(mtu, "i");
136 MODULE_PARM(debug, "i");
137 MODULE_PARM(rx_copybreak, "i");
138 MODULE_PARM(options, "1-" __MODULE_STRING(MAX_UNITS) "i");
139 MODULE_PARM(full_duplex, "1-" __MODULE_STRING(MAX_UNITS) "i");
144 I. Board Compatibility
146 This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
147 It also works with other chips in the DP83810 series.
149 II. Board-specific settings
151 This driver requires the PCI interrupt line to be valid.
152 It honors the EEPROM-set values.
154 III. Driver operation
158 This driver uses two statically allocated fixed-size descriptor lists
159 formed into rings by a branch from the final descriptor to the beginning of
160 the list. The ring sizes are set at compile time by RX/TX_RING_SIZE.
161 The NatSemi design uses a 'next descriptor' pointer that the driver forms
164 IIIb/c. Transmit/Receive Structure
166 This driver uses a zero-copy receive and transmit scheme.
167 The driver allocates full frame size skbuffs for the Rx ring buffers at
168 open() time and passes the skb->data field to the chip as receive data
169 buffers. When an incoming frame is less than RX_COPYBREAK bytes long,
170 a fresh skbuff is allocated and the frame is copied to the new skbuff.
171 When the incoming frame is larger, the skbuff is passed directly up the
172 protocol stack. Buffers consumed this way are replaced by newly allocated
173 skbuffs in a later phase of receives.
175 The RX_COPYBREAK value is chosen to trade-off the memory wasted by
176 using a full-sized skbuff for small frames vs. the copying costs of larger
177 frames. New boards are typically used in generously configured machines
178 and the underfilled buffers have negligible impact compared to the benefit of
179 a single allocation size, so the default value of zero results in never
180 copying packets. When copying is done, the cost is usually mitigated by using
181 a combined copy/checksum routine. Copying also preloads the cache, which is
182 most useful with small frames.
184 A subtle aspect of the operation is that unaligned buffers are not permitted
185 by the hardware. Thus the IP header at offset 14 in an ethernet frame isn't
186 longword aligned for further processing. On copies frames are put into the
187 skbuff at an offset of "+2", 16-byte aligning the IP header.
189 IIId. Synchronization
191 The driver runs as two independent, single-threaded flows of control. One
192 is the send-packet routine, which enforces single-threaded use by the
193 dev->tbusy flag. The other thread is the interrupt handler, which is single
194 threaded by the hardware and interrupt handling software.
196 The send packet thread has partial control over the Tx ring and 'dev->tbusy'
197 flag. It sets the tbusy flag whenever it's queuing a Tx packet. If the next
198 queue slot is empty, it clears the tbusy flag when finished otherwise it sets
199 the 'lp->tx_full' flag.
201 The interrupt handler has exclusive control over the Rx ring and records stats
202 from the Tx ring. After reaping the stats, it marks the Tx queue entry as
203 empty by incrementing the dirty_tx mark. Iff the 'lp->tx_full' flag is set, it
204 clears both the tx_full and tbusy flags.
208 NatSemi PCI network controllers are very uncommon.
212 http://www.scyld.com/expert/100mbps.html
213 http://www.scyld.com/expert/NWay.html
214 Datasheet is available from:
215 http://www.national.com/pf/DP/DP83815.html
/* PCI usage flags tested against natsemi_pci_info[].flags.
   NOTE(review): only PCI_USES_MASTER survived in this listing; the other
   enumerators are reconstructed from the pci-skeleton template -- verify
   the exact names/values against the full source. */
enum pci_flags_bits {
	PCI_USES_IO = 0x01, PCI_USES_MEM = 0x02,
	PCI_USES_MASTER = 0x04,
	PCI_ADDR0 = 0x08, PCI_ADDR1 = 0x10,
};

/* MMIO operations required */
#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)

/* array of board data directly indexed by pci_tbl[x].driver_data */
static struct {
	const char *name;	/* Board name printed at probe time. */
	int flags;		/* PCI_IOTYPE-style usage flags. */
} natsemi_pci_info[] __devinitdata = {
	{ "NatSemi DP83815", PCI_IOTYPE },
	{ 0, 0 },		/* terminator */
};

static struct pci_device_id natsemi_pci_tbl[] __devinitdata = {
	{ 0x100B, 0x0020, PCI_ANY_ID, PCI_ANY_ID, },	/* NSC DP83815 */
	{ 0, },			/* terminator */
};
MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
250 /* Offsets to the device registers.
251 Unlike software-only systems, device drivers interact with complex hardware.
252 It's not useful to define symbolic names for every register bit in the
255 enum register_offsets {
256 ChipCmd=0x00, ChipConfig=0x04, EECtrl=0x08, PCIBusCfg=0x0C,
257 IntrStatus=0x10, IntrMask=0x14, IntrEnable=0x18,
258 TxRingPtr=0x20, TxConfig=0x24,
259 RxRingPtr=0x30, RxConfig=0x34, ClkRun=0x3C,
260 WOLCmd=0x40, PauseCmd=0x44, RxFilterAddr=0x48, RxFilterData=0x4C,
261 BootRomAddr=0x50, BootRomData=0x54, StatsCtrl=0x5C, StatsData=0x60,
262 RxPktErrs=0x60, RxMissed=0x68, RxCRCErrs=0x64,
266 /* Bit in ChipCmd. */
268 ChipReset=0x100, RxReset=0x20, TxReset=0x10, RxOff=0x08, RxOn=0x04,
269 TxOff=0x02, TxOn=0x01,
272 /* Bits in the interrupt status/mask registers. */
273 enum intr_status_bits {
274 IntrRxDone=0x0001, IntrRxIntr=0x0002, IntrRxErr=0x0004, IntrRxEarly=0x0008,
275 IntrRxIdle=0x0010, IntrRxOverrun=0x0020,
276 IntrTxDone=0x0040, IntrTxIntr=0x0080, IntrTxErr=0x0100,
277 IntrTxIdle=0x0200, IntrTxUnderrun=0x0400,
278 StatsMax=0x0800, LinkChange=0x4000,
280 RxResetDone=0x1000000, TxResetDone=0x2000000,
281 IntrPCIErr=0x00f00000,
282 IntrNormalSummary=0x0251, IntrAbnormalSummary=0xED20,
285 /* Bits in the RxMode register. */
287 AcceptErr=0x20, AcceptRunt=0x10,
288 AcceptBroadcast=0xC0000000,
289 AcceptMulticast=0x00200000, AcceptAllMulticast=0x20000000,
290 AcceptAllPhys=0x10000000, AcceptMyPhys=0x08000000,
293 /* The Rx and Tx buffer descriptors. */
294 /* Note that using only 32 bit fields simplifies conversion to big-endian
303 /* Bits in network_desc.status */
304 enum desc_status_bits {
305 DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
306 DescNoCRC=0x10000000,
307 DescPktOK=0x08000000, RxTooLong=0x00400000,
310 #define PRIV_ALIGN 15 /* Required alignment mask */
311 struct netdev_private {
312 /* Descriptor rings first for alignment. */
313 struct netdev_desc rx_ring[RX_RING_SIZE];
314 struct netdev_desc tx_ring[TX_RING_SIZE];
315 /* The addresses of receive-in-place skbuffs. */
316 struct sk_buff* rx_skbuff[RX_RING_SIZE];
317 /* The saved address of a sent-in-place packet/buffer, for later free(). */
318 struct sk_buff* tx_skbuff[TX_RING_SIZE];
319 struct net_device_stats stats;
320 struct timer_list timer; /* Media monitoring timer. */
321 /* Frequently used values: keep some adjacent for cache effect. */
322 struct pci_dev *pci_dev;
323 struct netdev_desc *rx_head_desc;
324 unsigned int cur_rx, dirty_rx; /* Producer/consumer ring indices */
325 unsigned int cur_tx, dirty_tx;
326 unsigned int rx_buf_sz; /* Based on MTU+slack. */
327 /* These values are keep track of the transceiver/media in use. */
328 unsigned int full_duplex:1; /* Full-duplex operation requested. */
329 unsigned int duplex_lock:1;
330 unsigned int medialock:1; /* Do not sense media. */
331 unsigned int default_port:4; /* Last dev->if_port value. */
335 /* FIFO and PCI burst thresholds. */
336 u32 tx_config, rx_config;
337 /* original contents of ClkRun register */
339 /* MII transceiver section. */
340 u16 advertising; /* NWay media advertisement */
/* Forward declarations.  The duplicate netdev_error() prototype that
   appeared twice in the original list has been removed. */
static int eeprom_read(long ioaddr, int location);
static int mdio_read(struct net_device *dev, int phy_id, int location);
static int netdev_open(struct net_device *dev);
static void check_duplex(struct net_device *dev);
static void netdev_timer(unsigned long data);
static void tx_timeout(struct net_device *dev);
static void init_ring(struct net_device *dev);
static int start_tx(struct sk_buff *skb, struct net_device *dev);
static void intr_handler(int irq, void *dev_instance, struct pt_regs *regs);
static int netdev_rx(struct net_device *dev);
static void netdev_error(struct net_device *dev, int intr_status);
static void set_rx_mode(struct net_device *dev);
static struct net_device_stats *get_stats(struct net_device *dev);
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
static int netdev_close(struct net_device *dev);
364 static int __devinit natsemi_probe1 (struct pci_dev *pdev,
365 const struct pci_device_id *ent)
367 struct net_device *dev;
368 struct netdev_private *np;
369 int i, option, irq, chip_idx = ent->driver_data;
370 static int find_cnt = -1;
371 static int printed_version;
372 unsigned long ioaddr, iosize;
373 const int pcibar = 1; /* PCI base address register */
377 if ((debug <= 1) && !printed_version++)
378 printk(KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
379 version1, version2, version3);
381 i = pci_enable_device(pdev);
384 /* natsemi has a non-standard PM control register
385 * in PCI config space. Some boards apparently need
386 * to be brought to D0 in this manner.
388 pci_read_config_dword(pdev, PCIPM, &tmp);
389 if (tmp & (0x03|0x100)) {
390 /* D0 state, disable PME assertion */
391 u32 newtmp = tmp & ~(0x03|0x100);
392 pci_write_config_dword(pdev, PCIPM, newtmp);
396 option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
397 ioaddr = pci_resource_start(pdev, pcibar);
398 iosize = pci_resource_len(pdev, pcibar);
401 if (natsemi_pci_info[chip_idx].flags & PCI_USES_MASTER)
402 pci_set_master(pdev);
404 dev = alloc_etherdev(sizeof (struct netdev_private));
407 SET_MODULE_OWNER(dev);
409 i = pci_request_regions(pdev, dev->name);
416 void *mmio = ioremap (ioaddr, iosize);
418 pci_release_regions(pdev);
422 ioaddr = (unsigned long) mmio;
425 /* Work around the dropped serial bit. */
426 prev_eedata = eeprom_read(ioaddr, 6);
427 for (i = 0; i < 3; i++) {
428 int eedata = eeprom_read(ioaddr, i + 7);
429 dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
430 dev->dev_addr[i*2+1] = eedata >> 7;
431 prev_eedata = eedata;
434 /* Reset the chip to erase previous misconfiguration. */
435 writel(ChipReset, ioaddr + ChipCmd);
437 dev->base_addr = ioaddr;
443 pci_set_drvdata(pdev, dev);
445 spin_lock_init(&np->lock);
448 option = dev->mem_start;
450 /* The lower four bits are the media type. */
454 np->default_port = option & 15;
455 if (np->default_port)
458 if (find_cnt < MAX_UNITS && full_duplex[find_cnt] > 0)
464 /* The chip-specific entries in the device structure. */
465 dev->open = &netdev_open;
466 dev->hard_start_xmit = &start_tx;
467 dev->stop = &netdev_close;
468 dev->get_stats = &get_stats;
469 dev->set_multicast_list = &set_rx_mode;
470 dev->do_ioctl = &mii_ioctl;
471 dev->tx_timeout = &tx_timeout;
472 dev->watchdog_timeo = TX_TIMEOUT;
477 i = register_netdev(dev);
479 pci_release_regions(pdev);
480 unregister_netdev(dev);
482 pci_set_drvdata(pdev, NULL);
486 printk(KERN_INFO "%s: %s at 0x%lx, ",
487 dev->name, natsemi_pci_info[chip_idx].name, ioaddr);
488 for (i = 0; i < ETH_ALEN-1; i++)
489 printk("%2.2x:", dev->dev_addr[i]);
490 printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], irq);
492 np->advertising = mdio_read(dev, 1, 4);
493 if ((readl(ioaddr + ChipConfig) & 0xe000) != 0xe000) {
494 u32 chip_config = readl(ioaddr + ChipConfig);
495 printk(KERN_INFO "%s: Transceiver default autonegotiation %s "
498 chip_config & 0x2000 ? "enabled, advertise" : "disabled, force",
499 chip_config & 0x4000 ? "0" : "",
500 chip_config & 0x8000 ? "full" : "half");
502 printk(KERN_INFO "%s: Transceiver status 0x%4.4x advertising %4.4x.\n",
503 dev->name, (int)readl(ioaddr + 0x84), np->advertising);
/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
   The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */

/* Delay between EEPROM clock transitions.
   No extra delay is needed with 33Mhz PCI, but future 66Mhz access may need
   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
   made udelay() unreliable.
   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
   deprecated; a dummy readl() of the control register gives the same
   ordering guarantee. */
#define eeprom_delay(ee_addr)	readl(ee_addr)

enum EEPROM_Ctrl_Bits {
	EE_ShiftClk=0x04, EE_DataIn=0x01, EE_ChipSelect=0x08, EE_DataOut=0x02,
};
#define EE_Write0 (EE_ChipSelect)
#define EE_Write1 (EE_ChipSelect | EE_DataIn)

/* The EEPROM commands include the always-set leading bit. */
enum EEPROM_Cmds {
	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
};
532 static int eeprom_read(long addr, int location)
536 int ee_addr = addr + EECtrl;
537 int read_cmd = location | EE_ReadCmd;
538 writel(EE_Write0, ee_addr);
540 /* Shift the read command bits out. */
541 for (i = 10; i >= 0; i--) {
542 short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
543 writel(dataval, ee_addr);
544 eeprom_delay(ee_addr);
545 writel(dataval | EE_ShiftClk, ee_addr);
546 eeprom_delay(ee_addr);
548 writel(EE_ChipSelect, ee_addr);
549 eeprom_delay(ee_addr);
551 for (i = 0; i < 16; i++) {
552 writel(EE_ChipSelect | EE_ShiftClk, ee_addr);
553 eeprom_delay(ee_addr);
554 retval |= (readl(ee_addr) & EE_DataOut) ? 1 << i : 0;
555 writel(EE_ChipSelect, ee_addr);
556 eeprom_delay(ee_addr);
559 /* Terminate the EEPROM access. */
560 writel(EE_Write0, ee_addr);
565 /* MII transceiver control section.
566 The 83815 series has an internal transceiver, and we present the
567 management registers as if they were MII connected. */
569 static int mdio_read(struct net_device *dev, int phy_id, int location)
571 if (phy_id == 1 && location < 32)
572 return readl(dev->base_addr + 0x80 + (location<<2)) & 0xffff;
578 static int netdev_open(struct net_device *dev)
580 struct netdev_private *np = dev->priv;
581 long ioaddr = dev->base_addr;
584 /* Do we need to reset the chip??? */
586 i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);
590 printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
591 dev->name, dev->irq);
595 writel(virt_to_bus(np->rx_ring), ioaddr + RxRingPtr);
596 writel(virt_to_bus(np->tx_ring), ioaddr + TxRingPtr);
598 for (i = 0; i < ETH_ALEN; i += 2) {
599 writel(i, ioaddr + RxFilterAddr);
600 writew(dev->dev_addr[i] + (dev->dev_addr[i+1] << 8),
601 ioaddr + RxFilterData);
604 /* Initialize other registers. */
605 /* Configure the PCI bus bursts and FIFO thresholds. */
606 /* Configure for standard, in-spec Ethernet. */
608 if (readl(ioaddr + ChipConfig) & 0x20000000) { /* Full duplex */
609 np->tx_config = 0xD0801002;
610 np->rx_config = 0x10000020;
612 np->tx_config = 0x10801002;
613 np->rx_config = 0x0020;
615 writel(np->tx_config, ioaddr + TxConfig);
616 writel(np->rx_config, ioaddr + RxConfig);
618 if (dev->if_port == 0)
619 dev->if_port = np->default_port;
622 * The PME bit is initialized from the EEPROM contents.
623 * PCI cards probably have PME disabled, but motherboard
624 * implementations may have PME set to enable WakeOnLan.
625 * With PME set the chip will scan incoming packets but
626 * nothing will be written to memory. */
627 np->SavedClkRun = readl(ioaddr + ClkRun);
628 writel(np->SavedClkRun & ~0x100, ioaddr + ClkRun);
630 netif_start_queue(dev);
635 /* Enable interrupts by setting the interrupt mask. */
636 writel(IntrNormalSummary | IntrAbnormalSummary | 0x1f, ioaddr + IntrMask);
637 writel(1, ioaddr + IntrEnable);
639 writel(RxOn | TxOn, ioaddr + ChipCmd);
640 writel(4, ioaddr + StatsCtrl); /* Clear Stats */
643 printk(KERN_DEBUG "%s: Done netdev_open(), status: %x.\n",
644 dev->name, (int)readl(ioaddr + ChipCmd));
646 /* Set the timer to check for link beat. */
647 init_timer(&np->timer);
648 np->timer.expires = jiffies + 3*HZ;
649 np->timer.data = (unsigned long)dev;
650 np->timer.function = &netdev_timer; /* timer handler */
651 add_timer(&np->timer);
656 static void check_duplex(struct net_device *dev)
658 struct netdev_private *np = dev->priv;
659 long ioaddr = dev->base_addr;
664 duplex = readl(ioaddr + ChipConfig) & 0x20000000 ? 1 : 0;
665 if (np->full_duplex != duplex) {
666 np->full_duplex = duplex;
668 printk(KERN_INFO "%s: Setting %s-duplex based on negotiated link"
669 " capability.\n", dev->name,
670 duplex ? "full" : "half");
672 np->rx_config |= 0x10000000;
673 np->tx_config |= 0xC0000000;
675 np->rx_config &= ~0x10000000;
676 np->tx_config &= ~0xC0000000;
678 writel(np->tx_config, ioaddr + TxConfig);
679 writel(np->rx_config, ioaddr + RxConfig);
683 static void netdev_timer(unsigned long data)
685 struct net_device *dev = (struct net_device *)data;
686 struct netdev_private *np = dev->priv;
687 long ioaddr = dev->base_addr;
688 int next_tick = 60*HZ;
691 printk(KERN_DEBUG "%s: Media selection timer tick, status %8.8x.\n",
692 dev->name, (int)readl(ioaddr + IntrStatus));
694 np->timer.expires = jiffies + next_tick;
695 add_timer(&np->timer);
698 static void tx_timeout(struct net_device *dev)
700 struct netdev_private *np = dev->priv;
701 long ioaddr = dev->base_addr;
703 printk(KERN_WARNING "%s: Transmit timed out, status %8.8x,"
704 " resetting...\n", dev->name, (int)readl(ioaddr + TxRingPtr));
709 printk(KERN_DEBUG " Rx ring %8.8x: ", (int)np->rx_ring);
710 for (i = 0; i < RX_RING_SIZE; i++)
711 printk(" %8.8x", (unsigned int)np->rx_ring[i].cmd_status);
712 printk("\n"KERN_DEBUG" Tx ring %8.8x: ", (int)np->tx_ring);
713 for (i = 0; i < TX_RING_SIZE; i++)
714 printk(" %4.4x", np->tx_ring[i].cmd_status);
719 /* Perhaps we should reinitialize the hardware here. */
721 /* Stop and restart the chip's Tx processes . */
723 /* Trigger an immediate transmit demand. */
725 dev->trans_start = jiffies;
726 np->stats.tx_errors++;
727 netif_wake_queue(dev);
731 /* Initialize the Rx and Tx rings, along with various 'dev' bits. */
732 static void init_ring(struct net_device *dev)
734 struct netdev_private *np = dev->priv;
737 np->cur_rx = np->cur_tx = 0;
738 np->dirty_rx = np->dirty_tx = 0;
740 np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
741 np->rx_head_desc = &np->rx_ring[0];
743 /* Initialize all Rx descriptors. */
744 for (i = 0; i < RX_RING_SIZE; i++) {
745 np->rx_ring[i].next_desc = virt_to_le32desc(&np->rx_ring[i+1]);
746 np->rx_ring[i].cmd_status = DescOwn;
747 np->rx_skbuff[i] = 0;
749 /* Mark the last entry as wrapping the ring. */
750 np->rx_ring[i-1].next_desc = virt_to_le32desc(&np->rx_ring[0]);
752 /* Fill in the Rx buffers. Handle allocation failure gracefully. */
753 for (i = 0; i < RX_RING_SIZE; i++) {
754 struct sk_buff *skb = dev_alloc_skb(np->rx_buf_sz);
755 np->rx_skbuff[i] = skb;
758 skb->dev = dev; /* Mark as being used by this device. */
759 np->rx_ring[i].addr = virt_to_le32desc(skb->tail);
760 np->rx_ring[i].cmd_status =
761 cpu_to_le32(DescIntr | np->rx_buf_sz);
763 np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
765 for (i = 0; i < TX_RING_SIZE; i++) {
766 np->tx_skbuff[i] = 0;
767 np->tx_ring[i].next_desc = virt_to_le32desc(&np->tx_ring[i+1]);
768 np->tx_ring[i].cmd_status = 0;
770 np->tx_ring[i-1].next_desc = virt_to_le32desc(&np->tx_ring[0]);
774 static int start_tx(struct sk_buff *skb, struct net_device *dev)
776 struct netdev_private *np = dev->priv;
779 /* Note: Ordering is important here, set the field with the
780 "ownership" bit last, and only then increment cur_tx. */
782 /* Calculate the next Tx descriptor entry. */
783 entry = np->cur_tx % TX_RING_SIZE;
785 np->tx_skbuff[entry] = skb;
787 np->tx_ring[entry].addr = virt_to_le32desc(skb->data);
788 np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn|DescIntr | skb->len);
791 /* StrongARM: Explicitly cache flush np->tx_ring and skb->data,skb->len. */
794 spin_lock_irq(&np->lock);
795 if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
796 netif_stop_queue(dev);
797 spin_unlock_irq(&np->lock);
799 /* Wake the potentially-idle transmit channel. */
800 writel(TxOn, dev->base_addr + ChipCmd);
802 dev->trans_start = jiffies;
805 printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
806 dev->name, np->cur_tx, entry);
811 /* The interrupt handler does all of the Rx thread work and cleans up
812 after the Tx thread. */
813 static void intr_handler(int irq, void *dev_instance, struct pt_regs *rgs)
815 struct net_device *dev = (struct net_device *)dev_instance;
816 struct netdev_private *np;
818 int boguscnt = max_interrupt_work;
820 #ifndef final_version /* Can never occur. */
822 printk (KERN_ERR "Netdev interrupt handler(): IRQ %d for unknown "
828 ioaddr = dev->base_addr;
832 u32 intr_status = readl(ioaddr + IntrStatus);
834 /* Acknowledge all of the current interrupt sources ASAP. */
835 writel(intr_status & 0x000ffff, ioaddr + IntrStatus);
838 printk(KERN_DEBUG "%s: Interrupt, status %4.4x.\n",
839 dev->name, intr_status);
841 if (intr_status == 0)
844 if (intr_status & (IntrRxDone | IntrRxIntr))
847 spin_lock(&np->lock);
849 for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
850 int entry = np->dirty_tx % TX_RING_SIZE;
851 if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
853 if (np->tx_ring[entry].cmd_status & cpu_to_le32(0x08000000)) {
854 np->stats.tx_packets++;
855 #if LINUX_VERSION_CODE > 0x20127
856 np->stats.tx_bytes += np->tx_skbuff[entry]->len;
858 } else { /* Various Tx errors */
859 int tx_status = le32_to_cpu(np->tx_ring[entry].cmd_status);
860 if (tx_status & 0x04010000) np->stats.tx_aborted_errors++;
861 if (tx_status & 0x02000000) np->stats.tx_fifo_errors++;
862 if (tx_status & 0x01000000) np->stats.tx_carrier_errors++;
863 if (tx_status & 0x00200000) np->stats.tx_window_errors++;
864 np->stats.tx_errors++;
866 /* Free the original skb. */
867 dev_kfree_skb_irq(np->tx_skbuff[entry]);
868 np->tx_skbuff[entry] = 0;
870 if (netif_queue_stopped(dev)
871 && np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
872 /* The ring is no longer full, wake queue. */
873 netif_wake_queue(dev);
876 spin_unlock(&np->lock);
878 /* Abnormal error summary/uncommon events handlers. */
879 if (intr_status & IntrAbnormalSummary)
880 netdev_error(dev, intr_status);
882 if (--boguscnt < 0) {
883 printk(KERN_WARNING "%s: Too much work at interrupt, "
885 dev->name, intr_status);
891 printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
892 dev->name, (int)readl(ioaddr + IntrStatus));
894 #ifndef final_version
895 /* Code that should never be run! Perhaps remove after testing.. */
897 static int stopit = 10;
898 if (!netif_running(dev) && --stopit < 0) {
899 printk(KERN_ERR "%s: Emergency stop, looping startup interrupt.\n",
907 /* This routine is logically part of the interrupt handler, but separated
908 for clarity and better register allocation. */
909 static int netdev_rx(struct net_device *dev)
911 struct netdev_private *np = dev->priv;
912 int entry = np->cur_rx % RX_RING_SIZE;
913 int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
914 s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
916 /* If the driver owns the next entry it's a new packet. Send it up. */
917 while (desc_status < 0) { /* e.g. & DescOwn */
919 printk(KERN_DEBUG " In netdev_rx() entry %d status was %8.8x.\n",
923 if ((desc_status & (DescMore|DescPktOK|RxTooLong)) != DescPktOK) {
924 if (desc_status & DescMore) {
925 printk(KERN_WARNING "%s: Oversized(?) Ethernet frame spanned "
926 "multiple buffers, entry %#x status %x.\n",
927 dev->name, np->cur_rx, desc_status);
928 np->stats.rx_length_errors++;
930 /* There was a error. */
932 printk(KERN_DEBUG " netdev_rx() Rx error was %8.8x.\n",
934 np->stats.rx_errors++;
935 if (desc_status & 0x06000000) np->stats.rx_over_errors++;
936 if (desc_status & 0x00600000) np->stats.rx_length_errors++;
937 if (desc_status & 0x00140000) np->stats.rx_frame_errors++;
938 if (desc_status & 0x00080000) np->stats.rx_crc_errors++;
942 int pkt_len = (desc_status & 0x0fff) - 4; /* Omit CRC size. */
943 /* Check if the packet is long enough to accept without copying
944 to a minimally-sized skbuff. */
945 if (pkt_len < rx_copybreak
946 && (skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
948 skb_reserve(skb, 2); /* 16 byte align the IP header */
950 eth_copy_and_sum(skb, np->rx_skbuff[entry]->tail, pkt_len, 0);
951 skb_put(skb, pkt_len);
953 memcpy(skb_put(skb, pkt_len), np->rx_skbuff[entry]->tail,
957 char *temp = skb_put(skb = np->rx_skbuff[entry], pkt_len);
958 np->rx_skbuff[entry] = NULL;
959 #ifndef final_version /* Remove after testing. */
960 if (le32desc_to_virt(np->rx_ring[entry].addr) != temp)
961 printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
962 "do not match in netdev_rx: %p vs. %p / %p.\n",
964 le32desc_to_virt(np->rx_ring[entry].addr),
968 #ifndef final_version /* Remove after testing. */
969 /* You will want this info for the initial debug. */
971 printk(KERN_DEBUG " Rx data %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:"
972 "%2.2x %2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x %2.2x%2.2x "
974 skb->data[0], skb->data[1], skb->data[2], skb->data[3],
975 skb->data[4], skb->data[5], skb->data[6], skb->data[7],
976 skb->data[8], skb->data[9], skb->data[10],
977 skb->data[11], skb->data[12], skb->data[13],
978 skb->data[14], skb->data[15], skb->data[16],
981 skb->protocol = eth_type_trans(skb, dev);
982 /* W/ hardware checksum: skb->ip_summed = CHECKSUM_UNNECESSARY; */
984 dev->last_rx = jiffies;
985 np->stats.rx_packets++;
986 #if LINUX_VERSION_CODE > 0x20127
987 np->stats.rx_bytes += pkt_len;
990 entry = (++np->cur_rx) % RX_RING_SIZE;
991 np->rx_head_desc = &np->rx_ring[entry];
992 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
995 /* Refill the Rx ring buffers. */
996 for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
998 entry = np->dirty_rx % RX_RING_SIZE;
999 if (np->rx_skbuff[entry] == NULL) {
1000 skb = dev_alloc_skb(np->rx_buf_sz);
1001 np->rx_skbuff[entry] = skb;
1003 break; /* Better luck next round. */
1004 skb->dev = dev; /* Mark as being used by this device. */
1005 np->rx_ring[entry].addr = virt_to_le32desc(skb->tail);
1007 np->rx_ring[entry].cmd_status =
1008 cpu_to_le32(DescIntr | np->rx_buf_sz);
1011 /* Restart Rx engine if stopped. */
1012 writel(RxOn, dev->base_addr + ChipCmd);
1016 static void netdev_error(struct net_device *dev, int intr_status)
1018 struct netdev_private *np = dev->priv;
1019 long ioaddr = dev->base_addr;
1021 if (intr_status & LinkChange) {
1022 printk(KERN_NOTICE "%s: Link changed: Autonegotiation advertising"
1023 " %4.4x partner %4.4x.\n", dev->name,
1024 (int)readl(ioaddr + 0x90), (int)readl(ioaddr + 0x94));
1027 if (intr_status & StatsMax) {
1030 if (intr_status & IntrTxUnderrun) {
1031 if ((np->tx_config & 0x3f) < 62)
1033 writel(np->tx_config, ioaddr + TxConfig);
1035 if (intr_status & WOLPkt) {
1036 int wol_status = readl(ioaddr + WOLCmd);
1037 printk(KERN_NOTICE "%s: Link wake-up event %8.8x",
1038 dev->name, wol_status);
1040 if ((intr_status & ~(LinkChange|StatsMax|RxResetDone|TxResetDone|0xA7ff))
1042 printk(KERN_ERR "%s: Something Wicked happened! %4.4x.\n",
1043 dev->name, intr_status);
1044 /* Hmmmmm, it's not clear how to recover from PCI faults. */
1045 if (intr_status & IntrPCIErr) {
1046 np->stats.tx_fifo_errors++;
1047 np->stats.rx_fifo_errors++;
/* Fold the chip's hardware-maintained error counters into the software
   net_device_stats structure and return it.  The return statement falls
   past the visible span of this excerpt. */
static struct net_device_stats *get_stats(struct net_device *dev)
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;
	/* We should lock this segment of code for SMP eventually, although
	   the vulnerability window is very small and statistics are
	   non-critical. */
	/* The chip only needs to report frames it silently dropped;
	   reading these registers clears them (read-and-accumulate). */
	np->stats.rx_crc_errors += readl(ioaddr + RxCRCErrs);
	np->stats.rx_missed_errors += readl(ioaddr + RxMissed);
1066 /* The little-endian AUTODIN II ethernet CRC calculations.
1067 A big-endian version is also available.
1068 This is slow but compact code. Do not use this routine for bulk data,
1069 use a table-based routine instead.
1070 This is common code and should be moved to net/core/crc.c.
1071 Chips may use the upper or lower CRC bits, and may reverse and/or invert
1072 them. Select the endian-ness that results in minimal calculations.
/* Reflected (little-endian) representation of the AUTODIN II / IEEE 802.3
   CRC-32 polynomial. */
static unsigned const ethernet_polynomial_le = 0xedb88320U;

/* Compute the little-endian Ethernet CRC-32 of `length` bytes at `data`.
   Initial value is all-ones and NO final inversion is applied, matching
   the value the DP8381x uses to index its multicast hash filter.
   Bit-at-a-time, so slow but compact — fine for the 6-byte multicast
   addresses this driver feeds it; use a table-driven routine for bulk
   data.  Returns the 32-bit CRC. */
static inline unsigned ether_crc_le(int length, unsigned char *data)
{
	unsigned int crc = 0xffffffff;	/* Initial value. */

	while (--length >= 0) {
		unsigned char current_octet = *data++;
		int bit;

		/* Consume the octet LSB-first, one bit per iteration. */
		for (bit = 8; --bit >= 0; current_octet >>= 1) {
			if ((crc ^ current_octet) & 1) {
				crc >>= 1;
				crc ^= ethernet_polynomial_le;
			} else
				crc >>= 1;
		}
	}
	return crc;
}
/* Program the chip's receive filter to match the interface flags:
   promiscuous, all-multicast, a 512-bit multicast hash, or unicast only.
   Called with dev->set_multicast_list semantics.  (Interior lines are
   missing from this sampled excerpt.) */
static void set_rx_mode(struct net_device *dev)
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;
	u8 mc_filter[64];			/* Multicast hash filter */
	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
		/* Unconditionally log net taps. */
		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
		rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptAllPhys
	} else if ((dev->mc_count > multicast_filter_limit)
			   || (dev->flags & IFF_ALLMULTI)) {
		/* Too many groups to hash (or ALLMULTI): accept all multicast. */
		rx_mode = AcceptBroadcast | AcceptAllMulticast | AcceptMyPhys;
		struct dev_mc_list *mclist;
		/* Build the 512-bit (64-byte) multicast hash table from the
		   low 9 bits of each address's little-endian CRC-32. */
		memset(mc_filter, 0, sizeof(mc_filter));
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
			 i++, mclist = mclist->next) {
			set_bit(ether_crc_le(ETH_ALEN, mclist->dmi_addr) & 0x1ff,
		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
		/* Load the hash table: filter RAM starts at offset 0x200,
		   written 16 bits at a time through the address/data pair. */
		for (i = 0; i < 64; i += 2) {
			writew(0x200 + i, ioaddr + RxFilterAddr);
			writew((mc_filter[i+1]<<8) + mc_filter[i], ioaddr + RxFilterData);
	/* Finally latch the accept-mode bits themselves. */
	writel(rx_mode, ioaddr + RxFilterAddr);
	np->cur_rx_mode = rx_mode;
/* Private-ioctl MII access: report the PHY address, read an MII
   register, or (CAP_NET_ADMIN only) write one.  data[0]=phy id,
   data[1]=register, data[2]=value in, data[3]=value out.  The switch
   braces and return paths fall in excerpt gaps. */
static int mii_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
	struct netdev_private *np = dev->priv;
	u16 *data = (u16 *)&rq->ifr_data;
	case SIOCDEVPRIVATE:		/* Get the address of the PHY in use. */
	case SIOCDEVPRIVATE+1:		/* Read the specified MII register. */
		data[3] = mdio_read(dev, data[0] & 0x1f, data[1] & 0x1f);
	case SIOCDEVPRIVATE+2:		/* Write the specified MII register */
		/* Writes can reconfigure the link; require admin capability. */
		if (!capable(CAP_NET_ADMIN))
		u16 miireg = data[1] & 0x1f;
		u16 value = data[2];
		/* MII registers are memory-mapped at 0x80, 4 bytes apart. */
		writew(value, dev->base_addr + 0x80 + (miireg << 2));
	/* Check for autonegotiation on or reset. */
	/* BMCR bits 0x9000 = reset | autoneg-enable: if either is set the
	   duplex follows negotiation, otherwise it is forced by bit 0x0100. */
	np->duplex_lock = (value & 0x9000) ? 0 : 1;
	if (np->duplex_lock)
		np->full_duplex = (value & 0x0100) ? 1 : 0;
	case 4: np->advertising = value; break;	/* Shadow the ANAR write. */
/* dev->stop handler: quiesce the queue and the chip, free the IRQ and
   all ring buffers, then restore power-management state and power down
   the transceiver.  (Interior lines — including the opening of the
   i386-only debug dump's #if — are missing from this sampled excerpt.) */
static int netdev_close(struct net_device *dev)
	long ioaddr = dev->base_addr;
	struct netdev_private *np = dev->priv;
	/* Stop new transmits before touching the hardware. */
	netif_stop_queue(dev);
	printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x "
		   dev->name, (int)readl(ioaddr + ChipCmd),
		   (int)readl(ioaddr + IntrStatus));
	printk(KERN_DEBUG "%s: Queue pointers were Tx %d / %d, Rx %d / %d.\n",
		   dev->name, np->cur_tx, np->dirty_tx, np->cur_rx, np->dirty_rx);
	/* Disable interrupts using the mask. */
	writel(0, ioaddr + IntrMask);
	writel(0, ioaddr + IntrEnable);
	writel(2, ioaddr + StatsCtrl); /* Freeze Stats */
	/* Stop the chip's Tx and Rx processes. */
	writel(RxOff | TxOff, ioaddr + ChipCmd);
	/* Kill the media timer; safe now that the chip is quiet. */
	del_timer_sync(&np->timer);
	/* Debug-only ring dump (the guarding #if lies in an excerpt gap). */
	printk("\n"KERN_DEBUG" Tx ring at %8.8x:\n",
		   (int)virt_to_bus(np->tx_ring));
	for (i = 0; i < TX_RING_SIZE; i++)
		printk(" #%d desc. %8.8x %8.8x.\n",
			   i, np->tx_ring[i].cmd_status, np->tx_ring[i].addr);
	printk("\n"KERN_DEBUG " Rx ring %8.8x:\n",
		   (int)virt_to_bus(np->rx_ring));
	for (i = 0; i < RX_RING_SIZE; i++) {
		printk(KERN_DEBUG " #%d desc. %8.8x %8.8x\n",
			   i, np->rx_ring[i].cmd_status, np->rx_ring[i].addr);
#endif /* __i386__ debugging only */
	free_irq(dev->irq, dev);
	/* Free all the skbuffs in the Rx queue. */
	for (i = 0; i < RX_RING_SIZE; i++) {
		np->rx_ring[i].cmd_status = 0;
		np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
		if (np->rx_skbuff[i]) {
#if LINUX_VERSION_CODE < 0x20100
			/* Pre-2.1 kernels free via the skb's own flag. */
			np->rx_skbuff[i]->free = 1;
			dev_kfree_skb(np->rx_skbuff[i]);
		np->rx_skbuff[i] = 0;
	/* Release any packets still pending in the Tx ring. */
	for (i = 0; i < TX_RING_SIZE; i++) {
		if (np->tx_skbuff[i])
			dev_kfree_skb(np->tx_skbuff[i]);
		np->tx_skbuff[i] = 0;
	/* Restore PME enable bit */
	writel(np->SavedClkRun, ioaddr + ClkRun);
	writel(0x0200, ioaddr + ChipConfig); /* Power down Xcvr. */
/* PCI hot-unplug / module-unload teardown: unregister the netdev,
   release the PCI regions, unmap the register window and clear the
   drvdata pointer.  NOTE(review): the kfree(dev) presumably sits in the
   excerpt gap before pci_set_drvdata — confirm against the full source. */
static void __devexit natsemi_remove1 (struct pci_dev *pdev)
	struct net_device *dev = pci_get_drvdata(pdev);
	unregister_netdev (dev);
	pci_release_regions (pdev);
	iounmap ((char *) dev->base_addr);
	pci_set_drvdata(pdev, NULL);
/* PCI driver glue, using the GNU C labeled-element ("field:") initializer
   style common in 2.4-era drivers.  NOTE(review): the name: member is not
   visible in this excerpt — confirm it is set in the full source. */
static struct pci_driver natsemi_driver = {
	id_table:	natsemi_pci_tbl,
	probe:		natsemi_probe1,
	remove:		natsemi_remove1,
/* Module entry point: print the version banner and register the PCI
   driver; returns pci_module_init's status (0 or negative errno). */
static int __init natsemi_init_mod (void)
	/* The interleaved KERN_INFO keeps every banner line at info level. */
	printk(KERN_INFO "%s" KERN_INFO "%s" KERN_INFO "%s",
		   version1, version2, version3);
	return pci_module_init (&natsemi_driver);
/* Module exit point: unregister the PCI driver, which detaches all
   bound devices via natsemi_remove1(). */
static void __exit natsemi_exit_mod (void)
	pci_unregister_driver (&natsemi_driver);
/* Register the module's load/unload entry points with the kernel. */
module_init(natsemi_init_mod);
module_exit(natsemi_exit_mod);