2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
6 * Driver for SGI's IOC3 based Ethernet cards as found in the PCI card.
8 * Copyright (C) 1999, 2000 Ralf Baechle
9 * Copyright (C) 1995, 1999, 2000 by Silicon Graphics, Inc.
13 * If you find problems with this driver, then if possible do the
14 * following. Hook up a terminal to the MSC port, send an NMI to the CPUs
15 * by typing ^Tnmi (where ^T stands for <CTRL>-T). You'll see something
18 * 1A 000: *** NMI while in Kernel and no NMI vector installed on node 0
19 * 1A 000: *** Error EPC: 0xffffffff800265e4 (0xffffffff800265e4)
20 * 1A 000: *** Press ENTER to continue.
22 * Next enter the command ``lw i:0x86000f0 0x18'' and include this
23 * command's output, which will look like the example below, with your bug report.
25 * 1A 000: POD MSC Dex> lw i:0x86000f0 0x18
26 * 1A 000: 92000000086000f0: 0021f28c 00000000 00000000 00000000
27 * 1A 000: 9200000008600100: a5000000 01cde000 00000000 000004e0
28 * 1A 000: 9200000008600110: 00000650 00000000 00110b15 00000000
29 * 1A 000: 9200000008600120: 006d0005 77bbca0a a5000000 01ce0000
30 * 1A 000: 9200000008600130: 80000500 00000500 00002538 05690008
31 * 1A 000: 9200000008600140: 00000000 00000000 000003e1 0000786d
35 * - Handle allocation failures in ioc3_alloc_skb() more gracefully.
36 * - Handle allocation failures in ioc3_init_rings().
37 * - Use prefetching for large packets. What is a good lower limit for
39 * - We're probably allocating a bit too much memory.
40 * - Workarounds for various PHYs.
41 * - Proper autonegotiation.
42 * - What exactly is net_device_stats.tx_dropped supposed to count?
43 * - Use hardware checksums.
44 * - Convert to using the PCI infrastructure / IOC3 meta driver.
46 #include <linux/init.h>
47 #include <linux/delay.h>
48 #include <linux/kernel.h>
50 #include <linux/errno.h>
51 #include <linux/module.h>
52 #include <linux/pci.h>
54 #include <linux/netdevice.h>
55 #include <linux/etherdevice.h>
56 #include <linux/skbuff.h>
58 #include <asm/byteorder.h>
60 #include <asm/pgtable.h>
61 #include <asm/sn/types.h>
62 #include <asm/sn/sn0/addrs.h>
63 #include <asm/sn/sn0/hubni.h>
64 #include <asm/sn/sn0/hubio.h>
65 #include <asm/sn/klconfig.h>
66 #include <asm/sn/ioc3.h>
67 #include <asm/sn/sn0/ip27.h>
68 #include <asm/pci/bridge.h>
70 /* 32 RX buffers. This is tunable in the range of 16 <= x < 512. */
/* NOTE(review): the RX_BUFFS define itself is not visible in this view. */
73 /* Private ioctls that de facto are well known and used for example
75 #define SIOCGMIIPHY (SIOCDEVPRIVATE) /* Read from current PHY */
76 #define SIOCGMIIREG (SIOCDEVPRIVATE+1) /* Read any PHY register */
77 #define SIOCSMIIREG (SIOCDEVPRIVATE+2) /* Write any PHY register */
79 /* These exist in other drivers; we don't use them at this time. */
80 #define SIOCGPARAMS (SIOCDEVPRIVATE+3) /* Read operational parameters */
81 #define SIOCSPARAMS (SIOCDEVPRIVATE+4) /* Set operational parameters */
83 /* Private per NIC data of the driver. */
/* NOTE(review): the "struct ioc3_private {" opening line and some members
   (e.g. the register pointer `regs`, `phy`, `txqlen` and the ioc3_lock
   spinlock used elsewhere in this file) are not visible in this view. */
87 unsigned long *rxr; /* pointer to receiver ring */
88 struct ioc3_etxd *txr; /* TX descriptor ring, 128 entries */
89 struct sk_buff *rx_skbs[512]; /* skb backing each RX ring slot */
90 struct sk_buff *tx_skbs[128]; /* skb awaiting completion per TX slot */
91 struct net_device_stats stats; /* returned by ioc3_get_stats() */
92 int rx_ci; /* RX consumer index */
93 int rx_pi; /* RX producer index */
94 int tx_ci; /* TX consumer index */
95 int tx_pi; /* TX producer index */
97 u32 emcr, ehar_h, ehar_l; /* soft copies of EMCR and the hash filter */
98 struct timer_list negtimer; /* autonegotiation timer, see negotiate() */
/* Forward declarations for the net_device methods wired up in
   ioc3_pci_init() and for helpers used before their definition. */
102 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
103 static void ioc3_set_multicast_list(struct net_device *dev);
104 static int ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev);
105 static void ioc3_timeout(struct net_device *dev);
106 static inline unsigned int ioc3_hash(const unsigned char *addr);
107 static inline void ioc3_stop(struct net_device *dev);
108 static void ioc3_init(struct net_device *dev);
/* Name passed to request_irq() and printed in diagnostics. */
110 static const char ioc3_str[] = "IOC3 Ethernet";
112 /* We use this to acquire receive skb's that we can DMA directly into. */
/* Number of bytes to skb_reserve() so that (addr) lands on the next
   128-byte boundary. */
113 #define ALIGNED_RX_SKB_ADDR(addr) \
114 ((((unsigned long)(addr) + (128 - 1)) & ~(128 - 1)) - (unsigned long)(addr))
/* Allocate an skb with 128 bytes of slack, then align its data pointer to
   a 128-byte boundary for DMA.  NOTE(review): the NULL check and the
   closing lines of this statement expression are not visible here -- see
   the TODO above about handling allocation failures more gracefully. */
116 #define ioc3_alloc_skb(__length, __gfp_flags) \
117 ({ struct sk_buff *__skb; \
118 __skb = alloc_skb((__length) + 128, (__gfp_flags)); \
120 int __offset = ALIGNED_RX_SKB_ADDR(__skb->data); \
122 skb_reserve(__skb, __offset); \
127 /* BEWARE: The IOC3 documentation documents the size of rx buffers as
128 1644 while it's actually 1664. This one was nasty to track down ... */
/* 1664 data bytes + RX_OFFSET headroom + 128 bytes alignment slack. */
130 #define RX_BUF_ALLOC_SIZE (1664 + RX_OFFSET + 128)
132 /* DMA barrier to separate cached and uncached accesses. */
134 __asm__("sync" ::: "memory")
/* Size of the IOC3's PCI register window in bytes (1 MB). */
137 #define IOC3_SIZE 0x100000
/* Register accessors; the write side flushes posted writes.
   NOTE(review): the continuation lines of ioc3_r/ioc3_w are not visible
   in this view. */
139 #define ioc3_r(reg) \
146 #define ioc3_w(reg,val) \
148 (ioc3->reg = (val)); \
152 mcr_pack(u32 pulse, u32 sample)
154 return (pulse << 10) | (sample << 2);
158 nic_wait(struct ioc3 *ioc3)
164 } while (!(mcr & 2));
170 nic_reset(struct ioc3 *ioc3)
174 ioc3_w(mcr, mcr_pack(500, 65));
175 presence = nic_wait(ioc3);
177 ioc3_w(mcr, mcr_pack(0, 500));
184 nic_read_bit(struct ioc3 *ioc3)
188 ioc3_w(mcr, mcr_pack(6, 13));
189 result = nic_wait(ioc3);
190 ioc3_w(mcr, mcr_pack(0, 100));
197 nic_write_bit(struct ioc3 *ioc3, int bit)
200 ioc3_w(mcr, mcr_pack(6, 110));
202 ioc3_w(mcr, mcr_pack(80, 30));
208 * Read a byte from an iButton device
211 nic_read_byte(struct ioc3 *ioc3)
216 for (i = 0; i < 8; i++)
217 result = (result >> 1) | (nic_read_bit(ioc3) << 7);
/*
 * Write a byte to an iButton device, least significant bit first.
 */
static void nic_write_byte(struct ioc3 *ioc3, int byte)
{
	int i, bit;

	for (i = 8; i; i--) {
		bit = byte & 1;
		byte >>= 1;

		nic_write_bit(ioc3, bit);
	}
}
/* Run the One-Wire Search ROM algorithm (command 0xf0) to discover the
   64-bit ROM address of the next device on the bus; *last tracks the bit
   position of the previous search branch.
   NOTE(review): declarations, the discrepancy bookkeeping and the return
   statement are not visible in this view. */
239 nic_find(struct ioc3 *ioc3, int *last)
241 int a, b, index, disc;
246 nic_write_byte(ioc3, 0xf0);
248 /* Algorithm from ``Book of iButton Standards''. */
249 for (index = 0, disc = 0; index < 64; index++) {
250 a = nic_read_bit(ioc3);
251 b = nic_read_bit(ioc3);
254 printk("NIC search failed (not fatal).\n");
/* At and above the previous branch point the search direction is forced;
   below it we follow the address discovered so far. */
260 if (index == *last) {
261 address |= 1UL << index;
262 } else if (index > *last) {
263 address &= ~(1UL << index);
265 } else if ((address & (1UL << index)) == 0)
267 nic_write_bit(ioc3, address & (1UL << index));
271 address |= 1UL << index;
273 address &= ~(1UL << index);
274 nic_write_bit(ioc3, a);
/* Identify the NIC (Number-In-a-Can) part on the One-Wire bus, select it
   with a Match ROM (0x55) command, then decode and print its serial
   number.  NOTE(review): declarations, the family-code cases of the
   switch and the function tail are not visible in this view. */
284 static int nic_init(struct ioc3 *ioc3)
295 reg = nic_find(ioc3, &save);
297 switch (reg & 0xff) {
303 /* Let the caller try again. */
312 nic_write_byte(ioc3, 0x55);
/* Echo back the full 64-bit ROM address, byte by byte, LSB first. */
313 for (i = 0; i < 8; i++)
314 nic_write_byte(ioc3, (reg >> (i << 3)) & 0xff);
316 reg >>= 8; /* Shift out type. */
317 for (i = 0; i < 6; i++) {
318 serial[i] = reg & 0xff;
325 printk("Found %s NIC", type);
/* NOTE(review): pointer comparison against a string literal -- this only
   works if `type` was assigned from the identical literal earlier;
   strcmp() would be the robust spelling.  TODO confirm how `type` is
   initialised (those lines are not visible here). */
326 if (type != "unknown") {
327 printk (" registration number %02x:%02x:%02x:%02x:%02x:%02x,"
328 " CRC %02x", serial[0], serial[1], serial[2],
329 serial[3], serial[4], serial[5], crc);
337 * Read the NIC (Number-In-a-Can) device.
339 static void ioc3_get_eaddr(struct net_device *dev, struct ioc3 *ioc3)
/* NOTE(review): the opening brace, declarations and the retry loop
   around nic_init() are not visible in this view. */
343 int tries = 2; /* There may be some problem with the battery? */
345 ioc3_w(gpcr_s, (1 << 21));
354 printk("Failed to read MAC address\n");
/* Command 0xf0 followed by a two-byte address of 0x0000 -- presumably a
   Read Memory of the registration page; TODO confirm against the
   iButton data sheet. */
359 nic_write_byte(ioc3, 0xf0);
360 nic_write_byte(ioc3, 0x00);
361 nic_write_byte(ioc3, 0x00);
363 for (i = 13; i >= 0; i--)
364 nic[i] = nic_read_byte(ioc3);
/* Bytes 2..7 of the NIC data carry the station MAC address. */
366 printk("Ethernet address is ");
367 for (i = 2; i < 8; i++) {
368 dev->dev_addr[i - 2] = nic[i];
369 printk("%02x", nic[i]);
376 /* Caller must hold the ioc3_lock ever for MII readers. This is also
377 used to protect the transmitter side but it's low contention. */
378 static u16 mii_read(struct ioc3 *ioc3, int phy, int reg)
380 while (ioc3->micr & MICR_BUSY);
381 ioc3->micr = (phy << MICR_PHYADDR_SHIFT) | reg | MICR_READTRIG;
382 while (ioc3->micr & MICR_BUSY);
384 return ioc3->midr_r & MIDR_DATA_MASK;
387 static void mii_write(struct ioc3 *ioc3, int phy, int reg, u16 data)
389 while (ioc3->micr & MICR_BUSY);
391 ioc3->micr = (phy << MICR_PHYADDR_SHIFT) | reg;
392 while (ioc3->micr & MICR_BUSY);
395 static int ioc3_mii_init(struct net_device *dev, struct ioc3_private *ip,
398 static struct net_device_stats *ioc3_get_stats(struct net_device *dev)
400 struct ioc3_private *ip = (struct ioc3_private *) dev->priv;
401 struct ioc3 *ioc3 = ip->regs;
403 ip->stats.collisions += (ioc3->etcdc & ETCDC_COLLCNT_MASK);
/* Receive handler: walk the RX ring from the consumer index, pass good
   frames up the stack and refill each consumed slot with a fresh skb.
   NOTE(review): several lines (declarations, the netif_rx() call, the
   error-branch head and the loop close) are not visible in this view. */
408 ioc3_rx(struct net_device *dev, struct ioc3_private *ip, struct ioc3 *ioc3)
410 struct sk_buff *skb, *new_skb;
411 int rx_entry, n_entry, len;
412 struct ioc3_erxbuf *rxb;
416 rxr = (unsigned long *) ip->rxr; /* Ring base */
417 rx_entry = ip->rx_ci; /* RX consume index */
420 skb = ip->rx_skbs[rx_entry];
421 rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
424 while (w0 & ERXBUF_V) {
425 err = rxb->err; /* It's valid ... */
426 if (err & ERXBUF_GOODPKT) {
427 len = (w0 >> ERXBUF_BYTECNT_SHIFT) & 0x7ff;
429 skb->protocol = eth_type_trans(skb, dev);
432 ip->rx_skbs[rx_entry] = NULL; /* Poison */
/* Refill the slot; on allocation failure the old skb is recycled so the
   ring never runs dry (the packet is dropped instead). */
434 new_skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
436 /* Ouch, drop packet and just recycle packet
437 to keep the ring filled. */
438 ip->stats.rx_dropped++;
445 /* Because we reserve afterwards. */
446 skb_put(new_skb, (1664 + RX_OFFSET));
447 rxb = (struct ioc3_erxbuf *) new_skb->data;
448 skb_reserve(new_skb, RX_OFFSET);
450 ip->stats.rx_packets++; /* Statistics */
451 ip->stats.rx_bytes += len;
453 /* The frame is invalid and the skb never
454 reached the network layer so we can just
457 ip->stats.rx_errors++;
459 if (err & ERXBUF_CRCERR) /* Statistics */
460 ip->stats.rx_crc_errors++;
461 if (err & ERXBUF_FRAMERR)
462 ip->stats.rx_frame_errors++;
/* Hand the (possibly recycled) buffer back to the hardware. */
464 ip->rx_skbs[n_entry] = new_skb;
465 rxr[n_entry] = (0xa5UL << 56) |
466 ((unsigned long) rxb & TO_PHYS_MASK);
467 rxb->w0 = 0; /* Clear valid flag */
468 n_entry = (n_entry + 1) & 511; /* Update erpir */
470 /* Now go on to the next ring entry. */
471 rx_entry = (rx_entry + 1) & 511;
472 skb = ip->rx_skbs[rx_entry];
473 rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
476 ioc3->erpir = (n_entry << 3) | ERPIR_ARM;
478 ip->rx_ci = rx_entry;
/* TX completion: free every skb the hardware has finished with, update
   statistics and wake the queue once fewer than 128 entries are queued.
   NOTE(review): the opening brace, the packets/bytes initialisation and
   the first etcir read are not visible in this view. */
482 ioc3_tx(struct net_device *dev, struct ioc3_private *ip, struct ioc3 *ioc3)
484 unsigned long packets, bytes;
485 int tx_entry, o_entry;
489 spin_lock(&ip->ioc3_lock);
/* etcir bits 7+ hold the hardware consume index (128-entry ring). */
492 tx_entry = (etcir >> 7) & 127;
497 while (o_entry != tx_entry) {
499 skb = ip->tx_skbs[o_entry];
501 dev_kfree_skb_irq(skb);
502 ip->tx_skbs[o_entry] = NULL;
504 o_entry = (o_entry + 1) & 127; /* Next */
506 etcir = ioc3->etcir; /* More pkts sent? */
507 tx_entry = (etcir >> 7) & 127;
510 ip->stats.tx_packets += packets;
511 ip->stats.tx_bytes += bytes;
512 ip->txqlen -= packets;
514 if (ip->txqlen < 128)
515 netif_wake_queue(dev);
518 spin_unlock(&ip->ioc3_lock);
522 * Deal with fatal IOC3 errors. This condition might be caused by hardware
523 * or software problems, so we should try to recover
524 * more gracefully if this ever happens. In theory we might be flooded
525 * with such error interrupts if something really goes wrong, so we might
526 * also consider taking the interface down.
529 ioc3_error(struct net_device *dev, struct ioc3_private *ip,
530 struct ioc3 *ioc3, u32 eisr)
/* Log each error cause that fired, then reinitialise and restart below.
   NOTE(review): the chip stop/init calls between the last printk and
   ioc3_mii_init() are not visible in this view. */
532 if (eisr & EISR_RXOFLO) {
533 printk(KERN_ERR "%s: RX overflow.\n", dev->name);
535 if (eisr & EISR_RXBUFOFLO) {
536 printk(KERN_ERR "%s: RX buffer overflow.\n", dev->name);
538 if (eisr & EISR_RXMEMERR) {
539 printk(KERN_ERR "%s: RX PCI error.\n", dev->name);
541 if (eisr & EISR_RXPARERR) {
542 printk(KERN_ERR "%s: RX SSRAM parity error.\n", dev->name);
544 if (eisr & EISR_TXBUFUFLO) {
545 printk(KERN_ERR "%s: TX buffer underflow.\n", dev->name);
547 if (eisr & EISR_TXMEMERR) {
548 printk(KERN_ERR "%s: TX PCI error.\n", dev->name);
553 ioc3_mii_init(dev, ip, ioc3);
555 dev->trans_start = jiffies;
556 netif_wake_queue(dev);
559 /* The interrupt handler does all of the Rx thread work and cleans up
560 after the Tx thread. */
561 static void ioc3_interrupt(int irq, void *_dev, struct pt_regs *regs)
563 struct net_device *dev = (struct net_device *)_dev;
564 struct ioc3_private *ip = dev->priv;
565 struct ioc3 *ioc3 = ip->regs;
/* Mask of the interrupt sources this handler services (matches the eier
   programming in ioc3_init()). */
566 const u32 enabled = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
567 EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
568 EISR_TXEXPLICIT | EISR_TXMEMERR;
571 eisr = ioc3->eisr & enabled;
/* NOTE(review): the loop structure around acknowledging and dispatching
   the causes is not fully visible; eisr is re-read at the bottom, which
   suggests the handler loops until no enabled cause remains pending. */
575 ioc3->eisr; /* Flush */
577 if (eisr & (EISR_RXOFLO | EISR_RXBUFOFLO | EISR_RXMEMERR |
578 EISR_RXPARERR | EISR_TXBUFUFLO | EISR_TXMEMERR))
579 ioc3_error(dev, ip, ioc3, eisr);
580 if (eisr & EISR_RXTIMERINT)
581 ioc3_rx(dev, ip, ioc3);
582 if (eisr & EISR_TXEXPLICIT)
583 ioc3_tx(dev, ip, ioc3);
585 eisr = ioc3->eisr & enabled;
/* Autonegotiation watchdog: runs off ip->negtimer and re-arms itself
   every 20 seconds.  NOTE(review): any link-state polling between the
   register pointer setup and mod_timer() is not visible in this view. */
589 static void negotiate(unsigned long data)
591 struct net_device *dev = (struct net_device *) data;
592 struct ioc3_private *ip = (struct ioc3_private *) dev->priv;
593 struct ioc3 *ioc3 = ip->regs;
595 mod_timer(&ip->negtimer, jiffies + 20 * HZ);
/* Scan MDIO addresses 0..31 for a responding PHY (via its ID register),
   then start autonegotiation for 100 Mbit full duplex and arm the
   negotiate() timer.  NOTE(review): the "no PHY found" early-return path
   between the scan and the autonegotiation block is not visible here. */
598 static int ioc3_mii_init(struct net_device *dev, struct ioc3_private *ip,
604 spin_lock_irq(&ip->ioc3_lock);
606 for (i = 0; i < 32; i++) {
/* Register 2 is a PHY identifier register: all-ones or all-zeroes means
   nothing answered at this address. */
607 word = mii_read(ioc3, i, 2);
608 if ((word != 0xffff) && (word != 0x0000)) {
610 break; /* Found a PHY */
614 spin_unlock_irq(&ip->ioc3_lock);
619 /* Autonegotiate 100mbit and fullduplex. */
620 mii0 = mii_read(ioc3, ip->phy, 0);
621 mii_write(ioc3, ip->phy, 0, mii0 | 0x3100);
623 ip->negtimer.function = &negotiate;
624 ip->negtimer.data = (unsigned long) dev;
625 mod_timer(&ip->negtimer, jiffies); /* Run it now */
627 spin_unlock_irq(&ip->ioc3_lock);
/* Compact the RX ring: advance the consumer index to a multiple of 16
   (moving skbs/descriptors up to the producer side), then clear the
   valid bit of every buffer between consumer and producer.
   NOTE(review): the function head/tail and the rxb->w0 clearing
   statement are not visible; the 16-alignment rationale is assumed to be
   a hardware requirement -- TODO confirm against IOC3 documentation. */
633 ioc3_clean_rx_ring(struct ioc3_private *ip)
638 for (i = ip->rx_ci; i & 15; i++) {
639 ip->rx_skbs[ip->rx_pi] = ip->rx_skbs[ip->rx_ci];
640 ip->rxr[ip->rx_pi++] = ip->rxr[ip->rx_ci++];
645 for (i = ip->rx_ci; i != ip->rx_pi; i = (i+1) & 511) {
646 struct ioc3_erxbuf *rxb;
647 skb = ip->rx_skbs[i];
648 rxb = (struct ioc3_erxbuf *) (skb->data - RX_OFFSET);
654 ioc3_clean_tx_ring(struct ioc3_private *ip)
659 for (i=0; i < 128; i++) {
660 skb = ip->tx_skbs[i];
662 ip->tx_skbs[i] = NULL;
663 dev_kfree_skb_any(skb);
/* Release both rings: free all TX skbs plus the 4 TX ring pages, then
   every RX skb between consumer and producer plus the RX ring page.
   NOTE(review): the head/tail lines and the n_entry initialisation are
   not visible in this view. */
672 ioc3_free_rings(struct ioc3_private *ip)
675 int rx_entry, n_entry;
678 ioc3_clean_tx_ring(ip);
679 free_pages((unsigned long)ip->txr, 2);
685 rx_entry = ip->rx_pi;
687 while (n_entry != rx_entry) {
688 skb = ip->rx_skbs[n_entry];
690 dev_kfree_skb_any(skb);
692 n_entry = (n_entry + 1) & 511;
694 free_page((unsigned long)ip->rxr);
/* First-use allocation of the RX ring (one page = 512 descriptors), its
   initial RX_BUFFS skbs, and the TX descriptor ring (4 pages).
   NOTE(review): the get_free_page()/ioc3_alloc_skb() failure paths and
   the function tail are not visible; the TODO list at the top of the
   file already flags these allocation failures. */
700 ioc3_alloc_rings(struct net_device *dev, struct ioc3_private *ip,
703 struct ioc3_erxbuf *rxb;
707 if (ip->rxr == NULL) {
708 /* Allocate and initialize rx ring. 4kb = 512 entries */
709 ip->rxr = (unsigned long *) get_free_page(GFP_KERNEL|GFP_ATOMIC);
710 rxr = (unsigned long *) ip->rxr;
712 /* Now the rx buffers. The RX ring may be larger but
713 we only allocate 16 buffers for now. Need to tune
714 this for performance and memory later. */
715 for (i = 0; i < RX_BUFFS; i++) {
718 skb = ioc3_alloc_skb(RX_BUF_ALLOC_SIZE, GFP_ATOMIC);
724 ip->rx_skbs[i] = skb;
727 /* Because we reserve afterwards. */
728 skb_put(skb, (1664 + RX_OFFSET));
729 rxb = (struct ioc3_erxbuf *) skb->data;
/* Descriptor word: 0xa5 in the top byte plus the buffer's physical
   address -- TODO confirm the 0xa5 attribute semantics against the
   IOC3/Bridge documentation. */
730 rxr[i] = (0xa5UL << 56)
731 | ((unsigned long) rxb & TO_PHYS_MASK);
732 skb_reserve(skb, RX_OFFSET);
735 ip->rx_pi = RX_BUFFS;
738 if (ip->txr == NULL) {
739 /* Allocate and initialize tx rings. 16kb = 128 bufs. */
740 ip->txr = (struct ioc3_etxd *)__get_free_pages(GFP_KERNEL|GFP_ATOMIC, 2);
/* (Re)initialise both rings and program their base/consume/produce
   registers into the chip.  The (0xa5UL << 56) | TO_PHYS_MASK pattern
   builds the 64-bit bus address the IOC3 expects from a kernel virtual
   address -- TODO confirm the 0xa5 attribute byte semantics. */
747 ioc3_init_rings(struct net_device *dev, struct ioc3_private *ip,
753 ioc3_alloc_rings(dev, ip, ioc3);
755 ioc3_clean_rx_ring(ip);
756 ioc3_clean_tx_ring(ip);
758 /* Now the rx ring base, consume & produce registers. */
759 ring = (0xa5UL << 56) | ((unsigned long)ip->rxr & TO_PHYS_MASK);
760 ioc3->erbr_h = ring >> 32;
761 ioc3->erbr_l = ring & 0xffffffff;
762 ioc3->ercir = (ip->rx_ci << 3);
763 ioc3->erpir = (ip->rx_pi << 3) | ERPIR_ARM;
765 ring = (0xa5UL << 56) | ((unsigned long)ip->txr & TO_PHYS_MASK);
767 ip->txqlen = 0; /* nothing queued */
769 /* Now the tx ring base, consume & produce registers. */
770 ioc3->etbr_h = ring >> 32;
771 ioc3->etbr_l = ring & 0xffffffff;
772 ioc3->etpir = (ip->tx_pi << 7);
773 ioc3->etcir = (ip->tx_ci << 7);
774 ioc3->etcir; /* Flush */
778 ioc3_ssram_disc(struct ioc3_private *ip)
780 struct ioc3 *ioc3 = ip->regs;
781 volatile u32 *ssram0 = &ioc3->ssram[0x0000];
782 volatile u32 *ssram1 = &ioc3->ssram[0x4000];
783 unsigned int pattern = 0x5555;
785 /* Assume the larger size SSRAM and enable parity checking */
786 ioc3->emcr |= (EMCR_BUFSIZ | EMCR_RAMPAR);
789 *ssram1 = ~pattern & IOC3_SSRAM_DM;
791 if ((*ssram0 & IOC3_SSRAM_DM) != pattern ||
792 (*ssram1 & IOC3_SSRAM_DM) != (~pattern & IOC3_SSRAM_DM)) {
793 /* set ssram size to 64 KB */
794 ip->emcr = EMCR_RAMPAR;
795 ioc3->emcr &= ~EMCR_BUFSIZ;
797 ip->emcr = EMCR_BUFSIZ | EMCR_RAMPAR;
801 static void ioc3_init(struct net_device *dev)
/* Full chip (re)initialisation: reset, program timing/address/hash
   registers, rebuild the rings, then enable DMA and interrupts.
   NOTE(review): the lines between the reset delay and the etcsr write
   are not visible in this view. */
803 struct ioc3_private *ip = dev->priv;
804 struct ioc3 *ioc3 = ip->regs;
806 ioc3->emcr = EMCR_RST; /* Reset */
807 ioc3->emcr; /* flush WB */
808 udelay(4); /* Give it time ... */
814 ioc3->etcsr = (17<<ETCSR_IPGR2_SHIFT) | (11<<ETCSR_IPGR1_SHIFT) | 21;
815 ioc3->etcdc; /* Clear on read */
816 ioc3->ercsr = 15; /* RX low watermark */
817 ioc3->ertr = 0; /* Interrupt immediately */
/* Station address, low-order bytes in emar_l. */
818 ioc3->emar_h = (dev->dev_addr[5] << 8) | dev->dev_addr[4];
819 ioc3->emar_l = (dev->dev_addr[3] << 24) | (dev->dev_addr[2] << 16) |
820 (dev->dev_addr[1] << 8) | dev->dev_addr[0];
821 ioc3->ehar_h = ip->ehar_h;
822 ioc3->ehar_l = ip->ehar_l;
823 ioc3->ersr = 42; /* XXX should be random */
825 ioc3_init_rings(dev, ip, ioc3);
827 ip->emcr |= ((RX_OFFSET / 2) << EMCR_RXOFF_SHIFT) | EMCR_TXDMAEN |
828 EMCR_TXEN | EMCR_RXDMAEN | EMCR_RXEN;
829 ioc3->emcr = ip->emcr;
/* Enable exactly the sources ioc3_interrupt() handles. */
830 ioc3->eier = EISR_RXTIMERINT | EISR_RXOFLO | EISR_RXBUFOFLO |
831 EISR_RXMEMERR | EISR_RXPARERR | EISR_TXBUFUFLO |
832 EISR_TXEXPLICIT | EISR_TXMEMERR;
836 static inline void ioc3_stop(struct net_device *dev)
838 struct ioc3_private *ip = dev->priv;
839 struct ioc3 *ioc3 = ip->regs;
841 ioc3->emcr = 0; /* Shutup */
842 ioc3->eier = 0; /* Disable interrupts */
843 ioc3->eier; /* Flush */
/* net_device open(): grab the IRQ, initialise the chip and start the
   transmit queue.  NOTE(review): the error return after the printk, the
   ioc3_init() call and the final return are not visible in this view. */
847 ioc3_open(struct net_device *dev)
849 struct ioc3_private *ip;
851 if (request_irq(dev->irq, ioc3_interrupt, 0, ioc3_str, dev)) {
852 printk(KERN_ERR "%s: Can't get irq %d\n", dev->name, dev->irq);
857 ip = (struct ioc3_private *) dev->priv;
863 netif_start_queue(dev);
871 ioc3_close(struct net_device *dev)
873 struct ioc3_private *ip = dev->priv;
875 del_timer(&ip->negtimer);
876 netif_stop_queue(dev);
878 ioc3_stop(dev); /* Flush */
879 free_irq(dev->irq, dev);
888 static int ioc3_pci_init(struct pci_dev *pdev)
/* Probe one IOC3 PCI function: allocate the net_device, map the register
   window, find and report the PHY, read the MAC address from the NIC
   part and wire up the net_device method table.
   NOTE(review): many lines (allocation-failure checks, the ip/regs
   assignment, error unwinding and the final return) are not visible in
   this view. */
890 u16 mii0, mii_status, mii2, mii3, mii4;
891 struct net_device *dev = NULL; // XXX
892 struct ioc3_private *ip;
894 unsigned long ioc3_base, ioc3_size;
895 u32 vendor, model, rev;
898 dev = init_etherdev(0, sizeof(struct ioc3_private));
904 memset(ip, 0, sizeof(*ip));
907 * This probably needs to be register_netdevice, or call
908 * init_etherdev so that it calls register_netdevice. Quick
911 netif_device_attach(dev);
913 dev->irq = pdev->irq;
/* Map the whole register window described by BAR 0. */
915 ioc3_base = pdev->resource[0].start;
916 ioc3_size = pdev->resource[0].end - ioc3_base;
917 ioc3 = (struct ioc3 *) ioremap(ioc3_base, ioc3_size);
920 spin_lock_init(&ip->ioc3_lock);
926 init_timer(&ip->negtimer);
927 ioc3_mii_init(dev, ip, ioc3);
931 printk(KERN_CRIT"%s: Didn't find a PHY, goodbye.\n", dev->name);
933 free_irq(dev->irq, dev);
/* Dump the PHY identification registers 0-4 for diagnostics. */
939 mii0 = mii_read(ioc3, phy, 0);
940 mii_status = mii_read(ioc3, phy, 1);
941 mii2 = mii_read(ioc3, phy, 2);
942 mii3 = mii_read(ioc3, phy, 3);
943 mii4 = mii_read(ioc3, phy, 4);
944 vendor = (mii2 << 12) | (mii3 >> 4);
945 model = (mii3 >> 4) & 0x3f;
947 printk(KERN_INFO"Using PHY %d, vendor 0x%x, model %d, rev %d.\n",
948 phy, vendor, model, rev);
949 printk(KERN_INFO "%s: MII transceiver found at MDIO address "
950 "%d, config %4.4x status %4.4x.\n",
951 dev->name, phy, mii0, mii_status);
954 printk("IOC3 SSRAM has %d kbyte.\n", ip->emcr & EMCR_BUFSIZ ? 128 : 64);
956 ioc3_get_eaddr(dev, ioc3);
958 /* The IOC3-specific entries in the device structure. */
959 dev->open = ioc3_open;
960 dev->hard_start_xmit = ioc3_start_xmit;
961 dev->tx_timeout = ioc3_timeout;
962 dev->watchdog_timeo = 5 * HZ;
963 dev->stop = ioc3_close;
964 dev->get_stats = ioc3_get_stats;
965 dev->do_ioctl = ioc3_ioctl;
966 dev->set_multicast_list = ioc3_set_multicast_list;
971 static int __init ioc3_probe(void)
/* Scan the PCI bus for SGI IOC3 devices and initialise each one found.
   NOTE(review): the `called` guard and `cards` counter updates are not
   visible.  The visible return looks inverted: `cards ? -ENODEV : 0`
   reports failure exactly when cards WERE found; the expected form is
   `cards ? 0 : -ENODEV`.  TODO verify against the upstream driver. */
973 static int called = 0;
981 struct pci_dev *pdev = NULL;
983 while ((pdev = pci_find_device(PCI_VENDOR_ID_SGI,
984 PCI_DEVICE_ID_SGI_IOC3, pdev))) {
985 if (ioc3_pci_init(pdev))
991 return cards ? -ENODEV : 0;
994 static void __exit ioc3_cleanup_module(void)
996 /* Later, when we really support modules. */
/* hard_start_xmit: place one skb into the TX descriptor ring.  Three
   cases: short packets are copied into the descriptor itself (D0V);
   packets crossing a 16 KB boundary are described with two buffer
   pointers (B1V|B2V); everything else uses a single pointer (B1V).
   NOTE(review): declarations (len, data, produce), the short-packet
   `if` head and the final return are not visible in this view. */
1000 ioc3_start_xmit(struct sk_buff *skb, struct net_device *dev)
1003 struct ioc3_private *ip = dev->priv;
1004 struct ioc3 *ioc3 = ip->regs;
1006 struct ioc3_etxd *desc;
1009 spin_lock_irq(&ip->ioc3_lock);
1011 data = (unsigned long) skb->data;
1014 produce = ip->tx_pi;
1015 desc = &ip->txr[produce];
1018 /* Short packet, let's copy it directly into the ring. */
1019 memcpy(desc->data, skb->data, skb->len);
1020 if (len < ETH_ZLEN) {
1021 /* Very short packet, pad with zeros at the end. */
1022 memset(desc->data + len, 0, ETH_ZLEN - len);
1025 desc->cmd = len | ETXD_INTWHENDONE | ETXD_D0V;
/* Packet straddles a 16 KB boundary: split into buffers of s1 and s2
   bytes, with b2 the first address past the boundary. */
1027 } else if ((data ^ (data + len)) & 0x4000) {
1028 unsigned long b2, s1, s2;
1030 b2 = (data | 0x3fffUL) + 1UL;
1032 s2 = data + len - b2;
1034 desc->cmd = len | ETXD_INTWHENDONE | ETXD_B1V | ETXD_B2V;
1035 desc->bufcnt = (s1 << ETXD_B1CNT_SHIFT) |
1036 (s2 << ETXD_B2CNT_SHIFT);
1037 desc->p1 = (0xa5UL << 56) | (data & TO_PHYS_MASK);
/* NOTE(review): p2 is built from `data` rather than `b2` -- the second
   buffer pointer should presumably be the boundary address b2 (compare
   s2 = data + len - b2 above).  Looks like a bug; TODO verify against
   the upstream driver before changing. */
1038 desc->p2 = (0xa5UL << 56) | (data & TO_PHYS_MASK);
1040 /* Normal sized packet that doesn't cross a page boundary. */
1041 desc->cmd = len | ETXD_INTWHENDONE | ETXD_B1V;
1042 desc->bufcnt = len << ETXD_B1CNT_SHIFT;
1043 desc->p1 = (0xa5UL << 56) | (data & TO_PHYS_MASK);
1048 dev->trans_start = jiffies;
1049 ip->tx_skbs[produce] = skb; /* Remember skb */
1050 produce = (produce + 1) & 127;
1051 ip->tx_pi = produce;
1052 ioc3->etpir = produce << 7; /* Fire ... */
1056 if (ip->txqlen > 127)
1057 netif_stop_queue(dev);
1059 spin_unlock_irq(&ip->ioc3_lock);
1064 static void ioc3_timeout(struct net_device *dev)
1066 struct ioc3_private *ip = dev->priv;
1067 struct ioc3 *ioc3 = ip->regs;
1069 printk(KERN_ERR "%s: transmit timed out, resetting\n", dev->name);
1073 ioc3_mii_init(dev, ip, ioc3);
1075 dev->trans_start = jiffies;
1076 netif_wake_queue(dev);
/*
 * Given a multicast ethernet address, this routine calculates the
 * address's bit index in the logical address filter mask: a standard
 * little-endian CRC-32 over the 6 address bytes, whose low 6 bits are
 * then bit-reversed to form the hash index (0..63).
 */
#define CRC_MASK 0xEDB88320

static inline unsigned int ioc3_hash(const unsigned char *addr)
{
	unsigned int temp = 0;
	unsigned int crc;
	unsigned char byte;
	int bits, len = 6;	/* ETH_ALEN */

	for (crc = ~0; --len >= 0; addr++) {
		byte = *addr;
		for (bits = 8; --bits >= 0; ) {
			if ((byte ^ crc) & 1)
				crc = (crc >> 1) ^ CRC_MASK;
			else
				crc >>= 1;
			byte >>= 1;
		}
	}

	crc &= 0x3f;	/* bit reverse lowest 6 bits for hash index */
	for (bits = 6; --bits >= 0; ) {
		temp <<= 1;
		temp |= (crc & 0x1);
		crc >>= 1;
	}

	return temp;
}
1115 /* Provide ioctl() calls to examine the MII xcvr state. */
1116 static int ioc3_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
/* NOTE(review): the opening brace, the SIOCGMIIPHY body, the default
   case and the return statement are not visible in this view. */
1118 struct ioc3_private *ip = (struct ioc3_private *) dev->priv;
/* data[0]=phy, data[1]=register, data[2]=value to write, data[3]=value
   read back -- the de-facto private MII ioctl convention. */
1119 u16 *data = (u16 *)&rq->ifr_data;
1120 struct ioc3 *ioc3 = ip->regs;
1124 case SIOCGMIIPHY: /* Get the address of the PHY in use. */
1130 case SIOCGMIIREG: /* Read any PHY register. */
1131 spin_lock_irq(&ip->ioc3_lock);
1132 data[3] = mii_read(ioc3, data[0], data[1]);
1133 spin_unlock_irq(&ip->ioc3_lock);
1136 case SIOCSMIIREG: /* Write any PHY register. */
/* Writing PHY registers is privileged. */
1137 if (!capable(CAP_NET_ADMIN))
1139 spin_lock_irq(&ip->ioc3_lock);
1140 mii_write(ioc3, data[0], data[1], data[2]);
1141 spin_unlock_irq(&ip->ioc3_lock);
1151 static void ioc3_set_multicast_list(struct net_device *dev)
/* Program the receive filter: promiscuous mode, accept-all-multicast, or
   a 64-bit hash filter computed over the multicast list via ioc3_hash().
   NOTE(review): the opening brace, the ehar declaration and the list
   advance (dmi = dmi->next) are not visible.  Also `addr` is read from
   dev->mc_list before dev->mc_count is checked -- TODO confirm mc_list
   is non-NULL on every path that reaches this dereference. */
1153 struct dev_mc_list *dmi = dev->mc_list;
1154 struct ioc3_private *ip = dev->priv;
1155 struct ioc3 *ioc3 = ip->regs;
1156 char *addr = dmi->dmi_addr;
1160 if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
1161 /* Unconditionally log net taps. */
1162 printk(KERN_INFO "%s: Promiscuous mode enabled.\n", dev->name);
1163 ip->emcr |= EMCR_PROMISC;
1164 ioc3->emcr = ip->emcr;
1167 ip->emcr &= ~EMCR_PROMISC;
1168 ioc3->emcr = ip->emcr; /* Clear promiscuous. */
1171 if ((dev->flags & IFF_ALLMULTI) || (dev->mc_count > 64)) {
1172 /* Too many for hashing to make sense or we want all
1173 multicast packets anyway, so skip computing all the
1174 hashes and just accept all packets. */
1175 ip->ehar_h = 0xffffffff;
1176 ip->ehar_l = 0xffffffff;
1178 for (i = 0; i < dev->mc_count; i++) {
1184 ehar |= (1UL << ioc3_hash(addr));
1186 ip->ehar_h = ehar >> 32;
1187 ip->ehar_l = ehar & 0xffffffff;
1189 ioc3->ehar_h = ip->ehar_h;
1190 ioc3->ehar_l = ip->ehar_l;
1195 MODULE_AUTHOR("Ralf Baechle <ralf@oss.sgi.com>");
1196 MODULE_DESCRIPTION("SGI IOC3 Ethernet driver");
1199 module_init(ioc3_probe);
1200 module_exit(ioc3_cleanup_module);