c5aa5710afbe56bdb4f7a9cf4654c884addef7b3
[linux-flexiantxendom0-3.2.10.git] / drivers / net / typhoon.c
1 /* typhoon.c: A Linux Ethernet device driver for 3Com 3CR990 family of NICs */
2 /*
3         Written 2002-2003 by David Dillow <dave@thedillows.org>
4         Based on code written 1998-2000 by Donald Becker <becker@scyld.com> and
5         Linux 2.2.x driver by David P. McLean <davidpmclean@yahoo.com>.
6
7         This software may be used and distributed according to the terms of
8         the GNU General Public License (GPL), incorporated herein by reference.
9         Drivers based on or derived from this code fall under the GPL and must
10         retain the authorship, copyright and license notice.  This file is not
11         a complete program and may only be used when the entire operating
12         system is licensed under the GPL.
13
14         This software is available on a public web site. It may enable
15         cryptographic capabilities of the 3Com hardware, and may be
16         exported from the United States under License Exception "TSU"
17         pursuant to 15 C.F.R. Section 740.13(e).
18
19         This work was funded by the National Library of Medicine under
20         the Department of Energy project number 0274DD06D1 and NLM project
21         number Y1-LM-2015-01.
22
23         This driver is designed for the 3Com 3CR990 Family of cards with the
24         3XP Processor. It has been tested on x86 and sparc64.
25
26         KNOWN ISSUES:
27         *) The current firmware always strips the VLAN tag off, even if
28                 we tell it not to. You should filter VLANs at the switch
29                 as a workaround (good practice in any event) until we can
30                 get this fixed.
31         *) Cannot DMA Rx packets to a 2 byte aligned address. Also firmware
32                 issue. Hopefully 3Com will fix it.
33         *) Waiting for a command response takes 8ms due to non-preemptable
34                 polling. Only significant for getting stats and creating
35                 SAs, but an ugly wart never the less.
36         *) I've not tested multicast. I think it works, but reports welcome.
37         *) Doesn't do IPSEC offloading. Yet. Keep yer pants on, it's coming.
38 */
39
40 /* Set the copy breakpoint for the copy-only-tiny-frames scheme.
41  * Setting to > 1518 effectively disables this feature.
42  */
43 static int rx_copybreak = 200;
44
45 /* end user-configurable values */
46
47 /* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
48  */
49 static const int multicast_filter_limit = 32;
50
51 /* Operational parameters that are set at compile time. */
52
53 /* Keep the ring sizes a power of two for compile efficiency.
54  * The compiler will convert <unsigned>'%'<2^N> into a bit mask.
55  * Making the Tx ring too large decreases the effectiveness of channel
56  * bonding and packet priority.
57  * There are no ill effects from too-large receive rings.
58  *
59  * We don't currently use the Hi Tx ring so, don't make it very big.
60  *
61  * Beware that if we start using the Hi Tx ring, we will need to change
62  * typhoon_num_free_tx() and typhoon_tx_complete() to account for that.
63  */
64 #define TXHI_ENTRIES            2
65 #define TXLO_ENTRIES            128
66 #define RX_ENTRIES              32
67 #define COMMAND_ENTRIES         16
68 #define RESPONSE_ENTRIES        32
69
70 #define COMMAND_RING_SIZE       (COMMAND_ENTRIES * sizeof(struct cmd_desc))
71 #define RESPONSE_RING_SIZE      (RESPONSE_ENTRIES * sizeof(struct resp_desc))
72
73 /* The 3XP will preload and remove 64 entries from the free buffer
74  * list, and we need one entry to keep the ring from wrapping, so 
75  * to keep this a power of two, we use 128 entries.
76  */
77 #define RXFREE_ENTRIES          128
78 #define RXENT_ENTRIES           (RXFREE_ENTRIES - 1)
79
80 /* Operational parameters that usually are not changed. */
81
82 /* Time in jiffies before concluding the transmitter is hung. */
83 #define TX_TIMEOUT  (2*HZ)
84
85 #define PKT_BUF_SZ              1536
86
87 #define DRV_MODULE_NAME         "typhoon"
88 #define DRV_MODULE_VERSION      "1.5.1"
89 #define DRV_MODULE_RELDATE      "03/06/26"
90 #define PFX                     DRV_MODULE_NAME ": "
91 #define ERR_PFX                 KERN_ERR PFX
92
93 #if !defined(__OPTIMIZE__)  ||  !defined(__KERNEL__)
94 #warning  You must compile this file with the correct options!
95 #warning  See the last lines of the source file.
96 #error  You must compile this driver with "-O".
97 #endif
98
99 #include <linux/module.h>
100 #include <linux/kernel.h>
101 #include <linux/string.h>
102 #include <linux/timer.h>
103 #include <linux/errno.h>
104 #include <linux/ioport.h>
105 #include <linux/slab.h>
106 #include <linux/interrupt.h>
107 #include <linux/pci.h>
108 #include <linux/netdevice.h>
109 #include <linux/etherdevice.h>
110 #include <linux/skbuff.h>
111 #include <linux/init.h>
112 #include <linux/delay.h>
113 #include <linux/ethtool.h>
114 #include <linux/if_vlan.h>
115 #include <linux/crc32.h>
116 #include <asm/processor.h>
117 #include <asm/bitops.h>
118 #include <asm/io.h>
119 #include <asm/uaccess.h>
120 #include <linux/in6.h>
121 #include <asm/checksum.h>
122 #include <linux/version.h>
123
124 #include "typhoon.h"
125 #include "typhoon-firmware.h"
126
127 static char version[] __devinitdata =
128     "typhoon.c: version " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
129
130 MODULE_AUTHOR("David Dillow <dillowd@y12.doe.gov>");
131 MODULE_LICENSE("GPL");
132 MODULE_DESCRIPTION("3Com Typhoon Family (3C990, 3CR990, and variants)");
133 MODULE_PARM(rx_copybreak, "i");
134
135 #if defined(NETIF_F_TSO) && MAX_SKB_FRAGS > 32
136 #warning Typhoon only supports 32 entries in its SG list for TSO, disabling TSO
137 #undef NETIF_F_TSO
138 #endif
139
140 #if TXLO_ENTRIES <= (2 * MAX_SKB_FRAGS)
141 #error TX ring too small!
142 #endif
143
/* Per-board-variant info: marketing name plus a bitmask of the
 * TYPHOON_CRYPTO_xxx / TYPHOON_FIBER capability flags defined below.
 * Instances are indexed by enum typhoon_cards.
 */
struct typhoon_card_info {
        char *name;             /* human-readable board name */
        int capabilities;       /* TYPHOON_CRYPTO_xxx | TYPHOON_FIBER bits */
};
148
149 #define TYPHOON_CRYPTO_NONE             0
150 #define TYPHOON_CRYPTO_DES              1
151 #define TYPHOON_CRYPTO_3DES             2
152 #define TYPHOON_CRYPTO_VARIABLE         4
153 #define TYPHOON_FIBER                   8
154
155 enum typhoon_cards {
156         TYPHOON_TX = 0, TYPHOON_TX95, TYPHOON_TX97, TYPHOON_SVR,
157         TYPHOON_SVR95, TYPHOON_SVR97, TYPHOON_TXM, TYPHOON_BSVR,
158         TYPHOON_FX95, TYPHOON_FX97, TYPHOON_FX95SVR, TYPHOON_FX97SVR,
159 };
160
161 /* directly indexed by enum typhoon_cards, above */
162 static struct typhoon_card_info typhoon_card_info[] __devinitdata = {
163         { "3Com Typhoon (3C990-TX)",
164                 TYPHOON_CRYPTO_NONE},
165         { "3Com Typhoon (3CR990-TX-95)",
166                 TYPHOON_CRYPTO_DES},
167         { "3Com Typhoon (3CR990-TX-97)",
168                 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
169         { "3Com Typhoon (3C990SVR)",
170                 TYPHOON_CRYPTO_NONE},
171         { "3Com Typhoon (3CR990SVR95)",
172                 TYPHOON_CRYPTO_DES},
173         { "3Com Typhoon (3CR990SVR97)",
174                 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES},
175         { "3Com Typhoon2 (3C990B-TX-M)",
176                 TYPHOON_CRYPTO_VARIABLE},
177         { "3Com Typhoon2 (3C990BSVR)",
178                 TYPHOON_CRYPTO_VARIABLE},
179         { "3Com Typhoon (3CR990-FX-95)",
180                 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
181         { "3Com Typhoon (3CR990-FX-97)",
182                 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
183         { "3Com Typhoon (3CR990-FX-95 Server)",
184                 TYPHOON_CRYPTO_DES | TYPHOON_FIBER},
185         { "3Com Typhoon (3CR990-FX-97 Server)",
186                 TYPHOON_CRYPTO_DES | TYPHOON_CRYPTO_3DES | TYPHOON_FIBER},
187 };
188
189 /* Notes on the new subsystem numbering scheme:
190  * bits 0-1 indicate crypto capabilites: (0) variable, (1) DES, or (2) 3DES
191  * bit 4 indicates if this card has secured firmware (we don't support it)
192  * bit 8 indicates if this is a (0) copper or (1) fiber card
193  * bits 12-16 indicate card type: (0) client and (1) server
194  */
195 static struct pci_device_id typhoon_pci_tbl[] __devinitdata = {
196         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0,TYPHOON_TX },
198         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_95,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX95 },
200         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_TX_97,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_TX97 },
202         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
203           PCI_ANY_ID, 0x1000, 0, 0, TYPHOON_TXM },
204         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990B,
205           PCI_ANY_ID, 0x2000, 0, 0, TYPHOON_BSVR },
206         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
207           PCI_ANY_ID, 0x1101, 0, 0, TYPHOON_FX95 },
208         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
209           PCI_ANY_ID, 0x1102, 0, 0, TYPHOON_FX97 },
210         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
211           PCI_ANY_ID, 0x2101, 0, 0, TYPHOON_FX95SVR },
212         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990_FX,
213           PCI_ANY_ID, 0x2102, 0, 0, TYPHOON_FX97SVR },
214         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR95,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR95 },
216         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR97,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR97 },
218         { PCI_VENDOR_ID_3COM, PCI_DEVICE_ID_3COM_3CR990SVR,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, TYPHOON_SVR },
220         { 0, }
221 };
222 MODULE_DEVICE_TABLE(pci, typhoon_pci_tbl);
223
224 /* Define the shared memory area
225  * Align everything the 3XP will normally be using.
226  * We'll need to move/align txHi if we start using that ring.
227  */
228 #define __3xp_aligned   ____cacheline_aligned
/* One DMA-coherent allocation holding every ring the 3XP touches.
 * Each ring is cacheline-aligned for the card's benefit; txHi is left
 * unaligned at the tail because it is currently unused.
 */
struct typhoon_shared {
        struct typhoon_interface        iface;
        struct typhoon_indexes          indexes                 __3xp_aligned;
        struct tx_desc                  txLo[TXLO_ENTRIES]      __3xp_aligned;
        struct rx_desc                  rxLo[RX_ENTRIES]        __3xp_aligned;
        struct rx_desc                  rxHi[RX_ENTRIES]        __3xp_aligned;
        struct cmd_desc                 cmd[COMMAND_ENTRIES]    __3xp_aligned;
        struct resp_desc                resp[RESPONSE_ENTRIES]  __3xp_aligned;
        struct rx_free                  rxBuff[RXFREE_ENTRIES]  __3xp_aligned;
        u32                             zeroWord;
        struct tx_desc                  txHi[TXHI_ENTRIES];     /* move/align if ever used */
} __attribute__ ((packed));
241
/* Host-side bookkeeping for one Rx buffer: the skb backing it and the
 * DMA address that was handed to the card.
 */
struct rxbuff_ent {
        struct sk_buff *skb;
        dma_addr_t      dma_addr;
};
246
/* Per-adapter private state. Fields are grouped by cache line so the
 * Tx path, the irq/Rx path, and the slow command path do not bounce
 * cache lines between each other.
 */
struct typhoon {
        /* Tx cache line section */
        struct transmit_ring    txLoRing        ____cacheline_aligned;
        struct pci_dev *        tx_pdev;
        unsigned long           tx_ioaddr;
        u32                     txlo_dma_addr;

        /* Irq/Rx cache line section */
        unsigned long           ioaddr          ____cacheline_aligned;
        struct typhoon_indexes *indexes;        /* ring indexes shared with the 3XP */
        u8                      awaiting_resp;  /* set while typhoon_issue_command() waits */
        u8                      duplex;
        u8                      speed;
        u8                      card_state;     /* enum state_values (Sleeping/Running) */
        struct basic_ring       rxLoRing;
        struct pci_dev *        pdev;
        struct net_device *     dev;
        spinlock_t              state_lock;     /* guards vlgrp and offload updates */
        struct vlan_group *     vlgrp;
        struct basic_ring       rxHiRing;
        struct basic_ring       rxBuffRing;
        struct rxbuff_ent       rxbuffers[RXENT_ENTRIES];

        /* general section */
        spinlock_t              command_lock    ____cacheline_aligned;  /* serializes cmd ring access */
        struct basic_ring       cmdRing;
        struct basic_ring       respRing;
        struct net_device_stats stats;
        struct net_device_stats stats_saved;
        const char *            name;
        struct typhoon_shared * shared;         /* host/card shared memory block */
        dma_addr_t              shared_dma;     /* bus address of *shared */
        u16                     xcvr_select;
        u16                     wol_events;
        u32                     offload;        /* TYPHOON_OFFLOAD_xxx bitmask */
        u32                     pci_state[16];  /* presumably saved PCI config space -- TODO confirm */

        /* unused stuff (future use) */
        int                     capabilities;
        struct transmit_ring    txHiRing;
};
288
/* How typhoon_reset() should wait for the card to come back up:
 * not at all, busy-waiting with udelay(), or sleeping between polls.
 */
enum completion_wait_values {
        NoWait = 0, WaitNoSleep, WaitSleep,
};
292
293 /* These are the values for the typhoon.card_state variable.
294  * These determine where the statistics will come from in get_stats().
295  * The sleep image does not support the statistics we need.
296  */
297 enum state_values {
298         Sleeping = 0, Running,
299 };
300
301 /* PCI writes are not guaranteed to be posted in order, but outstanding writes
302  * cannot pass a read, so this forces current writes to post.
303  */
304 #define typhoon_post_pci_writes(x) \
305         do { readl(x + TYPHOON_REG_HEARTBEAT); } while(0)
306
307 /* We'll wait up to six seconds for a reset, and half a second normally.
308  */
309 #define TYPHOON_UDELAY                  50
310 #define TYPHOON_RESET_TIMEOUT           (6 * HZ)
311 #define TYPHOON_WAIT_TIMEOUT            ((1000000 / 2) / TYPHOON_UDELAY)
312
313 #if LINUX_VERSION_CODE < KERNEL_VERSION(2, 5, 28)
314 #define typhoon_synchronize_irq(x) synchronize_irq()
315 #else
316 #define typhoon_synchronize_irq(x) synchronize_irq(x)
317 #endif
318
319 #if defined(NETIF_F_TSO)
320 #define skb_tso_size(x)         (skb_shinfo(x)->tso_size)
321 #define TSO_NUM_DESCRIPTORS     2
322 #define TSO_OFFLOAD_ON          TYPHOON_OFFLOAD_TCP_SEGMENT
323 #else
324 #define NETIF_F_TSO             0
325 #define skb_tso_size(x)         0
326 #define TSO_NUM_DESCRIPTORS     0
327 #define TSO_OFFLOAD_ON          0
328 #endif
329
330 static inline void
331 typhoon_inc_index(u32 *index, const int count, const int num_entries)
332 {
333         /* Increment a ring index -- we can use this for all rings execept
334          * the Rx rings, as they use different size descriptors
335          * otherwise, everything is the same size as a cmd_desc
336          */
337         *index += count * sizeof(struct cmd_desc);
338         *index %= num_entries * sizeof(struct cmd_desc);
339 }
340
/* Advance a command ring index by @count entries. */
static inline void
typhoon_inc_cmd_index(u32 *index, const int count)
{
        typhoon_inc_index(index, count, COMMAND_ENTRIES);
}
346
/* Advance a response ring index by @count entries. */
static inline void
typhoon_inc_resp_index(u32 *index, const int count)
{
        typhoon_inc_index(index, count, RESPONSE_ENTRIES);
}
352
/* Advance an Rx free-buffer ring index by @count entries. */
static inline void
typhoon_inc_rxfree_index(u32 *index, const int count)
{
        typhoon_inc_index(index, count, RXFREE_ENTRIES);
}
358
/* Advance a Tx ring index by @count entries. */
static inline void
typhoon_inc_tx_index(u32 *index, const int count)
{
        /* if we start using the Hi Tx ring, this needs updating */
        typhoon_inc_index(index, count, TXLO_ENTRIES);
}
365
366 static inline void
367 typhoon_inc_rx_index(u32 *index, const int count)
368 {
369         /* sizeof(struct rx_desc) != sizeof(struct cmd_desc) */
370         *index += count * sizeof(struct rx_desc);
371         *index %= RX_ENTRIES * sizeof(struct rx_desc);
372 }
373
static int
typhoon_reset(unsigned long ioaddr, int wait_type)
{
        /* Soft-reset the 3XP and optionally wait for it to come back.
         *
         * @ioaddr:    mapped base of the card's registers
         * @wait_type: NoWait, WaitNoSleep (udelay polling), or WaitSleep
         *             (schedule_timeout polling)
         *
         * Returns 0 on success, or -ETIMEDOUT if the card never reports
         * TYPHOON_STATUS_WAITING_FOR_HOST within the timeout.
         */
        int i, err = 0;
        int timeout = TYPHOON_RESET_TIMEOUT;

        /* TYPHOON_RESET_TIMEOUT is in jiffies; for the busy-wait case,
         * convert it to a count of udelay(TYPHOON_UDELAY) iterations.
         */
        if(wait_type == WaitNoSleep)
                timeout = (timeout * 1000000) / (HZ * TYPHOON_UDELAY);

        /* mask and acknowledge all interrupts before the reset */
        writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
        writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

        writel(TYPHOON_RESET_ALL, ioaddr + TYPHOON_REG_SOFT_RESET);
        typhoon_post_pci_writes(ioaddr);
        udelay(1);
        writel(TYPHOON_RESET_NONE, ioaddr + TYPHOON_REG_SOFT_RESET);

        if(wait_type != NoWait) {
                for(i = 0; i < timeout; i++) {
                        if(readl(ioaddr + TYPHOON_REG_STATUS) ==
                           TYPHOON_STATUS_WAITING_FOR_HOST)
                                goto out;

                        if(wait_type == WaitSleep) {
                                set_current_state(TASK_UNINTERRUPTIBLE);
                                schedule_timeout(1);
                        } else
                                udelay(TYPHOON_UDELAY);
                }

                err = -ETIMEDOUT;
        }

out:
        writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
        writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_STATUS);

        /* The 3XP seems to need a little extra time to complete the load
         * of the sleep image before we can reliably boot it. Failure to
         * do this occasionally results in a hung adapter after boot in
         * typhoon_init_one() while trying to read the MAC address or
         * putting the card to sleep. 3Com's driver waits 5ms, but
         * that seems to be overkill -- with a 50usec delay, it survives
         * 35000 typhoon_init_one() calls, where it only make it 25-100
         * without it.
         *
         * As it turns out, still occasionally getting a hung adapter,
         * so I'm bumping it to 100us.
         */
        udelay(100);
        return err;
}
426
427 static int
428 typhoon_wait_status(unsigned long ioaddr, u32 wait_value)
429 {
430         int i, err = 0;
431
432         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
433                 if(readl(ioaddr + TYPHOON_REG_STATUS) == wait_value)
434                         goto out;
435                 udelay(TYPHOON_UDELAY);
436         }
437
438         err = -ETIMEDOUT;
439
440 out:
441         return err;
442 }
443
444 static inline void
445 typhoon_media_status(struct net_device *dev, struct resp_desc *resp)
446 {
447         if(resp->parm1 & TYPHOON_MEDIA_STAT_NO_LINK)
448                 netif_carrier_off(dev);
449         else
450                 netif_carrier_on(dev);
451 }
452
453 static inline void
454 typhoon_hello(struct typhoon *tp)
455 {
456         struct basic_ring *ring = &tp->cmdRing;
457         struct cmd_desc *cmd;
458
459         /* We only get a hello request if we've not sent anything to the
460          * card in a long while. If the lock is held, then we're in the
461          * process of issuing a command, so we don't need to respond.
462          */
463         if(spin_trylock(&tp->command_lock)) {
464                 cmd = (struct cmd_desc *)(ring->ringBase + ring->lastWrite);
465                 typhoon_inc_cmd_index(&ring->lastWrite, 1);
466
467                 INIT_COMMAND_NO_RESPONSE(cmd, TYPHOON_CMD_HELLO_RESP);
468                 smp_wmb();
469                 writel(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
470                 spin_unlock(&tp->command_lock);
471         }
472 }
473
static int
typhoon_process_response(struct typhoon *tp, int resp_size,
                                struct resp_desc *resp_save)
{
        /* Drain the response ring, dispatching each response as we go.
         *
         * @resp_size: capacity of @resp_save, in descriptors
         * @resp_save: if non-NULL, the first sequenced response seen
         *             (seqNo != 0) is copied here for the command waiter
         *
         * Returns non-zero if the awaited response was captured (i.e.
         * @resp_save was consumed), zero otherwise.
         */
        struct typhoon_indexes *indexes = tp->indexes;
        struct resp_desc *resp;
        u8 *base = tp->respRing.ringBase;
        int count, len, wrap_len;
        u32 cleared;
        u32 ready;

        cleared = le32_to_cpu(indexes->respCleared);
        ready = le32_to_cpu(indexes->respReady);
        while(cleared != ready) {
                resp = (struct resp_desc *)(base + cleared);
                /* a response occupies numDesc + 1 descriptors */
                count = resp->numDesc + 1;
                if(resp_save && resp->seqNo) {
                        if(count > resp_size) {
                                /* won't fit in the caller's buffer: flag
                                 * the error and discard the response
                                 */
                                resp_save->flags = TYPHOON_RESP_ERROR;
                                goto cleanup;
                        }

                        /* copy out, handling a wrap past the ring's end */
                        wrap_len = 0;
                        len = count * sizeof(*resp);
                        if(unlikely(cleared + len > RESPONSE_RING_SIZE)) {
                                wrap_len = cleared + len - RESPONSE_RING_SIZE;
                                len = RESPONSE_RING_SIZE - cleared;
                        }

                        memcpy(resp_save, resp, len);
                        if(unlikely(wrap_len)) {
                                resp_save += len / sizeof(*resp);
                                memcpy(resp_save, base, wrap_len);
                        }

                        /* NULL marks "awaited response delivered" */
                        resp_save = NULL;
                } else if(resp->cmd == TYPHOON_CMD_READ_MEDIA_STATUS) {
                        typhoon_media_status(tp->dev, resp);
                } else if(resp->cmd == TYPHOON_CMD_HELLO_RESP) {
                        typhoon_hello(tp);
                } else {
                        printk(KERN_ERR "%s: dumping unexpected response "
                               "0x%04x:%d:0x%02x:0x%04x:%08x:%08x\n",
                               tp->name, le16_to_cpu(resp->cmd),
                               resp->numDesc, resp->flags,
                               le16_to_cpu(resp->parm1),
                               le32_to_cpu(resp->parm2),
                               le32_to_cpu(resp->parm3));
                }

cleanup:
                typhoon_inc_resp_index(&cleared, count);
        }

        /* publish our progress to the card before returning */
        indexes->respCleared = cpu_to_le32(cleared);
        wmb();
        return (resp_save == NULL);
}
532
533 static inline int
534 typhoon_num_free(int lastWrite, int lastRead, int ringSize)
535 {
536         /* this works for all descriptors but rx_desc, as they are a
537          * different size than the cmd_desc -- everyone else is the same
538          */
539         lastWrite /= sizeof(struct cmd_desc);
540         lastRead /= sizeof(struct cmd_desc);
541         return (ringSize + lastRead - lastWrite - 1) % ringSize;
542 }
543
544 static inline int
545 typhoon_num_free_cmd(struct typhoon *tp)
546 {
547         int lastWrite = tp->cmdRing.lastWrite;
548         int cmdCleared = le32_to_cpu(tp->indexes->cmdCleared);
549
550         return typhoon_num_free(lastWrite, cmdCleared, COMMAND_ENTRIES);
551 }
552
553 static inline int
554 typhoon_num_free_resp(struct typhoon *tp)
555 {
556         int respReady = le32_to_cpu(tp->indexes->respReady);
557         int respCleared = le32_to_cpu(tp->indexes->respCleared);
558
559         return typhoon_num_free(respReady, respCleared, RESPONSE_ENTRIES);
560 }
561
/* Free descriptor slots in the low-priority Tx ring. */
static inline int
typhoon_num_free_tx(struct transmit_ring *ring)
{
        /* if we start using the Hi Tx ring, this needs updating */
        return typhoon_num_free(ring->lastWrite, ring->lastRead, TXLO_ENTRIES);
}
568
static int
typhoon_issue_command(struct typhoon *tp, int num_cmd, struct cmd_desc *cmd,
                      int num_resp, struct resp_desc *resp)
{
        /* Post @num_cmd command descriptors to the 3XP and, if the
         * command requests a response, busy-wait for up to ~500ms for
         * @num_resp response descriptors to land in @resp.
         *
         * Returns 0 on success, -ENOMEM if the rings lack room,
         * -ETIMEDOUT if no response arrives, or -EIO if the card flags
         * an error in the response. Serialized by tp->command_lock;
         * polls with udelay() because callers may not be able to sleep.
         */
        struct typhoon_indexes *indexes = tp->indexes;
        struct basic_ring *ring = &tp->cmdRing;
        struct resp_desc local_resp;
        int i, err = 0;
        int got_resp;
        int freeCmd, freeResp;
        int len, wrap_len;

        spin_lock(&tp->command_lock);

        freeCmd = typhoon_num_free_cmd(tp);
        freeResp = typhoon_num_free_resp(tp);

        if(freeCmd < num_cmd || freeResp < num_resp) {
                printk("%s: no descs for cmd, had (needed) %d (%d) cmd, "
                        "%d (%d) resp\n", tp->name, freeCmd, num_cmd,
                        freeResp, num_resp);
                err = -ENOMEM;
                goto out;
        }

        if(cmd->flags & TYPHOON_CMD_RESPOND) {
                /* If we're expecting a response, but the caller hasn't given
                 * us a place to put it, we'll provide one.
                 */
                tp->awaiting_resp = 1;
                if(resp == NULL) {
                        resp = &local_resp;
                        num_resp = 1;
                }
        }

        /* copy the command(s) into the ring, wrapping at the end */
        wrap_len = 0;
        len = num_cmd * sizeof(*cmd);
        if(unlikely(ring->lastWrite + len > COMMAND_RING_SIZE)) {
                wrap_len = ring->lastWrite + len - COMMAND_RING_SIZE;
                len = COMMAND_RING_SIZE - ring->lastWrite;
        }

        memcpy(ring->ringBase + ring->lastWrite, cmd, len);
        if(unlikely(wrap_len)) {
                struct cmd_desc *wrap_ptr = cmd;
                wrap_ptr += len / sizeof(*cmd);
                memcpy(ring->ringBase, wrap_ptr, wrap_len);
        }

        typhoon_inc_cmd_index(&ring->lastWrite, num_cmd);

        /* "I feel a presence... another warrior is on the the mesa."
         */
        wmb();  /* descriptors must be visible before the doorbell write */
        writel(ring->lastWrite, tp->ioaddr + TYPHOON_REG_CMD_READY);
        typhoon_post_pci_writes(tp->ioaddr);

        if((cmd->flags & TYPHOON_CMD_RESPOND) == 0)
                goto out;

        /* Ugh. We'll be here about 8ms, spinning our thumbs, unable to
         * preempt or do anything other than take interrupts. So, don't
         * wait for a response unless you have to.
         *
         * I've thought about trying to sleep here, but we're called
         * from many contexts that don't allow that. Also, given the way
         * 3Com has implemented irq coalescing, we would likely timeout --
         * this has been observed in real life!
         *
         * The big killer is we have to wait to get stats from the card,
         * though we could go to a periodic refresh of those if we don't
         * mind them getting somewhat stale. The rest of the waiting
         * commands occur during open/close/suspend/resume, so they aren't
         * time critical. Creating SAs in the future will also have to
         * wait here.
         */
        got_resp = 0;
        for(i = 0; i < TYPHOON_WAIT_TIMEOUT && !got_resp; i++) {
                if(indexes->respCleared != indexes->respReady)
                        got_resp = typhoon_process_response(tp, num_resp,
                                                                resp);
                udelay(TYPHOON_UDELAY);
        }

        if(!got_resp) {
                err = -ETIMEDOUT;
                goto out;
        }

        /* Collect the error response even if we don't care about the
         * rest of the response
         */
        if(resp->flags & TYPHOON_RESP_ERROR)
                err = -EIO;

out:
        if(tp->awaiting_resp) {
                tp->awaiting_resp = 0;
                smp_wmb();

                /* Ugh. If a response was added to the ring between
                 * the call to typhoon_process_response() and the clearing
                 * of tp->awaiting_resp, we could have missed the interrupt
                 * and it could hang in the ring an indeterminate amount of
                 * time. So, check for it, and interrupt ourselves if this
                 * is the case.
                 */
                if(indexes->respCleared != indexes->respReady)
                        writel(1, tp->ioaddr + TYPHOON_REG_SELF_INTERRUPT);
        }

        spin_unlock(&tp->command_lock);
        return err;
}
684
/* vlan core callback: install (or, with grp == NULL, remove) the vlan
 * group, and tell the 3XP to enable/disable VLAN offload whenever the
 * on/off state actually changes.
 */
static void
typhoon_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
{
        struct typhoon *tp = (struct typhoon *) dev->priv;
        struct cmd_desc xp_cmd;
        int err;

        spin_lock_bh(&tp->state_lock);
        /* true exactly when toggling between NULL and non-NULL */
        if(!tp->vlgrp != !grp) {
                /* We've either been turned on for the first time, or we've
                 * been turned off. Update the 3XP.
                 */
                if(grp)
                        tp->offload |= TYPHOON_OFFLOAD_VLAN;
                else
                        tp->offload &= ~TYPHOON_OFFLOAD_VLAN;

                /* If the interface is up, the runtime is running -- and we
                 * must be up for the vlan core to call us.
                 *
                 * Do the command outside of the spin lock, as it is slow.
                 *
                 * NOTE(review): the lock is dropped and retaken around the
                 * command, so a second register call could race in the
                 * window -- presumably the vlan core serializes these
                 * callbacks; verify before relying on it.
                 */
                INIT_COMMAND_WITH_RESPONSE(&xp_cmd,
                                        TYPHOON_CMD_SET_OFFLOAD_TASKS);
                xp_cmd.parm2 = tp->offload;
                xp_cmd.parm3 = tp->offload;
                spin_unlock_bh(&tp->state_lock);
                err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
                if(err < 0)
                        printk("%s: vlan offload error %d\n", tp->name, -err);
                spin_lock_bh(&tp->state_lock);
        }

        /* now make the change visible */
        tp->vlgrp = grp;
        spin_unlock_bh(&tp->state_lock);
}
722
/* vlan core callback: forget the vlan device for @vid, under the same
 * state_lock that guards tp->vlgrp elsewhere in this file.
 */
static void
typhoon_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
{
        struct typhoon *tp = (struct typhoon *) dev->priv;
        spin_lock_bh(&tp->state_lock);
        if(tp->vlgrp)
                tp->vlgrp->vlan_devices[vid] = NULL;
        spin_unlock_bh(&tp->state_lock);
}
732
static inline void
typhoon_tso_fill(struct sk_buff *skb, struct transmit_ring *txRing,
                        u32 ring_dma)
{
        /* Write a TCP-segmentation option descriptor for @skb at the Tx
         * ring's current write position, advancing the ring by one entry.
         * @ring_dma is the bus address of the start of the ring; it is
         * used to compute respAddrLo, the address where the card reports
         * status back (the descriptor's own bytesTx field).
         */
        struct tcpopt_desc *tcpd;
        u32 tcpd_offset = ring_dma;

        tcpd = (struct tcpopt_desc *) (txRing->ringBase + txRing->lastWrite);
        tcpd_offset += txRing->lastWrite;
        tcpd_offset += offsetof(struct tcpopt_desc, bytesTx);
        typhoon_inc_tx_index(&txRing->lastWrite, 1);

        tcpd->flags = TYPHOON_OPT_DESC | TYPHOON_OPT_TCP_SEG;
        tcpd->numDesc = 1;
        tcpd->mss_flags = cpu_to_le16(skb_tso_size(skb));
        /* whole skb is one run: mark it both first and last segment.
         * NOTE(review): the flag bits are OR'd into a le16 value without
         * cpu_to_le16 -- presumably the constants are defined to be
         * endian-safe in typhoon.h; verify on big-endian targets.
         */
        tcpd->mss_flags |= TYPHOON_TSO_FIRST | TYPHOON_TSO_LAST;
        tcpd->respAddrLo = cpu_to_le32(tcpd_offset);
        tcpd->bytesTx = cpu_to_le32(skb->len);
        tcpd->status = 0;
}
753
/* Queue one sk_buff for transmit on the Lo Tx ring (hard_start_xmit).
 *
 * Builds a Tx packet descriptor (which stashes the skb pointer for
 * later cleanup by typhoon_clean_tx()), an optional TSO option
 * descriptor, and one fragment descriptor per DMA mapping, then kicks
 * the 3XP by posting the new ring write offset.  Stops the queue if a
 * worst-case packet would no longer fit.
 *
 * Always returns 0 -- the skb is always consumed.
 */
static int
typhoon_start_tx(struct sk_buff *skb, struct net_device *dev)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;
	struct transmit_ring *txRing;
	struct tx_desc *txd, *first_txd;
	dma_addr_t skb_dma;
	int numDesc;

	/* we have two rings to choose from, but we only use txLo for now
	 * If we start using the Hi ring as well, we'll need to update
	 * typhoon_stop_runtime(), typhoon_interrupt(), typhoon_num_free_tx(),
	 * and TXHI_ENTIRES to match, as well as update the TSO code below
	 * to get the right DMA address
	 */
	txRing = &tp->txLoRing;

	/* We need one descriptor for each fragment of the sk_buff, plus the
	 * one for the ->data area of it.
	 *
	 * The docs say a maximum of 16 fragment descriptors per TCP option
	 * descriptor, then make a new packet descriptor and option descriptor
	 * for the next 16 fragments. The engineers say just an option
	 * descriptor is needed. I've tested up to 26 fragments with a single
	 * packet descriptor/option descriptor combo, so I use that for now.
	 *
	 * If problems develop with TSO, check this first.
	 */
	numDesc = skb_shinfo(skb)->nr_frags + 1;
	if(skb_tso_size(skb))
		numDesc++;

	/* When checking for free space in the ring, we need to also
	 * account for the initial Tx descriptor, and we always must leave
	 * at least one descriptor unused in the ring so that it doesn't
	 * wrap and look empty.
	 *
	 * The only time we should loop here is when we hit the race
	 * between marking the queue awake and updating the cleared index.
	 * Just loop and it will appear. This comes from the acenic driver.
	 */
	while(unlikely(typhoon_num_free_tx(txRing) < (numDesc + 2)))
		smp_rmb();

	first_txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* The addr/addrHi of the packet descriptor hold the skb POINTER
	 * (not a DMA address) so typhoon_clean_tx() can free the skb when
	 * the card reports completion.
	 */
	first_txd->flags = TYPHOON_TX_DESC | TYPHOON_DESC_VALID;
	first_txd->numDesc = 0;
	first_txd->len = 0;
	first_txd->addr = (u64)((unsigned long) skb) & 0xffffffff;
	first_txd->addrHi = (u64)((unsigned long) skb) >> 32;
	first_txd->processFlags = 0;

	if(skb->ip_summed == CHECKSUM_HW) {
		/* The 3XP will figure out if this is UDP/TCP */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_UDP_CHKSUM;
		first_txd->processFlags |= TYPHOON_TX_PF_IP_CHKSUM;
	}

	if(vlan_tx_tag_present(skb)) {
		/* Have the firmware insert the VLAN tag on transmit */
		first_txd->processFlags |=
		    TYPHOON_TX_PF_INSERT_VLAN | TYPHOON_TX_PF_VLAN_PRIORITY;
		first_txd->processFlags |=
		    cpu_to_le32(htons(vlan_tx_tag_get(skb)) <<
				TYPHOON_TX_PF_VLAN_TAG_SHIFT);
	}

	if(skb_tso_size(skb)) {
		/* TSO: one extra option descriptor, filled in by the helper */
		first_txd->processFlags |= TYPHOON_TX_PF_TCP_SEGMENT;
		first_txd->numDesc++;

		typhoon_tso_fill(skb, txRing, tp->txlo_dma_addr);
	}

	txd = (struct tx_desc *) (txRing->ringBase + txRing->lastWrite);
	typhoon_inc_tx_index(&txRing->lastWrite, 1);

	/* No need to worry about padding packet -- the firmware pads
	 * it with zeros to ETH_ZLEN for us.
	 */
	if(skb_shinfo(skb)->nr_frags == 0) {
		/* Linear skb: a single fragment descriptor suffices */
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, skb->len,
				       PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(skb->len);
		txd->addr = cpu_to_le32(skb_dma);
		txd->addrHi = 0;
		first_txd->numDesc++;
	} else {
		int i, len;

		/* Map the linear header area first, then each page fragment */
		len = skb_headlen(skb);
		skb_dma = pci_map_single(tp->tx_pdev, skb->data, len,
					 PCI_DMA_TODEVICE);
		txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
		txd->len = cpu_to_le16(len);
		txd->addr = cpu_to_le32(skb_dma);
		txd->addrHi = 0;
		first_txd->numDesc++;

		for(i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
			void *frag_addr;

			txd = (struct tx_desc *) (txRing->ringBase +
						txRing->lastWrite);
			typhoon_inc_tx_index(&txRing->lastWrite, 1);

			len = frag->size;
			frag_addr = (void *) page_address(frag->page) +
						frag->page_offset;
			skb_dma = pci_map_single(tp->tx_pdev, frag_addr, len,
					 PCI_DMA_TODEVICE);
			txd->flags = TYPHOON_FRAG_DESC | TYPHOON_DESC_VALID;
			txd->len = cpu_to_le16(len);
			txd->addr = cpu_to_le32(skb_dma);
			txd->addrHi = 0;
			first_txd->numDesc++;
		}
	}

	/* Kick the 3XP -- the wmb() orders the descriptor stores before
	 * the doorbell write that makes them visible to the card.
	 */
	wmb();
	writel(txRing->lastWrite, tp->tx_ioaddr + txRing->writeRegister);

	dev->trans_start = jiffies;

	/* If we don't have room to put the worst case packet on the
	 * queue, then we must stop the queue. We need 2 extra
	 * descriptors -- one to prevent ring wrap, and one for the
	 * Tx header.
	 */
	numDesc = MAX_SKB_FRAGS + TSO_NUM_DESCRIPTORS + 1;

	if(typhoon_num_free_tx(txRing) < (numDesc + 2)) {
		netif_stop_queue(dev);

		/* A Tx complete IRQ could have gotten inbetween, making
		 * the ring free again. Only need to recheck here, since
		 * Tx is serialized.
		 */
		if(typhoon_num_free_tx(txRing) >= (numDesc + 2))
			netif_wake_queue(dev);
	}

	return 0;
}
904
905 static void
906 typhoon_set_rx_mode(struct net_device *dev)
907 {
908         struct typhoon *tp = (struct typhoon *) dev->priv;
909         struct cmd_desc xp_cmd;
910         u32 mc_filter[2];
911         u16 filter;
912
913         filter = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
914         if(dev->flags & IFF_PROMISC) {
915                 printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
916                        dev->name);
917                 filter |= TYPHOON_RX_FILTER_PROMISCOUS;
918         } else if((dev->mc_count > multicast_filter_limit) ||
919                   (dev->flags & IFF_ALLMULTI)) {
920                 /* Too many to match, or accept all multicasts. */
921                 filter |= TYPHOON_RX_FILTER_ALL_MCAST;
922         } else if(dev->mc_count) {
923                 struct dev_mc_list *mclist;
924                 int i;
925
926                 memset(mc_filter, 0, sizeof(mc_filter));
927                 for(i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
928                     i++, mclist = mclist->next) {
929                         int bit = ether_crc(ETH_ALEN, mclist->dmi_addr) & 0x3f;
930                         mc_filter[bit >> 5] |= 1 << (bit & 0x1f);
931                 }
932
933                 INIT_COMMAND_NO_RESPONSE(&xp_cmd,
934                                          TYPHOON_CMD_SET_MULTICAST_HASH);
935                 xp_cmd.parm1 = TYPHOON_MCAST_HASH_SET;
936                 xp_cmd.parm2 = cpu_to_le32(mc_filter[0]);
937                 xp_cmd.parm3 = cpu_to_le32(mc_filter[1]);
938                 typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
939
940                 filter |= TYPHOON_RX_FILTER_MCAST_HASH;
941         }
942
943         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
944         xp_cmd.parm1 = filter;
945         typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
946 }
947
948 static int
949 typhoon_do_get_stats(struct typhoon *tp)
950 {
951         struct net_device_stats *stats = &tp->stats;
952         struct net_device_stats *saved = &tp->stats_saved;
953         struct cmd_desc xp_cmd;
954         struct resp_desc xp_resp[7];
955         struct stats_resp *s = (struct stats_resp *) xp_resp;
956         int err;
957
958         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_STATS);
959         err = typhoon_issue_command(tp, 1, &xp_cmd, 7, xp_resp);
960         if(err < 0)
961                 return err;
962
963         /* 3Com's Linux driver uses txMultipleCollisions as it's
964          * collisions value, but there is some other collision info as well...
965          */
966         stats->tx_packets = le32_to_cpu(s->txPackets);
967         stats->tx_bytes = le32_to_cpu(s->txBytes);
968         stats->tx_errors = le32_to_cpu(s->txCarrierLost);
969         stats->tx_carrier_errors = le32_to_cpu(s->txCarrierLost);
970         stats->collisions = le32_to_cpu(s->txMultipleCollisions);
971         stats->rx_packets = le32_to_cpu(s->rxPacketsGood);
972         stats->rx_bytes = le32_to_cpu(s->rxBytesGood);
973         stats->rx_fifo_errors = le32_to_cpu(s->rxFifoOverruns);
974         stats->rx_errors = le32_to_cpu(s->rxFifoOverruns) +
975                         le32_to_cpu(s->BadSSD) + le32_to_cpu(s->rxCrcErrors);
976         stats->rx_crc_errors = le32_to_cpu(s->rxCrcErrors);
977         stats->rx_length_errors = le32_to_cpu(s->rxOversized);
978         tp->speed = (s->linkStatus & TYPHOON_LINK_100MBPS) ?
979                         SPEED_100 : SPEED_10;
980         tp->duplex = (s->linkStatus & TYPHOON_LINK_FULL_DUPLEX) ?
981                         DUPLEX_FULL : DUPLEX_HALF;
982
983         /* add in the saved statistics
984          */
985         stats->tx_packets += saved->tx_packets;
986         stats->tx_bytes += saved->tx_bytes;
987         stats->tx_errors += saved->tx_errors;
988         stats->collisions += saved->collisions;
989         stats->rx_packets += saved->rx_packets;
990         stats->rx_bytes += saved->rx_bytes;
991         stats->rx_fifo_errors += saved->rx_fifo_errors;
992         stats->rx_errors += saved->rx_errors;
993         stats->rx_crc_errors += saved->rx_crc_errors;
994         stats->rx_length_errors += saved->rx_length_errors;
995
996         return 0;
997 }
998
999 static struct net_device_stats *
1000 typhoon_get_stats(struct net_device *dev)
1001 {
1002         struct typhoon *tp = (struct typhoon *) dev->priv;
1003         struct net_device_stats *stats = &tp->stats;
1004         struct net_device_stats *saved = &tp->stats_saved;
1005
1006         smp_rmb();
1007         if(tp->card_state == Sleeping)
1008                 return saved;
1009
1010         if(typhoon_do_get_stats(tp) < 0) {
1011                 printk(KERN_ERR "%s: error getting stats\n", dev->name);
1012                 return saved;
1013         }
1014
1015         return stats;
1016 }
1017
1018 static int
1019 typhoon_set_mac_address(struct net_device *dev, void *addr)
1020 {
1021         struct sockaddr *saddr = (struct sockaddr *) addr;
1022
1023         if(netif_running(dev))
1024                 return -EBUSY;
1025
1026         memcpy(dev->dev_addr, saddr->sa_data, dev->addr_len);
1027         return 0;
1028 }
1029
1030 static inline void
1031 typhoon_ethtool_gdrvinfo(struct typhoon *tp, struct ethtool_drvinfo *info)
1032 {
1033         struct pci_dev *pci_dev = tp->pdev;
1034         struct cmd_desc xp_cmd;
1035         struct resp_desc xp_resp[3];
1036
1037         smp_rmb();
1038         if(tp->card_state == Sleeping) {
1039                 strcpy(info->fw_version, "Sleep image");
1040         } else {
1041                 INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_VERSIONS);
1042                 if(typhoon_issue_command(tp, 1, &xp_cmd, 3, xp_resp) < 0) {
1043                         strcpy(info->fw_version, "Unknown runtime");
1044                 } else {
1045                         strncpy(info->fw_version, (char *) &xp_resp[1], 32);
1046                         info->fw_version[31] = 0;
1047                 }
1048         }
1049
1050         strcpy(info->driver, DRV_MODULE_NAME);
1051         strcpy(info->version, DRV_MODULE_VERSION);
1052         strcpy(info->bus_info, pci_dev->slot_name);
1053 }
1054
1055 static inline void
1056 typhoon_ethtool_gset(struct typhoon *tp, struct ethtool_cmd *cmd)
1057 {
1058         cmd->supported = SUPPORTED_100baseT_Half | SUPPORTED_100baseT_Full |
1059                                 SUPPORTED_Autoneg;
1060
1061         switch (tp->xcvr_select) {
1062         case TYPHOON_XCVR_10HALF:
1063                 cmd->advertising = ADVERTISED_10baseT_Half;
1064                 break;
1065         case TYPHOON_XCVR_10FULL:
1066                 cmd->advertising = ADVERTISED_10baseT_Full;
1067                 break;
1068         case TYPHOON_XCVR_100HALF:
1069                 cmd->advertising = ADVERTISED_100baseT_Half;
1070                 break;
1071         case TYPHOON_XCVR_100FULL:
1072                 cmd->advertising = ADVERTISED_100baseT_Full;
1073                 break;
1074         case TYPHOON_XCVR_AUTONEG:
1075                 cmd->advertising = ADVERTISED_10baseT_Half |
1076                                             ADVERTISED_10baseT_Full |
1077                                             ADVERTISED_100baseT_Half |
1078                                             ADVERTISED_100baseT_Full |
1079                                             ADVERTISED_Autoneg;
1080                 break;
1081         }
1082
1083         if(tp->capabilities & TYPHOON_FIBER) {
1084                 cmd->supported |= SUPPORTED_FIBRE;
1085                 cmd->advertising |= ADVERTISED_FIBRE;
1086                 cmd->port = PORT_FIBRE;
1087         } else {
1088                 cmd->supported |= SUPPORTED_10baseT_Half |
1089                                         SUPPORTED_10baseT_Full |
1090                                         SUPPORTED_TP;
1091                 cmd->advertising |= ADVERTISED_TP;
1092                 cmd->port = PORT_TP;
1093         }
1094
1095         /* need to get stats to make these link speed/duplex valid */
1096         typhoon_do_get_stats(tp);
1097         cmd->speed = tp->speed;
1098         cmd->duplex = tp->duplex;
1099         cmd->phy_address = 0;
1100         cmd->transceiver = XCVR_INTERNAL;
1101         if(tp->xcvr_select == TYPHOON_XCVR_AUTONEG)
1102                 cmd->autoneg = AUTONEG_ENABLE;
1103         else
1104                 cmd->autoneg = AUTONEG_DISABLE;
1105         cmd->maxtxpkt = 1;
1106         cmd->maxrxpkt = 1;
1107 }
1108
1109 static inline int
1110 typhoon_ethtool_sset(struct typhoon *tp, struct ethtool_cmd *cmd)
1111 {
1112         struct cmd_desc xp_cmd;
1113         int xcvr;
1114         int err;
1115
1116         if(cmd->autoneg == AUTONEG_ENABLE) {
1117                 xcvr = TYPHOON_XCVR_AUTONEG;
1118         } else {
1119                 if(cmd->duplex == DUPLEX_HALF) {
1120                         if(cmd->speed == SPEED_10)
1121                                 xcvr = TYPHOON_XCVR_10HALF;
1122                         else if(cmd->speed == SPEED_100)
1123                                 xcvr = TYPHOON_XCVR_100HALF;
1124                         else
1125                                 return -EINVAL;
1126                 } else if(cmd->duplex == DUPLEX_FULL) {
1127                         if(cmd->speed == SPEED_10)
1128                                 xcvr = TYPHOON_XCVR_10FULL;
1129                         else if(cmd->speed == SPEED_100)
1130                                 xcvr = TYPHOON_XCVR_100FULL;
1131                         else
1132                                 return -EINVAL;
1133                 } else
1134                         return -EINVAL;
1135         }
1136
1137         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
1138         xp_cmd.parm1 = cpu_to_le16(xcvr);
1139         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1140         if(err < 0)
1141                 return err;
1142
1143         tp->xcvr_select = xcvr;
1144         if(cmd->autoneg == AUTONEG_ENABLE) {
1145                 tp->speed = 0xff;       /* invalid */
1146                 tp->duplex = 0xff;      /* invalid */
1147         } else {
1148                 tp->speed = cmd->speed;
1149                 tp->duplex = cmd->duplex;
1150         }
1151
1152         return 0;
1153 }
1154
/* SIOCETHTOOL handler: dispatch on the ethtool sub-command found at
 * the start of the user buffer.  Supports driver info, link settings
 * get/set, link state, and Wake-on-LAN get/set; everything else is
 * -EOPNOTSUPP.  All user memory access goes through
 * copy_from_user()/copy_to_user().
 */
static inline int
typhoon_ethtool_ioctl(struct net_device *dev, void *useraddr)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;
	u32 ethcmd;

	if(copy_from_user(&ethcmd, useraddr, sizeof(ethcmd)))
		return -EFAULT;

	switch (ethcmd) {
	case ETHTOOL_GDRVINFO: {
			struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };

			typhoon_ethtool_gdrvinfo(tp, &info);
			if(copy_to_user(useraddr, &info, sizeof(info)))
				return -EFAULT;
			return 0;
		}
	case ETHTOOL_GSET: {
			struct ethtool_cmd cmd = { ETHTOOL_GSET };

			typhoon_ethtool_gset(tp, &cmd);
			if(copy_to_user(useraddr, &cmd, sizeof(cmd)))
				return -EFAULT;
			return 0;
		}
	case ETHTOOL_SSET: {
			struct ethtool_cmd cmd;
			if(copy_from_user(&cmd, useraddr, sizeof(cmd)))
				return -EFAULT;

			return typhoon_ethtool_sset(tp, &cmd);
		}
	case ETHTOOL_GLINK:{
			struct ethtool_value edata = { ETHTOOL_GLINK };

			edata.data = netif_carrier_ok(dev) ? 1 : 0;
			if(copy_to_user(useraddr, &edata, sizeof(edata)))
				return -EFAULT;
			return 0;
		}
	case ETHTOOL_GWOL: {
			struct ethtool_wolinfo wol = { ETHTOOL_GWOL };

			/* translate the driver's wake event mask into
			 * the generic ethtool WAKE_* bits */
			if(tp->wol_events & TYPHOON_WAKE_LINK_EVENT)
				wol.wolopts |= WAKE_PHY;
			if(tp->wol_events & TYPHOON_WAKE_MAGIC_PKT)
				wol.wolopts |= WAKE_MAGIC;
			if(copy_to_user(useraddr, &wol, sizeof(wol)))
				return -EFAULT;
			return 0;
	}
	case ETHTOOL_SWOL: {
			struct ethtool_wolinfo wol;

			if(copy_from_user(&wol, useraddr, sizeof(wol)))
				return -EFAULT;
			/* replace (not merge) the current wake event mask */
			tp->wol_events = 0;
			if(wol.wolopts & WAKE_PHY)
				tp->wol_events |= TYPHOON_WAKE_LINK_EVENT;
			if(wol.wolopts & WAKE_MAGIC)
				tp->wol_events |= TYPHOON_WAKE_MAGIC_PKT;
			return 0;
	}
	default:
		break;
	}

	return -EOPNOTSUPP;
}
1225
1226 static int
1227 typhoon_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
1228 {
1229         switch (cmd) {
1230         case SIOCETHTOOL:
1231                 return typhoon_ethtool_ioctl(dev, (void *) ifr->ifr_data);
1232         default:
1233                 break;
1234         }
1235
1236         return -EOPNOTSUPP;
1237 }
1238
1239 static int
1240 typhoon_wait_interrupt(unsigned long ioaddr)
1241 {
1242         int i, err = 0;
1243
1244         for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
1245                 if(readl(ioaddr + TYPHOON_REG_INTR_STATUS) &
1246                    TYPHOON_INTR_BOOTCMD)
1247                         goto out;
1248                 udelay(TYPHOON_UDELAY);
1249         }
1250
1251         err = -ETIMEDOUT;
1252
1253 out:
1254         writel(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
1255         return err;
1256 }
1257
/* Byte offset of member x within struct typhoon_shared, used to build
 * DMA addresses of the individual rings from tp->shared_dma. */
#define shared_offset(x)        offsetof(struct typhoon_shared, x)
1259
1260 static void
1261 typhoon_init_interface(struct typhoon *tp)
1262 {
1263         struct typhoon_interface *iface = &tp->shared->iface;
1264         dma_addr_t shared_dma;
1265
1266         memset(tp->shared, 0, sizeof(struct typhoon_shared));
1267
1268         /* The *Hi members of iface are all init'd to zero by the memset().
1269          */
1270         shared_dma = tp->shared_dma + shared_offset(indexes);
1271         iface->ringIndex = cpu_to_le32(shared_dma);
1272
1273         shared_dma = tp->shared_dma + shared_offset(txLo);
1274         iface->txLoAddr = cpu_to_le32(shared_dma);
1275         iface->txLoSize = cpu_to_le32(TXLO_ENTRIES * sizeof(struct tx_desc));
1276
1277         shared_dma = tp->shared_dma + shared_offset(txHi);
1278         iface->txHiAddr = cpu_to_le32(shared_dma);
1279         iface->txHiSize = cpu_to_le32(TXHI_ENTRIES * sizeof(struct tx_desc));
1280
1281         shared_dma = tp->shared_dma + shared_offset(rxBuff);
1282         iface->rxBuffAddr = cpu_to_le32(shared_dma);
1283         iface->rxBuffSize = cpu_to_le32(RXFREE_ENTRIES *
1284                                         sizeof(struct rx_free));
1285
1286         shared_dma = tp->shared_dma + shared_offset(rxLo);
1287         iface->rxLoAddr = cpu_to_le32(shared_dma);
1288         iface->rxLoSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1289
1290         shared_dma = tp->shared_dma + shared_offset(rxHi);
1291         iface->rxHiAddr = cpu_to_le32(shared_dma);
1292         iface->rxHiSize = cpu_to_le32(RX_ENTRIES * sizeof(struct rx_desc));
1293
1294         shared_dma = tp->shared_dma + shared_offset(cmd);
1295         iface->cmdAddr = cpu_to_le32(shared_dma);
1296         iface->cmdSize = cpu_to_le32(COMMAND_RING_SIZE);
1297
1298         shared_dma = tp->shared_dma + shared_offset(resp);
1299         iface->respAddr = cpu_to_le32(shared_dma);
1300         iface->respSize = cpu_to_le32(RESPONSE_RING_SIZE);
1301
1302         shared_dma = tp->shared_dma + shared_offset(zeroWord);
1303         iface->zeroAddr = cpu_to_le32(shared_dma);
1304
1305         tp->indexes = &tp->shared->indexes;
1306         tp->txLoRing.ringBase = (u8 *) tp->shared->txLo;
1307         tp->txHiRing.ringBase = (u8 *) tp->shared->txHi;
1308         tp->rxLoRing.ringBase = (u8 *) tp->shared->rxLo;
1309         tp->rxHiRing.ringBase = (u8 *) tp->shared->rxHi;
1310         tp->rxBuffRing.ringBase = (u8 *) tp->shared->rxBuff;
1311         tp->cmdRing.ringBase = (u8 *) tp->shared->cmd;
1312         tp->respRing.ringBase = (u8 *) tp->shared->resp;;
1313
1314         tp->txLoRing.writeRegister = TYPHOON_REG_TX_LO_READY;
1315         tp->txHiRing.writeRegister = TYPHOON_REG_TX_HI_READY;
1316
1317         tp->txlo_dma_addr = iface->txLoAddr;
1318         tp->card_state = Sleeping;
1319         smp_wmb();
1320
1321         tp->offload = TYPHOON_OFFLOAD_IP_CHKSUM | TYPHOON_OFFLOAD_TCP_CHKSUM;
1322         tp->offload |= TYPHOON_OFFLOAD_UDP_CHKSUM | TSO_OFFLOAD_ON;
1323
1324         spin_lock_init(&tp->command_lock);
1325         spin_lock_init(&tp->state_lock);
1326 }
1327
1328 static void
1329 typhoon_init_rings(struct typhoon *tp)
1330 {
1331         memset(tp->indexes, 0, sizeof(struct typhoon_indexes));
1332
1333         tp->txLoRing.lastWrite = 0;
1334         tp->txHiRing.lastWrite = 0;
1335         tp->rxLoRing.lastWrite = 0;
1336         tp->rxHiRing.lastWrite = 0;
1337         tp->rxBuffRing.lastWrite = 0;
1338         tp->cmdRing.lastWrite = 0;
1339         tp->cmdRing.lastWrite = 0;
1340
1341         tp->txLoRing.lastRead = 0;
1342         tp->txHiRing.lastRead = 0;
1343 }
1344
/* Download the runtime firmware image to the 3XP.
 *
 * The image (typhoon_firmware_image, linked into the driver) consists
 * of a file header followed by sections; each section is fed to the
 * boot ROM in PAGE_SIZE chunks through a single consistent-DMA bounce
 * page, with a 16-bit checksum handed to the ROM for verification.
 * The boot-command interrupt enable/mask bits are raised for the
 * duration and restored on exit.
 *
 * Returns 0 on success, -EINVAL for a bad image tag, -ENOMEM if the
 * bounce page cannot be allocated, or -ETIMEDOUT if the card stalls.
 */
static int
typhoon_download_firmware(struct typhoon *tp)
{
	unsigned long ioaddr = tp->ioaddr;
	struct pci_dev *pdev = tp->pdev;
	struct typhoon_file_header *fHdr;
	struct typhoon_section_header *sHdr;
	u8 *image_data;
	void *dpage;
	dma_addr_t dpage_dma;
	unsigned int csum;
	u32 irqEnabled;
	u32 irqMasked;
	u32 numSections;
	u32 section_len;
	u32 len;
	u32 load_addr;
	int i;
	int err;

	err = -EINVAL;
	fHdr = (struct typhoon_file_header *) typhoon_firmware_image;
	image_data = (u8 *) fHdr;

	/* 8 bytes compared: "TYPHOON" plus its terminating NUL */
	if(memcmp(fHdr->tag, "TYPHOON", 8)) {
		printk(KERN_ERR "%s: Invalid firmware image!\n", tp->name);
		goto err_out;
	}

	/* Cannot just map the firmware image using pci_map_single() as
	 * the firmware is part of the kernel/module image, so we allocate
	 * some consistent memory to copy the sections into, as it is simpler,
	 * and short-lived. If we ever split out and require a userland
	 * firmware loader, then we can revisit this.
	 */
	err = -ENOMEM;
	dpage = pci_alloc_consistent(pdev, PAGE_SIZE, &dpage_dma);
	if(!dpage) {
		printk(KERN_ERR "%s: no DMA mem for firmware\n", tp->name);
		goto err_out;
	}

	/* Raise the boot-command interrupt bits so we can poll for them;
	 * the saved values are restored at err_out_irq.
	 */
	irqEnabled = readl(ioaddr + TYPHOON_REG_INTR_ENABLE);
	writel(irqEnabled | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_ENABLE);
	irqMasked = readl(ioaddr + TYPHOON_REG_INTR_MASK);
	writel(irqMasked | TYPHOON_INTR_BOOTCMD,
	       ioaddr + TYPHOON_REG_INTR_MASK);

	err = -ETIMEDOUT;
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: card ready timeout\n", tp->name);
		goto err_out_irq;
	}

	numSections = le32_to_cpu(fHdr->numSections);
	load_addr = le32_to_cpu(fHdr->startAddr);

	/* Announce the runtime image's entry point and start the download */
	writel(TYPHOON_INTR_BOOTCMD, ioaddr + TYPHOON_REG_INTR_STATUS);
	writel(load_addr, ioaddr + TYPHOON_REG_DOWNLOAD_BOOT_ADDR);
	typhoon_post_pci_writes(ioaddr);
	writel(TYPHOON_BOOTCMD_RUNTIME_IMAGE, ioaddr + TYPHOON_REG_COMMAND);

	image_data += sizeof(struct typhoon_file_header);

	/* The readl() in typhoon_wait_interrupt() will force the
	 * last write to the command register to post, so
	 * we don't need a typhoon_post_pci_writes() after it.
	 */
	for(i = 0; i < numSections; i++) {
		sHdr = (struct typhoon_section_header *) image_data;
		image_data += sizeof(struct typhoon_section_header);
		load_addr = le32_to_cpu(sHdr->startAddr);
		section_len = le32_to_cpu(sHdr->len);

		while(section_len) {
			len = min_t(u32, section_len, PAGE_SIZE);

			if(typhoon_wait_interrupt(ioaddr) < 0 ||
			   readl(ioaddr + TYPHOON_REG_STATUS) !=
			   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
				printk(KERN_ERR "%s: segment ready timeout\n",
				       tp->name);
				goto err_out_irq;
			}

			/* Do a pseudo IPv4 checksum on the data -- first
			 * need to convert each u16 to cpu order before
			 * summing. Fortunately, due to the properties of
			 * the checksum, we can do this once, at the end.
			 * NOTE(review): the le16_to_cpu() below is a no-op
			 * on little-endian and swaps on big-endian --
			 * presumably what the boot ROM expects; confirm on
			 * big-endian hardware.
			 */
			csum = csum_partial_copy_nocheck(image_data, dpage,
							 len, 0);
			csum = csum_fold(csum);
			csum = le16_to_cpu(csum);

			/* Hand the chunk's length, checksum, destination and
			 * bounce-page DMA address to the ROM, then trigger
			 * the copy.
			 */
			writel(len, ioaddr + TYPHOON_REG_BOOT_LENGTH);
			writel(csum, ioaddr + TYPHOON_REG_BOOT_CHECKSUM);
			writel(load_addr, ioaddr + TYPHOON_REG_BOOT_DEST_ADDR);
			writel(0, ioaddr + TYPHOON_REG_BOOT_DATA_HI);
			writel(dpage_dma, ioaddr + TYPHOON_REG_BOOT_DATA_LO);
			typhoon_post_pci_writes(ioaddr);
			writel(TYPHOON_BOOTCMD_SEG_AVAILABLE,
			       ioaddr + TYPHOON_REG_COMMAND);

			image_data += len;
			load_addr += len;
			section_len -= len;
		}
	}

	if(typhoon_wait_interrupt(ioaddr) < 0 ||
	   readl(ioaddr + TYPHOON_REG_STATUS) !=
	   TYPHOON_STATUS_WAITING_FOR_SEGMENT) {
		printk(KERN_ERR "%s: final segment ready timeout\n", tp->name);
		goto err_out_irq;
	}

	writel(TYPHOON_BOOTCMD_DNLD_COMPLETE, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk(KERN_ERR "%s: boot ready timeout, status 0x%0x\n",
		       tp->name, readl(ioaddr + TYPHOON_REG_STATUS));
		goto err_out_irq;
	}

	err = 0;

err_out_irq:
	/* restore the caller's interrupt enable/mask state */
	writel(irqMasked, ioaddr + TYPHOON_REG_INTR_MASK);
	writel(irqEnabled, ioaddr + TYPHOON_REG_INTR_ENABLE);

	pci_free_consistent(pdev, PAGE_SIZE, dpage, dpage_dma);

err_out:
	return err;
}
1482
/* Hand the boot record (the shared area's DMA address) to the 3XP and
 * tell it to start the downloaded runtime image.
 *
 * @initial_status: the TYPHOON_STATUS_* value the card must report
 *	before we may post the boot record (differs between the reset
 *	and sleep-wakeup paths).
 *
 * Returns 0 on success, -ETIMEDOUT if the card never reaches the
 * expected status.
 */
static int
typhoon_boot_3XP(struct typhoon *tp, u32 initial_status)
{
	unsigned long ioaddr = tp->ioaddr;

	if(typhoon_wait_status(ioaddr, initial_status) < 0) {
		printk(KERN_ERR "%s: boot ready timeout\n", tp->name);
		goto out_timeout;
	}

	/* Post the boot record's DMA address (high dword is zero) */
	writel(0, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_HI);
	writel(tp->shared_dma, ioaddr + TYPHOON_REG_BOOT_RECORD_ADDR_LO);
	typhoon_post_pci_writes(ioaddr);
	writel(TYPHOON_BOOTCMD_REG_BOOT_RECORD, ioaddr + TYPHOON_REG_COMMAND);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_RUNNING) < 0) {
		printk(KERN_ERR "%s: boot finish timeout (status 0x%x)\n",
		       tp->name, readl(ioaddr + TYPHOON_REG_STATUS));
		goto out_timeout;
	}

	/* Clear the Transmit and Command ready registers
	 */
	writel(0, ioaddr + TYPHOON_REG_TX_HI_READY);
	writel(0, ioaddr + TYPHOON_REG_CMD_READY);
	writel(0, ioaddr + TYPHOON_REG_TX_LO_READY);
	typhoon_post_pci_writes(ioaddr);
	writel(TYPHOON_BOOTCMD_BOOT, ioaddr + TYPHOON_REG_COMMAND);

	return 0;

out_timeout:
	return -ETIMEDOUT;
}
1517
/* Walk the Tx ring from txRing->lastRead up to the card's cleared
 * offset (*index), releasing each completed descriptor.
 *
 * Returns the new lastRead offset; the caller is responsible for
 * writing it back into txRing->lastRead.
 */
static u32
typhoon_clean_tx(struct typhoon *tp, struct transmit_ring *txRing,
			volatile u32 * index)
{
	u32 lastRead = txRing->lastRead;
	struct tx_desc *tx;
	dma_addr_t skb_dma;
	int dma_len;
	int type;

	while(lastRead != le32_to_cpu(*index)) {
		tx = (struct tx_desc *) (txRing->ringBase + lastRead);
		type = tx->flags & TYPHOON_TYPE_MASK;

		if(type == TYPHOON_TX_DESC) {
			/* This tx_desc describes a packet. The skb pointer
			 * itself was stashed in addr/addrHi at transmit time,
			 * so just free it. (The u64 is truncated to unsigned
			 * long on 32 bit, where a pointer's high bits are
			 * zero anyway.)
			 */
			unsigned long ptr = tx->addr | ((u64)tx->addrHi << 32);
			struct sk_buff *skb = (struct sk_buff *) ptr;
			dev_kfree_skb_irq(skb);
		} else if(type == TYPHOON_FRAG_DESC) {
			/* This tx_desc describes a memory mapping. Free it.
			 */
			skb_dma = (dma_addr_t) le32_to_cpu(tx->addr);
			dma_len = le16_to_cpu(tx->len);
			pci_unmap_single(tp->pdev, skb_dma, dma_len,
				       PCI_DMA_TODEVICE);
		}

		/* mark the descriptor free and advance (with wrap) */
		tx->flags = 0;
		typhoon_inc_tx_index(&lastRead, 1);
	}

	return lastRead;
}
1553
1554 static void
1555 typhoon_tx_complete(struct typhoon *tp, struct transmit_ring *txRing,
1556                         volatile u32 * index)
1557 {
1558         u32 lastRead;
1559         int numDesc = MAX_SKB_FRAGS + 1;
1560
1561         /* This will need changing if we start to use the Hi Tx ring. */
1562         lastRead = typhoon_clean_tx(tp, txRing, index);
1563         if(netif_queue_stopped(tp->dev) && typhoon_num_free(txRing->lastWrite,
1564                                 lastRead, TXLO_ENTRIES) > (numDesc + 2))
1565                 netif_wake_queue(tp->dev);
1566
1567         txRing->lastRead = lastRead;
1568         smp_wmb();
1569 }
1570
/* Hand the Rx buffer in slot @idx straight back to the card by posting
 * a fresh rx_free entry for it, without reallocating the skb.
 *
 * If the free ring is full, the skb is dropped instead so the slot can
 * be refilled later by typhoon_fill_free_ring()/typhoon_alloc_rx_skb().
 */
static void
typhoon_recycle_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;

	/* ring is full when advancing lastWrite by one entry would land
	 * on the card's cleared offset
	 */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				indexes->rxBuffCleared) {
		/* no room in ring, just drop the skb
		 */
		dev_kfree_skb_any(rxb->skb);
		rxb->skb = NULL;
		return;
	}

	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	/* virtAddr carries our rxbuffers[] index; the card echoes it
	 * back in the completion descriptor (see typhoon_rx)
	 */
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(rxb->dma_addr);

	/* Tell the card about it -- the barrier makes sure the descriptor
	 * contents are visible before the ready index is published
	 */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
}
1597
/* Allocate a fresh Rx skb for slot @idx, DMA-map it, and post it to
 * the card on the rx free ring.
 *
 * Returns 0 on success, -ENOMEM if the free ring is full or the skb
 * allocation fails (the slot is left with rxb->skb == NULL so a later
 * typhoon_fill_free_ring() pass can retry it).
 */
static int
typhoon_alloc_rx_skb(struct typhoon *tp, u32 idx)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct rxbuff_ent *rxb = &tp->rxbuffers[idx];
	struct basic_ring *ring = &tp->rxBuffRing;
	struct rx_free *r;
	struct sk_buff *skb;
	dma_addr_t dma_addr;

	rxb->skb = NULL;

	/* full when one more entry would reach the card's cleared offset */
	if((ring->lastWrite + sizeof(*r)) % (RXFREE_ENTRIES * sizeof(*r)) ==
				indexes->rxBuffCleared)
		return -ENOMEM;

	skb = dev_alloc_skb(PKT_BUF_SZ);
	if(!skb)
		return -ENOMEM;

#if 0
	/* Please, 3com, fix the firmware to allow DMA to a unaligned
	 * address! Pretty please?
	 */
	skb_reserve(skb, 2);
#endif

	skb->dev = tp->dev;
	dma_addr = pci_map_single(tp->pdev, skb->tail,
				  PKT_BUF_SZ, PCI_DMA_FROMDEVICE);

	/* Since no card does 64 bit DAC, the high bits will never
	 * change from zero.
	 */
	r = (struct rx_free *) (ring->ringBase + ring->lastWrite);
	typhoon_inc_rxfree_index(&ring->lastWrite, 1);
	/* virtAddr = our slot index, echoed back by the card in the
	 * completion descriptor
	 */
	r->virtAddr = idx;
	r->physAddr = cpu_to_le32(dma_addr);
	rxb->skb = skb;
	rxb->dma_addr = dma_addr;

	/* Tell the card about it -- descriptor must be visible before
	 * the ready index is published
	 */
	wmb();
	indexes->rxBuffReady = cpu_to_le32(ring->lastWrite);
	return 0;
}
1644
/* Drain completed Rx descriptors from @rxRing, from *cleared up to
 * *ready, delivering at most @budget packets up the stack.
 *
 * Small packets (< rx_copybreak) are copied into a freshly allocated
 * skb and the original DMA buffer is recycled back to the card;
 * larger packets hand the buffer itself to the stack and a new buffer
 * is allocated in its place. Returns the number of packets delivered.
 */
static int
typhoon_rx(struct typhoon *tp, struct basic_ring *rxRing, volatile u32 * ready,
	   volatile u32 * cleared, int budget)
{
	struct rx_desc *rx;
	struct sk_buff *skb, *new_skb;
	struct rxbuff_ent *rxb;
	dma_addr_t dma_addr;
	u32 local_ready;
	u32 rxaddr;
	int pkt_len;
	u32 idx;
	u32 csum_bits;
	int received;

	received = 0;
	local_ready = le32_to_cpu(*ready);
	rxaddr = le32_to_cpu(*cleared);
	while(rxaddr != local_ready && budget > 0) {
		rx = (struct rx_desc *) (rxRing->ringBase + rxaddr);
		/* addr carries the rxbuffers[] slot index we stored in the
		 * free descriptor's virtAddr (see typhoon_alloc_rx_skb)
		 */
		idx = rx->addr;
		rxb = &tp->rxbuffers[idx];
		skb = rxb->skb;
		dma_addr = rxb->dma_addr;

		/* advance our cleared offset, wrapping at ring end */
		rxaddr += sizeof(struct rx_desc);
		rxaddr %= RX_ENTRIES * sizeof(struct rx_desc);

		/* bad frames are recycled and do not count against budget */
		if(rx->flags & TYPHOON_RX_ERROR) {
			typhoon_recycle_rx_skb(tp, idx);
			continue;
		}

		pkt_len = le16_to_cpu(rx->frameLen);

		if(pkt_len < rx_copybreak &&
		   (new_skb = dev_alloc_skb(pkt_len + 2)) != NULL) {
			/* copybreak: copy into a small skb (reserving 2
			 * bytes to align the IP header) and give the DMA
			 * buffer straight back to the card
			 */
			new_skb->dev = tp->dev;
			skb_reserve(new_skb, 2);
			pci_dma_sync_single(tp->pdev, dma_addr, PKT_BUF_SZ,
					    PCI_DMA_FROMDEVICE);
			eth_copy_and_sum(new_skb, skb->tail, pkt_len, 0);
			skb_put(new_skb, pkt_len);
			typhoon_recycle_rx_skb(tp, idx);
		} else {
			/* hand the full buffer up and refill the slot */
			new_skb = skb;
			skb_put(new_skb, pkt_len);
			pci_unmap_single(tp->pdev, dma_addr, PKT_BUF_SZ,
				       PCI_DMA_FROMDEVICE);
			typhoon_alloc_rx_skb(tp, idx);
		}
		new_skb->protocol = eth_type_trans(new_skb, tp->dev);
		/* checksum is only trusted when the IP check passed AND
		 * exactly one of the TCP/UDP checks passed
		 */
		csum_bits = rx->rxStatus & (TYPHOON_RX_IP_CHK_GOOD |
			TYPHOON_RX_UDP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD);
		if(csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_TCP_CHK_GOOD)
		   || csum_bits ==
		   (TYPHOON_RX_IP_CHK_GOOD | TYPHOON_RX_UDP_CHK_GOOD)) {
			new_skb->ip_summed = CHECKSUM_UNNECESSARY;
		} else
			new_skb->ip_summed = CHECKSUM_NONE;

		/* state_lock guards tp->vlgrp against concurrent
		 * register/unregister
		 */
		spin_lock(&tp->state_lock);
		if(tp->vlgrp != NULL && rx->rxStatus & TYPHOON_RX_VLAN)
			vlan_hwaccel_receive_skb(new_skb, tp->vlgrp,
						 ntohl(rx->vlanTag) & 0xffff);
		else
			netif_receive_skb(new_skb);
		spin_unlock(&tp->state_lock);

		tp->dev->last_rx = jiffies;
		received++;
		budget--;
	}
	*cleared = cpu_to_le32(rxaddr);

	return received;
}
1723
1724 static void
1725 typhoon_fill_free_ring(struct typhoon *tp)
1726 {
1727         u32 i;
1728
1729         for(i = 0; i < RXENT_ENTRIES; i++) {
1730                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1731                 if(rxb->skb)
1732                         continue;
1733                 if(typhoon_alloc_rx_skb(tp, i) < 0)
1734                         break;
1735         }
1736 }
1737
/* NAPI-style poll handler (installed as dev->poll).
 *
 * Processes pending command responses and Tx completions, then
 * receives from the Hi ring first and the Lo ring with whatever
 * budget remains. Returns 0 (and re-enables interrupts) when all
 * work is done, 1 when the budget was exhausted and we should be
 * polled again.
 */
static int
typhoon_poll(struct net_device *dev, int *total_budget)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;
	struct typhoon_indexes *indexes = tp->indexes;
	int orig_budget = *total_budget;
	int budget, work_done, done;

	rmb();
	/* don't consume responses here if a command issuer is waiting
	 * for one (tp->awaiting_resp)
	 */
	if(!tp->awaiting_resp && indexes->respReady != indexes->respCleared)
			typhoon_process_response(tp, 0, NULL);

	if(le32_to_cpu(indexes->txLoCleared) != tp->txLoRing.lastRead)
		typhoon_tx_complete(tp, &tp->txLoRing, &indexes->txLoCleared);

	/* budget is the lesser of the caller's total and our quota */
	if(orig_budget > dev->quota)
		orig_budget = dev->quota;

	budget = orig_budget;
	work_done = 0;
	done = 1;

	if(indexes->rxHiCleared != indexes->rxHiReady) {
		work_done = typhoon_rx(tp, &tp->rxHiRing, &indexes->rxHiReady,
					&indexes->rxHiCleared, budget);
		budget -= work_done;
	}

	if(indexes->rxLoCleared != indexes->rxLoReady) {
		work_done += typhoon_rx(tp, &tp->rxLoRing, &indexes->rxLoReady,
					&indexes->rxLoCleared, budget);
	}

	if(work_done) {
		*total_budget -= work_done;
		dev->quota -= work_done;

		/* used the whole budget -- assume there is more to do */
		if(work_done >= orig_budget)
			done = 0;
	}

	if(le32_to_cpu(indexes->rxBuffCleared) == tp->rxBuffRing.lastWrite) {
		/* rxBuff ring is empty, try to fill it. */
		typhoon_fill_free_ring(tp);
	}

	if(done) {
		/* all caught up: leave poll mode and unmask interrupts */
		netif_rx_complete(dev);
		writel(TYPHOON_INTR_NONE, tp->ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(tp->ioaddr);
	}

	return (done ? 0 : 1);
}
1792
/* Interrupt handler: acknowledge the card, mask further interrupts,
 * and hand all real work to typhoon_poll() via netif_rx_schedule.
 */
static irqreturn_t
typhoon_interrupt(int irq, void *dev_instance, struct pt_regs *rgs)
{
	struct net_device *dev = (struct net_device *) dev_instance;
	unsigned long ioaddr = dev->base_addr;
	u32 intr_status;

	intr_status = readl(ioaddr + TYPHOON_REG_INTR_STATUS);
	/* shared IRQ line: not ours unless the host interrupt bit is set */
	if(!(intr_status & TYPHOON_INTR_HOST_INT))
		return IRQ_NONE;

	/* ack by writing the status bits back */
	writel(intr_status, ioaddr + TYPHOON_REG_INTR_STATUS);

	if(netif_rx_schedule_prep(dev)) {
		/* mask everything until the poll handler unmasks again */
		writel(TYPHOON_INTR_ALL, ioaddr + TYPHOON_REG_INTR_MASK);
		typhoon_post_pci_writes(ioaddr);
		__netif_rx_schedule(dev);
	} else {
		/* should not happen -- interrupts stay masked while polling */
		printk(KERN_ERR "%s: Error, poll already scheduled\n",
		       dev->name);
	}
	return IRQ_HANDLED;
}
1816
1817 static void
1818 typhoon_free_rx_rings(struct typhoon *tp)
1819 {
1820         u32 i;
1821
1822         for(i = 0; i < RXENT_ENTRIES; i++) {
1823                 struct rxbuff_ent *rxb = &tp->rxbuffers[i];
1824                 if(rxb->skb) {
1825                         pci_unmap_single(tp->pdev, rxb->dma_addr, PKT_BUF_SZ,
1826                                        PCI_DMA_FROMDEVICE);
1827                         dev_kfree_skb(rxb->skb);
1828                         rxb->skb = NULL;
1829                 }
1830         }
1831 }
1832
1833 static int
1834 typhoon_sleep(struct typhoon *tp, int state, u16 events)
1835 {
1836         struct pci_dev *pdev = tp->pdev;
1837         unsigned long ioaddr = tp->ioaddr;
1838         struct cmd_desc xp_cmd;
1839         int err;
1840
1841         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_ENABLE_WAKE_EVENTS);
1842         xp_cmd.parm1 = events;
1843         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1844         if(err < 0) {
1845                 printk(KERN_ERR "%s: typhoon_sleep(): wake events cmd err %d\n",
1846                                 tp->name, err);
1847                 return err;
1848         }
1849
1850         INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_GOTO_SLEEP);
1851         err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
1852         if(err < 0) {
1853                 printk(KERN_ERR "%s: typhoon_sleep(): sleep cmd err %d\n",
1854                                 tp->name, err);
1855                 return err;
1856         }
1857
1858         if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_SLEEPING) < 0)
1859                 return -ETIMEDOUT;
1860
1861         pci_enable_wake(tp->pdev, state, 1);
1862         pci_disable_device(pdev);
1863         return pci_set_power_state(pdev, state);
1864 }
1865
/* Bring the card out of sleep: restore PCI power/config state and
 * issue the WAKEUP boot command. If the card does not come back to
 * the WAITING_FOR_HOST state, fall back to a full reset (with the
 * given wait_type). Returns 0 or the reset's error code.
 */
static int
typhoon_wakeup(struct typhoon *tp, int wait_type)
{
	struct pci_dev *pdev = tp->pdev;
	unsigned long ioaddr = tp->ioaddr;

	pci_set_power_state(pdev, 0);
	/* restore the config space saved in typhoon_init_one() */
	pci_restore_state(pdev, tp->pci_state);

	writel(TYPHOON_BOOTCMD_WAKEUP, ioaddr + TYPHOON_REG_COMMAND);
	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		return typhoon_reset(ioaddr, wait_type);

	return 0;
}
1881
/* Bring the interface fully up: initialize the rings, download and
 * boot the runtime firmware image, configure the card via a sequence
 * of commands, then enable Tx/Rx and interrupts.
 *
 * Returns 0 on success. On any failure the card is reset and the
 * rings are freed and re-initialized, so the caller sees a clean
 * (sleep-image) state.
 */
static int
typhoon_start_runtime(struct typhoon *tp)
{
	struct net_device *dev = tp->dev;
	unsigned long ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int err;

	typhoon_init_rings(tp);
	typhoon_fill_free_ring(tp);

	err = typhoon_download_firmware(tp);
	if(err < 0) {
		printk("%s: cannot load runtime on 3XP\n", tp->name);
		goto error_out;
	}

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_BOOT) < 0) {
		printk("%s: cannot boot 3XP\n", tp->name);
		err = -EIO;
		goto error_out;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAX_PKT_SIZE);
	xp_cmd.parm1 = cpu_to_le16(PKT_BUF_SZ);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* MAC is passed as two parms: first 2 bytes, then last 4 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* Disable IRQ coalescing -- we can reenable it when 3Com gives
	 * us some more information on how to control it.
	 */
	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_IRQ_COALESCE_CTRL);
	xp_cmd.parm1 = 0;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_XCVR_SELECT);
	xp_cmd.parm1 = tp->xcvr_select;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_VLAN_TYPE_WRITE);
	xp_cmd.parm1 = __constant_cpu_to_le16(ETH_P_8021Q);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* state_lock guards tp->offload against concurrent changes */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_OFFLOAD_TASKS);
	spin_lock_bh(&tp->state_lock);
	xp_cmd.parm2 = tp->offload;
	xp_cmd.parm3 = tp->offload;
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	spin_unlock_bh(&tp->state_lock);
	if(err < 0)
		goto error_out;

	typhoon_set_rx_mode(dev);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_ENABLE);
	err = typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);
	if(err < 0)
		goto error_out;

	/* publish Running before interrupts can fire */
	tp->card_state = Running;
	smp_wmb();

	writel(TYPHOON_INTR_ENABLE_ALL, ioaddr + TYPHOON_REG_INTR_ENABLE);
	writel(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_MASK);
	typhoon_post_pci_writes(ioaddr);

	return 0;

error_out:
	typhoon_reset(ioaddr, WaitNoSleep);
	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);
	return err;
}
1975
/* Orderly shutdown of the running firmware: disable Rx, drain Tx,
 * disable Tx, snapshot statistics, halt the image, then reset the
 * card (with @wait_type controlling how the reset waits). Any Tx
 * packets still outstanding after the reset are cleaned up here.
 *
 * Returns 0 on success, -ETIMEDOUT if the reset fails.
 */
static int
typhoon_stop_runtime(struct typhoon *tp, int wait_type)
{
	struct typhoon_indexes *indexes = tp->indexes;
	struct transmit_ring *txLo = &tp->txLoRing;
	unsigned long ioaddr = tp->ioaddr;
	struct cmd_desc xp_cmd;
	int i;

	/* Disable interrupts early, since we can't schedule a poll
	 * when called with !netif_running(). This will be posted
	 * when we force the posting of the command.
	 */
	writel(TYPHOON_INTR_NONE, ioaddr + TYPHOON_REG_INTR_ENABLE);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_RX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* Wait 1/2 sec for any outstanding transmits to occur
	 * We'll cleanup after the reset if this times out.
	 */
	for(i = 0; i < TYPHOON_WAIT_TIMEOUT; i++) {
		if(indexes->txLoCleared == cpu_to_le32(txLo->lastWrite))
			break;
		udelay(TYPHOON_UDELAY);
	}

	if(i == TYPHOON_WAIT_TIMEOUT)
		printk(KERN_ERR
		       "%s: halt timed out waiting for Tx to complete\n",
		       tp->name);

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_TX_DISABLE);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	/* save the statistics so when we bring the interface up again,
	 * the values reported to userspace are correct.
	 */
	tp->card_state = Sleeping;
	smp_wmb();
	typhoon_do_get_stats(tp);
	memcpy(&tp->stats_saved, &tp->stats, sizeof(struct net_device_stats));

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_HALT);
	typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL);

	if(typhoon_wait_status(ioaddr, TYPHOON_STATUS_HALTED) < 0)
		printk(KERN_ERR "%s: timed out waiting for 3XP to halt\n",
		       tp->name);

	if(typhoon_reset(ioaddr, wait_type) < 0) {
		printk(KERN_ERR "%s: unable to reset 3XP\n", tp->name);
		return -ETIMEDOUT;
	}

	/* cleanup any outstanding Tx packets */
	if(indexes->txLoCleared != cpu_to_le32(txLo->lastWrite)) {
		indexes->txLoCleared = cpu_to_le32(txLo->lastWrite);
		typhoon_clean_tx(tp, &tp->txLoRing, &indexes->txLoCleared);
	}

	return 0;
}
2039
2040 static void
2041 typhoon_tx_timeout(struct net_device *dev)
2042 {
2043         struct typhoon *tp = (struct typhoon *) dev->priv;
2044
2045         if(typhoon_reset(dev->base_addr, WaitNoSleep) < 0) {
2046                 printk(KERN_WARNING "%s: could not reset in tx timeout\n",
2047                                         dev->name);
2048                 goto truely_dead;
2049         }
2050
2051         /* If we ever start using the Hi ring, it will need cleaning too */
2052         typhoon_clean_tx(tp, &tp->txLoRing, &tp->indexes->txLoCleared);
2053         typhoon_free_rx_rings(tp);
2054
2055         if(typhoon_start_runtime(tp) < 0) {
2056                 printk(KERN_ERR "%s: could not start runtime in tx timeout\n",
2057                                         dev->name);
2058                 goto truely_dead;
2059         }
2060
2061         netif_wake_queue(dev);
2062         return;
2063
2064 truely_dead:
2065         /* Reset the hardware, and turn off carrier to avoid more timeouts */
2066         typhoon_reset(dev->base_addr, NoWait);
2067         netif_carrier_off(dev);
2068 }
2069
/* dev->open handler: wake the card, grab the (shared) IRQ, and start
 * the runtime image. On failure we try to put the card back into its
 * sleep image so a later open can succeed; if even that fails we just
 * reset it. Returns 0 or the first error encountered.
 */
static int
typhoon_open(struct net_device *dev)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;
	int err;

	err = typhoon_wakeup(tp, WaitSleep);
	if(err < 0) {
		printk(KERN_ERR "%s: unable to wakeup device\n", dev->name);
		goto out_sleep;
	}

	err = request_irq(dev->irq, &typhoon_interrupt, SA_SHIRQ,
				dev->name, dev);
	if(err < 0)
		goto out_sleep;

	err = typhoon_start_runtime(tp);
	if(err < 0)
		goto out_irq;

	netif_start_queue(dev);
	return 0;

out_irq:
	free_irq(dev->irq, dev);

out_sleep:
	/* reboot into the sleep image before sleeping the card */
	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to reboot into sleep img\n",
				dev->name);
		typhoon_reset(dev->base_addr, NoWait);
		goto out;
	}

	if(typhoon_sleep(tp, 3, 0) < 0)
		printk(KERN_ERR "%s: unable to go back to sleep\n", dev->name);

out:
	return err;
}
2111
/* dev->stop handler: stop the runtime, release the IRQ and Rx
 * buffers, then reboot the sleep image and put the card to sleep
 * (state 3, no wake events). Always returns 0; failures along the
 * way are only logged.
 */
static int
typhoon_close(struct net_device *dev)
{
	struct typhoon *tp = (struct typhoon *) dev->priv;

	netif_stop_queue(dev);

	if(typhoon_stop_runtime(tp, WaitSleep) < 0)
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);

	/* Make sure there is no irq handler running on a different CPU. */
	typhoon_synchronize_irq(dev->irq);
	free_irq(dev->irq, dev);

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0)
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);

	if(typhoon_sleep(tp, 3, 0) < 0)
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);

	return 0;
}
2137
2138 #ifdef CONFIG_PM
/* PM resume hook: wake the card and restart the runtime. A no-op if
 * the interface is down (it will be brought up normally by open()).
 * On failure the card is reset and -EBUSY returned.
 */
static int
typhoon_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = (struct typhoon *) dev->priv;

	/* If we're down, resume when we are upped.
	 */
	if(!netif_running(dev))
		return 0;

	if(typhoon_wakeup(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: critical: could not wake up in resume\n",
				dev->name);
		goto reset;
	}

	if(typhoon_start_runtime(tp) < 0) {
		printk(KERN_ERR "%s: critical: could not start runtime in "
				"resume\n", dev->name);
		goto reset;
	}

	netif_device_attach(dev);
	netif_start_queue(dev);
	return 0;

reset:
	typhoon_reset(dev->base_addr, NoWait);
	return -EBUSY;
}
2170
/* PM suspend hook: stop the runtime, boot the sleep image, reprogram
 * the MAC address and a minimal Rx filter (directed + broadcast, so
 * wake-on-LAN frames are seen), then sleep the card with the user's
 * configured wake events.
 *
 * Refuses (-EBUSY) when VLANs are registered together with magic
 * packet wake, and on any failure resumes the device before
 * returning -EBUSY.
 */
static int
typhoon_suspend(struct pci_dev *pdev, u32 state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct typhoon *tp = (struct typhoon *) dev->priv;
	struct cmd_desc xp_cmd;

	/* If we're down, we're already suspended.
	 */
	if(!netif_running(dev))
		return 0;

	/* state_lock guards tp->vlgrp */
	spin_lock_bh(&tp->state_lock);
	if(tp->vlgrp && tp->wol_events & TYPHOON_WAKE_MAGIC_PKT) {
		spin_unlock_bh(&tp->state_lock);
		printk(KERN_ERR "%s: cannot do WAKE_MAGIC with VLANS\n",
				dev->name);
		return -EBUSY;
	}
	spin_unlock_bh(&tp->state_lock);

	netif_device_detach(dev);

	if(typhoon_stop_runtime(tp, WaitNoSleep) < 0) {
		printk(KERN_ERR "%s: unable to stop runtime\n", dev->name);
		goto need_resume;
	}

	typhoon_free_rx_rings(tp);
	typhoon_init_rings(tp);

	if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
		printk(KERN_ERR "%s: unable to boot sleep image\n", dev->name);
		goto need_resume;
	}

	/* same split as typhoon_start_runtime: 2 bytes, then 4 */
	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_MAC_ADDRESS);
	xp_cmd.parm1 = cpu_to_le16(ntohs(*(u16 *)&dev->dev_addr[0]));
	xp_cmd.parm2 = cpu_to_le32(ntohl(*(u32 *)&dev->dev_addr[2]));
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set mac address in suspend\n",
				dev->name);
		goto need_resume;
	}

	INIT_COMMAND_NO_RESPONSE(&xp_cmd, TYPHOON_CMD_SET_RX_FILTER);
	xp_cmd.parm1 = TYPHOON_RX_FILTER_DIRECTED | TYPHOON_RX_FILTER_BROADCAST;
	if(typhoon_issue_command(tp, 1, &xp_cmd, 0, NULL) < 0) {
		printk(KERN_ERR "%s: unable to set rx filter in suspend\n",
				dev->name);
		goto need_resume;
	}

	if(typhoon_sleep(tp, state, tp->wol_events) < 0) {
		printk(KERN_ERR "%s: unable to put card to sleep\n", dev->name);
		goto need_resume;
	}

	return 0;

need_resume:
	typhoon_resume(pdev);
	return -EBUSY;
}
2235
/* Thin pass-through to pci_enable_wake() -- presumably kept as a
 * separate function to match the pci_driver enable_wake hook
 * signature (TODO confirm against the driver struct).
 */
static int
typhoon_enable_wake(struct pci_dev *pdev, u32 state, int enable)
{
	return pci_enable_wake(pdev, state, enable);
}
2241 #endif
2242
2243 static int __devinit
2244 typhoon_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
2245 {
2246         static int did_version = 0;
2247         struct net_device *dev;
2248         struct typhoon *tp;
2249         int card_id = (int) ent->driver_data;
2250         unsigned long ioaddr;
2251         void *shared;
2252         dma_addr_t shared_dma;
2253         struct cmd_desc xp_cmd;
2254         struct resp_desc xp_resp;
2255         int i;
2256         int err = 0;
2257
2258         if(!did_version++)
2259                 printk(KERN_INFO "%s", version);
2260
2261         dev = alloc_etherdev(sizeof(*tp));
2262         if(dev == NULL) {
2263                 printk(ERR_PFX "%s: unable to alloc new net device\n",
2264                        pdev->slot_name);
2265                 err = -ENOMEM;
2266                 goto error_out;
2267         }
2268         SET_MODULE_OWNER(dev);
2269         SET_NETDEV_DEV(dev, &pdev->dev);
2270
2271         err = pci_enable_device(pdev);
2272         if(err < 0) {
2273                 printk(ERR_PFX "%s: unable to enable device\n",
2274                        pdev->slot_name);
2275                 goto error_out_dev;
2276         }
2277
2278         /* If we transitioned from D3->D0 in pci_enable_device(),
2279          * we lost our configuration and need to restore it to the
2280          * conditions at boot.
2281          */
2282         pci_restore_state(pdev, NULL);
2283
2284         err = pci_set_dma_mask(pdev, 0xffffffffULL);
2285         if(err < 0) {
2286                 printk(ERR_PFX "%s: No usable DMA configuration\n",
2287                        pdev->slot_name);
2288                 goto error_out_dev;
2289         }
2290
2291         /* sanity checks, resource #1 is our mmio area
2292          */
2293         if(!(pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
2294                 printk(ERR_PFX
2295                        "%s: region #1 not a PCI MMIO resource, aborting\n",
2296                        pdev->slot_name);
2297                 err = -ENODEV;
2298                 goto error_out_dev;
2299         }
2300         if(pci_resource_len(pdev, 1) < 128) {
2301                 printk(ERR_PFX "%s: Invalid PCI MMIO region size, aborting\n",
2302                        pdev->slot_name);
2303                 err = -ENODEV;
2304                 goto error_out_dev;
2305         }
2306
2307         err = pci_request_regions(pdev, "typhoon");
2308         if(err < 0) {
2309                 printk(ERR_PFX "%s: could not request regions\n",
2310                        pdev->slot_name);
2311                 goto error_out_dev;
2312         }
2313
2314         pci_set_master(pdev);
2315         pci_set_mwi(pdev);
2316
2317         /* map our MMIO region
2318          */
2319         ioaddr = pci_resource_start(pdev, 1);
2320         ioaddr = (unsigned long) ioremap(ioaddr, 128);
2321         if(!ioaddr) {
2322                 printk(ERR_PFX "%s: cannot remap MMIO, aborting\n",
2323                        pdev->slot_name);
2324                 err = -EIO;
2325                 goto error_out_regions;
2326         }
2327         dev->base_addr = ioaddr;
2328
2329         /* allocate pci dma space for rx and tx descriptor rings
2330          */
2331         shared = pci_alloc_consistent(pdev, sizeof(struct typhoon_shared),
2332                                       &shared_dma);
2333         if(!shared) {
2334                 printk(ERR_PFX "%s: could not allocate DMA memory\n",
2335                        pdev->slot_name);
2336                 err = -ENOMEM;
2337                 goto error_out_remap;
2338         }
2339
2340         dev->irq = pdev->irq;
2341         tp = dev->priv;
2342         tp->shared = (struct typhoon_shared *) shared;
2343         tp->shared_dma = shared_dma;
2344         tp->pdev = pdev;
2345         tp->tx_pdev = pdev;
2346         tp->ioaddr = dev->base_addr;
2347         tp->tx_ioaddr = dev->base_addr;
2348         tp->dev = dev;
2349
2350         /* need to be able to restore PCI state after a suspend */
2351         pci_save_state(pdev, tp->pci_state);
2352
2353         /* Init sequence:
2354          * 1) Reset the adapter to clear any bad juju
2355          * 2) Reload the sleep image
2356          * 3) Boot the sleep image
2357          * 4) Get the hardware address.
2358          * 5) Put the card to sleep.
2359          */
2360         if(typhoon_reset(ioaddr, WaitSleep) < 0) {
2361                 printk(ERR_PFX "%s: could not reset 3XP\n", pdev->slot_name);
2362                 err = -EIO;
2363                 goto error_out_dma;
2364         }
2365
2366         /* dev->name is not valid until we register, but we need to
2367          * use some common routines to initialize the card. So that those
2368          * routines print the right name, we keep our oun pointer to the name
2369          */
2370         tp->name = pdev->slot_name;
2371
2372         typhoon_init_interface(tp);
2373         typhoon_init_rings(tp);
2374
2375         if(typhoon_boot_3XP(tp, TYPHOON_STATUS_WAITING_FOR_HOST) < 0) {
2376                 printk(ERR_PFX "%s: cannot boot 3XP sleep image\n",
2377                        pdev->slot_name);
2378                 err = -EIO;
2379                 goto error_out_reset;
2380         }
2381
2382         INIT_COMMAND_WITH_RESPONSE(&xp_cmd, TYPHOON_CMD_READ_MAC_ADDRESS);
2383         if(typhoon_issue_command(tp, 1, &xp_cmd, 1, &xp_resp) < 0) {
2384                 printk(ERR_PFX "%s: cannot read MAC address\n",
2385                        pdev->slot_name);
2386                 err = -EIO;
2387                 goto error_out_reset;
2388         }
2389
2390         *(u16 *)&dev->dev_addr[0] = htons(le16_to_cpu(xp_resp.parm1));
2391         *(u32 *)&dev->dev_addr[2] = htonl(le32_to_cpu(xp_resp.parm2));
2392
2393         if(!is_valid_ether_addr(dev->dev_addr)) {
2394                 printk(ERR_PFX "%s: Could not obtain valid ethernet address, "
2395                        "aborting\n", pdev->slot_name);
2396                 goto error_out_reset;
2397         }
2398
2399         if(typhoon_sleep(tp, 3, 0) < 0) {
2400                 printk(ERR_PFX "%s: cannot put adapter to sleep\n",
2401                        pdev->slot_name);
2402                 err = -EIO;
2403                 goto error_out_reset;
2404         }
2405
2406         tp->capabilities = typhoon_card_info[card_id].capabilities;
2407         tp->xcvr_select = TYPHOON_XCVR_AUTONEG;
2408
2409         /* The chip-specific entries in the device structure. */
2410         dev->open               = typhoon_open;
2411         dev->hard_start_xmit    = typhoon_start_tx;
2412         dev->stop               = typhoon_close;
2413         dev->set_multicast_list = typhoon_set_rx_mode;
2414         dev->tx_timeout         = typhoon_tx_timeout;
2415         dev->poll               = typhoon_poll;
2416         dev->weight             = 16;
2417         dev->watchdog_timeo     = TX_TIMEOUT;
2418         dev->get_stats          = typhoon_get_stats;
2419         dev->set_mac_address    = typhoon_set_mac_address;
2420         dev->do_ioctl           = typhoon_ioctl;
2421         dev->vlan_rx_register   = typhoon_vlan_rx_register;
2422         dev->vlan_rx_kill_vid   = typhoon_vlan_rx_kill_vid;
2423
2424         /* We can handle scatter gather, up to 16 entries, and
2425          * we can do IP checksumming (only version 4, doh...)
2426          */
2427         dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
2428         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
2429         dev->features |= NETIF_F_TSO;
2430
2431         if(register_netdev(dev) < 0)
2432                 goto error_out_reset;
2433
2434         /* fixup our local name */
2435         tp->name = dev->name;
2436
2437         pci_set_drvdata(pdev, dev);
2438
2439         printk(KERN_INFO "%s: %s at 0x%lx, ",
2440                dev->name, typhoon_card_info[card_id].name, ioaddr);
2441         for(i = 0; i < 5; i++)
2442                 printk("%2.2x:", dev->dev_addr[i]);
2443         printk("%2.2x\n", dev->dev_addr[i]);
2444
2445         return 0;
2446
2447 error_out_reset:
2448         typhoon_reset(ioaddr, NoWait);
2449
2450 error_out_dma:
2451         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2452                             shared, shared_dma);
2453 error_out_remap:
2454         iounmap((void *) ioaddr);
2455 error_out_regions:
2456         pci_release_regions(pdev);
2457 error_out_dev:
2458         kfree(dev);
2459 error_out:
2460         return err;
2461 }
2462
2463 static void __devexit
2464 typhoon_remove_one(struct pci_dev *pdev)
2465 {
2466         struct net_device *dev = pci_get_drvdata(pdev);
2467         struct typhoon *tp = (struct typhoon *) (dev->priv);
2468
2469         unregister_netdev(dev);
2470         pci_set_power_state(pdev, 0);
2471         pci_restore_state(pdev, tp->pci_state);
2472         typhoon_reset(dev->base_addr, NoWait);
2473         iounmap((char *) (dev->base_addr));
2474         pci_free_consistent(pdev, sizeof(struct typhoon_shared),
2475                             tp->shared, tp->shared_dma);
2476         pci_release_regions(pdev);
2477         pci_disable_device(pdev);
2478         pci_set_drvdata(pdev, NULL);
2479         kfree(dev);
2480 }
2481
/* Hooks this driver into the PCI core: probe/remove for device binding,
 * plus power-management callbacks when the kernel is built with CONFIG_PM.
 */
static struct pci_driver typhoon_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= typhoon_pci_tbl,
	.probe		= typhoon_init_one,
	.remove		= __devexit_p(typhoon_remove_one),
#ifdef CONFIG_PM
	.suspend	= typhoon_suspend,
	.resume		= typhoon_resume,
	.enable_wake	= typhoon_enable_wake,
#endif
};
2493
2494 static int __init
2495 typhoon_init(void)
2496 {
2497         return pci_module_init(&typhoon_driver);
2498 }
2499
/* Module exit point: detach the driver from the PCI core; the core then
 * invokes typhoon_remove_one() for each bound device.
 */
static void __exit
typhoon_cleanup(void)
{
	pci_unregister_driver(&typhoon_driver);
}
2505
/* Wire the init/exit routines into the kernel's module load/unload path. */
module_init(typhoon_init);
module_exit(typhoon_cleanup);