1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  */
7
8 #include <linux/config.h>
9
10 #include <linux/module.h>
11
12 #include <linux/kernel.h>
13 #include <linux/types.h>
14 #include <linux/compiler.h>
15 #include <linux/slab.h>
16 #include <linux/delay.h>
17 #include <linux/init.h>
18 #include <linux/ioport.h>
19 #include <linux/pci.h>
20 #include <linux/netdevice.h>
21 #include <linux/etherdevice.h>
22 #include <linux/skbuff.h>
23 #include <linux/ethtool.h>
24 #include <linux/mii.h>
25 #include <linux/if_vlan.h>
26 #include <linux/ip.h>
27 #include <linux/tcp.h>
28 #include <linux/workqueue.h>
29
30 #include <net/checksum.h>
31
32 #include <asm/system.h>
33 #include <asm/io.h>
34 #include <asm/byteorder.h>
35 #include <asm/uaccess.h>
36
37 #ifdef CONFIG_SPARC64
38 #include <asm/idprom.h>
39 #include <asm/oplib.h>
40 #include <asm/pbm.h>
41 #endif
42
43 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
44 #define TG3_VLAN_TAG_USED 1
45 #else
46 #define TG3_VLAN_TAG_USED 0
47 #endif
48
49 #ifdef NETIF_F_TSO
50 #define TG3_TSO_SUPPORT 1
51 #else
52 #define TG3_TSO_SUPPORT 0
53 #endif
54
55 #include "tg3.h"
56
57 #define DRV_MODULE_NAME         "tg3"
58 #define PFX DRV_MODULE_NAME     ": "
59 #define DRV_MODULE_VERSION      "2.6"
60 #define DRV_MODULE_RELDATE      "February 3, 2004"
61
62 #define TG3_DEF_MAC_MODE        0
63 #define TG3_DEF_RX_MODE         0
64 #define TG3_DEF_TX_MODE         0
65 #define TG3_DEF_MSG_ENABLE        \
66         (NETIF_MSG_DRV          | \
67          NETIF_MSG_PROBE        | \
68          NETIF_MSG_LINK         | \
69          NETIF_MSG_TIMER        | \
70          NETIF_MSG_IFDOWN       | \
71          NETIF_MSG_IFUP         | \
72          NETIF_MSG_RX_ERR       | \
73          NETIF_MSG_TX_ERR)
74
75 /* length of time before we decide the hardware is borked,
76  * and dev->tx_timeout() should be called to fix the problem
77  */
78 #define TG3_TX_TIMEOUT                  (5 * HZ)
79
80 /* hardware minimum and maximum for a single frame's data payload */
81 #define TG3_MIN_MTU                     60
82 #define TG3_MAX_MTU(tp) \
83         (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 ? 9000 : 1500)
84
85 /* These numbers seem to be hard coded in the NIC firmware somehow.
86  * You can't change the ring sizes, but you can change where you place
87  * them in the NIC onboard memory.
88  */
89 #define TG3_RX_RING_SIZE                512
90 #define TG3_DEF_RX_RING_PENDING         200
91 #define TG3_RX_JUMBO_RING_SIZE          256
92 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
93
94 /* Do not place this n-ring-entries value in the tp struct itself;
95  * we want to expose these constants to GCC so that modulo and
96  * related operations are done with shifts and masks instead of
97  * hardware multiply/modulo instructions.  Another solution would be
98  * to replace things like '% foo' with '& (foo - 1)'.
99  */
100 #define TG3_RX_RCB_RING_SIZE(tp)        \
101         (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ? \
102          512 : 1024)
103
104 #define TG3_TX_RING_SIZE                512
105 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
106
107 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
108                                  TG3_RX_RING_SIZE)
109 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
110                                  TG3_RX_JUMBO_RING_SIZE)
111 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
112                                    TG3_RX_RCB_RING_SIZE(tp))
113 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
114                                  TG3_TX_RING_SIZE)
115 #define TX_RING_GAP(TP) \
116         (TG3_TX_RING_SIZE - (TP)->tx_pending)
117 #define TX_BUFFS_AVAIL(TP)                                              \
118         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
119           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
120           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
121 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
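/* For example: with TG3_TX_RING_SIZE of 512 and tx_pending left at its
 * default of 511, a hypothetical tx_prod of 10 and tx_cons of 5 give
 * TX_BUFFS_AVAIL = 5 + 511 - 10 = 506 free descriptors (511 usable
 * entries minus the 5 currently in flight).  NEXT_TX relies on the
 * ring size being a power of two so that wrap-around is a simple mask
 * rather than a modulo.
 */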
122
123 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
124 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
125
126 /* minimum number of free TX descriptors required to wake up TX process */
127 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
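/* With the 512-entry TX ring this threshold works out to 128 free
 * descriptors before the queue is woken again.
 */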
128
129 static char version[] __devinitdata =
130         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
131
132 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
133 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
134 MODULE_LICENSE("GPL");
135 MODULE_PARM(tg3_debug, "i");
136 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
137
138 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
139
140 static struct pci_device_id tg3_pci_tbl[] = {
141         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
142           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
143         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
144           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
145         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
146           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
147         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
148           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
149         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
150           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
151         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
152           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
153         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
154           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
155         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
156           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
157         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
158           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
159         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
160           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
161         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
162           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
163         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
164           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
165         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
166           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
167         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
168           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
169         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
170           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
171         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
172           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
173         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
174           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
175         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
176           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
177         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
178           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
179         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
180           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
181         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
182           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
183         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
184           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
185         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
186           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
187         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
188           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
189         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
190           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
191         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
192           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
193         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
194           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
195         { 0, }
196 };
197
198 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
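/* The table above is matched against each PCI function's vendor and
 * device IDs at probe time; exporting it with MODULE_DEVICE_TABLE()
 * also lets hotplug tooling load this module automatically for any of
 * the listed Tigon3, SysKonnect and Altima parts.
 */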
199
200 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
201 {
202         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
203                 unsigned long flags;
204
205                 spin_lock_irqsave(&tp->indirect_lock, flags);
206                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
207                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
208                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
209         } else {
210                 writel(val, tp->regs + off);
211                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
212                         readl(tp->regs + off);
213         }
214 }
215
216 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
217 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
218 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
219 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
220 #define tr32(reg)               readl(tp->regs + (reg))
221 #define tr16(reg)               readw(tp->regs + (reg))
222 #define tr8(reg)                readb(tp->regs + (reg))
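/* A note on the access scheme above: on chips flagged with
 * TG3_FLAG_PCIX_TARGET_HWBUG, 32-bit register writes are routed
 * through the TG3PCI_REG_BASE_ADDR and TG3PCI_REG_DATA config-space
 * registers under indirect_lock rather than plain MMIO, and on 5701
 * parts with TG3_FLAG_5701_REG_WRITE_BUG a readl() after each
 * writel() flushes the posted write.  The tw32/tr32 family of macros
 * assumes a local 'struct tg3 *tp' is in scope at the call site.
 */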
223
224 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
225 {
226         unsigned long flags;
227
228         spin_lock_irqsave(&tp->indirect_lock, flags);
229         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
230         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
231
232         /* Always leave this as zero. */
233         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
234         spin_unlock_irqrestore(&tp->indirect_lock, flags);
235 }
236
237 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
238 {
239         unsigned long flags;
240
241         spin_lock_irqsave(&tp->indirect_lock, flags);
242         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
243         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
244
245         /* Always leave this as zero. */
246         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
247         spin_unlock_irqrestore(&tp->indirect_lock, flags);
248 }
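/* tg3_write_mem() and tg3_read_mem() reach the NIC's on-board memory
 * through a movable window: the target offset is written to
 * TG3PCI_MEM_WIN_BASE_ADDR and the data is then transferred through
 * TG3PCI_MEM_WIN_DATA, all under indirect_lock.  The base is restored
 * to zero afterwards, presumably so that other code can rely on a
 * zero window base, as the "Always leave this as zero" comments note.
 */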
249
250 static void tg3_disable_ints(struct tg3 *tp)
251 {
252         tw32(TG3PCI_MISC_HOST_CTRL,
253              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
254         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
255         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
256 }
257
258 static inline void tg3_cond_int(struct tg3 *tp)
259 {
260         if (tp->hw_status->status & SD_STATUS_UPDATED)
261                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
262 }
263
264 static void tg3_enable_ints(struct tg3 *tp)
265 {
266         tw32(TG3PCI_MISC_HOST_CTRL,
267              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
268         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
269         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
270
271         tg3_cond_int(tp);
272 }
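/* Interrupts are gated at two levels here: MISC_HOST_CTRL_MASK_PCI_INT
 * masks the PCI interrupt line itself, and writing 1 (or 0) to the low
 * half of MAILBOX_INTERRUPT_0 masks (or unmasks) interrupts at the
 * mailbox, as these two helpers use it.  The tr32() read-back after
 * each mailbox write flushes the posted write before the caller
 * proceeds, and tg3_cond_int() re-asserts an interrupt via
 * GRC_LCLCTRL_SETINT if a status update arrived while interrupts were
 * disabled.
 */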
273
274 static inline void tg3_netif_stop(struct tg3 *tp)
275 {
276         netif_poll_disable(tp->dev);
277         netif_tx_disable(tp->dev);
278 }
279
280 static inline void tg3_netif_start(struct tg3 *tp)
281 {
282         netif_wake_queue(tp->dev);
283         /* NOTE: unconditional netif_wake_queue is only appropriate
284          * so long as all callers are assured to have free tx slots
285          * (such as after tg3_init_hw)
286          */
287         netif_poll_enable(tp->dev);
288         tg3_cond_int(tp);
289 }
290
291 static void tg3_switch_clocks(struct tg3 *tp)
292 {
293         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
294         u32 orig_clock_ctrl;
295
296         orig_clock_ctrl = clock_ctrl;
297         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
298                        CLOCK_CTRL_CLKRUN_OENABLE |
299                        0x1f);
300         tp->pci_clock_ctrl = clock_ctrl;
301
302         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
303             (orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
304                 tw32(TG3PCI_CLOCK_CTRL,
305                      clock_ctrl |
306                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
307                 tr32(TG3PCI_CLOCK_CTRL);
308                 udelay(40);
309                 tw32(TG3PCI_CLOCK_CTRL,
310                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
311                 tr32(TG3PCI_CLOCK_CTRL);
312                 udelay(40);
313         }
314         tw32(TG3PCI_CLOCK_CTRL, clock_ctrl);
315         tr32(TG3PCI_CLOCK_CTRL);
316         udelay(40);
317 }
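/* On non-5705 parts currently running the 44MHz core clock, the
 * sequence above first selects the alternate clock alongside the
 * 44MHz setting, then drops the 44MHz bit, and finally programs the
 * target value, with a 40us settle after each write.  This appears to
 * be the two-step switch the hardware expects when changing clock
 * sources.
 */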
318
319 #define PHY_BUSY_LOOPS  5000
320
321 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
322 {
323         u32 frame_val;
324         int loops, ret;
325
326         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
327                 tw32(MAC_MI_MODE,
328                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
329                 tr32(MAC_MI_MODE);
330                 udelay(40);
331         }
332
333         *val = 0xffffffff;
334
335         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
336                       MI_COM_PHY_ADDR_MASK);
337         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
338                       MI_COM_REG_ADDR_MASK);
339         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
340         
341         tw32(MAC_MI_COM, frame_val);
342         tr32(MAC_MI_COM);
343
344         loops = PHY_BUSY_LOOPS;
345         while (loops-- > 0) {
346                 udelay(10);
347                 frame_val = tr32(MAC_MI_COM);
348
349                 if ((frame_val & MI_COM_BUSY) == 0) {
350                         udelay(5);
351                         frame_val = tr32(MAC_MI_COM);
352                         break;
353                 }
354         }
355
356         ret = -EBUSY;
357         if (loops > 0) {
358                 *val = frame_val & MI_COM_DATA_MASK;
359                 ret = 0;
360         }
361
362         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
363                 tw32(MAC_MI_MODE, tp->mi_mode);
364                 tr32(MAC_MI_MODE);
365                 udelay(40);
366         }
367
368         return ret;
369 }
370
371 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
372 {
373         u32 frame_val;
374         int loops, ret;
375
376         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
377                 tw32(MAC_MI_MODE,
378                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
379                 tr32(MAC_MI_MODE);
380                 udelay(40);
381         }
382
383         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
384                       MI_COM_PHY_ADDR_MASK);
385         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
386                       MI_COM_REG_ADDR_MASK);
387         frame_val |= (val & MI_COM_DATA_MASK);
388         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
389         
390         tw32(MAC_MI_COM, frame_val);
391         tr32(MAC_MI_COM);
392
393         loops = PHY_BUSY_LOOPS;
394         while (loops-- > 0) {
395                 udelay(10);
396                 frame_val = tr32(MAC_MI_COM);
397                 if ((frame_val & MI_COM_BUSY) == 0) {
398                         udelay(5);
399                         frame_val = tr32(MAC_MI_COM);
400                         break;
401                 }
402         }
403
404         ret = -EBUSY;
405         if (loops > 0)
406                 ret = 0;
407
408         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
409                 tw32(MAC_MI_MODE, tp->mi_mode);
410                 tr32(MAC_MI_MODE);
411                 udelay(40);
412         }
413
414         return ret;
415 }
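/* tg3_readphy() and tg3_writephy() drive the MII management interface
 * by packing the PHY address, register number, (for writes) the data,
 * and a read or write command into MAC_MI_COM, then polling
 * MI_COM_BUSY.  With PHY_BUSY_LOOPS of 5000 and a 10us delay per
 * iteration, the worst-case wait is on the order of 50ms before
 * -EBUSY is returned.  Hardware auto-polling (MAC_MI_MODE_AUTO_POLL)
 * is temporarily switched off around each access so it does not race
 * with the manual transaction.
 */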
416
417 static void tg3_phy_set_wirespeed(struct tg3 *tp)
418 {
419         u32 val;
420
421         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
422                 return;
423
424         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007);
425         tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
426         tg3_writephy(tp, MII_TG3_AUX_CTRL, (val | (1 << 15) | (1 << 4)));
427 }
428
429 static int tg3_bmcr_reset(struct tg3 *tp)
430 {
431         u32 phy_control;
432         int limit, err;
433
434         /* OK, reset it, and poll the BMCR_RESET bit until it
435          * clears or we time out.
436          */
437         phy_control = BMCR_RESET;
438         err = tg3_writephy(tp, MII_BMCR, phy_control);
439         if (err != 0)
440                 return -EBUSY;
441
442         limit = 5000;
443         while (limit--) {
444                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
445                 if (err != 0)
446                         return -EBUSY;
447
448                 if ((phy_control & BMCR_RESET) == 0) {
449                         udelay(40);
450                         break;
451                 }
452                 udelay(10);
453         }
454         if (limit <= 0)
455                 return -EBUSY;
456
457         return 0;
458 }
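/* This is the standard MII soft reset: BMCR_RESET is self-clearing,
 * so the loop above simply polls (up to 5000 times, 10us apart) until
 * the PHY reports that the reset has completed.
 */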
459
460 static int tg3_wait_macro_done(struct tg3 *tp)
461 {
462         int limit = 100;
463
464         while (limit--) {
465                 u32 tmp32;
466
467                 tg3_readphy(tp, 0x16, &tmp32);
468                 if ((tmp32 & 0x1000) == 0)
469                         break;
470         }
471         if (limit <= 0)
472                 return -EBUSY;
473
474         return 0;
475 }
476
477 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
478 {
479         static const u32 test_pat[4][6] = {
480         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
481         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
482         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
483         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
484         };
485         int chan;
486
487         for (chan = 0; chan < 4; chan++) {
488                 int i;
489
490                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
491                              (chan * 0x2000) | 0x0200);
492                 tg3_writephy(tp, 0x16, 0x0002);
493
494                 for (i = 0; i < 6; i++)
495                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
496                                      test_pat[chan][i]);
497
498                 tg3_writephy(tp, 0x16, 0x0202);
499                 if (tg3_wait_macro_done(tp)) {
500                         *resetp = 1;
501                         return -EBUSY;
502                 }
503
504                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
505                              (chan * 0x2000) | 0x0200);
506                 tg3_writephy(tp, 0x16, 0x0082);
507                 if (tg3_wait_macro_done(tp)) {
508                         *resetp = 1;
509                         return -EBUSY;
510                 }
511
512                 tg3_writephy(tp, 0x16, 0x0802);
513                 if (tg3_wait_macro_done(tp)) {
514                         *resetp = 1;
515                         return -EBUSY;
516                 }
517
518                 for (i = 0; i < 6; i += 2) {
519                         u32 low, high;
520
521                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low);
522                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high);
523                         if (tg3_wait_macro_done(tp)) {
524                                 *resetp = 1;
525                                 return -EBUSY;
526                         }
527                         low &= 0x7fff;
528                         high &= 0x000f;
529                         if (low != test_pat[chan][i] ||
530                             high != test_pat[chan][i+1]) {
531                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
532                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
533                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
534
535                                 return -EBUSY;
536                         }
537                 }
538         }
539
540         return 0;
541 }
542
543 static int tg3_phy_reset_chanpat(struct tg3 *tp)
544 {
545         int chan;
546
547         for (chan = 0; chan < 4; chan++) {
548                 int i;
549
550                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
551                              (chan * 0x2000) | 0x0200);
552                 tg3_writephy(tp, 0x16, 0x0002);
553                 for (i = 0; i < 6; i++)
554                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
555                 tg3_writephy(tp, 0x16, 0x0202);
556                 if (tg3_wait_macro_done(tp))
557                         return -EBUSY;
558         }
559
560         return 0;
561 }
562
563 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
564 {
565         u32 reg32, phy9_orig;
566         int retries, do_phy_reset, err;
567
568         retries = 10;
569         do_phy_reset = 1;
570         do {
571                 if (do_phy_reset) {
572                         err = tg3_bmcr_reset(tp);
573                         if (err)
574                                 return err;
575                         do_phy_reset = 0;
576                 }
577
578                 /* Disable transmitter and interrupt.  */
579                 tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
580                 reg32 |= 0x3000;
581                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
582
583                 /* Set full-duplex, 1000 mbps.  */
584                 tg3_writephy(tp, MII_BMCR,
585                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
586
587                 /* Set to master mode.  */
588                 tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig);
589                 tg3_writephy(tp, MII_TG3_CTRL,
590                              (MII_TG3_CTRL_AS_MASTER |
591                               MII_TG3_CTRL_ENABLE_AS_MASTER));
592
593                 /* Enable SM_DSP_CLOCK and 6dB.  */
594                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
595
596                 /* Block the PHY control access.  */
597                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
598                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
599
600                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
601                 if (!err)
602                         break;
603         } while (--retries);
604
605         err = tg3_phy_reset_chanpat(tp);
606         if (err)
607                 return err;
608
609         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
610         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
611
612         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
613         tg3_writephy(tp, 0x16, 0x0000);
614
615         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
616
617         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
618
619         tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32);
620         reg32 &= ~0x3000;
621         tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
622
623         return err;
624 }
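/* Rough shape of the 5703/5704/5705 workaround above: reset the PHY,
 * force 1000/full master mode with the transmitter and interrupt
 * blocked, write a known test pattern into four DSP channels and read
 * it back; if verification fails the whole sequence is retried (up to
 * ten times, re-resetting the PHY each time).  The channels are then
 * cleared and the original MII_TG3_CTRL value restored on the way
 * out.
 */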
625
626 /* Reset the tigon3 PHY if there is no valid link, or
627  * unconditionally if the FORCE argument is non-zero.
628  */
629 static int tg3_phy_reset(struct tg3 *tp, int force)
630 {
631         u32 phy_status;
632         int err;
633
634         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
635         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
636         if (err != 0)
637                 return -EBUSY;
638
639         /* If we have link, and not forcing a reset, then nothing
640          * to do.
641          */
642         if ((phy_status & BMSR_LSTATUS) != 0 && (force == 0))
643                 return 0;
644
645         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
646             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
647             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
648                 err = tg3_phy_reset_5703_4_5(tp);
649                 if (err)
650                         return err;
651                 goto out;
652         }
653
654         err = tg3_bmcr_reset(tp);
655         if (err)
656                 return err;
657
658 out:
659         tg3_phy_set_wirespeed(tp);
660         return 0;
661 }
662
663 static void tg3_frob_aux_power(struct tg3 *tp)
664 {
665         struct tg3 *tp_peer = tp;
666
667         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
668                 return;
669
670         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
671                 tp_peer = pci_get_drvdata(tp->pdev_peer);
672                 if (!tp_peer)
673                         BUG();
674         }
675
676
677         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
678             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
679                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
680                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
681                         tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
682                              (GRC_LCLCTRL_GPIO_OE0 |
683                               GRC_LCLCTRL_GPIO_OE1 |
684                               GRC_LCLCTRL_GPIO_OE2 |
685                               GRC_LCLCTRL_GPIO_OUTPUT0 |
686                               GRC_LCLCTRL_GPIO_OUTPUT1));
687                         tr32(GRC_LOCAL_CTRL);
688                         udelay(100);
689                 } else {
690                         if (tp_peer != tp &&
691                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
692                                 return;
693
694                         tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
695                              (GRC_LCLCTRL_GPIO_OE0 |
696                               GRC_LCLCTRL_GPIO_OE1 |
697                               GRC_LCLCTRL_GPIO_OE2 |
698                               GRC_LCLCTRL_GPIO_OUTPUT1 |
699                               GRC_LCLCTRL_GPIO_OUTPUT2));
700                         tr32(GRC_LOCAL_CTRL);
701                         udelay(100);
702
703                         tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
704                              (GRC_LCLCTRL_GPIO_OE0 |
705                               GRC_LCLCTRL_GPIO_OE1 |
706                               GRC_LCLCTRL_GPIO_OE2 |
707                               GRC_LCLCTRL_GPIO_OUTPUT0 |
708                               GRC_LCLCTRL_GPIO_OUTPUT1 |
709                               GRC_LCLCTRL_GPIO_OUTPUT2));
710                         tr32(GRC_LOCAL_CTRL);
711                         udelay(100);
712
713                         tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
714                              (GRC_LCLCTRL_GPIO_OE0 |
715                               GRC_LCLCTRL_GPIO_OE1 |
716                               GRC_LCLCTRL_GPIO_OE2 |
717                               GRC_LCLCTRL_GPIO_OUTPUT0 |
718                               GRC_LCLCTRL_GPIO_OUTPUT1));
719                         tr32(GRC_LOCAL_CTRL);
720                         udelay(100);
721                 }
722         } else {
723                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
724                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
725                         if (tp_peer != tp &&
726                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
727                                 return;
728
729                         tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
730                              (GRC_LCLCTRL_GPIO_OE1 |
731                               GRC_LCLCTRL_GPIO_OUTPUT1));
732                         tr32(GRC_LOCAL_CTRL);
733                         udelay(100);
734
735                         tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
736                              (GRC_LCLCTRL_GPIO_OE1));
737                         tr32(GRC_LOCAL_CTRL);
738                         udelay(100);
739
740                         tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
741                              (GRC_LCLCTRL_GPIO_OE1 |
742                               GRC_LCLCTRL_GPIO_OUTPUT1));
743                         tr32(GRC_LOCAL_CTRL);
744                         udelay(100);
745                 }
746         }
747 }
748
749 static int tg3_setup_phy(struct tg3 *);
750
751 static int tg3_set_power_state(struct tg3 *tp, int state)
752 {
753         u32 misc_host_ctrl;
754         u16 power_control, power_caps;
755         int pm = tp->pm_cap;
756
757         /* Make sure register accesses (indirect or otherwise)
758          * will function correctly.
759          */
760         pci_write_config_dword(tp->pdev,
761                                TG3PCI_MISC_HOST_CTRL,
762                                tp->misc_host_ctrl);
763
764         pci_read_config_word(tp->pdev,
765                              pm + PCI_PM_CTRL,
766                              &power_control);
767         power_control |= PCI_PM_CTRL_PME_STATUS;
768         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
769         switch (state) {
770         case 0:
771                 power_control |= 0;
772                 pci_write_config_word(tp->pdev,
773                                       pm + PCI_PM_CTRL,
774                                       power_control);
775                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
776                 tr32(GRC_LOCAL_CTRL);
777                 udelay(100);
778
779                 return 0;
780
781         case 1:
782                 power_control |= 1;
783                 break;
784
785         case 2:
786                 power_control |= 2;
787                 break;
788
789         case 3:
790                 power_control |= 3;
791                 break;
792
793         default:
794                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
795                        "requested.\n",
796                        tp->dev->name, state);
797                 return -EINVAL;
798         };
799
800         power_control |= PCI_PM_CTRL_PME_ENABLE;
801
802         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
803         tw32(TG3PCI_MISC_HOST_CTRL,
804              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
805
806         if (tp->link_config.phy_is_low_power == 0) {
807                 tp->link_config.phy_is_low_power = 1;
808                 tp->link_config.orig_speed = tp->link_config.speed;
809                 tp->link_config.orig_duplex = tp->link_config.duplex;
810                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
811         }
812
813         if (tp->phy_id != PHY_ID_SERDES) {
814                 tp->link_config.speed = SPEED_10;
815                 tp->link_config.duplex = DUPLEX_HALF;
816                 tp->link_config.autoneg = AUTONEG_ENABLE;
817                 tg3_setup_phy(tp);
818         }
819
820         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
821
822         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
823                 u32 mac_mode;
824
825                 if (tp->phy_id != PHY_ID_SERDES) {
826                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
827                         udelay(40);
828
829                         mac_mode = MAC_MODE_PORT_MODE_MII;
830
831                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
832                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
833                                 mac_mode |= MAC_MODE_LINK_POLARITY;
834                 } else {
835                         mac_mode = MAC_MODE_PORT_MODE_TBI;
836                 }
837
838
839                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
840                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
841                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
842
843                 tw32(MAC_MODE, mac_mode);
844                 tr32(MAC_MODE);
845                 udelay(100);
846
847                 tw32(MAC_RX_MODE, RX_MODE_ENABLE);
848                 tr32(MAC_RX_MODE);
849                 udelay(10);
850         }
851
852         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
853             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
854              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
855                 u32 base_val;
856
857                 base_val = tp->pci_clock_ctrl;
858                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
859                              CLOCK_CTRL_TXCLK_DISABLE);
860
861                 tw32(TG3PCI_CLOCK_CTRL, base_val |
862                      CLOCK_CTRL_ALTCLK |
863                      CLOCK_CTRL_PWRDOWN_PLL133);
864                 tr32(TG3PCI_CLOCK_CTRL);
865                 udelay(40);
866         } else {
867                 u32 newbits1, newbits2;
868
869                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
870                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
871                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
872                                     CLOCK_CTRL_TXCLK_DISABLE |
873                                     CLOCK_CTRL_ALTCLK);
874                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
875                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
876                         newbits1 = CLOCK_CTRL_625_CORE;
877                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
878                 } else {
879                         newbits1 = CLOCK_CTRL_ALTCLK;
880                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
881                 }
882
883                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
884                 tr32(TG3PCI_CLOCK_CTRL);
885                 udelay(40);
886
887                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
888                 tr32(TG3PCI_CLOCK_CTRL);
889                 udelay(40);
890
891                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
892                         u32 newbits3;
893
894                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
895                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
896                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
897                                             CLOCK_CTRL_TXCLK_DISABLE |
898                                             CLOCK_CTRL_44MHZ_CORE);
899                         } else {
900                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
901                         }
902
903                         tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits3);
904                         tr32(TG3PCI_CLOCK_CTRL);
905                         udelay(40);
906                 }
907         }
908
909         tg3_frob_aux_power(tp);
910
911         /* Finally, set the new power state. */
912         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
913
914         return 0;
915 }
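/* The 'state' argument maps onto the PCI power-management D-states:
 * 0 is D0 (fully on, handled by the early return above), while 1-3
 * are written into the PCI_PM_CTRL state field as D1-D3.  When
 * Wake-on-LAN is enabled and the device can signal PME from D3cold,
 * the MAC is left configured for magic-packet recognition
 * (MAC_MODE_MAGIC_PKT_ENABLE) with the receiver running before the
 * final power-state write.
 */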
916
917 static void tg3_link_report(struct tg3 *tp)
918 {
919         if (!netif_carrier_ok(tp->dev)) {
920                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
921         } else {
922                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
923                        tp->dev->name,
924                        (tp->link_config.active_speed == SPEED_1000 ?
925                         1000 :
926                         (tp->link_config.active_speed == SPEED_100 ?
927                          100 : 10)),
928                        (tp->link_config.active_duplex == DUPLEX_FULL ?
929                         "full" : "half"));
930
931                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
932                        "%s for RX.\n",
933                        tp->dev->name,
934                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
935                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
936         }
937 }
938
939 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
940 {
941         u32 new_tg3_flags = 0;
942
943         if (local_adv & ADVERTISE_PAUSE_CAP) {
944                 if (local_adv & ADVERTISE_PAUSE_ASYM) {
945                         if (remote_adv & LPA_PAUSE_CAP)
946                                 new_tg3_flags |=
947                                         (TG3_FLAG_RX_PAUSE |
948                                          TG3_FLAG_TX_PAUSE);
949                         else if (remote_adv & LPA_PAUSE_ASYM)
950                                 new_tg3_flags |=
951                                         (TG3_FLAG_RX_PAUSE);
952                 } else {
953                         if (remote_adv & LPA_PAUSE_CAP)
954                                 new_tg3_flags |=
955                                         (TG3_FLAG_RX_PAUSE |
956                                          TG3_FLAG_TX_PAUSE);
957                 }
958         } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
959                 if ((remote_adv & LPA_PAUSE_CAP) &&
960                     (remote_adv & LPA_PAUSE_ASYM))
961                         new_tg3_flags |= TG3_FLAG_TX_PAUSE;
962         }
963
964         tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
965         tp->tg3_flags |= new_tg3_flags;
966
967         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
968                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
969         else
970                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
971
972         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
973                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
974         else
975                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
976 }
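/* This follows the usual 802.3x pause resolution: the symmetric and
 * asymmetric pause bits we advertise are compared with what the link
 * partner advertised to decide TX and RX pause independently.  For
 * example, if we advertise both ADVERTISE_PAUSE_CAP and
 * ADVERTISE_PAUSE_ASYM but the partner only sets LPA_PAUSE_ASYM, only
 * RX pause is enabled; if both sides advertise the symmetric bit,
 * pause is enabled in both directions.
 */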
977
978 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
979 {
980         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
981         case MII_TG3_AUX_STAT_10HALF:
982                 *speed = SPEED_10;
983                 *duplex = DUPLEX_HALF;
984                 break;
985
986         case MII_TG3_AUX_STAT_10FULL:
987                 *speed = SPEED_10;
988                 *duplex = DUPLEX_FULL;
989                 break;
990
991         case MII_TG3_AUX_STAT_100HALF:
992                 *speed = SPEED_100;
993                 *duplex = DUPLEX_HALF;
994                 break;
995
996         case MII_TG3_AUX_STAT_100FULL:
997                 *speed = SPEED_100;
998                 *duplex = DUPLEX_FULL;
999                 break;
1000
1001         case MII_TG3_AUX_STAT_1000HALF:
1002                 *speed = SPEED_1000;
1003                 *duplex = DUPLEX_HALF;
1004                 break;
1005
1006         case MII_TG3_AUX_STAT_1000FULL:
1007                 *speed = SPEED_1000;
1008                 *duplex = DUPLEX_FULL;
1009                 break;
1010
1011         default:
1012                 *speed = SPEED_INVALID;
1013                 *duplex = DUPLEX_INVALID;
1014                 break;
1015         };
1016 }
1017
1018 static int tg3_phy_copper_begin(struct tg3 *tp, int wait_for_link)
1019 {
1020         u32 new_adv;
1021         int i;
1022
1023         if (tp->link_config.phy_is_low_power) {
1024                 /* Entering low power mode.  Disable gigabit and
1025                  * 100baseT advertisements.
1026                  */
1027                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1028
1029                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1030                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1031                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1032                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1033
1034                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1035         } else if (tp->link_config.speed == SPEED_INVALID) {
1036                 tp->link_config.advertising =
1037                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1038                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1039                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1040                          ADVERTISED_Autoneg | ADVERTISED_MII);
1041
1042                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1043                         tp->link_config.advertising &=
1044                                 ~(ADVERTISED_1000baseT_Half |
1045                                   ADVERTISED_1000baseT_Full);
1046
1047                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1048                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1049                         new_adv |= ADVERTISE_10HALF;
1050                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1051                         new_adv |= ADVERTISE_10FULL;
1052                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1053                         new_adv |= ADVERTISE_100HALF;
1054                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1055                         new_adv |= ADVERTISE_100FULL;
1056                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1057
1058                 if (tp->link_config.advertising &
1059                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1060                         new_adv = 0;
1061                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1062                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1063                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1064                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1065                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1066                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1067                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1068                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1069                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1070                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1071                 } else {
1072                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1073                 }
1074         } else {
1075                 /* Asking for a specific link mode. */
1076                 if (tp->link_config.speed == SPEED_1000) {
1077                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1078                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1079
1080                         if (tp->link_config.duplex == DUPLEX_FULL)
1081                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1082                         else
1083                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1084                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1085                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1086                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1087                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1088                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1089                 } else {
1090                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1091
1092                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1093                         if (tp->link_config.speed == SPEED_100) {
1094                                 if (tp->link_config.duplex == DUPLEX_FULL)
1095                                         new_adv |= ADVERTISE_100FULL;
1096                                 else
1097                                         new_adv |= ADVERTISE_100HALF;
1098                         } else {
1099                                 if (tp->link_config.duplex == DUPLEX_FULL)
1100                                         new_adv |= ADVERTISE_10FULL;
1101                                 else
1102                                         new_adv |= ADVERTISE_10HALF;
1103                         }
1104                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1105                 }
1106         }
1107
1108         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1109             tp->link_config.speed != SPEED_INVALID) {
1110                 u32 bmcr, orig_bmcr;
1111
1112                 tp->link_config.active_speed = tp->link_config.speed;
1113                 tp->link_config.active_duplex = tp->link_config.duplex;
1114
1115                 bmcr = 0;
1116                 switch (tp->link_config.speed) {
1117                 default:
1118                 case SPEED_10:
1119                         break;
1120
1121                 case SPEED_100:
1122                         bmcr |= BMCR_SPEED100;
1123                         break;
1124
1125                 case SPEED_1000:
1126                         bmcr |= TG3_BMCR_SPEED1000;
1127                         break;
1128                 };
1129
1130                 if (tp->link_config.duplex == DUPLEX_FULL)
1131                         bmcr |= BMCR_FULLDPLX;
1132
1133                 tg3_readphy(tp, MII_BMCR, &orig_bmcr);
1134                 if (bmcr != orig_bmcr) {
1135                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1136                         for (i = 0; i < 15000; i++) {
1137                                 u32 tmp;
1138
1139                                 udelay(10);
1140                                 tg3_readphy(tp, MII_BMSR, &tmp);
1141                                 tg3_readphy(tp, MII_BMSR, &tmp);
1142                                 if (!(tmp & BMSR_LSTATUS)) {
1143                                         udelay(40);
1144                                         break;
1145                                 }
1146                         }
1147                         tg3_writephy(tp, MII_BMCR, bmcr);
1148                         udelay(40);
1149                 }
1150         } else {
1151                 tg3_writephy(tp, MII_BMCR,
1152                              BMCR_ANENABLE | BMCR_ANRESTART);
1153         }
1154
1155         if (wait_for_link) {
1156                 tp->link_config.active_speed = SPEED_INVALID;
1157                 tp->link_config.active_duplex = DUPLEX_INVALID;
1158                 for (i = 0; i < 300000; i++) {
1159                         u32 tmp;
1160
1161                         udelay(10);
1162                         tg3_readphy(tp, MII_BMSR, &tmp);
1163                         tg3_readphy(tp, MII_BMSR, &tmp);
1164                         if (!(tmp & BMSR_LSTATUS))
1165                                 continue;
1166
1167                         tg3_readphy(tp, MII_TG3_AUX_STAT, &tmp);
1168                         tg3_aux_stat_to_speed_duplex(tp, tmp,
1169                                                      &tp->link_config.active_speed,
1170                                                      &tp->link_config.active_duplex);
1171                 }
1172                 if (tp->link_config.active_speed == SPEED_INVALID)
1173                         return -EINVAL;
1174         }
1175
1176         return 0;
1177 }
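/* In autoneg mode the function above programs MII_ADVERTISE and
 * MII_TG3_CTRL from link_config.advertising and restarts
 * autonegotiation; in forced mode it builds a BMCR value directly
 * and, if it differs from the current one, briefly drops the link
 * with BMCR_LOOPBACK before applying it.  With wait_for_link set it
 * then polls BMSR (up to roughly three seconds) and records the
 * result via tg3_aux_stat_to_speed_duplex().
 */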
1178
1179 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1180 {
1181         int err;
1182
1183         /* Turn off tap power management. */
1184         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c20);
1185
1186         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1187         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1188
1189         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1190         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1191
1192         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1193         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1194
1195         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1196         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1197
1198         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1199         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1200
1201         udelay(40);
1202
1203         return err;
1204 }
1205
1206 static int tg3_setup_copper_phy(struct tg3 *tp)
1207 {
1208         int current_link_up;
1209         u32 bmsr, dummy;
1210         u16 current_speed;
1211         u8 current_duplex;
1212         int i, err;
1213
1214         tw32(MAC_STATUS,
1215              (MAC_STATUS_SYNC_CHANGED |
1216               MAC_STATUS_CFG_CHANGED));
1217         tr32(MAC_STATUS);
1218         udelay(40);
1219
1220         tp->mi_mode = MAC_MI_MODE_BASE;
1221         tw32(MAC_MI_MODE, tp->mi_mode);
1222         tr32(MAC_MI_MODE);
1223         udelay(40);
1224
1225         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1226
1227         /* Some third-party PHYs need to be reset on link going
1228          * down.
1229          */
1230         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1231              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1232              tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) &&
1233             netif_carrier_ok(tp->dev)) {
1234                 tg3_readphy(tp, MII_BMSR, &bmsr);
1235                 tg3_readphy(tp, MII_BMSR, &bmsr);
1236                 if (!(bmsr & BMSR_LSTATUS))
1237                         tg3_phy_reset(tp, 1);
1238         }
1239
1240         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1241                 tg3_readphy(tp, MII_BMSR, &bmsr);
1242                 tg3_readphy(tp, MII_BMSR, &bmsr);
1243
1244                 if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1245                         bmsr = 0;
1246
1247                 if (!(bmsr & BMSR_LSTATUS)) {
1248                         err = tg3_init_5401phy_dsp(tp);
1249                         if (err)
1250                                 return err;
1251
1252                         tg3_readphy(tp, MII_BMSR, &bmsr);
1253                         for (i = 0; i < 1000; i++) {
1254                                 udelay(10);
1255                                 tg3_readphy(tp, MII_BMSR, &bmsr);
1256                                 if (bmsr & BMSR_LSTATUS) {
1257                                         udelay(40);
1258                                         break;
1259                                 }
1260                         }
1261
1262                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1263                             !(bmsr & BMSR_LSTATUS) &&
1264                             tp->link_config.active_speed == SPEED_1000) {
1265                                 err = tg3_phy_reset(tp, 1);
1266                                 if (!err)
1267                                         err = tg3_init_5401phy_dsp(tp);
1268                                 if (err)
1269                                         return err;
1270                         }
1271                 }
1272         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1273                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1274                 /* 5701 {A0,B0} CRC bug workaround */
1275                 tg3_writephy(tp, 0x15, 0x0a75);
1276                 tg3_writephy(tp, 0x1c, 0x8c68);
1277                 tg3_writephy(tp, 0x1c, 0x8d68);
1278                 tg3_writephy(tp, 0x1c, 0x8c68);
1279         }
1280
1281         /* Clear pending interrupts... */
1282         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1283         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1284
1285         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1286                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1287         else
1288                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1289
1290         if (tp->led_mode == led_mode_three_link)
1291                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
1292                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1293         else
1294                 tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1295
1296         current_link_up = 0;
1297         current_speed = SPEED_INVALID;
1298         current_duplex = DUPLEX_INVALID;
1299
1300         tg3_readphy(tp, MII_BMSR, &bmsr);
1301         tg3_readphy(tp, MII_BMSR, &bmsr);
1302
1303         if (bmsr & BMSR_LSTATUS) {
1304                 u32 aux_stat, bmcr;
1305
1306                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1307                 for (i = 0; i < 2000; i++) {
1308                         udelay(10);
1309                         tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1310                         if (aux_stat)
1311                                 break;
1312                 }
1313
1314                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1315                                              &current_speed,
1316                                              &current_duplex);
1317                 tg3_readphy(tp, MII_BMCR, &bmcr);
1318                 tg3_readphy(tp, MII_BMCR, &bmcr);
1319                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1320                         if (bmcr & BMCR_ANENABLE) {
1321                                 u32 gig_ctrl;
1322
1323                                 current_link_up = 1;
1324
1325                                 /* Force autoneg restart if we are exiting
1326                                  * low power mode.
1327                                  */
1328                                 tg3_readphy(tp, MII_TG3_CTRL, &gig_ctrl);
1329                                 if (!(gig_ctrl & (MII_TG3_CTRL_ADV_1000_HALF |
1330                                                   MII_TG3_CTRL_ADV_1000_FULL))) {
1331                                         current_link_up = 0;
1332                                 }
1333                         } else {
1334                                 current_link_up = 0;
1335                         }
1336                 } else {
1337                         if (!(bmcr & BMCR_ANENABLE) &&
1338                             tp->link_config.speed == current_speed &&
1339                             tp->link_config.duplex == current_duplex) {
1340                                 current_link_up = 1;
1341                         } else {
1342                                 current_link_up = 0;
1343                         }
1344                 }
1345
1346                 tp->link_config.active_speed = current_speed;
1347                 tp->link_config.active_duplex = current_duplex;
1348         }
1349
1350         if (current_link_up == 1 &&
1351             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1352             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1353                 u32 local_adv, remote_adv;
1354
1355                 tg3_readphy(tp, MII_ADVERTISE, &local_adv);
1356                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1357
1358                 tg3_readphy(tp, MII_LPA, &remote_adv);
1359                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1360
1361                 /* If we are not advertising full pause capability,
1362                  * something is wrong.  Bring the link down and reconfigure.
1363                  */
1364                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1365                         current_link_up = 0;
1366                 } else {
1367                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1368                 }
1369         }
1370
1371         if (current_link_up == 0) {
1372                 u32 tmp;
1373
1374                 tg3_phy_copper_begin(tp, 0);
1375
1376                 tg3_readphy(tp, MII_BMSR, &tmp);
1377                 tg3_readphy(tp, MII_BMSR, &tmp);
1378                 if (tmp & BMSR_LSTATUS)
1379                         current_link_up = 1;
1380         }
1381
1382         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1383         if (current_link_up == 1) {
1384                 if (tp->link_config.active_speed == SPEED_100 ||
1385                     tp->link_config.active_speed == SPEED_10)
1386                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1387                 else
1388                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1389         } else
1390                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1391
1392         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1393         if (tp->link_config.active_duplex == DUPLEX_HALF)
1394                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1395
1396         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1397         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1398                 if ((tp->led_mode == led_mode_link10) ||
1399                     (current_link_up == 1 &&
1400                      tp->link_config.active_speed == SPEED_10))
1401                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1402         } else {
1403                 if (current_link_up == 1)
1404                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1405                 tw32(MAC_LED_CTRL, LED_CTRL_PHY_MODE_1);
1406         }
1407
1408         /* ??? Without this setting Netgear GA302T PHY does not
1409          * ??? send/receive packets...
1410          */
1411         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1412             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1413                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1414                 tw32(MAC_MI_MODE, tp->mi_mode);
1415                 tr32(MAC_MI_MODE);
1416                 udelay(40);
1417         }
1418
1419         tw32(MAC_MODE, tp->mac_mode);
1420         tr32(MAC_MODE);
1421         udelay(40);
1422
1423         if (tp->tg3_flags &
1424             (TG3_FLAG_USE_LINKCHG_REG |
1425              TG3_FLAG_POLL_SERDES)) {
1426                 /* Polled via timer. */
1427                 tw32(MAC_EVENT, 0);
1428         } else {
1429                 tw32(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1430         }
1431         tr32(MAC_EVENT);
1432         udelay(40);
1433
1434         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1435             current_link_up == 1 &&
1436             tp->link_config.active_speed == SPEED_1000 &&
1437             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1438              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1439                 udelay(120);
1440                 tw32(MAC_STATUS,
1441                      (MAC_STATUS_SYNC_CHANGED |
1442                       MAC_STATUS_CFG_CHANGED));
1443                 tr32(MAC_STATUS);
1444                 udelay(40);
1445                 tg3_write_mem(tp,
1446                               NIC_SRAM_FIRMWARE_MBOX,
1447                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1448         }
1449
1450         if (current_link_up != netif_carrier_ok(tp->dev)) {
1451                 if (current_link_up)
1452                         netif_carrier_on(tp->dev);
1453                 else
1454                         netif_carrier_off(tp->dev);
1455                 tg3_link_report(tp);
1456         }
1457
1458         return 0;
1459 }
1460
1461 struct tg3_fiber_aneginfo {
1462         int state;
1463 #define ANEG_STATE_UNKNOWN              0
1464 #define ANEG_STATE_AN_ENABLE            1
1465 #define ANEG_STATE_RESTART_INIT         2
1466 #define ANEG_STATE_RESTART              3
1467 #define ANEG_STATE_DISABLE_LINK_OK      4
1468 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1469 #define ANEG_STATE_ABILITY_DETECT       6
1470 #define ANEG_STATE_ACK_DETECT_INIT      7
1471 #define ANEG_STATE_ACK_DETECT           8
1472 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1473 #define ANEG_STATE_COMPLETE_ACK         10
1474 #define ANEG_STATE_IDLE_DETECT_INIT     11
1475 #define ANEG_STATE_IDLE_DETECT          12
1476 #define ANEG_STATE_LINK_OK              13
1477 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1478 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1479
1480         u32 flags;
1481 #define MR_AN_ENABLE            0x00000001
1482 #define MR_RESTART_AN           0x00000002
1483 #define MR_AN_COMPLETE          0x00000004
1484 #define MR_PAGE_RX              0x00000008
1485 #define MR_NP_LOADED            0x00000010
1486 #define MR_TOGGLE_TX            0x00000020
1487 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1488 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1489 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1490 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1491 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1492 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1493 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1494 #define MR_TOGGLE_RX            0x00002000
1495 #define MR_NP_RX                0x00004000
1496
1497 #define MR_LINK_OK              0x80000000
1498
1499         unsigned long link_time, cur_time;
1500
1501         u32 ability_match_cfg;
1502         int ability_match_count;
1503
1504         char ability_match, idle_match, ack_match;
1505
1506         u32 txconfig, rxconfig;
1507 #define ANEG_CFG_NP             0x00000080
1508 #define ANEG_CFG_ACK            0x00000040
1509 #define ANEG_CFG_RF2            0x00000020
1510 #define ANEG_CFG_RF1            0x00000010
1511 #define ANEG_CFG_PS2            0x00000001
1512 #define ANEG_CFG_PS1            0x00008000
1513 #define ANEG_CFG_HD             0x00004000
1514 #define ANEG_CFG_FD             0x00002000
1515 #define ANEG_CFG_INVAL          0x00001f06
1516
1517 };
1518 #define ANEG_OK         0
1519 #define ANEG_DONE       1
1520 #define ANEG_TIMER_ENAB 2
1521 #define ANEG_FAILED     -1
1522
1523 #define ANEG_STATE_SETTLE_TIME  10000
1524
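/* Hand-rolled 1000BASE-X autoneg state machine for the SerDes interface.
 * It is driven from tg3_setup_fiber_phy(); a rough sketch of that polling
 * loop (illustrative only, the real loop appears later in this file):
 *
 *	aninfo.state = ANEG_STATE_UNKNOWN;
 *	while (++tick < 195000) {
 *		status = tg3_fiber_aneg_smachine(tp, &aninfo);
 *		if (status == ANEG_DONE || status == ANEG_FAILED)
 *			break;
 *		udelay(1);
 *	}
 *
 * ANEG_OK and ANEG_TIMER_ENAB both mean "keep ticking" (the latter is
 * returned when a freshly entered state must settle for
 * ANEG_STATE_SETTLE_TIME ticks), ANEG_DONE means negotiation finished and
 * the MR_* flags above are valid, and ANEG_FAILED aborts the attempt.
 */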
1525 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1526                                    struct tg3_fiber_aneginfo *ap)
1527 {
1528         unsigned long delta;
1529         u32 rx_cfg_reg;
1530         int ret;
1531
1532         if (ap->state == ANEG_STATE_UNKNOWN) {
1533                 ap->rxconfig = 0;
1534                 ap->link_time = 0;
1535                 ap->cur_time = 0;
1536                 ap->ability_match_cfg = 0;
1537                 ap->ability_match_count = 0;
1538                 ap->ability_match = 0;
1539                 ap->idle_match = 0;
1540                 ap->ack_match = 0;
1541         }
1542         ap->cur_time++;
1543
1544         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1545                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1546
1547                 if (rx_cfg_reg != ap->ability_match_cfg) {
1548                         ap->ability_match_cfg = rx_cfg_reg;
1549                         ap->ability_match = 0;
1550                         ap->ability_match_count = 0;
1551                 } else {
1552                         if (++ap->ability_match_count > 1) {
1553                                 ap->ability_match = 1;
1554                                 ap->ability_match_cfg = rx_cfg_reg;
1555                         }
1556                 }
1557                 if (rx_cfg_reg & ANEG_CFG_ACK)
1558                         ap->ack_match = 1;
1559                 else
1560                         ap->ack_match = 0;
1561
1562                 ap->idle_match = 0;
1563         } else {
1564                 ap->idle_match = 1;
1565                 ap->ability_match_cfg = 0;
1566                 ap->ability_match_count = 0;
1567                 ap->ability_match = 0;
1568                 ap->ack_match = 0;
1569
1570                 rx_cfg_reg = 0;
1571         }
1572
1573         ap->rxconfig = rx_cfg_reg;
1574         ret = ANEG_OK;
1575
1576         switch(ap->state) {
1577         case ANEG_STATE_UNKNOWN:
1578                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1579                         ap->state = ANEG_STATE_AN_ENABLE;
1580
1581                 /* fallthru */
1582         case ANEG_STATE_AN_ENABLE:
1583                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1584                 if (ap->flags & MR_AN_ENABLE) {
1585                         ap->link_time = 0;
1586                         ap->cur_time = 0;
1587                         ap->ability_match_cfg = 0;
1588                         ap->ability_match_count = 0;
1589                         ap->ability_match = 0;
1590                         ap->idle_match = 0;
1591                         ap->ack_match = 0;
1592
1593                         ap->state = ANEG_STATE_RESTART_INIT;
1594                 } else {
1595                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1596                 }
1597                 break;
1598
1599         case ANEG_STATE_RESTART_INIT:
1600                 ap->link_time = ap->cur_time;
1601                 ap->flags &= ~(MR_NP_LOADED);
1602                 ap->txconfig = 0;
1603                 tw32(MAC_TX_AUTO_NEG, 0);
1604                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1605                 tw32(MAC_MODE, tp->mac_mode);
1606                 tr32(MAC_MODE);
1607                 udelay(40);
1608
1609                 ret = ANEG_TIMER_ENAB;
1610                 ap->state = ANEG_STATE_RESTART;
1611
1612                 /* fallthru */
1613         case ANEG_STATE_RESTART:
1614                 delta = ap->cur_time - ap->link_time;
1615                 if (delta > ANEG_STATE_SETTLE_TIME) {
1616                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1617                 } else {
1618                         ret = ANEG_TIMER_ENAB;
1619                 }
1620                 break;
1621
1622         case ANEG_STATE_DISABLE_LINK_OK:
1623                 ret = ANEG_DONE;
1624                 break;
1625
1626         case ANEG_STATE_ABILITY_DETECT_INIT:
1627                 ap->flags &= ~(MR_TOGGLE_TX);
1628                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1629                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1630                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1631                 tw32(MAC_MODE, tp->mac_mode);
1632                 tr32(MAC_MODE);
1633                 udelay(40);
1634
1635                 ap->state = ANEG_STATE_ABILITY_DETECT;
1636                 break;
1637
1638         case ANEG_STATE_ABILITY_DETECT:
1639                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1640                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1641                 }
1642                 break;
1643
1644         case ANEG_STATE_ACK_DETECT_INIT:
1645                 ap->txconfig |= ANEG_CFG_ACK;
1646                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1647                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1648                 tw32(MAC_MODE, tp->mac_mode);
1649                 tr32(MAC_MODE);
1650                 udelay(40);
1651
1652                 ap->state = ANEG_STATE_ACK_DETECT;
1653
1654                 /* fallthru */
1655         case ANEG_STATE_ACK_DETECT:
1656                 if (ap->ack_match != 0) {
1657                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1658                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1659                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1660                         } else {
1661                                 ap->state = ANEG_STATE_AN_ENABLE;
1662                         }
1663                 } else if (ap->ability_match != 0 &&
1664                            ap->rxconfig == 0) {
1665                         ap->state = ANEG_STATE_AN_ENABLE;
1666                 }
1667                 break;
1668
1669         case ANEG_STATE_COMPLETE_ACK_INIT:
1670                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1671                         ret = ANEG_FAILED;
1672                         break;
1673                 }
1674                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1675                                MR_LP_ADV_HALF_DUPLEX |
1676                                MR_LP_ADV_SYM_PAUSE |
1677                                MR_LP_ADV_ASYM_PAUSE |
1678                                MR_LP_ADV_REMOTE_FAULT1 |
1679                                MR_LP_ADV_REMOTE_FAULT2 |
1680                                MR_LP_ADV_NEXT_PAGE |
1681                                MR_TOGGLE_RX |
1682                                MR_NP_RX);
1683                 if (ap->rxconfig & ANEG_CFG_FD)
1684                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1685                 if (ap->rxconfig & ANEG_CFG_HD)
1686                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1687                 if (ap->rxconfig & ANEG_CFG_PS1)
1688                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1689                 if (ap->rxconfig & ANEG_CFG_PS2)
1690                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1691                 if (ap->rxconfig & ANEG_CFG_RF1)
1692                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1693                 if (ap->rxconfig & ANEG_CFG_RF2)
1694                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1695                 if (ap->rxconfig & ANEG_CFG_NP)
1696                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1697
1698                 ap->link_time = ap->cur_time;
1699
1700                 ap->flags ^= (MR_TOGGLE_TX);
1701                 if (ap->rxconfig & 0x0008)
1702                         ap->flags |= MR_TOGGLE_RX;
1703                 if (ap->rxconfig & ANEG_CFG_NP)
1704                         ap->flags |= MR_NP_RX;
1705                 ap->flags |= MR_PAGE_RX;
1706
1707                 ap->state = ANEG_STATE_COMPLETE_ACK;
1708                 ret = ANEG_TIMER_ENAB;
1709                 break;
1710
1711         case ANEG_STATE_COMPLETE_ACK:
1712                 if (ap->ability_match != 0 &&
1713                     ap->rxconfig == 0) {
1714                         ap->state = ANEG_STATE_AN_ENABLE;
1715                         break;
1716                 }
1717                 delta = ap->cur_time - ap->link_time;
1718                 if (delta > ANEG_STATE_SETTLE_TIME) {
1719                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
1720                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1721                         } else {
1722                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
1723                                     !(ap->flags & MR_NP_RX)) {
1724                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
1725                                 } else {
1726                                         ret = ANEG_FAILED;
1727                                 }
1728                         }
1729                 }
1730                 break;
1731
1732         case ANEG_STATE_IDLE_DETECT_INIT:
1733                 ap->link_time = ap->cur_time;
1734                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1735                 tw32(MAC_MODE, tp->mac_mode);
1736                 tr32(MAC_MODE);
1737                 udelay(40);
1738
1739                 ap->state = ANEG_STATE_IDLE_DETECT;
1740                 ret = ANEG_TIMER_ENAB;
1741                 break;
1742
1743         case ANEG_STATE_IDLE_DETECT:
1744                 if (ap->ability_match != 0 &&
1745                     ap->rxconfig == 0) {
1746                         ap->state = ANEG_STATE_AN_ENABLE;
1747                         break;
1748                 }
1749                 delta = ap->cur_time - ap->link_time;
1750                 if (delta > ANEG_STATE_SETTLE_TIME) {
1751                         /* XXX another gem from the Broadcom driver :( */
1752                         ap->state = ANEG_STATE_LINK_OK;
1753                 }
1754                 break;
1755
1756         case ANEG_STATE_LINK_OK:
1757                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
1758                 ret = ANEG_DONE;
1759                 break;
1760
1761         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
1762                 /* ??? unimplemented */
1763                 break;
1764
1765         case ANEG_STATE_NEXT_PAGE_WAIT:
1766                 /* ??? unimplemented */
1767                 break;
1768
1769         default:
1770                 ret = ANEG_FAILED;
1771                 break;
1772         }
1773
1774         return ret;
1775 }
1776
1777 static int tg3_setup_fiber_phy(struct tg3 *tp)
1778 {
1779         u32 orig_pause_cfg;
1780         u16 orig_active_speed;
1781         u8 orig_active_duplex;
1782         int current_link_up;
1783         int i;
1784
1785         orig_pause_cfg =
1786                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
1787                                   TG3_FLAG_TX_PAUSE));
1788         orig_active_speed = tp->link_config.active_speed;
1789         orig_active_duplex = tp->link_config.active_duplex;
1790
1791         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
1792         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
1793         tw32(MAC_MODE, tp->mac_mode);
1794         tr32(MAC_MODE);
1795         udelay(40);
1796
1797         /* Reset when initializing for the first time or when we have a link. */
1798         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
1799             (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
1800                 /* Set PLL lock range. */
1801                 tg3_writephy(tp, 0x16, 0x8007);
1802
1803                 /* SW reset */
1804                 tg3_writephy(tp, MII_BMCR, BMCR_RESET);
1805
1806                 /* Wait for reset to complete. */
1807                 /* XXX schedule_timeout() ... */
1808                 for (i = 0; i < 500; i++)
1809                         udelay(10);
1810
1811                 /* Config mode; select PMA/Ch 1 regs. */
1812                 tg3_writephy(tp, 0x10, 0x8411);
1813
1814                 /* Enable auto-lock and comdet, select txclk for tx. */
1815                 tg3_writephy(tp, 0x11, 0x0a10);
1816
1817                 tg3_writephy(tp, 0x18, 0x00a0);
1818                 tg3_writephy(tp, 0x16, 0x41ff);
1819
1820                 /* Assert and deassert POR. */
1821                 tg3_writephy(tp, 0x13, 0x0400);
1822                 udelay(40);
1823                 tg3_writephy(tp, 0x13, 0x0000);
1824
1825                 tg3_writephy(tp, 0x11, 0x0a50);
1826                 udelay(40);
1827                 tg3_writephy(tp, 0x11, 0x0a10);
1828
1829                 /* Wait for signal to stabilize */
1830                 /* XXX schedule_timeout() ... */
1831                 for (i = 0; i < 15000; i++)
1832                         udelay(10);
1833
1834                 /* Deselect the channel register so we can read the PHYID
1835                  * later.
1836                  */
1837                 tg3_writephy(tp, 0x10, 0x8011);
1838         }
1839
1840         /* Enable link change interrupt unless serdes polling.  */
1841         if (!(tp->tg3_flags & TG3_FLAG_POLL_SERDES))
1842                 tw32(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1843         else
1844                 tw32(MAC_EVENT, 0);
1845         tr32(MAC_EVENT);
1846         udelay(40);
1847
1848         current_link_up = 0;
1849         if (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) {
1850                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
1851                     !(tp->tg3_flags & TG3_FLAG_GOT_SERDES_FLOWCTL)) {
1852                         struct tg3_fiber_aneginfo aninfo;
1853                         int status = ANEG_FAILED;
1854                         unsigned int tick;
1855                         u32 tmp;
1856
1857                         memset(&aninfo, 0, sizeof(aninfo));
1858                         aninfo.flags |= (MR_AN_ENABLE);
1859
1860                         tw32(MAC_TX_AUTO_NEG, 0);
1861
1862                         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
1863                         tw32(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
1864                         tr32(MAC_MODE);
1865                         udelay(40);
1866
1867                         tw32(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
1868                         tr32(MAC_MODE);
1869                         udelay(40);
1870
1871                         aninfo.state = ANEG_STATE_UNKNOWN;
1872                         aninfo.cur_time = 0;
1873                         tick = 0;
1874                         while (++tick < 195000) {
1875                                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
1876                                 if (status == ANEG_DONE ||
1877                                     status == ANEG_FAILED)
1878                                         break;
1879
1880                                 udelay(1);
1881                         }
1882
1883                         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
1884                         tw32(MAC_MODE, tp->mac_mode);
1885                         tr32(MAC_MODE);
1886                         udelay(40);
1887
1888                         if (status == ANEG_DONE &&
1889                             (aninfo.flags &
1890                              (MR_AN_COMPLETE | MR_LINK_OK |
1891                               MR_LP_ADV_FULL_DUPLEX))) {
1892                                 u32 local_adv, remote_adv;
1893
1894                                 local_adv = ADVERTISE_PAUSE_CAP;
1895                                 remote_adv = 0;
1896                                 if (aninfo.flags & MR_LP_ADV_SYM_PAUSE)
1897                                         remote_adv |= LPA_PAUSE_CAP;
1898                                 if (aninfo.flags & MR_LP_ADV_ASYM_PAUSE)
1899                                         remote_adv |= LPA_PAUSE_ASYM;
1900
1901                                 tg3_setup_flow_control(tp, local_adv, remote_adv);
1902
1903                                 tp->tg3_flags |=
1904                                         TG3_FLAG_GOT_SERDES_FLOWCTL;
1905                                 current_link_up = 1;
1906                         }
1907                         for (i = 0; i < 60; i++) {
1908                                 udelay(20);
1909                                 tw32(MAC_STATUS,
1910                                      (MAC_STATUS_SYNC_CHANGED |
1911                                       MAC_STATUS_CFG_CHANGED));
1912                                 tr32(MAC_STATUS);
1913                                 udelay(40);
1914                                 if ((tr32(MAC_STATUS) &
1915                                      (MAC_STATUS_SYNC_CHANGED |
1916                                       MAC_STATUS_CFG_CHANGED)) == 0)
1917                                         break;
1918                         }
1919                         if (current_link_up == 0 &&
1920                             (tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED)) {
1921                                 current_link_up = 1;
1922                         }
1923                 } else {
1924                         /* Forcing 1000FD link up. */
1925                         current_link_up = 1;
1926                 }
1927         }
1928
1929         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1930         tw32(MAC_MODE, tp->mac_mode);
1931         tr32(MAC_MODE);
1932         udelay(40);
1933
1934         tp->hw_status->status =
1935                 (SD_STATUS_UPDATED |
1936                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
1937
1938         for (i = 0; i < 100; i++) {
1939                 udelay(20);
1940                 tw32(MAC_STATUS,
1941                      (MAC_STATUS_SYNC_CHANGED |
1942                       MAC_STATUS_CFG_CHANGED));
1943                 tr32(MAC_STATUS);
1944                 udelay(40);
1945                 if ((tr32(MAC_STATUS) &
1946                      (MAC_STATUS_SYNC_CHANGED |
1947                       MAC_STATUS_CFG_CHANGED)) == 0)
1948                         break;
1949         }
1950
1951         if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0)
1952                 current_link_up = 0;
1953
1954         if (current_link_up == 1) {
1955                 tp->link_config.active_speed = SPEED_1000;
1956                 tp->link_config.active_duplex = DUPLEX_FULL;
1957         } else {
1958                 tp->link_config.active_speed = SPEED_INVALID;
1959                 tp->link_config.active_duplex = DUPLEX_INVALID;
1960         }
1961
1962         if (current_link_up != netif_carrier_ok(tp->dev)) {
1963                 if (current_link_up)
1964                         netif_carrier_on(tp->dev);
1965                 else
1966                         netif_carrier_off(tp->dev);
1967                 tg3_link_report(tp);
1968         } else {
1969                 u32 now_pause_cfg =
1970                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
1971                                          TG3_FLAG_TX_PAUSE);
1972                 if (orig_pause_cfg != now_pause_cfg ||
1973                     orig_active_speed != tp->link_config.active_speed ||
1974                     orig_active_duplex != tp->link_config.active_duplex)
1975                         tg3_link_report(tp);
1976         }
1977
1978         if ((tr32(MAC_STATUS) & MAC_STATUS_PCS_SYNCED) == 0) {
1979                 tw32(MAC_MODE, tp->mac_mode | MAC_MODE_LINK_POLARITY);
1980                 tr32(MAC_MODE);
1981                 udelay(40);
1982                 if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
1983                         tw32(MAC_MODE, tp->mac_mode);
1984                         tr32(MAC_MODE);
1985                         udelay(40);
1986                 }
1987         }
1988
1989         return 0;
1990 }
1991
1992 static int tg3_setup_phy(struct tg3 *tp)
1993 {
1994         int err;
1995
1996         if (tp->phy_id == PHY_ID_SERDES) {
1997                 err = tg3_setup_fiber_phy(tp);
1998         } else {
1999                 err = tg3_setup_copper_phy(tp);
2000         }
2001
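	/* The larger slot-time value (0xff vs. 32) presumably corresponds to
	 * the carrier-extended slot that half-duplex gigabit requires; all
	 * other speed/duplex combinations use the normal slot time.
	 */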
2002         if (tp->link_config.active_speed == SPEED_1000 &&
2003             tp->link_config.active_duplex == DUPLEX_HALF)
2004                 tw32(MAC_TX_LENGTHS,
2005                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2006                       (6 << TX_LENGTHS_IPG_SHIFT) |
2007                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2008         else
2009                 tw32(MAC_TX_LENGTHS,
2010                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2011                       (6 << TX_LENGTHS_IPG_SHIFT) |
2012                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2013
2014         return err;
2015 }
2016
2017 /* Tigon3 never reports partial packet sends.  So we do not
2018  * need special logic to handle SKBs that have not had all
2019  * of their frags sent yet, like SunGEM does.
2020  */
2021 static void tg3_tx(struct tg3 *tp)
2022 {
2023         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2024         u32 sw_idx = tp->tx_cons;
2025
2026         while (sw_idx != hw_idx) {
2027                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2028                 struct sk_buff *skb = ri->skb;
2029                 int i;
2030
2031                 if (unlikely(skb == NULL))
2032                         BUG();
2033
2034                 pci_unmap_single(tp->pdev,
2035                                  pci_unmap_addr(ri, mapping),
2036                                  skb_headlen(skb),
2037                                  PCI_DMA_TODEVICE);
2038
2039                 ri->skb = NULL;
2040
2041                 sw_idx = NEXT_TX(sw_idx);
2042
2043                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2044                         if (unlikely(sw_idx == hw_idx))
2045                                 BUG();
2046
2047                         ri = &tp->tx_buffers[sw_idx];
2048                         if (unlikely(ri->skb != NULL))
2049                                 BUG();
2050
2051                         pci_unmap_page(tp->pdev,
2052                                        pci_unmap_addr(ri, mapping),
2053                                        skb_shinfo(skb)->frags[i].size,
2054                                        PCI_DMA_TODEVICE);
2055
2056                         sw_idx = NEXT_TX(sw_idx);
2057                 }
2058
2059                 dev_kfree_skb_irq(skb);
2060         }
2061
2062         tp->tx_cons = sw_idx;
2063
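	/* Wake the queue only if it had been stopped and enough descriptors
	 * have now been reclaimed (TX_BUFFS_AVAIL above the wakeup threshold)
	 * that the xmit path will not stop it again immediately.
	 */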
2064         if (netif_queue_stopped(tp->dev) &&
2065             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2066                 netif_wake_queue(tp->dev);
2067 }
2068
2069 /* Returns size of skb allocated or < 0 on error.
2070  *
2071  * We only need to fill in the address because the other members
2072  * of the RX descriptor are invariant, see tg3_init_rings.
2073  *
2074  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2075  * posting buffers we only dirty the first cache line of the RX
2076  * descriptor (containing the address).  Whereas for the RX status
2077  * buffers the cpu only reads the last cacheline of the RX descriptor
2078  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2079  */
2080 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2081                             int src_idx, u32 dest_idx_unmasked)
2082 {
2083         struct tg3_rx_buffer_desc *desc;
2084         struct ring_info *map, *src_map;
2085         struct sk_buff *skb;
2086         dma_addr_t mapping;
2087         int skb_size, dest_idx;
2088
2089         src_map = NULL;
2090         switch (opaque_key) {
2091         case RXD_OPAQUE_RING_STD:
2092                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2093                 desc = &tp->rx_std[dest_idx];
2094                 map = &tp->rx_std_buffers[dest_idx];
2095                 if (src_idx >= 0)
2096                         src_map = &tp->rx_std_buffers[src_idx];
2097                 skb_size = RX_PKT_BUF_SZ;
2098                 break;
2099
2100         case RXD_OPAQUE_RING_JUMBO:
2101                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2102                 desc = &tp->rx_jumbo[dest_idx];
2103                 map = &tp->rx_jumbo_buffers[dest_idx];
2104                 if (src_idx >= 0)
2105                         src_map = &tp->rx_jumbo_buffers[src_idx];
2106                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2107                 break;
2108
2109         default:
2110                 return -EINVAL;
2111         }
2112
2113         /* Do not overwrite any of the map or rp information
2114          * until we are sure we can commit to a new buffer.
2115          *
2116          * Callers depend upon this behavior and assume that
2117          * we leave everything unchanged if we fail.
2118          */
2119         skb = dev_alloc_skb(skb_size);
2120         if (skb == NULL)
2121                 return -ENOMEM;
2122
2123         skb->dev = tp->dev;
2124         skb_reserve(skb, tp->rx_offset);
2125
2126         mapping = pci_map_single(tp->pdev, skb->data,
2127                                  skb_size - tp->rx_offset,
2128                                  PCI_DMA_FROMDEVICE);
2129
2130         map->skb = skb;
2131         pci_unmap_addr_set(map, mapping, mapping);
2132
2133         if (src_map != NULL)
2134                 src_map->skb = NULL;
2135
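	/* The descriptor takes the DMA address as two 32-bit halves; only
	 * this first cache line of the descriptor is dirtied when posting a
	 * buffer (see the comment above this function).
	 */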
2136         desc->addr_hi = ((u64)mapping >> 32);
2137         desc->addr_lo = ((u64)mapping & 0xffffffff);
2138
2139         return skb_size;
2140 }
2141
2142 /* We only need to copy over the address because the other
2143  * members of the RX descriptor are invariant.  See notes above
2144  * tg3_alloc_rx_skb for full details.
2145  */
2146 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2147                            int src_idx, u32 dest_idx_unmasked)
2148 {
2149         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2150         struct ring_info *src_map, *dest_map;
2151         int dest_idx;
2152
2153         switch (opaque_key) {
2154         case RXD_OPAQUE_RING_STD:
2155                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2156                 dest_desc = &tp->rx_std[dest_idx];
2157                 dest_map = &tp->rx_std_buffers[dest_idx];
2158                 src_desc = &tp->rx_std[src_idx];
2159                 src_map = &tp->rx_std_buffers[src_idx];
2160                 break;
2161
2162         case RXD_OPAQUE_RING_JUMBO:
2163                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2164                 dest_desc = &tp->rx_jumbo[dest_idx];
2165                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2166                 src_desc = &tp->rx_jumbo[src_idx];
2167                 src_map = &tp->rx_jumbo_buffers[src_idx];
2168                 break;
2169
2170         default:
2171                 return;
2172         }
2173
2174         dest_map->skb = src_map->skb;
2175         pci_unmap_addr_set(dest_map, mapping,
2176                            pci_unmap_addr(src_map, mapping));
2177         dest_desc->addr_hi = src_desc->addr_hi;
2178         dest_desc->addr_lo = src_desc->addr_lo;
2179
2180         src_map->skb = NULL;
2181 }
2182
2183 #if TG3_VLAN_TAG_USED
2184 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2185 {
2186         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2187 }
2188 #endif
2189
2190 /* The RX ring scheme is composed of multiple rings which post fresh
2191  * buffers to the chip, and one special ring the chip uses to report
2192  * status back to the host.
2193  *
2194  * The special ring reports the status of received packets to the
2195  * host.  The chip does not write into the original descriptor the
2196  * RX buffer was obtained from.  The chip simply takes the original
2197  * descriptor as provided by the host, updates the status and length
2198  * field, then writes this into the next status ring entry.
2199  *
2200  * Each ring the host uses to post buffers to the chip is described
2201  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
2202  * it is first placed into the on-chip RAM.  When the packet's length
2203  * is known, the chip walks down the TG3_BDINFO entries to select the ring.
2204  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
2205  * whose MAXLEN covers the new packet's length is chosen.
2206  *
2207  * The "separate ring for rx status" scheme may sound queer, but it makes
2208  * sense from a cache coherency perspective.  If only the host writes
2209  * to the buffer post rings, and only the chip writes to the rx status
2210  * rings, then cache lines never move beyond shared-modified state.
2211  * If both the host and chip were to write into the same ring, cache line
2212  * eviction could occur since both entities want it in an exclusive state.
2213  */
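/* A minimal sketch of how the status ring is consumed below (illustrative
 * only, using the same names as the function):
 *
 *	hw_idx = tp->hw_status->idx[0].rx_producer;	(written by the chip)
 *	sw_idx = tp->rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
 *	while (sw_idx != hw_idx && budget > 0)
 *		... process tp->rx_rcb[sw_idx], advance sw_idx ...
 *
 * The consumer index is then acknowledged back to the chip through the
 * MAILBOX_RCVRET_CON_IDX_0 mailbox, and any rings we took buffers from are
 * refilled via their producer mailboxes.
 */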
2214 static int tg3_rx(struct tg3 *tp, int budget)
2215 {
2216         u32 work_mask;
2217         u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2218         u16 hw_idx, sw_idx;
2219         int received;
2220
2221         hw_idx = tp->hw_status->idx[0].rx_producer;
2222         sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2223         work_mask = 0;
2224         received = 0;
2225         while (sw_idx != hw_idx && budget > 0) {
2226                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2227                 unsigned int len;
2228                 struct sk_buff *skb;
2229                 dma_addr_t dma_addr;
2230                 u32 opaque_key, desc_idx, *post_ptr;
2231
2232                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2233                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2234                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2235                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2236                                                   mapping);
2237                         skb = tp->rx_std_buffers[desc_idx].skb;
2238                         post_ptr = &tp->rx_std_ptr;
2239                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2240                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2241                                                   mapping);
2242                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2243                         post_ptr = &tp->rx_jumbo_ptr;
2244                 }
2245                 else {
2246                         goto next_pkt_nopost;
2247                 }
2248
2249                 work_mask |= opaque_key;
2250
2251                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2252                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2253                 drop_it:
2254                         tg3_recycle_rx(tp, opaque_key,
2255                                        desc_idx, *post_ptr);
2256                 drop_it_no_recycle:
2257                         /* Other statistics are kept track of by the card. */
2258                         tp->net_stats.rx_dropped++;
2259                         goto next_pkt;
2260                 }
2261
2262                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2263
2264                 if (len > RX_COPY_THRESHOLD) {
2265                         int skb_size;
2266
2267                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2268                                                     desc_idx, *post_ptr);
2269                         if (skb_size < 0)
2270                                 goto drop_it;
2271
2272                         pci_unmap_single(tp->pdev, dma_addr,
2273                                          skb_size - tp->rx_offset,
2274                                          PCI_DMA_FROMDEVICE);
2275
2276                         skb_put(skb, len);
2277                 } else {
2278                         struct sk_buff *copy_skb;
2279
2280                         tg3_recycle_rx(tp, opaque_key,
2281                                        desc_idx, *post_ptr);
2282
2283                         copy_skb = dev_alloc_skb(len + 2);
2284                         if (copy_skb == NULL)
2285                                 goto drop_it_no_recycle;
2286
2287                         copy_skb->dev = tp->dev;
2288                         skb_reserve(copy_skb, 2);
2289                         skb_put(copy_skb, len);
2290                         pci_dma_sync_single(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2291                         memcpy(copy_skb->data, skb->data, len);
2292
2293                         /* We'll reuse the original ring buffer. */
2294                         skb = copy_skb;
2295                 }
2296
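		/* The chip reports 0xffff in the descriptor csum field when
		 * the TCP/UDP checksum verified in hardware; anything else
		 * leaves verification to the stack (CHECKSUM_NONE).
		 */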
2297                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2298                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2299                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2300                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2301                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2302                 else
2303                         skb->ip_summed = CHECKSUM_NONE;
2304
2305                 skb->protocol = eth_type_trans(skb, tp->dev);
2306 #if TG3_VLAN_TAG_USED
2307                 if (tp->vlgrp != NULL &&
2308                     desc->type_flags & RXD_FLAG_VLAN) {
2309                         tg3_vlan_rx(tp, skb,
2310                                     desc->err_vlan & RXD_VLAN_MASK);
2311                 } else
2312 #endif
2313                         netif_receive_skb(skb);
2314
2315                 tp->dev->last_rx = jiffies;
2316                 received++;
2317                 budget--;
2318
2319 next_pkt:
2320                 (*post_ptr)++;
2321 next_pkt_nopost:
2322                 rx_rcb_ptr++;
2323                 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2324         }
2325
2326         /* ACK the status ring. */
2327         tp->rx_rcb_ptr = rx_rcb_ptr;
2328         tw32_mailbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2329                      (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2330         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2331                 tr32(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW);
2332
2333         /* Refill RX ring(s). */
2334         if (work_mask & RXD_OPAQUE_RING_STD) {
2335                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2336                 tw32_mailbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2337                              sw_idx);
2338                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2339                         tr32(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW);
2340         }
2341         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2342                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2343                 tw32_mailbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2344                              sw_idx);
2345                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2346                         tr32(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW);
2347         }
2348
2349         return received;
2350 }
2351
2352 static int tg3_poll(struct net_device *netdev, int *budget)
2353 {
2354         struct tg3 *tp = netdev->priv;
2355         struct tg3_hw_status *sblk = tp->hw_status;
2356         unsigned long flags;
2357         int done;
2358
2359         spin_lock_irqsave(&tp->lock, flags);
2360
2361         /* handle link change and other phy events */
2362         if (!(tp->tg3_flags &
2363               (TG3_FLAG_USE_LINKCHG_REG |
2364                TG3_FLAG_POLL_SERDES))) {
2365                 if (sblk->status & SD_STATUS_LINK_CHG) {
2366                         sblk->status = SD_STATUS_UPDATED |
2367                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2368                         tg3_setup_phy(tp);
2369                 }
2370         }
2371
2372         /* run TX completion thread */
2373         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2374                 spin_lock(&tp->tx_lock);
2375                 tg3_tx(tp);
2376                 spin_unlock(&tp->tx_lock);
2377         }
2378
2379         spin_unlock_irqrestore(&tp->lock, flags);
2380
2381         /* run RX thread, within the bounds set by NAPI.
2382          * All RX "locking" is done by ensuring outside
2383          * code synchronizes with dev->poll()
2384          */
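	/* "done" stays set unless tg3_rx() consumes its entire budget, in
	 * which case more packets may be pending and NAPI must poll again.
	 */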
2385         done = 1;
2386         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2387                 int orig_budget = *budget;
2388                 int work_done;
2389
2390                 if (orig_budget > netdev->quota)
2391                         orig_budget = netdev->quota;
2392
2393                 work_done = tg3_rx(tp, orig_budget);
2394
2395                 *budget -= work_done;
2396                 netdev->quota -= work_done;
2397
2398                 if (work_done >= orig_budget)
2399                         done = 0;
2400         }
2401
2402         /* if no more work, tell net stack and NIC we're done */
2403         if (done) {
2404                 spin_lock_irqsave(&tp->lock, flags);
2405                 __netif_rx_complete(netdev);
2406                 tg3_enable_ints(tp);
2407                 spin_unlock_irqrestore(&tp->lock, flags);
2408         }
2409
2410         return (done ? 0 : 1);
2411 }
2412
2413 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2414 {
2415         struct tg3_hw_status *sblk = tp->hw_status;
2416         unsigned int work_exists = 0;
2417
2418         /* check for phy events */
2419         if (!(tp->tg3_flags &
2420               (TG3_FLAG_USE_LINKCHG_REG |
2421                TG3_FLAG_POLL_SERDES))) {
2422                 if (sblk->status & SD_STATUS_LINK_CHG)
2423                         work_exists = 1;
2424         }
2425         /* check for RX/TX work to do */
2426         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2427             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2428                 work_exists = 1;
2429
2430         return work_exists;
2431 }
2432
2433 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2434 {
2435         struct net_device *dev = dev_id;
2436         struct tg3 *tp = dev->priv;
2437         struct tg3_hw_status *sblk = tp->hw_status;
2438         unsigned long flags;
2439         unsigned int handled = 1;
2440
2441         spin_lock_irqsave(&tp->lock, flags);
2442
2443         if (sblk->status & SD_STATUS_UPDATED) {
2444                 /*
2445                  * writing any value to intr-mbox-0 clears PCI INTA# and
2446                  * chip-internal interrupt pending events.
2447                  * writing non-zero to intr-mbox-0 additionally tells the
2448                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2449                  * event coalescing.
2450                  */
2451                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2452                              0x00000001);
2453                 /*
2454                  * Flush PCI write.  This also guarantees that our
2455                  * status block has been flushed to host memory.
2456                  */
2457                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2458                 sblk->status &= ~SD_STATUS_UPDATED;
2459
2460                 if (likely(tg3_has_work(dev, tp)))
2461                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2462                 else {
2463                         /* no work, shared interrupt perhaps?  re-enable
2464                          * interrupts, and flush that PCI write
2465                          */
2466                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2467                                 0x00000000);
2468                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2469                 }
2470         } else {        /* shared interrupt */
2471                 handled = 0;
2472         }
2473
2474         spin_unlock_irqrestore(&tp->lock, flags);
2475
2476         return IRQ_RETVAL(handled);
2477 }
2478
2479 static int tg3_init_hw(struct tg3 *);
2480 static int tg3_halt(struct tg3 *);
2481
2482 static void tg3_reset_task(void *_data)
2483 {
2484         struct tg3 *tp = _data;
2485         unsigned int restart_timer;
2486
2487         tg3_netif_stop(tp);
2488
2489         spin_lock_irq(&tp->lock);
2490         spin_lock(&tp->tx_lock);
2491
2492         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2493         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2494
2495         tg3_halt(tp);
2496         tg3_init_hw(tp);
2497
2498         spin_unlock(&tp->tx_lock);
2499         spin_unlock_irq(&tp->lock);
2500
2501         tg3_netif_start(tp);
2502
2503         if (restart_timer)
2504                 mod_timer(&tp->timer, jiffies + 1);
2505 }
2506
2507 static void tg3_tx_timeout(struct net_device *dev)
2508 {
2509         struct tg3 *tp = dev->priv;
2510
2511         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2512                dev->name);
2513
2514         schedule_work(&tp->reset_task);
2515 }
2516
2517 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
2518
2519 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
2520                                        u32 guilty_entry, int guilty_len,
2521                                        u32 last_plus_one, u32 *start, u32 mss)
2522 {
2523         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
2524         dma_addr_t new_addr;
2525         u32 entry = *start;
2526         int i;
2527
2528         if (!new_skb) {
2529                 dev_kfree_skb(skb);
2530                 return -1;
2531         }
2532
2533         /* New SKB is guaranteed to be linear. */
2534         entry = *start;
2535         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
2536                                   PCI_DMA_TODEVICE);
2537         tg3_set_txd(tp, entry, new_addr, new_skb->len,
2538                     (skb->ip_summed == CHECKSUM_HW) ?
2539                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
2540         *start = NEXT_TX(entry);
2541
2542         /* Now clean up the sw ring entries. */
2543         i = 0;
2544         while (entry != last_plus_one) {
2545                 int len;
2546
2547                 if (i == 0)
2548                         len = skb_headlen(skb);
2549                 else
2550                         len = skb_shinfo(skb)->frags[i-1].size;
2551                 pci_unmap_single(tp->pdev,
2552                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
2553                                  len, PCI_DMA_TODEVICE);
2554                 if (i == 0) {
2555                         tp->tx_buffers[entry].skb = new_skb;
2556                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
2557                 } else {
2558                         tp->tx_buffers[entry].skb = NULL;
2559                 }
2560                 entry = NEXT_TX(entry);
2561         }
2562
2563         dev_kfree_skb(skb);
2564
2565         return 0;
2566 }
2567
2568 static void tg3_set_txd(struct tg3 *tp, int entry,
2569                         dma_addr_t mapping, int len, u32 flags,
2570                         u32 mss_and_is_end)
2571 {
2572         int is_end = (mss_and_is_end & 0x1);
2573         u32 mss = (mss_and_is_end >> 1);
2574         u32 vlan_tag = 0;
2575
2576         if (is_end)
2577                 flags |= TXD_FLAG_END;
2578         if (flags & TXD_FLAG_VLAN) {
2579                 vlan_tag = flags >> 16;
2580                 flags &= 0xffff;
2581         }
2582         vlan_tag |= (mss << TXD_MSS_SHIFT);
2583         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2584                 struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
2585
2586                 txd->addr_hi = ((u64) mapping >> 32);
2587                 txd->addr_lo = ((u64) mapping & 0xffffffff);
2588                 txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
2589                 txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
2590         } else {
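		/* Here the TX descriptors live in NIC SRAM, so the descriptor
		 * is built with PIO writes through the SRAM window rather
		 * than a host-memory store; prev_vlan_tag caches the last
		 * value written so an unchanged tag skips one PIO.
		 */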
2591                 struct tx_ring_info *txr = &tp->tx_buffers[entry];
2592                 unsigned long txd;
2593
2594                 txd = (tp->regs +
2595                        NIC_SRAM_WIN_BASE +
2596                        NIC_SRAM_TX_BUFFER_DESC);
2597                 txd += (entry * TXD_SIZE);
2598
2599                 /* Save some PIOs */
2600                 if (sizeof(dma_addr_t) != sizeof(u32))
2601                         writel(((u64) mapping >> 32),
2602                                txd + TXD_ADDR + TG3_64BIT_REG_HIGH);
2603
2604                 writel(((u64) mapping & 0xffffffff),
2605                        txd + TXD_ADDR + TG3_64BIT_REG_LOW);
2606                 writel(len << TXD_LEN_SHIFT | flags, txd + TXD_LEN_FLAGS);
2607                 if (txr->prev_vlan_tag != vlan_tag) {
2608                         writel(vlan_tag << TXD_VLAN_TAG_SHIFT, txd + TXD_VLAN_TAG);
2609                         txr->prev_vlan_tag = vlan_tag;
2610                 }
2611         }
2612 }
2613
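/* Test for the 4GB-boundary DMA hardware bug worked around below: returns
 * nonzero when a buffer mapped entirely below 4GB would wrap the low 32
 * bits of its address (the +8 leaves a little slack), in which case the
 * frame must be linearized into a freshly allocated buffer instead.
 */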
2614 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
2615 {
2616         u32 base = (u32) mapping & 0xffffffff;
2617
2618         return ((base > 0xffffdcc0) &&
2619                 ((u64) mapping >> 32) == 0 &&
2620                 (base + len + 8 < base));
2621 }
2622
2623 static int tg3_start_xmit_4gbug(struct sk_buff *skb, struct net_device *dev)
2624 {
2625         struct tg3 *tp = dev->priv;
2626         dma_addr_t mapping;
2627         unsigned int i;
2628         u32 len, entry, base_flags, mss;
2629         int would_hit_hwbug;
2630         unsigned long flags;
2631
2632         len = skb_headlen(skb);
2633
2634         /* No BH disabling for tx_lock here.  We are running in BH disabled
2635          * context and TX reclaim runs via tp->poll inside of a software
2636          * interrupt.  Rejoice!
2637          *
2638          * Actually, things are not so simple.  If we are to take a hw
2639          * IRQ here, we can deadlock, consider:
2640          *
2641          *       CPU1           CPU2
2642          *   tg3_start_xmit
2643          *   take tp->tx_lock
2644          *                      tg3_timer
2645          *                      take tp->lock
2646          *   tg3_interrupt
2647          *   spin on tp->lock
2648          *                      spin on tp->tx_lock
2649          *
2650          * So we really do need to disable interrupts when taking
2651          * tx_lock here.
2652          */
2653         spin_lock_irqsave(&tp->tx_lock, flags);
2654
2655         /* This is a hard error, log it. */
2656         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
2657                 netif_stop_queue(dev);
2658                 spin_unlock_irqrestore(&tp->tx_lock, flags);
2659                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
2660                        dev->name);
2661                 return 1;
2662         }
2663
2664         entry = tp->tx_prod;
2665         base_flags = 0;
2666         if (skb->ip_summed == CHECKSUM_HW)
2667                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
2668 #if TG3_TSO_SUPPORT != 0
2669         mss = 0;
2670         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
2671             (mss = skb_shinfo(skb)->tso_size) != 0) {
2672                 int tcp_opt_len, ip_tcp_len;
2673
2674                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
2675                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
2676
2677                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
2678                                TXD_FLAG_CPU_POST_DMA);
2679
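		/* TSO checksum prep: the chip fills in the per-segment IP and
		 * TCP checksums, so clear iph->check, set tot_len to the
		 * per-segment length, and seed the TCP checksum with the
		 * pseudo-header sum computed without the length field.
		 */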
2680                 skb->nh.iph->check = 0;
2681                 skb->nh.iph->tot_len = htons(mss + ip_tcp_len + tcp_opt_len);
2682                 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
2683                                                       skb->nh.iph->daddr,
2684                                                       0, IPPROTO_TCP, 0);
2685
2686                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2687                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2688                                 int tsflags;
2689
2690                                 tsflags = ((skb->nh.iph->ihl - 5) +
2691                                            (tcp_opt_len >> 2));
2692                                 mss |= (tsflags << 11);
2693                         }
2694                 } else {
2695                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2696                                 int tsflags;
2697
2698                                 tsflags = ((skb->nh.iph->ihl - 5) +
2699                                            (tcp_opt_len >> 2));
2700                                 base_flags |= tsflags << 12;
2701                         }
2702                 }
2703         }
2704 #else
2705         mss = 0;
2706 #endif
2707 #if TG3_VLAN_TAG_USED
2708         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
2709                 base_flags |= (TXD_FLAG_VLAN |
2710                                (vlan_tx_tag_get(skb) << 16));
2711 #endif
2712
2713         /* Queue skb data, a.k.a. the main skb fragment. */
2714         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
2715
2716         tp->tx_buffers[entry].skb = skb;
2717         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2718
2719         would_hit_hwbug = 0;
2720
2721         if (tg3_4g_overflow_test(mapping, len))
2722                 would_hit_hwbug = entry + 1;
2723
2724         tg3_set_txd(tp, entry, mapping, len, base_flags,
2725                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
2726
2727         entry = NEXT_TX(entry);
2728
2729         /* Now loop through additional data fragments, and queue them. */
2730         if (skb_shinfo(skb)->nr_frags > 0) {
2731                 unsigned int i, last;
2732
2733                 last = skb_shinfo(skb)->nr_frags - 1;
2734                 for (i = 0; i <= last; i++) {
2735                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2736
2737                         len = frag->size;
2738                         mapping = pci_map_page(tp->pdev,
2739                                                frag->page,
2740                                                frag->page_offset,
2741                                                len, PCI_DMA_TODEVICE);
2742
2743                         tp->tx_buffers[entry].skb = NULL;
2744                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2745
2746                         if (tg3_4g_overflow_test(mapping, len)) {
2747                                 /* Only one should match. */
2748                                 if (would_hit_hwbug)
2749                                         BUG();
2750                                 would_hit_hwbug = entry + 1;
2751                         }
2752
2753                         tg3_set_txd(tp, entry, mapping, len,
2754                                     base_flags, (i == last));
2755
2756                         entry = NEXT_TX(entry);
2757                 }
2758         }
2759
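	/* would_hit_hwbug holds the offending ring index plus one, so zero
	 * means no buffer crossed the 4GB boundary.  Otherwise walk back to
	 * the start of this frame, locate the guilty entry and its length,
	 * and let tigon3_4gb_hwbug_workaround() replace the frame with a
	 * single freshly copied, linear buffer.
	 */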
2760         if (would_hit_hwbug) {
2761                 u32 last_plus_one = entry;
2762                 u32 start;
2763                 unsigned int len = 0;
2764
2765                 would_hit_hwbug -= 1;
2766                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
2767                 entry &= (TG3_TX_RING_SIZE - 1);
2768                 start = entry;
2769                 i = 0;
2770                 while (entry != last_plus_one) {
2771                         if (i == 0)
2772                                 len = skb_headlen(skb);
2773                         else
2774                                 len = skb_shinfo(skb)->frags[i-1].size;
2775
2776                         if (entry == would_hit_hwbug)
2777                                 break;
2778
2779                         i++;
2780                         entry = NEXT_TX(entry);
2781
2782                 }
2783
2784                 /* If the workaround fails due to memory/mapping
2785                  * failure, silently drop this packet.
2786                  */
2787                 if (tigon3_4gb_hwbug_workaround(tp, skb,
2788                                                 entry, len,
2789                                                 last_plus_one,
2790                                                 &start, mss))
2791                         goto out_unlock;
2792
2793                 entry = start;
2794         }
2795
2796         /* Packets are ready; update the Tx producer index locally and on the card. */
2797         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2798                 tw32_mailbox((MAILBOX_SNDHOST_PROD_IDX_0 +
2799                               TG3_64BIT_REG_LOW), entry);
2800                 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
2801                         tw32_mailbox((MAILBOX_SNDHOST_PROD_IDX_0 +
2802                                       TG3_64BIT_REG_LOW), entry);
2803                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2804                         tr32(MAILBOX_SNDHOST_PROD_IDX_0 +
2805                              TG3_64BIT_REG_LOW);
2806         } else {
2807                 /* First, make sure tg3 sees the last descriptor fully
2808                  * in SRAM.
2809                  */
2810                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2811                         tr32(MAILBOX_SNDNIC_PROD_IDX_0 +
2812                              TG3_64BIT_REG_LOW);
2813
2814                 tw32_mailbox((MAILBOX_SNDNIC_PROD_IDX_0 +
2815                               TG3_64BIT_REG_LOW), entry);
2816                 if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
2817                         tw32_mailbox((MAILBOX_SNDNIC_PROD_IDX_0 +
2818                                       TG3_64BIT_REG_LOW), entry);
2819
2820                 /* Now post the mailbox write itself.  */
2821                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2822                         tr32(MAILBOX_SNDNIC_PROD_IDX_0 +
2823                              TG3_64BIT_REG_LOW);
2824         }
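        /* Both branches above follow the same pattern: chips flagged
         * TG3_FLAG_TXD_MBOX_HWBUG get the producer index written twice, and
         * with TG3_FLAG_MBOX_WRITE_REORDER each write is chased by a read-back,
         * presumably to flush the posted write past bridges that reorder writes.
         */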
2825
2826         tp->tx_prod = entry;
2827         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
2828                 netif_stop_queue(dev);
2829
2830 out_unlock:
2831         spin_unlock_irqrestore(&tp->tx_lock, flags);
2832
2833         dev->trans_start = jiffies;
2834
2835         return 0;
2836 }
2837
2838 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
2839 {
2840         struct tg3 *tp = dev->priv;
2841         dma_addr_t mapping;
2842         u32 len, entry, base_flags, mss;
2843         unsigned long flags;
2844
2845         len = skb_headlen(skb);
2846
2847         /* No BH disabling for tx_lock here.  We are running in BH disabled
2848          * context and TX reclaim runs via tp->poll inside of a software
2849          * interrupt.  Rejoice!
2850          *
2851          * Actually, things are not so simple.  If a hw IRQ arrives
2852          * here, we can deadlock; consider:
2853          *
2854          *       CPU1           CPU2
2855          *   tg3_start_xmit
2856          *   take tp->tx_lock
2857          *                      tg3_timer
2858          *                      take tp->lock
2859          *   tg3_interrupt
2860          *   spin on tp->lock
2861          *                      spin on tp->tx_lock
2862          *
2863          * So we really do need to disable interrupts when taking
2864          * tx_lock here.
2865          */
2866         spin_lock_irqsave(&tp->tx_lock, flags);
2867
2868         /* This is a hard error, log it. */
2869         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
2870                 netif_stop_queue(dev);
2871                 spin_unlock_irqrestore(&tp->tx_lock, flags);
2872                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
2873                        dev->name);
2874                 return 1;
2875         }
2876
2877         entry = tp->tx_prod;
2878         base_flags = 0;
2879         if (skb->ip_summed == CHECKSUM_HW)
2880                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
2881 #if TG3_TSO_SUPPORT != 0
2882         mss = 0;
2883         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
2884             (mss = skb_shinfo(skb)->tso_size) != 0) {
2885                 int tcp_opt_len, ip_tcp_len;
2886
2887                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
2888                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
2889
2890                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
2891                                TXD_FLAG_CPU_POST_DMA);
2892
2893                 skb->nh.iph->check = 0;
2894                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
2895                 skb->h.th->check = ~csum_tcpudp_magic(skb->nh.iph->saddr,
2896                                                       skb->nh.iph->daddr,
2897                                                       0, IPPROTO_TCP, 0);
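                /* Standard TSO preparation: tot_len is rewritten to the
                 * per-segment size and the TCP checksum field is seeded with
                 * only the pseudo-header sum (addresses + protocol, zero
                 * length), so the hardware can finish the checksum for each
                 * segment it emits.
                 */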
2898
2899                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2900                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2901                                 int tsflags;
2902
2903                                 tsflags = ((skb->nh.iph->ihl - 5) +
2904                                            (tcp_opt_len >> 2));
2905                                 mss |= (tsflags << 11);
2906                         }
2907                 } else {
2908                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
2909                                 int tsflags;
2910
2911                                 tsflags = ((skb->nh.iph->ihl - 5) +
2912                                            (tcp_opt_len >> 2));
2913                                 base_flags |= tsflags << 12;
2914                         }
2915                 }
2916         }
2917 #else
2918         mss = 0;
2919 #endif
2920 #if TG3_VLAN_TAG_USED
2921         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
2922                 base_flags |= (TXD_FLAG_VLAN |
2923                                (vlan_tx_tag_get(skb) << 16));
2924 #endif
2925
2926         /* Queue skb data, a.k.a. the main skb fragment. */
2927         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
2928
2929         tp->tx_buffers[entry].skb = skb;
2930         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2931
2932         tg3_set_txd(tp, entry, mapping, len, base_flags,
2933                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
2934
2935         entry = NEXT_TX(entry);
2936
2937         /* Now loop through additional data fragments, and queue them. */
2938         if (skb_shinfo(skb)->nr_frags > 0) {
2939                 unsigned int i, last;
2940
2941                 last = skb_shinfo(skb)->nr_frags - 1;
2942                 for (i = 0; i <= last; i++) {
2943                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2944
2945
2946                         len = frag->size;
2947                         mapping = pci_map_page(tp->pdev,
2948                                                frag->page,
2949                                                frag->page_offset,
2950                                                len, PCI_DMA_TODEVICE);
2951
2952                         tp->tx_buffers[entry].skb = NULL;
2953                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
2954
2955                         tg3_set_txd(tp, entry, mapping, len,
2956                                     base_flags, (i == last));
2957
2958                         entry = NEXT_TX(entry);
2959                 }
2960         }
2961
2962         /* Packets are ready; update the Tx producer index locally and on the card.
2963          * We know this is not a 5700 (by virtue of not being a chip
2964          * requiring the 4GB overflow workaround) so we can safely omit
2965          * the double-write bug tests.
2966          */
2967         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
2968                 tw32_mailbox((MAILBOX_SNDHOST_PROD_IDX_0 +
2969                               TG3_64BIT_REG_LOW), entry);
2970                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2971                         tr32(MAILBOX_SNDHOST_PROD_IDX_0 +
2972                              TG3_64BIT_REG_LOW);
2973         } else {
2974                 /* First, make sure tg3 sees the last descriptor fully
2975                  * in SRAM.
2976                  */
2977                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2978                         tr32(MAILBOX_SNDNIC_PROD_IDX_0 +
2979                              TG3_64BIT_REG_LOW);
2980
2981                 tw32_mailbox((MAILBOX_SNDNIC_PROD_IDX_0 +
2982                               TG3_64BIT_REG_LOW), entry);
2983
2984                 /* Now post the mailbox write itself.  */
2985                 if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
2986                         tr32(MAILBOX_SNDNIC_PROD_IDX_0 +
2987                              TG3_64BIT_REG_LOW);
2988         }
2989
2990         tp->tx_prod = entry;
2991         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
2992                 netif_stop_queue(dev);
2993
2994         spin_unlock_irqrestore(&tp->tx_lock, flags);
2995
2996         dev->trans_start = jiffies;
2997
2998         return 0;
2999 }
3000
3001 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3002                                int new_mtu)
3003 {
3004         dev->mtu = new_mtu;
3005
3006         if (new_mtu > ETH_DATA_LEN)
3007                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3008         else
3009                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3010 }
3011
3012 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3013 {
3014         struct tg3 *tp = dev->priv;
3015
3016         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3017                 return -EINVAL;
3018
3019         if (!netif_running(dev)) {
3020                 /* We'll just catch it later when the
3021                  * device is brought up.
3022                  */
3023                 tg3_set_mtu(dev, tp, new_mtu);
3024                 return 0;
3025         }
3026
3027         tg3_netif_stop(tp);
3028         spin_lock_irq(&tp->lock);
3029         spin_lock(&tp->tx_lock);
3030
3031         tg3_halt(tp);
3032
3033         tg3_set_mtu(dev, tp, new_mtu);
3034
3035         tg3_init_hw(tp);
3036
3037         spin_unlock(&tp->tx_lock);
3038         spin_unlock_irq(&tp->lock);
3039         tg3_netif_start(tp);
3040
3041         return 0;
3042 }
3043
3044 /* Free up pending packets in all rx/tx rings.
3045  *
3046  * The chip has been shut down and the driver detached from
3047  * the networking stack, so no interrupts or new tx packets will
3048  * end up in the driver.  tp->{tx,}lock is not held and we are not
3049  * in an interrupt context and thus may sleep.
3050  */
3051 static void tg3_free_rings(struct tg3 *tp)
3052 {
3053         struct ring_info *rxp;
3054         int i;
3055
3056         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3057                 rxp = &tp->rx_std_buffers[i];
3058
3059                 if (rxp->skb == NULL)
3060                         continue;
3061                 pci_unmap_single(tp->pdev,
3062                                  pci_unmap_addr(rxp, mapping),
3063                                  RX_PKT_BUF_SZ - tp->rx_offset,
3064                                  PCI_DMA_FROMDEVICE);
3065                 dev_kfree_skb_any(rxp->skb);
3066                 rxp->skb = NULL;
3067         }
3068
3069         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3070                 rxp = &tp->rx_jumbo_buffers[i];
3071
3072                 if (rxp->skb == NULL)
3073                         continue;
3074                 pci_unmap_single(tp->pdev,
3075                                  pci_unmap_addr(rxp, mapping),
3076                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3077                                  PCI_DMA_FROMDEVICE);
3078                 dev_kfree_skb_any(rxp->skb);
3079                 rxp->skb = NULL;
3080         }
3081
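        /* Only the first descriptor of each tx packet keeps the skb pointer
         * (fragment descriptors were queued with skb == NULL), so each packet
         * is unmapped head-first here and freed exactly once.
         */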
3082         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3083                 struct tx_ring_info *txp;
3084                 struct sk_buff *skb;
3085                 int j;
3086
3087                 txp = &tp->tx_buffers[i];
3088                 skb = txp->skb;
3089
3090                 if (skb == NULL) {
3091                         i++;
3092                         continue;
3093                 }
3094
3095                 pci_unmap_single(tp->pdev,
3096                                  pci_unmap_addr(txp, mapping),
3097                                  skb_headlen(skb),
3098                                  PCI_DMA_TODEVICE);
3099                 txp->skb = NULL;
3100
3101                 i++;
3102
3103                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3104                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3105                         pci_unmap_page(tp->pdev,
3106                                        pci_unmap_addr(txp, mapping),
3107                                        skb_shinfo(skb)->frags[j].size,
3108                                        PCI_DMA_TODEVICE);
3109                         i++;
3110                 }
3111
3112                 dev_kfree_skb_any(skb);
3113         }
3114 }
3115
3116 /* Initialize tx/rx rings for packet processing.
3117  *
3118  * The chip has been shut down and the driver detached from
3119  * the networking stack, so no interrupts or new tx packets will
3120  * end up in the driver.  tp->{tx,}lock are held and thus
3121  * we may not sleep.
3122  */
3123 static void tg3_init_rings(struct tg3 *tp)
3124 {
3125         unsigned long start, end;
3126         u32 i;
3127
3128         /* Free up all the SKBs. */
3129         tg3_free_rings(tp);
3130
3131         /* Zero out all descriptors. */
3132         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3133         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3134         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3135
3136         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3137                 memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3138         } else {
3139                 start = (tp->regs +
3140                          NIC_SRAM_WIN_BASE +
3141                          NIC_SRAM_TX_BUFFER_DESC);
3142                 end = start + TG3_TX_RING_BYTES;
3143                 while (start < end) {
3144                         writel(0, start);
3145                         start += 4;
3146                 }
3147                 for (i = 0; i < TG3_TX_RING_SIZE; i++)
3148                         tp->tx_buffers[i].prev_vlan_tag = 0;
3149         }
3150
3151         /* Initialize invariants of the rings; we only set this
3152          * stuff once.  This works because the card does not
3153          * write into the rx buffer posting rings.
3154          */
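        /* The opaque word encodes the source ring plus the buffer index
         * (RXD_OPAQUE_RING_* | index), so the rx path can map a completion
         * straight back to the right ring_info slot.
         */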
3155         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3156                 struct tg3_rx_buffer_desc *rxd;
3157
3158                 rxd = &tp->rx_std[i];
3159                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3160                         << RXD_LEN_SHIFT;
3161                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3162                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3163                                (i << RXD_OPAQUE_INDEX_SHIFT));
3164         }
3165
3166         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3167                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3168                         struct tg3_rx_buffer_desc *rxd;
3169
3170                         rxd = &tp->rx_jumbo[i];
3171                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3172                                 << RXD_LEN_SHIFT;
3173                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3174                                 RXD_FLAG_JUMBO;
3175                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3176                                (i << RXD_OPAQUE_INDEX_SHIFT));
3177                 }
3178         }
3179
3180         /* Now allocate fresh SKBs for each rx ring. */
3181         for (i = 0; i < tp->rx_pending; i++) {
3182                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3183                                      -1, i) < 0)
3184                         break;
3185         }
3186
3187         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3188                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3189                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3190                                              -1, i) < 0)
3191                                 break;
3192                 }
3193         }
3194 }
3195
3196 /*
3197  * Must not be invoked with interrupt sources disabled and
3198  * the hardware shut down.
3199  */
3200 static void tg3_free_consistent(struct tg3 *tp)
3201 {
3202         if (tp->rx_std_buffers) {
3203                 kfree(tp->rx_std_buffers);
3204                 tp->rx_std_buffers = NULL;
3205         }
3206         if (tp->rx_std) {
3207                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3208                                     tp->rx_std, tp->rx_std_mapping);
3209                 tp->rx_std = NULL;
3210         }
3211         if (tp->rx_jumbo) {
3212                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3213                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3214                 tp->rx_jumbo = NULL;
3215         }
3216         if (tp->rx_rcb) {
3217                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3218                                     tp->rx_rcb, tp->rx_rcb_mapping);
3219                 tp->rx_rcb = NULL;
3220         }
3221         if (tp->tx_ring) {
3222                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3223                         tp->tx_ring, tp->tx_desc_mapping);
3224                 tp->tx_ring = NULL;
3225         }
3226         if (tp->hw_status) {
3227                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3228                                     tp->hw_status, tp->status_mapping);
3229                 tp->hw_status = NULL;
3230         }
3231         if (tp->hw_stats) {
3232                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3233                                     tp->hw_stats, tp->stats_mapping);
3234                 tp->hw_stats = NULL;
3235         }
3236 }
3237
3238 /*
3239  * Must not be invoked with interrupt sources disabled and
3240  * the hardware shut down.  Can sleep.
3241  */
3242 static int tg3_alloc_consistent(struct tg3 *tp)
3243 {
3244         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3245                                       (TG3_RX_RING_SIZE +
3246                                        TG3_RX_JUMBO_RING_SIZE)) +
3247                                      (sizeof(struct tx_ring_info) *
3248                                       TG3_TX_RING_SIZE),
3249                                      GFP_KERNEL);
3250         if (!tp->rx_std_buffers)
3251                 return -ENOMEM;
3252
3253         memset(tp->rx_std_buffers, 0,
3254                (sizeof(struct ring_info) *
3255                 (TG3_RX_RING_SIZE +
3256                  TG3_RX_JUMBO_RING_SIZE)) +
3257                (sizeof(struct tx_ring_info) *
3258                 TG3_TX_RING_SIZE));
3259
3260         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3261         tp->tx_buffers = (struct tx_ring_info *)
3262                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
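        /* The single allocation above backs all three bookkeeping arrays:
         * standard rx ring_info, jumbo rx ring_info, then the tx_ring_info
         * entries, carved out back to back.
         */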
3263
3264         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3265                                           &tp->rx_std_mapping);
3266         if (!tp->rx_std)
3267                 goto err_out;
3268
3269         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3270                                             &tp->rx_jumbo_mapping);
3271
3272         if (!tp->rx_jumbo)
3273                 goto err_out;
3274
3275         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3276                                           &tp->rx_rcb_mapping);
3277         if (!tp->rx_rcb)
3278                 goto err_out;
3279
3280         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
3281                 tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3282                                                    &tp->tx_desc_mapping);
3283                 if (!tp->tx_ring)
3284                         goto err_out;
3285         } else {
3286                 tp->tx_ring = NULL;
3287                 tp->tx_desc_mapping = 0;
3288         }
3289
3290         tp->hw_status = pci_alloc_consistent(tp->pdev,
3291                                              TG3_HW_STATUS_SIZE,
3292                                              &tp->status_mapping);
3293         if (!tp->hw_status)
3294                 goto err_out;
3295
3296         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3297                                             sizeof(struct tg3_hw_stats),
3298                                             &tp->stats_mapping);
3299         if (!tp->hw_stats)
3300                 goto err_out;
3301
3302         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3303         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3304
3305         return 0;
3306
3307 err_out:
3308         tg3_free_consistent(tp);
3309         return -ENOMEM;
3310 }
3311
3312 #define MAX_WAIT_CNT 1000
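/* Together with the udelay(100) polls below, this bounds each wait at
 * roughly 1000 * 100us = 100ms.
 */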
3313
3314 /* To stop a block, clear the enable bit and poll till it
3315  * clears.  tp->lock is held.
3316  */
3317 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3318 {
3319         unsigned int i;
3320         u32 val;
3321
3322         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3323                 switch (ofs) {
3324                 case RCVLSC_MODE:
3325                 case DMAC_MODE:
3326                 case MBFREE_MODE:
3327                 case BUFMGR_MODE:
3328                 case MEMARB_MODE:
3329                         /* We can't enable/disable these bits of the
3330                          * 5705; just say success.
3331                          */
3332                         return 0;
3333
3334                 default:
3335                         break;
3336                 }
3337         }
3338
3339         val = tr32(ofs);
3340         val &= ~enable_bit;
3341         tw32(ofs, val);
3342         tr32(ofs);
3343
3344         for (i = 0; i < MAX_WAIT_CNT; i++) {
3345                 udelay(100);
3346                 val = tr32(ofs);
3347                 if ((val & enable_bit) == 0)
3348                         break;
3349         }
3350
3351         if (i == MAX_WAIT_CNT) {
3352                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3353                        "ofs=%lx enable_bit=%x\n",
3354                        ofs, enable_bit);
3355                 return -ENODEV;
3356         }
3357
3358         return 0;
3359 }
3360
3361 /* tp->lock is held. */
3362 static int tg3_abort_hw(struct tg3 *tp)
3363 {
3364         int i, err;
3365
3366         tg3_disable_ints(tp);
3367
3368         tp->rx_mode &= ~RX_MODE_ENABLE;
3369         tw32(MAC_RX_MODE, tp->rx_mode);
3370         tr32(MAC_RX_MODE);
3371         udelay(10);
3372
3373         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3374         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3375         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3376         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3377         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3378         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3379
3380         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3381         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3382         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3383         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3384         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3385         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3386         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3387         if (err)
3388                 goto out;
3389
3390         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3391         tw32(MAC_MODE, tp->mac_mode);
3392         tr32(MAC_MODE);
3393         udelay(40);
3394
3395         tp->tx_mode &= ~TX_MODE_ENABLE;
3396         tw32(MAC_TX_MODE, tp->tx_mode);
3397         tr32(MAC_TX_MODE);
3398
3399         for (i = 0; i < MAX_WAIT_CNT; i++) {
3400                 udelay(100);
3401                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3402                         break;
3403         }
3404         if (i >= MAX_WAIT_CNT) {
3405                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3406                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3407                        tp->dev->name, tr32(MAC_TX_MODE));
3408                 return -ENODEV;
3409         }
3410
3411         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3412         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3413         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3414
3415         tw32(FTQ_RESET, 0xffffffff);
3416         tw32(FTQ_RESET, 0x00000000);
3417
3418         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3419         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3420         if (err)
3421                 goto out;
3422
3423         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3424
3425 out:
3426         return err;
3427 }
3428
3429 /* tp->lock is held. */
3430 static void tg3_chip_reset(struct tg3 *tp)
3431 {
3432         u32 val;
3433         u32 flags_save;
3434
3435         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
3436                 /* Force NVRAM to settle.
3437                  * This deals with a chip bug which can result in EEPROM
3438                  * corruption.
3439                  */
3440                 if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3441                         int i;
3442
3443                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3444                         for (i = 0; i < 100000; i++) {
3445                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3446                                         break;
3447                                 udelay(10);
3448                         }
3449                 }
3450         }
3451
3452         /*
3453          * We must avoid the readl() that normally takes place.
3454          * It locks up machines, causes machine checks, and does
3455          * other fun things.  So, temporarily disable the 5701
3456          * hardware workaround while we do the reset.
3457          */
3458         flags_save = tp->tg3_flags;
3459         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3460
3461         /* do the reset */
3462         val = GRC_MISC_CFG_CORECLK_RESET;
3463         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
3464                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3465         tw32(GRC_MISC_CFG, val);
3466
3467         /* restore 5701 hardware bug workaround flag */
3468         tp->tg3_flags = flags_save;
3469
3470         /* Flush PCI posted writes.  The normal MMIO registers
3471          * are inaccessible at this time so this is the only
3472          * way to do this reliably.  I tried to use indirect
3473          * register read/write but this upset some 5701 variants.
3474          */
3475         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3476
3477         udelay(40);
3478         udelay(40);
3479         udelay(40);
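        /* Three back-to-back delays, ~120us in total, presumably to let the
         * core-clock reset settle before config space is touched again.
         */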
3480
3481         /* Re-enable indirect register accesses. */
3482         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3483                                tp->misc_host_ctrl);
3484
3485         /* Set MAX PCI retry to zero. */
3486         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3487         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3488             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3489                 val |= PCISTATE_RETRY_SAME_DMA;
3490         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3491
3492         pci_restore_state(tp->pdev, tp->pci_cfg_state);
3493
3494         /* Make sure PCI-X relaxed ordering bit is clear. */
3495         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3496         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3497         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3498
3499         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3500
3501         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3502             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3503                 tp->pci_clock_ctrl |=
3504                         (CLOCK_CTRL_FORCE_CLKRUN | CLOCK_CTRL_CLKRUN_OENABLE);
3505                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3506         }
3507
3508         tw32(TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3509 }
3510
3511 /* tp->lock is held. */
3512 static void tg3_stop_fw(struct tg3 *tp)
3513 {
3514         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3515                 u32 val;
3516                 int i;
3517
3518                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3519                 val = tr32(GRC_RX_CPU_EVENT);
3520                 val |= (1 << 14);
3521                 tw32(GRC_RX_CPU_EVENT, val);
3522
3523                 /* Wait for RX cpu to ACK the event.  */
3524                 for (i = 0; i < 100; i++) {
3525                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3526                                 break;
3527                         udelay(1);
3528                 }
3529         }
3530 }
3531
3532 /* tp->lock is held. */
3533 static int tg3_halt(struct tg3 *tp)
3534 {
3535         u32 val;
3536         int i;
3537
3538         tg3_stop_fw(tp);
3539         tg3_abort_hw(tp);
3540         tg3_chip_reset(tp);
3541         tg3_write_mem(tp,
3542                       NIC_SRAM_FIRMWARE_MBOX,
3543                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3544         for (i = 0; i < 100000; i++) {
3545                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3546                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3547                         break;
3548                 udelay(10);
3549         }
3550
3551         if (i >= 100000 &&
3552             !(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
3553                 printk(KERN_ERR PFX "tg3_halt timed out for %s, "
3554                        "firmware will not restart magic=%08x\n",
3555                        tp->dev->name, val);
3556                 return -ENODEV;
3557         }
3558
3559         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3560                 if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
3561                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3562                                       DRV_STATE_WOL);
3563                 else
3564                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3565                                       DRV_STATE_UNLOAD);
3566         } else
3567                 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3568                               DRV_STATE_SUSPEND);
3569
3570         return 0;
3571 }
3572
3573 #define TG3_FW_RELEASE_MAJOR    0x0
3574 #define TG3_FW_RELEASE_MINOR    0x0
3575 #define TG3_FW_RELEASE_FIX      0x0
3576 #define TG3_FW_START_ADDR       0x08000000
3577 #define TG3_FW_TEXT_ADDR        0x08000000
3578 #define TG3_FW_TEXT_LEN         0x9c0
3579 #define TG3_FW_RODATA_ADDR      0x080009c0
3580 #define TG3_FW_RODATA_LEN       0x60
3581 #define TG3_FW_DATA_ADDR        0x08000a40
3582 #define TG3_FW_DATA_LEN         0x20
3583 #define TG3_FW_SBSS_ADDR        0x08000a60
3584 #define TG3_FW_SBSS_LEN         0xc
3585 #define TG3_FW_BSS_ADDR         0x08000a70
3586 #define TG3_FW_BSS_LEN          0x10
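/* Layout of the 5701_A0 fixup firmware image: a small program linked at
 * 0x08000000 with text, rodata and data segments.  tg3_load_firmware_cpu()
 * copies each segment into CPU scratch RAM, using only the low 16 bits of
 * these addresses as offsets.
 */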
3587
3588 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
3589         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
3590         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
3591         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
3592         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
3593         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
3594         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
3595         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
3596         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
3597         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
3598         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
3599         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
3600         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
3601         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
3602         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
3603         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
3604         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
3605         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
3606         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
3607         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
3608         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
3609         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
3610         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
3611         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
3612         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3613         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3614         0, 0, 0, 0, 0, 0,
3615         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
3616         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3617         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3618         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3619         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
3620         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
3621         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
3622         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
3623         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3624         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
3625         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
3626         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3627         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3628         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
3629         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
3630         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
3631         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
3632         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
3633         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
3634         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
3635         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
3636         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
3637         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
3638         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
3639         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
3640         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
3641         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
3642         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
3643         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
3644         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
3645         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
3646         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
3647         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
3648         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
3649         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
3650         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
3651         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
3652         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
3653         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
3654         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
3655         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
3656         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
3657         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
3658         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
3659         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
3660         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
3661         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
3662         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
3663         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
3664         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
3665         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
3666         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
3667         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
3668         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
3669         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
3670         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
3671         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
3672         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
3673         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
3674         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
3675         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
3676         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
3677         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
3678         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
3679         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
3680 };
3681
3682 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
3683         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
3684         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
3685         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
3686         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
3687         0x00000000
3688 };
3689
3690 #if 0 /* All zeros, don't eat up space with it. */
3691 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
3692         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
3693         0x00000000, 0x00000000, 0x00000000, 0x00000000
3694 };
3695 #endif
3696
3697 #define RX_CPU_SCRATCH_BASE     0x30000
3698 #define RX_CPU_SCRATCH_SIZE     0x04000
3699 #define TX_CPU_SCRATCH_BASE     0x34000
3700 #define TX_CPU_SCRATCH_SIZE     0x04000
3701
3702 /* tp->lock is held. */
3703 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3704 {
3705         int i;
3706
3707         if (offset == TX_CPU_BASE &&
3708             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
3709                 BUG();
3710
3711         if (offset == RX_CPU_BASE) {
3712                 for (i = 0; i < 10000; i++) {
3713                         tw32(offset + CPU_STATE, 0xffffffff);
3714                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3715                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3716                                 break;
3717                 }
3718
3719                 tw32(offset + CPU_STATE, 0xffffffff);
3720                 tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3721                 tr32(offset + CPU_MODE);
3722                 udelay(10);
3723         } else {
3724                 for (i = 0; i < 10000; i++) {
3725                         tw32(offset + CPU_STATE, 0xffffffff);
3726                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3727                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3728                                 break;
3729                 }
3730         }
3731
3732         if (i >= 10000) {
3733                 printk(KERN_ERR PFX "tg3_reset_cpu timed out for %s, "
3734                        "and %s CPU\n",
3735                        tp->dev->name,
3736                        (offset == RX_CPU_BASE ? "RX" : "TX"));
3737                 return -ENODEV;
3738         }
3739         return 0;
3740 }
3741
3742 struct fw_info {
3743         unsigned int text_base;
3744         unsigned int text_len;
3745         u32 *text_data;
3746         unsigned int rodata_base;
3747         unsigned int rodata_len;
3748         u32 *rodata_data;
3749         unsigned int data_base;
3750         unsigned int data_len;
3751         u32 *data_data;
3752 };
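/* Each firmware segment is described by its linked base address, its length
 * in bytes, and a pointer to its contents; a NULL contents pointer means the
 * segment is simply zero-filled.
 */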
3753
3754 /* tp->lock is held. */
3755 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
3756                                  int cpu_scratch_size, struct fw_info *info)
3757 {
3758         int err, i;
3759         u32 orig_tg3_flags = tp->tg3_flags;
3760         void (*write_op)(struct tg3 *, u32, u32);
3761
3762         if (cpu_base == TX_CPU_BASE &&
3763             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3764                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
3765                        "TX cpu firmware on %s which is 5705.\n",
3766                        tp->dev->name);
3767                 return -EINVAL;
3768         }
3769
3770         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
3771                 write_op = tg3_write_mem;
3772         else
3773                 write_op = tg3_write_indirect_reg32;
3774
3775         /* Force use of PCI config space for indirect register
3776          * write calls.
3777          */
3778         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
3779
3780         err = tg3_halt_cpu(tp, cpu_base);
3781         if (err)
3782                 goto out;
3783
3784         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3785                 write_op(tp, cpu_scratch_base + i, 0);
3786         tw32(cpu_base + CPU_STATE, 0xffffffff);
3787         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3788         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
3789                 write_op(tp, (cpu_scratch_base +
3790                               (info->text_base & 0xffff) +
3791                               (i * sizeof(u32))),
3792                          (info->text_data ?
3793                           info->text_data[i] : 0));
3794         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
3795                 write_op(tp, (cpu_scratch_base +
3796                               (info->rodata_base & 0xffff) +
3797                               (i * sizeof(u32))),
3798                          (info->rodata_data ?
3799                           info->rodata_data[i] : 0));
3800         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
3801                 write_op(tp, (cpu_scratch_base +
3802                               (info->data_base & 0xffff) +
3803                               (i * sizeof(u32))),
3804                          (info->data_data ?
3805                           info->data_data[i] : 0));
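        /* The CPU is left halted at this point; the caller is responsible for
         * pointing CPU_PC at the entry address and releasing the halt, as
         * tg3_load_5701_a0_firmware_fix() does below.
         */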
3806
3807         err = 0;
3808
3809 out:
3810         tp->tg3_flags = orig_tg3_flags;
3811         return err;
3812 }
3813
3814 /* tp->lock is held. */
3815 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3816 {
3817         struct fw_info info;
3818         int err, i;
3819
3820         info.text_base = TG3_FW_TEXT_ADDR;
3821         info.text_len = TG3_FW_TEXT_LEN;
3822         info.text_data = &tg3FwText[0];
3823         info.rodata_base = TG3_FW_RODATA_ADDR;
3824         info.rodata_len = TG3_FW_RODATA_LEN;
3825         info.rodata_data = &tg3FwRodata[0];
3826         info.data_base = TG3_FW_DATA_ADDR;
3827         info.data_len = TG3_FW_DATA_LEN;
3828         info.data_data = NULL;
3829
3830         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3831                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3832                                     &info);
3833         if (err)
3834                 return err;
3835
3836         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3837                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3838                                     &info);
3839         if (err)
3840                 return err;
3841
3842         /* Now startup only the RX cpu. */
3843         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3844         tw32(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
3845
3846         /* Flush posted writes. */
3847         tr32(RX_CPU_BASE + CPU_PC);
3848         for (i = 0; i < 5; i++) {
3849                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
3850                         break;
3851                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3852                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3853                 tw32(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
3854
3855                 /* Flush posted writes. */
3856                 tr32(RX_CPU_BASE + CPU_PC);
3857
3858                 udelay(1000);
3859         }
3860         if (i >= 5) {
3861                 printk(KERN_ERR PFX "tg3_load_firmware fails for %s "
3862                        "to set RX CPU PC, is %08x should be %08x\n",
3863                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
3864                        TG3_FW_TEXT_ADDR);
3865                 return -ENODEV;
3866         }
3867         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3868         tw32(RX_CPU_BASE + CPU_MODE,  0x00000000);
3869
3870         /* Flush posted writes. */
3871         tr32(RX_CPU_BASE + CPU_MODE);
3872
3873         return 0;
3874 }
3875
3876 #if TG3_TSO_SUPPORT != 0
3877
3878 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
3879 #define TG3_TSO_FW_RELEASE_MINOR        0x4
3880 #define TG3_TSO_FW_RELEASE_FIX          0x0
3881 #define TG3_TSO_FW_START_ADDR           0x08000000
3882 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
3883 #define TG3_TSO_FW_TEXT_LEN             0x1a90
3884 #define TG3_TSO_FW_RODATA_ADDR          0x08001a90
3885 #define TG3_TSO_FW_RODATA_LEN           0x60
3886 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
3887 #define TG3_TSO_FW_DATA_LEN             0x20
3888 #define TG3_TSO_FW_SBSS_ADDR            0x08001b40
3889 #define TG3_TSO_FW_SBSS_LEN             0x2c
3890 #define TG3_TSO_FW_BSS_ADDR             0x08001b70
3891 #define TG3_TSO_FW_BSS_LEN              0x894
3892
3893 static u32 tg3TsoFwText[] = {
3894         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
3895         0x37bd4000, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000010, 0x00000000,
3896         0x0000000d, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0x3c04fefe,
3897         0xafbf0018, 0x0e0005d4, 0x34840002, 0x0e000664, 0x00000000, 0x3c030800,
3898         0x90631b58, 0x24020002, 0x3c040800, 0x24841a9c, 0x14620003, 0x24050001,
3899         0x3c040800, 0x24841a90, 0x24060003, 0x00003821, 0xafa00010, 0x0e000678,
3900         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
3901         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
3902         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
3903         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
3904         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
3905         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001ef, 0x24040001,
3906         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
3907         0x90421b88, 0x14520003, 0x00000000, 0x0e0004bf, 0x00000000, 0x0a00003c,
3908         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
3909         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ab0, 0x00002821, 0x00003021,
3910         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000678, 0xafa00014, 0x3c040800,
3911         0x248423c8, 0xa4800000, 0x3c010800, 0xa0201b88, 0x3c010800, 0xac201b8c,
3912         0x3c010800, 0xac201b90, 0x3c010800, 0xac201b94, 0x3c010800, 0xac201b9c,
3913         0x3c010800, 0xac201ba8, 0x3c010800, 0xac201bac, 0x8f624434, 0x3c010800,
3914         0xac221b78, 0x8f624438, 0x3c010800, 0xac221b7c, 0x8f624410, 0xac80f7a8,
3915         0x3c010800, 0xac201b74, 0x3c010800, 0xac2023d0, 0x3c010800, 0xac2023b8,
3916         0x3c010800, 0xac2023bc, 0x3c010800, 0xac2023f0, 0x3c010800, 0xac221b80,
3917         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
3918         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac2023fc,
3919         0xac820034, 0x3c040800, 0x24841abc, 0x3c050800, 0x8ca523fc, 0x00003021,
3920         0x00003821, 0xafa00010, 0x0e000678, 0xafa00014, 0x8fbf0018, 0x03e00008,
3921         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac8, 0x00002821, 0x00003021,
3922         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000678, 0xafa00014, 0x0e00005b,
3923         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
3924         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
3925         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
3926         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
3927         0x24631bac, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b88,
3928         0x14400118, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
3929         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
3930         0x3c030800, 0x90631b88, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
3931         0x3c010800, 0xa0221b88, 0x00051100, 0x00821025, 0x3c010800, 0xac201b8c,
3932         0x3c010800, 0xac201b90, 0x3c010800, 0xac201b94, 0x3c010800, 0xac201b9c,
3933         0x3c010800, 0xac201ba8, 0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4,
3934         0x3c010800, 0xa42223c8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222400,
3935         0x30428000, 0x3c010800, 0xa4231bb6, 0x10400005, 0x24020001, 0x3c010800,
3936         0xac2223e4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023e4,
3937         0x9622000a, 0x3c030800, 0x94631bb6, 0x3c010800, 0xac2023e0, 0x3c010800,
3938         0xac2023e8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
3939         0xa42223c0, 0x3c010800, 0x0a000115, 0xa4231b86, 0x9622000c, 0x3c010800,
3940         0xa42223dc, 0x3c040800, 0x24841b8c, 0x8c820000, 0x00021100, 0x3c010800,
3941         0x00220821, 0xac311bb8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
3942         0xac271bbc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
3943         0x00220821, 0xac261bc0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
3944         0xac291bc4, 0x96230008, 0x3c020800, 0x8c421b9c, 0x00432821, 0x3c010800,
3945         0xac251b9c, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
3946         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
3947         0x8c421b30, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b30, 0x2c620002,
3948         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
3949         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b70,
3950         0x3c040800, 0x94841b84, 0x01221025, 0x3c010800, 0xa42223ca, 0x24020001,
3951         0x3c010800, 0xac221ba8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
3952         0xac231b70, 0x3c010800, 0xa4251b84, 0x3c060800, 0x24c61b8c, 0x8cc20000,
3953         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000652,
3954         0x24040002, 0x0a0001e5, 0x00000000, 0x3c020800, 0x8c421ba8, 0x10400077,
3955         0x24020001, 0x3c050800, 0x90a51b88, 0x14a20071, 0x00000000, 0x3c150800,
3956         0x96b51b86, 0x3c040800, 0x8c841b9c, 0x32a3ffff, 0x0083102a, 0x1440006b,
3957         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523e0, 0x1060005b,
3958         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
3959         0x3c110800, 0x02308821, 0x0e000621, 0x8e311bb8, 0x00402821, 0x10a00053,
3960         0x00000000, 0x9628000a, 0x31020040, 0x10400004, 0x2407180c, 0x8e22000c,
3961         0x2407188c, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bc0, 0x3c020800,
3962         0x00501021, 0x8c421bc4, 0x00031d00, 0x00021400, 0x00621825, 0xaca30014,
3963         0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff, 0x00431021,
3964         0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000, 0x30c4ffff,
3965         0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004, 0x8e63fff4,
3966         0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021, 0xae62fff4,
3967         0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0, 0xae60fff4,
3968         0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008, 0x24020305,
3969         0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c, 0x0a0001ca,
3970         0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223e0, 0x10400003, 0x3c024b65,
3971         0x0a0001d2, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c, 0x30e2ffff,
3972         0xaca20010, 0x0e00059f, 0x00a02021, 0x3242ffff, 0x0054102b, 0x1440ffaa,
3973         0x00000000, 0x24020002, 0x3c010800, 0x0a0001e5, 0xa0221b88, 0x8ec2083c,
3974         0x24420001, 0x0a0001e5, 0xaec2083c, 0x0e0004bf, 0x00000000, 0x8fbf002c,
3975         0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018, 0x8fb10014,
3976         0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028, 0xafb30024,
3977         0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff, 0x3442fff8,
3978         0x3c060800, 0x24c61ba4, 0x02428824, 0x9623000e, 0x8cc20000, 0x00431021,
3979         0xacc20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821, 0x0e000637,
3980         0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
3981         0x10400121, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x1040011c,
3982         0x00000000, 0x0a00020c, 0x00000000, 0x8e240008, 0x8e230014, 0x00041402,
3983         0x000241c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f, 0x00031942,
3984         0x30637800, 0x00021100, 0x24424000, 0x00625021, 0x9542000a, 0x3084ffff,
3985         0x30420008, 0x104000b3, 0x000429c0, 0x3c020800, 0x8c4223f0, 0x1440002d,
3986         0x25050008, 0x95020014, 0x3c010800, 0xa42223c0, 0x8d070010, 0x00071402,
3987         0x3c010800, 0xa42223c2, 0x3c010800, 0xa42723c4, 0x9502000e, 0x30e3ffff,
3988         0x00431023, 0x3c010800, 0xac2223f8, 0x8f626800, 0x3c030010, 0x00431024,
3989         0x10400005, 0x00000000, 0x9503001a, 0x9502001c, 0x0a000241, 0x00431021,
3990         0x9502001a, 0x3c010800, 0xac2223ec, 0x3c02c000, 0x02421825, 0x3c010800,
3991         0xac2823f0, 0x3c010800, 0xac3223f4, 0xaf635c9c, 0x8f625c90, 0x30420002,
3992         0x104000df, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000da,
3993         0x00000000, 0x0a00024e, 0x00000000, 0x9502000e, 0x3c030800, 0x946323c4,
3994         0x00434823, 0x3123ffff, 0x2c620008, 0x1040001c, 0x00000000, 0x95020014,
3995         0x24420028, 0x00a22821, 0x00031042, 0x1840000b, 0x00002021, 0x24c60848,
3996         0x00403821, 0x94a30000, 0x8cc20000, 0x24840001, 0x00431021, 0xacc20000,
3997         0x0087102a, 0x1440fff9, 0x24a50002, 0x31220001, 0x1040001f, 0x3c024000,
3998         0x3c040800, 0x248423ec, 0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021,
3999         0x0a00028d, 0xac820000, 0x8f626800, 0x3c030010, 0x00431024, 0x10400009,
4000         0x00000000, 0x9502001a, 0x3c030800, 0x8c6323ec, 0x00431021, 0x3c010800,
4001         0xac2223ec, 0x0a00028e, 0x3c024000, 0x9502001a, 0x9504001c, 0x3c030800,
4002         0x8c6323ec, 0x00441023, 0x00621821, 0x3c010800, 0xac2323ec, 0x3c024000,
4003         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000,
4004         0x9542000a, 0x30420010, 0x10400095, 0x00000000, 0x3c060800, 0x24c623f0,
4005         0x3c020800, 0x944223c4, 0x8cc50000, 0x3c040800, 0x8c8423f8, 0x24420030,
4006         0x00a22821, 0x94a20004, 0x3c030800, 0x8c6323ec, 0x00441023, 0x00621821,
4007         0x00603821, 0x00032402, 0x30e2ffff, 0x00823821, 0x00071402, 0x00e23821,
4008         0x00071027, 0x3c010800, 0xac2323ec, 0xa4a20006, 0x3c030800, 0x8c6323f4,
4009         0x3c0200ff, 0x3442fff8, 0x00628824, 0x96220008, 0x24040001, 0x24034000,
4010         0x000241c0, 0x00e01021, 0xa502001a, 0xa500001c, 0xacc00000, 0x3c010800,
4011         0xac241b50, 0xaf635cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4012         0x3c010800, 0xac201b50, 0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002,
4013         0x10400003, 0x00000000, 0x3c010800, 0xac201b50, 0x3c020800, 0x8c421b50,
4014         0x1040ffec, 0x00000000, 0x3c040800, 0x0e000637, 0x8c8423f4, 0x0a00032c,
4015         0x00000000, 0x3c030800, 0x90631b88, 0x24020002, 0x14620003, 0x3c034b65,
4016         0x0a0002e3, 0x00008021, 0x8e22001c, 0x34637654, 0x10430002, 0x24100002,
4017         0x24100001, 0x01002021, 0x0e000352, 0x02003021, 0x24020003, 0x3c010800,
4018         0xa0221b88, 0x24020002, 0x1202000a, 0x24020001, 0x3c030800, 0x8c6323e0,
4019         0x10620006, 0x00000000, 0x3c020800, 0x944223c8, 0x00021400, 0x0a000321,
4020         0xae220014, 0x3c040800, 0x248423ca, 0x94820000, 0x00021400, 0xae220014,
4021         0x3c020800, 0x8c421bac, 0x3c03c000, 0x3c010800, 0xa0201b88, 0x00431025,
4022         0xaf625c5c, 0x8f625c50, 0x30420002, 0x10400009, 0x00000000, 0x2484f7e2,
4023         0x8c820000, 0x00431025, 0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa,
4024         0x00000000, 0x3c020800, 0x24421b74, 0x8c430000, 0x24630001, 0xac430000,
4025         0x8f630c14, 0x3063000f, 0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14,
4026         0x3c020800, 0x8c421b30, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b30,
4027         0x2c620002, 0x1040fff7, 0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c,
4028         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x12600003, 0x00000000,
4029         0x0e0004bf, 0x00000000, 0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c,
4030         0x8fb00018, 0x03e00008, 0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b78,
4031         0x8c820000, 0x00031c02, 0x0043102b, 0x14400007, 0x3c038000, 0x8c840004,
4032         0x8f624450, 0x00021c02, 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444,
4033         0x8f624444, 0x00431024, 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008,
4034         0x3042ffff, 0x3c024000, 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002,
4035         0x1440fffc, 0x00000000, 0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821,
4036         0x14c00011, 0x256e0008, 0x3c020800, 0x8c4223e4, 0x10400007, 0x24020016,
4037         0x3c010800, 0xa42223c2, 0x2402002a, 0x3c010800, 0x0a000366, 0xa42223c4,
4038         0x8d670010, 0x00071402, 0x3c010800, 0xa42223c2, 0x3c010800, 0xa42723c4,
4039         0x3c040800, 0x948423c4, 0x3c030800, 0x946323c2, 0x95cf0006, 0x3c020800,
4040         0x944223c0, 0x00832023, 0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821,
4041         0x3082ffff, 0x14c0001a, 0x01226021, 0x9582000c, 0x3042003f, 0x3c010800,
4042         0xa42223c6, 0x95820004, 0x95830006, 0x3c010800, 0xac2023d4, 0x3c010800,
4043         0xac2023d8, 0x00021400, 0x00431025, 0x3c010800, 0xac221bb0, 0x95220004,
4044         0x3c010800, 0xa4221bb4, 0x95230002, 0x01e51023, 0x0043102a, 0x10400010,
4045         0x24020001, 0x3c010800, 0x0a00039a, 0xac2223e8, 0x3c030800, 0x8c6323d8,
4046         0x3c020800, 0x94421bb4, 0x00431021, 0xa5220004, 0x3c020800, 0x94421bb0,
4047         0xa5820004, 0x3c020800, 0x8c421bb0, 0xa5820006, 0x3c020800, 0x8c4223e0,
4048         0x3c0d0800, 0x8dad23d4, 0x3c0a0800, 0x144000e5, 0x8d4a23d8, 0x3c020800,
4049         0x94421bb4, 0x004a1821, 0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d,
4050         0x01435023, 0x3c020800, 0x944223c6, 0x30420009, 0x10400008, 0x00000000,
4051         0x9582000c, 0x3042fff6, 0xa582000c, 0x3c020800, 0x944223c6, 0x30420009,
4052         0x01a26823, 0x3c020800, 0x8c4223e8, 0x1040004a, 0x01203821, 0x3c020800,
4053         0x944223c2, 0x00004021, 0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff,
4054         0x00021042, 0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001,
4055         0x00c23021, 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff,
4056         0x00623021, 0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a,
4057         0x00003021, 0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021,
4058         0x2d020004, 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009,
4059         0x00442023, 0x01803821, 0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042,
4060         0x18400010, 0x00c33021, 0x00404821, 0x94e20000, 0x24e70002, 0x00c23021,
4061         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4062         0x00625824, 0x25670008, 0x0109102a, 0x1440fff3, 0x00000000, 0x30820001,
4063         0x10400005, 0x00061c02, 0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02,
4064         0x30c2ffff, 0x00623021, 0x00061402, 0x00c23021, 0x0a00047f, 0x30c6ffff,
4065         0x24020002, 0x14c20081, 0x00000000, 0x3c020800, 0x8c4223fc, 0x14400007,
4066         0x00000000, 0x3c020800, 0x944223c2, 0x95230002, 0x01e21023, 0x10620077,
4067         0x00000000, 0x3c020800, 0x944223c2, 0x01e21023, 0xa5220002, 0x3c020800,
4068         0x8c4223fc, 0x1040001a, 0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b86,
4069         0x00e04021, 0x00072c02, 0x00aa2021, 0x00431023, 0x00823823, 0x00072402,
4070         0x30e2ffff, 0x00823821, 0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800,
4071         0x948423c4, 0x00453023, 0x00e02821, 0x00641823, 0x006d1821, 0x00c33021,
4072         0x00061c02, 0x30c2ffff, 0x0a00047f, 0x00623021, 0x01203821, 0x00004021,
4073         0x3082ffff, 0x00021042, 0x18400008, 0x00003021, 0x00401821, 0x94e20000,
4074         0x25080001, 0x00c23021, 0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02,
4075         0x30c2ffff, 0x00623021, 0x00061402, 0x00c23021, 0x00c02821, 0x00061027,
4076         0xa522000a, 0x00003021, 0x2527000c, 0x00004021, 0x94e20000, 0x25080001,
4077         0x00c23021, 0x2d020004, 0x1440fffb, 0x24e70002, 0x95220002, 0x00004021,
4078         0x91230009, 0x00442023, 0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800,
4079         0x948423c4, 0x00621821, 0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021,
4080         0x00061c02, 0x3c020800, 0x944223c0, 0x00c34821, 0x00441023, 0x00021fc2,
4081         0x00431021, 0x00021043, 0x18400010, 0x00003021, 0x00402021, 0x94e20000,
4082         0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006, 0x25080001, 0x8d630000,
4083         0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008, 0x0104102a, 0x1440fff3,
4084         0x00000000, 0x3c020800, 0x944223dc, 0x00c23021, 0x3122ffff, 0x00c23021,
4085         0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402, 0x00c23021, 0x00c04021,
4086         0x00061027, 0xa5820010, 0xadc00014, 0x0a00049f, 0xadc00000, 0x8dc70010,
4087         0x00e04021, 0x11400007, 0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff,
4088         0x00433021, 0x00061402, 0x00c22821, 0x00051027, 0xa522000a, 0x3c030800,
4089         0x946323c4, 0x3102ffff, 0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02,
4090         0x30c2ffff, 0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027,
4091         0xa5820010, 0x3102ffff, 0x00051c00, 0x00431025, 0xadc20010, 0x3c020800,
4092         0x8c4223e4, 0x10400002, 0x25e2fff2, 0xa5c20034, 0x3c020800, 0x8c4223d8,
4093         0x3c040800, 0x8c8423d4, 0x24420001, 0x3c010800, 0xac2223d8, 0x3c020800,
4094         0x8c421bb0, 0x3303ffff, 0x00832021, 0x3c010800, 0xac2423d4, 0x00431821,
4095         0x0062102b, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223d4, 0x3c010800,
4096         0xac231bb0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800, 0x24a51b86,
4097         0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034, 0xafb40030,
4098         0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000, 0x3c020800,
4099         0x944223c0, 0x3c030800, 0x8c631ba0, 0x3c040800, 0x8c841b9c, 0x01221023,
4100         0x0064182a, 0xa7a9001e, 0x106000bc, 0xa7a20016, 0x24be0022, 0x97b6001e,
4101         0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000, 0x8fc2fff8,
4102         0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000ae, 0x00000000,
4103         0x97d50818, 0x32a2ffff, 0x104000a1, 0x00009021, 0x0040a021, 0x00008821,
4104         0x0e000621, 0x00000000, 0x00403021, 0x14c00007, 0x00000000, 0x3c020800,
4105         0x8c4223cc, 0x24420001, 0x3c010800, 0x0a000593, 0xac2223cc, 0x3c100800,
4106         0x02118021, 0x8e101bb8, 0x9608000a, 0x31020040, 0x10400004, 0x2407180c,
4107         0x8e02000c, 0x2407188c, 0xacc20018, 0x31020080, 0x54400001, 0x34e70010,
4108         0x3c020800, 0x00511021, 0x8c421bc0, 0x3c030800, 0x00711821, 0x8c631bc4,
4109         0x00021500, 0x00031c00, 0x00431025, 0xacc20014, 0x96040008, 0x3242ffff,
4110         0x00821021, 0x0282102a, 0x14400002, 0x02b22823, 0x00802821, 0x8e020000,
4111         0x02459021, 0xacc20000, 0x8e020004, 0x00c02021, 0x26310010, 0xac820004,
4112         0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010, 0x24020305, 0x0e00059f,
4113         0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc6, 0x3242ffff, 0x0a00058b,
4114         0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a, 0x10400066, 0x00000000,
4115         0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021, 0x0e000621, 0x8e101bb8,
4116         0x00403021, 0x14c00005, 0x00000000, 0x8e62082c, 0x24420001, 0x0a000593,
4117         0xae62082c, 0x9608000a, 0x31020040, 0x10400004, 0x2407180c, 0x8e02000c,
4118         0x2407188c, 0xacc20018, 0x3c020800, 0x00511021, 0x8c421bc0, 0x3c030800,
4119         0x00711821, 0x8c631bc4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4120         0x8e63fff4, 0x96020008, 0x00432023, 0x3242ffff, 0x3083ffff, 0x00431021,
4121         0x02c2102a, 0x10400003, 0x00802821, 0x97a9001e, 0x01322823, 0x8e620000,
4122         0x30a4ffff, 0x00441021, 0xae620000, 0xa4c5000e, 0x8e020000, 0xacc20000,
4123         0x8e020004, 0x8e63fff4, 0x00431021, 0xacc20004, 0x8e63fff4, 0x96020008,
4124         0x00641821, 0x0062102a, 0x14400006, 0x02459021, 0x8e62fff0, 0xae60fff4,
4125         0x24420001, 0x0a00056e, 0xae62fff0, 0xae63fff4, 0xacc00008, 0x3242ffff,
4126         0x10560003, 0x31020004, 0x10400006, 0x24020305, 0x31020080, 0x54400001,
4127         0x34e70010, 0x34e70020, 0x24020905, 0xa4c2000c, 0x8ee30000, 0x8ee20004,
4128         0x14620007, 0x3c02b49a, 0x8ee20860, 0x54400001, 0x34e70400, 0x3c024b65,
4129         0x0a000585, 0x34427654, 0x344289ab, 0xacc2001c, 0x30e2ffff, 0xacc20010,
4130         0x0e00059f, 0x00c02021, 0x3242ffff, 0x0056102b, 0x1440ff9c, 0x00000000,
4131         0x8e620000, 0x8e63fffc, 0x0043102a, 0x1440ff4a, 0x00000000, 0x8fbf0044,
4132         0x8fbe0040, 0x8fb7003c, 0x8fb60038, 0x8fb50034, 0x8fb40030, 0x8fb3002c,
4133         0x8fb20028, 0x8fb10024, 0x8fb00020, 0x03e00008, 0x27bd0048, 0x27bdffe8,
4134         0xafbf0014, 0xafb00010, 0x8f624450, 0x8f634410, 0x0a0005ae, 0x00808021,
4135         0x8f626820, 0x30422000, 0x10400003, 0x00000000, 0x0e0001ef, 0x00002021,
4136         0x8f624450, 0x8f634410, 0x3042ffff, 0x0043102b, 0x1440fff5, 0x00000000,
4137         0x8f630c14, 0x3063000f, 0x2c620002, 0x1440000b, 0x00000000, 0x8f630c14,
4138         0x3c020800, 0x8c421b30, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b30,
4139         0x2c620002, 0x1040fff7, 0x00000000, 0xaf705c18, 0x8f625c10, 0x30420002,
4140         0x10400009, 0x00000000, 0x8f626820, 0x30422000, 0x1040fff8, 0x00000000,
4141         0x0e0001ef, 0x00002021, 0x0a0005c1, 0x00000000, 0x8fbf0014, 0x8fb00010,
4142         0x03e00008, 0x27bd0018, 0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000,
4143         0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804,
4144         0x8f634000, 0x24020b50, 0x3c010800, 0xac221b44, 0x24020b78, 0x3c010800,
4145         0xac221b54, 0x34630002, 0xaf634000, 0x0e000601, 0x00808021, 0x3c010800,
4146         0xa0221b58, 0x304200ff, 0x24030002, 0x14430005, 0x00000000, 0x3c020800,
4147         0x8c421b44, 0x0a0005f4, 0xac5000c0, 0x3c020800, 0x8c421b44, 0xac5000bc,
4148         0x8f624434, 0x8f634438, 0x8f644410, 0x3c010800, 0xac221b4c, 0x3c010800,
4149         0xac231b5c, 0x3c010800, 0xac241b48, 0x8fbf0014, 0x8fb00010, 0x03e00008,
4150         0x27bd0018, 0x3c040800, 0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003,
4151         0xac830000, 0x8cc20000, 0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa,
4152         0xac830000, 0x8cc20000, 0x50430001, 0x24050001, 0x3c020800, 0xac470000,
4153         0x03e00008, 0x00a01021, 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c,
4154         0x8f62680c, 0x1043fffe, 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9,
4155         0x00000000, 0x03e00008, 0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b4c,
4156         0x00031c02, 0x0043102b, 0x14400008, 0x3c038000, 0x3c040800, 0x8c841b5c,
4157         0x8f624450, 0x00021c02, 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444,
4158         0x8f624444, 0x00431024, 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008,
4159         0x3042ffff, 0x3082ffff, 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000,
4160         0x0a000644, 0x2402ffff, 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002,
4161         0x1440fffc, 0x00001021, 0x03e00008, 0x00000000, 0x8f624450, 0x3c030800,
4162         0x8c631b48, 0x0a00064d, 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b,
4163         0x1440fffc, 0x00000000, 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821,
4164         0x3c040800, 0x24841ae0, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4165         0x0e000678, 0xafa00014, 0x0a00065c, 0x00000000, 0x8fbf0018, 0x03e00008,
4166         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4167         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac221b64,
4168         0x24020040, 0x3c010800, 0xac221b68, 0x3c010800, 0xac201b60, 0xac600000,
4169         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4170         0x00804821, 0x8faa0010, 0x3c020800, 0x8c421b60, 0x3c040800, 0x8c841b68,
4171         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac231b60, 0x14400003,
4172         0x00004021, 0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x3c030800,
4173         0x8c631b64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4174         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b60,
4175         0x3c030800, 0x8c631b64, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4176         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4177         0x00000000, 0x00000000,
4178 };
4179
4180 u32 tg3TsoFwRodata[] = {
4181         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4182         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4183         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4184         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4185 };
4186
4187 #if 0 /* All zeros, don't eat up space with it. */
4188 u32 tg3TsoFwData[] = {
4189         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4190         0x00000000, 0x00000000, 0x00000000
4191 };
4192 #endif
4193
4194 /* 5705 needs a special version of the TSO firmware.  */
4195 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4196 #define TG3_TSO5_FW_RELEASE_MINOR       0x1
4197 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4198 #define TG3_TSO5_FW_START_ADDR          0x00010000
4199 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4200 #define TG3_TSO5_FW_TEXT_LEN            0xeb0
4201 #define TG3_TSO5_FW_RODATA_ADDR         0x00010eb0
4202 #define TG3_TSO5_FW_RODATA_LEN          0x50
4203 #define TG3_TSO5_FW_DATA_ADDR           0x00010f20
4204 #define TG3_TSO5_FW_DATA_LEN            0x20
4205 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f40
4206 #define TG3_TSO5_FW_SBSS_LEN            0x28
4207 #define TG3_TSO5_FW_BSS_ADDR            0x00010f70
4208 #define TG3_TSO5_FW_BSS_LEN             0x88
4209
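     /* A quick consistency check on the layout constants above (derived only
      * from the #defines themselves): the firmware sections are packed back
      * to back in NIC SRAM, with small alignment gaps between some of them,
      * e.g.
      *
      *   TG3_TSO5_FW_TEXT_ADDR + TG3_TSO5_FW_TEXT_LEN = 0x00010000 + 0xeb0
      *                                                = 0x00010eb0 = RODATA_ADDR
      *   TG3_TSO5_FW_DATA_ADDR + TG3_TSO5_FW_DATA_LEN = 0x00010f20 + 0x20
      *                                                = 0x00010f40 = SBSS_ADDR
      */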
4210 static u32 tg3Tso5FwText[] = {
4211         0x0c004003, 0x00000000, 0x00010f30, 0x00000000, 0x10000003, 0x00000000,
4212         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4213         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4214         0xafbf0018, 0x0c0042f0, 0x34840002, 0x0c00436c, 0x00000000, 0x3c030001,
4215         0x90630f54, 0x24020002, 0x3c040001, 0x24840ebc, 0x14620003, 0x24050001,
4216         0x3c040001, 0x24840eb0, 0x24060001, 0x00003821, 0xafa00010, 0x0c004380,
4217         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4218         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4219         0x0c0042d3, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4220         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4221         0x0c004064, 0x00000000, 0x3c020001, 0x90420f76, 0x10510003, 0x32020200,
4222         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4223         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4224         0x27bdffe0, 0x3c040001, 0x24840ed0, 0x00002821, 0x00003021, 0x00003821,
4225         0xafbf0018, 0xafa00010, 0x0c004380, 0xafa00014, 0x0000d021, 0x24020130,
4226         0xaf625000, 0x3c010001, 0xa4200f70, 0x3c010001, 0xa0200f77, 0x8fbf0018,
4227         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f80,
4228         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4229         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4230         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4231         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f9a, 0x00041402,
4232         0xa0a20000, 0x3c010001, 0xa0240f9b, 0x3c020001, 0x00431021, 0x94428014,
4233         0x3c010001, 0xa0220f9c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4234         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f80, 0x0124102b,
4235         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4236         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4237         0x24c60008, 0x00003821, 0x3c080001, 0x25080f9b, 0x91060000, 0x3c020001,
4238         0x90420f9c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4239         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4240         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4241         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4242         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4243         0x080040fa, 0xac220fa0, 0x3c050001, 0x24a50f9c, 0x90a20000, 0x3c0c0001,
4244         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4245         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4246         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f9c,
4247         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4248         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4249         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4250         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4251         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4252         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4253         0x90420f9c, 0x3c030001, 0x90630f9a, 0x00e2c823, 0x3c020001, 0x90420f9b,
4254         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4255         0x3c010001, 0xa4220f98, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f96,
4256         0x3c010001, 0xa4200f92, 0x00021400, 0x00431025, 0x3c010001, 0xac220f8c,
4257         0x95020004, 0x3c010001, 0x08004124, 0xa4220f90, 0x3c020001, 0x94420f90,
4258         0x3c030001, 0x94630f92, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f8c,
4259         0xa4c20004, 0x3c020001, 0x8c420f8c, 0xa4c20006, 0x3c040001, 0x94840f92,
4260         0x3c020001, 0x94420f90, 0x3c0a0001, 0x954a0f96, 0x00441821, 0x3063ffff,
4261         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f98,
4262         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f98,
4263         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4264         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4265         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0fa0, 0x10800005,
4266         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4267         0xa502000a, 0x3c030001, 0x90630f9b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4268         0x00432023, 0x3c020001, 0x94420fa0, 0x00442021, 0x00041c02, 0x3082ffff,
4269         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4270         0x24a50f9a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4271         0x00e21023, 0xa5020002, 0x3c030001, 0x94630fa0, 0x3c020001, 0x94420f7a,
4272         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4273         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f9c, 0x24620001,
4274         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4275         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4276         0x94420fa2, 0x3183ffff, 0x3c040001, 0x90840f9b, 0x00431021, 0x00e21021,
4277         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4278         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4279         0x00431025, 0x3c040001, 0x24840f92, 0xade20010, 0x94820000, 0x3c050001,
4280         0x94a50f96, 0x3c030001, 0x8c630f8c, 0x24420001, 0x00b92821, 0xa4820000,
4281         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f96, 0x10600003,
4282         0x24a2ffff, 0x3c010001, 0xa4220f96, 0x3c024000, 0x03021025, 0x3c010001,
4283         0xac240f8c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f76,
4284         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4285         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f84,
4286         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4287         0x24020008, 0x3c010001, 0xa4220f88, 0x30620004, 0x10400005, 0x24020001,
4288         0x3c010001, 0xa0220f77, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f77,
4289         0x00031402, 0x3c010001, 0xa4220f74, 0x9483000c, 0x24020001, 0x3c010001,
4290         0xa4200f70, 0x3c010001, 0xa0220f76, 0x3c010001, 0xa4230f82, 0x24020001,
4291         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4292         0x080042cf, 0x00000000, 0x3c020001, 0x94420f82, 0x241a0001, 0x3c010001,
4293         0xa4200f7e, 0x3c010001, 0xa4200f72, 0x304407ff, 0x00021bc2, 0x00031823,
4294         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4295         0xa4240f78, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f7a, 0x3c010001,
4296         0xa4230f7c, 0x3c060001, 0x24c60f72, 0x94c50000, 0x94c30002, 0x3c040001,
4297         0x94840f7a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4298         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f76, 0x8f641008,
4299         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4300         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4301         0x94630f70, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4302         0xa4230f70, 0xaf620ce8, 0x3c020001, 0x94420f88, 0x34420024, 0xaf620cec,
4303         0x94c30002, 0x3c020001, 0x94420f70, 0x14620012, 0x3c028000, 0x3c108000,
4304         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f76, 0x8f641008, 0x00901024,
4305         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4306         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4307         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4308         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4309         0x3c070001, 0x24e70f70, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4310         0x8c420f84, 0xaf620ce4, 0x3c050001, 0x94a50f74, 0x94e30000, 0x3c040001,
4311         0x94840f78, 0x3c020001, 0x94420f7e, 0x00a32823, 0x00822023, 0x30a6ffff,
4312         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f7c,
4313         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f74,
4314         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4315         0x90420f77, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f88, 0x34630624,
4316         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f88, 0x3c030008, 0x34630624,
4317         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4318         0xa0200f76, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4319         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4320         0x00000000, 0x3c030001, 0x94630f88, 0x34420624, 0x3c108000, 0x00621825,
4321         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4322         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4323         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f7e, 0x3c020001, 0x94420f7c,
4324         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f77, 0x10400009,
4325         0x3c03000c, 0x3c020001, 0x94420f88, 0x34630624, 0x0000d021, 0x00431025,
4326         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f88, 0x3c030008,
4327         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f7e, 0x00451021,
4328         0x3c010001, 0xa4220f7e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4329         0xa0200f76, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4330         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4331         0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdffe0, 0x3c040001, 0x24840ee0,
4332         0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004380,
4333         0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001, 0xa4200f70,
4334         0x3c010001, 0xa0200f77, 0x8f636804, 0x3c020001, 0x3442e000, 0x00621824,
4335         0x3c020001, 0x14620003, 0x00000000, 0x080042eb, 0x00000000, 0x8fbf0018,
4336         0x03e00008, 0x27bd0020, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4337         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4338         0x3c010001, 0xac220f40, 0x24020b78, 0x3c010001, 0xac220f50, 0x34630002,
4339         0xaf634000, 0x0c00431d, 0x00808021, 0x3c010001, 0xa0220f54, 0x304200ff,
4340         0x24030002, 0x14430005, 0x00000000, 0x3c020001, 0x8c420f40, 0x08004310,
4341         0xac5000c0, 0x3c020001, 0x8c420f40, 0xac5000bc, 0x8f624434, 0x8f634438,
4342         0x8f644410, 0x3c010001, 0xac220f48, 0x3c010001, 0xac230f58, 0x3c010001,
4343         0xac240f44, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008,
4344         0x24020001, 0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c,
4345         0x1043fffe, 0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000,
4346         0x03e00008, 0x27bd0008, 0x8f634450, 0x3c020001, 0x8c420f48, 0x00031c02,
4347         0x0043102b, 0x14400008, 0x3c038000, 0x3c040001, 0x8c840f58, 0x8f624450,
4348         0x00021c02, 0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444,
4349         0x00431024, 0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff,
4350         0x3082ffff, 0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0800434f,
4351         0x2402ffff, 0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc,
4352         0x00001021, 0x03e00008, 0x00000000, 0x8f624450, 0x3c030001, 0x8c630f44,
4353         0x08004358, 0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc,
4354         0x00000000, 0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001,
4355         0x24840ef0, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0c004380,
4356         0xafa00014, 0x08004367, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4357         0x3c020001, 0x3442d600, 0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff,
4358         0x3c010001, 0xac220f60, 0x24020040, 0x3c010001, 0xac220f64, 0x3c010001,
4359         0xac200f5c, 0xac600000, 0x24630004, 0x0083102b, 0x5040fffd, 0xac600000,
4360         0x03e00008, 0x00000000, 0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f5c,
4361         0x3c040001, 0x8c840f64, 0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001,
4362         0xac230f5c, 0x14400003, 0x00004021, 0x3c010001, 0xac200f5c, 0x3c020001,
4363         0x8c420f5c, 0x3c030001, 0x8c630f60, 0x91240000, 0x00021140, 0x00431021,
4364         0x00481021, 0x25080001, 0xa0440000, 0x29020008, 0x1440fff4, 0x25290001,
4365         0x3c020001, 0x8c420f5c, 0x3c030001, 0x8c630f60, 0x8f64680c, 0x00021140,
4366         0x00431021, 0xac440008, 0xac45000c, 0xac460010, 0xac470014, 0xac4a0018,
4367         0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4368 };
4369
4370 u32 tg3Tso5FwRodata[] = {
4371         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4372         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4373         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4374         0x00000000, 0x00000000, 0x00000000
4375 };
4376
4377 u32 tg3Tso5FwData[] = {
4378         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x73746b6f, 
4379         0x66666c64, 0x5f76312e, 0x312e3000, 0x00000000
4380 };
4381
4382 /* tp->lock is held. */
4383 static int tg3_load_tso_firmware(struct tg3 *tp)
4384 {
4385         struct fw_info info;
4386         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4387         int err, i;
4388
4389         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4390                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4391                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4392                 info.text_data = &tg3Tso5FwText[0];
4393                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4394                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4395                 info.rodata_data = &tg3Tso5FwRodata[0];
4396                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4397                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4398                 info.data_data = &tg3Tso5FwData[0];
4399                 cpu_base = RX_CPU_BASE;
4400                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4401                 cpu_scratch_size = (info.text_len +
4402                                     info.rodata_len +
4403                                     info.data_len +
4404                                     TG3_TSO5_FW_SBSS_LEN +
4405                                     TG3_TSO5_FW_BSS_LEN);
4406         } else {
4407                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4408                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4409                 info.text_data = &tg3TsoFwText[0];
4410                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4411                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4412                 info.rodata_data = &tg3TsoFwRodata[0];
4413                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4414                 info.data_len = TG3_TSO_FW_DATA_LEN;
4415                 info.data_data = NULL;
4416                 cpu_base = TX_CPU_BASE;
4417                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4418                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4419         }
4420
4421         err = tg3_load_firmware_cpu(tp, cpu_base,
4422                                     cpu_scratch_base, cpu_scratch_size,
4423                                     &info);
4424         if (err)
4425                 return err;
4426
4427         /* Now start up the CPU at the firmware entry point. */
4428         tw32(cpu_base + CPU_STATE, 0xffffffff);
4429         tw32(cpu_base + CPU_PC,    info.text_base);
4430
4431         /* Flush posted writes. */
4432         tr32(cpu_base + CPU_PC);
4433         for (i = 0; i < 5; i++) {
4434                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4435                         break;
4436                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4437                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4438                 tw32(cpu_base + CPU_PC,    info.text_base);
4439
4440                 /* Flush posted writes. */
4441                 tr32(cpu_base + CPU_PC);
4442
4443                 udelay(1000);
4444         }
4445         if (i >= 5) {
4446                 printk(KERN_ERR PFX "tg3_load_tso_firmware failed to set CPU PC "
4447                        "for %s, is %08x, should be %08x\n",
4448                        tp->dev->name, tr32(cpu_base + CPU_PC),
4449                        info.text_base);
4450                 return -ENODEV;
4451         }
4452         tw32(cpu_base + CPU_STATE, 0xffffffff);
4453         tw32(cpu_base + CPU_MODE,  0x00000000);
4454
4455         /* Flush posted writes. */
4456         tr32(cpu_base + CPU_MODE);
4457
4458         return 0;
4459 }
4460
4461 #endif /* TG3_TSO_SUPPORT != 0 */
4462
4463 /* tp->lock is held. */
4464 static void __tg3_set_mac_addr(struct tg3 *tp)
4465 {
4466         u32 addr_high, addr_low;
4467         int i;
4468
4469         addr_high = ((tp->dev->dev_addr[0] << 8) |
4470                      tp->dev->dev_addr[1]);
4471         addr_low = ((tp->dev->dev_addr[2] << 24) |
4472                     (tp->dev->dev_addr[3] << 16) |
4473                     (tp->dev->dev_addr[4] <<  8) |
4474                     (tp->dev->dev_addr[5] <<  0));
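             /* The hardware splits the station address across two registers:
              * MAC_ADDR_x_HIGH carries the first two octets in its low 16 bits,
              * MAC_ADDR_x_LOW the remaining four, most significant octet first.
              * The loop below mirrors the same address into all four slots.
              */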
4475         for (i = 0; i < 4; i++) {
4476                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4477                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4478         }
4479
4480         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
4481             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
4482             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4483                 for (i = 0; i < 12; i++) {
4484                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4485                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4486                 }
4487         }
4488
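             /* The transmit backoff seed is simply the sum of the six MAC
              * address octets, masked to TX_BACKOFF_SEED_MASK, presumably so
              * that different stations start from different backoff states.
              */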
4489         addr_high = (tp->dev->dev_addr[0] +
4490                      tp->dev->dev_addr[1] +
4491                      tp->dev->dev_addr[2] +
4492                      tp->dev->dev_addr[3] +
4493                      tp->dev->dev_addr[4] +
4494                      tp->dev->dev_addr[5]) &
4495                 TX_BACKOFF_SEED_MASK;
4496         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4497 }
4498
4499 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4500 {
4501         struct tg3 *tp = dev->priv;
4502         struct sockaddr *addr = p;
4503
4504         if (netif_running(dev))
4505                 return -EBUSY;
4506
4507         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4508
4509         spin_lock_irq(&tp->lock);
4510         __tg3_set_mac_addr(tp);
4511         spin_unlock_irq(&tp->lock);
4512
4513         return 0;
4514 }
4515
4516 /* tp->lock is held. */
4517 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4518                            dma_addr_t mapping, u32 maxlen_flags,
4519                            u32 nic_addr)
4520 {
4521         tg3_write_mem(tp,
4522                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4523                       ((u64) mapping >> 32));
4524         tg3_write_mem(tp,
4525                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4526                       ((u64) mapping & 0xffffffff));
4527         tg3_write_mem(tp,
4528                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4529                        maxlen_flags);
4530
4531         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4532                 tg3_write_mem(tp,
4533                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4534                               nic_addr);
4535 }
4536
4537 static void __tg3_set_rx_mode(struct net_device *);
4538
4539 /* tp->lock is held. */
4540 static int tg3_reset_hw(struct tg3 *tp)
4541 {
4542         u32 val, rdmac_mode;
4543         int i, err, limit;
4544
4545         tg3_disable_ints(tp);
4546
4547         tg3_stop_fw(tp);
4548
4549         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4550                 err = tg3_abort_hw(tp);
4551                 if (err)
4552                         return err;
4553         }
4554
4555         tg3_chip_reset(tp);
4556
4557         val = tr32(GRC_MODE);
4558         val &= GRC_MODE_HOST_STACKUP;
4559         tw32(GRC_MODE, val | tp->grc_mode);
4560
4561         tg3_write_mem(tp,
4562                       NIC_SRAM_FIRMWARE_MBOX,
4563                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
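             /* The bootcode acknowledges this by writing back the bitwise
              * complement of the magic value; the polling loop below waits
              * for that acknowledgement.
              */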
4564         if (tp->phy_id == PHY_ID_SERDES) {
4565                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
4566                 tw32(MAC_MODE, tp->mac_mode);
4567         } else
4568                 tw32(MAC_MODE, 0);
4569         tr32(MAC_MODE);
4570         udelay(40);
4571
4572         /* Wait for firmware initialization to complete. */
4573         for (i = 0; i < 100000; i++) {
4574                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
4575                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
4576                         break;
4577                 udelay(10);
4578         }
4579         if (i >= 100000 &&
4580             !(tp->tg3_flags2 & TG3_FLG2_SUN_5704)) {
4581                 printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
4582                        "firmware will not restart magic=%08x\n",
4583                        tp->dev->name, val);
4584                 return -ENODEV;
4585         }
4586
4587         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
4588                 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4589                               DRV_STATE_START);
4590         else
4591                 tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
4592                               DRV_STATE_SUSPEND);
4593
4594         /* This works around an issue with Athlon chipsets on
4595          * B3 tigon3 silicon.  This bit has no effect on any
4596          * other revision.
4597          */
4598         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4599         tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4600         tr32(TG3PCI_CLOCK_CTRL);
4601
4602         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4603             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4604                 val = tr32(TG3PCI_PCISTATE);
4605                 val |= PCISTATE_RETRY_SAME_DMA;
4606                 tw32(TG3PCI_PCISTATE, val);
4607         }
4608
4609         /* Descriptor ring init may make accesses to the
4610          * NIC SRAM area to set up the TX descriptors, so we
4611          * can only do this after the hardware has been
4612          * successfully reset.
4613          */
4614         tg3_init_rings(tp);
4615
4616         /* This value is determined during the probe-time DMA
4617          * engine test, tg3_test_dma.
4618          */
4619         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
4620
4621         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
4622                           GRC_MODE_4X_NIC_SEND_RINGS |
4623                           GRC_MODE_NO_TX_PHDR_CSUM |
4624                           GRC_MODE_NO_RX_PHDR_CSUM);
4625         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS)
4626                 tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
4627         else
4628                 tp->grc_mode |= GRC_MODE_4X_NIC_SEND_RINGS;
4629         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
4630                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
4631         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
4632                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
4633
4634         tw32(GRC_MODE,
4635              tp->grc_mode |
4636              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
4637
4638         /* Set up the timer prescaler register.  The clock is always 66 MHz. */
4639         tw32(GRC_MISC_CFG,
4640              (65 << GRC_MISC_CFG_PRESCALAR_SHIFT));
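             /* Presumably a prescaler value of N divides the clock by N + 1,
              * so 65 here turns the 66 MHz core clock into a 1 MHz (1 usec)
              * timer tick.
              */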
4641
4642         /* Initialize MBUF/DESC pool. */
4643         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4644                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
4645                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
4646                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
4647                 else
4648                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
4649                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
4650                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
4651         }
4652 #if TG3_TSO_SUPPORT != 0
4653         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
4654                 int fw_len;
4655
4656                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
4657                           TG3_TSO5_FW_RODATA_LEN +
4658                           TG3_TSO5_FW_DATA_LEN +
4659                           TG3_TSO5_FW_SBSS_LEN +
4660                           TG3_TSO5_FW_BSS_LEN);
4661                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
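                     /* fw_len is now rounded up to a 128-byte boundary; the
                      * writes below move the mbuf pool past the TSO firmware
                      * image that occupies the start of the 5705 pool.
                      */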
4662                 tw32(BUFMGR_MB_POOL_ADDR,
4663                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
4664                 tw32(BUFMGR_MB_POOL_SIZE,
4665                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
4666         }
4667 #endif
4668
4669         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
4670                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4671                      tp->bufmgr_config.mbuf_read_dma_low_water);
4672                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4673                      tp->bufmgr_config.mbuf_mac_rx_low_water);
4674                 tw32(BUFMGR_MB_HIGH_WATER,
4675                      tp->bufmgr_config.mbuf_high_water);
4676         } else {
4677                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
4678                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
4679                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
4680                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
4681                 tw32(BUFMGR_MB_HIGH_WATER,
4682                      tp->bufmgr_config.mbuf_high_water_jumbo);
4683         }
4684         tw32(BUFMGR_DMA_LOW_WATER,
4685              tp->bufmgr_config.dma_low_water);
4686         tw32(BUFMGR_DMA_HIGH_WATER,
4687              tp->bufmgr_config.dma_high_water);
4688
4689         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
4690         for (i = 0; i < 2000; i++) {
4691                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
4692                         break;
4693                 udelay(10);
4694         }
4695         if (i >= 2000) {
4696                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
4697                        tp->dev->name);
4698                 return -ENODEV;
4699         }
4700
4701         tw32(FTQ_RESET, 0xffffffff);
4702         tw32(FTQ_RESET, 0x00000000);
4703         for (i = 0; i < 2000; i++) {
4704                 if (tr32(FTQ_RESET) == 0x00000000)
4705                         break;
4706                 udelay(10);
4707         }
4708         if (i >= 2000) {
4709                 printk(KERN_ERR PFX "tg3_reset_hw cannot reset FTQ for %s.\n",
4710                        tp->dev->name);
4711                 return -ENODEV;
4712         }
4713
4714         /* Clear statistics/status block in chip, and status block in ram. */
4715         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4716                 for (i = NIC_SRAM_STATS_BLK;
4717                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
4718                      i += sizeof(u32)) {
4719                         tg3_write_mem(tp, i, 0);
4720                         udelay(40);
4721                 }
4722         }
4723         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
4724
4725         /* Set up the replenish threshold. */
4726         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
4727
4728         /* Initialize TG3_BDINFO's at:
4729          *  RCVDBDI_STD_BD:     standard eth size rx ring
4730          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
4731          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
4732          *
4733          * like so:
4734          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
4735          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
4736          *                              ring attribute flags
4737          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
4738          *
4739          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
4740          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
4741          *
4742          * The size of each ring is fixed in the firmware, but the location is
4743          * configurable.
4744          */
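             /* For example, the standard ring below gets TG3_BDINFO_MAXLEN_FLAGS
              * set to (RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT): the maximum
              * buffer size lands in the upper 16 bits and no attribute flags are
              * set in the lower 16.  Rings that must not be used get
              * BDINFO_FLAGS_DISABLED written there instead.
              */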
4745         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
4746              ((u64) tp->rx_std_mapping >> 32));
4747         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
4748              ((u64) tp->rx_std_mapping & 0xffffffff));
4749         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
4750              NIC_SRAM_RX_BUFFER_DESC);
4751
4752         /* Don't even try to program the JUMBO/MINI buffer descriptor
4753          * configs on 5705.
4754          */
4755         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4756                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
4757                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
4758         } else {
4759                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
4760                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
4761
4762                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
4763                      BDINFO_FLAGS_DISABLED);
4764
4765                 /* Set up the replenish threshold. */
4766                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
4767
4768                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
4769                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
4770                              ((u64) tp->rx_jumbo_mapping >> 32));
4771                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
4772                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
4773                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
4774                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
4775                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
4776                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
4777                 } else {
4778                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
4779                              BDINFO_FLAGS_DISABLED);
4780                 }
4781
4782         }
4783
4784         /* There is only one send ring on 5705, so there is no need
4785          * to explicitly disable the others.
4786          */
4787         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4788                 /* Clear out send RCB ring in SRAM. */
4789                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
4790                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
4791                                       BDINFO_FLAGS_DISABLED);
4792         }
4793
4794         tp->tx_prod = 0;
4795         tp->tx_cons = 0;
4796         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
4797         tw32_mailbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
4798         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
4799                 tr32(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW);
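             /* On chipsets that can reorder mailbox writes, reading the
              * mailbox back forces the write to complete before we continue.
              */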
4800
4801         if (tp->tg3_flags & TG3_FLAG_HOST_TXDS) {
4802                 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
4803                                tp->tx_desc_mapping,
4804                                (TG3_TX_RING_SIZE <<
4805                                 BDINFO_FLAGS_MAXLEN_SHIFT),
4806                                NIC_SRAM_TX_BUFFER_DESC);
4807         } else {
4808                 tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
4809                                0,
4810                                BDINFO_FLAGS_DISABLED,
4811                                NIC_SRAM_TX_BUFFER_DESC);
4812         }
4813
4814         /* There is only one receive return ring on 5705, so there is no
4815          * need to explicitly disable the others.
4816          */
4817         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4818                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
4819                      i += TG3_BDINFO_SIZE) {
4820                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
4821                                       BDINFO_FLAGS_DISABLED);
4822                 }
4823         }
4824
4825         tp->rx_rcb_ptr = 0;
4826         tw32_mailbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
4827         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
4828                 tr32(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW);
4829
4830         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
4831                        tp->rx_rcb_mapping,
4832                        (TG3_RX_RCB_RING_SIZE(tp) <<
4833                         BDINFO_FLAGS_MAXLEN_SHIFT),
4834                        0);
4835
4836         tp->rx_std_ptr = tp->rx_pending;
4837         tw32_mailbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
4838                      tp->rx_std_ptr);
4839         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
4840                 tr32(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW);
4841
4842         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)
4843                 tp->rx_jumbo_ptr = tp->rx_jumbo_pending;
4844         else
4845                 tp->rx_jumbo_ptr = 0;
4846         tw32_mailbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
4847                      tp->rx_jumbo_ptr);
4848         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
4849                 tr32(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW);
4850
4851         /* Initialize MAC address and backoff seed. */
4852         __tg3_set_mac_addr(tp);
4853
4854         /* MTU + ethernet header + FCS + optional VLAN tag */
4855         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
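             /* With the default 1500-byte MTU this programs 1500 + 14 + 4 + 4 =
              * 1522 bytes (payload + 14-byte Ethernet header + 4-byte FCS +
              * 4-byte VLAN tag).
              */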
4856
4857         /* The slot time is changed by tg3_setup_phy if we
4858          * run at gigabit with half duplex.
4859          */
4860         tw32(MAC_TX_LENGTHS,
4861              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
4862              (6 << TX_LENGTHS_IPG_SHIFT) |
4863              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
4864
4865         /* Receive rules. */
4866         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
4867         tw32(RCVLPC_CONFIG, 0x0181);
4868
4869         /* Calculate the RDMAC_MODE setting early; we need it to determine
4870          * the RCVLPC_STATS_ENABLE mask.
4871          */
4872         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
4873                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
4874                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
4875                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
4876                       RDMAC_MODE_LNGREAD_ENAB);
4877         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
4878                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
4879         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4880                 if (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
4881                         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
4882                                 rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
4883                         } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
4884                                    !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
4885                                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
4886                         }
4887                 }
4888         }
4889
4890         /* Receive/send statistics. */
4891         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
4892             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
4893                 val = tr32(RCVLPC_STATS_ENABLE);
4894                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
4895                 tw32(RCVLPC_STATS_ENABLE, val);
4896         } else {
4897                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
4898         }
4899         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
4900         tw32(SNDDATAI_STATSENAB, 0xffffff);
4901         tw32(SNDDATAI_STATSCTRL,
4902              (SNDDATAI_SCTRL_ENABLE |
4903               SNDDATAI_SCTRL_FASTUPD));
4904
4905         /* Set up the host coalescing engine: disable it and let the disable take effect first. */
4906         tw32(HOSTCC_MODE, 0);
4907         for (i = 0; i < 2000; i++) {
4908                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
4909                         break;
4910                 udelay(10);
4911         }
4912
4913         tw32(HOSTCC_RXCOL_TICKS, 0);
4914         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
4915         tw32(HOSTCC_RXMAX_FRAMES, 1);
4916         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
4917         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4918                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
4919         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4920                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
4921         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
4922         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
4923
4924         /* set status block DMA address */
4925         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
4926              ((u64) tp->status_mapping >> 32));
4927         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
4928              ((u64) tp->status_mapping & 0xffffffff));
4929
4930         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4931                 /* Status/statistics block address.  See tg3_timer,
4932                  * the tg3_periodic_fetch_stats call there, and
4933                  * tg3_get_stats to see how this works for 5705 chips.
4934                  */
4935                 tw32(HOSTCC_STAT_COAL_TICKS,
4936                      DEFAULT_STAT_COAL_TICKS);
4937                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
4938                      ((u64) tp->stats_mapping >> 32));
4939                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
4940                      ((u64) tp->stats_mapping & 0xffffffff));
4941                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
4942                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
4943         }
4944
4945         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
4946
4947         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
4948         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
4949         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
4950                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
4951
4952         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
4953                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
4954         tw32(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
4955         tr32(MAC_MODE);
4956         udelay(40);
4957
4958         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
4959         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
4960                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
4961                                        GRC_LCLCTRL_GPIO_OUTPUT1);
4962         tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
4963         tr32(GRC_LOCAL_CTRL);
4964         udelay(100);
4965
4966         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
4967         tr32(MAILBOX_INTERRUPT_0);
4968
4969         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
4970                 tw32(DMAC_MODE, DMAC_MODE_ENABLE);
4971                 tr32(DMAC_MODE);
4972                 udelay(40);
4973         }
4974
4975         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
4976                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
4977                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
4978                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
4979                WDMAC_MODE_LNGREAD_ENAB);
4980         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
4981             (tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) != 0 &&
4982             !(tp->tg3_flags2 & TG3_FLG2_IS_5788))
4983                 val |= WDMAC_MODE_RX_ACCEL;
4984         tw32(WDMAC_MODE, val);
4985         tr32(WDMAC_MODE);
4986         udelay(40);
4987
4988         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
4989                 val = tr32(TG3PCI_X_CAPS);
4990                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
4991                         val &= ~PCIX_CAPS_BURST_MASK;
4992                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
4993                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
4994                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
4995                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
4996                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
4997                                 val |= (tp->split_mode_max_reqs <<
4998                                         PCIX_CAPS_SPLIT_SHIFT);
4999                 }
5000                 tw32(TG3PCI_X_CAPS, val);
5001         }
5002
5003         tw32(RDMAC_MODE, rdmac_mode);
5004         tr32(RDMAC_MODE);
5005         udelay(40);
5006
5007         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5008         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705)
5009                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5010         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5011         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5012         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5013         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5014         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5015         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5016         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5017
5018         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5019                 err = tg3_load_5701_a0_firmware_fix(tp);
5020                 if (err)
5021                         return err;
5022         }
5023
5024 #if TG3_TSO_SUPPORT != 0
5025         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5026                 err = tg3_load_tso_firmware(tp);
5027                 if (err)
5028                         return err;
5029         }
5030 #endif
5031
5032         tp->tx_mode = TX_MODE_ENABLE;
5033         tw32(MAC_TX_MODE, tp->tx_mode);
5034         tr32(MAC_TX_MODE);
5035         udelay(100);
5036
5037         tp->rx_mode = RX_MODE_ENABLE;
5038         tw32(MAC_RX_MODE, tp->rx_mode);
5039         tr32(MAC_RX_MODE);
5040         udelay(10);
5041
5042         if (tp->link_config.phy_is_low_power) {
5043                 tp->link_config.phy_is_low_power = 0;
5044                 tp->link_config.speed = tp->link_config.orig_speed;
5045                 tp->link_config.duplex = tp->link_config.orig_duplex;
5046                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5047         }
5048
5049         tp->mi_mode = MAC_MI_MODE_BASE;
5050         tw32(MAC_MI_MODE, tp->mi_mode);
5051         tr32(MAC_MI_MODE);
5052         udelay(40);
5053
5054         tw32(MAC_LED_CTRL, 0);
5055         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5056         if (tp->phy_id == PHY_ID_SERDES) {
5057                 tw32(MAC_RX_MODE, RX_MODE_RESET);
5058                 tr32(MAC_RX_MODE);
5059                 udelay(10);
5060         }
5061         tw32(MAC_RX_MODE, tp->rx_mode);
5062         tr32(MAC_RX_MODE);
5063         udelay(10);
5064
5065         if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5066                 tw32(MAC_SERDES_CFG, 0x616000);
5067
5068         /* Prevent chip from dropping frames when flow control
5069          * is enabled.
5070          */
5071         tw32(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5072         tr32(MAC_LOW_WMARK_MAX_RX_FRAME);
5073
5074         err = tg3_setup_phy(tp);
5075         if (err)
5076                 return err;
5077
5078         if (tp->phy_id != PHY_ID_SERDES) {
5079                 u32 tmp;
5080
5081                 /* Clear CRC stats. */
5082                 tg3_readphy(tp, 0x1e, &tmp);
5083                 tg3_writephy(tp, 0x1e, tmp | 0x8000);
5084                 tg3_readphy(tp, 0x14, &tmp);
5085         }
5086
5087         __tg3_set_rx_mode(tp->dev);
5088
5089         /* Initialize receive rules. */
5090         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5091         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5092         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5093         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5094
5095         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
5096                 limit = 8;
5097         else
5098                 limit = 16;
5099         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5100                 limit -= 4;
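        /* Clear every receive rule/value pair above the usable limit;
         * the cases below fall through intentionally.
         */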
5101         switch (limit) {
5102         case 16:
5103                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5104         case 15:
5105                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5106         case 14:
5107                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5108         case 13:
5109                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5110         case 12:
5111                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5112         case 11:
5113                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5114         case 10:
5115                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5116         case 9:
5117                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5118         case 8:
5119                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5120         case 7:
5121                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5122         case 6:
5123                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5124         case 5:
5125                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5126         case 4:
5127                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5128         case 3:
5129                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5130         case 2:
5131         case 1:
5132
5133         default:
5134                 break;
5135         }
5136
5137         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5138                 tg3_enable_ints(tp);
5139
5140         return 0;
5141 }
5142
5143 /* Called at device open time to get the chip ready for
5144  * packet processing.  Invoked with tp->lock held.
5145  */
5146 static int tg3_init_hw(struct tg3 *tp)
5147 {
5148         int err;
5149
5150         /* Force the chip into D0. */
5151         err = tg3_set_power_state(tp, 0);
5152         if (err)
5153                 goto out;
5154
5155         tg3_switch_clocks(tp);
5156
5157         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5158
5159         err = tg3_reset_hw(tp);
5160
5161 out:
5162         return err;
5163 }
5164
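/* Fold a 32-bit hardware counter into a 64-bit statistics slot; if the
 * low-word addition overflows, carry into the high word.
 */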
5165 #define TG3_STAT_ADD32(PSTAT, REG) \
5166 do {    u32 __val = tr32(REG); \
5167         (PSTAT)->low += __val; \
5168         if ((PSTAT)->low < __val) \
5169                 (PSTAT)->high += 1; \
5170 } while (0)
5171
5172 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5173 {
5174         struct tg3_hw_stats *sp = tp->hw_stats;
5175
5176         if (!netif_carrier_ok(tp->dev))
5177                 return;
5178
5179         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5180         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5181         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5182         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5183         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5184         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5185         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5186         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5187         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5188         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5189         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5190         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5191         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5192
5193         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5194         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5195         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5196         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5197         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5198         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5199         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5200         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5201         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5202         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5203         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5204         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5205         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5206         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5207 }
5208
5209 static void tg3_timer(unsigned long __opaque)
5210 {
5211         struct tg3 *tp = (struct tg3 *) __opaque;
5212         unsigned long flags;
5213
5214         spin_lock_irqsave(&tp->lock, flags);
5215         spin_lock(&tp->tx_lock);
5216
5217         /* All of this is needed because, when using non-tagged
5218          * IRQ status, the mailbox/status block protocol the chip
5219          * uses with the CPU is race prone.
5220          */
5221         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5222                 tw32(GRC_LOCAL_CTRL,
5223                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5224         } else {
5225                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5226                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5227         }
5228
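        /* If the write DMA engine has dropped its enable bit, the chip
         * has most likely wedged; schedule a full reset from process
         * context.
         */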
5229         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5230                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5231                 spin_unlock(&tp->tx_lock);
5232                 spin_unlock_irqrestore(&tp->lock, flags);
5233                 schedule_work(&tp->reset_task);
5234                 return;
5235         }
5236
5237         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
5238                 tg3_periodic_fetch_stats(tp);
5239
5240         /* This part only runs once per second. */
5241         if (!--tp->timer_counter) {
5242                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5243                         u32 mac_stat;
5244                         int phy_event;
5245
5246                         mac_stat = tr32(MAC_STATUS);
5247
5248                         phy_event = 0;
5249                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5250                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5251                                         phy_event = 1;
5252                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5253                                 phy_event = 1;
5254
5255                         if (phy_event)
5256                                 tg3_setup_phy(tp);
5257                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5258                         u32 mac_stat = tr32(MAC_STATUS);
5259                         int need_setup = 0;
5260
5261                         if (netif_carrier_ok(tp->dev) &&
5262                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5263                                 need_setup = 1;
5264                         }
5265                         if (! netif_carrier_ok(tp->dev) &&
5266                             (mac_stat & MAC_STATUS_PCS_SYNCED)) {
5267                                 need_setup = 1;
5268                         }
5269                         if (need_setup) {
5270                                 tw32(MAC_MODE,
5271                                      (tp->mac_mode &
5272                                       ~MAC_MODE_PORT_MODE_MASK));
5273                                 tr32(MAC_MODE);
5274                                 udelay(40);
5275                                 tw32(MAC_MODE, tp->mac_mode);
5276                                 tr32(MAC_MODE);
5277                                 udelay(40);
5278                                 tg3_setup_phy(tp);
5279                         }
5280                 }
5281
5282                 tp->timer_counter = tp->timer_multiplier;
5283         }
5284
5285         /* Heartbeat is only sent once every 120 seconds.  */
5286         if (!--tp->asf_counter) {
5287                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5288                         u32 val;
5289
5290                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5291                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5292                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5293                         val = tr32(GRC_RX_CPU_EVENT);
5294                         val |= (1 << 14);
5295                         tw32(GRC_RX_CPU_EVENT, val);
5296                 }
5297                 tp->asf_counter = tp->asf_multiplier;
5298         }
5299
5300         spin_unlock(&tp->tx_lock);
5301         spin_unlock_irqrestore(&tp->lock, flags);
5302
5303         tp->timer.expires = jiffies + tp->timer_offset;
5304         add_timer(&tp->timer);
5305 }
5306
5307 static int tg3_open(struct net_device *dev)
5308 {
5309         struct tg3 *tp = dev->priv;
5310         int err;
5311
5312         spin_lock_irq(&tp->lock);
5313         spin_lock(&tp->tx_lock);
5314
5315         tg3_disable_ints(tp);
5316         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5317
5318         spin_unlock(&tp->tx_lock);
5319         spin_unlock_irq(&tp->lock);
5320
5321         /* If you move this call, make sure TG3_FLAG_HOST_TXDS in
5322          * tp->tg3_flags is still accurate at the new call site.
5323          */
5324         err = tg3_alloc_consistent(tp);
5325         if (err)
5326                 return err;
5327
5328         err = request_irq(dev->irq, tg3_interrupt,
5329                           SA_SHIRQ, dev->name, dev);
5330
5331         if (err) {
5332                 tg3_free_consistent(tp);
5333                 return err;
5334         }
5335
5336         spin_lock_irq(&tp->lock);
5337         spin_lock(&tp->tx_lock);
5338
5339         err = tg3_init_hw(tp);
5340         if (err) {
5341                 tg3_halt(tp);
5342                 tg3_free_rings(tp);
5343         } else {
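                /* The timer fires every HZ/10 (100ms).  A counter of 10
                 * runs the link-poll work once per second, and 10 * 120
                 * ticks spaces ASF heartbeats 120 seconds apart.
                 */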
5344                 tp->timer_offset = HZ / 10;
5345                 tp->timer_counter = tp->timer_multiplier = 10;
5346                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5347
5348                 init_timer(&tp->timer);
5349                 tp->timer.expires = jiffies + tp->timer_offset;
5350                 tp->timer.data = (unsigned long) tp;
5351                 tp->timer.function = tg3_timer;
5352                 add_timer(&tp->timer);
5353
5354                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5355         }
5356
5357         spin_unlock(&tp->tx_lock);
5358         spin_unlock_irq(&tp->lock);
5359
5360         if (err) {
5361                 free_irq(dev->irq, dev);
5362                 tg3_free_consistent(tp);
5363                 return err;
5364         }
5365
5366         spin_lock_irq(&tp->lock);
5367         spin_lock(&tp->tx_lock);
5368
5369         tg3_enable_ints(tp);
5370
5371         spin_unlock(&tp->tx_lock);
5372         spin_unlock_irq(&tp->lock);
5373
5374         netif_start_queue(dev);
5375
5376         return 0;
5377 }
5378
5379 #if 0
5380 /*static*/ void tg3_dump_state(struct tg3 *tp)
5381 {
5382         u32 val32, val32_2, val32_3, val32_4, val32_5;
5383         u16 val16;
5384         int i;
5385
5386         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5387         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5388         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5389                val16, val32);
5390
5391         /* MAC block */
5392         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5393                tr32(MAC_MODE), tr32(MAC_STATUS));
5394         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5395                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5396         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5397                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5398         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5399                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5400
5401         /* Send data initiator control block */
5402         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5403                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5404         printk("       SNDDATAI_STATSCTRL[%08x]\n",
5405                tr32(SNDDATAI_STATSCTRL));
5406
5407         /* Send data completion control block */
5408         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5409
5410         /* Send BD ring selector block */
5411         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5412                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5413
5414         /* Send BD initiator control block */
5415         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5416                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5417
5418         /* Send BD completion control block */
5419         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5420
5421         /* Receive list placement control block */
5422         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5423                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5424         printk("       RCVLPC_STATSCTRL[%08x]\n",
5425                tr32(RCVLPC_STATSCTRL));
5426
5427         /* Receive data and receive BD initiator control block */
5428         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5429                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5430
5431         /* Receive data completion control block */
5432         printk("DEBUG: RCVDCC_MODE[%08x]\n",
5433                tr32(RCVDCC_MODE));
5434
5435         /* Receive BD initiator control block */
5436         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5437                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5438
5439         /* Receive BD completion control block */
5440         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5441                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5442
5443         /* Receive list selector control block */
5444         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5445                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5446
5447         /* Mbuf cluster free block */
5448         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5449                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5450
5451         /* Host coalescing control block */
5452         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5453                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5454         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5455                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5456                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5457         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5458                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5459                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5460         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5461                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5462         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5463                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5464
5465         /* Memory arbiter control block */
5466         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5467                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5468
5469         /* Buffer manager control block */
5470         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5471                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5472         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5473                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5474         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5475                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5476                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5477                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5478
5479         /* Read DMA control block */
5480         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5481                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5482
5483         /* Write DMA control block */
5484         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5485                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5486
5487         /* DMA completion block */
5488         printk("DEBUG: DMAC_MODE[%08x]\n",
5489                tr32(DMAC_MODE));
5490
5491         /* GRC block */
5492         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5493                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5494         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5495                tr32(GRC_LOCAL_CTRL));
5496
5497         /* TG3_BDINFOs */
5498         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5499                tr32(RCVDBDI_JUMBO_BD + 0x0),
5500                tr32(RCVDBDI_JUMBO_BD + 0x4),
5501                tr32(RCVDBDI_JUMBO_BD + 0x8),
5502                tr32(RCVDBDI_JUMBO_BD + 0xc));
5503         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5504                tr32(RCVDBDI_STD_BD + 0x0),
5505                tr32(RCVDBDI_STD_BD + 0x4),
5506                tr32(RCVDBDI_STD_BD + 0x8),
5507                tr32(RCVDBDI_STD_BD + 0xc));
5508         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5509                tr32(RCVDBDI_MINI_BD + 0x0),
5510                tr32(RCVDBDI_MINI_BD + 0x4),
5511                tr32(RCVDBDI_MINI_BD + 0x8),
5512                tr32(RCVDBDI_MINI_BD + 0xc));
5513
5514         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5515         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5516         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5517         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5518         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5519                val32, val32_2, val32_3, val32_4);
5520
5521         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5522         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5523         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5524         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5525         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5526                val32, val32_2, val32_3, val32_4);
5527
5528         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5529         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5530         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5531         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5532         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5533         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5534                val32, val32_2, val32_3, val32_4, val32_5);
5535
5536         /* SW status block */
5537         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5538                tp->hw_status->status,
5539                tp->hw_status->status_tag,
5540                tp->hw_status->rx_jumbo_consumer,
5541                tp->hw_status->rx_consumer,
5542                tp->hw_status->rx_mini_consumer,
5543                tp->hw_status->idx[0].rx_producer,
5544                tp->hw_status->idx[0].tx_consumer);
5545
5546         /* SW statistics block */
5547         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5548                ((u32 *)tp->hw_stats)[0],
5549                ((u32 *)tp->hw_stats)[1],
5550                ((u32 *)tp->hw_stats)[2],
5551                ((u32 *)tp->hw_stats)[3]);
5552
5553         /* Mailboxes */
5554         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5555                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5556                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5557                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5558                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5559
5560         /* NIC side send descriptors. */
5561         for (i = 0; i < 6; i++) {
5562                 unsigned long txd;
5563
5564                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5565                         + (i * sizeof(struct tg3_tx_buffer_desc));
5566                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5567                        i,
5568                        readl(txd + 0x0), readl(txd + 0x4),
5569                        readl(txd + 0x8), readl(txd + 0xc));
5570         }
5571
5572         /* NIC side RX descriptors. */
5573         for (i = 0; i < 6; i++) {
5574                 unsigned long rxd;
5575
5576                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5577                         + (i * sizeof(struct tg3_rx_buffer_desc));
5578                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5579                        i,
5580                        readl(rxd + 0x0), readl(rxd + 0x4),
5581                        readl(rxd + 0x8), readl(rxd + 0xc));
5582                 rxd += (4 * sizeof(u32));
5583                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5584                        i,
5585                        readl(rxd + 0x0), readl(rxd + 0x4),
5586                        readl(rxd + 0x8), readl(rxd + 0xc));
5587         }
5588
5589         for (i = 0; i < 6; i++) {
5590                 unsigned long rxd;
5591
5592                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5593                         + (i * sizeof(struct tg3_rx_buffer_desc));
5594                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
5595                        i,
5596                        readl(rxd + 0x0), readl(rxd + 0x4),
5597                        readl(rxd + 0x8), readl(rxd + 0xc));
5598                 rxd += (4 * sizeof(u32));
5599                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
5600                        i,
5601                        readl(rxd + 0x0), readl(rxd + 0x4),
5602                        readl(rxd + 0x8), readl(rxd + 0xc));
5603         }
5604 }
5605 #endif
5606
5607 static struct net_device_stats *tg3_get_stats(struct net_device *);
5608
5609 static int tg3_close(struct net_device *dev)
5610 {
5611         struct tg3 *tp = dev->priv;
5612
5613         netif_stop_queue(dev);
5614
5615         del_timer_sync(&tp->timer);
5616
5617         spin_lock_irq(&tp->lock);
5618         spin_lock(&tp->tx_lock);
5619 #if 0
5620         tg3_dump_state(tp);
5621 #endif
5622
5623         tg3_disable_ints(tp);
5624
5625         tg3_halt(tp);
5626         tg3_free_rings(tp);
5627         tp->tg3_flags &=
5628                 ~(TG3_FLAG_INIT_COMPLETE |
5629                   TG3_FLAG_GOT_SERDES_FLOWCTL);
5630         netif_carrier_off(tp->dev);
5631
5632         spin_unlock(&tp->tx_lock);
5633         spin_unlock_irq(&tp->lock);
5634
5635         free_irq(dev->irq, dev);
5636
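        /* Snapshot the final counters so tg3_get_stats() can keep
         * reporting cumulative totals after the hardware statistics
         * block is freed.
         */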
5637         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
5638                sizeof(tp->net_stats_prev));
5639
5640         tg3_free_consistent(tp);
5641
5642         return 0;
5643 }
5644
5645 static inline unsigned long get_stat64(tg3_stat64_t *val)
5646 {
5647         unsigned long ret;
5648
5649 #if (BITS_PER_LONG == 32)
5650         ret = val->low;
5651 #else
5652         ret = ((u64)val->high << 32) | ((u64)val->low);
5653 #endif
5654         return ret;
5655 }
5656
5657 static unsigned long calc_crc_errors(struct tg3 *tp)
5658 {
5659         struct tg3_hw_stats *hw_stats = tp->hw_stats;
5660
5661         if (tp->phy_id != PHY_ID_SERDES &&
5662             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
5663              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
5664                 unsigned long flags;
5665                 u32 val;
5666
5667                 spin_lock_irqsave(&tp->lock, flags);
5668                 tg3_readphy(tp, 0x1e, &val);
5669                 tg3_writephy(tp, 0x1e, val | 0x8000);
5670                 tg3_readphy(tp, 0x14, &val);
5671                 spin_unlock_irqrestore(&tp->lock, flags);
5672
5673                 tp->phy_crc_errors += val;
5674
5675                 return tp->phy_crc_errors;
5676         }
5677
5678         return get_stat64(&hw_stats->rx_fcs_errors);
5679 }
5680
5681 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
5682 {
5683         struct tg3 *tp = dev->priv;
5684         struct net_device_stats *stats = &tp->net_stats;
5685         struct net_device_stats *old_stats = &tp->net_stats_prev;
5686         struct tg3_hw_stats *hw_stats = tp->hw_stats;
5687
5688         if (!hw_stats)
5689                 return old_stats;
5690
5691         stats->rx_packets = old_stats->rx_packets +
5692                 get_stat64(&hw_stats->rx_ucast_packets) +
5693                 get_stat64(&hw_stats->rx_mcast_packets) +
5694                 get_stat64(&hw_stats->rx_bcast_packets);
5695                 
5696         stats->tx_packets = old_stats->tx_packets +
5697                 get_stat64(&hw_stats->tx_ucast_packets) +
5698                 get_stat64(&hw_stats->tx_mcast_packets) +
5699                 get_stat64(&hw_stats->tx_bcast_packets);
5700
5701         stats->rx_bytes = old_stats->rx_bytes +
5702                 get_stat64(&hw_stats->rx_octets);
5703         stats->tx_bytes = old_stats->tx_bytes +
5704                 get_stat64(&hw_stats->tx_octets);
5705
5706         stats->rx_errors = old_stats->rx_errors +
5707                 get_stat64(&hw_stats->rx_errors);
5708         stats->tx_errors = old_stats->tx_errors +
5709                 get_stat64(&hw_stats->tx_errors) +
5710                 get_stat64(&hw_stats->tx_mac_errors) +
5711                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
5712                 get_stat64(&hw_stats->tx_discards);
5713
5714         stats->multicast = old_stats->multicast +
5715                 get_stat64(&hw_stats->rx_mcast_packets);
5716         stats->collisions = old_stats->collisions +
5717                 get_stat64(&hw_stats->tx_collisions);
5718
5719         stats->rx_length_errors = old_stats->rx_length_errors +
5720                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
5721                 get_stat64(&hw_stats->rx_undersize_packets);
5722
5723         stats->rx_over_errors = old_stats->rx_over_errors +
5724                 get_stat64(&hw_stats->rxbds_empty);
5725         stats->rx_frame_errors = old_stats->rx_frame_errors +
5726                 get_stat64(&hw_stats->rx_align_errors);
5727         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
5728                 get_stat64(&hw_stats->tx_discards);
5729         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
5730                 get_stat64(&hw_stats->tx_carrier_sense_errors);
5731
5732         stats->rx_crc_errors = old_stats->rx_crc_errors +
5733                 calc_crc_errors(tp);
5734
5735         return stats;
5736 }
5737
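/* Bit-at-a-time CRC-32 (reflected polynomial 0xedb88320), used to hash
 * multicast addresses into the 128-bit MAC hash filter below.
 */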
5738 static inline u32 calc_crc(unsigned char *buf, int len)
5739 {
5740         u32 reg;
5741         u32 tmp;
5742         int j, k;
5743
5744         reg = 0xffffffff;
5745
5746         for (j = 0; j < len; j++) {
5747                 reg ^= buf[j];
5748
5749                 for (k = 0; k < 8; k++) {
5750                         tmp = reg & 0x01;
5751
5752                         reg >>= 1;
5753
5754                         if (tmp) {
5755                                 reg ^= 0xedb88320;
5756                         }
5757                 }
5758         }
5759
5760         return ~reg;
5761 }
5762
5763 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
5764 {
5765         /* accept or reject all multicast frames */
5766         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
5767         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
5768         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
5769         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
5770 }
5771
5772 static void __tg3_set_rx_mode(struct net_device *dev)
5773 {
5774         struct tg3 *tp = dev->priv;
5775         u32 rx_mode;
5776
5777         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
5778                                   RX_MODE_KEEP_VLAN_TAG);
5779
5780         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
5781          * flag clear.
5782          */
5783 #if TG3_VLAN_TAG_USED
5784         if (!tp->vlgrp &&
5785             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
5786                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
5787 #else
5788         /* By definition, VLAN is always disabled in
5789          * this case.
5790          */
5791         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
5792                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
5793 #endif
5794
5795         if (dev->flags & IFF_PROMISC) {
5796                 /* Promiscuous mode. */
5797                 rx_mode |= RX_MODE_PROMISC;
5798         } else if (dev->flags & IFF_ALLMULTI) {
5799                 /* Accept all multicast. */
5800                 tg3_set_multi (tp, 1);
5801         } else if (dev->mc_count < 1) {
5802                 /* Reject all multicast. */
5803                 tg3_set_multi (tp, 0);
5804         } else {
5805                 /* Accept one or more multicast(s). */
5806                 struct dev_mc_list *mclist;
5807                 unsigned int i;
5808                 u32 mc_filter[4] = { 0, };
5809                 u32 regidx;
5810                 u32 bit;
5811                 u32 crc;
5812
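                /* The low 7 bits of the inverted CRC select one of 128
                 * filter bits: bits 6:5 pick the hash register, bits 4:0
                 * the bit within it.
                 */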
5813                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
5814                      i++, mclist = mclist->next) {
5815
5816                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
5817                         bit = ~crc & 0x7f;
5818                         regidx = (bit & 0x60) >> 5;
5819                         bit &= 0x1f;
5820                         mc_filter[regidx] |= (1 << bit);
5821                 }
5822
5823                 tw32(MAC_HASH_REG_0, mc_filter[0]);
5824                 tw32(MAC_HASH_REG_1, mc_filter[1]);
5825                 tw32(MAC_HASH_REG_2, mc_filter[2]);
5826                 tw32(MAC_HASH_REG_3, mc_filter[3]);
5827         }
5828
5829         if (rx_mode != tp->rx_mode) {
5830                 tp->rx_mode = rx_mode;
5831                 tw32(MAC_RX_MODE, rx_mode);
5832                 tr32(MAC_RX_MODE);
5833                 udelay(10);
5834         }
5835 }
5836
5837 static void tg3_set_rx_mode(struct net_device *dev)
5838 {
5839         struct tg3 *tp = dev->priv;
5840
5841         spin_lock_irq(&tp->lock);
5842         __tg3_set_rx_mode(dev);
5843         spin_unlock_irq(&tp->lock);
5844 }
5845
5846 #define TG3_REGDUMP_LEN         (32 * 1024)
5847
5848 static int tg3_get_regs_len(struct net_device *dev)
5849 {
5850         return TG3_REGDUMP_LEN;
5851 }
5852
5853 static void tg3_get_regs(struct net_device *dev,
5854                 struct ethtool_regs *regs, void *_p)
5855 {
5856         u32 *p = _p;
5857         struct tg3 *tp = dev->priv;
5858         u8 *orig_p = _p;
5859         int i;
5860
5861         regs->version = 0;
5862
5863         memset(p, 0, TG3_REGDUMP_LEN);
5864
5865         spin_lock_irq(&tp->lock);
5866         spin_lock(&tp->tx_lock);
5867
5868 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
5869 #define GET_REG32_LOOP(base,len)                \
5870 do {    p = (u32 *)(orig_p + (base));           \
5871         for (i = 0; i < len; i += 4)            \
5872                 __GET_REG32((base) + i);        \
5873 } while (0)
5874 #define GET_REG32_1(reg)                        \
5875 do {    p = (u32 *)(orig_p + (reg));            \
5876         __GET_REG32((reg));                     \
5877 } while (0)
5878
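        /* Each block is copied into the dump buffer at its own register
         * offset, so the 32K dump mirrors the chip's register map.
         */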
5879         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
5880         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
5881         GET_REG32_LOOP(MAC_MODE, 0x4f0);
5882         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
5883         GET_REG32_1(SNDDATAC_MODE);
5884         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
5885         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
5886         GET_REG32_1(SNDBDC_MODE);
5887         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
5888         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
5889         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
5890         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
5891         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
5892         GET_REG32_1(RCVDCC_MODE);
5893         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
5894         GET_REG32_LOOP(RCVCC_MODE, 0x14);
5895         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
5896         GET_REG32_1(MBFREE_MODE);
5897         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
5898         GET_REG32_LOOP(MEMARB_MODE, 0x10);
5899         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
5900         GET_REG32_LOOP(RDMAC_MODE, 0x08);
5901         GET_REG32_LOOP(WDMAC_MODE, 0x08);
5902         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
5903         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
5904         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
5905         GET_REG32_LOOP(FTQ_RESET, 0x120);
5906         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
5907         GET_REG32_1(DMAC_MODE);
5908         GET_REG32_LOOP(GRC_MODE, 0x4c);
5909         if (tp->tg3_flags & TG3_FLAG_NVRAM)
5910                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
5911
5912 #undef __GET_REG32
5913 #undef GET_REG32_LOOP
5914 #undef GET_REG32_1
5915
5916         spin_unlock(&tp->tx_lock);
5917         spin_unlock_irq(&tp->lock);
5918 }
5919
5920 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5921 {
5922         struct tg3 *tp = dev->priv;
5923   
5924         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
5925                                         tp->link_config.phy_is_low_power)
5926                 return -EAGAIN;
5927
5928         cmd->supported = (SUPPORTED_Autoneg);
5929
5930         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
5931                 cmd->supported |= (SUPPORTED_1000baseT_Half |
5932                                    SUPPORTED_1000baseT_Full);
5933
5934         if (tp->phy_id != PHY_ID_SERDES)
5935                 cmd->supported |= (SUPPORTED_100baseT_Half |
5936                                   SUPPORTED_100baseT_Full |
5937                                   SUPPORTED_10baseT_Half |
5938                                   SUPPORTED_10baseT_Full |
5939                                   SUPPORTED_MII);
5940         else
5941                 cmd->supported |= SUPPORTED_FIBRE;
5942   
5943         cmd->advertising = tp->link_config.advertising;
5944         cmd->speed = tp->link_config.active_speed;
5945         cmd->duplex = tp->link_config.active_duplex;
5946         cmd->port = 0;
5947         cmd->phy_address = PHY_ADDR;
5948         cmd->transceiver = 0;
5949         cmd->autoneg = tp->link_config.autoneg;
5950         cmd->maxtxpkt = 0;
5951         cmd->maxrxpkt = 0;
5952         return 0;
5953 }
5954   
5955 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
5956 {
5957         struct tg3 *tp = dev->priv;
5958   
5959         if (!(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) ||
5960                                         tp->link_config.phy_is_low_power)
5961                 return -EAGAIN;
5962
5963         spin_lock_irq(&tp->lock);
5964         spin_lock(&tp->tx_lock);
5965
5966         tp->link_config.autoneg = cmd->autoneg;
5967         if (cmd->autoneg == AUTONEG_ENABLE) {
5968                 tp->link_config.advertising = cmd->advertising;
5969                 tp->link_config.speed = SPEED_INVALID;
5970                 tp->link_config.duplex = DUPLEX_INVALID;
5971         } else {
5972                 tp->link_config.speed = cmd->speed;
5973                 tp->link_config.duplex = cmd->duplex;
5974         }
5975   
5976         tg3_setup_phy(tp);
5977         spin_unlock(&tp->tx_lock);
5978         spin_unlock_irq(&tp->lock);
5979   
5980         return 0;
5981 }
5982   
5983 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
5984 {
5985         struct tg3 *tp = dev->priv;
5986   
5987         strcpy(info->driver, DRV_MODULE_NAME);
5988         strcpy(info->version, DRV_MODULE_VERSION);
5989         strcpy(info->bus_info, pci_name(tp->pdev));
5990 }
5991   
5992 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
5993 {
5994         struct tg3 *tp = dev->priv;
5995   
5996         wol->supported = WAKE_MAGIC;
5997         wol->wolopts = 0;
5998         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
5999                 wol->wolopts = WAKE_MAGIC;
6000         memset(&wol->sopass, 0, sizeof(wol->sopass));
6001 }
6002   
6003 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6004 {
6005         struct tg3 *tp = dev->priv;
6006   
6007         if (wol->wolopts & ~WAKE_MAGIC)
6008                 return -EINVAL;
6009         if ((wol->wolopts & WAKE_MAGIC) &&
6010             tp->phy_id == PHY_ID_SERDES &&
6011             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6012                 return -EINVAL;
6013   
6014         spin_lock_irq(&tp->lock);
6015         if (wol->wolopts & WAKE_MAGIC)
6016                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6017         else
6018                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6019         spin_unlock_irq(&tp->lock);
6020   
6021         return 0;
6022 }
6023   
6024 static u32 tg3_get_msglevel(struct net_device *dev)
6025 {
6026         struct tg3 *tp = dev->priv;
6027         return tp->msg_enable;
6028 }
6029   
6030 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6031 {
6032         struct tg3 *tp = dev->priv;
6033         tp->msg_enable = value;
6034 }
6035   
6036 #if TG3_TSO_SUPPORT != 0
6037 static int tg3_set_tso(struct net_device *dev, u32 value)
6038 {
6039         struct tg3 *tp = dev->priv;
6040
6041         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6042                 if (value)
6043                         return -EINVAL;
6044                 return 0;
6045         }
6046         return ethtool_op_set_tso(dev, value);
6047 }
6048 #endif
6049   
6050 static int tg3_nway_reset(struct net_device *dev)
6051 {
6052         struct tg3 *tp = dev->priv;
6053         u32 bmcr;
6054         int r;
6055   
6056         spin_lock_irq(&tp->lock);
6057         tg3_readphy(tp, MII_BMCR, &bmcr);
6058         tg3_readphy(tp, MII_BMCR, &bmcr);
6059         r = -EINVAL;
6060         if (bmcr & BMCR_ANENABLE) {
6061                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6062                 r = 0;
6063         }
6064         spin_unlock_irq(&tp->lock);
6065   
6066         return r;
6067 }
6068   
6069 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6070 {
6071         struct tg3 *tp = dev->priv;
6072   
6073         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6074         ering->rx_mini_max_pending = 0;
6075         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6076
6077         ering->rx_pending = tp->rx_pending;
6078         ering->rx_mini_pending = 0;
6079         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6080         ering->tx_pending = tp->tx_pending;
6081 }
6082   
6083 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6084 {
6085         struct tg3 *tp = dev->priv;
6086   
6087         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6088             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6089             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6090                 return -EINVAL;
6091   
6092         tg3_netif_stop(tp);
6093         spin_lock_irq(&tp->lock);
6094         spin_lock(&tp->tx_lock);
6095   
6096         tp->rx_pending = ering->rx_pending;
6097
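        /* Chips flagged TG3_FLG2_MAX_RXPEND_64 support only a small
         * standard RX ring; clamp the request to 63 pending descriptors.
         */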
6098         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6099             tp->rx_pending > 63)
6100                 tp->rx_pending = 63;
6101         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6102         tp->tx_pending = ering->tx_pending;
6103
6104         tg3_halt(tp);
6105         tg3_init_hw(tp);
6106         netif_wake_queue(tp->dev);
6107         spin_unlock(&tp->tx_lock);
6108         spin_unlock_irq(&tp->lock);
6109         tg3_netif_start(tp);
6110   
6111         return 0;
6112 }
6113   
6114 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6115 {
6116         struct tg3 *tp = dev->priv;
6117   
6118         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6119         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_RX) != 0;
6120         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_PAUSE_TX) != 0;
6121 }
6122   
6123 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6124 {
6125         struct tg3 *tp = dev->priv;
6126   
6127         tg3_netif_stop(tp);
6128         spin_lock_irq(&tp->lock);
6129         spin_lock(&tp->tx_lock);
6130         if (epause->autoneg)
6131                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6132         else
6133                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6134         if (epause->rx_pause)
6135                 tp->tg3_flags |= TG3_FLAG_PAUSE_RX;
6136         else
6137                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_RX;
6138         if (epause->tx_pause)
6139                 tp->tg3_flags |= TG3_FLAG_PAUSE_TX;
6140         else
6141                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_TX;
6142         tg3_halt(tp);
6143         tg3_init_hw(tp);
6144         spin_unlock(&tp->tx_lock);
6145         spin_unlock_irq(&tp->lock);
6146         tg3_netif_start(tp);
6147   
6148         return 0;
6149 }
6150   
6151 static u32 tg3_get_rx_csum(struct net_device *dev)
6152 {
6153         struct tg3 *tp = dev->priv;
6154         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6155 }
6156   
6157 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6158 {
6159         struct tg3 *tp = dev->priv;
6160   
6161         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6162                 if (data != 0)
6163                         return -EINVAL;
6164                 return 0;
6165         }
6166   
6167         spin_lock_irq(&tp->lock);
6168         if (data)
6169                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6170         else
6171                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6172         spin_unlock_irq(&tp->lock);
6173   
6174         return 0;
6175 }
6176   
6177 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6178 {
6179         struct tg3 *tp = dev->priv;
6180   
6181         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6182                 if (data != 0)
6183                         return -EINVAL;
6184                 return 0;
6185         }
6186   
6187         if (data)
6188                 dev->features |= NETIF_F_IP_CSUM;
6189         else
6190                 dev->features &= ~NETIF_F_IP_CSUM;
6191
6192         return 0;
6193 }
6194   
6195 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6196 {
6197         struct mii_ioctl_data *data = (struct mii_ioctl_data *)&ifr->ifr_data;
6198         struct tg3 *tp = dev->priv;
6199         int err;
6200
6201         switch(cmd) {
6202         case SIOCGMIIPHY:
6203                 data->phy_id = PHY_ADDR;
6204
6205                 /* fallthru */
6206         case SIOCGMIIREG: {
6207                 u32 mii_regval;
6208
6209                 spin_lock_irq(&tp->lock);
6210                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6211                 spin_unlock_irq(&tp->lock);
6212
6213                 data->val_out = mii_regval;
6214
6215                 return err;
6216         }
6217
6218         case SIOCSMIIREG:
6219                 if (!capable(CAP_NET_ADMIN))
6220                         return -EPERM;
6221
6222                 spin_lock_irq(&tp->lock);
6223                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6224                 spin_unlock_irq(&tp->lock);
6225
6226                 return err;
6227
6228         default:
6229                 /* do nothing */
6230                 break;
6231         }
6232         return -EOPNOTSUPP;
6233 }
6234
6235 #if TG3_VLAN_TAG_USED
6236 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6237 {
6238         struct tg3 *tp = dev->priv;
6239
6240         spin_lock_irq(&tp->lock);
6241         spin_lock(&tp->tx_lock);
6242
6243         tp->vlgrp = grp;
6244
6245         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6246         __tg3_set_rx_mode(dev);
6247
6248         spin_unlock(&tp->tx_lock);
6249         spin_unlock_irq(&tp->lock);
6250 }
6251
6252 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6253 {
6254         struct tg3 *tp = dev->priv;
6255
6256         spin_lock_irq(&tp->lock);
6257         spin_lock(&tp->tx_lock);
6258         if (tp->vlgrp)
6259                 tp->vlgrp->vlan_devices[vid] = NULL;
6260         spin_unlock(&tp->tx_lock);
6261         spin_unlock_irq(&tp->lock);
6262 }
6263 #endif
6264
6265 static struct ethtool_ops tg3_ethtool_ops = {
6266         .get_settings           = tg3_get_settings,
6267         .set_settings           = tg3_set_settings,
6268         .get_drvinfo            = tg3_get_drvinfo,
6269         .get_regs_len           = tg3_get_regs_len,
6270         .get_regs               = tg3_get_regs,
6271         .get_wol                = tg3_get_wol,
6272         .set_wol                = tg3_set_wol,
6273         .get_msglevel           = tg3_get_msglevel,
6274         .set_msglevel           = tg3_set_msglevel,
6275         .nway_reset             = tg3_nway_reset,
6276         .get_link               = ethtool_op_get_link,
6277         .get_ringparam          = tg3_get_ringparam,
6278         .set_ringparam          = tg3_set_ringparam,
6279         .get_pauseparam         = tg3_get_pauseparam,
6280         .set_pauseparam         = tg3_set_pauseparam,
6281         .get_rx_csum            = tg3_get_rx_csum,
6282         .set_rx_csum            = tg3_set_rx_csum,
6283         .get_tx_csum            = ethtool_op_get_tx_csum,
6284         .set_tx_csum            = tg3_set_tx_csum,
6285         .get_sg                 = ethtool_op_get_sg,
6286         .set_sg                 = ethtool_op_set_sg,
6287 #if TG3_TSO_SUPPORT != 0
6288         .get_tso                = ethtool_op_get_tso,
6289         .set_tso                = tg3_set_tso,
6290 #endif
6291 };
6292
6293 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
6294 static void __devinit tg3_nvram_init(struct tg3 *tp)
6295 {
6296         int j;
6297
6298         if (tp->tg3_flags2 & TG3_FLG2_SUN_5704)
6299                 return;
6300
6301         tw32(GRC_EEPROM_ADDR,
6302              (EEPROM_ADDR_FSM_RESET |
6303               (EEPROM_DEFAULT_CLOCK_PERIOD <<
6304                EEPROM_ADDR_CLKPERD_SHIFT)));
6305
6306         /* XXX schedule_timeout() ... */
6307         for (j = 0; j < 100; j++)
6308                 udelay(10);
6309
6310         /* Enable seeprom accesses. */
6311         tw32(GRC_LOCAL_CTRL,
6312              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
6313         tr32(GRC_LOCAL_CTRL);
6314         udelay(100);
6315
6316         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
6317             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
6318                 u32 nvcfg1 = tr32(NVRAM_CFG1);
6319
6320                 tp->tg3_flags |= TG3_FLAG_NVRAM;
6321                 if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
6322                         if (nvcfg1 & NVRAM_CFG1_BUFFERED_MODE)
6323                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
6324                 } else {
6325                         nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
6326                         tw32(NVRAM_CFG1, nvcfg1);
6327                 }
6328
6329         } else {
6330                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
6331         }
6332 }
6333
6334 static int __devinit tg3_nvram_read_using_eeprom(struct tg3 *tp,
6335                                                  u32 offset, u32 *val)
6336 {
6337         u32 tmp;
6338         int i;
6339
6340         if (offset > EEPROM_ADDR_ADDR_MASK ||
6341             (offset % 4) != 0)
6342                 return -EINVAL;
6343
6344         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
6345                                         EEPROM_ADDR_DEVID_MASK |
6346                                         EEPROM_ADDR_READ);
6347         tw32(GRC_EEPROM_ADDR,
6348              tmp |
6349              (0 << EEPROM_ADDR_DEVID_SHIFT) |
6350              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
6351               EEPROM_ADDR_ADDR_MASK) |
6352              EEPROM_ADDR_READ | EEPROM_ADDR_START);
6353
6354         for (i = 0; i < 10000; i++) {
6355                 tmp = tr32(GRC_EEPROM_ADDR);
6356
6357                 if (tmp & EEPROM_ADDR_COMPLETE)
6358                         break;
6359                 udelay(100);
6360         }
6361         if (!(tmp & EEPROM_ADDR_COMPLETE))
6362                 return -EBUSY;
6363
6364         *val = tr32(GRC_EEPROM_DATA);
6365         return 0;
6366 }
6367
6368 static int __devinit tg3_nvram_read(struct tg3 *tp,
6369                                     u32 offset, u32 *val)
6370 {
6371         int i, saw_done_clear;
6372
6373         if (tp->tg3_flags2 & TG3_FLG2_SUN_5704) {
6374                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 5704\n");
6375                 return -EINVAL;
6376         }
6377
6378         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
6379                 return tg3_nvram_read_using_eeprom(tp, offset, val);
6380
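        /* Buffered flash parts are addressed by page number and byte
         * within the page rather than by a flat offset, so split the
         * linear offset into those two fields here.  For example,
         * assuming the usual 264-byte buffered page size, linear
         * offset 264 becomes page 1, byte 0.
         */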
6381         if (tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED)
6382                 offset = ((offset / NVRAM_BUFFERED_PAGE_SIZE) <<
6383                           NVRAM_BUFFERED_PAGE_POS) +
6384                         (offset % NVRAM_BUFFERED_PAGE_SIZE);
6385
6386         if (offset > NVRAM_ADDR_MSK)
6387                 return -EINVAL;
6388
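        /* Grab the NVRAM software arbitration semaphore; the loop below
         * gives the hardware roughly 20ms to grant it before we press
         * on regardless.
         */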
6389         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
6390         for (i = 0; i < 1000; i++) {
6391                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
6392                         break;
6393                 udelay(20);
6394         }
6395
6396         tw32(NVRAM_ADDR, offset);
6397         tw32(NVRAM_CMD,
6398              NVRAM_CMD_RD | NVRAM_CMD_GO |
6399              NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
6400
6401         /* Wait for done bit to clear then set again. */
6402         saw_done_clear = 0;
6403         for (i = 0; i < 1000; i++) {
6404                 udelay(10);
6405                 if (!saw_done_clear &&
6406                     !(tr32(NVRAM_CMD) & NVRAM_CMD_DONE))
6407                         saw_done_clear = 1;
6408                 else if (saw_done_clear &&
6409                          (tr32(NVRAM_CMD) & NVRAM_CMD_DONE))
6410                         break;
6411         }
6412         if (i >= 1000) {
6413                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
6414                 return -EBUSY;
6415         }
6416
6417         *val = swab32(tr32(NVRAM_RDDATA));
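        /* Release the software arbitration grant acquired above; the
         * 0x20 value is presumably SWARB_REQ_CLR1, matching the error
         * path earlier in this function.
         */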
6418         tw32(NVRAM_SWARB, 0x20);
6419
6420         return 0;
6421 }
6422
6423 struct subsys_tbl_ent {
6424         u16 subsys_vendor, subsys_devid;
6425         u32 phy_id;
6426 };
6427
6428 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
6429         /* Broadcom boards. */
6430         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
6431         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
6432         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
6433         { PCI_VENDOR_ID_BROADCOM, 0x0003, PHY_ID_SERDES  }, /* BCM95700A9 */
6434         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
6435         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
6436         { PCI_VENDOR_ID_BROADCOM, 0x0007, PHY_ID_SERDES  }, /* BCM95701A7 */
6437         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
6438         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
6439         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5701 }, /* BCM95703Ax1 */
6440         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5701 }, /* BCM95703Ax2 */
6441
6442         /* 3com boards. */
6443         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
6444         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
6445         { PCI_VENDOR_ID_3COM, 0x1004, PHY_ID_SERDES  }, /* 3C996SX */
6446         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
6447         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
6448
6449         /* DELL boards. */
6450         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
6451         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
6452         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
6453         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
6454
6455         /* Compaq boards. */
6456         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
6457         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
6458         { PCI_VENDOR_ID_COMPAQ, 0x007d, PHY_ID_SERDES  }, /* CHANGELING */
6459         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
6460         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
6461
6462         /* IBM boards. */
6463         { PCI_VENDOR_ID_IBM, 0x0281, PHY_ID_SERDES } /* IBM??? */
6464 };
6465
6466 static int __devinit tg3_phy_probe(struct tg3 *tp)
6467 {
6468         u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
6469         u32 hw_phy_id, hw_phy_id_masked;
6470         enum phy_led_mode eeprom_led_mode;
6471         u32 val;
6472         int i, eeprom_signature_found, err;
6473
6474         tp->phy_id = PHY_ID_INVALID;
6475         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
6476                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
6477                      tp->pdev->subsystem_vendor) &&
6478                     (subsys_id_to_phy_id[i].subsys_devid ==
6479                      tp->pdev->subsystem_device)) {
6480                         tp->phy_id = subsys_id_to_phy_id[i].phy_id;
6481                         break;
6482                 }
6483         }
6484
6485         eeprom_phy_id = PHY_ID_INVALID;
6486         eeprom_led_mode = led_mode_auto;
6487         eeprom_signature_found = 0;
6488         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
6489         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
6490                 u32 nic_cfg;
6491
6492                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
6493                 tp->nic_sram_data_cfg = nic_cfg;
6494
6495                 eeprom_signature_found = 1;
6496
6497                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
6498                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER) {
6499                         eeprom_phy_id = PHY_ID_SERDES;
6500                 } else {
6501                         u32 nic_phy_id;
6502
6503                         tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
6504                         if (nic_phy_id != 0) {
6505                                 u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
6506                                 u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
6507
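                                /* Rebuild the 32-bit PHY id from the two
                                 * halves stored in SRAM, using the same
                                 * bit layout as the MII_PHYSID1/PHYSID2
                                 * combination assembled further below.
                                 */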
6508                                 eeprom_phy_id  = (id1 >> 16) << 10;
6509                                 eeprom_phy_id |= (id2 & 0xfc00) << 16;
6510                                 eeprom_phy_id |= (id2 & 0x03ff) <<  0;
6511                         }
6512                 }
6513
6514                 switch (nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK) {
6515                 case NIC_SRAM_DATA_CFG_LED_TRIPLE_SPD:
6516                         eeprom_led_mode = led_mode_three_link;
6517                         break;
6518
6519                 case NIC_SRAM_DATA_CFG_LED_LINK_SPD:
6520                         eeprom_led_mode = led_mode_link10;
6521                         break;
6522
6523                 default:
6524                         eeprom_led_mode = led_mode_auto;
6525                         break;
6526                 };
6527
6528                 if (((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) ||
6529                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) ||
6530                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) &&
6531                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
6532                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
6533
6534                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE)
6535                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
6536                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
6537                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
6538         }
6539
6540         /* Now read the physical PHY_ID from the chip and verify
6541          * that it is sane.  If it doesn't look good, we fall back
6542          * to the hard-coded, table-based PHY_ID or, failing that,
6543          * to the value found in the eeprom area.
6544          */
6545         err  = tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
6546         err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
6547
6548         hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
6549         hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
6550         hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
6551
6552         hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
6553
6554         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
6555                 tp->phy_id = hw_phy_id;
6556         } else {
6557                 /* phy_id currently holds the value found in the
6558                  * subsys_id_to_phy_id[] table or PHY_ID_INVALID
6559                  * if a match was not found there.
6560                  */
6561                 if (tp->phy_id == PHY_ID_INVALID) {
6562                         if (!eeprom_signature_found ||
6563                             !KNOWN_PHY_ID(eeprom_phy_id & PHY_ID_MASK))
6564                                 return -ENODEV;
6565                         tp->phy_id = eeprom_phy_id;
6566                 }
6567         }
6568
6569         err = tg3_phy_reset(tp, 1);
6570         if (err)
6571                 return err;
6572
6573         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
6574             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
6575                 u32 mii_tg3_ctrl;
6576                 
6577                 /* These chips, when reset, only advertise 10Mb
6578                  * capabilities.  Fix that.
6579                  */
6580                 err  = tg3_writephy(tp, MII_ADVERTISE,
6581                                     (ADVERTISE_CSMA |
6582                                      ADVERTISE_PAUSE_CAP |
6583                                      ADVERTISE_10HALF |
6584                                      ADVERTISE_10FULL |
6585                                      ADVERTISE_100HALF |
6586                                      ADVERTISE_100FULL));
6587                 mii_tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
6588                                 MII_TG3_CTRL_ADV_1000_FULL |
6589                                 MII_TG3_CTRL_AS_MASTER |
6590                                 MII_TG3_CTRL_ENABLE_AS_MASTER);
6591                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
6592                         mii_tg3_ctrl = 0;
6593
6594                 err |= tg3_writephy(tp, MII_TG3_CTRL, mii_tg3_ctrl);
6595                 err |= tg3_writephy(tp, MII_BMCR,
6596                                     (BMCR_ANRESTART | BMCR_ANENABLE));
6597         }
6598
6599         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
6600                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
6601                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
6602                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
6603         }
6604
6605         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
6606             (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)) {
6607                 tg3_writephy(tp, 0x1c, 0x8d68);
6608                 tg3_writephy(tp, 0x1c, 0x8d68);
6609         }
6610
6611         /* Enable Ethernet@WireSpeed */
6612         tg3_phy_set_wirespeed(tp);
6613
6614         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
6615                 err = tg3_init_5401phy_dsp(tp);
6616         }
6617
6618         /* Determine the PHY led mode. */
6619         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL) {
6620                 tp->led_mode = led_mode_link10;
6621         } else {
6622                 tp->led_mode = led_mode_three_link;
6623                 if (eeprom_signature_found &&
6624                     eeprom_led_mode != led_mode_auto)
6625                         tp->led_mode = eeprom_led_mode;
6626         }
6627
6628         if (tp->phy_id == PHY_ID_SERDES)
6629                 tp->link_config.advertising =
6630                         (ADVERTISED_1000baseT_Half |
6631                          ADVERTISED_1000baseT_Full |
6632                          ADVERTISED_Autoneg |
6633                          ADVERTISED_FIBRE);
6634         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
6635                 tp->link_config.advertising &=
6636                         ~(ADVERTISED_1000baseT_Half |
6637                           ADVERTISED_1000baseT_Full);
6638
6639         return err;
6640 }
6641
6642 static void __devinit tg3_read_partno(struct tg3 *tp)
6643 {
6644         unsigned char vpd_data[256];
6645         int i;
6646
6647         if (tp->tg3_flags2 & TG3_FLG2_SUN_5704) {
6648                 /* Sun decided not to put the necessary bits in the
6649                  * NVRAM of their onboard tg3 parts :(
6650                  */
6651                 strcpy(tp->board_part_number, "Sun 5704");
6652                 return;
6653         }
6654
6655         for (i = 0; i < 256; i += 4) {
6656                 u32 tmp;
6657
6658                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
6659                         goto out_not_found;
6660
6661                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
6662                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
6663                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
6664                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
6665         }
6666
6667         /* Now parse and find the part number. */
6668         for (i = 0; i < 256; ) {
6669                 unsigned char val = vpd_data[i];
6670                 int block_end;
6671
6672                 if (val == 0x82 || val == 0x91) {
6673                         i = (i + 3 +
6674                              (vpd_data[i + 1] +
6675                               (vpd_data[i + 2] << 8)));
6676                         continue;
6677                 }
6678
6679                 if (val != 0x90)
6680                         goto out_not_found;
6681
6682                 block_end = (i + 3 +
6683                              (vpd_data[i + 1] +
6684                               (vpd_data[i + 2] << 8)));
6685                 i += 3;
6686                 while (i < block_end) {
6687                         if (vpd_data[i + 0] == 'P' &&
6688                             vpd_data[i + 1] == 'N') {
6689                                 int partno_len = vpd_data[i + 2];
6690
6691                                 if (partno_len > 24)
6692                                         goto out_not_found;
6693
6694                                 memcpy(tp->board_part_number,
6695                                        &vpd_data[i + 3],
6696                                        partno_len);
6697
6698                                 /* Success. */
6699                                 return;
6700                         }
                        /* Not the part number; advance past this VPD
                         * keyword (2 byte keyword, 1 byte length, then
                         * the data) so the loop cannot spin forever.
                         */
                        i += 3 + vpd_data[i + 2];
6701                 }
6702
6703                 /* Part number not found. */
6704                 goto out_not_found;
6705         }
6706
6707 out_not_found:
6708         strcpy(tp->board_part_number, "none");
6709 }
6710
6711 #ifdef CONFIG_SPARC64
6712 static int __devinit tg3_is_sun_5704(struct tg3 *tp)
6713 {
6714         struct pci_dev *pdev = tp->pdev;
6715         struct pcidev_cookie *pcp = pdev->sysdata;
6716
6717         if (pcp != NULL) {
6718                 int node = pcp->prom_node;
6719                 u32 venid, devid;
6720                 int err;
6721
6722                 err = prom_getproperty(node, "subsystem-vendor-id",
6723                                        (char *) &venid, sizeof(venid));
6724                 if (err == 0 || err == -1)
6725                         return 0;
6726                 err = prom_getproperty(node, "subsystem-id",
6727                                        (char *) &devid, sizeof(devid));
6728                 if (err == 0 || err == -1)
6729                         return 0;
6730
6731                 if (venid == PCI_VENDOR_ID_SUN &&
6732                     devid == PCI_DEVICE_ID_TIGON3_5704)
6733                         return 1;
6734         }
6735         return 0;
6736 }
6737 #endif
6738
6739 static int __devinit tg3_get_invariants(struct tg3 *tp)
6740 {
6741         u32 misc_ctrl_reg;
6742         u32 cacheline_sz_reg;
6743         u32 pci_state_reg, grc_misc_cfg;
6744         u32 val;
6745         u16 pci_cmd;
6746         int err;
6747
6748 #ifdef CONFIG_SPARC64
6749         if (tg3_is_sun_5704(tp))
6750                 tp->tg3_flags2 |= TG3_FLG2_SUN_5704;
6751 #endif
6752
6753         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset,
6754          * reordering of mailbox register writes done by the host
6755          * controller can cause serious trouble.  We read back from
6756          * every mailbox register write to force the writes to be
6757          * posted to the chip in order.
6758          */
6759         if (pci_find_device(PCI_VENDOR_ID_INTEL,
6760                             PCI_DEVICE_ID_INTEL_82801AA_8, NULL) ||
6761             pci_find_device(PCI_VENDOR_ID_INTEL,
6762                             PCI_DEVICE_ID_INTEL_82801AB_8, NULL) ||
6763             pci_find_device(PCI_VENDOR_ID_INTEL,
6764                             PCI_DEVICE_ID_INTEL_82801BA_11, NULL) ||
6765             pci_find_device(PCI_VENDOR_ID_INTEL,
6766                             PCI_DEVICE_ID_INTEL_82801BA_6, NULL) ||
6767             pci_find_device(PCI_VENDOR_ID_AMD,
6768                             PCI_DEVICE_ID_AMD_FE_GATE_700C, NULL))
6769                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
6770
6771         /* Force memory write invalidate off.  If we leave it on,
6772          * then on 5700_BX chips we have to enable a workaround.
6773          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
6774          * to match the cacheline size.  The Broadcom driver has this
6775          * workaround but turns MWI off all the time, so it is never
6776          * used.  This seems to suggest that the workaround is insufficient.
6777          */
6778         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6779         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
6780         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6781
6782         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
6783          * has the register indirect write enable bit set before
6784          * we try to access any of the MMIO registers.  It is also
6785          * critical that the PCI-X hw workaround situation is decided
6786          * before that as well.
6787          */
6788         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6789                               &misc_ctrl_reg);
6790
6791         tp->pci_chip_rev_id = (misc_ctrl_reg >>
6792                                MISC_HOST_CTRL_CHIPREV_SHIFT);
6793
6794         /* Initialize misc host control in PCI block. */
6795         tp->misc_host_ctrl |= (misc_ctrl_reg &
6796                                MISC_HOST_CTRL_CHIPREV);
6797         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
6798                                tp->misc_host_ctrl);
6799
6800         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
6801                               &cacheline_sz_reg);
6802
6803         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
6804         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
6805         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
6806         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
6807
6808         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
6809             tp->pci_lat_timer < 64) {
6810                 tp->pci_lat_timer = 64;
6811
6812                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
6813                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
6814                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
6815                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
6816
6817                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
6818                                        cacheline_sz_reg);
6819         }
6820
6821         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
6822                               &pci_state_reg);
6823
6824         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
6825                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
6826
6827                 /* If this is a 5700 BX chipset, and we are in PCI-X
6828                  * mode, enable register write workaround.
6829                  *
6830                  * The workaround is to use indirect register accesses
6831                  * for all chip writes not to mailbox registers.
6832                  */
6833                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
6834                         u32 pm_reg;
6835                         u16 pci_cmd;
6836
6837                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
6838
6839                         /* The chip can have its power management PCI config
6840                          * space registers clobbered due to this bug.
6841                          * So explicitly force the chip into D0 here.
6842                          */
6843                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
6844                                               &pm_reg);
6845                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
6846                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
6847                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
6848                                                pm_reg);
6849
6850                         /* Also, force SERR#/PERR# in PCI command. */
6851                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
6852                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
6853                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
6854                 }
6855         }
6856
6857         /* Back to back register writes can cause problems on this chip;
6858          * the workaround is to read back all reg writes except those to
6859          * mailbox regs.  See tg3_write_indirect_reg32().
6860          */
6861         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
6862                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
6863
6864         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
6865                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
6866         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
6867                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
6868
6869         /* Chip-specific fixup from Broadcom driver */
6870         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
6871             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
6872                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
6873                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
6874         }
6875
6876         /* Force the chip into D0. */
6877         err = tg3_set_power_state(tp, 0);
6878         if (err) {
6879                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
6880                        pci_name(tp->pdev));
6881                 return err;
6882         }
6883
6884         /* 5700 B0 chips do not support checksumming correctly due
6885          * to hardware bugs.
6886          */
6887         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
6888                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
6889
6890         /* Pseudo-header checksum is done by hardware logic and not
6891          * the offload processors, so make the chip do the pseudo-
6892          * header checksums on receive.  For transmit it is more
6893          * convenient to do the pseudo-header checksum in software
6894          * as Linux does that on transmit for us in all cases.
6895          */
6896         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
6897         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
6898
6899         /* Derive initial jumbo mode from MTU assigned in
6900          * ether_setup() via the alloc_etherdev() call
6901          */
6902         if (tp->dev->mtu > ETH_DATA_LEN)
6903                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
6904
6905         /* Determine WakeOnLan speed to use. */
6906         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6907             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
6908             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
6909             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
6910                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
6911         } else {
6912                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
6913         }
6914
6915         /* A few boards don't want Ethernet@WireSpeed phy feature */
6916         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
6917             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
6918              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
6919              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
6920                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
6921
6922         /* Only 5701 and later support tagged irq status mode.
6923          * Also, 5788 chips cannot use tagged irq status.
6924          *
6925          * However, since we are using NAPI, avoid tagged irq status
6926          * because the interrupt condition is more difficult to
6927          * fully clear in that mode.
6928          */
6929         tp->coalesce_mode = 0;
6930
6931         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
6932             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
6933                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
6934
6935         /* Initialize MAC MI mode, polling disabled. */
6936         tw32(MAC_MI_MODE, tp->mi_mode);
6937         tr32(MAC_MI_MODE);
6938         udelay(40);
6939
6940         /* Initialize data/descriptor byte/word swapping. */
6941         val = tr32(GRC_MODE);
6942         val &= GRC_MODE_HOST_STACKUP;
6943         tw32(GRC_MODE, val | tp->grc_mode);
6944
6945         tg3_switch_clocks(tp);
6946
6947         /* Clear this out for sanity. */
6948         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
6949
6950         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
6951                               &pci_state_reg);
6952         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
6953             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
6954                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
6955
6956                 if (chiprevid == CHIPREV_ID_5701_A0 ||
6957                     chiprevid == CHIPREV_ID_5701_B0 ||
6958                     chiprevid == CHIPREV_ID_5701_B2 ||
6959                     chiprevid == CHIPREV_ID_5701_B5) {
6960                         unsigned long sram_base;
6961
6962                         /* Write some dummy words into the SRAM status block
6963                          * area and see if they read back correctly.  If the
6964                          * read-back value is bad, force enable the PCI-X workaround.
6965                          */
6966                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
6967
6968                         writel(0x00000000, sram_base);
6969                         writel(0x00000000, sram_base + 4);
6970                         writel(0xffffffff, sram_base + 4);
6971                         if (readl(sram_base) != 0x00000000)
6972                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
6973                 }
6974         }
6975
6976         udelay(50);
6977         tg3_nvram_init(tp);
6978
6979         /* Determine if TX descriptors will reside in
6980          * main memory or in the chip SRAM.
6981          */
6982         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0 ||
6983             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
6984                 tp->tg3_flags |= TG3_FLAG_HOST_TXDS;
6985
6986         grc_misc_cfg = tr32(GRC_MISC_CFG);
6987         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
6988
6989         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
6990             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
6991                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
6992                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
6993         }
6994
6995         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
6996             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
6997              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
6998                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
6999
7000         /* these are limited to 10/100 only */
7001         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7002              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
7003             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
7004              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
7005              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
7006               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
7007               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)))
7008                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
7009
7010         err = tg3_phy_probe(tp);
7011         if (err) {
7012                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
7013                        pci_name(tp->pdev), err);
7014                 /* ... but do not return immediately ... */
7015         }
7016
7017         tg3_read_partno(tp);
7018
7019         if (tp->phy_id == PHY_ID_SERDES) {
7020                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7021
7022                 /* And override led_mode in case Dell ever makes
7023                  * a fibre board.
7024                  */
7025                 tp->led_mode = led_mode_three_link;
7026         } else {
7027                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7028                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
7029                 else
7030                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
7031         }
7032
7033         /* 5700 {AX,BX} chips have a broken status block link
7034          * change bit implementation, so we must use the
7035          * status register in those cases.
7036          */
7037         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7038                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
7039         else
7040                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
7041
7042         /* The led_mode is set during tg3_phy_probe; here we might
7043          * have to force the link status polling mechanism based
7044          * upon subsystem IDs.
7045          */
7046         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
7047             tp->phy_id != PHY_ID_SERDES) {
7048                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
7049                                   TG3_FLAG_USE_LINKCHG_REG);
7050         }
7051
7052         /* For all SERDES we poll the MAC status register. */
7053         if (tp->phy_id == PHY_ID_SERDES)
7054                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
7055         else
7056                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
7057
7058         /* 5700 BX chips need to have their TX producer index mailboxes
7059          * written twice to workaround a bug.
7060          */
7061         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
7062                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
7063         else
7064                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
7065
7066         /* 5700 chips can get confused if TX buffers straddle the
7067          * 4GB address boundary in some cases.
7068          */
7069         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7070                 tp->dev->hard_start_xmit = tg3_start_xmit_4gbug;
7071         else
7072                 tp->dev->hard_start_xmit = tg3_start_xmit;
7073
7074         tp->rx_offset = 2;
7075         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
7076             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
7077                 tp->rx_offset = 0;
7078
7079         /* By default, disable wake-on-lan.  User can change this
7080          * using ETHTOOL_SWOL.
7081          */
7082         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
7083
7084         return err;
7085 }
7086
7087 #ifdef CONFIG_SPARC64
7088 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
7089 {
7090         struct net_device *dev = tp->dev;
7091         struct pci_dev *pdev = tp->pdev;
7092         struct pcidev_cookie *pcp = pdev->sysdata;
7093
7094         if (pcp != NULL) {
7095                 int node = pcp->prom_node;
7096
7097                 if (prom_getproplen(node, "local-mac-address") == 6) {
7098                         prom_getproperty(node, "local-mac-address",
7099                                          dev->dev_addr, 6);
7100                         return 0;
7101                 }
7102         }
7103         return -ENODEV;
7104 }
7105
7106 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
7107 {
7108         struct net_device *dev = tp->dev;
7109
7110         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
7111         return 0;
7112 }
7113 #endif
7114
7115 static int __devinit tg3_get_device_address(struct tg3 *tp)
7116 {
7117         struct net_device *dev = tp->dev;
7118         u32 hi, lo, mac_offset;
7119
7120 #ifdef CONFIG_SPARC64
7121         if (!tg3_get_macaddr_sparc(tp))
7122                 return 0;
7123 #endif
7124
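        /* On multi-function (e.g. 5704) boards the NVRAM copy of the
         * MAC address lives at a different offset for each PCI function,
         * so pick the offset that matches this function.
         */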
7125         if (PCI_FUNC(tp->pdev->devfn) == 0)
7126                 mac_offset = 0x7c;
7127         else
7128                 mac_offset = 0xcc;
7129
7130         /* First try to get it from MAC address mailbox. */
7131         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
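        /* The mailbox is treated as valid when its upper 16 bits hold
         * the 0x484b signature (apparently ASCII "HK").
         */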
7132         if ((hi >> 16) == 0x484b) {
7133                 dev->dev_addr[0] = (hi >>  8) & 0xff;
7134                 dev->dev_addr[1] = (hi >>  0) & 0xff;
7135
7136                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
7137                 dev->dev_addr[2] = (lo >> 24) & 0xff;
7138                 dev->dev_addr[3] = (lo >> 16) & 0xff;
7139                 dev->dev_addr[4] = (lo >>  8) & 0xff;
7140                 dev->dev_addr[5] = (lo >>  0) & 0xff;
7141         }
7142         /* Next, try NVRAM. */
7143         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_5704) &&
7144                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
7145                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
7146                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
7147                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
7148                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
7149                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
7150                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
7151                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
7152         }
7153         /* Finally just fetch it out of the MAC control regs. */
7154         else {
7155                 hi = tr32(MAC_ADDR_0_HIGH);
7156                 lo = tr32(MAC_ADDR_0_LOW);
7157
7158                 dev->dev_addr[5] = lo & 0xff;
7159                 dev->dev_addr[4] = (lo >> 8) & 0xff;
7160                 dev->dev_addr[3] = (lo >> 16) & 0xff;
7161                 dev->dev_addr[2] = (lo >> 24) & 0xff;
7162                 dev->dev_addr[1] = hi & 0xff;
7163                 dev->dev_addr[0] = (hi >> 8) & 0xff;
7164         }
7165
7166         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
7167 #ifdef CONFIG_SPARC64
7168                 if (!tg3_get_default_macaddr_sparc(tp))
7169                         return 0;
7170 #endif
7171                 return -EINVAL;
7172         }
7173         return 0;
7174 }
7175
7176 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
7177 {
7178         struct tg3_internal_buffer_desc test_desc;
7179         u32 sram_dma_descs;
7180         int i, ret;
7181
7182         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
7183
7184         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
7185         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
7186         tw32(RDMAC_STATUS, 0);
7187         tw32(WDMAC_STATUS, 0);
7188
7189         tw32(BUFMGR_MODE, 0);
7190         tw32(FTQ_RESET, 0);
7191
7192         test_desc.addr_hi = ((u64) buf_dma) >> 32;
7193         test_desc.addr_lo = buf_dma & 0xffffffff;
7194         test_desc.nic_mbuf = 0x00002100;
7195         test_desc.len = size;
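        /* The 0x2100 value above is the NIC SRAM address the test buffer
         * is transferred through; tg3_test_dma() reads that same offset
         * back to verify host-to-card transfers.
         */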
7196
7197         /*
7198          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
7199          * the *second* time the tg3 driver was getting loaded after an
7200          * initial scan.
7201          *
7202          * Broadcom tells me:
7203          *   ...the DMA engine is connected to the GRC block and a DMA
7204          *   reset may affect the GRC block in some unpredictable way...
7205          *   The behavior of resets to individual blocks has not been tested.
7206          *
7207          * Broadcom noted the GRC reset will also reset all sub-components.
7208          */
7209         if (to_device) {
7210                 test_desc.cqid_sqid = (13 << 8) | 2;
7211
7212                 tw32(RDMAC_MODE, RDMAC_MODE_ENABLE);
7213                 tr32(RDMAC_MODE);
7214                 udelay(40);
7215         } else {
7216                 test_desc.cqid_sqid = (16 << 8) | 7;
7217
7218                 tw32(WDMAC_MODE, WDMAC_MODE_ENABLE);
7219                 tr32(WDMAC_MODE);
7220                 udelay(40);
7221         }
7222         test_desc.flags = 0x00000005;
7223
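        /* Copy the test descriptor into NIC SRAM one word at a time via
         * the PCI memory-window registers, then close the window again.
         */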
7224         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
7225                 u32 val;
7226
7227                 val = *(((u32 *)&test_desc) + i);
7228                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
7229                                        sram_dma_descs + (i * sizeof(u32)));
7230                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
7231         }
7232         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
7233
7234         if (to_device) {
7235                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
7236         } else {
7237                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
7238         }
7239
7240         ret = -ENODEV;
7241         for (i = 0; i < 40; i++) {
7242                 u32 val;
7243
7244                 if (to_device)
7245                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
7246                 else
7247                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
7248                 if ((val & 0xffff) == sram_dma_descs) {
7249                         ret = 0;
7250                         break;
7251                 }
7252
7253                 udelay(100);
7254         }
7255
7256         return ret;
7257 }
7258
7259 #define TEST_BUFFER_SIZE        0x400
7260
7261 static int __devinit tg3_test_dma(struct tg3 *tp)
7262 {
7263         dma_addr_t buf_dma;
7264         u32 *buf;
7265         int ret;
7266
7267         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
7268         if (!buf) {
7269                 ret = -ENOMEM;
7270                 goto out_nofree;
7271         }
7272
7273         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) == 0) {
7274                 tp->dma_rwctrl =
7275                         (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
7276                         (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
7277                         (0x7 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
7278                         (0x7 << DMA_RWCTRL_READ_WATER_SHIFT) |
7279                         (0x0f << DMA_RWCTRL_MIN_DMA_SHIFT);
7280                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
7281                         tp->dma_rwctrl &= ~(DMA_RWCTRL_MIN_DMA
7282                                             << DMA_RWCTRL_MIN_DMA_SHIFT);
7283         } else {
7284                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7285                         tp->dma_rwctrl =
7286                                 (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
7287                                 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
7288                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
7289                                 (0x7 << DMA_RWCTRL_READ_WATER_SHIFT) |
7290                                 (0x00 << DMA_RWCTRL_MIN_DMA_SHIFT);
7291                 else
7292                         tp->dma_rwctrl =
7293                                 (0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
7294                                 (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT) |
7295                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
7296                                 (0x3 << DMA_RWCTRL_READ_WATER_SHIFT) |
7297                                 (0x0f << DMA_RWCTRL_MIN_DMA_SHIFT);
7298
7299                 /* Wheee, some more chip bugs... */
7300                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7301                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
7302                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
7303
7304                         if (ccval == 0x6 || ccval == 0x7)
7305                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
7306                 }
7307         }
7308
7309         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
7310             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7311                 tp->dma_rwctrl &= ~(DMA_RWCTRL_MIN_DMA
7312                                     << DMA_RWCTRL_MIN_DMA_SHIFT);
7313
7314         /* We don't do this on x86 because it seems to hurt performance.
7315          * It does help things on other platforms though.
7316          */
7317 #ifndef CONFIG_X86
7318         {
7319                 u8 byte;
7320                 int cacheline_size;
7321                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
7322
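                /* PCI_CACHE_LINE_SIZE is expressed in 32-bit words, hence
                 * the multiply by four below; a value of zero means the
                 * register was never programmed, and we fall back to the
                 * largest (1024 byte) boundary in that case.
                 */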
7323                 if (byte == 0)
7324                         cacheline_size = 1024;
7325                 else
7326                         cacheline_size = (int) byte * 4;
7327
7328                 tp->dma_rwctrl &= ~(DMA_RWCTRL_READ_BNDRY_MASK |
7329                                     DMA_RWCTRL_WRITE_BNDRY_MASK);
7330
7331                 switch (cacheline_size) {
7332                 case 16:
7333                         tp->dma_rwctrl |=
7334                                 (DMA_RWCTRL_READ_BNDRY_16 |
7335                                  DMA_RWCTRL_WRITE_BNDRY_16);
7336                         break;
7337
7338                 case 32:
7339                         tp->dma_rwctrl |=
7340                                 (DMA_RWCTRL_READ_BNDRY_32 |
7341                                  DMA_RWCTRL_WRITE_BNDRY_32);
7342                         break;
7343
7344                 case 64:
7345                         tp->dma_rwctrl |=
7346                                 (DMA_RWCTRL_READ_BNDRY_64 |
7347                                  DMA_RWCTRL_WRITE_BNDRY_64);
7348                         break;
7349
7350                 case 128:
7351                         tp->dma_rwctrl |=
7352                                 (DMA_RWCTRL_READ_BNDRY_128 |
7353                                  DMA_RWCTRL_WRITE_BNDRY_128);
7354                         break;
7355
7356                 case 256:
7357                         tp->dma_rwctrl |=
7358                                 (DMA_RWCTRL_READ_BNDRY_256 |
7359                                  DMA_RWCTRL_WRITE_BNDRY_256);
7360                         break;
7361
7362                 case 512:
7363                         tp->dma_rwctrl |=
7364                                 (DMA_RWCTRL_READ_BNDRY_512 |
7365                                  DMA_RWCTRL_WRITE_BNDRY_512);
7366                         break;
7367
7368                 case 1024:
7369                         tp->dma_rwctrl |=
7370                                 (DMA_RWCTRL_READ_BNDRY_1024 |
7371                                  DMA_RWCTRL_WRITE_BNDRY_1024);
7372                         break;
7373                 };
7374         }
7375 #endif
7376
7377         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7378             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
7379                 /* Remove this if it causes problems for some boards. */
7380                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
7381
7382                 /* On 5700/5701 chips, we need to set this bit.
7383                  * Otherwise the chip will issue cacheline transactions
7384                  * to streamable DMA memory with not all the byte
7385                  * enables turned on.  This is an error on several
7386                  * RISC PCI controllers, in particular sparc64.
7387                  *
7388                  * On 5703/5704 chips, this bit has been reassigned
7389                  * a different meaning.  In particular, it is used
7390                  * on those chips to enable a PCI-X workaround.
7391                  */
7392                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
7393         }
7394
7395         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7396
7397 #if 0
7398         /* Unneeded, already done by tg3_get_invariants.  */
7399         tg3_switch_clocks(tp);
7400 #endif
7401
7402         ret = 0;
7403         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7404             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
7405                 goto out;
7406
7407         while (1) {
7408                 u32 *p = buf, i;
7409
7410                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
7411                         p[i] = i;
7412
7413                 /* Send the buffer to the chip. */
7414                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
7415                 if (ret) {
7416                         printk(KERN_ERR "tg3_test_dma() Write of the buffer failed, err = %d\n", ret);
7417                         break;
7418                 }
7419
7420                 /* validate data reached card RAM correctly. */
7421                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7422                         u32 val;
7423                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
7424                         if (val != p[i]) {
7425                                 printk(KERN_ERR "tg3_test_dma() Card buffer corrupted on write! (%d != %d)\n", val, i);
7426                                 /* ret = -ENODEV here? */
7427                         }
7428                         p[i] = 0;
7429                 }
7430
7431                 /* Now read it back. */
7432                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
7433                 if (ret) {
7434                         printk(KERN_ERR "tg3_test_dma() Read of the buffer failed, err = %d\n", ret);
7436                         break;
7437                 }
7438
7439                 /* Verify it. */
7440                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
7441                         if (p[i] == i)
7442                                 continue;
7443
7444                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
7445                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
7446                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
7447                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
7448                                 break;
7449                         } else {
7450                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
7451                                 ret = -ENODEV;
7452                                 goto out;
7453                         }
7454                 }
7455
7456                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
7457                         /* Success. */
7458                         ret = 0;
7459                         break;
7460                 }
7461         }
7462
7463 out:
7464         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
7465 out_nofree:
7466         return ret;
7467 }
7468
7469 static void __devinit tg3_init_link_config(struct tg3 *tp)
7470 {
7471         tp->link_config.advertising =
7472                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
7473                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
7474                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
7475                  ADVERTISED_Autoneg | ADVERTISED_MII);
7476         tp->link_config.speed = SPEED_INVALID;
7477         tp->link_config.duplex = DUPLEX_INVALID;
7478         tp->link_config.autoneg = AUTONEG_ENABLE;
7479         netif_carrier_off(tp->dev);
7480         tp->link_config.active_speed = SPEED_INVALID;
7481         tp->link_config.active_duplex = DUPLEX_INVALID;
7482         tp->link_config.phy_is_low_power = 0;
7483         tp->link_config.orig_speed = SPEED_INVALID;
7484         tp->link_config.orig_duplex = DUPLEX_INVALID;
7485         tp->link_config.orig_autoneg = AUTONEG_INVALID;
7486 }
7487
7488 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
7489 {
7490         tp->bufmgr_config.mbuf_read_dma_low_water =
7491                 DEFAULT_MB_RDMA_LOW_WATER;
7492         tp->bufmgr_config.mbuf_mac_rx_low_water =
7493                 DEFAULT_MB_MACRX_LOW_WATER;
7494         tp->bufmgr_config.mbuf_high_water =
7495                 DEFAULT_MB_HIGH_WATER;
7496
7497         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
7498                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
7499         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
7500                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
7501         tp->bufmgr_config.mbuf_high_water_jumbo =
7502                 DEFAULT_MB_HIGH_WATER_JUMBO;
7503
7504         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
7505         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
7506 }
7507
7508 static char * __devinit tg3_phy_string(struct tg3 *tp)
7509 {
7510         switch (tp->phy_id & PHY_ID_MASK) {
7511         case PHY_ID_BCM5400:    return "5400";
7512         case PHY_ID_BCM5401:    return "5401";
7513         case PHY_ID_BCM5411:    return "5411";
7514         case PHY_ID_BCM5701:    return "5701";
7515         case PHY_ID_BCM5703:    return "5703";
7516         case PHY_ID_BCM5704:    return "5704";
7517         case PHY_ID_BCM5705:    return "5705";
7518         case PHY_ID_BCM8002:    return "8002";
7519         case PHY_ID_SERDES:     return "serdes";
7520         default:                return "unknown";
7521         };
7522 }
7523
7524 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
7525 {
7526         struct pci_dev *peer = NULL;
7527         unsigned int func;
7528
7529         for (func = 0; func < 7; func++) {
7530                 unsigned int devfn = tp->pdev->devfn;
7531
7532                 devfn &= ~7;
7533                 devfn |= func;
7534
7535                 if (devfn == tp->pdev->devfn)
7536                         continue;
7537                 peer = pci_find_slot(tp->pdev->bus->number, devfn);
7538                 if (peer)
7539                         break;
7540         }
7541         if (!peer || peer == tp->pdev)
7542                 BUG();
7543         return peer;
7544 }
7545
7546 static int __devinit tg3_init_one(struct pci_dev *pdev,
7547                                   const struct pci_device_id *ent)
7548 {
7549         static int tg3_version_printed = 0;
7550         unsigned long tg3reg_base, tg3reg_len;
7551         struct net_device *dev;
7552         struct tg3 *tp;
7553         int i, err, pci_using_dac, pm_cap;
7554
7555         if (tg3_version_printed++ == 0)
7556                 printk(KERN_INFO "%s", version);
7557
7558         err = pci_enable_device(pdev);
7559         if (err) {
7560                 printk(KERN_ERR PFX "Cannot enable PCI device, "
7561                        "aborting.\n");
7562                 return err;
7563         }
7564
7565         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7566                 printk(KERN_ERR PFX "Cannot find proper PCI device "
7567                        "base address, aborting.\n");
7568                 err = -ENODEV;
7569                 goto err_out_disable_pdev;
7570         }
7571
7572         err = pci_request_regions(pdev, DRV_MODULE_NAME);
7573         if (err) {
7574                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
7575                        "aborting.\n");
7576                 goto err_out_disable_pdev;
7577         }
7578
7579         pci_set_master(pdev);
7580
7581         /* Find power-management capability. */
7582         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7583         if (pm_cap == 0) {
7584                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
7585                        "aborting.\n");
7586                 err = -EIO;
7587                 goto err_out_free_res;
7588         }
7589
7590         /* Configure DMA attributes. */
7591         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
7592         if (!err) {
7593                 pci_using_dac = 1;
7594                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
7595                 if (err < 0) {
7596                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
7597                                "for consistent allocations\n");
7598                         goto err_out_free_res;
7599                 }
7600         } else {
7601                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
7602                 if (err) {
7603                         printk(KERN_ERR PFX "No usable DMA configuration, "
7604                                "aborting.\n");
7605                         goto err_out_free_res;
7606                 }
7607                 pci_using_dac = 0;
7608         }
7609
7610         tg3reg_base = pci_resource_start(pdev, 0);
7611         tg3reg_len = pci_resource_len(pdev, 0);
7612
7613         dev = alloc_etherdev(sizeof(*tp));
7614         if (!dev) {
7615                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
7616                 err = -ENOMEM;
7617                 goto err_out_free_res;
7618         }
7619
7620         SET_MODULE_OWNER(dev);
7621         SET_NETDEV_DEV(dev, &pdev->dev);
7622
7623         if (pci_using_dac)
7624                 dev->features |= NETIF_F_HIGHDMA;
7625 #if TG3_VLAN_TAG_USED
7626         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
7627         dev->vlan_rx_register = tg3_vlan_rx_register;
7628         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
7629 #endif
7630
7631         tp = dev->priv;
7632         tp->pdev = pdev;
7633         tp->dev = dev;
7634         tp->pm_cap = pm_cap;
7635         tp->mac_mode = TG3_DEF_MAC_MODE;
7636         tp->rx_mode = TG3_DEF_RX_MODE;
7637         tp->tx_mode = TG3_DEF_TX_MODE;
7638         tp->mi_mode = MAC_MI_MODE_BASE;
7639         if (tg3_debug > 0)
7640                 tp->msg_enable = tg3_debug;
7641         else
7642                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
7643
7644         /* The word/byte swap controls here control register access byte
7645          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
7646          * setting below.
7647          */
7648         tp->misc_host_ctrl =
7649                 MISC_HOST_CTRL_MASK_PCI_INT |
7650                 MISC_HOST_CTRL_WORD_SWAP |
7651                 MISC_HOST_CTRL_INDIR_ACCESS |
7652                 MISC_HOST_CTRL_PCISTATE_RW;
7653
7654         /* The NONFRM (non-frame) byte/word swap controls take effect
7655          * on descriptor entries, anything which isn't packet data.
7656          *
7657          * The StrongARM chips on the board (one for tx, one for rx)
7658          * are running in big-endian mode.
7659          */
7660         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
7661                         GRC_MODE_WSWAP_NONFRM_DATA);
7662 #ifdef __BIG_ENDIAN
7663         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
7664 #endif
7665         spin_lock_init(&tp->lock);
7666         spin_lock_init(&tp->tx_lock);
7667         spin_lock_init(&tp->indirect_lock);
7668         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
7669
7670         tp->regs = (unsigned long) ioremap(tg3reg_base, tg3reg_len);
7671         if (tp->regs == 0UL) {
7672                 printk(KERN_ERR PFX "Cannot map device registers, "
7673                        "aborting.\n");
7674                 err = -ENOMEM;
7675                 goto err_out_free_dev;
7676         }
7677
7678         tg3_init_link_config(tp);
7679
7680         tg3_init_bufmgr_config(tp);
7681
7682         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
7683         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
7684         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
7685
7686         dev->open = tg3_open;
7687         dev->stop = tg3_close;
7688         dev->get_stats = tg3_get_stats;
7689         dev->set_multicast_list = tg3_set_rx_mode;
7690         dev->set_mac_address = tg3_set_mac_addr;
7691         dev->do_ioctl = tg3_ioctl;
7692         dev->tx_timeout = tg3_tx_timeout;
7693         dev->poll = tg3_poll;
7694         dev->ethtool_ops = &tg3_ethtool_ops;
7695         dev->weight = 64;
7696         dev->watchdog_timeo = TG3_TX_TIMEOUT;
7697         dev->change_mtu = tg3_change_mtu;
7698         dev->irq = pdev->irq;
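        /* The assignments above wire up the 2.6-era net_device callbacks,
         * including the NAPI ->poll handler: dev->weight (64 here) is the
         * per-poll packet budget, and dev->watchdog_timeo arms the TX
         * watchdog that ends up invoking tg3_tx_timeout().
         */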
7699
7700         err = tg3_get_invariants(tp);
7701         if (err) {
7702                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
7703                        "aborting.\n");
7704                 goto err_out_iounmap;
7705         }
7706
7707         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7708                 tp->bufmgr_config.mbuf_read_dma_low_water =
7709                         DEFAULT_MB_RDMA_LOW_WATER_5705;
7710                 tp->bufmgr_config.mbuf_mac_rx_low_water =
7711                         DEFAULT_MB_MACRX_LOW_WATER_5705;
7712                 tp->bufmgr_config.mbuf_high_water =
7713                         DEFAULT_MB_HIGH_WATER_5705;
7714         }
7715
7716 #if TG3_TSO_SUPPORT != 0
7717         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7718             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
7719             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
7720             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0 ||
7721             (tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
7722                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
7723         } else {
7724                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
7725         }
7726
7727         /* TSO is off by default; the user can enable it via ethtool (see the note below).  */
7728 #if 0
7729         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
7730                 dev->features |= NETIF_F_TSO;
7731 #endif
7732
7733 #endif
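        /* Even when the chip is marked TSO-capable above, the feature flag
         * is deliberately left clear at probe time (note the #if 0).  The
         * expectation is that it gets enabled from userspace, roughly:
         *
         *      ethtool -K eth0 tso on
         *
         * (Illustrative only; the exact hook depends on the ethtool_ops
         * provided elsewhere in this file.)
         */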
7734
7735         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
7736             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
7737             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
7738                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
7739                 tp->rx_pending = 63;
7740         }
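        /* The check above caps the default RX ring occupancy at 63 entries
         * for 5705 A1 parts that are not TSO-capable and sit on a slow PCI
         * bus; TG3_FLG2_MAX_RXPEND_64 presumably also clamps later ring
         * resizes done through ethtool elsewhere in the driver.
         */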
7741
7742         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
7743                 tp->pdev_peer = tg3_find_5704_peer(tp);
7744
7745         err = tg3_get_device_address(tp);
7746         if (err) {
7747                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
7748                        "aborting.\n");
7749                 goto err_out_iounmap;
7750         }
7751
7752         err = tg3_test_dma(tp);
7753         if (err) {
7754                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
7755                 goto err_out_iounmap;
7756         }
7757
7758         /* Tigon3 can only checksum IPv4 packets in hardware, and some
7759          * chips have buggy checksum logic.
7760          */
7761         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
7762                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
7763                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
7764         } else
7765                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
7766
7767         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
7768                 dev->features &= ~NETIF_F_HIGHDMA;
7769
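        /* Summary of the feature fixups above: TX checksum offload is IPv4
         * only (NETIF_F_IP_CSUM) and scatter-gather is enabled alongside it;
         * RX checksumming is tracked separately via TG3_FLAG_RX_CHECKSUMS;
         * and 5788-class chips lose NETIF_F_HIGHDMA regardless of the DMA
         * mask negotiated earlier.
         */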
7770         err = register_netdev(dev);
7771         if (err) {
7772                 printk(KERN_ERR PFX "Cannot register net device, "
7773                        "aborting.\n");
7774                 goto err_out_iounmap;
7775         }
7776
7777         pci_set_drvdata(pdev, dev);
7778
7779         /* Now that we have fully set up the chip, save away a snapshot
7780          * of the PCI config space.  We need to restore this after
7781          * GRC_MISC_CFG core clock resets and some resume events.
7782          */
7783         pci_save_state(tp->pdev, tp->pci_cfg_state);
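        /* The two-argument pci_save_state() above is the old-style API that
         * copies the config space into the caller-supplied buffer
         * (tp->pci_cfg_state); the reset and resume paths are expected to
         * restore from it, per the comment above.
         */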
7784
7785         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
7786                dev->name,
7787                tp->board_part_number,
7788                tp->pci_chip_rev_id,
7789                tg3_phy_string(tp),
7790                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
7791                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
7792                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
7793                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
7794                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
7795                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
7796
7797         for (i = 0; i < 6; i++)
7798                 printk("%2.2x%c", dev->dev_addr[i],
7799                        i == 5 ? '\n' : ':');
7800
7801         return 0;
7802
7803 err_out_iounmap:
7804         iounmap((void *) tp->regs);
7805
7806 err_out_free_dev:
7807         free_netdev(dev);
7808
7809 err_out_free_res:
7810         pci_release_regions(pdev);
7811
7812 err_out_disable_pdev:
7813         pci_disable_device(pdev);
7814         pci_set_drvdata(pdev, NULL);
7815         return err;
7816 }
7817
7818 static void __devexit tg3_remove_one(struct pci_dev *pdev)
7819 {
7820         struct net_device *dev = pci_get_drvdata(pdev);
7821
7822         if (dev) {
7823                 unregister_netdev(dev);
7824                 iounmap((void *) ((struct tg3 *)(dev->priv))->regs);
7825                 free_netdev(dev);
7826                 pci_release_regions(pdev);
7827                 pci_disable_device(pdev);
7828                 pci_set_drvdata(pdev, NULL);
7829         }
7830 }
7831
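/* Power-management suspend: quiesce NAPI and the TX queue, stop the
 * periodic timer, mask interrupts, detach the device from the stack,
 * halt the chip and finally enter the requested PCI power state.  If
 * the power transition fails, everything is brought back up so the
 * interface keeps working.
 */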
7832 static int tg3_suspend(struct pci_dev *pdev, u32 state)
7833 {
7834         struct net_device *dev = pci_get_drvdata(pdev);
7835         struct tg3 *tp = dev->priv;
7836         int err;
7837
7838         if (!netif_running(dev))
7839                 return 0;
7840
7841         tg3_netif_stop(tp);
7842
7843         del_timer_sync(&tp->timer);
7844
7845         spin_lock_irq(&tp->lock);
7846         spin_lock(&tp->tx_lock);
7847         tg3_disable_ints(tp);
7848         spin_unlock(&tp->tx_lock);
7849         spin_unlock_irq(&tp->lock);
7850
7851         netif_device_detach(dev);
7852
7853         spin_lock_irq(&tp->lock);
7854         spin_lock(&tp->tx_lock);
7855         tg3_halt(tp);
7856         spin_unlock(&tp->tx_lock);
7857         spin_unlock_irq(&tp->lock);
7858
7859         err = tg3_set_power_state(tp, state);
7860         if (err) {
7861                 spin_lock_irq(&tp->lock);
7862                 spin_lock(&tp->tx_lock);
7863
7864                 tg3_init_hw(tp);
7865
7866                 tp->timer.expires = jiffies + tp->timer_offset;
7867                 add_timer(&tp->timer);
7868
7869                 spin_unlock(&tp->tx_lock);
7870                 spin_unlock_irq(&tp->lock);
7871
7872                 netif_device_attach(dev);
7873                 tg3_netif_start(tp);
7874         }
7875
7876         return err;
7877 }
7878
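/* Power-management resume: return the chip to full power (D0), reattach
 * the device, reprogram the hardware from scratch, restart the periodic
 * timer, unmask interrupts and let the stack transmit again.
 */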
7879 static int tg3_resume(struct pci_dev *pdev)
7880 {
7881         struct net_device *dev = pci_get_drvdata(pdev);
7882         struct tg3 *tp = dev->priv;
7883         int err;
7884
7885         if (!netif_running(dev))
7886                 return 0;
7887
7888         err = tg3_set_power_state(tp, 0);
7889         if (err)
7890                 return err;
7891
7892         netif_device_attach(dev);
7893
7894         spin_lock_irq(&tp->lock);
7895         spin_lock(&tp->tx_lock);
7896
7897         tg3_init_hw(tp);
7898
7899         tp->timer.expires = jiffies + tp->timer_offset;
7900         add_timer(&tp->timer);
7901
7902         tg3_enable_ints(tp);
7903
7904         spin_unlock(&tp->tx_lock);
7905         spin_unlock_irq(&tp->lock);
7906
7907         tg3_netif_start(tp);
7908
7909         return 0;
7910 }
7911
7912 static struct pci_driver tg3_driver = {
7913         .name           = DRV_MODULE_NAME,
7914         .id_table       = tg3_pci_tbl,
7915         .probe          = tg3_init_one,
7916         .remove         = __devexit_p(tg3_remove_one),
7917         .suspend        = tg3_suspend,
7918         .resume         = tg3_resume
7919 };
7920
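/* pci_module_init() is the module-facing wrapper around
 * pci_register_driver() used at this point in the 2.6 series; later
 * kernels call pci_register_driver() directly.
 */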
7921 static int __init tg3_init(void)
7922 {
7923         return pci_module_init(&tg3_driver);
7924 }
7925
7926 static void __exit tg3_cleanup(void)
7927 {
7928         pci_unregister_driver(&tg3_driver);
7929 }
7930
7931 module_init(tg3_init);
7932 module_exit(tg3_cleanup);