1 /*
2  * tg3.c: Broadcom Tigon3 ethernet driver.
3  *
4  * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
5  * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
6  * Copyright (C) 2004 Sun Microsystems Inc.
7  * Copyright (C) 2005 Broadcom Corporation.
8  *
9  * Firmware is:
10  *      Copyright (C) 2000-2003 Broadcom Corporation.
11  */
12
13 #include <linux/config.h>
14
15 #include <linux/module.h>
16 #include <linux/moduleparam.h>
17 #include <linux/kernel.h>
18 #include <linux/types.h>
19 #include <linux/compiler.h>
20 #include <linux/slab.h>
21 #include <linux/delay.h>
22 #include <linux/init.h>
23 #include <linux/ioport.h>
24 #include <linux/pci.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/ethtool.h>
29 #include <linux/mii.h>
30 #include <linux/if_vlan.h>
31 #include <linux/ip.h>
32 #include <linux/tcp.h>
33 #include <linux/workqueue.h>
34
35 #include <net/checksum.h>
36
37 #include <asm/system.h>
38 #include <asm/io.h>
39 #include <asm/byteorder.h>
40 #include <asm/uaccess.h>
41
42 #ifdef CONFIG_SPARC64
43 #include <asm/idprom.h>
44 #include <asm/oplib.h>
45 #include <asm/pbm.h>
46 #endif
47
48 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
49 #define TG3_VLAN_TAG_USED 1
50 #else
51 #define TG3_VLAN_TAG_USED 0
52 #endif
53
54 #ifdef NETIF_F_TSO
55 #define TG3_TSO_SUPPORT 1
56 #else
57 #define TG3_TSO_SUPPORT 0
58 #endif
59
60 #include "tg3.h"
61
62 #define DRV_MODULE_NAME         "tg3"
63 #define PFX DRV_MODULE_NAME     ": "
64 #define DRV_MODULE_VERSION      "3.25"
65 #define DRV_MODULE_RELDATE      "March 24, 2005"
66
67 #define TG3_DEF_MAC_MODE        0
68 #define TG3_DEF_RX_MODE         0
69 #define TG3_DEF_TX_MODE         0
70 #define TG3_DEF_MSG_ENABLE        \
71         (NETIF_MSG_DRV          | \
72          NETIF_MSG_PROBE        | \
73          NETIF_MSG_LINK         | \
74          NETIF_MSG_TIMER        | \
75          NETIF_MSG_IFDOWN       | \
76          NETIF_MSG_IFUP         | \
77          NETIF_MSG_RX_ERR       | \
78          NETIF_MSG_TX_ERR)
79
80 /* Length of time before we decide the hardware is hung and
81  * dev->tx_timeout() should be called to attempt recovery.
82  */
83 #define TG3_TX_TIMEOUT                  (5 * HZ)
84
85 /* hardware minimum and maximum for a single frame's data payload */
86 #define TG3_MIN_MTU                     60
87 #define TG3_MAX_MTU(tp) \
88         ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 && \
89           GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) ? 9000 : 1500)
90
91 /* These numbers seem to be hard coded in the NIC firmware somehow.
92  * You can't change the ring sizes, but you can change where you place
93  * them in the NIC onboard memory.
94  */
95 #define TG3_RX_RING_SIZE                512
96 #define TG3_DEF_RX_RING_PENDING         200
97 #define TG3_RX_JUMBO_RING_SIZE          256
98 #define TG3_DEF_RX_JUMBO_RING_PENDING   100
99
100 /* Do not place this n-ring entries value into the tp struct itself,
101  * we really want to expose these constants to GCC so that modulo et
102  * al.  operations are done with shifts and masks instead of with
103  * hw multiply/modulo instructions.  Another solution would be to
104  * replace things like '% foo' with '& (foo - 1)'.
105  */
106 #define TG3_RX_RCB_RING_SIZE(tp)        \
107         ((tp->tg3_flags2 & TG3_FLG2_5705_PLUS) ?  512 : 1024)
108
109 #define TG3_TX_RING_SIZE                512
110 #define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)
111
112 #define TG3_RX_RING_BYTES       (sizeof(struct tg3_rx_buffer_desc) * \
113                                  TG3_RX_RING_SIZE)
114 #define TG3_RX_JUMBO_RING_BYTES (sizeof(struct tg3_rx_buffer_desc) * \
115                                  TG3_RX_JUMBO_RING_SIZE)
116 #define TG3_RX_RCB_RING_BYTES(tp) (sizeof(struct tg3_rx_buffer_desc) * \
117                                    TG3_RX_RCB_RING_SIZE(tp))
118 #define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
119                                  TG3_TX_RING_SIZE)
120 #define TX_RING_GAP(TP) \
121         (TG3_TX_RING_SIZE - (TP)->tx_pending)
122 #define TX_BUFFS_AVAIL(TP)                                              \
123         (((TP)->tx_cons <= (TP)->tx_prod) ?                             \
124           (TP)->tx_cons + (TP)->tx_pending - (TP)->tx_prod :            \
125           (TP)->tx_cons - (TP)->tx_prod - TX_RING_GAP(TP))
126 #define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
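/* Illustrative note: because TG3_TX_RING_SIZE is a power of two (512),
 * an expression such as (idx % TG3_TX_RING_SIZE) reduces to
 * (idx & (TG3_TX_RING_SIZE - 1)), which is exactly the form NEXT_TX()
 * above uses.  As a worked example with hypothetical values: with
 * tx_pending == 511 (so TX_RING_GAP() == 1), tx_cons == 10 and
 * tx_prod == 500, TX_BUFFS_AVAIL() evaluates to 10 + 511 - 500 == 21
 * free descriptors.
 */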
127
128 #define RX_PKT_BUF_SZ           (1536 + tp->rx_offset + 64)
129 #define RX_JUMBO_PKT_BUF_SZ     (9046 + tp->rx_offset + 64)
130
131 /* minimum number of free TX descriptors required to wake up TX process */
132 #define TG3_TX_WAKEUP_THRESH            (TG3_TX_RING_SIZE / 4)
133
134 /* number of ETHTOOL_GSTATS u64's */
135 #define TG3_NUM_STATS           (sizeof(struct tg3_ethtool_stats)/sizeof(u64))
136
137 static char version[] __devinitdata =
138         DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
139
140 MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
141 MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
142 MODULE_LICENSE("GPL");
143 MODULE_VERSION(DRV_MODULE_VERSION);
144
145 static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
146 module_param(tg3_debug, int, 0);
147 MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
148
149 static struct pci_device_id tg3_pci_tbl[] = {
150         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700,
151           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
152         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701,
153           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
154         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702,
155           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
156         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703,
157           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
158         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704,
159           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
160         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE,
161           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
162         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705,
163           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
164         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2,
165           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
166         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M,
167           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
168         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2,
169           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
170         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X,
171           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
172         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X,
173           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
174         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S,
175           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
176         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3,
177           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
178         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3,
179           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
180         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782,
181           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
182         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788,
183           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
184         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789,
185           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
186         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901,
187           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
188         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2,
189           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
190         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2,
191           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
192         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F,
193           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
194         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5720,
195           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
196         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721,
197           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
198         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750,
199           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
200         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751,
201           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
202         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5750M,
203           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
204         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M,
205           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
206         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F,
207           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
208         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753,
209           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
210         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M,
211           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
212         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F,
213           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
214         { PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781,
215           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
216         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX,
217           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
218         { PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX,
219           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
220         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000,
221           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
222         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001,
223           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
224         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003,
225           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
226         { PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100,
227           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
228         { PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3,
229           PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0UL },
230         { 0, }
231 };
232
233 MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);
234
235 static struct {
236         const char string[ETH_GSTRING_LEN];
237 } ethtool_stats_keys[TG3_NUM_STATS] = {
238         { "rx_octets" },
239         { "rx_fragments" },
240         { "rx_ucast_packets" },
241         { "rx_mcast_packets" },
242         { "rx_bcast_packets" },
243         { "rx_fcs_errors" },
244         { "rx_align_errors" },
245         { "rx_xon_pause_rcvd" },
246         { "rx_xoff_pause_rcvd" },
247         { "rx_mac_ctrl_rcvd" },
248         { "rx_xoff_entered" },
249         { "rx_frame_too_long_errors" },
250         { "rx_jabbers" },
251         { "rx_undersize_packets" },
252         { "rx_in_length_errors" },
253         { "rx_out_length_errors" },
254         { "rx_64_or_less_octet_packets" },
255         { "rx_65_to_127_octet_packets" },
256         { "rx_128_to_255_octet_packets" },
257         { "rx_256_to_511_octet_packets" },
258         { "rx_512_to_1023_octet_packets" },
259         { "rx_1024_to_1522_octet_packets" },
260         { "rx_1523_to_2047_octet_packets" },
261         { "rx_2048_to_4095_octet_packets" },
262         { "rx_4096_to_8191_octet_packets" },
263         { "rx_8192_to_9022_octet_packets" },
264
265         { "tx_octets" },
266         { "tx_collisions" },
267
268         { "tx_xon_sent" },
269         { "tx_xoff_sent" },
270         { "tx_flow_control" },
271         { "tx_mac_errors" },
272         { "tx_single_collisions" },
273         { "tx_mult_collisions" },
274         { "tx_deferred" },
275         { "tx_excessive_collisions" },
276         { "tx_late_collisions" },
277         { "tx_collide_2times" },
278         { "tx_collide_3times" },
279         { "tx_collide_4times" },
280         { "tx_collide_5times" },
281         { "tx_collide_6times" },
282         { "tx_collide_7times" },
283         { "tx_collide_8times" },
284         { "tx_collide_9times" },
285         { "tx_collide_10times" },
286         { "tx_collide_11times" },
287         { "tx_collide_12times" },
288         { "tx_collide_13times" },
289         { "tx_collide_14times" },
290         { "tx_collide_15times" },
291         { "tx_ucast_packets" },
292         { "tx_mcast_packets" },
293         { "tx_bcast_packets" },
294         { "tx_carrier_sense_errors" },
295         { "tx_discards" },
296         { "tx_errors" },
297
298         { "dma_writeq_full" },
299         { "dma_write_prioq_full" },
300         { "rxbds_empty" },
301         { "rx_discards" },
302         { "rx_errors" },
303         { "rx_threshold_hit" },
304
305         { "dma_readq_full" },
306         { "dma_read_prioq_full" },
307         { "tx_comp_queue_full" },
308
309         { "ring_set_send_prod_index" },
310         { "ring_status_update" },
311         { "nic_irqs" },
312         { "nic_avoided_irqs" },
313         { "nic_tx_threshold_hit" }
314 };
315
316 static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
317 {
318         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
319                 unsigned long flags;
320
321                 spin_lock_irqsave(&tp->indirect_lock, flags);
322                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
323                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
324                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
325         } else {
326                 writel(val, tp->regs + off);
327                 if ((tp->tg3_flags & TG3_FLAG_5701_REG_WRITE_BUG) != 0)
328                         readl(tp->regs + off);
329         }
330 }
331
332 static void _tw32_flush(struct tg3 *tp, u32 off, u32 val)
333 {
334         if ((tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) != 0) {
335                 unsigned long flags;
336
337                 spin_lock_irqsave(&tp->indirect_lock, flags);
338                 pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
339                 pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
340                 spin_unlock_irqrestore(&tp->indirect_lock, flags);
341         } else {
342                 void __iomem *dest = tp->regs + off;
343                 writel(val, dest);
344                 readl(dest);    /* always flush PCI write */
345         }
346 }
347
348 static inline void _tw32_rx_mbox(struct tg3 *tp, u32 off, u32 val)
349 {
350         void __iomem *mbox = tp->regs + off;
351         writel(val, mbox);
352         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
353                 readl(mbox);
354 }
355
356 static inline void _tw32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
357 {
358         void __iomem *mbox = tp->regs + off;
359         writel(val, mbox);
360         if (tp->tg3_flags & TG3_FLAG_TXD_MBOX_HWBUG)
361                 writel(val, mbox);
362         if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
363                 readl(mbox);
364 }
365
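/* Register access wrappers.  tw32() goes through
 * tg3_write_indirect_reg32() above and so honours the PCI-X target and
 * 5701 register-write workarounds; tw32_f() flushes the posted write by
 * reading the register back.  tw32_mailbox() is a plain MMIO write,
 * while the rx/tx mailbox variants apply the mailbox write-reorder and
 * TXD mailbox workarounds.  tr32()/tr16()/tr8() are plain MMIO reads.
 */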
366 #define tw32_mailbox(reg, val)  writel(((val) & 0xffffffff), tp->regs + (reg))
367 #define tw32_rx_mbox(reg, val)  _tw32_rx_mbox(tp, reg, val)
368 #define tw32_tx_mbox(reg, val)  _tw32_tx_mbox(tp, reg, val)
369
370 #define tw32(reg,val)           tg3_write_indirect_reg32(tp,(reg),(val))
371 #define tw32_f(reg,val)         _tw32_flush(tp,(reg),(val))
372 #define tw16(reg,val)           writew(((val) & 0xffff), tp->regs + (reg))
373 #define tw8(reg,val)            writeb(((val) & 0xff), tp->regs + (reg))
374 #define tr32(reg)               readl(tp->regs + (reg))
375 #define tr16(reg)               readw(tp->regs + (reg))
376 #define tr8(reg)                readb(tp->regs + (reg))
377
378 static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
379 {
380         unsigned long flags;
381
382         spin_lock_irqsave(&tp->indirect_lock, flags);
383         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
384         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
385
386         /* Always leave this as zero. */
387         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
388         spin_unlock_irqrestore(&tp->indirect_lock, flags);
389 }
390
391 static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
392 {
393         unsigned long flags;
394
395         spin_lock_irqsave(&tp->indirect_lock, flags);
396         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
397         pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
398
399         /* Always leave this as zero. */
400         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
401         spin_unlock_irqrestore(&tp->indirect_lock, flags);
402 }
403
404 static void tg3_disable_ints(struct tg3 *tp)
405 {
406         tw32(TG3PCI_MISC_HOST_CTRL,
407              (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
408         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
409         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
410 }
411
412 static inline void tg3_cond_int(struct tg3 *tp)
413 {
414         if (tp->hw_status->status & SD_STATUS_UPDATED)
415                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
416 }
417
418 static void tg3_enable_ints(struct tg3 *tp)
419 {
420         tw32(TG3PCI_MISC_HOST_CTRL,
421              (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
422         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
423         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
424
425         tg3_cond_int(tp);
426 }
427
428 /* tg3_restart_ints
429  *  similar to tg3_enable_ints, but it can return without flushing the
430  *  PIO write which reenables interrupts
431  */
432 static void tg3_restart_ints(struct tg3 *tp)
433 {
434         tw32(TG3PCI_MISC_HOST_CTRL,
435                 (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));
436         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000000);
437         mmiowb();
438
439         tg3_cond_int(tp);
440 }
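/* Note: the mmiowb() above is intended as a cheaper alternative to the
 * readl() flush performed by tg3_enable_ints(); it orders the mailbox
 * write ahead of a later release of the lock protecting this path
 * without waiting for the write to reach the chip.
 */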
441
442 static inline void tg3_netif_stop(struct tg3 *tp)
443 {
444         netif_poll_disable(tp->dev);
445         netif_tx_disable(tp->dev);
446 }
447
448 static inline void tg3_netif_start(struct tg3 *tp)
449 {
450         netif_wake_queue(tp->dev);
451         /* NOTE: unconditional netif_wake_queue is only appropriate
452          * so long as all callers are assured to have free tx slots
453          * (such as after tg3_init_hw)
454          */
455         netif_poll_enable(tp->dev);
456         tg3_cond_int(tp);
457 }
458
459 static void tg3_switch_clocks(struct tg3 *tp)
460 {
461         u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);
462         u32 orig_clock_ctrl;
463
464         orig_clock_ctrl = clock_ctrl;
465         clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
466                        CLOCK_CTRL_CLKRUN_OENABLE |
467                        0x1f);
468         tp->pci_clock_ctrl = clock_ctrl;
469
470         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
471                 if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
472                         tw32_f(TG3PCI_CLOCK_CTRL,
473                                clock_ctrl | CLOCK_CTRL_625_CORE);
474                         udelay(40);
475                 }
476         } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
477                 tw32_f(TG3PCI_CLOCK_CTRL,
478                      clock_ctrl |
479                      (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK));
480                 udelay(40);
481                 tw32_f(TG3PCI_CLOCK_CTRL,
482                      clock_ctrl | (CLOCK_CTRL_ALTCLK));
483                 udelay(40);
484         }
485         tw32_f(TG3PCI_CLOCK_CTRL, clock_ctrl);
486         udelay(40);
487 }
488
489 #define PHY_BUSY_LOOPS  5000
490
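/* tg3_readphy()/tg3_writephy() drive the MAC's MII management
 * interface: a frame holding the PHY address, register address and a
 * read or write command is written to MAC_MI_COM, then MI_COM_BUSY is
 * polled (up to PHY_BUSY_LOOPS iterations) until the access completes.
 * Hardware auto-polling is temporarily disabled around the access and
 * restored afterwards.
 */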
491 static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
492 {
493         u32 frame_val;
494         unsigned int loops;
495         int ret;
496
497         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
498                 tw32_f(MAC_MI_MODE,
499                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
500                 udelay(80);
501         }
502
503         *val = 0x0;
504
505         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
506                       MI_COM_PHY_ADDR_MASK);
507         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
508                       MI_COM_REG_ADDR_MASK);
509         frame_val |= (MI_COM_CMD_READ | MI_COM_START);
510         
511         tw32_f(MAC_MI_COM, frame_val);
512
513         loops = PHY_BUSY_LOOPS;
514         while (loops != 0) {
515                 udelay(10);
516                 frame_val = tr32(MAC_MI_COM);
517
518                 if ((frame_val & MI_COM_BUSY) == 0) {
519                         udelay(5);
520                         frame_val = tr32(MAC_MI_COM);
521                         break;
522                 }
523                 loops -= 1;
524         }
525
526         ret = -EBUSY;
527         if (loops != 0) {
528                 *val = frame_val & MI_COM_DATA_MASK;
529                 ret = 0;
530         }
531
532         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
533                 tw32_f(MAC_MI_MODE, tp->mi_mode);
534                 udelay(80);
535         }
536
537         return ret;
538 }
539
540 static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
541 {
542         u32 frame_val;
543         unsigned int loops;
544         int ret;
545
546         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
547                 tw32_f(MAC_MI_MODE,
548                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
549                 udelay(80);
550         }
551
552         frame_val  = ((PHY_ADDR << MI_COM_PHY_ADDR_SHIFT) &
553                       MI_COM_PHY_ADDR_MASK);
554         frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
555                       MI_COM_REG_ADDR_MASK);
556         frame_val |= (val & MI_COM_DATA_MASK);
557         frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);
558         
559         tw32_f(MAC_MI_COM, frame_val);
560
561         loops = PHY_BUSY_LOOPS;
562         while (loops != 0) {
563                 udelay(10);
564                 frame_val = tr32(MAC_MI_COM);
565                 if ((frame_val & MI_COM_BUSY) == 0) {
566                         udelay(5);
567                         frame_val = tr32(MAC_MI_COM);
568                         break;
569                 }
570                 loops -= 1;
571         }
572
573         ret = -EBUSY;
574         if (loops != 0)
575                 ret = 0;
576
577         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
578                 tw32_f(MAC_MI_MODE, tp->mi_mode);
579                 udelay(80);
580         }
581
582         return ret;
583 }
584
585 static void tg3_phy_set_wirespeed(struct tg3 *tp)
586 {
587         u32 val;
588
589         if (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED)
590                 return;
591
592         if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x7007) &&
593             !tg3_readphy(tp, MII_TG3_AUX_CTRL, &val))
594                 tg3_writephy(tp, MII_TG3_AUX_CTRL,
595                              (val | (1 << 15) | (1 << 4)));
596 }
597
598 static int tg3_bmcr_reset(struct tg3 *tp)
599 {
600         u32 phy_control;
601         int limit, err;
602
603         /* OK, reset it, and poll the BMCR_RESET bit until it
604          * clears or we time out.
605          */
606         phy_control = BMCR_RESET;
607         err = tg3_writephy(tp, MII_BMCR, phy_control);
608         if (err != 0)
609                 return -EBUSY;
610
611         limit = 5000;
612         while (limit--) {
613                 err = tg3_readphy(tp, MII_BMCR, &phy_control);
614                 if (err != 0)
615                         return -EBUSY;
616
617                 if ((phy_control & BMCR_RESET) == 0) {
618                         udelay(40);
619                         break;
620                 }
621                 udelay(10);
622         }
623         if (limit < 0)
624                 return -EBUSY;
625
626         return 0;
627 }
628
629 static int tg3_wait_macro_done(struct tg3 *tp)
630 {
631         int limit = 100;
632
633         while (limit--) {
634                 u32 tmp32;
635
636                 if (!tg3_readphy(tp, 0x16, &tmp32)) {
637                         if ((tmp32 & 0x1000) == 0)
638                                 break;
639                 }
640         }
641         if (limit < 0)
642                 return -EBUSY;
643
644         return 0;
645 }
646
647 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
648 {
649         static const u32 test_pat[4][6] = {
650         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
651         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
652         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
653         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
654         };
655         int chan;
656
657         for (chan = 0; chan < 4; chan++) {
658                 int i;
659
660                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
661                              (chan * 0x2000) | 0x0200);
662                 tg3_writephy(tp, 0x16, 0x0002);
663
664                 for (i = 0; i < 6; i++)
665                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
666                                      test_pat[chan][i]);
667
668                 tg3_writephy(tp, 0x16, 0x0202);
669                 if (tg3_wait_macro_done(tp)) {
670                         *resetp = 1;
671                         return -EBUSY;
672                 }
673
674                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
675                              (chan * 0x2000) | 0x0200);
676                 tg3_writephy(tp, 0x16, 0x0082);
677                 if (tg3_wait_macro_done(tp)) {
678                         *resetp = 1;
679                         return -EBUSY;
680                 }
681
682                 tg3_writephy(tp, 0x16, 0x0802);
683                 if (tg3_wait_macro_done(tp)) {
684                         *resetp = 1;
685                         return -EBUSY;
686                 }
687
688                 for (i = 0; i < 6; i += 2) {
689                         u32 low, high;
690
691                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
692                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
693                             tg3_wait_macro_done(tp)) {
694                                 *resetp = 1;
695                                 return -EBUSY;
696                         }
697                         low &= 0x7fff;
698                         high &= 0x000f;
699                         if (low != test_pat[chan][i] ||
700                             high != test_pat[chan][i+1]) {
701                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
702                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
703                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
704
705                                 return -EBUSY;
706                         }
707                 }
708         }
709
710         return 0;
711 }
712
713 static int tg3_phy_reset_chanpat(struct tg3 *tp)
714 {
715         int chan;
716
717         for (chan = 0; chan < 4; chan++) {
718                 int i;
719
720                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
721                              (chan * 0x2000) | 0x0200);
722                 tg3_writephy(tp, 0x16, 0x0002);
723                 for (i = 0; i < 6; i++)
724                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x000);
725                 tg3_writephy(tp, 0x16, 0x0202);
726                 if (tg3_wait_macro_done(tp))
727                         return -EBUSY;
728         }
729
730         return 0;
731 }
732
733 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
734 {
735         u32 reg32, phy9_orig;
736         int retries, do_phy_reset, err;
737
738         retries = 10;
739         do_phy_reset = 1;
740         do {
741                 if (do_phy_reset) {
742                         err = tg3_bmcr_reset(tp);
743                         if (err)
744                                 return err;
745                         do_phy_reset = 0;
746                 }
747
748                 /* Disable transmitter and interrupt.  */
749                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
750                         continue;
751
752                 reg32 |= 0x3000;
753                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
754
755                 /* Set full-duplex, 1000 Mbps.  */
756                 tg3_writephy(tp, MII_BMCR,
757                              BMCR_FULLDPLX | TG3_BMCR_SPEED1000);
758
759                 /* Set to master mode.  */
760                 if (tg3_readphy(tp, MII_TG3_CTRL, &phy9_orig))
761                         continue;
762
763                 tg3_writephy(tp, MII_TG3_CTRL,
764                              (MII_TG3_CTRL_AS_MASTER |
765                               MII_TG3_CTRL_ENABLE_AS_MASTER));
766
767                 /* Enable SM_DSP_CLOCK and 6dB.  */
768                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
769
770                 /* Block the PHY control access.  */
771                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
772                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0800);
773
774                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
775                 if (!err)
776                         break;
777         } while (--retries);
778
779         err = tg3_phy_reset_chanpat(tp);
780         if (err)
781                 return err;
782
783         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8005);
784         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
785
786         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
787         tg3_writephy(tp, 0x16, 0x0000);
788
789         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
790             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
791                 /* Set Extended packet length bit for jumbo frames */
792                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4400);
793         }
794         else {
795                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
796         }
797
798         tg3_writephy(tp, MII_TG3_CTRL, phy9_orig);
799
800         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
801                 reg32 &= ~0x3000;
802                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
803         } else if (!err)
804                 err = -EBUSY;
805
806         return err;
807 }
808
809 /* This will reset the tigon3 PHY and then apply the chip-specific
810  * DSP/AUX_CTRL workarounds that must follow a PHY reset.
811  */
812 static int tg3_phy_reset(struct tg3 *tp)
813 {
814         u32 phy_status;
815         int err;
816
817         err  = tg3_readphy(tp, MII_BMSR, &phy_status);
818         err |= tg3_readphy(tp, MII_BMSR, &phy_status);
819         if (err != 0)
820                 return -EBUSY;
821
822         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
823             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
824             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
825                 err = tg3_phy_reset_5703_4_5(tp);
826                 if (err)
827                         return err;
828                 goto out;
829         }
830
831         err = tg3_bmcr_reset(tp);
832         if (err)
833                 return err;
834
835 out:
836         if (tp->tg3_flags2 & TG3_FLG2_PHY_ADC_BUG) {
837                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
838                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
839                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x2aaa);
840                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
841                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0323);
842                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
843         }
844         if (tp->tg3_flags2 & TG3_FLG2_PHY_5704_A0_BUG) {
845                 tg3_writephy(tp, 0x1c, 0x8d68);
846                 tg3_writephy(tp, 0x1c, 0x8d68);
847         }
848         if (tp->tg3_flags2 & TG3_FLG2_PHY_BER_BUG) {
849                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0c00);
850                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
851                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x310b);
852                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
853                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x9506);
854                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x401f);
855                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x14e2);
856                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0400);
857         }
858         /* Set Extended packet length bit (bit 14) on all chips
859          * that support jumbo frames. */
860         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
861                 /* Cannot do read-modify-write on 5401 */
862                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
863         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
864                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
865                 u32 phy_reg;
866
867                 /* Set bit 14 with read-modify-write to preserve other bits */
868                 if (!tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x0007) &&
869                     !tg3_readphy(tp, MII_TG3_AUX_CTRL, &phy_reg))
870                         tg3_writephy(tp, MII_TG3_AUX_CTRL, phy_reg | 0x4000);
871         }
872
873         /* Set phy register 0x10 bit 0 to high fifo elasticity to support
874          * jumbo frames transmission.
875          */
876         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705 &&
877             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750) {
878                 u32 phy_reg;
879
880                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &phy_reg))
881                     tg3_writephy(tp, MII_TG3_EXT_CTRL,
882                                  phy_reg | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
883         }
884
885         tg3_phy_set_wirespeed(tp);
886         return 0;
887 }
888
889 static void tg3_frob_aux_power(struct tg3 *tp)
890 {
891         struct tg3 *tp_peer = tp;
892
893         if ((tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) != 0)
894                 return;
895
896         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
897                 tp_peer = pci_get_drvdata(tp->pdev_peer);
898                 if (!tp_peer)
899                         BUG();
900         }
901
902
903         if ((tp->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0 ||
904             (tp_peer->tg3_flags & TG3_FLAG_WOL_ENABLE) != 0) {
905                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
906                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
907                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
908                              (GRC_LCLCTRL_GPIO_OE0 |
909                               GRC_LCLCTRL_GPIO_OE1 |
910                               GRC_LCLCTRL_GPIO_OE2 |
911                               GRC_LCLCTRL_GPIO_OUTPUT0 |
912                               GRC_LCLCTRL_GPIO_OUTPUT1));
913                         udelay(100);
914                 } else {
915                         u32 no_gpio2;
916                         u32 grc_local_ctrl;
917
918                         if (tp_peer != tp &&
919                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
920                                 return;
921
922                         /* On 5753 and variants, GPIO2 cannot be used. */
923                         no_gpio2 = tp->nic_sram_data_cfg &
924                                     NIC_SRAM_DATA_CFG_NO_GPIO2;
925
926                         grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
927                                          GRC_LCLCTRL_GPIO_OE1 |
928                                          GRC_LCLCTRL_GPIO_OE2 |
929                                          GRC_LCLCTRL_GPIO_OUTPUT1 |
930                                          GRC_LCLCTRL_GPIO_OUTPUT2;
931                         if (no_gpio2) {
932                                 grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
933                                                     GRC_LCLCTRL_GPIO_OUTPUT2);
934                         }
935                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
936                                                 grc_local_ctrl);
937                         udelay(100);
938
939                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
940
941                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
942                                                 grc_local_ctrl);
943                         udelay(100);
944
945                         if (!no_gpio2) {
946                                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
947                                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
948                                        grc_local_ctrl);
949                                 udelay(100);
950                         }
951                 }
952         } else {
953                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
954                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
955                         if (tp_peer != tp &&
956                             (tp_peer->tg3_flags & TG3_FLAG_INIT_COMPLETE) != 0)
957                                 return;
958
959                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
960                              (GRC_LCLCTRL_GPIO_OE1 |
961                               GRC_LCLCTRL_GPIO_OUTPUT1));
962                         udelay(100);
963
964                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
965                              (GRC_LCLCTRL_GPIO_OE1));
966                         udelay(100);
967
968                         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
969                              (GRC_LCLCTRL_GPIO_OE1 |
970                               GRC_LCLCTRL_GPIO_OUTPUT1));
971                         udelay(100);
972                 }
973         }
974 }
975
976 static int tg3_setup_phy(struct tg3 *, int);
977
978 #define RESET_KIND_SHUTDOWN     0
979 #define RESET_KIND_INIT         1
980 #define RESET_KIND_SUSPEND      2
981
982 static void tg3_write_sig_post_reset(struct tg3 *, int);
983 static int tg3_halt_cpu(struct tg3 *, u32);
984
985 static int tg3_set_power_state(struct tg3 *tp, int state)
986 {
987         u32 misc_host_ctrl;
988         u16 power_control, power_caps;
989         int pm = tp->pm_cap;
990
991         /* Make sure register accesses (indirect or otherwise)
992          * will function correctly.
993          */
994         pci_write_config_dword(tp->pdev,
995                                TG3PCI_MISC_HOST_CTRL,
996                                tp->misc_host_ctrl);
997
998         pci_read_config_word(tp->pdev,
999                              pm + PCI_PM_CTRL,
1000                              &power_control);
1001         power_control |= PCI_PM_CTRL_PME_STATUS;
1002         power_control &= ~(PCI_PM_CTRL_STATE_MASK);
1003         switch (state) {
1004         case 0:
1005                 power_control |= 0;
1006                 pci_write_config_word(tp->pdev,
1007                                       pm + PCI_PM_CTRL,
1008                                       power_control);
1009                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
1010                 udelay(100);
1011
1012                 return 0;
1013
1014         case 1:
1015                 power_control |= 1;
1016                 break;
1017
1018         case 2:
1019                 power_control |= 2;
1020                 break;
1021
1022         case 3:
1023                 power_control |= 3;
1024                 break;
1025
1026         default:
1027                 printk(KERN_WARNING PFX "%s: Invalid power state (%d) "
1028                        "requested.\n",
1029                        tp->dev->name, state);
1030                 return -EINVAL;
1031         };
1032
1033         power_control |= PCI_PM_CTRL_PME_ENABLE;
1034
1035         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
1036         tw32(TG3PCI_MISC_HOST_CTRL,
1037              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
1038
1039         if (tp->link_config.phy_is_low_power == 0) {
1040                 tp->link_config.phy_is_low_power = 1;
1041                 tp->link_config.orig_speed = tp->link_config.speed;
1042                 tp->link_config.orig_duplex = tp->link_config.duplex;
1043                 tp->link_config.orig_autoneg = tp->link_config.autoneg;
1044         }
1045
1046         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1047                 tp->link_config.speed = SPEED_10;
1048                 tp->link_config.duplex = DUPLEX_HALF;
1049                 tp->link_config.autoneg = AUTONEG_ENABLE;
1050                 tg3_setup_phy(tp, 0);
1051         }
1052
1053         pci_read_config_word(tp->pdev, pm + PCI_PM_PMC, &power_caps);
1054
1055         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE) {
1056                 u32 mac_mode;
1057
1058                 if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
1059                         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x5a);
1060                         udelay(40);
1061
1062                         mac_mode = MAC_MODE_PORT_MODE_MII;
1063
1064                         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 ||
1065                             !(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB))
1066                                 mac_mode |= MAC_MODE_LINK_POLARITY;
1067                 } else {
1068                         mac_mode = MAC_MODE_PORT_MODE_TBI;
1069                 }
1070
1071                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5750)
1072                         tw32(MAC_LED_CTRL, tp->led_ctrl);
1073
1074                 if (((power_caps & PCI_PM_CAP_PME_D3cold) &&
1075                      (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)))
1076                         mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
1077
1078                 tw32_f(MAC_MODE, mac_mode);
1079                 udelay(100);
1080
1081                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
1082                 udelay(10);
1083         }
1084
1085         if (!(tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB) &&
1086             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1087              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
1088                 u32 base_val;
1089
1090                 base_val = tp->pci_clock_ctrl;
1091                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
1092                              CLOCK_CTRL_TXCLK_DISABLE);
1093
1094                 tw32_f(TG3PCI_CLOCK_CTRL, base_val |
1095                      CLOCK_CTRL_ALTCLK |
1096                      CLOCK_CTRL_PWRDOWN_PLL133);
1097                 udelay(40);
1098         } else if (!((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) &&
1099                      (tp->tg3_flags & TG3_FLAG_ENABLE_ASF))) {
1100                 u32 newbits1, newbits2;
1101
1102                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1103                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1104                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
1105                                     CLOCK_CTRL_TXCLK_DISABLE |
1106                                     CLOCK_CTRL_ALTCLK);
1107                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1108                 } else if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
1109                         newbits1 = CLOCK_CTRL_625_CORE;
1110                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
1111                 } else {
1112                         newbits1 = CLOCK_CTRL_ALTCLK;
1113                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
1114                 }
1115
1116                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1);
1117                 udelay(40);
1118
1119                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2);
1120                 udelay(40);
1121
1122                 if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
1123                         u32 newbits3;
1124
1125                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1126                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1127                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
1128                                             CLOCK_CTRL_TXCLK_DISABLE |
1129                                             CLOCK_CTRL_44MHZ_CORE);
1130                         } else {
1131                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
1132                         }
1133
1134                         tw32_f(TG3PCI_CLOCK_CTRL,
1135                                          tp->pci_clock_ctrl | newbits3);
1136                         udelay(40);
1137                 }
1138         }
1139
1140         tg3_frob_aux_power(tp);
1141
1142         /* Workaround for unstable PLL clock */
1143         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
1144             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
1145                 u32 val = tr32(0x7d00);
1146
1147                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
1148                 tw32(0x7d00, val);
1149                 if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
1150                         tg3_halt_cpu(tp, RX_CPU_BASE);
1151         }
1152
1153         /* Finally, set the new power state. */
1154         pci_write_config_word(tp->pdev, pm + PCI_PM_CTRL, power_control);
1155
1156         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
1157
1158         return 0;
1159 }
1160
1161 static void tg3_link_report(struct tg3 *tp)
1162 {
1163         if (!netif_carrier_ok(tp->dev)) {
1164                 printk(KERN_INFO PFX "%s: Link is down.\n", tp->dev->name);
1165         } else {
1166                 printk(KERN_INFO PFX "%s: Link is up at %d Mbps, %s duplex.\n",
1167                        tp->dev->name,
1168                        (tp->link_config.active_speed == SPEED_1000 ?
1169                         1000 :
1170                         (tp->link_config.active_speed == SPEED_100 ?
1171                          100 : 10)),
1172                        (tp->link_config.active_duplex == DUPLEX_FULL ?
1173                         "full" : "half"));
1174
1175                 printk(KERN_INFO PFX "%s: Flow control is %s for TX and "
1176                        "%s for RX.\n",
1177                        tp->dev->name,
1178                        (tp->tg3_flags & TG3_FLAG_TX_PAUSE) ? "on" : "off",
1179                        (tp->tg3_flags & TG3_FLAG_RX_PAUSE) ? "on" : "off");
1180         }
1181 }
1182
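/* Resolve IEEE 802.3 flow control from the local and remote pause
 * advertisements (only when TG3_FLAG_PAUSE_AUTONEG is set):
 *   - both ends advertise symmetric PAUSE          -> RX and TX pause
 *   - local PAUSE+ASYM, remote ASYM only           -> RX pause only
 *   - local ASYM only, remote PAUSE+ASYM           -> TX pause only
 * The MAC_RX_MODE/MAC_TX_MODE registers are then reprogrammed if the
 * resolved settings changed.
 */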
1183 static void tg3_setup_flow_control(struct tg3 *tp, u32 local_adv, u32 remote_adv)
1184 {
1185         u32 new_tg3_flags = 0;
1186         u32 old_rx_mode = tp->rx_mode;
1187         u32 old_tx_mode = tp->tx_mode;
1188
1189         if (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) {
1190                 if (local_adv & ADVERTISE_PAUSE_CAP) {
1191                         if (local_adv & ADVERTISE_PAUSE_ASYM) {
1192                                 if (remote_adv & LPA_PAUSE_CAP)
1193                                         new_tg3_flags |=
1194                                                 (TG3_FLAG_RX_PAUSE |
1195                                                 TG3_FLAG_TX_PAUSE);
1196                                 else if (remote_adv & LPA_PAUSE_ASYM)
1197                                         new_tg3_flags |=
1198                                                 (TG3_FLAG_RX_PAUSE);
1199                         } else {
1200                                 if (remote_adv & LPA_PAUSE_CAP)
1201                                         new_tg3_flags |=
1202                                                 (TG3_FLAG_RX_PAUSE |
1203                                                 TG3_FLAG_TX_PAUSE);
1204                         }
1205                 } else if (local_adv & ADVERTISE_PAUSE_ASYM) {
1206                         if ((remote_adv & LPA_PAUSE_CAP) &&
1207                         (remote_adv & LPA_PAUSE_ASYM))
1208                                 new_tg3_flags |= TG3_FLAG_TX_PAUSE;
1209                 }
1210
1211                 tp->tg3_flags &= ~(TG3_FLAG_RX_PAUSE | TG3_FLAG_TX_PAUSE);
1212                 tp->tg3_flags |= new_tg3_flags;
1213         } else {
1214                 new_tg3_flags = tp->tg3_flags;
1215         }
1216
1217         if (new_tg3_flags & TG3_FLAG_RX_PAUSE)
1218                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1219         else
1220                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1221
1222         if (old_rx_mode != tp->rx_mode) {
1223                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1224         }
1225         
1226         if (new_tg3_flags & TG3_FLAG_TX_PAUSE)
1227                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1228         else
1229                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1230
1231         if (old_tx_mode != tp->tx_mode) {
1232                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1233         }
1234 }
1235
1236 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
1237 {
1238         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
1239         case MII_TG3_AUX_STAT_10HALF:
1240                 *speed = SPEED_10;
1241                 *duplex = DUPLEX_HALF;
1242                 break;
1243
1244         case MII_TG3_AUX_STAT_10FULL:
1245                 *speed = SPEED_10;
1246                 *duplex = DUPLEX_FULL;
1247                 break;
1248
1249         case MII_TG3_AUX_STAT_100HALF:
1250                 *speed = SPEED_100;
1251                 *duplex = DUPLEX_HALF;
1252                 break;
1253
1254         case MII_TG3_AUX_STAT_100FULL:
1255                 *speed = SPEED_100;
1256                 *duplex = DUPLEX_FULL;
1257                 break;
1258
1259         case MII_TG3_AUX_STAT_1000HALF:
1260                 *speed = SPEED_1000;
1261                 *duplex = DUPLEX_HALF;
1262                 break;
1263
1264         case MII_TG3_AUX_STAT_1000FULL:
1265                 *speed = SPEED_1000;
1266                 *duplex = DUPLEX_FULL;
1267                 break;
1268
1269         default:
1270                 *speed = SPEED_INVALID;
1271                 *duplex = DUPLEX_INVALID;
1272                 break;
1273         };
1274 }
1275
1276 static void tg3_phy_copper_begin(struct tg3 *tp)
1277 {
1278         u32 new_adv;
1279         int i;
1280
1281         if (tp->link_config.phy_is_low_power) {
1282                 /* Entering low power mode.  Disable gigabit and
1283                  * 100baseT advertisements.
1284                  */
1285                 tg3_writephy(tp, MII_TG3_CTRL, 0);
1286
1287                 new_adv = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1288                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1289                 if (tp->tg3_flags & TG3_FLAG_WOL_SPEED_100MB)
1290                         new_adv |= (ADVERTISE_100HALF | ADVERTISE_100FULL);
1291
1292                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1293         } else if (tp->link_config.speed == SPEED_INVALID) {
1294                 tp->link_config.advertising =
1295                         (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
1296                          ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
1297                          ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
1298                          ADVERTISED_Autoneg | ADVERTISED_MII);
1299
1300                 if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
1301                         tp->link_config.advertising &=
1302                                 ~(ADVERTISED_1000baseT_Half |
1303                                   ADVERTISED_1000baseT_Full);
1304
1305                 new_adv = (ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
1306                 if (tp->link_config.advertising & ADVERTISED_10baseT_Half)
1307                         new_adv |= ADVERTISE_10HALF;
1308                 if (tp->link_config.advertising & ADVERTISED_10baseT_Full)
1309                         new_adv |= ADVERTISE_10FULL;
1310                 if (tp->link_config.advertising & ADVERTISED_100baseT_Half)
1311                         new_adv |= ADVERTISE_100HALF;
1312                 if (tp->link_config.advertising & ADVERTISED_100baseT_Full)
1313                         new_adv |= ADVERTISE_100FULL;
1314                 tg3_writephy(tp, MII_ADVERTISE, new_adv);
1315
1316                 if (tp->link_config.advertising &
1317                     (ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full)) {
1318                         new_adv = 0;
1319                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Half)
1320                                 new_adv |= MII_TG3_CTRL_ADV_1000_HALF;
1321                         if (tp->link_config.advertising & ADVERTISED_1000baseT_Full)
1322                                 new_adv |= MII_TG3_CTRL_ADV_1000_FULL;
1323                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY) &&
1324                             (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1325                              tp->pci_chip_rev_id == CHIPREV_ID_5701_B0))
1326                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1327                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1328                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1329                 } else {
1330                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1331                 }
1332         } else {
1333                 /* Asking for a specific link mode. */
1334                 if (tp->link_config.speed == SPEED_1000) {
1335                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1336                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1337
1338                         if (tp->link_config.duplex == DUPLEX_FULL)
1339                                 new_adv = MII_TG3_CTRL_ADV_1000_FULL;
1340                         else
1341                                 new_adv = MII_TG3_CTRL_ADV_1000_HALF;
1342                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1343                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
1344                                 new_adv |= (MII_TG3_CTRL_AS_MASTER |
1345                                             MII_TG3_CTRL_ENABLE_AS_MASTER);
1346                         tg3_writephy(tp, MII_TG3_CTRL, new_adv);
1347                 } else {
1348                         tg3_writephy(tp, MII_TG3_CTRL, 0);
1349
1350                         new_adv = ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP;
1351                         if (tp->link_config.speed == SPEED_100) {
1352                                 if (tp->link_config.duplex == DUPLEX_FULL)
1353                                         new_adv |= ADVERTISE_100FULL;
1354                                 else
1355                                         new_adv |= ADVERTISE_100HALF;
1356                         } else {
1357                                 if (tp->link_config.duplex == DUPLEX_FULL)
1358                                         new_adv |= ADVERTISE_10FULL;
1359                                 else
1360                                         new_adv |= ADVERTISE_10HALF;
1361                         }
1362                         tg3_writephy(tp, MII_ADVERTISE, new_adv);
1363                 }
1364         }
1365
1366         if (tp->link_config.autoneg == AUTONEG_DISABLE &&
1367             tp->link_config.speed != SPEED_INVALID) {
1368                 u32 bmcr, orig_bmcr;
1369
1370                 tp->link_config.active_speed = tp->link_config.speed;
1371                 tp->link_config.active_duplex = tp->link_config.duplex;
1372
1373                 bmcr = 0;
1374                 switch (tp->link_config.speed) {
1375                 default:
1376                 case SPEED_10:
1377                         break;
1378
1379                 case SPEED_100:
1380                         bmcr |= BMCR_SPEED100;
1381                         break;
1382
1383                 case SPEED_1000:
1384                         bmcr |= TG3_BMCR_SPEED1000;
1385                         break;
1386                 }
1387
1388                 if (tp->link_config.duplex == DUPLEX_FULL)
1389                         bmcr |= BMCR_FULLDPLX;
1390
1391                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
1392                     (bmcr != orig_bmcr)) {
1393                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
1394                         for (i = 0; i < 1500; i++) {
1395                                 u32 tmp;
1396
1397                                 udelay(10);
1398                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
1399                                     tg3_readphy(tp, MII_BMSR, &tmp))
1400                                         continue;
1401                                 if (!(tmp & BMSR_LSTATUS)) {
1402                                         udelay(40);
1403                                         break;
1404                                 }
1405                         }
1406                         tg3_writephy(tp, MII_BMCR, bmcr);
1407                         udelay(40);
1408                 }
1409         } else {
1410                 tg3_writephy(tp, MII_BMCR,
1411                              BMCR_ANENABLE | BMCR_ANRESTART);
1412         }
1413 }
1414
1415 static int tg3_init_5401phy_dsp(struct tg3 *tp)
1416 {
1417         int err;
1418
1419         /* Turn off tap power management. */
1420         /* Set Extended packet length bit */
1421         err  = tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4c20);
1422
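        /* Each DSP register access below is indirect: the register address
         * is written to MII_TG3_DSP_ADDRESS, then the value is written
         * through MII_TG3_DSP_RW_PORT.
         */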
1423         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0012);
1424         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1804);
1425
1426         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x0013);
1427         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x1204);
1428
1429         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1430         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0132);
1431
1432         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8006);
1433         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0232);
1434
1435         err |= tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x201f);
1436         err |= tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0a20);
1437
1438         udelay(40);
1439
1440         return err;
1441 }
1442
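/* Return 1 if the PHY is currently advertising all of the 10/100 modes
 * (and, unless the device is 10/100-only, both 1000BASE-T modes as well),
 * so callers can tell whether autonegotiation needs to be reconfigured.
 */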
1443 static int tg3_copper_is_advertising_all(struct tg3 *tp)
1444 {
1445         u32 adv_reg, all_mask;
1446
1447         if (tg3_readphy(tp, MII_ADVERTISE, &adv_reg))
1448                 return 0;
1449
1450         all_mask = (ADVERTISE_10HALF | ADVERTISE_10FULL |
1451                     ADVERTISE_100HALF | ADVERTISE_100FULL);
1452         if ((adv_reg & all_mask) != all_mask)
1453                 return 0;
1454         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
1455                 u32 tg3_ctrl;
1456
1457                 if (tg3_readphy(tp, MII_TG3_CTRL, &tg3_ctrl))
1458                         return 0;
1459
1460                 all_mask = (MII_TG3_CTRL_ADV_1000_HALF |
1461                             MII_TG3_CTRL_ADV_1000_FULL);
1462                 if ((tg3_ctrl & all_mask) != all_mask)
1463                         return 0;
1464         }
1465         return 1;
1466 }
1467
1468 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
1469 {
1470         int current_link_up;
1471         u32 bmsr, dummy;
1472         u16 current_speed;
1473         u8 current_duplex;
1474         int i, err;
1475
1476         tw32(MAC_EVENT, 0);
1477
1478         tw32_f(MAC_STATUS,
1479              (MAC_STATUS_SYNC_CHANGED |
1480               MAC_STATUS_CFG_CHANGED |
1481               MAC_STATUS_MI_COMPLETION |
1482               MAC_STATUS_LNKSTATE_CHANGED));
1483         udelay(40);
1484
1485         tp->mi_mode = MAC_MI_MODE_BASE;
1486         tw32_f(MAC_MI_MODE, tp->mi_mode);
1487         udelay(80);
1488
1489         tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x02);
1490
1491         /* Some third-party PHYs need to be reset on link going
1492          * down.
1493          */
1494         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
1495              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
1496              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
1497             netif_carrier_ok(tp->dev)) {
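                /* The BMSR link-status bit latches low; read the register
                 * twice so the second read reflects the current link state.
                 */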
1498                 tg3_readphy(tp, MII_BMSR, &bmsr);
1499                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1500                     !(bmsr & BMSR_LSTATUS))
1501                         force_reset = 1;
1502         }
1503         if (force_reset)
1504                 tg3_phy_reset(tp);
1505
1506         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
1507                 tg3_readphy(tp, MII_BMSR, &bmsr);
1508                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
1509                     !(tp->tg3_flags & TG3_FLAG_INIT_COMPLETE))
1510                         bmsr = 0;
1511
1512                 if (!(bmsr & BMSR_LSTATUS)) {
1513                         err = tg3_init_5401phy_dsp(tp);
1514                         if (err)
1515                                 return err;
1516
1517                         tg3_readphy(tp, MII_BMSR, &bmsr);
1518                         for (i = 0; i < 1000; i++) {
1519                                 udelay(10);
1520                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1521                                     (bmsr & BMSR_LSTATUS)) {
1522                                         udelay(40);
1523                                         break;
1524                                 }
1525                         }
1526
1527                         if ((tp->phy_id & PHY_ID_REV_MASK) == PHY_REV_BCM5401_B0 &&
1528                             !(bmsr & BMSR_LSTATUS) &&
1529                             tp->link_config.active_speed == SPEED_1000) {
1530                                 err = tg3_phy_reset(tp);
1531                                 if (!err)
1532                                         err = tg3_init_5401phy_dsp(tp);
1533                                 if (err)
1534                                         return err;
1535                         }
1536                 }
1537         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
1538                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
1539                 /* 5701 {A0,B0} CRC bug workaround */
1540                 tg3_writephy(tp, 0x15, 0x0a75);
1541                 tg3_writephy(tp, 0x1c, 0x8c68);
1542                 tg3_writephy(tp, 0x1c, 0x8d68);
1543                 tg3_writephy(tp, 0x1c, 0x8c68);
1544         }
1545
1546         /* Clear pending interrupts... */
1547         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1548         tg3_readphy(tp, MII_TG3_ISTAT, &dummy);
1549
1550         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT)
1551                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
1552         else
1553                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
1554
1555         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
1556             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
1557                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
1558                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
1559                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
1560                 else
1561                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
1562         }
1563
1564         current_link_up = 0;
1565         current_speed = SPEED_INVALID;
1566         current_duplex = DUPLEX_INVALID;
1567
1568         if (tp->tg3_flags2 & TG3_FLG2_CAPACITIVE_COUPLING) {
1569                 u32 val;
1570
1571                 tg3_writephy(tp, MII_TG3_AUX_CTRL, 0x4007);
1572                 tg3_readphy(tp, MII_TG3_AUX_CTRL, &val);
1573                 if (!(val & (1 << 10))) {
1574                         val |= (1 << 10);
1575                         tg3_writephy(tp, MII_TG3_AUX_CTRL, val);
1576                         goto relink;
1577                 }
1578         }
1579
1580         bmsr = 0;
1581         for (i = 0; i < 100; i++) {
1582                 tg3_readphy(tp, MII_BMSR, &bmsr);
1583                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
1584                     (bmsr & BMSR_LSTATUS))
1585                         break;
1586                 udelay(40);
1587         }
1588
1589         if (bmsr & BMSR_LSTATUS) {
1590                 u32 aux_stat, bmcr;
1591
1592                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
1593                 for (i = 0; i < 2000; i++) {
1594                         udelay(10);
1595                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
1596                             aux_stat)
1597                                 break;
1598                 }
1599
1600                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
1601                                              &current_speed,
1602                                              &current_duplex);
1603
1604                 bmcr = 0;
1605                 for (i = 0; i < 200; i++) {
1606                         tg3_readphy(tp, MII_BMCR, &bmcr);
1607                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
1608                                 continue;
1609                         if (bmcr && bmcr != 0x7fff)
1610                                 break;
1611                         udelay(10);
1612                 }
1613
1614                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
1615                         if (bmcr & BMCR_ANENABLE) {
1616                                 current_link_up = 1;
1617
1618                                 /* Force autoneg restart if we are exiting
1619                                  * low power mode.
1620                                  */
1621                                 if (!tg3_copper_is_advertising_all(tp))
1622                                         current_link_up = 0;
1623                         } else {
1624                                 current_link_up = 0;
1625                         }
1626                 } else {
1627                         if (!(bmcr & BMCR_ANENABLE) &&
1628                             tp->link_config.speed == current_speed &&
1629                             tp->link_config.duplex == current_duplex) {
1630                                 current_link_up = 1;
1631                         } else {
1632                                 current_link_up = 0;
1633                         }
1634                 }
1635
1636                 tp->link_config.active_speed = current_speed;
1637                 tp->link_config.active_duplex = current_duplex;
1638         }
1639
1640         if (current_link_up == 1 &&
1641             (tp->link_config.active_duplex == DUPLEX_FULL) &&
1642             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
1643                 u32 local_adv, remote_adv;
1644
1645                 if (tg3_readphy(tp, MII_ADVERTISE, &local_adv))
1646                         local_adv = 0;
1647                 local_adv &= (ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM);
1648
1649                 if (tg3_readphy(tp, MII_LPA, &remote_adv))
1650                         remote_adv = 0;
1651
1652                 remote_adv &= (LPA_PAUSE_CAP | LPA_PAUSE_ASYM);
1653
1654                 /* If we are not advertising full pause capability,
1655                  * something is wrong.  Bring the link down and reconfigure.
1656                  */
1657                 if (local_adv != ADVERTISE_PAUSE_CAP) {
1658                         current_link_up = 0;
1659                 } else {
1660                         tg3_setup_flow_control(tp, local_adv, remote_adv);
1661                 }
1662         }
1663 relink:
1664         if (current_link_up == 0) {
1665                 u32 tmp;
1666
1667                 tg3_phy_copper_begin(tp);
1668
1669                 tg3_readphy(tp, MII_BMSR, &tmp);
1670                 if (!tg3_readphy(tp, MII_BMSR, &tmp) &&
1671                     (tmp & BMSR_LSTATUS))
1672                         current_link_up = 1;
1673         }
1674
1675         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
1676         if (current_link_up == 1) {
1677                 if (tp->link_config.active_speed == SPEED_100 ||
1678                     tp->link_config.active_speed == SPEED_10)
1679                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
1680                 else
1681                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1682         } else
1683                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
1684
1685         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
1686         if (tp->link_config.active_duplex == DUPLEX_HALF)
1687                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
1688
1689         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
1690         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
1691                 if ((tp->led_ctrl == LED_CTRL_MODE_PHY_2) ||
1692                     (current_link_up == 1 &&
1693                      tp->link_config.active_speed == SPEED_10))
1694                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1695         } else {
1696                 if (current_link_up == 1)
1697                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
1698         }
1699
1700         /* ??? Without this setting Netgear GA302T PHY does not
1701          * ??? send/receive packets...
1702          */
1703         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5411 &&
1704             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
1705                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
1706                 tw32_f(MAC_MI_MODE, tp->mi_mode);
1707                 udelay(80);
1708         }
1709
1710         tw32_f(MAC_MODE, tp->mac_mode);
1711         udelay(40);
1712
1713         if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
1714                 /* Polled via timer. */
1715                 tw32_f(MAC_EVENT, 0);
1716         } else {
1717                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
1718         }
1719         udelay(40);
1720
1721         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
1722             current_link_up == 1 &&
1723             tp->link_config.active_speed == SPEED_1000 &&
1724             ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ||
1725              (tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED))) {
1726                 udelay(120);
1727                 tw32_f(MAC_STATUS,
1728                      (MAC_STATUS_SYNC_CHANGED |
1729                       MAC_STATUS_CFG_CHANGED));
1730                 udelay(40);
1731                 tg3_write_mem(tp,
1732                               NIC_SRAM_FIRMWARE_MBOX,
1733                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
1734         }
1735
1736         if (current_link_up != netif_carrier_ok(tp->dev)) {
1737                 if (current_link_up)
1738                         netif_carrier_on(tp->dev);
1739                 else
1740                         netif_carrier_off(tp->dev);
1741                 tg3_link_report(tp);
1742         }
1743
1744         return 0;
1745 }
1746
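/* Software state for the fiber autonegotiation state machine driven by
 * tg3_fiber_aneg_smachine() below.  The MR_* flags roughly correspond to
 * the mr_* management variables of the 802.3z clause 37 autoneg model.
 */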
1747 struct tg3_fiber_aneginfo {
1748         int state;
1749 #define ANEG_STATE_UNKNOWN              0
1750 #define ANEG_STATE_AN_ENABLE            1
1751 #define ANEG_STATE_RESTART_INIT         2
1752 #define ANEG_STATE_RESTART              3
1753 #define ANEG_STATE_DISABLE_LINK_OK      4
1754 #define ANEG_STATE_ABILITY_DETECT_INIT  5
1755 #define ANEG_STATE_ABILITY_DETECT       6
1756 #define ANEG_STATE_ACK_DETECT_INIT      7
1757 #define ANEG_STATE_ACK_DETECT           8
1758 #define ANEG_STATE_COMPLETE_ACK_INIT    9
1759 #define ANEG_STATE_COMPLETE_ACK         10
1760 #define ANEG_STATE_IDLE_DETECT_INIT     11
1761 #define ANEG_STATE_IDLE_DETECT          12
1762 #define ANEG_STATE_LINK_OK              13
1763 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
1764 #define ANEG_STATE_NEXT_PAGE_WAIT       15
1765
1766         u32 flags;
1767 #define MR_AN_ENABLE            0x00000001
1768 #define MR_RESTART_AN           0x00000002
1769 #define MR_AN_COMPLETE          0x00000004
1770 #define MR_PAGE_RX              0x00000008
1771 #define MR_NP_LOADED            0x00000010
1772 #define MR_TOGGLE_TX            0x00000020
1773 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
1774 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
1775 #define MR_LP_ADV_SYM_PAUSE     0x00000100
1776 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
1777 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
1778 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
1779 #define MR_LP_ADV_NEXT_PAGE     0x00001000
1780 #define MR_TOGGLE_RX            0x00002000
1781 #define MR_NP_RX                0x00004000
1782
1783 #define MR_LINK_OK              0x80000000
1784
1785         unsigned long link_time, cur_time;
1786
1787         u32 ability_match_cfg;
1788         int ability_match_count;
1789
1790         char ability_match, idle_match, ack_match;
1791
1792         u32 txconfig, rxconfig;
1793 #define ANEG_CFG_NP             0x00000080
1794 #define ANEG_CFG_ACK            0x00000040
1795 #define ANEG_CFG_RF2            0x00000020
1796 #define ANEG_CFG_RF1            0x00000010
1797 #define ANEG_CFG_PS2            0x00000001
1798 #define ANEG_CFG_PS1            0x00008000
1799 #define ANEG_CFG_HD             0x00004000
1800 #define ANEG_CFG_FD             0x00002000
1801 #define ANEG_CFG_INVAL          0x00001f06
1802
1803 };
1804 #define ANEG_OK         0
1805 #define ANEG_DONE       1
1806 #define ANEG_TIMER_ENAB 2
1807 #define ANEG_FAILED     -1
1808
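/* Time (in state-machine ticks) the link must remain stable; fiber_autoneg()
 * below ticks the state machine roughly once per microsecond, so this is on
 * the order of 10ms.
 */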
1809 #define ANEG_STATE_SETTLE_TIME  10000
1810
1811 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
1812                                    struct tg3_fiber_aneginfo *ap)
1813 {
1814         unsigned long delta;
1815         u32 rx_cfg_reg;
1816         int ret;
1817
1818         if (ap->state == ANEG_STATE_UNKNOWN) {
1819                 ap->rxconfig = 0;
1820                 ap->link_time = 0;
1821                 ap->cur_time = 0;
1822                 ap->ability_match_cfg = 0;
1823                 ap->ability_match_count = 0;
1824                 ap->ability_match = 0;
1825                 ap->idle_match = 0;
1826                 ap->ack_match = 0;
1827         }
1828         ap->cur_time++;
1829
1830         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
1831                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
1832
1833                 if (rx_cfg_reg != ap->ability_match_cfg) {
1834                         ap->ability_match_cfg = rx_cfg_reg;
1835                         ap->ability_match = 0;
1836                         ap->ability_match_count = 0;
1837                 } else {
1838                         if (++ap->ability_match_count > 1) {
1839                                 ap->ability_match = 1;
1840                                 ap->ability_match_cfg = rx_cfg_reg;
1841                         }
1842                 }
1843                 if (rx_cfg_reg & ANEG_CFG_ACK)
1844                         ap->ack_match = 1;
1845                 else
1846                         ap->ack_match = 0;
1847
1848                 ap->idle_match = 0;
1849         } else {
1850                 ap->idle_match = 1;
1851                 ap->ability_match_cfg = 0;
1852                 ap->ability_match_count = 0;
1853                 ap->ability_match = 0;
1854                 ap->ack_match = 0;
1855
1856                 rx_cfg_reg = 0;
1857         }
1858
1859         ap->rxconfig = rx_cfg_reg;
1860         ret = ANEG_OK;
1861
1862         switch(ap->state) {
1863         case ANEG_STATE_UNKNOWN:
1864                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
1865                         ap->state = ANEG_STATE_AN_ENABLE;
1866
1867                 /* fallthru */
1868         case ANEG_STATE_AN_ENABLE:
1869                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
1870                 if (ap->flags & MR_AN_ENABLE) {
1871                         ap->link_time = 0;
1872                         ap->cur_time = 0;
1873                         ap->ability_match_cfg = 0;
1874                         ap->ability_match_count = 0;
1875                         ap->ability_match = 0;
1876                         ap->idle_match = 0;
1877                         ap->ack_match = 0;
1878
1879                         ap->state = ANEG_STATE_RESTART_INIT;
1880                 } else {
1881                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
1882                 }
1883                 break;
1884
1885         case ANEG_STATE_RESTART_INIT:
1886                 ap->link_time = ap->cur_time;
1887                 ap->flags &= ~(MR_NP_LOADED);
1888                 ap->txconfig = 0;
1889                 tw32(MAC_TX_AUTO_NEG, 0);
1890                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1891                 tw32_f(MAC_MODE, tp->mac_mode);
1892                 udelay(40);
1893
1894                 ret = ANEG_TIMER_ENAB;
1895                 ap->state = ANEG_STATE_RESTART;
1896
1897                 /* fallthru */
1898         case ANEG_STATE_RESTART:
1899                 delta = ap->cur_time - ap->link_time;
1900                 if (delta > ANEG_STATE_SETTLE_TIME) {
1901                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
1902                 } else {
1903                         ret = ANEG_TIMER_ENAB;
1904                 }
1905                 break;
1906
1907         case ANEG_STATE_DISABLE_LINK_OK:
1908                 ret = ANEG_DONE;
1909                 break;
1910
1911         case ANEG_STATE_ABILITY_DETECT_INIT:
1912                 ap->flags &= ~(MR_TOGGLE_TX);
1913                 ap->txconfig = (ANEG_CFG_FD | ANEG_CFG_PS1);
1914                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1915                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1916                 tw32_f(MAC_MODE, tp->mac_mode);
1917                 udelay(40);
1918
1919                 ap->state = ANEG_STATE_ABILITY_DETECT;
1920                 break;
1921
1922         case ANEG_STATE_ABILITY_DETECT:
1923                 if (ap->ability_match != 0 && ap->rxconfig != 0) {
1924                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
1925                 }
1926                 break;
1927
1928         case ANEG_STATE_ACK_DETECT_INIT:
1929                 ap->txconfig |= ANEG_CFG_ACK;
1930                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
1931                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
1932                 tw32_f(MAC_MODE, tp->mac_mode);
1933                 udelay(40);
1934
1935                 ap->state = ANEG_STATE_ACK_DETECT;
1936
1937                 /* fallthru */
1938         case ANEG_STATE_ACK_DETECT:
1939                 if (ap->ack_match != 0) {
1940                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
1941                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
1942                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
1943                         } else {
1944                                 ap->state = ANEG_STATE_AN_ENABLE;
1945                         }
1946                 } else if (ap->ability_match != 0 &&
1947                            ap->rxconfig == 0) {
1948                         ap->state = ANEG_STATE_AN_ENABLE;
1949                 }
1950                 break;
1951
1952         case ANEG_STATE_COMPLETE_ACK_INIT:
1953                 if (ap->rxconfig & ANEG_CFG_INVAL) {
1954                         ret = ANEG_FAILED;
1955                         break;
1956                 }
1957                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
1958                                MR_LP_ADV_HALF_DUPLEX |
1959                                MR_LP_ADV_SYM_PAUSE |
1960                                MR_LP_ADV_ASYM_PAUSE |
1961                                MR_LP_ADV_REMOTE_FAULT1 |
1962                                MR_LP_ADV_REMOTE_FAULT2 |
1963                                MR_LP_ADV_NEXT_PAGE |
1964                                MR_TOGGLE_RX |
1965                                MR_NP_RX);
1966                 if (ap->rxconfig & ANEG_CFG_FD)
1967                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
1968                 if (ap->rxconfig & ANEG_CFG_HD)
1969                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
1970                 if (ap->rxconfig & ANEG_CFG_PS1)
1971                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
1972                 if (ap->rxconfig & ANEG_CFG_PS2)
1973                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
1974                 if (ap->rxconfig & ANEG_CFG_RF1)
1975                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
1976                 if (ap->rxconfig & ANEG_CFG_RF2)
1977                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
1978                 if (ap->rxconfig & ANEG_CFG_NP)
1979                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
1980
1981                 ap->link_time = ap->cur_time;
1982
1983                 ap->flags ^= (MR_TOGGLE_TX);
1984                 if (ap->rxconfig & 0x0008)
1985                         ap->flags |= MR_TOGGLE_RX;
1986                 if (ap->rxconfig & ANEG_CFG_NP)
1987                         ap->flags |= MR_NP_RX;
1988                 ap->flags |= MR_PAGE_RX;
1989
1990                 ap->state = ANEG_STATE_COMPLETE_ACK;
1991                 ret = ANEG_TIMER_ENAB;
1992                 break;
1993
1994         case ANEG_STATE_COMPLETE_ACK:
1995                 if (ap->ability_match != 0 &&
1996                     ap->rxconfig == 0) {
1997                         ap->state = ANEG_STATE_AN_ENABLE;
1998                         break;
1999                 }
2000                 delta = ap->cur_time - ap->link_time;
2001                 if (delta > ANEG_STATE_SETTLE_TIME) {
2002                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
2003                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2004                         } else {
2005                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
2006                                     !(ap->flags & MR_NP_RX)) {
2007                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
2008                                 } else {
2009                                         ret = ANEG_FAILED;
2010                                 }
2011                         }
2012                 }
2013                 break;
2014
2015         case ANEG_STATE_IDLE_DETECT_INIT:
2016                 ap->link_time = ap->cur_time;
2017                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2018                 tw32_f(MAC_MODE, tp->mac_mode);
2019                 udelay(40);
2020
2021                 ap->state = ANEG_STATE_IDLE_DETECT;
2022                 ret = ANEG_TIMER_ENAB;
2023                 break;
2024
2025         case ANEG_STATE_IDLE_DETECT:
2026                 if (ap->ability_match != 0 &&
2027                     ap->rxconfig == 0) {
2028                         ap->state = ANEG_STATE_AN_ENABLE;
2029                         break;
2030                 }
2031                 delta = ap->cur_time - ap->link_time;
2032                 if (delta > ANEG_STATE_SETTLE_TIME) {
2033                         /* XXX another gem from the Broadcom driver :( */
2034                         ap->state = ANEG_STATE_LINK_OK;
2035                 }
2036                 break;
2037
2038         case ANEG_STATE_LINK_OK:
2039                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
2040                 ret = ANEG_DONE;
2041                 break;
2042
2043         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
2044                 /* ??? unimplemented */
2045                 break;
2046
2047         case ANEG_STATE_NEXT_PAGE_WAIT:
2048                 /* ??? unimplemented */
2049                 break;
2050
2051         default:
2052                 ret = ANEG_FAILED;
2053                 break;
2054         }
2055
2056         return ret;
2057 }
2058
2059 static int fiber_autoneg(struct tg3 *tp, u32 *flags)
2060 {
2061         int res = 0;
2062         struct tg3_fiber_aneginfo aninfo;
2063         int status = ANEG_FAILED;
2064         unsigned int tick;
2065         u32 tmp;
2066
2067         tw32_f(MAC_TX_AUTO_NEG, 0);
2068
2069         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
2070         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
2071         udelay(40);
2072
2073         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
2074         udelay(40);
2075
2076         memset(&aninfo, 0, sizeof(aninfo));
2077         aninfo.flags |= MR_AN_ENABLE;
2078         aninfo.state = ANEG_STATE_UNKNOWN;
2079         aninfo.cur_time = 0;
2080         tick = 0;
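        /* Tick the autoneg state machine roughly once per microsecond, for
         * at most ~195ms, until it completes or fails.
         */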
2081         while (++tick < 195000) {
2082                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
2083                 if (status == ANEG_DONE || status == ANEG_FAILED)
2084                         break;
2085
2086                 udelay(1);
2087         }
2088
2089         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
2090         tw32_f(MAC_MODE, tp->mac_mode);
2091         udelay(40);
2092
2093         *flags = aninfo.flags;
2094
2095         if (status == ANEG_DONE &&
2096             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
2097                              MR_LP_ADV_FULL_DUPLEX)))
2098                 res = 1;
2099
2100         return res;
2101 }
2102
2103 static void tg3_init_bcm8002(struct tg3 *tp)
2104 {
2105         u32 mac_status = tr32(MAC_STATUS);
2106         int i;
2107
2108         /* Reset when initializing for the first time or when we have a link. */
2109         if ((tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) &&
2110             !(mac_status & MAC_STATUS_PCS_SYNCED))
2111                 return;
2112
2113         /* Set PLL lock range. */
2114         tg3_writephy(tp, 0x16, 0x8007);
2115
2116         /* SW reset */
2117         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
2118
2119         /* Wait for reset to complete. */
2120         /* XXX schedule_timeout() ... */
2121         for (i = 0; i < 500; i++)
2122                 udelay(10);
2123
2124         /* Config mode; select PMA/Ch 1 regs. */
2125         tg3_writephy(tp, 0x10, 0x8411);
2126
2127         /* Enable auto-lock and comdet, select txclk for tx. */
2128         tg3_writephy(tp, 0x11, 0x0a10);
2129
2130         tg3_writephy(tp, 0x18, 0x00a0);
2131         tg3_writephy(tp, 0x16, 0x41ff);
2132
2133         /* Assert and deassert POR. */
2134         tg3_writephy(tp, 0x13, 0x0400);
2135         udelay(40);
2136         tg3_writephy(tp, 0x13, 0x0000);
2137
2138         tg3_writephy(tp, 0x11, 0x0a50);
2139         udelay(40);
2140         tg3_writephy(tp, 0x11, 0x0a10);
2141
2142         /* Wait for signal to stabilize */
2143         /* XXX schedule_timeout() ... */
2144         for (i = 0; i < 15000; i++)
2145                 udelay(10);
2146
2147         /* Deselect the channel register so we can read the PHYID
2148          * later.
2149          */
2150         tg3_writephy(tp, 0x10, 0x8011);
2151 }
2152
2153 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
2154 {
2155         u32 sg_dig_ctrl, sg_dig_status;
2156         u32 serdes_cfg, expected_sg_dig_ctrl;
2157         int workaround, port_a;
2158         int current_link_up;
2159
2160         serdes_cfg = 0;
2161         expected_sg_dig_ctrl = 0;
2162         workaround = 0;
2163         port_a = 1;
2164         current_link_up = 0;
2165
2166         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
2167             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
2168                 workaround = 1;
2169                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
2170                         port_a = 0;
2171
2172                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
2173                 /* preserve bits 20-23 for voltage regulator */
2174                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
2175         }
2176
2177         sg_dig_ctrl = tr32(SG_DIG_CTRL);
2178
2179         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
2180                 if (sg_dig_ctrl & (1 << 31)) {
2181                         if (workaround) {
2182                                 u32 val = serdes_cfg;
2183
2184                                 if (port_a)
2185                                         val |= 0xc010000;
2186                                 else
2187                                         val |= 0x4010000;
2188                                 tw32_f(MAC_SERDES_CFG, val);
2189                         }
2190                         tw32_f(SG_DIG_CTRL, 0x01388400);
2191                 }
2192                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
2193                         tg3_setup_flow_control(tp, 0, 0);
2194                         current_link_up = 1;
2195                 }
2196                 goto out;
2197         }
2198
2199         /* Want auto-negotiation.  */
2200         expected_sg_dig_ctrl = 0x81388400;
2201
2202         /* Pause capability */
2203         expected_sg_dig_ctrl |= (1 << 11);
2204
2205         /* Asymmetric pause */
2206         expected_sg_dig_ctrl |= (1 << 12);
2207
2208         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
2209                 if (workaround)
2210                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
2211                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | (1 << 30));
2212                 udelay(5);
2213                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
2214
2215                 tp->tg3_flags2 |= TG3_FLG2_PHY_JUST_INITTED;
2216         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
2217                                  MAC_STATUS_SIGNAL_DET)) {
2218                 int i;
2219
2220                 /* Give it time to negotiate (~200ms) */
2221                 for (i = 0; i < 40000; i++) {
2222                         sg_dig_status = tr32(SG_DIG_STATUS);
2223                         if (sg_dig_status & (0x3))
2224                                 break;
2225                         udelay(5);
2226                 }
2227                 mac_status = tr32(MAC_STATUS);
2228
2229                 if ((sg_dig_status & (1 << 1)) &&
2230                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
2231                         u32 local_adv, remote_adv;
2232
2233                         local_adv = ADVERTISE_PAUSE_CAP;
2234                         remote_adv = 0;
2235                         if (sg_dig_status & (1 << 19))
2236                                 remote_adv |= LPA_PAUSE_CAP;
2237                         if (sg_dig_status & (1 << 20))
2238                                 remote_adv |= LPA_PAUSE_ASYM;
2239
2240                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2241                         current_link_up = 1;
2242                         tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2243                 } else if (!(sg_dig_status & (1 << 1))) {
2244                         if (tp->tg3_flags2 & TG3_FLG2_PHY_JUST_INITTED)
2245                                 tp->tg3_flags2 &= ~TG3_FLG2_PHY_JUST_INITTED;
2246                         else {
2247                                 if (workaround) {
2248                                         u32 val = serdes_cfg;
2249
2250                                         if (port_a)
2251                                                 val |= 0xc010000;
2252                                         else
2253                                                 val |= 0x4010000;
2254
2255                                         tw32_f(MAC_SERDES_CFG, val);
2256                                 }
2257
2258                                 tw32_f(SG_DIG_CTRL, 0x01388400);
2259                                 udelay(40);
2260
2261                                 /* Link parallel detection - link is up
2262                                  * only if we have PCS_SYNC and are not
2263                                  * receiving config code words.  */
2264                                 mac_status = tr32(MAC_STATUS);
2265                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
2266                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
2267                                         tg3_setup_flow_control(tp, 0, 0);
2268                                         current_link_up = 1;
2269                                 }
2270                         }
2271                 }
2272         }
2273
2274 out:
2275         return current_link_up;
2276 }
2277
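/* Fiber link setup used when the MAC's hardware autoneg state machine is
 * not in use (see TG3_FLG2_HW_AUTONEG): either run the software autoneg
 * state machine via fiber_autoneg(), or simply force a 1000FD link.
 */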
2278 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
2279 {
2280         int current_link_up = 0;
2281
2282         if (!(mac_status & MAC_STATUS_PCS_SYNCED)) {
2283                 tp->tg3_flags &= ~TG3_FLAG_GOT_SERDES_FLOWCTL;
2284                 goto out;
2285         }
2286
2287         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2288                 u32 flags;
2289                 int i;
2290   
2291                 if (fiber_autoneg(tp, &flags)) {
2292                         u32 local_adv, remote_adv;
2293
2294                         local_adv = ADVERTISE_PAUSE_CAP;
2295                         remote_adv = 0;
2296                         if (flags & MR_LP_ADV_SYM_PAUSE)
2297                                 remote_adv |= LPA_PAUSE_CAP;
2298                         if (flags & MR_LP_ADV_ASYM_PAUSE)
2299                                 remote_adv |= LPA_PAUSE_ASYM;
2300
2301                         tg3_setup_flow_control(tp, local_adv, remote_adv);
2302
2303                         tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2304                         current_link_up = 1;
2305                 }
2306                 for (i = 0; i < 30; i++) {
2307                         udelay(20);
2308                         tw32_f(MAC_STATUS,
2309                                (MAC_STATUS_SYNC_CHANGED |
2310                                 MAC_STATUS_CFG_CHANGED));
2311                         udelay(40);
2312                         if ((tr32(MAC_STATUS) &
2313                              (MAC_STATUS_SYNC_CHANGED |
2314                               MAC_STATUS_CFG_CHANGED)) == 0)
2315                                 break;
2316                 }
2317
2318                 mac_status = tr32(MAC_STATUS);
2319                 if (current_link_up == 0 &&
2320                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
2321                     !(mac_status & MAC_STATUS_RCVD_CFG))
2322                         current_link_up = 1;
2323         } else {
2324                 /* Forcing 1000FD link up. */
2325                 current_link_up = 1;
2326                 tp->tg3_flags |= TG3_FLAG_GOT_SERDES_FLOWCTL;
2327
2328                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
2329                 udelay(40);
2330         }
2331
2332 out:
2333         return current_link_up;
2334 }
2335
2336 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
2337 {
2338         u32 orig_pause_cfg;
2339         u16 orig_active_speed;
2340         u8 orig_active_duplex;
2341         u32 mac_status;
2342         int current_link_up;
2343         int i;
2344
2345         orig_pause_cfg =
2346                 (tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2347                                   TG3_FLAG_TX_PAUSE));
2348         orig_active_speed = tp->link_config.active_speed;
2349         orig_active_duplex = tp->link_config.active_duplex;
2350
2351         if (!(tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG) &&
2352             netif_carrier_ok(tp->dev) &&
2353             (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)) {
2354                 mac_status = tr32(MAC_STATUS);
2355                 mac_status &= (MAC_STATUS_PCS_SYNCED |
2356                                MAC_STATUS_SIGNAL_DET |
2357                                MAC_STATUS_CFG_CHANGED |
2358                                MAC_STATUS_RCVD_CFG);
2359                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
2360                                    MAC_STATUS_SIGNAL_DET)) {
2361                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2362                                             MAC_STATUS_CFG_CHANGED));
2363                         return 0;
2364                 }
2365         }
2366
2367         tw32_f(MAC_TX_AUTO_NEG, 0);
2368
2369         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
2370         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
2371         tw32_f(MAC_MODE, tp->mac_mode);
2372         udelay(40);
2373
2374         if (tp->phy_id == PHY_ID_BCM8002)
2375                 tg3_init_bcm8002(tp);
2376
2377         /* Enable link change events even when polling the serdes.  */
2378         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
2379         udelay(40);
2380
2381         current_link_up = 0;
2382         mac_status = tr32(MAC_STATUS);
2383
2384         if (tp->tg3_flags2 & TG3_FLG2_HW_AUTONEG)
2385                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
2386         else
2387                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
2388
2389         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
2390         tw32_f(MAC_MODE, tp->mac_mode);
2391         udelay(40);
2392
2393         tp->hw_status->status =
2394                 (SD_STATUS_UPDATED |
2395                  (tp->hw_status->status & ~SD_STATUS_LINK_CHG));
2396
2397         for (i = 0; i < 100; i++) {
2398                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
2399                                     MAC_STATUS_CFG_CHANGED));
2400                 udelay(5);
2401                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
2402                                          MAC_STATUS_CFG_CHANGED)) == 0)
2403                         break;
2404         }
2405
2406         mac_status = tr32(MAC_STATUS);
2407         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
2408                 current_link_up = 0;
2409                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
2410                         tw32_f(MAC_MODE, (tp->mac_mode |
2411                                           MAC_MODE_SEND_CONFIGS));
2412                         udelay(1);
2413                         tw32_f(MAC_MODE, tp->mac_mode);
2414                 }
2415         }
2416
2417         if (current_link_up == 1) {
2418                 tp->link_config.active_speed = SPEED_1000;
2419                 tp->link_config.active_duplex = DUPLEX_FULL;
2420                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2421                                     LED_CTRL_LNKLED_OVERRIDE |
2422                                     LED_CTRL_1000MBPS_ON));
2423         } else {
2424                 tp->link_config.active_speed = SPEED_INVALID;
2425                 tp->link_config.active_duplex = DUPLEX_INVALID;
2426                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
2427                                     LED_CTRL_LNKLED_OVERRIDE |
2428                                     LED_CTRL_TRAFFIC_OVERRIDE));
2429         }
2430
2431         if (current_link_up != netif_carrier_ok(tp->dev)) {
2432                 if (current_link_up)
2433                         netif_carrier_on(tp->dev);
2434                 else
2435                         netif_carrier_off(tp->dev);
2436                 tg3_link_report(tp);
2437         } else {
2438                 u32 now_pause_cfg =
2439                         tp->tg3_flags & (TG3_FLAG_RX_PAUSE |
2440                                          TG3_FLAG_TX_PAUSE);
2441                 if (orig_pause_cfg != now_pause_cfg ||
2442                     orig_active_speed != tp->link_config.active_speed ||
2443                     orig_active_duplex != tp->link_config.active_duplex)
2444                         tg3_link_report(tp);
2445         }
2446
2447         return 0;
2448 }
2449
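/* Top-level link setup: dispatch to the fiber or copper path, then adjust
 * the MAC transmit lengths (half-duplex gigabit needs the longer slot time)
 * and the statistics coalescing ticks according to the resulting link state.
 */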
2450 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
2451 {
2452         int err;
2453
2454         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
2455                 err = tg3_setup_fiber_phy(tp, force_reset);
2456         } else {
2457                 err = tg3_setup_copper_phy(tp, force_reset);
2458         }
2459
2460         if (tp->link_config.active_speed == SPEED_1000 &&
2461             tp->link_config.active_duplex == DUPLEX_HALF)
2462                 tw32(MAC_TX_LENGTHS,
2463                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2464                       (6 << TX_LENGTHS_IPG_SHIFT) |
2465                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
2466         else
2467                 tw32(MAC_TX_LENGTHS,
2468                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
2469                       (6 << TX_LENGTHS_IPG_SHIFT) |
2470                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
2471
2472         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
2473                 if (netif_carrier_ok(tp->dev)) {
2474                         tw32(HOSTCC_STAT_COAL_TICKS,
2475                              DEFAULT_STAT_COAL_TICKS);
2476                 } else {
2477                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
2478                 }
2479         }
2480
2481         return err;
2482 }
2483
2484 /* Tigon3 never reports partial packet sends.  So we do not
2485  * need special logic to handle SKBs that have not had all
2486  * of their frags sent yet, like SunGEM does.
2487  */
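/* Reclaim completed transmit descriptors: walk from our consumer index up
 * to the index the hardware has reported, unmapping and freeing each skb
 * (head and paged fragments), then wake the queue if enough space freed up.
 */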
2488 static void tg3_tx(struct tg3 *tp)
2489 {
2490         u32 hw_idx = tp->hw_status->idx[0].tx_consumer;
2491         u32 sw_idx = tp->tx_cons;
2492
2493         while (sw_idx != hw_idx) {
2494                 struct tx_ring_info *ri = &tp->tx_buffers[sw_idx];
2495                 struct sk_buff *skb = ri->skb;
2496                 int i;
2497
2498                 if (unlikely(skb == NULL))
2499                         BUG();
2500
2501                 pci_unmap_single(tp->pdev,
2502                                  pci_unmap_addr(ri, mapping),
2503                                  skb_headlen(skb),
2504                                  PCI_DMA_TODEVICE);
2505
2506                 ri->skb = NULL;
2507
2508                 sw_idx = NEXT_TX(sw_idx);
2509
2510                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
2511                         if (unlikely(sw_idx == hw_idx))
2512                                 BUG();
2513
2514                         ri = &tp->tx_buffers[sw_idx];
2515                         if (unlikely(ri->skb != NULL))
2516                                 BUG();
2517
2518                         pci_unmap_page(tp->pdev,
2519                                        pci_unmap_addr(ri, mapping),
2520                                        skb_shinfo(skb)->frags[i].size,
2521                                        PCI_DMA_TODEVICE);
2522
2523                         sw_idx = NEXT_TX(sw_idx);
2524                 }
2525
2526                 dev_kfree_skb_irq(skb);
2527         }
2528
2529         tp->tx_cons = sw_idx;
2530
2531         if (netif_queue_stopped(tp->dev) &&
2532             (TX_BUFFS_AVAIL(tp) > TG3_TX_WAKEUP_THRESH))
2533                 netif_wake_queue(tp->dev);
2534 }
2535
2536 /* Returns size of skb allocated or < 0 on error.
2537  *
2538  * We only need to fill in the address because the other members
2539  * of the RX descriptor are invariant, see tg3_init_rings.
2540  *
2541  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
2542  * posting buffers we only dirty the first cache line of the RX
2543  * descriptor (containing the address).  Whereas for the RX status
2544  * buffers the cpu only reads the last cacheline of the RX descriptor
2545  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
2546  */
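/* The size returned here is what tg3_rx() later passes (minus rx_offset)
 * to pci_unmap_single() when it hands the buffer up the stack.
 */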
2547 static int tg3_alloc_rx_skb(struct tg3 *tp, u32 opaque_key,
2548                             int src_idx, u32 dest_idx_unmasked)
2549 {
2550         struct tg3_rx_buffer_desc *desc;
2551         struct ring_info *map, *src_map;
2552         struct sk_buff *skb;
2553         dma_addr_t mapping;
2554         int skb_size, dest_idx;
2555
2556         src_map = NULL;
2557         switch (opaque_key) {
2558         case RXD_OPAQUE_RING_STD:
2559                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2560                 desc = &tp->rx_std[dest_idx];
2561                 map = &tp->rx_std_buffers[dest_idx];
2562                 if (src_idx >= 0)
2563                         src_map = &tp->rx_std_buffers[src_idx];
2564                 skb_size = RX_PKT_BUF_SZ;
2565                 break;
2566
2567         case RXD_OPAQUE_RING_JUMBO:
2568                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2569                 desc = &tp->rx_jumbo[dest_idx];
2570                 map = &tp->rx_jumbo_buffers[dest_idx];
2571                 if (src_idx >= 0)
2572                         src_map = &tp->rx_jumbo_buffers[src_idx];
2573                 skb_size = RX_JUMBO_PKT_BUF_SZ;
2574                 break;
2575
2576         default:
2577                 return -EINVAL;
2578         }
2579
2580         /* Do not overwrite any of the map or rp information
2581          * until we are sure we can commit to a new buffer.
2582          *
2583          * Callers depend upon this behavior and assume that
2584          * we leave everything unchanged if we fail.
2585          */
2586         skb = dev_alloc_skb(skb_size);
2587         if (skb == NULL)
2588                 return -ENOMEM;
2589
2590         skb->dev = tp->dev;
2591         skb_reserve(skb, tp->rx_offset);
2592
2593         mapping = pci_map_single(tp->pdev, skb->data,
2594                                  skb_size - tp->rx_offset,
2595                                  PCI_DMA_FROMDEVICE);
2596
2597         map->skb = skb;
2598         pci_unmap_addr_set(map, mapping, mapping);
2599
2600         if (src_map != NULL)
2601                 src_map->skb = NULL;
2602
2603         desc->addr_hi = ((u64)mapping >> 32);
2604         desc->addr_lo = ((u64)mapping & 0xffffffff);
2605
2606         return skb_size;
2607 }
2608
2609 /* We only need to move over in the address because the other
2610  * members of the RX descriptor are invariant.  See notes above
2611  * tg3_alloc_rx_skb for full details.
2612  */
2613 static void tg3_recycle_rx(struct tg3 *tp, u32 opaque_key,
2614                            int src_idx, u32 dest_idx_unmasked)
2615 {
2616         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
2617         struct ring_info *src_map, *dest_map;
2618         int dest_idx;
2619
2620         switch (opaque_key) {
2621         case RXD_OPAQUE_RING_STD:
2622                 dest_idx = dest_idx_unmasked % TG3_RX_RING_SIZE;
2623                 dest_desc = &tp->rx_std[dest_idx];
2624                 dest_map = &tp->rx_std_buffers[dest_idx];
2625                 src_desc = &tp->rx_std[src_idx];
2626                 src_map = &tp->rx_std_buffers[src_idx];
2627                 break;
2628
2629         case RXD_OPAQUE_RING_JUMBO:
2630                 dest_idx = dest_idx_unmasked % TG3_RX_JUMBO_RING_SIZE;
2631                 dest_desc = &tp->rx_jumbo[dest_idx];
2632                 dest_map = &tp->rx_jumbo_buffers[dest_idx];
2633                 src_desc = &tp->rx_jumbo[src_idx];
2634                 src_map = &tp->rx_jumbo_buffers[src_idx];
2635                 break;
2636
2637         default:
2638                 return;
2639         }
2640
2641         dest_map->skb = src_map->skb;
2642         pci_unmap_addr_set(dest_map, mapping,
2643                            pci_unmap_addr(src_map, mapping));
2644         dest_desc->addr_hi = src_desc->addr_hi;
2645         dest_desc->addr_lo = src_desc->addr_lo;
2646
2647         src_map->skb = NULL;
2648 }
2649
2650 #if TG3_VLAN_TAG_USED
2651 static int tg3_vlan_rx(struct tg3 *tp, struct sk_buff *skb, u16 vlan_tag)
2652 {
2653         return vlan_hwaccel_receive_skb(skb, tp->vlgrp, vlan_tag);
2654 }
2655 #endif
2656
2657 /* The RX ring scheme is composed of multiple rings which post fresh
2658  * buffers to the chip, and one special ring the chip uses to report
2659  * status back to the host.
2660  *
2661  * The special ring reports the status of received packets to the
2662  * host.  The chip does not write into the original descriptor the
2663  * RX buffer was obtained from.  The chip simply takes the original
2664  * descriptor as provided by the host, updates the status and length
2665  * field, then writes this into the next status ring entry.
2666  *
2667  * Each ring the host uses to post buffers to the chip is described
2668  * by a TG3_BDINFO entry in the chips SRAM area.  When a packet arrives,
2669  * it is first placed into the on-chip ram.  When the packet's length
2670  * is known, it walks down the TG3_BDINFO entries to select the ring.
2671  * Each TG3_BDINFO specifies a MAXLEN field and the first TG3_BDINFO
2672  * which is within the range of the new packet's length is chosen.
2673  *
2674  * The "separate ring for rx status" scheme may sound queer, but it makes
2675  * sense from a cache coherency perspective.  If only the host writes
2676  * to the buffer post rings, and only the chip writes to the rx status
2677  * rings, then cache lines never move beyond shared-modified state.
2678  * If both the host and chip were to write into the same ring, cache line
2679  * eviction could occur since both entities want it in an exclusive state.
2680  */
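/* Consume up to 'budget' entries from the rx status ring.  Each entry's
 * opaque cookie identifies the posting ring (std or jumbo) and buffer
 * index; large packets are unmapped and passed up directly while small
 * ones are copied so the original buffer can be recycled, and the
 * producer mailboxes are bumped at the end to refill the rings.
 */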
2681 static int tg3_rx(struct tg3 *tp, int budget)
2682 {
2683         u32 work_mask;
2684         u32 rx_rcb_ptr = tp->rx_rcb_ptr;
2685         u16 hw_idx, sw_idx;
2686         int received;
2687
2688         hw_idx = tp->hw_status->idx[0].rx_producer;
2689         /*
2690          * We need to order the read of hw_idx and the read of
2691          * the opaque cookie.
2692          */
2693         rmb();
2694         sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2695         work_mask = 0;
2696         received = 0;
2697         while (sw_idx != hw_idx && budget > 0) {
2698                 struct tg3_rx_buffer_desc *desc = &tp->rx_rcb[sw_idx];
2699                 unsigned int len;
2700                 struct sk_buff *skb;
2701                 dma_addr_t dma_addr;
2702                 u32 opaque_key, desc_idx, *post_ptr;
2703
2704                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
2705                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
2706                 if (opaque_key == RXD_OPAQUE_RING_STD) {
2707                         dma_addr = pci_unmap_addr(&tp->rx_std_buffers[desc_idx],
2708                                                   mapping);
2709                         skb = tp->rx_std_buffers[desc_idx].skb;
2710                         post_ptr = &tp->rx_std_ptr;
2711                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
2712                         dma_addr = pci_unmap_addr(&tp->rx_jumbo_buffers[desc_idx],
2713                                                   mapping);
2714                         skb = tp->rx_jumbo_buffers[desc_idx].skb;
2715                         post_ptr = &tp->rx_jumbo_ptr;
2716                 }
2717                 else {
2718                         goto next_pkt_nopost;
2719                 }
2720
2721                 work_mask |= opaque_key;
2722
2723                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
2724                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
2725                 drop_it:
2726                         tg3_recycle_rx(tp, opaque_key,
2727                                        desc_idx, *post_ptr);
2728                 drop_it_no_recycle:
2729                         /* Other statistics kept track of by card. */
2730                         tp->net_stats.rx_dropped++;
2731                         goto next_pkt;
2732                 }
2733
2734                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) - 4; /* omit crc */
2735
2736                 if (len > RX_COPY_THRESHOLD 
2737                         && tp->rx_offset == 2
2738                         /* rx_offset != 2 iff this is a 5701 card running
2739                          * in PCI-X mode [see tg3_get_invariants()] */
2740                 ) {
2741                         int skb_size;
2742
2743                         skb_size = tg3_alloc_rx_skb(tp, opaque_key,
2744                                                     desc_idx, *post_ptr);
2745                         if (skb_size < 0)
2746                                 goto drop_it;
2747
2748                         pci_unmap_single(tp->pdev, dma_addr,
2749                                          skb_size - tp->rx_offset,
2750                                          PCI_DMA_FROMDEVICE);
2751
2752                         skb_put(skb, len);
2753                 } else {
2754                         struct sk_buff *copy_skb;
2755
2756                         tg3_recycle_rx(tp, opaque_key,
2757                                        desc_idx, *post_ptr);
2758
2759                         copy_skb = dev_alloc_skb(len + 2);
2760                         if (copy_skb == NULL)
2761                                 goto drop_it_no_recycle;
2762
2763                         copy_skb->dev = tp->dev;
2764                         skb_reserve(copy_skb, 2);
2765                         skb_put(copy_skb, len);
2766                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2767                         memcpy(copy_skb->data, skb->data, len);
2768                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
2769
2770                         /* We'll reuse the original ring buffer. */
2771                         skb = copy_skb;
2772                 }
2773
2774                 if ((tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) &&
2775                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
2776                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
2777                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
2778                         skb->ip_summed = CHECKSUM_UNNECESSARY;
2779                 else
2780                         skb->ip_summed = CHECKSUM_NONE;
2781
2782                 skb->protocol = eth_type_trans(skb, tp->dev);
2783 #if TG3_VLAN_TAG_USED
2784                 if (tp->vlgrp != NULL &&
2785                     desc->type_flags & RXD_FLAG_VLAN) {
2786                         tg3_vlan_rx(tp, skb,
2787                                     desc->err_vlan & RXD_VLAN_MASK);
2788                 } else
2789 #endif
2790                         netif_receive_skb(skb);
2791
2792                 tp->dev->last_rx = jiffies;
2793                 received++;
2794                 budget--;
2795
2796 next_pkt:
2797                 (*post_ptr)++;
2798 next_pkt_nopost:
2799                 rx_rcb_ptr++;
2800                 sw_idx = rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp);
2801         }
2802
2803         /* ACK the status ring. */
2804         tp->rx_rcb_ptr = rx_rcb_ptr;
2805         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW,
2806                      (rx_rcb_ptr % TG3_RX_RCB_RING_SIZE(tp)));
2807
2808         /* Refill RX ring(s). */
2809         if (work_mask & RXD_OPAQUE_RING_STD) {
2810                 sw_idx = tp->rx_std_ptr % TG3_RX_RING_SIZE;
2811                 tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
2812                              sw_idx);
2813         }
2814         if (work_mask & RXD_OPAQUE_RING_JUMBO) {
2815                 sw_idx = tp->rx_jumbo_ptr % TG3_RX_JUMBO_RING_SIZE;
2816                 tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
2817                              sw_idx);
2818         }
2819         mmiowb();
2820
2821         return received;
2822 }
2823
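/* NAPI poll callback.  Handles link-change events from the status block
 * (unless the driver is using the link-change register or polling the
 * SERDES instead), reaps TX completions under tx_lock, and runs tg3_rx()
 * within the NAPI budget.  Once all work is done it completes NAPI and
 * re-enables chip interrupts via tg3_restart_ints().
 */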
2824 static int tg3_poll(struct net_device *netdev, int *budget)
2825 {
2826         struct tg3 *tp = netdev_priv(netdev);
2827         struct tg3_hw_status *sblk = tp->hw_status;
2828         unsigned long flags;
2829         int done;
2830
2831         spin_lock_irqsave(&tp->lock, flags);
2832
2833         /* handle link change and other phy events */
2834         if (!(tp->tg3_flags &
2835               (TG3_FLAG_USE_LINKCHG_REG |
2836                TG3_FLAG_POLL_SERDES))) {
2837                 if (sblk->status & SD_STATUS_LINK_CHG) {
2838                         sblk->status = SD_STATUS_UPDATED |
2839                                 (sblk->status & ~SD_STATUS_LINK_CHG);
2840                         tg3_setup_phy(tp, 0);
2841                 }
2842         }
2843
2844         /* run TX completion thread */
2845         if (sblk->idx[0].tx_consumer != tp->tx_cons) {
2846                 spin_lock(&tp->tx_lock);
2847                 tg3_tx(tp);
2848                 spin_unlock(&tp->tx_lock);
2849         }
2850
2851         spin_unlock_irqrestore(&tp->lock, flags);
2852
2853         /* run RX thread, within the bounds set by NAPI.
2854          * All RX "locking" is done by ensuring outside
2855          * code synchronizes with dev->poll()
2856          */
2857         done = 1;
2858         if (sblk->idx[0].rx_producer != tp->rx_rcb_ptr) {
2859                 int orig_budget = *budget;
2860                 int work_done;
2861
2862                 if (orig_budget > netdev->quota)
2863                         orig_budget = netdev->quota;
2864
2865                 work_done = tg3_rx(tp, orig_budget);
2866
2867                 *budget -= work_done;
2868                 netdev->quota -= work_done;
2869
2870                 if (work_done >= orig_budget)
2871                         done = 0;
2872         }
2873
2874         /* if no more work, tell net stack and NIC we're done */
2875         if (done) {
2876                 spin_lock_irqsave(&tp->lock, flags);
2877                 __netif_rx_complete(netdev);
2878                 tg3_restart_ints(tp);
2879                 spin_unlock_irqrestore(&tp->lock, flags);
2880         }
2881
2882         return (done ? 0 : 1);
2883 }
2884
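/* Used by the interrupt handler to decide whether a NAPI poll needs to
 * be scheduled: returns non-zero if a link change is pending or if the
 * status block indicates new TX completions or RX descriptors.
 */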
2885 static inline unsigned int tg3_has_work(struct net_device *dev, struct tg3 *tp)
2886 {
2887         struct tg3_hw_status *sblk = tp->hw_status;
2888         unsigned int work_exists = 0;
2889
2890         /* check for phy events */
2891         if (!(tp->tg3_flags &
2892               (TG3_FLAG_USE_LINKCHG_REG |
2893                TG3_FLAG_POLL_SERDES))) {
2894                 if (sblk->status & SD_STATUS_LINK_CHG)
2895                         work_exists = 1;
2896         }
2897         /* check for RX/TX work to do */
2898         if (sblk->idx[0].tx_consumer != tp->tx_cons ||
2899             sblk->idx[0].rx_producer != tp->rx_rcb_ptr)
2900                 work_exists = 1;
2901
2902         return work_exists;
2903 }
2904
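/* Hard interrupt handler.  The interrupt is claimed if our status block
 * has been updated or the PCI state register shows INTA# asserted.  In
 * that case further chip interrupts are masked via the interrupt mailbox
 * and the real work is handed off to NAPI; if no work is actually
 * pending (e.g. a shared line), interrupts are re-enabled immediately.
 */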
2905 static irqreturn_t tg3_interrupt(int irq, void *dev_id, struct pt_regs *regs)
2906 {
2907         struct net_device *dev = dev_id;
2908         struct tg3 *tp = netdev_priv(dev);
2909         struct tg3_hw_status *sblk = tp->hw_status;
2910         unsigned long flags;
2911         unsigned int handled = 1;
2912
2913         spin_lock_irqsave(&tp->lock, flags);
2914
2915         /* In INTx mode, it is possible for the interrupt to arrive at the
2916          * CPU before the status block posted prior to the interrupt reaches
2917          * host memory.  Reading the PCI State register will confirm whether
2918          * the interrupt is ours and will flush the status block.
2919          */
2920         if ((sblk->status & SD_STATUS_UPDATED) ||
2921             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
2922                 /*
2923                  * writing any value to intr-mbox-0 clears PCI INTA# and
2924                  * chip-internal interrupt pending events.
2925                  * writing non-zero to intr-mbox-0 additionally tells the
2926                  * NIC to stop sending us irqs, engaging "in-intr-handler"
2927                  * event coalescing.
2928                  */
2929                 tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2930                              0x00000001);
2931                 /*
2932                  * Flush PCI write.  This also guarantees that our
2933                  * status block has been flushed to host memory.
2934                  */
2935                 tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2936                 sblk->status &= ~SD_STATUS_UPDATED;
2937
2938                 if (likely(tg3_has_work(dev, tp)))
2939                         netif_rx_schedule(dev);         /* schedule NAPI poll */
2940                 else {
2941                         /* no work, shared interrupt perhaps?  re-enable
2942                          * interrupts, and flush that PCI write
2943                          */
2944                         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
2945                                 0x00000000);
2946                         tr32(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW);
2947                 }
2948         } else {        /* shared interrupt */
2949                 handled = 0;
2950         }
2951
2952         spin_unlock_irqrestore(&tp->lock, flags);
2953
2954         return IRQ_RETVAL(handled);
2955 }
2956
2957 static int tg3_init_hw(struct tg3 *);
2958 static int tg3_halt(struct tg3 *);
2959
2960 #ifdef CONFIG_NET_POLL_CONTROLLER
2961 static void tg3_poll_controller(struct net_device *dev)
2962 {
2963         tg3_interrupt(dev->irq, dev, NULL);
2964 }
2965 #endif
2966
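/* Workqueue handler used to reset the chip from process context, for
 * example after a TX timeout.  It stops the interface, halts and
 * re-initializes the hardware under tp->lock and tp->tx_lock, restarts
 * the interface and, if requested, re-arms the driver timer.
 */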
2967 static void tg3_reset_task(void *_data)
2968 {
2969         struct tg3 *tp = _data;
2970         unsigned int restart_timer;
2971
2972         tg3_netif_stop(tp);
2973
2974         spin_lock_irq(&tp->lock);
2975         spin_lock(&tp->tx_lock);
2976
2977         restart_timer = tp->tg3_flags2 & TG3_FLG2_RESTART_TIMER;
2978         tp->tg3_flags2 &= ~TG3_FLG2_RESTART_TIMER;
2979
2980         tg3_halt(tp);
2981         tg3_init_hw(tp);
2982
2983         tg3_netif_start(tp);
2984
2985         spin_unlock(&tp->tx_lock);
2986         spin_unlock_irq(&tp->lock);
2987
2988         if (restart_timer)
2989                 mod_timer(&tp->timer, jiffies + 1);
2990 }
2991
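/* Invoked by the networking core when a transmit has been pending for
 * longer than TG3_TX_TIMEOUT.  The chip reset is deferred to
 * tg3_reset_task() so that it runs from the workqueue in process
 * context.
 */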
2992 static void tg3_tx_timeout(struct net_device *dev)
2993 {
2994         struct tg3 *tp = netdev_priv(dev);
2995
2996         printk(KERN_ERR PFX "%s: transmit timed out, resetting\n",
2997                dev->name);
2998
2999         schedule_work(&tp->reset_task);
3000 }
3001
3002 static void tg3_set_txd(struct tg3 *, int, dma_addr_t, int, u32, u32);
3003
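/* Workaround for the 4GB-boundary DMA bug handled in tg3_start_xmit():
 * copy the offending skb into a freshly allocated linear skb, describe
 * the copy with a single TX descriptor, then unmap and clean up the sw
 * ring entries that had already been set up for the original skb.
 */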
3004 static int tigon3_4gb_hwbug_workaround(struct tg3 *tp, struct sk_buff *skb,
3005                                        u32 guilty_entry, int guilty_len,
3006                                        u32 last_plus_one, u32 *start, u32 mss)
3007 {
3008         struct sk_buff *new_skb = skb_copy(skb, GFP_ATOMIC);
3009         dma_addr_t new_addr;
3010         u32 entry = *start;
3011         int i;
3012
3013         if (!new_skb) {
3014                 dev_kfree_skb(skb);
3015                 return -1;
3016         }
3017
3018         /* New SKB is guaranteed to be linear. */
3019         entry = *start;
3020         new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
3021                                   PCI_DMA_TODEVICE);
3022         tg3_set_txd(tp, entry, new_addr, new_skb->len,
3023                     (skb->ip_summed == CHECKSUM_HW) ?
3024                     TXD_FLAG_TCPUDP_CSUM : 0, 1 | (mss << 1));
3025         *start = NEXT_TX(entry);
3026
3027         /* Now clean up the sw ring entries. */
3028         i = 0;
3029         while (entry != last_plus_one) {
3030                 int len;
3031
3032                 if (i == 0)
3033                         len = skb_headlen(skb);
3034                 else
3035                         len = skb_shinfo(skb)->frags[i-1].size;
3036                 pci_unmap_single(tp->pdev,
3037                                  pci_unmap_addr(&tp->tx_buffers[entry], mapping),
3038                                  len, PCI_DMA_TODEVICE);
3039                 if (i == 0) {
3040                         tp->tx_buffers[entry].skb = new_skb;
3041                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, new_addr);
3042                 } else {
3043                         tp->tx_buffers[entry].skb = NULL;
3044                 }
3045                 entry = NEXT_TX(entry);
3046                 i++;
3047         }
3048
3049         dev_kfree_skb(skb);
3050
3051         return 0;
3052 }
3053
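/* Fill in one TX descriptor.  The low bit of mss_and_is_end flags the
 * final descriptor of the packet (TXD_FLAG_END); the remaining bits
 * carry the TSO MSS.  A VLAN tag, when present, arrives in the upper
 * 16 bits of flags.
 */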
3054 static void tg3_set_txd(struct tg3 *tp, int entry,
3055                         dma_addr_t mapping, int len, u32 flags,
3056                         u32 mss_and_is_end)
3057 {
3058         struct tg3_tx_buffer_desc *txd = &tp->tx_ring[entry];
3059         int is_end = (mss_and_is_end & 0x1);
3060         u32 mss = (mss_and_is_end >> 1);
3061         u32 vlan_tag = 0;
3062
3063         if (is_end)
3064                 flags |= TXD_FLAG_END;
3065         if (flags & TXD_FLAG_VLAN) {
3066                 vlan_tag = flags >> 16;
3067                 flags &= 0xffff;
3068         }
3069         vlan_tag |= (mss << TXD_MSS_SHIFT);
3070
3071         txd->addr_hi = ((u64) mapping >> 32);
3072         txd->addr_lo = ((u64) mapping & 0xffffffff);
3073         txd->len_flags = (len << TXD_LEN_SHIFT) | flags;
3074         txd->vlan_tag = vlan_tag << TXD_VLAN_TAG_SHIFT;
3075 }
3076
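/* Return true if a DMA mapping of 'len' bytes (plus a few bytes of
 * slack) would wrap around a 4GB boundary in its low 32 address bits;
 * such mappings are sent through the hwbug workaround above.
 */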
3077 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
3078 {
3079         u32 base = (u32) mapping & 0xffffffff;
3080
3081         return ((base > 0xffffdcc0) &&
3082                 (base + len + 8 < base));
3083 }
3084
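/* hard_start_xmit handler.  Maps the skb head and fragments for DMA,
 * builds TX descriptors with the appropriate checksum/TSO/VLAN flags,
 * reroutes mappings that would hit the 4GB-boundary bug through the
 * workaround above, and finally tells the NIC about the new producer
 * index through the send-host mailbox.
 */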
3085 static int tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
3086 {
3087         struct tg3 *tp = netdev_priv(dev);
3088         dma_addr_t mapping;
3089         unsigned int i;
3090         u32 len, entry, base_flags, mss;
3091         int would_hit_hwbug;
3092         unsigned long flags;
3093
3094         len = skb_headlen(skb);
3095
3096         /* No BH disabling for tx_lock here.  We are running in BH disabled
3097          * context and TX reclaim runs via tp->poll inside of a software
3098          * interrupt.  Rejoice!
3099          *
3100          * Actually, things are not so simple.  If we are to take a hw
3101          * IRQ here, we can deadlock, consider:
3102          *
3103          *       CPU1           CPU2
3104          *   tg3_start_xmit
3105          *   take tp->tx_lock
3106          *                      tg3_timer
3107          *                      take tp->lock
3108          *   tg3_interrupt
3109          *   spin on tp->lock
3110          *                      spin on tp->tx_lock
3111          *
3112          * So we really do need to disable interrupts when taking
3113          * tx_lock here.
3114          */
3115         local_irq_save(flags);
3116         if (!spin_trylock(&tp->tx_lock)) { 
3117                 local_irq_restore(flags);
3118                 return NETDEV_TX_LOCKED; 
3119         } 
3120
3121         /* This is a hard error, log it. */
3122         if (unlikely(TX_BUFFS_AVAIL(tp) <= (skb_shinfo(skb)->nr_frags + 1))) {
3123                 netif_stop_queue(dev);
3124                 spin_unlock_irqrestore(&tp->tx_lock, flags);
3125                 printk(KERN_ERR PFX "%s: BUG! Tx Ring full when queue awake!\n",
3126                        dev->name);
3127                 return NETDEV_TX_BUSY;
3128         }
3129
3130         entry = tp->tx_prod;
3131         base_flags = 0;
3132         if (skb->ip_summed == CHECKSUM_HW)
3133                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
3134 #if TG3_TSO_SUPPORT != 0
3135         mss = 0;
3136         if (skb->len > (tp->dev->mtu + ETH_HLEN) &&
3137             (mss = skb_shinfo(skb)->tso_size) != 0) {
3138                 int tcp_opt_len, ip_tcp_len;
3139
3140                 if (skb_header_cloned(skb) &&
3141                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC)) {
3142                         dev_kfree_skb(skb);
3143                         goto out_unlock;
3144                 }
3145
3146                 tcp_opt_len = ((skb->h.th->doff - 5) * 4);
3147                 ip_tcp_len = (skb->nh.iph->ihl * 4) + sizeof(struct tcphdr);
3148
3149                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
3150                                TXD_FLAG_CPU_POST_DMA);
3151
3152                 skb->nh.iph->check = 0;
3153                 skb->nh.iph->tot_len = ntohs(mss + ip_tcp_len + tcp_opt_len);
3154                 if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
3155                         skb->h.th->check = 0;
3156                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
3157                 } else {
3159                         skb->h.th->check =
3160                                 ~csum_tcpudp_magic(skb->nh.iph->saddr,
3161                                                    skb->nh.iph->daddr,
3162                                                    0, IPPROTO_TCP, 0);
3163                 }
3164
3165                 if ((tp->tg3_flags2 & TG3_FLG2_HW_TSO) ||
3166                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)) {
3167                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3168                                 int tsflags;
3169
3170                                 tsflags = ((skb->nh.iph->ihl - 5) +
3171                                            (tcp_opt_len >> 2));
3172                                 mss |= (tsflags << 11);
3173                         }
3174                 } else {
3175                         if (tcp_opt_len || skb->nh.iph->ihl > 5) {
3176                                 int tsflags;
3177
3178                                 tsflags = ((skb->nh.iph->ihl - 5) +
3179                                            (tcp_opt_len >> 2));
3180                                 base_flags |= tsflags << 12;
3181                         }
3182                 }
3183         }
3184 #else
3185         mss = 0;
3186 #endif
3187 #if TG3_VLAN_TAG_USED
3188         if (tp->vlgrp != NULL && vlan_tx_tag_present(skb))
3189                 base_flags |= (TXD_FLAG_VLAN |
3190                                (vlan_tx_tag_get(skb) << 16));
3191 #endif
3192
3193         /* Queue skb data, a.k.a. the main skb fragment. */
3194         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
3195
3196         tp->tx_buffers[entry].skb = skb;
3197         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3198
3199         would_hit_hwbug = 0;
3200
3201         if (tg3_4g_overflow_test(mapping, len))
3202                 would_hit_hwbug = entry + 1;
3203
3204         tg3_set_txd(tp, entry, mapping, len, base_flags,
3205                     (skb_shinfo(skb)->nr_frags == 0) | (mss << 1));
3206
3207         entry = NEXT_TX(entry);
3208
3209         /* Now loop through additional data fragments, and queue them. */
3210         if (skb_shinfo(skb)->nr_frags > 0) {
3211                 unsigned int i, last;
3212
3213                 last = skb_shinfo(skb)->nr_frags - 1;
3214                 for (i = 0; i <= last; i++) {
3215                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
3216
3217                         len = frag->size;
3218                         mapping = pci_map_page(tp->pdev,
3219                                                frag->page,
3220                                                frag->page_offset,
3221                                                len, PCI_DMA_TODEVICE);
3222
3223                         tp->tx_buffers[entry].skb = NULL;
3224                         pci_unmap_addr_set(&tp->tx_buffers[entry], mapping, mapping);
3225
3226                         if (tg3_4g_overflow_test(mapping, len)) {
3227                                 /* Only one should match. */
3228                                 if (would_hit_hwbug)
3229                                         BUG();
3230                                 would_hit_hwbug = entry + 1;
3231                         }
3232
3233                         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
3234                                 tg3_set_txd(tp, entry, mapping, len,
3235                                             base_flags, (i == last)|(mss << 1));
3236                         else
3237                                 tg3_set_txd(tp, entry, mapping, len,
3238                                             base_flags, (i == last));
3239
3240                         entry = NEXT_TX(entry);
3241                 }
3242         }
3243
3244         if (would_hit_hwbug) {
3245                 u32 last_plus_one = entry;
3246                 u32 start;
3247                 unsigned int len = 0;
3248
3249                 would_hit_hwbug -= 1;
3250                 entry = entry - 1 - skb_shinfo(skb)->nr_frags;
3251                 entry &= (TG3_TX_RING_SIZE - 1);
3252                 start = entry;
3253                 i = 0;
3254                 while (entry != last_plus_one) {
3255                         if (i == 0)
3256                                 len = skb_headlen(skb);
3257                         else
3258                                 len = skb_shinfo(skb)->frags[i-1].size;
3259
3260                         if (entry == would_hit_hwbug)
3261                                 break;
3262
3263                         i++;
3264                         entry = NEXT_TX(entry);
3265
3266                 }
3267
3268                 /* If the workaround fails due to memory/mapping
3269                  * failure, silently drop this packet.
3270                  */
3271                 if (tigon3_4gb_hwbug_workaround(tp, skb,
3272                                                 entry, len,
3273                                                 last_plus_one,
3274                                                 &start, mss))
3275                         goto out_unlock;
3276
3277                 entry = start;
3278         }
3279
3280         /* Packets are ready, update Tx producer idx local and on card. */
3281         tw32_tx_mbox((MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW), entry);
3282
3283         tp->tx_prod = entry;
3284         if (TX_BUFFS_AVAIL(tp) <= (MAX_SKB_FRAGS + 1))
3285                 netif_stop_queue(dev);
3286
3287 out_unlock:
3288         mmiowb();
3289         spin_unlock_irqrestore(&tp->tx_lock, flags);
3290
3291         dev->trans_start = jiffies;
3292
3293         return NETDEV_TX_OK;
3294 }
3295
3296 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
3297                                int new_mtu)
3298 {
3299         dev->mtu = new_mtu;
3300
3301         if (new_mtu > ETH_DATA_LEN)
3302                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
3303         else
3304                 tp->tg3_flags &= ~TG3_FLAG_JUMBO_ENABLE;
3305 }
3306
3307 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
3308 {
3309         struct tg3 *tp = netdev_priv(dev);
3310
3311         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
3312                 return -EINVAL;
3313
3314         if (!netif_running(dev)) {
3315                 /* We'll just catch it later when the
3316                  * device is brought up.
3317                  */
3318                 tg3_set_mtu(dev, tp, new_mtu);
3319                 return 0;
3320         }
3321
3322         tg3_netif_stop(tp);
3323         spin_lock_irq(&tp->lock);
3324         spin_lock(&tp->tx_lock);
3325
3326         tg3_halt(tp);
3327
3328         tg3_set_mtu(dev, tp, new_mtu);
3329
3330         tg3_init_hw(tp);
3331
3332         tg3_netif_start(tp);
3333
3334         spin_unlock(&tp->tx_lock);
3335         spin_unlock_irq(&tp->lock);
3336
3337         return 0;
3338 }
3339
3340 /* Free up pending packets in all rx/tx rings.
3341  *
3342  * The chip has been shut down and the driver detached from
3343  * the networking stack, so no interrupts or new tx packets will
3344  * end up in the driver.  tp->{tx,}lock is not held and we are not
3345  * in an interrupt context and thus may sleep.
3346  */
3347 static void tg3_free_rings(struct tg3 *tp)
3348 {
3349         struct ring_info *rxp;
3350         int i;
3351
3352         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3353                 rxp = &tp->rx_std_buffers[i];
3354
3355                 if (rxp->skb == NULL)
3356                         continue;
3357                 pci_unmap_single(tp->pdev,
3358                                  pci_unmap_addr(rxp, mapping),
3359                                  RX_PKT_BUF_SZ - tp->rx_offset,
3360                                  PCI_DMA_FROMDEVICE);
3361                 dev_kfree_skb_any(rxp->skb);
3362                 rxp->skb = NULL;
3363         }
3364
3365         for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3366                 rxp = &tp->rx_jumbo_buffers[i];
3367
3368                 if (rxp->skb == NULL)
3369                         continue;
3370                 pci_unmap_single(tp->pdev,
3371                                  pci_unmap_addr(rxp, mapping),
3372                                  RX_JUMBO_PKT_BUF_SZ - tp->rx_offset,
3373                                  PCI_DMA_FROMDEVICE);
3374                 dev_kfree_skb_any(rxp->skb);
3375                 rxp->skb = NULL;
3376         }
3377
3378         for (i = 0; i < TG3_TX_RING_SIZE; ) {
3379                 struct tx_ring_info *txp;
3380                 struct sk_buff *skb;
3381                 int j;
3382
3383                 txp = &tp->tx_buffers[i];
3384                 skb = txp->skb;
3385
3386                 if (skb == NULL) {
3387                         i++;
3388                         continue;
3389                 }
3390
3391                 pci_unmap_single(tp->pdev,
3392                                  pci_unmap_addr(txp, mapping),
3393                                  skb_headlen(skb),
3394                                  PCI_DMA_TODEVICE);
3395                 txp->skb = NULL;
3396
3397                 i++;
3398
3399                 for (j = 0; j < skb_shinfo(skb)->nr_frags; j++) {
3400                         txp = &tp->tx_buffers[i & (TG3_TX_RING_SIZE - 1)];
3401                         pci_unmap_page(tp->pdev,
3402                                        pci_unmap_addr(txp, mapping),
3403                                        skb_shinfo(skb)->frags[j].size,
3404                                        PCI_DMA_TODEVICE);
3405                         i++;
3406                 }
3407
3408                 dev_kfree_skb_any(skb);
3409         }
3410 }
3411
3412 /* Initialize tx/rx rings for packet processing.
3413  *
3414  * The chip has been shut down and the driver detached from
3415  * the networking stack, so no interrupts or new tx packets will
3416  * end up in the driver.  tp->{tx,}lock are held and thus
3417  * we may not sleep.
3418  */
3419 static void tg3_init_rings(struct tg3 *tp)
3420 {
3421         u32 i;
3422
3423         /* Free up all the SKBs. */
3424         tg3_free_rings(tp);
3425
3426         /* Zero out all descriptors. */
3427         memset(tp->rx_std, 0, TG3_RX_RING_BYTES);
3428         memset(tp->rx_jumbo, 0, TG3_RX_JUMBO_RING_BYTES);
3429         memset(tp->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
3430         memset(tp->tx_ring, 0, TG3_TX_RING_BYTES);
3431
3432         /* Initialize invariants of the rings; we only set this
3433          * stuff once.  This works because the card does not
3434          * write into the rx buffer posting rings.
3435          */
3436         for (i = 0; i < TG3_RX_RING_SIZE; i++) {
3437                 struct tg3_rx_buffer_desc *rxd;
3438
3439                 rxd = &tp->rx_std[i];
3440                 rxd->idx_len = (RX_PKT_BUF_SZ - tp->rx_offset - 64)
3441                         << RXD_LEN_SHIFT;
3442                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
3443                 rxd->opaque = (RXD_OPAQUE_RING_STD |
3444                                (i << RXD_OPAQUE_INDEX_SHIFT));
3445         }
3446
3447         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3448                 for (i = 0; i < TG3_RX_JUMBO_RING_SIZE; i++) {
3449                         struct tg3_rx_buffer_desc *rxd;
3450
3451                         rxd = &tp->rx_jumbo[i];
3452                         rxd->idx_len = (RX_JUMBO_PKT_BUF_SZ - tp->rx_offset - 64)
3453                                 << RXD_LEN_SHIFT;
3454                         rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
3455                                 RXD_FLAG_JUMBO;
3456                         rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
3457                                (i << RXD_OPAQUE_INDEX_SHIFT));
3458                 }
3459         }
3460
3461         /* Now allocate fresh SKBs for each rx ring. */
3462         for (i = 0; i < tp->rx_pending; i++) {
3463                 if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_STD,
3464                                      -1, i) < 0)
3465                         break;
3466         }
3467
3468         if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
3469                 for (i = 0; i < tp->rx_jumbo_pending; i++) {
3470                         if (tg3_alloc_rx_skb(tp, RXD_OPAQUE_RING_JUMBO,
3471                                              -1, i) < 0)
3472                                 break;
3473                 }
3474         }
3475 }
3476
3477 /*
3478  * Must not be invoked with interrupt sources disabled and
3479  * the hardware shut down.
3480  */
3481 static void tg3_free_consistent(struct tg3 *tp)
3482 {
3483         if (tp->rx_std_buffers) {
3484                 kfree(tp->rx_std_buffers);
3485                 tp->rx_std_buffers = NULL;
3486         }
3487         if (tp->rx_std) {
3488                 pci_free_consistent(tp->pdev, TG3_RX_RING_BYTES,
3489                                     tp->rx_std, tp->rx_std_mapping);
3490                 tp->rx_std = NULL;
3491         }
3492         if (tp->rx_jumbo) {
3493                 pci_free_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3494                                     tp->rx_jumbo, tp->rx_jumbo_mapping);
3495                 tp->rx_jumbo = NULL;
3496         }
3497         if (tp->rx_rcb) {
3498                 pci_free_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3499                                     tp->rx_rcb, tp->rx_rcb_mapping);
3500                 tp->rx_rcb = NULL;
3501         }
3502         if (tp->tx_ring) {
3503                 pci_free_consistent(tp->pdev, TG3_TX_RING_BYTES,
3504                         tp->tx_ring, tp->tx_desc_mapping);
3505                 tp->tx_ring = NULL;
3506         }
3507         if (tp->hw_status) {
3508                 pci_free_consistent(tp->pdev, TG3_HW_STATUS_SIZE,
3509                                     tp->hw_status, tp->status_mapping);
3510                 tp->hw_status = NULL;
3511         }
3512         if (tp->hw_stats) {
3513                 pci_free_consistent(tp->pdev, sizeof(struct tg3_hw_stats),
3514                                     tp->hw_stats, tp->stats_mapping);
3515                 tp->hw_stats = NULL;
3516         }
3517 }
3518
3519 /*
3520  * Must not be invoked with interrupt sources disabled and
3521  * the hardware shut down.  Can sleep.
3522  */
3523 static int tg3_alloc_consistent(struct tg3 *tp)
3524 {
3525         tp->rx_std_buffers = kmalloc((sizeof(struct ring_info) *
3526                                       (TG3_RX_RING_SIZE +
3527                                        TG3_RX_JUMBO_RING_SIZE)) +
3528                                      (sizeof(struct tx_ring_info) *
3529                                       TG3_TX_RING_SIZE),
3530                                      GFP_KERNEL);
3531         if (!tp->rx_std_buffers)
3532                 return -ENOMEM;
3533
3534         memset(tp->rx_std_buffers, 0,
3535                (sizeof(struct ring_info) *
3536                 (TG3_RX_RING_SIZE +
3537                  TG3_RX_JUMBO_RING_SIZE)) +
3538                (sizeof(struct tx_ring_info) *
3539                 TG3_TX_RING_SIZE));
3540
3541         tp->rx_jumbo_buffers = &tp->rx_std_buffers[TG3_RX_RING_SIZE];
3542         tp->tx_buffers = (struct tx_ring_info *)
3543                 &tp->rx_jumbo_buffers[TG3_RX_JUMBO_RING_SIZE];
3544
3545         tp->rx_std = pci_alloc_consistent(tp->pdev, TG3_RX_RING_BYTES,
3546                                           &tp->rx_std_mapping);
3547         if (!tp->rx_std)
3548                 goto err_out;
3549
3550         tp->rx_jumbo = pci_alloc_consistent(tp->pdev, TG3_RX_JUMBO_RING_BYTES,
3551                                             &tp->rx_jumbo_mapping);
3552
3553         if (!tp->rx_jumbo)
3554                 goto err_out;
3555
3556         tp->rx_rcb = pci_alloc_consistent(tp->pdev, TG3_RX_RCB_RING_BYTES(tp),
3557                                           &tp->rx_rcb_mapping);
3558         if (!tp->rx_rcb)
3559                 goto err_out;
3560
3561         tp->tx_ring = pci_alloc_consistent(tp->pdev, TG3_TX_RING_BYTES,
3562                                            &tp->tx_desc_mapping);
3563         if (!tp->tx_ring)
3564                 goto err_out;
3565
3566         tp->hw_status = pci_alloc_consistent(tp->pdev,
3567                                              TG3_HW_STATUS_SIZE,
3568                                              &tp->status_mapping);
3569         if (!tp->hw_status)
3570                 goto err_out;
3571
3572         tp->hw_stats = pci_alloc_consistent(tp->pdev,
3573                                             sizeof(struct tg3_hw_stats),
3574                                             &tp->stats_mapping);
3575         if (!tp->hw_stats)
3576                 goto err_out;
3577
3578         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3579         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3580
3581         return 0;
3582
3583 err_out:
3584         tg3_free_consistent(tp);
3585         return -ENOMEM;
3586 }
3587
3588 #define MAX_WAIT_CNT 1000
3589
3590 /* To stop a block, clear the enable bit and poll till it
3591  * clears.  tp->lock is held.
3592  */
3593 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit)
3594 {
3595         unsigned int i;
3596         u32 val;
3597
3598         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
3599                 switch (ofs) {
3600                 case RCVLSC_MODE:
3601                 case DMAC_MODE:
3602                 case MBFREE_MODE:
3603                 case BUFMGR_MODE:
3604                 case MEMARB_MODE:
3605                         /* We can't enable/disable these bits of the
3606                          * 5705/5750, just say success.
3607                          */
3608                         return 0;
3609
3610                 default:
3611                         break;
3612                 };
3613         }
3614
3615         val = tr32(ofs);
3616         val &= ~enable_bit;
3617         tw32_f(ofs, val);
3618
3619         for (i = 0; i < MAX_WAIT_CNT; i++) {
3620                 udelay(100);
3621                 val = tr32(ofs);
3622                 if ((val & enable_bit) == 0)
3623                         break;
3624         }
3625
3626         if (i == MAX_WAIT_CNT) {
3627                 printk(KERN_ERR PFX "tg3_stop_block timed out, "
3628                        "ofs=%lx enable_bit=%x\n",
3629                        ofs, enable_bit);
3630                 return -ENODEV;
3631         }
3632
3633         return 0;
3634 }
3635
3636 /* tp->lock is held. */
3637 static int tg3_abort_hw(struct tg3 *tp)
3638 {
3639         int i, err;
3640
3641         tg3_disable_ints(tp);
3642
3643         tp->rx_mode &= ~RX_MODE_ENABLE;
3644         tw32_f(MAC_RX_MODE, tp->rx_mode);
3645         udelay(10);
3646
3647         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE);
3648         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE);
3649         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE);
3650         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE);
3651         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE);
3652         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE);
3653
3654         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE);
3655         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE);
3656         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
3657         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE);
3658         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
3659         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE);
3660         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE);
3661         if (err)
3662                 goto out;
3663
3664         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
3665         tw32_f(MAC_MODE, tp->mac_mode);
3666         udelay(40);
3667
3668         tp->tx_mode &= ~TX_MODE_ENABLE;
3669         tw32_f(MAC_TX_MODE, tp->tx_mode);
3670
3671         for (i = 0; i < MAX_WAIT_CNT; i++) {
3672                 udelay(100);
3673                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
3674                         break;
3675         }
3676         if (i >= MAX_WAIT_CNT) {
3677                 printk(KERN_ERR PFX "tg3_abort_hw timed out for %s, "
3678                        "TX_MODE_ENABLE will not clear MAC_TX_MODE=%08x\n",
3679                        tp->dev->name, tr32(MAC_TX_MODE));
3680                 return -ENODEV;
3681         }
3682
3683         err  = tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE);
3684         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE);
3685         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE);
3686
3687         tw32(FTQ_RESET, 0xffffffff);
3688         tw32(FTQ_RESET, 0x00000000);
3689
3690         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE);
3691         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE);
3692         if (err)
3693                 goto out;
3694
3695         if (tp->hw_status)
3696                 memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
3697         if (tp->hw_stats)
3698                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
3699
3700 out:
3701         return err;
3702 }
3703
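/* Acquire the NVRAM software arbitration semaphore (SWARB) before
 * touching the flash interface, polling up to 8000 x 20us for the
 * grant bit.
 */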
3704 /* tp->lock is held. */
3705 static int tg3_nvram_lock(struct tg3 *tp)
3706 {
3707         if (tp->tg3_flags & TG3_FLAG_NVRAM) {
3708                 int i;
3709
3710                 tw32(NVRAM_SWARB, SWARB_REQ_SET1);
3711                 for (i = 0; i < 8000; i++) {
3712                         if (tr32(NVRAM_SWARB) & SWARB_GNT1)
3713                                 break;
3714                         udelay(20);
3715                 }
3716                 if (i == 8000)
3717                         return -ENODEV;
3718         }
3719         return 0;
3720 }
3721
3722 /* tp->lock is held. */
3723 static void tg3_nvram_unlock(struct tg3 *tp)
3724 {
3725         if (tp->tg3_flags & TG3_FLAG_NVRAM)
3726                 tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
3727 }
3728
3729 /* tp->lock is held. */
3730 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
3731 {
3732         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3733                 tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
3734                               NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
3735
3736         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3737                 switch (kind) {
3738                 case RESET_KIND_INIT:
3739                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3740                                       DRV_STATE_START);
3741                         break;
3742
3743                 case RESET_KIND_SHUTDOWN:
3744                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3745                                       DRV_STATE_UNLOAD);
3746                         break;
3747
3748                 case RESET_KIND_SUSPEND:
3749                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3750                                       DRV_STATE_SUSPEND);
3751                         break;
3752
3753                 default:
3754                         break;
3755                 };
3756         }
3757 }
3758
3759 /* tp->lock is held. */
3760 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
3761 {
3762         if (tp->tg3_flags2 & TG3_FLG2_ASF_NEW_HANDSHAKE) {
3763                 switch (kind) {
3764                 case RESET_KIND_INIT:
3765                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3766                                       DRV_STATE_START_DONE);
3767                         break;
3768
3769                 case RESET_KIND_SHUTDOWN:
3770                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3771                                       DRV_STATE_UNLOAD_DONE);
3772                         break;
3773
3774                 default:
3775                         break;
3776                 };
3777         }
3778 }
3779
3780 /* tp->lock is held. */
3781 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
3782 {
3783         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3784                 switch (kind) {
3785                 case RESET_KIND_INIT:
3786                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3787                                       DRV_STATE_START);
3788                         break;
3789
3790                 case RESET_KIND_SHUTDOWN:
3791                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3792                                       DRV_STATE_UNLOAD);
3793                         break;
3794
3795                 case RESET_KIND_SUSPEND:
3796                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
3797                                       DRV_STATE_SUSPEND);
3798                         break;
3799
3800                 default:
3801                         break;
3802                 };
3803         }
3804 }
3805
3806 static void tg3_stop_fw(struct tg3 *);
3807
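/* Perform a core-clock reset of the chip via GRC_MISC_CFG, restore the
 * PCI configuration state that the reset clobbers, wait for the on-chip
 * bootcode to post its firmware mailbox magic value, and re-probe
 * whether ASF management firmware is enabled.
 */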
3808 /* tp->lock is held. */
3809 static int tg3_chip_reset(struct tg3 *tp)
3810 {
3811         u32 val;
3812         u32 flags_save;
3813         int i;
3814
3815         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X))
3816                 tg3_nvram_lock(tp);
3817
3818         /*
3819          * We must avoid the readl() that normally takes place.
3820          * It locks up machines, causes machine checks, and does other
3821          * fun things.  So, temporarily disable the 5701
3822          * hardware workaround while we do the reset.
3823          */
3824         flags_save = tp->tg3_flags;
3825         tp->tg3_flags &= ~TG3_FLAG_5701_REG_WRITE_BUG;
3826
3827         /* do the reset */
3828         val = GRC_MISC_CFG_CORECLK_RESET;
3829
3830         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3831                 if (tr32(0x7e2c) == 0x60) {
3832                         tw32(0x7e2c, 0x20);
3833                 }
3834                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3835                         tw32(GRC_MISC_CFG, (1 << 29));
3836                         val |= (1 << 29);
3837                 }
3838         }
3839
3840         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
3841                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
3842         tw32(GRC_MISC_CFG, val);
3843
3844         /* restore 5701 hardware bug workaround flag */
3845         tp->tg3_flags = flags_save;
3846
3847         /* Unfortunately, we have to delay before the PCI read back.
3848          * Some 575X chips will not even respond to a PCI cfg access
3849          * when the reset command is given to the chip.
3850          *
3851          * How do these hardware designers expect things to work
3852          * properly if the PCI write is posted for a long period
3853          * of time?  It is always necessary to have some method by
3854          * which a register read back can occur to push out the
3855          * write that does the reset.
3856          *
3857          * For most tg3 variants the trick below has been working.
3858          * Ho hum...
3859          */
3860         udelay(120);
3861
3862         /* Flush PCI posted writes.  The normal MMIO registers
3863          * are inaccessible at this time so this is the only
3864          * way to do this reliably (actually, this is no longer
3865          * the case, see above).  I tried to use indirect
3866          * register read/write but this upset some 5701 variants.
3867          */
3868         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
3869
3870         udelay(120);
3871
3872         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
3873                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
3874                         int i;
3875                         u32 cfg_val;
3876
3877                         /* Wait for link training to complete.  */
3878                         for (i = 0; i < 5000; i++)
3879                                 udelay(100);
3880
3881                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
3882                         pci_write_config_dword(tp->pdev, 0xc4,
3883                                                cfg_val | (1 << 15));
3884                 }
3885                 /* Set PCIE max payload size and clear error status.  */
3886                 pci_write_config_dword(tp->pdev, 0xd8, 0xf5000);
3887         }
3888
3889         /* Re-enable indirect register accesses. */
3890         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
3891                                tp->misc_host_ctrl);
3892
3893         /* Set MAX PCI retry to zero. */
3894         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
3895         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
3896             (tp->tg3_flags & TG3_FLAG_PCIX_MODE))
3897                 val |= PCISTATE_RETRY_SAME_DMA;
3898         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
3899
3900         pci_restore_state(tp->pdev);
3901
3902         /* Make sure PCI-X relaxed ordering bit is clear. */
3903         pci_read_config_dword(tp->pdev, TG3PCI_X_CAPS, &val);
3904         val &= ~PCIX_CAPS_RELAXED_ORDERING;
3905         pci_write_config_dword(tp->pdev, TG3PCI_X_CAPS, val);
3906
3907         tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
3908
3909         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
3910                 tg3_stop_fw(tp);
3911                 tw32(0x5000, 0x400);
3912         }
3913
3914         tw32(GRC_MODE, tp->grc_mode);
3915
3916         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
3917                 u32 val = tr32(0xc4);
3918
3919                 tw32(0xc4, val | (1 << 15));
3920         }
3921
3922         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
3923             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3924                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
3925                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
3926                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
3927                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
3928         }
3929
3930         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
3931                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
3932                 tw32_f(MAC_MODE, tp->mac_mode);
3933         } else
3934                 tw32_f(MAC_MODE, 0);
3935         udelay(40);
3936
3937         if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
3938                 /* Wait for firmware initialization to complete. */
3939                 for (i = 0; i < 100000; i++) {
3940                         tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
3941                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3942                                 break;
3943                         udelay(10);
3944                 }
3945                 if (i >= 100000) {
3946                         printk(KERN_ERR PFX "tg3_reset_hw timed out for %s, "
3947                                "firmware will not restart magic=%08x\n",
3948                                tp->dev->name, val);
3949                         return -ENODEV;
3950                 }
3951         }
3952
3953         if ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
3954             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
3955                 u32 val = tr32(0x7c00);
3956
3957                 tw32(0x7c00, val | (1 << 25));
3958         }
3959
3960         /* Reprobe ASF enable state.  */
3961         tp->tg3_flags &= ~TG3_FLAG_ENABLE_ASF;
3962         tp->tg3_flags2 &= ~TG3_FLG2_ASF_NEW_HANDSHAKE;
3963         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
3964         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
3965                 u32 nic_cfg;
3966
3967                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
3968                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
3969                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
3970                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
3971                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
3972                 }
3973         }
3974
3975         return 0;
3976 }
3977
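/* If ASF management firmware is enabled, ask it to pause: write the
 * PAUSE_FW command into the firmware command mailbox, raise the RX CPU
 * event, and wait briefly for the firmware to acknowledge it.
 */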
3978 /* tp->lock is held. */
3979 static void tg3_stop_fw(struct tg3 *tp)
3980 {
3981         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
3982                 u32 val;
3983                 int i;
3984
3985                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
3986                 val = tr32(GRC_RX_CPU_EVENT);
3987                 val |= (1 << 14);
3988                 tw32(GRC_RX_CPU_EVENT, val);
3989
3990                 /* Wait for RX cpu to ACK the event.  */
3991                 for (i = 0; i < 100; i++) {
3992                         if (!(tr32(GRC_RX_CPU_EVENT) & (1 << 14)))
3993                                 break;
3994                         udelay(1);
3995                 }
3996         }
3997 }
3998
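/* Bring the chip to a complete stop: pause the ASF firmware, post the
 * pre-reset shutdown signature, abort the DMA/MAC blocks, reset the
 * chip, then post the legacy and post-reset shutdown signatures.
 */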
3999 /* tp->lock is held. */
4000 static int tg3_halt(struct tg3 *tp)
4001 {
4002         int err;
4003
4004         tg3_stop_fw(tp);
4005
4006         tg3_write_sig_pre_reset(tp, RESET_KIND_SHUTDOWN);
4007
4008         tg3_abort_hw(tp);
4009         err = tg3_chip_reset(tp);
4010
4011         tg3_write_sig_legacy(tp, RESET_KIND_SHUTDOWN);
4012         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
4013
4014         if (err)
4015                 return err;
4016
4017         return 0;
4018 }
4019
4020 #define TG3_FW_RELEASE_MAJOR    0x0
4021 #define TG3_FW_RELASE_MINOR     0x0
4022 #define TG3_FW_RELEASE_FIX      0x0
4023 #define TG3_FW_START_ADDR       0x08000000
4024 #define TG3_FW_TEXT_ADDR        0x08000000
4025 #define TG3_FW_TEXT_LEN         0x9c0
4026 #define TG3_FW_RODATA_ADDR      0x080009c0
4027 #define TG3_FW_RODATA_LEN       0x60
4028 #define TG3_FW_DATA_ADDR        0x08000a40
4029 #define TG3_FW_DATA_LEN         0x20
4030 #define TG3_FW_SBSS_ADDR        0x08000a60
4031 #define TG3_FW_SBSS_LEN         0xc
4032 #define TG3_FW_BSS_ADDR         0x08000a70
4033 #define TG3_FW_BSS_LEN          0x10
4034
4035 static u32 tg3FwText[(TG3_FW_TEXT_LEN / sizeof(u32)) + 1] = {
4036         0x00000000, 0x10000003, 0x00000000, 0x0000000d, 0x0000000d, 0x3c1d0800,
4037         0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100000, 0x0e000018, 0x00000000,
4038         0x0000000d, 0x3c1d0800, 0x37bd3ffc, 0x03a0f021, 0x3c100800, 0x26100034,
4039         0x0e00021c, 0x00000000, 0x0000000d, 0x00000000, 0x00000000, 0x00000000,
4040         0x27bdffe0, 0x3c1cc000, 0xafbf0018, 0xaf80680c, 0x0e00004c, 0x241b2105,
4041         0x97850000, 0x97870002, 0x9782002c, 0x9783002e, 0x3c040800, 0x248409c0,
4042         0xafa00014, 0x00021400, 0x00621825, 0x00052c00, 0xafa30010, 0x8f860010,
4043         0x00e52825, 0x0e000060, 0x24070102, 0x3c02ac00, 0x34420100, 0x3c03ac01,
4044         0x34630100, 0xaf820490, 0x3c02ffff, 0xaf820494, 0xaf830498, 0xaf82049c,
4045         0x24020001, 0xaf825ce0, 0x0e00003f, 0xaf825d00, 0x0e000140, 0x00000000,
4046         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x2402ffff, 0xaf825404, 0x8f835400,
4047         0x34630400, 0xaf835400, 0xaf825404, 0x3c020800, 0x24420034, 0xaf82541c,
4048         0x03e00008, 0xaf805400, 0x00000000, 0x00000000, 0x3c020800, 0x34423000,
4049         0x3c030800, 0x34633000, 0x3c040800, 0x348437ff, 0x3c010800, 0xac220a64,
4050         0x24020040, 0x3c010800, 0xac220a68, 0x3c010800, 0xac200a60, 0xac600000,
4051         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4052         0x00804821, 0x8faa0010, 0x3c020800, 0x8c420a60, 0x3c040800, 0x8c840a68,
4053         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010800, 0xac230a60, 0x14400003,
4054         0x00004021, 0x3c010800, 0xac200a60, 0x3c020800, 0x8c420a60, 0x3c030800,
4055         0x8c630a64, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4056         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020800, 0x8c420a60,
4057         0x3c030800, 0x8c630a64, 0x8f84680c, 0x00021140, 0x00431021, 0xac440008,
4058         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4059         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4060         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4061         0, 0, 0, 0, 0, 0,
4062         0x02000008, 0x00000000, 0x0a0001e3, 0x3c0a0001, 0x0a0001e3, 0x3c0a0002,
4063         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4064         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4065         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4066         0x0a0001e3, 0x3c0a0007, 0x0a0001e3, 0x3c0a0008, 0x0a0001e3, 0x3c0a0009,
4067         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000b,
4068         0x0a0001e3, 0x3c0a000c, 0x0a0001e3, 0x3c0a000d, 0x0a0001e3, 0x00000000,
4069         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a000e, 0x0a0001e3, 0x00000000,
4070         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4071         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000, 0x0a0001e3, 0x00000000,
4072         0x0a0001e3, 0x00000000, 0x0a0001e3, 0x3c0a0013, 0x0a0001e3, 0x3c0a0014,
4073         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4074         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4075         0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
4076         0x27bdffe0, 0x00001821, 0x00001021, 0xafbf0018, 0xafb10014, 0xafb00010,
4077         0x3c010800, 0x00220821, 0xac200a70, 0x3c010800, 0x00220821, 0xac200a74,
4078         0x3c010800, 0x00220821, 0xac200a78, 0x24630001, 0x1860fff5, 0x2442000c,
4079         0x24110001, 0x8f906810, 0x32020004, 0x14400005, 0x24040001, 0x3c020800,
4080         0x8c420a78, 0x18400003, 0x00002021, 0x0e000182, 0x00000000, 0x32020001,
4081         0x10400003, 0x00000000, 0x0e000169, 0x00000000, 0x0a000153, 0xaf915028,
4082         0x8fbf0018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020, 0x3c050800,
4083         0x8ca50a70, 0x3c060800, 0x8cc60a80, 0x3c070800, 0x8ce70a78, 0x27bdffe0,
4084         0x3c040800, 0x248409d0, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014,
4085         0x0e00017b, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x24020001,
4086         0x8f836810, 0x00821004, 0x00021027, 0x00621824, 0x03e00008, 0xaf836810,
4087         0x27bdffd8, 0xafbf0024, 0x1080002e, 0xafb00020, 0x8f825cec, 0xafa20018,
4088         0x8f825cec, 0x3c100800, 0x26100a78, 0xafa2001c, 0x34028000, 0xaf825cec,
4089         0x8e020000, 0x18400016, 0x00000000, 0x3c020800, 0x94420a74, 0x8fa3001c,
4090         0x000221c0, 0xac830004, 0x8fa2001c, 0x3c010800, 0x0e000201, 0xac220a74,
4091         0x10400005, 0x00000000, 0x8e020000, 0x24420001, 0x0a0001df, 0xae020000,
4092         0x3c020800, 0x8c420a70, 0x00021c02, 0x000321c0, 0x0a0001c5, 0xafa2001c,
4093         0x0e000201, 0x00000000, 0x1040001f, 0x00000000, 0x8e020000, 0x8fa3001c,
4094         0x24420001, 0x3c010800, 0xac230a70, 0x3c010800, 0xac230a74, 0x0a0001df,
4095         0xae020000, 0x3c100800, 0x26100a78, 0x8e020000, 0x18400028, 0x00000000,
4096         0x0e000201, 0x00000000, 0x14400024, 0x00000000, 0x8e020000, 0x3c030800,
4097         0x8c630a70, 0x2442ffff, 0xafa3001c, 0x18400006, 0xae020000, 0x00031402,
4098         0x000221c0, 0x8c820004, 0x3c010800, 0xac220a70, 0x97a2001e, 0x2442ff00,
4099         0x2c420300, 0x1440000b, 0x24024000, 0x3c040800, 0x248409dc, 0xafa00010,
4100         0xafa00014, 0x8fa6001c, 0x24050008, 0x0e000060, 0x00003821, 0x0a0001df,
4101         0x00000000, 0xaf825cf8, 0x3c020800, 0x8c420a40, 0x8fa3001c, 0x24420001,
4102         0xaf835cf8, 0x3c010800, 0xac220a40, 0x8fbf0024, 0x8fb00020, 0x03e00008,
4103         0x27bd0028, 0x27bdffe0, 0x3c040800, 0x248409e8, 0x00002821, 0x00003021,
4104         0x00003821, 0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x8fbf0018,
4105         0x03e00008, 0x27bd0020, 0x8f82680c, 0x8f85680c, 0x00021827, 0x0003182b,
4106         0x00031823, 0x00431024, 0x00441021, 0x00a2282b, 0x10a00006, 0x00000000,
4107         0x00401821, 0x8f82680c, 0x0043102b, 0x1440fffd, 0x00000000, 0x03e00008,
4108         0x00000000, 0x3c040800, 0x8c840000, 0x3c030800, 0x8c630a40, 0x0064102b,
4109         0x54400002, 0x00831023, 0x00641023, 0x2c420008, 0x03e00008, 0x38420001,
4110         0x27bdffe0, 0x00802821, 0x3c040800, 0x24840a00, 0x00003021, 0x00003821,
4111         0xafbf0018, 0xafa00010, 0x0e000060, 0xafa00014, 0x0a000216, 0x00000000,
4112         0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000, 0x27bdffe0, 0x3c1cc000,
4113         0xafbf0018, 0x0e00004c, 0xaf80680c, 0x3c040800, 0x24840a10, 0x03802821,
4114         0x00003021, 0x00003821, 0xafa00010, 0x0e000060, 0xafa00014, 0x2402ffff,
4115         0xaf825404, 0x3c0200aa, 0x0e000234, 0xaf825434, 0x8fbf0018, 0x03e00008,
4116         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe8, 0xafb00010,
4117         0x24100001, 0xafbf0014, 0x3c01c003, 0xac200000, 0x8f826810, 0x30422000,
4118         0x10400003, 0x00000000, 0x0e000246, 0x00000000, 0x0a00023a, 0xaf905428,
4119         0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x27bdfff8, 0x8f845d0c,
4120         0x3c0200ff, 0x3c030800, 0x8c630a50, 0x3442fff8, 0x00821024, 0x1043001e,
4121         0x3c0500ff, 0x34a5fff8, 0x3c06c003, 0x3c074000, 0x00851824, 0x8c620010,
4122         0x3c010800, 0xac230a50, 0x30420008, 0x10400005, 0x00871025, 0x8cc20000,
4123         0x24420001, 0xacc20000, 0x00871025, 0xaf825d0c, 0x8fa20000, 0x24420001,
4124         0xafa20000, 0x8fa20000, 0x8fa20000, 0x24420001, 0xafa20000, 0x8fa20000,
4125         0x8f845d0c, 0x3c030800, 0x8c630a50, 0x00851024, 0x1443ffe8, 0x00851824,
4126         0x27bd0008, 0x03e00008, 0x00000000, 0x00000000, 0x00000000
4127 };
4128
4129 static u32 tg3FwRodata[(TG3_FW_RODATA_LEN / sizeof(u32)) + 1] = {
4130         0x35373031, 0x726c7341, 0x00000000, 0x00000000, 0x53774576, 0x656e7430,
4131         0x00000000, 0x726c7045, 0x76656e74, 0x31000000, 0x556e6b6e, 0x45766e74,
4132         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4133         0x00000000, 0x00000000, 0x4d61696e, 0x43707542, 0x00000000, 0x00000000,
4134         0x00000000
4135 };
4136
4137 #if 0 /* All zeros, don't eat up space with it. */
4138 u32 tg3FwData[(TG3_FW_DATA_LEN / sizeof(u32)) + 1] = {
4139         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4140         0x00000000, 0x00000000, 0x00000000, 0x00000000
4141 };
4142 #endif
4143
4144 #define RX_CPU_SCRATCH_BASE     0x30000
4145 #define RX_CPU_SCRATCH_SIZE     0x04000
4146 #define TX_CPU_SCRATCH_BASE     0x34000
4147 #define TX_CPU_SCRATCH_SIZE     0x04000
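/* On-chip scratch SRAM windows (16 kB each, per the sizes above) used
 * below to stage firmware images for the RX and TX CPUs.
 */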
4148
4149 /* tp->lock is held. */
4150 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
4151 {
4152         int i;
4153
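        /* 5705 and newer chips have no separate TX CPU to halt; asking for
         * one here is a driver bug.
         */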
4154         if (offset == TX_CPU_BASE &&
4155             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4156                 BUG();
4157
4158         if (offset == RX_CPU_BASE) {
4159                 for (i = 0; i < 10000; i++) {
4160                         tw32(offset + CPU_STATE, 0xffffffff);
4161                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4162                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4163                                 break;
4164                 }
4165
4166                 tw32(offset + CPU_STATE, 0xffffffff);
4167                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
4168                 udelay(10);
4169         } else {
4170                 for (i = 0; i < 10000; i++) {
4171                         tw32(offset + CPU_STATE, 0xffffffff);
4172                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
4173                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
4174                                 break;
4175                 }
4176         }
4177
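        /* If the mode register never latched the halt bit in 10000 tries,
         * give up and report which CPU refused to stop.
         */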
4178         if (i >= 10000) {
4179                 printk(KERN_ERR PFX "tg3_halt_cpu timed out for %s, "
4180                        "%s CPU\n",
4181                        tp->dev->name,
4182                        (offset == RX_CPU_BASE ? "RX" : "TX"));
4183                 return -ENODEV;
4184         }
4185         return 0;
4186 }
4187
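/* Describes one firmware image: the load address, length and backing array
 * for each of its text, rodata and data sections.  A NULL *_data pointer
 * means that section is all zeroes.
 */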
4188 struct fw_info {
4189         unsigned int text_base;
4190         unsigned int text_len;
4191         u32 *text_data;
4192         unsigned int rodata_base;
4193         unsigned int rodata_len;
4194         u32 *rodata_data;
4195         unsigned int data_base;
4196         unsigned int data_len;
4197         u32 *data_data;
4198 };
4199
4200 /* tp->lock is held. */
4201 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base, u32 cpu_scratch_base,
4202                                  int cpu_scratch_size, struct fw_info *info)
4203 {
4204         int err, i;
4205         u32 orig_tg3_flags = tp->tg3_flags;
4206         void (*write_op)(struct tg3 *, u32, u32);
4207
4208         if (cpu_base == TX_CPU_BASE &&
4209             (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
4210                 printk(KERN_ERR PFX "tg3_load_firmware_cpu: Trying to load "
4211                        "TX CPU firmware on %s, which is a 5705-class chip.\n",
4212                        tp->dev->name);
4213                 return -EINVAL;
4214         }
4215
4216         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
4217                 write_op = tg3_write_mem;
4218         else
4219                 write_op = tg3_write_indirect_reg32;
4220
4221         /* Force use of PCI config space for indirect register
4222          * write calls.
4223          */
4224         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
4225
4226         err = tg3_halt_cpu(tp, cpu_base);
4227         if (err)
4228                 goto out;
4229
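        /* Zero the whole scratch window, then copy each section (text,
         * rodata, data) to the scratch base plus the low 16 bits of the
         * section's load address; a NULL section array writes zeroes.
         */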
4230         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
4231                 write_op(tp, cpu_scratch_base + i, 0);
4232         tw32(cpu_base + CPU_STATE, 0xffffffff);
4233         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
4234         for (i = 0; i < (info->text_len / sizeof(u32)); i++)
4235                 write_op(tp, (cpu_scratch_base +
4236                               (info->text_base & 0xffff) +
4237                               (i * sizeof(u32))),
4238                          (info->text_data ?
4239                           info->text_data[i] : 0));
4240         for (i = 0; i < (info->rodata_len / sizeof(u32)); i++)
4241                 write_op(tp, (cpu_scratch_base +
4242                               (info->rodata_base & 0xffff) +
4243                               (i * sizeof(u32))),
4244                          (info->rodata_data ?
4245                           info->rodata_data[i] : 0));
4246         for (i = 0; i < (info->data_len / sizeof(u32)); i++)
4247                 write_op(tp, (cpu_scratch_base +
4248                               (info->data_base & 0xffff) +
4249                               (i * sizeof(u32))),
4250                          (info->data_data ?
4251                           info->data_data[i] : 0));
4252
4253         err = 0;
4254
4255 out:
4256         tp->tg3_flags = orig_tg3_flags;
4257         return err;
4258 }
4259
4260 /* tp->lock is held. */
4261 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
4262 {
4263         struct fw_info info;
4264         int err, i;
4265
4266         info.text_base = TG3_FW_TEXT_ADDR;
4267         info.text_len = TG3_FW_TEXT_LEN;
4268         info.text_data = &tg3FwText[0];
4269         info.rodata_base = TG3_FW_RODATA_ADDR;
4270         info.rodata_len = TG3_FW_RODATA_LEN;
4271         info.rodata_data = &tg3FwRodata[0];
4272         info.data_base = TG3_FW_DATA_ADDR;
4273         info.data_len = TG3_FW_DATA_LEN;
4274         info.data_data = NULL;
4275
4276         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
4277                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
4278                                     &info);
4279         if (err)
4280                 return err;
4281
4282         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
4283                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
4284                                     &info);
4285         if (err)
4286                 return err;
4287
4288         /* Now start up only the RX CPU. */
4289         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4290         tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4291
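        /* The PC write does not always take on the first attempt; halt the
         * CPU and rewrite it up to five times until it reads back correctly.
         */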
4292         for (i = 0; i < 5; i++) {
4293                 if (tr32(RX_CPU_BASE + CPU_PC) == TG3_FW_TEXT_ADDR)
4294                         break;
4295                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4296                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
4297                 tw32_f(RX_CPU_BASE + CPU_PC,    TG3_FW_TEXT_ADDR);
4298                 udelay(1000);
4299         }
4300         if (i >= 5) {
4301                 printk(KERN_ERR PFX "tg3_load_firmware fails to set RX CPU PC "
4302                        "for %s: is %08x, should be %08x\n",
4303                        tp->dev->name, tr32(RX_CPU_BASE + CPU_PC),
4304                        TG3_FW_TEXT_ADDR);
4305                 return -ENODEV;
4306         }
4307         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
4308         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
4309
4310         return 0;
4311 }
4312
4313 #if TG3_TSO_SUPPORT != 0
4314
4315 #define TG3_TSO_FW_RELEASE_MAJOR        0x1
4316 #define TG3_TSO_FW_RELEASE_MINOR        0x6
4317 #define TG3_TSO_FW_RELEASE_FIX          0x0
4318 #define TG3_TSO_FW_START_ADDR           0x08000000
4319 #define TG3_TSO_FW_TEXT_ADDR            0x08000000
4320 #define TG3_TSO_FW_TEXT_LEN             0x1aa0
4321 #define TG3_TSO_FW_RODATA_ADDR          0x08001aa0
4322 #define TG3_TSO_FW_RODATA_LEN           0x60
4323 #define TG3_TSO_FW_DATA_ADDR            0x08001b20
4324 #define TG3_TSO_FW_DATA_LEN             0x30
4325 #define TG3_TSO_FW_SBSS_ADDR            0x08001b50
4326 #define TG3_TSO_FW_SBSS_LEN             0x2c
4327 #define TG3_TSO_FW_BSS_ADDR             0x08001b80
4328 #define TG3_TSO_FW_BSS_LEN              0x894
4329
4330 static u32 tg3TsoFwText[(TG3_TSO_FW_TEXT_LEN / 4) + 1] = {
4331         0x0e000003, 0x00000000, 0x08001b24, 0x00000000, 0x10000003, 0x00000000,
4332         0x0000000d, 0x0000000d, 0x3c1d0800, 0x37bd4000, 0x03a0f021, 0x3c100800,
4333         0x26100000, 0x0e000010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4334         0xafbf0018, 0x0e0005d8, 0x34840002, 0x0e000668, 0x00000000, 0x3c030800,
4335         0x90631b68, 0x24020002, 0x3c040800, 0x24841aac, 0x14620003, 0x24050001,
4336         0x3c040800, 0x24841aa0, 0x24060006, 0x00003821, 0xafa00010, 0x0e00067c,
4337         0xafa00014, 0x8f625c50, 0x34420001, 0xaf625c50, 0x8f625c90, 0x34420001,
4338         0xaf625c90, 0x2402ffff, 0x0e000034, 0xaf625404, 0x8fbf0018, 0x03e00008,
4339         0x27bd0020, 0x00000000, 0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c,
4340         0xafb20018, 0xafb10014, 0x0e00005b, 0xafb00010, 0x24120002, 0x24110001,
4341         0x8f706820, 0x32020100, 0x10400003, 0x00000000, 0x0e0000bb, 0x00000000,
4342         0x8f706820, 0x32022000, 0x10400004, 0x32020001, 0x0e0001f0, 0x24040001,
4343         0x32020001, 0x10400003, 0x00000000, 0x0e0000a3, 0x00000000, 0x3c020800,
4344         0x90421b98, 0x14520003, 0x00000000, 0x0e0004c0, 0x00000000, 0x0a00003c,
4345         0xaf715028, 0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008,
4346         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ac0, 0x00002821, 0x00003021,
4347         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x3c040800,
4348         0x248423d8, 0xa4800000, 0x3c010800, 0xa0201b98, 0x3c010800, 0xac201b9c,
4349         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4350         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bbc, 0x8f624434, 0x3c010800,
4351         0xac221b88, 0x8f624438, 0x3c010800, 0xac221b8c, 0x8f624410, 0xac80f7a8,
4352         0x3c010800, 0xac201b84, 0x3c010800, 0xac2023e0, 0x3c010800, 0xac2023c8,
4353         0x3c010800, 0xac2023cc, 0x3c010800, 0xac202400, 0x3c010800, 0xac221b90,
4354         0x8f620068, 0x24030007, 0x00021702, 0x10430005, 0x00000000, 0x8f620068,
4355         0x00021702, 0x14400004, 0x24020001, 0x3c010800, 0x0a000097, 0xac20240c,
4356         0xac820034, 0x3c040800, 0x24841acc, 0x3c050800, 0x8ca5240c, 0x00003021,
4357         0x00003821, 0xafa00010, 0x0e00067c, 0xafa00014, 0x8fbf0018, 0x03e00008,
4358         0x27bd0020, 0x27bdffe0, 0x3c040800, 0x24841ad8, 0x00002821, 0x00003021,
4359         0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014, 0x0e00005b,
4360         0x00000000, 0x0e0000b4, 0x00002021, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4361         0x24020001, 0x8f636820, 0x00821004, 0x00021027, 0x00621824, 0x03e00008,
4362         0xaf636820, 0x27bdffd0, 0xafbf002c, 0xafb60028, 0xafb50024, 0xafb40020,
4363         0xafb3001c, 0xafb20018, 0xafb10014, 0xafb00010, 0x8f675c5c, 0x3c030800,
4364         0x24631bbc, 0x8c620000, 0x14470005, 0x3c0200ff, 0x3c020800, 0x90421b98,
4365         0x14400119, 0x3c0200ff, 0x3442fff8, 0x00e28824, 0xac670000, 0x00111902,
4366         0x306300ff, 0x30e20003, 0x000211c0, 0x00622825, 0x00a04021, 0x00071602,
4367         0x3c030800, 0x90631b98, 0x3044000f, 0x14600036, 0x00804821, 0x24020001,
4368         0x3c010800, 0xa0221b98, 0x00051100, 0x00821025, 0x3c010800, 0xac201b9c,
4369         0x3c010800, 0xac201ba0, 0x3c010800, 0xac201ba4, 0x3c010800, 0xac201bac,
4370         0x3c010800, 0xac201bb8, 0x3c010800, 0xac201bb0, 0x3c010800, 0xac201bb4,
4371         0x3c010800, 0xa42223d8, 0x9622000c, 0x30437fff, 0x3c010800, 0xa4222410,
4372         0x30428000, 0x3c010800, 0xa4231bc6, 0x10400005, 0x24020001, 0x3c010800,
4373         0xac2223f4, 0x0a000102, 0x2406003e, 0x24060036, 0x3c010800, 0xac2023f4,
4374         0x9622000a, 0x3c030800, 0x94631bc6, 0x3c010800, 0xac2023f0, 0x3c010800,
4375         0xac2023f8, 0x00021302, 0x00021080, 0x00c21021, 0x00621821, 0x3c010800,
4376         0xa42223d0, 0x3c010800, 0x0a000115, 0xa4231b96, 0x9622000c, 0x3c010800,
4377         0xa42223ec, 0x3c040800, 0x24841b9c, 0x8c820000, 0x00021100, 0x3c010800,
4378         0x00220821, 0xac311bc8, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4379         0xac271bcc, 0x8c820000, 0x25030001, 0x306601ff, 0x00021100, 0x3c010800,
4380         0x00220821, 0xac261bd0, 0x8c820000, 0x00021100, 0x3c010800, 0x00220821,
4381         0xac291bd4, 0x96230008, 0x3c020800, 0x8c421bac, 0x00432821, 0x3c010800,
4382         0xac251bac, 0x9622000a, 0x30420004, 0x14400018, 0x00061100, 0x8f630c14,
4383         0x3063000f, 0x2c620002, 0x1440000b, 0x3c02c000, 0x8f630c14, 0x3c020800,
4384         0x8c421b40, 0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002,
4385         0x1040fff7, 0x3c02c000, 0x00e21825, 0xaf635c5c, 0x8f625c50, 0x30420002,
4386         0x10400014, 0x00000000, 0x0a000147, 0x00000000, 0x3c030800, 0x8c631b80,
4387         0x3c040800, 0x94841b94, 0x01221025, 0x3c010800, 0xa42223da, 0x24020001,
4388         0x3c010800, 0xac221bb8, 0x24630001, 0x0085202a, 0x3c010800, 0x10800003,
4389         0xac231b80, 0x3c010800, 0xa4251b94, 0x3c060800, 0x24c61b9c, 0x8cc20000,
4390         0x24420001, 0xacc20000, 0x28420080, 0x14400005, 0x00000000, 0x0e000656,
4391         0x24040002, 0x0a0001e6, 0x00000000, 0x3c020800, 0x8c421bb8, 0x10400078,
4392         0x24020001, 0x3c050800, 0x90a51b98, 0x14a20072, 0x00000000, 0x3c150800,
4393         0x96b51b96, 0x3c040800, 0x8c841bac, 0x32a3ffff, 0x0083102a, 0x1440006c,
4394         0x00000000, 0x14830003, 0x00000000, 0x3c010800, 0xac2523f0, 0x1060005c,
4395         0x00009021, 0x24d60004, 0x0060a021, 0x24d30014, 0x8ec20000, 0x00028100,
4396         0x3c110800, 0x02308821, 0x0e000625, 0x8e311bc8, 0x00402821, 0x10a00054,
4397         0x00000000, 0x9628000a, 0x31020040, 0x10400005, 0x2407180c, 0x8e22000c,
4398         0x2407188c, 0x00021400, 0xaca20018, 0x3c030800, 0x00701821, 0x8c631bd0,
4399         0x3c020800, 0x00501021, 0x8c421bd4, 0x00031d00, 0x00021400, 0x00621825,
4400         0xaca30014, 0x8ec30004, 0x96220008, 0x00432023, 0x3242ffff, 0x3083ffff,
4401         0x00431021, 0x0282102a, 0x14400002, 0x02b23023, 0x00803021, 0x8e620000,
4402         0x30c4ffff, 0x00441021, 0xae620000, 0x8e220000, 0xaca20000, 0x8e220004,
4403         0x8e63fff4, 0x00431021, 0xaca20004, 0xa4a6000e, 0x8e62fff4, 0x00441021,
4404         0xae62fff4, 0x96230008, 0x0043102a, 0x14400005, 0x02469021, 0x8e62fff0,
4405         0xae60fff4, 0x24420001, 0xae62fff0, 0xaca00008, 0x3242ffff, 0x14540008,
4406         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x24020905, 0xa4a2000c,
4407         0x0a0001cb, 0x34e70020, 0xa4a2000c, 0x3c020800, 0x8c4223f0, 0x10400003,
4408         0x3c024b65, 0x0a0001d3, 0x34427654, 0x3c02b49a, 0x344289ab, 0xaca2001c,
4409         0x30e2ffff, 0xaca20010, 0x0e0005a2, 0x00a02021, 0x3242ffff, 0x0054102b,
4410         0x1440ffa9, 0x00000000, 0x24020002, 0x3c010800, 0x0a0001e6, 0xa0221b98,
4411         0x8ec2083c, 0x24420001, 0x0a0001e6, 0xaec2083c, 0x0e0004c0, 0x00000000,
4412         0x8fbf002c, 0x8fb60028, 0x8fb50024, 0x8fb40020, 0x8fb3001c, 0x8fb20018,
4413         0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0030, 0x27bdffd0, 0xafbf0028,
4414         0xafb30024, 0xafb20020, 0xafb1001c, 0xafb00018, 0x8f725c9c, 0x3c0200ff,
4415         0x3442fff8, 0x3c070800, 0x24e71bb4, 0x02428824, 0x9623000e, 0x8ce20000,
4416         0x00431021, 0xace20000, 0x8e220010, 0x30420020, 0x14400011, 0x00809821,
4417         0x0e00063b, 0x02202021, 0x3c02c000, 0x02421825, 0xaf635c9c, 0x8f625c90,
4418         0x30420002, 0x1040011e, 0x00000000, 0xaf635c9c, 0x8f625c90, 0x30420002,
4419         0x10400119, 0x00000000, 0x0a00020d, 0x00000000, 0x8e240008, 0x8e230014,
4420         0x00041402, 0x000231c0, 0x00031502, 0x304201ff, 0x2442ffff, 0x3042007f,
4421         0x00031942, 0x30637800, 0x00021100, 0x24424000, 0x00624821, 0x9522000a,
4422         0x3084ffff, 0x30420008, 0x104000b0, 0x000429c0, 0x3c020800, 0x8c422400,
4423         0x14400024, 0x24c50008, 0x94c20014, 0x3c010800, 0xa42223d0, 0x8cc40010,
4424         0x00041402, 0x3c010800, 0xa42223d2, 0x3c010800, 0xa42423d4, 0x94c2000e,
4425         0x3083ffff, 0x00431023, 0x3c010800, 0xac222408, 0x94c2001a, 0x3c010800,
4426         0xac262400, 0x3c010800, 0xac322404, 0x3c010800, 0xac2223fc, 0x3c02c000,
4427         0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e5, 0x00000000,
4428         0xaf635c9c, 0x8f625c90, 0x30420002, 0x104000e0, 0x00000000, 0x0a000246,
4429         0x00000000, 0x94c2000e, 0x3c030800, 0x946323d4, 0x00434023, 0x3103ffff,
4430         0x2c620008, 0x1040001c, 0x00000000, 0x94c20014, 0x24420028, 0x00a22821,
4431         0x00031042, 0x1840000b, 0x00002021, 0x24e60848, 0x00403821, 0x94a30000,
4432         0x8cc20000, 0x24840001, 0x00431021, 0xacc20000, 0x0087102a, 0x1440fff9,
4433         0x24a50002, 0x31020001, 0x1040001f, 0x3c024000, 0x3c040800, 0x248423fc,
4434         0xa0a00001, 0x94a30000, 0x8c820000, 0x00431021, 0x0a000285, 0xac820000,
4435         0x8f626800, 0x3c030010, 0x00431024, 0x10400009, 0x00000000, 0x94c2001a,
4436         0x3c030800, 0x8c6323fc, 0x00431021, 0x3c010800, 0xac2223fc, 0x0a000286,
4437         0x3c024000, 0x94c2001a, 0x94c4001c, 0x3c030800, 0x8c6323fc, 0x00441023,
4438         0x00621821, 0x3c010800, 0xac2323fc, 0x3c024000, 0x02421825, 0xaf635c9c,
4439         0x8f625c90, 0x30420002, 0x1440fffc, 0x00000000, 0x9522000a, 0x30420010,
4440         0x1040009b, 0x00000000, 0x3c030800, 0x946323d4, 0x3c070800, 0x24e72400,
4441         0x8ce40000, 0x8f626800, 0x24630030, 0x00832821, 0x3c030010, 0x00431024,
4442         0x1440000a, 0x00000000, 0x94a20004, 0x3c040800, 0x8c842408, 0x3c030800,
4443         0x8c6323fc, 0x00441023, 0x00621821, 0x3c010800, 0xac2323fc, 0x3c040800,
4444         0x8c8423fc, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402, 0x00822021,
4445         0x00041027, 0xa4a20006, 0x3c030800, 0x8c632404, 0x3c0200ff, 0x3442fff8,
4446         0x00628824, 0x96220008, 0x24050001, 0x24034000, 0x000231c0, 0x00801021,
4447         0xa4c2001a, 0xa4c0001c, 0xace00000, 0x3c010800, 0xac251b60, 0xaf635cb8,
4448         0x8f625cb0, 0x30420002, 0x10400003, 0x00000000, 0x3c010800, 0xac201b60,
4449         0x8e220008, 0xaf625cb8, 0x8f625cb0, 0x30420002, 0x10400003, 0x00000000,
4450         0x3c010800, 0xac201b60, 0x3c020800, 0x8c421b60, 0x1040ffec, 0x00000000,
4451         0x3c040800, 0x0e00063b, 0x8c842404, 0x0a00032a, 0x00000000, 0x3c030800,
4452         0x90631b98, 0x24020002, 0x14620003, 0x3c034b65, 0x0a0002e1, 0x00008021,
4453         0x8e22001c, 0x34637654, 0x10430002, 0x24100002, 0x24100001, 0x00c02021,
4454         0x0e000350, 0x02003021, 0x24020003, 0x3c010800, 0xa0221b98, 0x24020002,
4455         0x1202000a, 0x24020001, 0x3c030800, 0x8c6323f0, 0x10620006, 0x00000000,
4456         0x3c020800, 0x944223d8, 0x00021400, 0x0a00031f, 0xae220014, 0x3c040800,
4457         0x248423da, 0x94820000, 0x00021400, 0xae220014, 0x3c020800, 0x8c421bbc,
4458         0x3c03c000, 0x3c010800, 0xa0201b98, 0x00431025, 0xaf625c5c, 0x8f625c50,
4459         0x30420002, 0x10400009, 0x00000000, 0x2484f7e2, 0x8c820000, 0x00431025,
4460         0xaf625c5c, 0x8f625c50, 0x30420002, 0x1440fffa, 0x00000000, 0x3c020800,
4461         0x24421b84, 0x8c430000, 0x24630001, 0xac430000, 0x8f630c14, 0x3063000f,
4462         0x2c620002, 0x1440000c, 0x3c024000, 0x8f630c14, 0x3c020800, 0x8c421b40,
4463         0x3063000f, 0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7,
4464         0x00000000, 0x3c024000, 0x02421825, 0xaf635c9c, 0x8f625c90, 0x30420002,
4465         0x1440fffc, 0x00000000, 0x12600003, 0x00000000, 0x0e0004c0, 0x00000000,
4466         0x8fbf0028, 0x8fb30024, 0x8fb20020, 0x8fb1001c, 0x8fb00018, 0x03e00008,
4467         0x27bd0030, 0x8f634450, 0x3c040800, 0x24841b88, 0x8c820000, 0x00031c02,
4468         0x0043102b, 0x14400007, 0x3c038000, 0x8c840004, 0x8f624450, 0x00021c02,
4469         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4470         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3c024000,
4471         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00000000,
4472         0x03e00008, 0x00000000, 0x27bdffe0, 0x00805821, 0x14c00011, 0x256e0008,
4473         0x3c020800, 0x8c4223f4, 0x10400007, 0x24020016, 0x3c010800, 0xa42223d2,
4474         0x2402002a, 0x3c010800, 0x0a000364, 0xa42223d4, 0x8d670010, 0x00071402,
4475         0x3c010800, 0xa42223d2, 0x3c010800, 0xa42723d4, 0x3c040800, 0x948423d4,
4476         0x3c030800, 0x946323d2, 0x95cf0006, 0x3c020800, 0x944223d0, 0x00832023,
4477         0x01e2c023, 0x3065ffff, 0x24a20028, 0x01c24821, 0x3082ffff, 0x14c0001a,
4478         0x01226021, 0x9582000c, 0x3042003f, 0x3c010800, 0xa42223d6, 0x95820004,
4479         0x95830006, 0x3c010800, 0xac2023e4, 0x3c010800, 0xac2023e8, 0x00021400,
4480         0x00431025, 0x3c010800, 0xac221bc0, 0x95220004, 0x3c010800, 0xa4221bc4,
4481         0x95230002, 0x01e51023, 0x0043102a, 0x10400010, 0x24020001, 0x3c010800,
4482         0x0a000398, 0xac2223f8, 0x3c030800, 0x8c6323e8, 0x3c020800, 0x94421bc4,
4483         0x00431021, 0xa5220004, 0x3c020800, 0x94421bc0, 0xa5820004, 0x3c020800,
4484         0x8c421bc0, 0xa5820006, 0x3c020800, 0x8c4223f0, 0x3c0d0800, 0x8dad23e4,
4485         0x3c0a0800, 0x144000e5, 0x8d4a23e8, 0x3c020800, 0x94421bc4, 0x004a1821,
4486         0x3063ffff, 0x0062182b, 0x24020002, 0x10c2000d, 0x01435023, 0x3c020800,
4487         0x944223d6, 0x30420009, 0x10400008, 0x00000000, 0x9582000c, 0x3042fff6,
4488         0xa582000c, 0x3c020800, 0x944223d6, 0x30420009, 0x01a26823, 0x3c020800,
4489         0x8c4223f8, 0x1040004a, 0x01203821, 0x3c020800, 0x944223d2, 0x00004021,
4490         0xa520000a, 0x01e21023, 0xa5220002, 0x3082ffff, 0x00021042, 0x18400008,
4491         0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021, 0x0103102a,
4492         0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061402,
4493         0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021, 0x2527000c,
4494         0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004, 0x1440fffb,
4495         0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023, 0x01803821,
4496         0x3082ffff, 0xa4e00010, 0x00621821, 0x00021042, 0x18400010, 0x00c33021,
4497         0x00404821, 0x94e20000, 0x24e70002, 0x00c23021, 0x30e2007f, 0x14400006,
4498         0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80, 0x00625824, 0x25670008,
4499         0x0109102a, 0x1440fff3, 0x00000000, 0x30820001, 0x10400005, 0x00061c02,
4500         0xa0e00001, 0x94e20000, 0x00c23021, 0x00061c02, 0x30c2ffff, 0x00623021,
4501         0x00061402, 0x00c23021, 0x0a00047d, 0x30c6ffff, 0x24020002, 0x14c20081,
4502         0x00000000, 0x3c020800, 0x8c42240c, 0x14400007, 0x00000000, 0x3c020800,
4503         0x944223d2, 0x95230002, 0x01e21023, 0x10620077, 0x00000000, 0x3c020800,
4504         0x944223d2, 0x01e21023, 0xa5220002, 0x3c020800, 0x8c42240c, 0x1040001a,
4505         0x31e3ffff, 0x8dc70010, 0x3c020800, 0x94421b96, 0x00e04021, 0x00072c02,
4506         0x00aa2021, 0x00431023, 0x00823823, 0x00072402, 0x30e2ffff, 0x00823821,
4507         0x00071027, 0xa522000a, 0x3102ffff, 0x3c040800, 0x948423d4, 0x00453023,
4508         0x00e02821, 0x00641823, 0x006d1821, 0x00c33021, 0x00061c02, 0x30c2ffff,
4509         0x0a00047d, 0x00623021, 0x01203821, 0x00004021, 0x3082ffff, 0x00021042,
4510         0x18400008, 0x00003021, 0x00401821, 0x94e20000, 0x25080001, 0x00c23021,
4511         0x0103102a, 0x1440fffb, 0x24e70002, 0x00061c02, 0x30c2ffff, 0x00623021,
4512         0x00061402, 0x00c23021, 0x00c02821, 0x00061027, 0xa522000a, 0x00003021,
4513         0x2527000c, 0x00004021, 0x94e20000, 0x25080001, 0x00c23021, 0x2d020004,
4514         0x1440fffb, 0x24e70002, 0x95220002, 0x00004021, 0x91230009, 0x00442023,
4515         0x01803821, 0x3082ffff, 0xa4e00010, 0x3c040800, 0x948423d4, 0x00621821,
4516         0x00c33021, 0x00061c02, 0x30c2ffff, 0x00623021, 0x00061c02, 0x3c020800,
4517         0x944223d0, 0x00c34821, 0x00441023, 0x00021fc2, 0x00431021, 0x00021043,
4518         0x18400010, 0x00003021, 0x00402021, 0x94e20000, 0x24e70002, 0x00c23021,
4519         0x30e2007f, 0x14400006, 0x25080001, 0x8d630000, 0x3c02007f, 0x3442ff80,
4520         0x00625824, 0x25670008, 0x0104102a, 0x1440fff3, 0x00000000, 0x3c020800,
4521         0x944223ec, 0x00c23021, 0x3122ffff, 0x00c23021, 0x00061c02, 0x30c2ffff,
4522         0x00623021, 0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010,
4523         0xadc00014, 0x0a00049d, 0xadc00000, 0x8dc70010, 0x00e04021, 0x11400007,
4524         0x00072c02, 0x00aa3021, 0x00061402, 0x30c3ffff, 0x00433021, 0x00061402,
4525         0x00c22821, 0x00051027, 0xa522000a, 0x3c030800, 0x946323d4, 0x3102ffff,
4526         0x01e21021, 0x00433023, 0x00cd3021, 0x00061c02, 0x30c2ffff, 0x00623021,
4527         0x00061402, 0x00c23021, 0x00c04021, 0x00061027, 0xa5820010, 0x3102ffff,
4528         0x00051c00, 0x00431025, 0xadc20010, 0x3c020800, 0x8c4223f4, 0x10400005,
4529         0x2de205eb, 0x14400002, 0x25e2fff2, 0x34028870, 0xa5c20034, 0x3c030800,
4530         0x246323e8, 0x8c620000, 0x24420001, 0xac620000, 0x3c040800, 0x8c8423e4,
4531         0x3c020800, 0x8c421bc0, 0x3303ffff, 0x00832021, 0x00431821, 0x0062102b,
4532         0x3c010800, 0xac2423e4, 0x10400003, 0x2482ffff, 0x3c010800, 0xac2223e4,
4533         0x3c010800, 0xac231bc0, 0x03e00008, 0x27bd0020, 0x27bdffb8, 0x3c050800,
4534         0x24a51b96, 0xafbf0044, 0xafbe0040, 0xafb7003c, 0xafb60038, 0xafb50034,
4535         0xafb40030, 0xafb3002c, 0xafb20028, 0xafb10024, 0xafb00020, 0x94a90000,
4536         0x3c020800, 0x944223d0, 0x3c030800, 0x8c631bb0, 0x3c040800, 0x8c841bac,
4537         0x01221023, 0x0064182a, 0xa7a9001e, 0x106000be, 0xa7a20016, 0x24be0022,
4538         0x97b6001e, 0x24b3001a, 0x24b70016, 0x8fc20000, 0x14400008, 0x00000000,
4539         0x8fc2fff8, 0x97a30016, 0x8fc4fff4, 0x00431021, 0x0082202a, 0x148000b0,
4540         0x00000000, 0x97d50818, 0x32a2ffff, 0x104000a3, 0x00009021, 0x0040a021,
4541         0x00008821, 0x0e000625, 0x00000000, 0x00403021, 0x14c00007, 0x00000000,
4542         0x3c020800, 0x8c4223dc, 0x24420001, 0x3c010800, 0x0a000596, 0xac2223dc,
4543         0x3c100800, 0x02118021, 0x8e101bc8, 0x9608000a, 0x31020040, 0x10400005,
4544         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x31020080,
4545         0x54400001, 0x34e70010, 0x3c020800, 0x00511021, 0x8c421bd0, 0x3c030800,
4546         0x00711821, 0x8c631bd4, 0x00021500, 0x00031c00, 0x00431025, 0xacc20014,
4547         0x96040008, 0x3242ffff, 0x00821021, 0x0282102a, 0x14400002, 0x02b22823,
4548         0x00802821, 0x8e020000, 0x02459021, 0xacc20000, 0x8e020004, 0x00c02021,
4549         0x26310010, 0xac820004, 0x30e2ffff, 0xac800008, 0xa485000e, 0xac820010,
4550         0x24020305, 0x0e0005a2, 0xa482000c, 0x3242ffff, 0x0054102b, 0x1440ffc5,
4551         0x3242ffff, 0x0a00058e, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4552         0x10400067, 0x00000000, 0x8e62fff0, 0x00028900, 0x3c100800, 0x02118021,
4553         0x0e000625, 0x8e101bc8, 0x00403021, 0x14c00005, 0x00000000, 0x8e62082c,
4554         0x24420001, 0x0a000596, 0xae62082c, 0x9608000a, 0x31020040, 0x10400005,
4555         0x2407180c, 0x8e02000c, 0x2407188c, 0x00021400, 0xacc20018, 0x3c020800,
4556         0x00511021, 0x8c421bd0, 0x3c030800, 0x00711821, 0x8c631bd4, 0x00021500,
4557         0x00031c00, 0x00431025, 0xacc20014, 0x8e63fff4, 0x96020008, 0x00432023,
4558         0x3242ffff, 0x3083ffff, 0x00431021, 0x02c2102a, 0x10400003, 0x00802821,
4559         0x97a9001e, 0x01322823, 0x8e620000, 0x30a4ffff, 0x00441021, 0xae620000,
4560         0xa4c5000e, 0x8e020000, 0xacc20000, 0x8e020004, 0x8e63fff4, 0x00431021,
4561         0xacc20004, 0x8e63fff4, 0x96020008, 0x00641821, 0x0062102a, 0x14400006,
4562         0x02459021, 0x8e62fff0, 0xae60fff4, 0x24420001, 0x0a000571, 0xae62fff0,
4563         0xae63fff4, 0xacc00008, 0x3242ffff, 0x10560003, 0x31020004, 0x10400006,
4564         0x24020305, 0x31020080, 0x54400001, 0x34e70010, 0x34e70020, 0x24020905,
4565         0xa4c2000c, 0x8ee30000, 0x8ee20004, 0x14620007, 0x3c02b49a, 0x8ee20860,
4566         0x54400001, 0x34e70400, 0x3c024b65, 0x0a000588, 0x34427654, 0x344289ab,
4567         0xacc2001c, 0x30e2ffff, 0xacc20010, 0x0e0005a2, 0x00c02021, 0x3242ffff,
4568         0x0056102b, 0x1440ff9b, 0x00000000, 0x8e620000, 0x8e63fffc, 0x0043102a,
4569         0x1440ff48, 0x00000000, 0x8fbf0044, 0x8fbe0040, 0x8fb7003c, 0x8fb60038,
4570         0x8fb50034, 0x8fb40030, 0x8fb3002c, 0x8fb20028, 0x8fb10024, 0x8fb00020,
4571         0x03e00008, 0x27bd0048, 0x27bdffe8, 0xafbf0014, 0xafb00010, 0x8f624450,
4572         0x8f634410, 0x0a0005b1, 0x00808021, 0x8f626820, 0x30422000, 0x10400003,
4573         0x00000000, 0x0e0001f0, 0x00002021, 0x8f624450, 0x8f634410, 0x3042ffff,
4574         0x0043102b, 0x1440fff5, 0x00000000, 0x8f630c14, 0x3063000f, 0x2c620002,
4575         0x1440000b, 0x00000000, 0x8f630c14, 0x3c020800, 0x8c421b40, 0x3063000f,
4576         0x24420001, 0x3c010800, 0xac221b40, 0x2c620002, 0x1040fff7, 0x00000000,
4577         0xaf705c18, 0x8f625c10, 0x30420002, 0x10400009, 0x00000000, 0x8f626820,
4578         0x30422000, 0x1040fff8, 0x00000000, 0x0e0001f0, 0x00002021, 0x0a0005c4,
4579         0x00000000, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000,
4580         0x00000000, 0x00000000, 0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010,
4581         0xaf60680c, 0x8f626804, 0x34420082, 0xaf626804, 0x8f634000, 0x24020b50,
4582         0x3c010800, 0xac221b54, 0x24020b78, 0x3c010800, 0xac221b64, 0x34630002,
4583         0xaf634000, 0x0e000605, 0x00808021, 0x3c010800, 0xa0221b68, 0x304200ff,
4584         0x24030002, 0x14430005, 0x00000000, 0x3c020800, 0x8c421b54, 0x0a0005f8,
4585         0xac5000c0, 0x3c020800, 0x8c421b54, 0xac5000bc, 0x8f624434, 0x8f634438,
4586         0x8f644410, 0x3c010800, 0xac221b5c, 0x3c010800, 0xac231b6c, 0x3c010800,
4587         0xac241b58, 0x8fbf0014, 0x8fb00010, 0x03e00008, 0x27bd0018, 0x3c040800,
4588         0x8c870000, 0x3c03aa55, 0x3463aa55, 0x3c06c003, 0xac830000, 0x8cc20000,
4589         0x14430007, 0x24050002, 0x3c0355aa, 0x346355aa, 0xac830000, 0x8cc20000,
4590         0x50430001, 0x24050001, 0x3c020800, 0xac470000, 0x03e00008, 0x00a01021,
4591         0x27bdfff8, 0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe,
4592         0x00000000, 0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008,
4593         0x27bd0008, 0x8f634450, 0x3c020800, 0x8c421b5c, 0x00031c02, 0x0043102b,
4594         0x14400008, 0x3c038000, 0x3c040800, 0x8c841b6c, 0x8f624450, 0x00021c02,
4595         0x0083102b, 0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024,
4596         0x1440fffd, 0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff,
4597         0x2442e000, 0x2c422001, 0x14400003, 0x3c024000, 0x0a000648, 0x2402ffff,
4598         0x00822025, 0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021,
4599         0x03e00008, 0x00000000, 0x8f624450, 0x3c030800, 0x8c631b58, 0x0a000651,
4600         0x3042ffff, 0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000,
4601         0x03e00008, 0x00000000, 0x27bdffe0, 0x00802821, 0x3c040800, 0x24841af0,
4602         0x00003021, 0x00003821, 0xafbf0018, 0xafa00010, 0x0e00067c, 0xafa00014,
4603         0x0a000660, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x00000000,
4604         0x00000000, 0x00000000, 0x3c020800, 0x34423000, 0x3c030800, 0x34633000,
4605         0x3c040800, 0x348437ff, 0x3c010800, 0xac221b74, 0x24020040, 0x3c010800,
4606         0xac221b78, 0x3c010800, 0xac201b70, 0xac600000, 0x24630004, 0x0083102b,
4607         0x5040fffd, 0xac600000, 0x03e00008, 0x00000000, 0x00804821, 0x8faa0010,
4608         0x3c020800, 0x8c421b70, 0x3c040800, 0x8c841b78, 0x8fab0014, 0x24430001,
4609         0x0044102b, 0x3c010800, 0xac231b70, 0x14400003, 0x00004021, 0x3c010800,
4610         0xac201b70, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74, 0x91240000,
4611         0x00021140, 0x00431021, 0x00481021, 0x25080001, 0xa0440000, 0x29020008,
4612         0x1440fff4, 0x25290001, 0x3c020800, 0x8c421b70, 0x3c030800, 0x8c631b74,
4613         0x8f64680c, 0x00021140, 0x00431021, 0xac440008, 0xac45000c, 0xac460010,
4614         0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c, 0x00000000, 0x00000000,
4615 };
4616
4617 static u32 tg3TsoFwRodata[] = {
4618         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4619         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x496e0000, 0x73746b6f,
4620         0x66662a2a, 0x00000000, 0x53774576, 0x656e7430, 0x00000000, 0x00000000,
4621         0x00000000, 0x00000000, 0x66617461, 0x6c457272, 0x00000000, 0x00000000,
4622         0x00000000,
4623 };
4624
4625 static u32 tg3TsoFwData[] = {
4626         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x362e3000, 0x00000000,
4627         0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000, 0x00000000,
4628         0x00000000,
4629 };
4630
4631 /* 5705 needs a special version of the TSO firmware.  */
4632 #define TG3_TSO5_FW_RELEASE_MAJOR       0x1
4633 #define TG3_TSO5_FW_RELEASE_MINOR       0x2
4634 #define TG3_TSO5_FW_RELEASE_FIX         0x0
4635 #define TG3_TSO5_FW_START_ADDR          0x00010000
4636 #define TG3_TSO5_FW_TEXT_ADDR           0x00010000
4637 #define TG3_TSO5_FW_TEXT_LEN            0xe90
4638 #define TG3_TSO5_FW_RODATA_ADDR         0x00010e90
4639 #define TG3_TSO5_FW_RODATA_LEN          0x50
4640 #define TG3_TSO5_FW_DATA_ADDR           0x00010f00
4641 #define TG3_TSO5_FW_DATA_LEN            0x20
4642 #define TG3_TSO5_FW_SBSS_ADDR           0x00010f20
4643 #define TG3_TSO5_FW_SBSS_LEN            0x28
4644 #define TG3_TSO5_FW_BSS_ADDR            0x00010f50
4645 #define TG3_TSO5_FW_BSS_LEN             0x88
4646
4647 static u32 tg3Tso5FwText[(TG3_TSO5_FW_TEXT_LEN / 4) + 1] = {
4648         0x0c004003, 0x00000000, 0x00010f04, 0x00000000, 0x10000003, 0x00000000,
4649         0x0000000d, 0x0000000d, 0x3c1d0001, 0x37bde000, 0x03a0f021, 0x3c100001,
4650         0x26100000, 0x0c004010, 0x00000000, 0x0000000d, 0x27bdffe0, 0x3c04fefe,
4651         0xafbf0018, 0x0c0042e8, 0x34840002, 0x0c004364, 0x00000000, 0x3c030001,
4652         0x90630f34, 0x24020002, 0x3c040001, 0x24840e9c, 0x14620003, 0x24050001,
4653         0x3c040001, 0x24840e90, 0x24060002, 0x00003821, 0xafa00010, 0x0c004378,
4654         0xafa00014, 0x0c00402c, 0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4655         0x00000000, 0x00000000, 0x27bdffe0, 0xafbf001c, 0xafb20018, 0xafb10014,
4656         0x0c0042d4, 0xafb00010, 0x3c128000, 0x24110001, 0x8f706810, 0x32020400,
4657         0x10400007, 0x00000000, 0x8f641008, 0x00921024, 0x14400003, 0x00000000,
4658         0x0c004064, 0x00000000, 0x3c020001, 0x90420f56, 0x10510003, 0x32020200,
4659         0x1040fff1, 0x00000000, 0x0c0041b4, 0x00000000, 0x08004034, 0x00000000,
4660         0x8fbf001c, 0x8fb20018, 0x8fb10014, 0x8fb00010, 0x03e00008, 0x27bd0020,
4661         0x27bdffe0, 0x3c040001, 0x24840eb0, 0x00002821, 0x00003021, 0x00003821,
4662         0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0000d021, 0x24020130,
4663         0xaf625000, 0x3c010001, 0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018,
4664         0x03e00008, 0x27bd0020, 0x00000000, 0x00000000, 0x3c030001, 0x24630f60,
4665         0x90620000, 0x27bdfff0, 0x14400003, 0x0080c021, 0x08004073, 0x00004821,
4666         0x3c022000, 0x03021024, 0x10400003, 0x24090002, 0x08004073, 0xa0600000,
4667         0x24090001, 0x00181040, 0x30431f80, 0x346f8008, 0x1520004b, 0x25eb0028,
4668         0x3c040001, 0x00832021, 0x8c848010, 0x3c050001, 0x24a50f7a, 0x00041402,
4669         0xa0a20000, 0x3c010001, 0xa0240f7b, 0x3c020001, 0x00431021, 0x94428014,
4670         0x3c010001, 0xa0220f7c, 0x3c0c0001, 0x01836021, 0x8d8c8018, 0x304200ff,
4671         0x24420008, 0x000220c3, 0x24020001, 0x3c010001, 0xa0220f60, 0x0124102b,
4672         0x1040000c, 0x00003821, 0x24a6000e, 0x01602821, 0x8ca20000, 0x8ca30004,
4673         0x24a50008, 0x24e70001, 0xacc20000, 0xacc30004, 0x00e4102b, 0x1440fff8,
4674         0x24c60008, 0x00003821, 0x3c080001, 0x25080f7b, 0x91060000, 0x3c020001,
4675         0x90420f7c, 0x2503000d, 0x00c32821, 0x00461023, 0x00021fc2, 0x00431021,
4676         0x00021043, 0x1840000c, 0x00002021, 0x91020001, 0x00461023, 0x00021fc2,
4677         0x00431021, 0x00021843, 0x94a20000, 0x24e70001, 0x00822021, 0x00e3102a,
4678         0x1440fffb, 0x24a50002, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4679         0x00822021, 0x3c02ffff, 0x01821024, 0x3083ffff, 0x00431025, 0x3c010001,
4680         0x080040fa, 0xac220f80, 0x3c050001, 0x24a50f7c, 0x90a20000, 0x3c0c0001,
4681         0x01836021, 0x8d8c8018, 0x000220c2, 0x1080000e, 0x00003821, 0x01603021,
4682         0x24a5000c, 0x8ca20000, 0x8ca30004, 0x24a50008, 0x24e70001, 0xacc20000,
4683         0xacc30004, 0x00e4102b, 0x1440fff8, 0x24c60008, 0x3c050001, 0x24a50f7c,
4684         0x90a20000, 0x30430007, 0x24020004, 0x10620011, 0x28620005, 0x10400005,
4685         0x24020002, 0x10620008, 0x000710c0, 0x080040fa, 0x00000000, 0x24020006,
4686         0x1062000e, 0x000710c0, 0x080040fa, 0x00000000, 0x00a21821, 0x9463000c,
4687         0x004b1021, 0x080040fa, 0xa4430000, 0x000710c0, 0x00a21821, 0x8c63000c,
4688         0x004b1021, 0x080040fa, 0xac430000, 0x00a21821, 0x8c63000c, 0x004b2021,
4689         0x00a21021, 0xac830000, 0x94420010, 0xa4820004, 0x95e70006, 0x3c020001,
4690         0x90420f7c, 0x3c030001, 0x90630f7a, 0x00e2c823, 0x3c020001, 0x90420f7b,
4691         0x24630028, 0x01e34021, 0x24420028, 0x15200012, 0x01e23021, 0x94c2000c,
4692         0x3c010001, 0xa4220f78, 0x94c20004, 0x94c30006, 0x3c010001, 0xa4200f76,
4693         0x3c010001, 0xa4200f72, 0x00021400, 0x00431025, 0x3c010001, 0xac220f6c,
4694         0x95020004, 0x3c010001, 0x08004124, 0xa4220f70, 0x3c020001, 0x94420f70,
4695         0x3c030001, 0x94630f72, 0x00431021, 0xa5020004, 0x3c020001, 0x94420f6c,
4696         0xa4c20004, 0x3c020001, 0x8c420f6c, 0xa4c20006, 0x3c040001, 0x94840f72,
4697         0x3c020001, 0x94420f70, 0x3c0a0001, 0x954a0f76, 0x00441821, 0x3063ffff,
4698         0x0062182a, 0x24020002, 0x1122000b, 0x00832023, 0x3c030001, 0x94630f78,
4699         0x30620009, 0x10400006, 0x3062fff6, 0xa4c2000c, 0x3c020001, 0x94420f78,
4700         0x30420009, 0x01425023, 0x24020001, 0x1122001b, 0x29220002, 0x50400005,
4701         0x24020002, 0x11200007, 0x31a2ffff, 0x08004197, 0x00000000, 0x1122001d,
4702         0x24020016, 0x08004197, 0x31a2ffff, 0x3c0e0001, 0x95ce0f80, 0x10800005,
4703         0x01806821, 0x01c42021, 0x00041c02, 0x3082ffff, 0x00627021, 0x000e1027,
4704         0xa502000a, 0x3c030001, 0x90630f7b, 0x31a2ffff, 0x00e21021, 0x0800418d,
4705         0x00432023, 0x3c020001, 0x94420f80, 0x00442021, 0x00041c02, 0x3082ffff,
4706         0x00622021, 0x00807021, 0x00041027, 0x08004185, 0xa502000a, 0x3c050001,
4707         0x24a50f7a, 0x90a30000, 0x14620002, 0x24e2fff2, 0xa5e20034, 0x90a20000,
4708         0x00e21023, 0xa5020002, 0x3c030001, 0x94630f80, 0x3c020001, 0x94420f5a,
4709         0x30e5ffff, 0x00641821, 0x00451023, 0x00622023, 0x00041c02, 0x3082ffff,
4710         0x00622021, 0x00041027, 0xa502000a, 0x3c030001, 0x90630f7c, 0x24620001,
4711         0x14a20005, 0x00807021, 0x01631021, 0x90420000, 0x08004185, 0x00026200,
4712         0x24620002, 0x14a20003, 0x306200fe, 0x004b1021, 0x944c0000, 0x3c020001,
4713         0x94420f82, 0x3183ffff, 0x3c040001, 0x90840f7b, 0x00431021, 0x00e21021,
4714         0x00442023, 0x008a2021, 0x00041c02, 0x3082ffff, 0x00622021, 0x00041402,
4715         0x00822021, 0x00806821, 0x00041027, 0xa4c20010, 0x31a2ffff, 0x000e1c00,
4716         0x00431025, 0x3c040001, 0x24840f72, 0xade20010, 0x94820000, 0x3c050001,
4717         0x94a50f76, 0x3c030001, 0x8c630f6c, 0x24420001, 0x00b92821, 0xa4820000,
4718         0x3322ffff, 0x00622021, 0x0083182b, 0x3c010001, 0xa4250f76, 0x10600003,
4719         0x24a2ffff, 0x3c010001, 0xa4220f76, 0x3c024000, 0x03021025, 0x3c010001,
4720         0xac240f6c, 0xaf621008, 0x03e00008, 0x27bd0010, 0x3c030001, 0x90630f56,
4721         0x27bdffe8, 0x24020001, 0xafbf0014, 0x10620026, 0xafb00010, 0x8f620cf4,
4722         0x2442ffff, 0x3042007f, 0x00021100, 0x8c434000, 0x3c010001, 0xac230f64,
4723         0x8c434008, 0x24444000, 0x8c5c4004, 0x30620040, 0x14400002, 0x24020088,
4724         0x24020008, 0x3c010001, 0xa4220f68, 0x30620004, 0x10400005, 0x24020001,
4725         0x3c010001, 0xa0220f57, 0x080041d5, 0x00031402, 0x3c010001, 0xa0200f57,
4726         0x00031402, 0x3c010001, 0xa4220f54, 0x9483000c, 0x24020001, 0x3c010001,
4727         0xa4200f50, 0x3c010001, 0xa0220f56, 0x3c010001, 0xa4230f62, 0x24020001,
4728         0x1342001e, 0x00000000, 0x13400005, 0x24020003, 0x13420067, 0x00000000,
4729         0x080042cf, 0x00000000, 0x3c020001, 0x94420f62, 0x241a0001, 0x3c010001,
4730         0xa4200f5e, 0x3c010001, 0xa4200f52, 0x304407ff, 0x00021bc2, 0x00031823,
4731         0x3063003e, 0x34630036, 0x00021242, 0x3042003c, 0x00621821, 0x3c010001,
4732         0xa4240f58, 0x00832021, 0x24630030, 0x3c010001, 0xa4240f5a, 0x3c010001,
4733         0xa4230f5c, 0x3c060001, 0x24c60f52, 0x94c50000, 0x94c30002, 0x3c040001,
4734         0x94840f5a, 0x00651021, 0x0044102a, 0x10400013, 0x3c108000, 0x00a31021,
4735         0xa4c20000, 0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008,
4736         0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4,
4737         0x00501024, 0x104000b7, 0x00000000, 0x0800420f, 0x00000000, 0x3c030001,
4738         0x94630f50, 0x00851023, 0xa4c40000, 0x00621821, 0x3042ffff, 0x3c010001,
4739         0xa4230f50, 0xaf620ce8, 0x3c020001, 0x94420f68, 0x34420024, 0xaf620cec,
4740         0x94c30002, 0x3c020001, 0x94420f50, 0x14620012, 0x3c028000, 0x3c108000,
4741         0x3c02a000, 0xaf620cf4, 0x3c010001, 0xa0200f56, 0x8f641008, 0x00901024,
4742         0x14400003, 0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024,
4743         0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003, 0xaf620cf4, 0x3c108000,
4744         0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064, 0x00000000,
4745         0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x080042cf, 0x241a0003,
4746         0x3c070001, 0x24e70f50, 0x94e20000, 0x03821021, 0xaf620ce0, 0x3c020001,
4747         0x8c420f64, 0xaf620ce4, 0x3c050001, 0x94a50f54, 0x94e30000, 0x3c040001,
4748         0x94840f58, 0x3c020001, 0x94420f5e, 0x00a32823, 0x00822023, 0x30a6ffff,
4749         0x3083ffff, 0x00c3102b, 0x14400043, 0x00000000, 0x3c020001, 0x94420f5c,
4750         0x00021400, 0x00621025, 0xaf620ce8, 0x94e20000, 0x3c030001, 0x94630f54,
4751         0x00441021, 0xa4e20000, 0x3042ffff, 0x14430021, 0x3c020008, 0x3c020001,
4752         0x90420f57, 0x10400006, 0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624,
4753         0x0800427c, 0x0000d021, 0x3c020001, 0x94420f68, 0x3c030008, 0x34630624,
4754         0x00431025, 0xaf620cec, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4755         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4756         0x00000000, 0x8f620cf4, 0x00501024, 0x10400015, 0x00000000, 0x08004283,
4757         0x00000000, 0x3c030001, 0x94630f68, 0x34420624, 0x3c108000, 0x00621825,
4758         0x3c028000, 0xaf630cec, 0xaf620cf4, 0x8f641008, 0x00901024, 0x14400003,
4759         0x00000000, 0x0c004064, 0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7,
4760         0x00000000, 0x3c010001, 0x080042cf, 0xa4200f5e, 0x3c020001, 0x94420f5c,
4761         0x00021400, 0x00c21025, 0xaf620ce8, 0x3c020001, 0x90420f57, 0x10400009,
4762         0x3c03000c, 0x3c020001, 0x94420f68, 0x34630624, 0x0000d021, 0x00431025,
4763         0xaf620cec, 0x080042c1, 0x3c108000, 0x3c020001, 0x94420f68, 0x3c030008,
4764         0x34630604, 0x00431025, 0xaf620cec, 0x3c020001, 0x94420f5e, 0x00451021,
4765         0x3c010001, 0xa4220f5e, 0x3c108000, 0x3c02a000, 0xaf620cf4, 0x3c010001,
4766         0xa0200f56, 0x8f641008, 0x00901024, 0x14400003, 0x00000000, 0x0c004064,
4767         0x00000000, 0x8f620cf4, 0x00501024, 0x1440fff7, 0x00000000, 0x8fbf0014,
4768         0x8fb00010, 0x03e00008, 0x27bd0018, 0x00000000, 0x27bdffe0, 0x3c040001,
4769         0x24840ec0, 0x00002821, 0x00003021, 0x00003821, 0xafbf0018, 0xafa00010,
4770         0x0c004378, 0xafa00014, 0x0000d021, 0x24020130, 0xaf625000, 0x3c010001,
4771         0xa4200f50, 0x3c010001, 0xa0200f57, 0x8fbf0018, 0x03e00008, 0x27bd0020,
4772         0x27bdffe8, 0x3c1bc000, 0xafbf0014, 0xafb00010, 0xaf60680c, 0x8f626804,
4773         0x34420082, 0xaf626804, 0x8f634000, 0x24020b50, 0x3c010001, 0xac220f20,
4774         0x24020b78, 0x3c010001, 0xac220f30, 0x34630002, 0xaf634000, 0x0c004315,
4775         0x00808021, 0x3c010001, 0xa0220f34, 0x304200ff, 0x24030002, 0x14430005,
4776         0x00000000, 0x3c020001, 0x8c420f20, 0x08004308, 0xac5000c0, 0x3c020001,
4777         0x8c420f20, 0xac5000bc, 0x8f624434, 0x8f634438, 0x8f644410, 0x3c010001,
4778         0xac220f28, 0x3c010001, 0xac230f38, 0x3c010001, 0xac240f24, 0x8fbf0014,
4779         0x8fb00010, 0x03e00008, 0x27bd0018, 0x03e00008, 0x24020001, 0x27bdfff8,
4780         0x18800009, 0x00002821, 0x8f63680c, 0x8f62680c, 0x1043fffe, 0x00000000,
4781         0x24a50001, 0x00a4102a, 0x1440fff9, 0x00000000, 0x03e00008, 0x27bd0008,
4782         0x8f634450, 0x3c020001, 0x8c420f28, 0x00031c02, 0x0043102b, 0x14400008,
4783         0x3c038000, 0x3c040001, 0x8c840f38, 0x8f624450, 0x00021c02, 0x0083102b,
4784         0x1040fffc, 0x3c038000, 0xaf634444, 0x8f624444, 0x00431024, 0x1440fffd,
4785         0x00000000, 0x8f624448, 0x03e00008, 0x3042ffff, 0x3082ffff, 0x2442e000,
4786         0x2c422001, 0x14400003, 0x3c024000, 0x08004347, 0x2402ffff, 0x00822025,
4787         0xaf645c38, 0x8f625c30, 0x30420002, 0x1440fffc, 0x00001021, 0x03e00008,
4788         0x00000000, 0x8f624450, 0x3c030001, 0x8c630f24, 0x08004350, 0x3042ffff,
4789         0x8f624450, 0x3042ffff, 0x0043102b, 0x1440fffc, 0x00000000, 0x03e00008,
4790         0x00000000, 0x27bdffe0, 0x00802821, 0x3c040001, 0x24840ed0, 0x00003021,
4791         0x00003821, 0xafbf0018, 0xafa00010, 0x0c004378, 0xafa00014, 0x0800435f,
4792         0x00000000, 0x8fbf0018, 0x03e00008, 0x27bd0020, 0x3c020001, 0x3442d600,
4793         0x3c030001, 0x3463d600, 0x3c040001, 0x3484ddff, 0x3c010001, 0xac220f40,
4794         0x24020040, 0x3c010001, 0xac220f44, 0x3c010001, 0xac200f3c, 0xac600000,
4795         0x24630004, 0x0083102b, 0x5040fffd, 0xac600000, 0x03e00008, 0x00000000,
4796         0x00804821, 0x8faa0010, 0x3c020001, 0x8c420f3c, 0x3c040001, 0x8c840f44,
4797         0x8fab0014, 0x24430001, 0x0044102b, 0x3c010001, 0xac230f3c, 0x14400003,
4798         0x00004021, 0x3c010001, 0xac200f3c, 0x3c020001, 0x8c420f3c, 0x3c030001,
4799         0x8c630f40, 0x91240000, 0x00021140, 0x00431021, 0x00481021, 0x25080001,
4800         0xa0440000, 0x29020008, 0x1440fff4, 0x25290001, 0x3c020001, 0x8c420f3c,
4801         0x3c030001, 0x8c630f40, 0x8f64680c, 0x00021140, 0x00431021, 0xac440008,
4802         0xac45000c, 0xac460010, 0xac470014, 0xac4a0018, 0x03e00008, 0xac4b001c,
4803         0x00000000, 0x00000000, 0x00000000,
4804 };
4805
4806 static u32 tg3Tso5FwRodata[(TG3_TSO5_FW_RODATA_LEN / 4) + 1] = {
4807         0x4d61696e, 0x43707542, 0x00000000, 0x4d61696e, 0x43707541, 0x00000000,
4808         0x00000000, 0x00000000, 0x73746b6f, 0x66666c64, 0x00000000, 0x00000000,
4809         0x73746b6f, 0x66666c64, 0x00000000, 0x00000000, 0x66617461, 0x6c457272,
4810         0x00000000, 0x00000000, 0x00000000,
4811 };
4812
4813 static u32 tg3Tso5FwData[(TG3_TSO5_FW_DATA_LEN / 4) + 1] = {
4814         0x00000000, 0x73746b6f, 0x66666c64, 0x5f76312e, 0x322e3000, 0x00000000,
4815         0x00000000, 0x00000000, 0x00000000,
4816 };
4817
4818 /* tp->lock is held. */
4819 static int tg3_load_tso_firmware(struct tg3 *tp)
4820 {
4821         struct fw_info info;
4822         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
4823         int err, i;
4824
4825         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
4826                 return 0;
4827
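        /* 5705-class chips run the TSO firmware on the RX CPU and stage it
         * in SRAM carved out of the mbuf pool; all other chips load it into
         * the TX CPU scratch area.
         */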
4828         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
4829                 info.text_base = TG3_TSO5_FW_TEXT_ADDR;
4830                 info.text_len = TG3_TSO5_FW_TEXT_LEN;
4831                 info.text_data = &tg3Tso5FwText[0];
4832                 info.rodata_base = TG3_TSO5_FW_RODATA_ADDR;
4833                 info.rodata_len = TG3_TSO5_FW_RODATA_LEN;
4834                 info.rodata_data = &tg3Tso5FwRodata[0];
4835                 info.data_base = TG3_TSO5_FW_DATA_ADDR;
4836                 info.data_len = TG3_TSO5_FW_DATA_LEN;
4837                 info.data_data = &tg3Tso5FwData[0];
4838                 cpu_base = RX_CPU_BASE;
4839                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
4840                 cpu_scratch_size = (info.text_len +
4841                                     info.rodata_len +
4842                                     info.data_len +
4843                                     TG3_TSO5_FW_SBSS_LEN +
4844                                     TG3_TSO5_FW_BSS_LEN);
4845         } else {
4846                 info.text_base = TG3_TSO_FW_TEXT_ADDR;
4847                 info.text_len = TG3_TSO_FW_TEXT_LEN;
4848                 info.text_data = &tg3TsoFwText[0];
4849                 info.rodata_base = TG3_TSO_FW_RODATA_ADDR;
4850                 info.rodata_len = TG3_TSO_FW_RODATA_LEN;
4851                 info.rodata_data = &tg3TsoFwRodata[0];
4852                 info.data_base = TG3_TSO_FW_DATA_ADDR;
4853                 info.data_len = TG3_TSO_FW_DATA_LEN;
4854                 info.data_data = &tg3TsoFwData[0];
4855                 cpu_base = TX_CPU_BASE;
4856                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
4857                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
4858         }
4859
4860         err = tg3_load_firmware_cpu(tp, cpu_base,
4861                                     cpu_scratch_base, cpu_scratch_size,
4862                                     &info);
4863         if (err)
4864                 return err;
4865
4866         /* Now start up the CPU. */
4867         tw32(cpu_base + CPU_STATE, 0xffffffff);
4868         tw32_f(cpu_base + CPU_PC,    info.text_base);
4869
4870         for (i = 0; i < 5; i++) {
4871                 if (tr32(cpu_base + CPU_PC) == info.text_base)
4872                         break;
4873                 tw32(cpu_base + CPU_STATE, 0xffffffff);
4874                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
4875                 tw32_f(cpu_base + CPU_PC,    info.text_base);
4876                 udelay(1000);
4877         }
4878         if (i >= 5) {
4879                 printk(KERN_ERR PFX "tg3_load_tso_firmware fails to set CPU PC "
4880                        "for %s: is %08x, should be %08x\n",
4881                        tp->dev->name, tr32(cpu_base + CPU_PC),
4882                        info.text_base);
4883                 return -ENODEV;
4884         }
4885         tw32(cpu_base + CPU_STATE, 0xffffffff);
4886         tw32_f(cpu_base + CPU_MODE,  0x00000000);
4887         return 0;
4888 }
4889
4890 #endif /* TG3_TSO_SUPPORT != 0 */
4891
4892 /* tp->lock is held. */
4893 static void __tg3_set_mac_addr(struct tg3 *tp)
4894 {
4895         u32 addr_high, addr_low;
4896         int i;
4897
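        /* The hardware takes the station address as a high word holding
         * bytes 0-1 and a low word holding bytes 2-5; e.g. for
         * 00:10:18:aa:bb:cc, addr_high = 0x0010 and addr_low = 0x18aabbcc.
         * The same value is written to all four MAC address slots (plus the
         * twelve extended slots on 5703/5704), and the byte sum seeds the
         * TX backoff generator.
         */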
4898         addr_high = ((tp->dev->dev_addr[0] << 8) |
4899                      tp->dev->dev_addr[1]);
4900         addr_low = ((tp->dev->dev_addr[2] << 24) |
4901                     (tp->dev->dev_addr[3] << 16) |
4902                     (tp->dev->dev_addr[4] <<  8) |
4903                     (tp->dev->dev_addr[5] <<  0));
4904         for (i = 0; i < 4; i++) {
4905                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
4906                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
4907         }
4908
4909         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4910             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
4911                 for (i = 0; i < 12; i++) {
4912                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
4913                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
4914                 }
4915         }
4916
4917         addr_high = (tp->dev->dev_addr[0] +
4918                      tp->dev->dev_addr[1] +
4919                      tp->dev->dev_addr[2] +
4920                      tp->dev->dev_addr[3] +
4921                      tp->dev->dev_addr[4] +
4922                      tp->dev->dev_addr[5]) &
4923                 TX_BACKOFF_SEED_MASK;
4924         tw32(MAC_TX_BACKOFF_SEED, addr_high);
4925 }
4926
4927 static int tg3_set_mac_addr(struct net_device *dev, void *p)
4928 {
4929         struct tg3 *tp = netdev_priv(dev);
4930         struct sockaddr *addr = p;
4931
4932         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
4933
4934         spin_lock_irq(&tp->lock);
4935         __tg3_set_mac_addr(tp);
4936         spin_unlock_irq(&tp->lock);
4937
4938         return 0;
4939 }
4940
4941 /* tp->lock is held. */
4942 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
4943                            dma_addr_t mapping, u32 maxlen_flags,
4944                            u32 nic_addr)
4945 {
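        /* A bdinfo block in NIC SRAM describes one ring: the 64-bit host
         * address (high word then low word), a maxlen/flags word and, on
         * pre-5705 chips only, the ring's address in NIC memory.
         */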
4946         tg3_write_mem(tp,
4947                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
4948                       ((u64) mapping >> 32));
4949         tg3_write_mem(tp,
4950                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
4951                       ((u64) mapping & 0xffffffff));
4952         tg3_write_mem(tp,
4953                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
4954                        maxlen_flags);
4955
4956         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
4957                 tg3_write_mem(tp,
4958                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
4959                               nic_addr);
4960 }
4961
4962 static void __tg3_set_rx_mode(struct net_device *);
4963
4964 /* tp->lock is held. */
4965 static int tg3_reset_hw(struct tg3 *tp)
4966 {
4967         u32 val, rdmac_mode;
4968         int i, err, limit;
4969
4970         tg3_disable_ints(tp);
4971
4972         tg3_stop_fw(tp);
4973
4974         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
4975
4976         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE) {
4977                 err = tg3_abort_hw(tp);
4978                 if (err)
4979                         return err;
4980         }
4981
4982         err = tg3_chip_reset(tp);
4983         if (err)
4984                 return err;
4985
4986         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
4987
4988         /* This works around an issue with Athlon chipsets on
4989          * B3 tigon3 silicon.  This bit has no effect on any
4990          * other revision.  But do not set this on PCI Express
4991          * chips.
4992          */
4993         if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
4994                 tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
4995         tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
4996
4997         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
4998             (tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
4999                 val = tr32(TG3PCI_PCISTATE);
5000                 val |= PCISTATE_RETRY_SAME_DMA;
5001                 tw32(TG3PCI_PCISTATE, val);
5002         }
5003
5004         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
5005                 /* Enable some hw fixes.  */
5006                 val = tr32(TG3PCI_MSI_DATA);
5007                 val |= (1 << 26) | (1 << 28) | (1 << 29);
5008                 tw32(TG3PCI_MSI_DATA, val);
5009         }
5010
5011         /* Descriptor ring init may make accesses to the
5012          * NIC SRAM area to setup the TX descriptors, so we
5013          * can only do this after the hardware has been
5014          * successfully reset.
5015          */
5016         tg3_init_rings(tp);
5017
5018         /* This value is determined during the probe time DMA
5019          * engine test, tg3_test_dma.
5020          */
5021         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
5022
5023         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
5024                           GRC_MODE_4X_NIC_SEND_RINGS |
5025                           GRC_MODE_NO_TX_PHDR_CSUM |
5026                           GRC_MODE_NO_RX_PHDR_CSUM);
5027         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
5028         if (tp->tg3_flags & TG3_FLAG_NO_TX_PSEUDO_CSUM)
5029                 tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
5030         if (tp->tg3_flags & TG3_FLAG_NO_RX_PSEUDO_CSUM)
5031                 tp->grc_mode |= GRC_MODE_NO_RX_PHDR_CSUM;
5032
5033         tw32(GRC_MODE,
5034              tp->grc_mode |
5035              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
5036
5037         /* Setup the timer prescaler register.  Clock is always 66 MHz. */
5038         val = tr32(GRC_MISC_CFG);
5039         val &= ~0xff;
5040         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
5041         tw32(GRC_MISC_CFG, val);
5042
5043         /* Initialize MBUF/DESC pool. */
5044         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5045                 /* Do nothing.  */
5046         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
5047                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
5048                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
5049                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
5050                 else
5051                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
5052                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
5053                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
5054         }
5055 #if TG3_TSO_SUPPORT != 0
5056         else if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5057                 int fw_len;
5058
5059                 fw_len = (TG3_TSO5_FW_TEXT_LEN +
5060                           TG3_TSO5_FW_RODATA_LEN +
5061                           TG3_TSO5_FW_DATA_LEN +
5062                           TG3_TSO5_FW_SBSS_LEN +
5063                           TG3_TSO5_FW_BSS_LEN);
5064                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
5065                 tw32(BUFMGR_MB_POOL_ADDR,
5066                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
5067                 tw32(BUFMGR_MB_POOL_SIZE,
5068                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
5069         }
5070 #endif
5071
5072         if (!(tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE)) {
5073                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5074                      tp->bufmgr_config.mbuf_read_dma_low_water);
5075                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5076                      tp->bufmgr_config.mbuf_mac_rx_low_water);
5077                 tw32(BUFMGR_MB_HIGH_WATER,
5078                      tp->bufmgr_config.mbuf_high_water);
5079         } else {
5080                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
5081                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
5082                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
5083                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
5084                 tw32(BUFMGR_MB_HIGH_WATER,
5085                      tp->bufmgr_config.mbuf_high_water_jumbo);
5086         }
5087         tw32(BUFMGR_DMA_LOW_WATER,
5088              tp->bufmgr_config.dma_low_water);
5089         tw32(BUFMGR_DMA_HIGH_WATER,
5090              tp->bufmgr_config.dma_high_water);
5091
5092         tw32(BUFMGR_MODE, BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE);
5093         for (i = 0; i < 2000; i++) {
5094                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
5095                         break;
5096                 udelay(10);
5097         }
5098         if (i >= 2000) {
5099                 printk(KERN_ERR PFX "tg3_reset_hw cannot enable BUFMGR for %s.\n",
5100                        tp->dev->name);
5101                 return -ENODEV;
5102         }
5103
5104         /* Setup replenish threshold. */
5105         tw32(RCVBDI_STD_THRESH, tp->rx_pending / 8);
5106
5107         /* Initialize TG3_BDINFO's at:
5108          *  RCVDBDI_STD_BD:     standard eth size rx ring
5109          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
5110          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
5111          *
5112          * like so:
5113          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
5114          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
5115          *                              ring attribute flags
5116          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
5117          *
5118          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
5119          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
5120          *
5121          * The size of each ring is fixed in the firmware, but the location is
5122          * configurable.
5123          */
5124         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5125              ((u64) tp->rx_std_mapping >> 32));
5126         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5127              ((u64) tp->rx_std_mapping & 0xffffffff));
5128         tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
5129              NIC_SRAM_RX_BUFFER_DESC);
5130
5131         /* Don't even try to program the JUMBO/MINI buffer descriptor
5132          * configs on 5705.
5133          */
5134         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
5135                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5136                      RX_STD_MAX_SIZE_5705 << BDINFO_FLAGS_MAXLEN_SHIFT);
5137         } else {
5138                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS,
5139                      RX_STD_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5140
5141                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
5142                      BDINFO_FLAGS_DISABLED);
5143
5144                 /* Setup replenish threshold. */
5145                 tw32(RCVBDI_JUMBO_THRESH, tp->rx_jumbo_pending / 8);
5146
5147                 if (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) {
5148                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
5149                              ((u64) tp->rx_jumbo_mapping >> 32));
5150                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
5151                              ((u64) tp->rx_jumbo_mapping & 0xffffffff));
5152                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5153                              RX_JUMBO_MAX_SIZE << BDINFO_FLAGS_MAXLEN_SHIFT);
5154                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
5155                              NIC_SRAM_RX_JUMBO_BUFFER_DESC);
5156                 } else {
5157                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
5158                              BDINFO_FLAGS_DISABLED);
5159                 }
5160
5161         }
5162
5163         /* There is only one send ring on 5705/5750, no need to explicitly
5164          * disable the others.
5165          */
5166         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5167                 /* Clear out send RCB ring in SRAM. */
5168                 for (i = NIC_SRAM_SEND_RCB; i < NIC_SRAM_RCV_RET_RCB; i += TG3_BDINFO_SIZE)
5169                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5170                                       BDINFO_FLAGS_DISABLED);
5171         }
5172
5173         tp->tx_prod = 0;
5174         tp->tx_cons = 0;
5175         tw32_mailbox(MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5176         tw32_tx_mbox(MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW, 0);
5177
5178         tg3_set_bdinfo(tp, NIC_SRAM_SEND_RCB,
5179                        tp->tx_desc_mapping,
5180                        (TG3_TX_RING_SIZE <<
5181                         BDINFO_FLAGS_MAXLEN_SHIFT),
5182                        NIC_SRAM_TX_BUFFER_DESC);
5183
5184         /* There is only one receive return ring on 5705/5750, no need
5185          * to explicitly disable the others.
5186          */
5187         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5188                 for (i = NIC_SRAM_RCV_RET_RCB; i < NIC_SRAM_STATS_BLK;
5189                      i += TG3_BDINFO_SIZE) {
5190                         tg3_write_mem(tp, i + TG3_BDINFO_MAXLEN_FLAGS,
5191                                       BDINFO_FLAGS_DISABLED);
5192                 }
5193         }
5194
5195         tp->rx_rcb_ptr = 0;
5196         tw32_rx_mbox(MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW, 0);
5197
5198         tg3_set_bdinfo(tp, NIC_SRAM_RCV_RET_RCB,
5199                        tp->rx_rcb_mapping,
5200                        (TG3_RX_RCB_RING_SIZE(tp) <<
5201                         BDINFO_FLAGS_MAXLEN_SHIFT),
5202                        0);
5203
5204         tp->rx_std_ptr = tp->rx_pending;
5205         tw32_rx_mbox(MAILBOX_RCV_STD_PROD_IDX + TG3_64BIT_REG_LOW,
5206                      tp->rx_std_ptr);
5207
5208         tp->rx_jumbo_ptr = (tp->tg3_flags & TG3_FLAG_JUMBO_ENABLE) ?
5209                                                 tp->rx_jumbo_pending : 0;
5210         tw32_rx_mbox(MAILBOX_RCV_JUMBO_PROD_IDX + TG3_64BIT_REG_LOW,
5211                      tp->rx_jumbo_ptr);
5212
5213         /* Initialize MAC address and backoff seed. */
5214         __tg3_set_mac_addr(tp);
5215
5216         /* MTU + ethernet header + FCS + optional VLAN tag */
5217         tw32(MAC_RX_MTU_SIZE, tp->dev->mtu + ETH_HLEN + 8);
5218
5219         /* The slot time is changed by tg3_setup_phy if we
5220          * run at gigabit with half duplex.
5221          */
5222         tw32(MAC_TX_LENGTHS,
5223              (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5224              (6 << TX_LENGTHS_IPG_SHIFT) |
5225              (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5226
5227         /* Receive rules. */
5228         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
5229         tw32(RCVLPC_CONFIG, 0x0181);
5230
5231         /* Calculate RDMAC_MODE setting early, we need it to determine
5232          * the RCVLPC_STATE_ENABLE mask.
5233          */
5234         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
5235                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
5236                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
5237                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
5238                       RDMAC_MODE_LNGREAD_ENAB);
5239         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5240                 rdmac_mode |= RDMAC_MODE_SPLIT_ENABLE;
5241         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5242              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5243             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)) {
5244                 if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE &&
5245                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5246                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5247                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
5248                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5249                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788)) {
5250                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
5251                 }
5252         }
5253
5254 #if TG3_TSO_SUPPORT != 0
5255         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5256                 rdmac_mode |= (1 << 27);
5257 #endif
5258
5259         /* Receive/send statistics. */
5260         if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
5261             (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
5262                 val = tr32(RCVLPC_STATS_ENABLE);
5263                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
5264                 tw32(RCVLPC_STATS_ENABLE, val);
5265         } else {
5266                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
5267         }
5268         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
5269         tw32(SNDDATAI_STATSENAB, 0xffffff);
5270         tw32(SNDDATAI_STATSCTRL,
5271              (SNDDATAI_SCTRL_ENABLE |
5272               SNDDATAI_SCTRL_FASTUPD));
5273
5274         /* Setup host coalescing engine. */
5275         tw32(HOSTCC_MODE, 0);
5276         for (i = 0; i < 2000; i++) {
5277                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
5278                         break;
5279                 udelay(10);
5280         }
5281
5282         tw32(HOSTCC_RXCOL_TICKS, 0);
5283         tw32(HOSTCC_TXCOL_TICKS, LOW_TXCOL_TICKS);
5284         tw32(HOSTCC_RXMAX_FRAMES, 1);
5285         tw32(HOSTCC_TXMAX_FRAMES, LOW_RXMAX_FRAMES);
5286         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5287                 tw32(HOSTCC_RXCOAL_TICK_INT, 0);
5288                 tw32(HOSTCC_TXCOAL_TICK_INT, 0);
5289         }
5290         tw32(HOSTCC_RXCOAL_MAXF_INT, 1);
5291         tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
5292
5293         /* set status block DMA address */
5294         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5295              ((u64) tp->status_mapping >> 32));
5296         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5297              ((u64) tp->status_mapping & 0xffffffff));
5298
5299         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5300                 /* Status/statistics block address.  See tg3_timer,
5301                  * the tg3_periodic_fetch_stats call there, and
5302                  * tg3_get_stats to see how this works for 5705/5750 chips.
5303                  */
5304                 tw32(HOSTCC_STAT_COAL_TICKS,
5305                      DEFAULT_STAT_COAL_TICKS);
5306                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
5307                      ((u64) tp->stats_mapping >> 32));
5308                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
5309                      ((u64) tp->stats_mapping & 0xffffffff));
5310                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
5311                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
5312         }
5313
5314         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
5315
5316         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
5317         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
5318         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5319                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
5320
5321         /* Clear statistics/status block in chip, and status block in ram. */
5322         for (i = NIC_SRAM_STATS_BLK;
5323              i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
5324              i += sizeof(u32)) {
5325                 tg3_write_mem(tp, i, 0);
5326                 udelay(40);
5327         }
5328         memset(tp->hw_status, 0, TG3_HW_STATUS_SIZE);
5329
5330         tp->mac_mode = MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
5331                 MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE | MAC_MODE_FHDE_ENABLE;
5332         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
5333         udelay(40);
5334
5335         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
5336         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
5337                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
5338                                        GRC_LCLCTRL_GPIO_OUTPUT1);
5339         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
5340         udelay(100);
5341
5342         tw32_mailbox(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0);
5343         tr32(MAILBOX_INTERRUPT_0);
5344
5345         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS)) {
5346                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
5347                 udelay(40);
5348         }
5349
5350         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
5351                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
5352                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
5353                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
5354                WDMAC_MODE_LNGREAD_ENAB);
5355
5356         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
5357              tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) ||
5358             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
5359                 if ((tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
5360                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
5361                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
5362                         /* nothing */
5363                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
5364                            !(tp->tg3_flags2 & TG3_FLG2_IS_5788) &&
5365                            !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
5366                         val |= WDMAC_MODE_RX_ACCEL;
5367                 }
5368         }
5369
5370         tw32_f(WDMAC_MODE, val);
5371         udelay(40);
5372
5373         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0) {
5374                 val = tr32(TG3PCI_X_CAPS);
5375                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
5376                         val &= ~PCIX_CAPS_BURST_MASK;
5377                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5378                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
5379                         val &= ~(PCIX_CAPS_SPLIT_MASK | PCIX_CAPS_BURST_MASK);
5380                         val |= (PCIX_CAPS_MAX_BURST_CPIOB << PCIX_CAPS_BURST_SHIFT);
5381                         if (tp->tg3_flags & TG3_FLAG_SPLIT_MODE)
5382                                 val |= (tp->split_mode_max_reqs <<
5383                                         PCIX_CAPS_SPLIT_SHIFT);
5384                 }
5385                 tw32(TG3PCI_X_CAPS, val);
5386         }
5387
5388         tw32_f(RDMAC_MODE, rdmac_mode);
5389         udelay(40);
5390
5391         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
5392         if (!(tp->tg3_flags2 & TG3_FLG2_5705_PLUS))
5393                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
5394         tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
5395         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
5396         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
5397         tw32(RCVDBDI_MODE, RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ);
5398         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
5399 #if TG3_TSO_SUPPORT != 0
5400         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO)
5401                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
5402 #endif
5403         tw32(SNDBDI_MODE, SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE);
5404         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
5405
5406         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
5407                 err = tg3_load_5701_a0_firmware_fix(tp);
5408                 if (err)
5409                         return err;
5410         }
5411
5412 #if TG3_TSO_SUPPORT != 0
5413         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) {
5414                 err = tg3_load_tso_firmware(tp);
5415                 if (err)
5416                         return err;
5417         }
5418 #endif
5419
5420         tp->tx_mode = TX_MODE_ENABLE;
5421         tw32_f(MAC_TX_MODE, tp->tx_mode);
5422         udelay(100);
5423
5424         tp->rx_mode = RX_MODE_ENABLE;
5425         tw32_f(MAC_RX_MODE, tp->rx_mode);
5426         udelay(10);
5427
5428         if (tp->link_config.phy_is_low_power) {
5429                 tp->link_config.phy_is_low_power = 0;
5430                 tp->link_config.speed = tp->link_config.orig_speed;
5431                 tp->link_config.duplex = tp->link_config.orig_duplex;
5432                 tp->link_config.autoneg = tp->link_config.orig_autoneg;
5433         }
5434
5435         tp->mi_mode = MAC_MI_MODE_BASE;
5436         tw32_f(MAC_MI_MODE, tp->mi_mode);
5437         udelay(80);
5438
5439         tw32(MAC_LED_CTRL, tp->led_ctrl);
5440
5441         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
5442         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5443                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
5444                 udelay(10);
5445         }
5446         tw32_f(MAC_RX_MODE, tp->rx_mode);
5447         udelay(10);
5448
5449         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
5450                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
5451                         !(tp->tg3_flags2 & TG3_FLG2_SERDES_PREEMPHASIS)) {
5452                         /* Set drive transmission level to 1.2V, but only
5453                          * if the signal pre-emphasis bit is not set.  */
5454                         val = tr32(MAC_SERDES_CFG);
5455                         val &= 0xfffff000;
5456                         val |= 0x880;
5457                         tw32(MAC_SERDES_CFG, val);
5458                 }
5459                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
5460                         tw32(MAC_SERDES_CFG, 0x616000);
5461         }
5462
5463         /* Prevent chip from dropping frames when flow control
5464          * is enabled.
5465          */
5466         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, 2);
5467
5468         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
5469             (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5470                 /* Use hardware link auto-negotiation */
5471                 tp->tg3_flags2 |= TG3_FLG2_HW_AUTONEG;
5472         }
5473
5474         err = tg3_setup_phy(tp, 1);
5475         if (err)
5476                 return err;
5477
5478         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
5479                 u32 tmp;
5480
5481                 /* Clear CRC stats. */
5482                 if (!tg3_readphy(tp, 0x1e, &tmp)) {
5483                         tg3_writephy(tp, 0x1e, tmp | 0x8000);
5484                         tg3_readphy(tp, 0x14, &tmp);
5485                 }
5486         }
5487
5488         __tg3_set_rx_mode(tp->dev);
5489
5490         /* Initialize receive rules. */
5491         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
5492         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
5493         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
5494         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
5495
5496         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5497                 limit = 8;
5498         else
5499                 limit = 16;
5500         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF)
5501                 limit -= 4;
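        /* The cases below fall through on purpose: starting at 'limit' clears
         * every receive rule/value pair above those left in use (four fewer
         * are cleared when ASF is enabled, presumably because the firmware
         * owns them).
         */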
5502         switch (limit) {
5503         case 16:
5504                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
5505         case 15:
5506                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
5507         case 14:
5508                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
5509         case 13:
5510                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
5511         case 12:
5512                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
5513         case 11:
5514                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
5515         case 10:
5516                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
5517         case 9:
5518                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
5519         case 8:
5520                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
5521         case 7:
5522                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
5523         case 6:
5524                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
5525         case 5:
5526                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
5527         case 4:
5528                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
5529         case 3:
5530                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
5531         case 2:
5532         case 1:
5533
5534         default:
5535                 break;
5536         }
5537
5538         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
5539
5540         if (tp->tg3_flags & TG3_FLAG_INIT_COMPLETE)
5541                 tg3_enable_ints(tp);
5542
5543         return 0;
5544 }
5545
5546 /* Called at device open time to get the chip ready for
5547  * packet processing.  Invoked with tp->lock held.
5548  */
5549 static int tg3_init_hw(struct tg3 *tp)
5550 {
5551         int err;
5552
5553         /* Force the chip into D0. */
5554         err = tg3_set_power_state(tp, 0);
5555         if (err)
5556                 goto out;
5557
5558         tg3_switch_clocks(tp);
5559
5560         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
5561
5562         err = tg3_reset_hw(tp);
5563
5564 out:
5565         return err;
5566 }
5567
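/* Accumulate a 32-bit hardware counter into a 64-bit high/low statistic.
 * If the low word ends up smaller than the value just added, the 32-bit
 * sum wrapped, so carry one into the high word.
 */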
5568 #define TG3_STAT_ADD32(PSTAT, REG) \
5569 do {    u32 __val = tr32(REG); \
5570         (PSTAT)->low += __val; \
5571         if ((PSTAT)->low < __val) \
5572                 (PSTAT)->high += 1; \
5573 } while (0)
5574
5575 static void tg3_periodic_fetch_stats(struct tg3 *tp)
5576 {
5577         struct tg3_hw_stats *sp = tp->hw_stats;
5578
5579         if (!netif_carrier_ok(tp->dev))
5580                 return;
5581
5582         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
5583         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
5584         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
5585         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
5586         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
5587         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
5588         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
5589         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
5590         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
5591         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
5592         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
5593         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
5594         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
5595
5596         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
5597         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
5598         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
5599         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
5600         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
5601         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
5602         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
5603         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
5604         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
5605         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
5606         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
5607         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
5608         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
5609         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
5610 }
5611
5612 static void tg3_timer(unsigned long __opaque)
5613 {
5614         struct tg3 *tp = (struct tg3 *) __opaque;
5615         unsigned long flags;
5616
5617         spin_lock_irqsave(&tp->lock, flags);
5618         spin_lock(&tp->tx_lock);
5619
5620         /* All of this is necessary because, with non-tagged IRQ status,
5621          * the mailbox/status_block protocol the chip uses with the CPU is
5622          * race prone: force an interrupt if an update is already pending,
5623          * otherwise ask the coalescing engine to update the block now.  */
5624         if (tp->hw_status->status & SD_STATUS_UPDATED) {
5625                 tw32(GRC_LOCAL_CTRL,
5626                      tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
5627         } else {
5628                 tw32(HOSTCC_MODE, tp->coalesce_mode |
5629                      (HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW));
5630         }
5631
5632         if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
5633                 tp->tg3_flags2 |= TG3_FLG2_RESTART_TIMER;
5634                 spin_unlock(&tp->tx_lock);
5635                 spin_unlock_irqrestore(&tp->lock, flags);
5636                 schedule_work(&tp->reset_task);
5637                 return;
5638         }
5639
5640         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS)
5641                 tg3_periodic_fetch_stats(tp);
5642
5643         /* This part only runs once per second. */
5644         if (!--tp->timer_counter) {
5645                 if (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) {
5646                         u32 mac_stat;
5647                         int phy_event;
5648
5649                         mac_stat = tr32(MAC_STATUS);
5650
5651                         phy_event = 0;
5652                         if (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) {
5653                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
5654                                         phy_event = 1;
5655                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
5656                                 phy_event = 1;
5657
5658                         if (phy_event)
5659                                 tg3_setup_phy(tp, 0);
5660                 } else if (tp->tg3_flags & TG3_FLAG_POLL_SERDES) {
5661                         u32 mac_stat = tr32(MAC_STATUS);
5662                         int need_setup = 0;
5663
5664                         if (netif_carrier_ok(tp->dev) &&
5665                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
5666                                 need_setup = 1;
5667                         }
5668                         if (!netif_carrier_ok(tp->dev) &&
5669                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
5670                                          MAC_STATUS_SIGNAL_DET))) {
5671                                 need_setup = 1;
5672                         }
5673                         if (need_setup) {
5674                                 tw32_f(MAC_MODE,
5675                                      (tp->mac_mode &
5676                                       ~MAC_MODE_PORT_MODE_MASK));
5677                                 udelay(40);
5678                                 tw32_f(MAC_MODE, tp->mac_mode);
5679                                 udelay(40);
5680                                 tg3_setup_phy(tp, 0);
5681                         }
5682                 }
5683
5684                 tp->timer_counter = tp->timer_multiplier;
5685         }
5686
5687         /* Heartbeat is only sent once every 120 seconds.  */
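        /* The heartbeat posts FWCMD_NICDRV_ALIVE, a length of 4 and a data
         * word of 3 into the firmware mailboxes, then raises bit 14 of
         * GRC_RX_CPU_EVENT to raise an event for the on-chip RX CPU.
         */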
5688         if (!--tp->asf_counter) {
5689                 if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
5690                         u32 val;
5691
5692                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_ALIVE);
5693                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
5694                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX, 3);
5695                         val = tr32(GRC_RX_CPU_EVENT);
5696                         val |= (1 << 14);
5697                         tw32(GRC_RX_CPU_EVENT, val);
5698                 }
5699                 tp->asf_counter = tp->asf_multiplier;
5700         }
5701
5702         spin_unlock(&tp->tx_lock);
5703         spin_unlock_irqrestore(&tp->lock, flags);
5704
5705         tp->timer.expires = jiffies + tp->timer_offset;
5706         add_timer(&tp->timer);
5707 }
5708
5709 static int tg3_open(struct net_device *dev)
5710 {
5711         struct tg3 *tp = netdev_priv(dev);
5712         int err;
5713
5714         spin_lock_irq(&tp->lock);
5715         spin_lock(&tp->tx_lock);
5716
5717         tg3_disable_ints(tp);
5718         tp->tg3_flags &= ~TG3_FLAG_INIT_COMPLETE;
5719
5720         spin_unlock(&tp->tx_lock);
5721         spin_unlock_irq(&tp->lock);
5722
5723         /* The placement of this call is tied
5724          * to the setup and use of Host TX descriptors.
5725          */
5726         err = tg3_alloc_consistent(tp);
5727         if (err)
5728                 return err;
5729
5730         err = request_irq(dev->irq, tg3_interrupt,
5731                           SA_SHIRQ, dev->name, dev);
5732
5733         if (err) {
5734                 tg3_free_consistent(tp);
5735                 return err;
5736         }
5737
5738         spin_lock_irq(&tp->lock);
5739         spin_lock(&tp->tx_lock);
5740
5741         err = tg3_init_hw(tp);
5742         if (err) {
5743                 tg3_halt(tp);
5744                 tg3_free_rings(tp);
5745         } else {
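                /* The timer fires every HZ/10 jiffies: a timer_counter of 10
                 * runs the once-per-second work in tg3_timer() on every tenth
                 * tick, and an asf_counter of 10 * 120 spaces the ASF
                 * heartbeat 120 seconds apart.
                 */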
5746                 tp->timer_offset = HZ / 10;
5747                 tp->timer_counter = tp->timer_multiplier = 10;
5748                 tp->asf_counter = tp->asf_multiplier = (10 * 120);
5749
5750                 init_timer(&tp->timer);
5751                 tp->timer.expires = jiffies + tp->timer_offset;
5752                 tp->timer.data = (unsigned long) tp;
5753                 tp->timer.function = tg3_timer;
5754                 add_timer(&tp->timer);
5755
5756                 tp->tg3_flags |= TG3_FLAG_INIT_COMPLETE;
5757         }
5758
5759         spin_unlock(&tp->tx_lock);
5760         spin_unlock_irq(&tp->lock);
5761
5762         if (err) {
5763                 free_irq(dev->irq, dev);
5764                 tg3_free_consistent(tp);
5765                 return err;
5766         }
5767
5768         spin_lock_irq(&tp->lock);
5769         spin_lock(&tp->tx_lock);
5770
5771         tg3_enable_ints(tp);
5772
5773         spin_unlock(&tp->tx_lock);
5774         spin_unlock_irq(&tp->lock);
5775
5776         netif_start_queue(dev);
5777
5778         return 0;
5779 }
5780
5781 #if 0
5782 /*static*/ void tg3_dump_state(struct tg3 *tp)
5783 {
5784         u32 val32, val32_2, val32_3, val32_4, val32_5;
5785         u16 val16;
5786         int i;
5787
5788         pci_read_config_word(tp->pdev, PCI_STATUS, &val16);
5789         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE, &val32);
5790         printk("DEBUG: PCI status [%04x] TG3PCI state[%08x]\n",
5791                val16, val32);
5792
5793         /* MAC block */
5794         printk("DEBUG: MAC_MODE[%08x] MAC_STATUS[%08x]\n",
5795                tr32(MAC_MODE), tr32(MAC_STATUS));
5796         printk("       MAC_EVENT[%08x] MAC_LED_CTRL[%08x]\n",
5797                tr32(MAC_EVENT), tr32(MAC_LED_CTRL));
5798         printk("DEBUG: MAC_TX_MODE[%08x] MAC_TX_STATUS[%08x]\n",
5799                tr32(MAC_TX_MODE), tr32(MAC_TX_STATUS));
5800         printk("       MAC_RX_MODE[%08x] MAC_RX_STATUS[%08x]\n",
5801                tr32(MAC_RX_MODE), tr32(MAC_RX_STATUS));
5802
5803         /* Send data initiator control block */
5804         printk("DEBUG: SNDDATAI_MODE[%08x] SNDDATAI_STATUS[%08x]\n",
5805                tr32(SNDDATAI_MODE), tr32(SNDDATAI_STATUS));
5806         printk("       SNDDATAI_STATSCTRL[%08x]\n",
5807                tr32(SNDDATAI_STATSCTRL));
5808
5809         /* Send data completion control block */
5810         printk("DEBUG: SNDDATAC_MODE[%08x]\n", tr32(SNDDATAC_MODE));
5811
5812         /* Send BD ring selector block */
5813         printk("DEBUG: SNDBDS_MODE[%08x] SNDBDS_STATUS[%08x]\n",
5814                tr32(SNDBDS_MODE), tr32(SNDBDS_STATUS));
5815
5816         /* Send BD initiator control block */
5817         printk("DEBUG: SNDBDI_MODE[%08x] SNDBDI_STATUS[%08x]\n",
5818                tr32(SNDBDI_MODE), tr32(SNDBDI_STATUS));
5819
5820         /* Send BD completion control block */
5821         printk("DEBUG: SNDBDC_MODE[%08x]\n", tr32(SNDBDC_MODE));
5822
5823         /* Receive list placement control block */
5824         printk("DEBUG: RCVLPC_MODE[%08x] RCVLPC_STATUS[%08x]\n",
5825                tr32(RCVLPC_MODE), tr32(RCVLPC_STATUS));
5826         printk("       RCVLPC_STATSCTRL[%08x]\n",
5827                tr32(RCVLPC_STATSCTRL));
5828
5829         /* Receive data and receive BD initiator control block */
5830         printk("DEBUG: RCVDBDI_MODE[%08x] RCVDBDI_STATUS[%08x]\n",
5831                tr32(RCVDBDI_MODE), tr32(RCVDBDI_STATUS));
5832
5833         /* Receive data completion control block */
5834         printk("DEBUG: RCVDCC_MODE[%08x]\n",
5835                tr32(RCVDCC_MODE));
5836
5837         /* Receive BD initiator control block */
5838         printk("DEBUG: RCVBDI_MODE[%08x] RCVBDI_STATUS[%08x]\n",
5839                tr32(RCVBDI_MODE), tr32(RCVBDI_STATUS));
5840
5841         /* Receive BD completion control block */
5842         printk("DEBUG: RCVCC_MODE[%08x] RCVCC_STATUS[%08x]\n",
5843                tr32(RCVCC_MODE), tr32(RCVCC_STATUS));
5844
5845         /* Receive list selector control block */
5846         printk("DEBUG: RCVLSC_MODE[%08x] RCVLSC_STATUS[%08x]\n",
5847                tr32(RCVLSC_MODE), tr32(RCVLSC_STATUS));
5848
5849         /* Mbuf cluster free block */
5850         printk("DEBUG: MBFREE_MODE[%08x] MBFREE_STATUS[%08x]\n",
5851                tr32(MBFREE_MODE), tr32(MBFREE_STATUS));
5852
5853         /* Host coalescing control block */
5854         printk("DEBUG: HOSTCC_MODE[%08x] HOSTCC_STATUS[%08x]\n",
5855                tr32(HOSTCC_MODE), tr32(HOSTCC_STATUS));
5856         printk("DEBUG: HOSTCC_STATS_BLK_HOST_ADDR[%08x%08x]\n",
5857                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5858                tr32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5859         printk("DEBUG: HOSTCC_STATUS_BLK_HOST_ADDR[%08x%08x]\n",
5860                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH),
5861                tr32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW));
5862         printk("DEBUG: HOSTCC_STATS_BLK_NIC_ADDR[%08x]\n",
5863                tr32(HOSTCC_STATS_BLK_NIC_ADDR));
5864         printk("DEBUG: HOSTCC_STATUS_BLK_NIC_ADDR[%08x]\n",
5865                tr32(HOSTCC_STATUS_BLK_NIC_ADDR));
5866
5867         /* Memory arbiter control block */
5868         printk("DEBUG: MEMARB_MODE[%08x] MEMARB_STATUS[%08x]\n",
5869                tr32(MEMARB_MODE), tr32(MEMARB_STATUS));
5870
5871         /* Buffer manager control block */
5872         printk("DEBUG: BUFMGR_MODE[%08x] BUFMGR_STATUS[%08x]\n",
5873                tr32(BUFMGR_MODE), tr32(BUFMGR_STATUS));
5874         printk("DEBUG: BUFMGR_MB_POOL_ADDR[%08x] BUFMGR_MB_POOL_SIZE[%08x]\n",
5875                tr32(BUFMGR_MB_POOL_ADDR), tr32(BUFMGR_MB_POOL_SIZE));
5876         printk("DEBUG: BUFMGR_DMA_DESC_POOL_ADDR[%08x] "
5877                "BUFMGR_DMA_DESC_POOL_SIZE[%08x]\n",
5878                tr32(BUFMGR_DMA_DESC_POOL_ADDR),
5879                tr32(BUFMGR_DMA_DESC_POOL_SIZE));
5880
5881         /* Read DMA control block */
5882         printk("DEBUG: RDMAC_MODE[%08x] RDMAC_STATUS[%08x]\n",
5883                tr32(RDMAC_MODE), tr32(RDMAC_STATUS));
5884
5885         /* Write DMA control block */
5886         printk("DEBUG: WDMAC_MODE[%08x] WDMAC_STATUS[%08x]\n",
5887                tr32(WDMAC_MODE), tr32(WDMAC_STATUS));
5888
5889         /* DMA completion block */
5890         printk("DEBUG: DMAC_MODE[%08x]\n",
5891                tr32(DMAC_MODE));
5892
5893         /* GRC block */
5894         printk("DEBUG: GRC_MODE[%08x] GRC_MISC_CFG[%08x]\n",
5895                tr32(GRC_MODE), tr32(GRC_MISC_CFG));
5896         printk("DEBUG: GRC_LOCAL_CTRL[%08x]\n",
5897                tr32(GRC_LOCAL_CTRL));
5898
5899         /* TG3_BDINFOs */
5900         printk("DEBUG: RCVDBDI_JUMBO_BD[%08x%08x:%08x:%08x]\n",
5901                tr32(RCVDBDI_JUMBO_BD + 0x0),
5902                tr32(RCVDBDI_JUMBO_BD + 0x4),
5903                tr32(RCVDBDI_JUMBO_BD + 0x8),
5904                tr32(RCVDBDI_JUMBO_BD + 0xc));
5905         printk("DEBUG: RCVDBDI_STD_BD[%08x%08x:%08x:%08x]\n",
5906                tr32(RCVDBDI_STD_BD + 0x0),
5907                tr32(RCVDBDI_STD_BD + 0x4),
5908                tr32(RCVDBDI_STD_BD + 0x8),
5909                tr32(RCVDBDI_STD_BD + 0xc));
5910         printk("DEBUG: RCVDBDI_MINI_BD[%08x%08x:%08x:%08x]\n",
5911                tr32(RCVDBDI_MINI_BD + 0x0),
5912                tr32(RCVDBDI_MINI_BD + 0x4),
5913                tr32(RCVDBDI_MINI_BD + 0x8),
5914                tr32(RCVDBDI_MINI_BD + 0xc));
5915
5916         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x0, &val32);
5917         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x4, &val32_2);
5918         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0x8, &val32_3);
5919         tg3_read_mem(tp, NIC_SRAM_SEND_RCB + 0xc, &val32_4);
5920         printk("DEBUG: SRAM_SEND_RCB_0[%08x%08x:%08x:%08x]\n",
5921                val32, val32_2, val32_3, val32_4);
5922
5923         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x0, &val32);
5924         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x4, &val32_2);
5925         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0x8, &val32_3);
5926         tg3_read_mem(tp, NIC_SRAM_RCV_RET_RCB + 0xc, &val32_4);
5927         printk("DEBUG: SRAM_RCV_RET_RCB_0[%08x%08x:%08x:%08x]\n",
5928                val32, val32_2, val32_3, val32_4);
5929
5930         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x0, &val32);
5931         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x4, &val32_2);
5932         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x8, &val32_3);
5933         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0xc, &val32_4);
5934         tg3_read_mem(tp, NIC_SRAM_STATUS_BLK + 0x10, &val32_5);
5935         printk("DEBUG: SRAM_STATUS_BLK[%08x:%08x:%08x:%08x:%08x]\n",
5936                val32, val32_2, val32_3, val32_4, val32_5);
5937
5938         /* SW status block */
5939         printk("DEBUG: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5940                tp->hw_status->status,
5941                tp->hw_status->status_tag,
5942                tp->hw_status->rx_jumbo_consumer,
5943                tp->hw_status->rx_consumer,
5944                tp->hw_status->rx_mini_consumer,
5945                tp->hw_status->idx[0].rx_producer,
5946                tp->hw_status->idx[0].tx_consumer);
5947
5948         /* SW statistics block */
5949         printk("DEBUG: Host statistics block [%08x:%08x:%08x:%08x]\n",
5950                ((u32 *)tp->hw_stats)[0],
5951                ((u32 *)tp->hw_stats)[1],
5952                ((u32 *)tp->hw_stats)[2],
5953                ((u32 *)tp->hw_stats)[3]);
5954
5955         /* Mailboxes */
5956         printk("DEBUG: SNDHOST_PROD[%08x%08x] SNDNIC_PROD[%08x%08x]\n",
5957                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x0),
5958                tr32(MAILBOX_SNDHOST_PROD_IDX_0 + 0x4),
5959                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x0),
5960                tr32(MAILBOX_SNDNIC_PROD_IDX_0 + 0x4));
5961
5962         /* NIC side send descriptors. */
5963         for (i = 0; i < 6; i++) {
5964                 unsigned long txd;
5965
5966                 txd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_TX_BUFFER_DESC
5967                         + (i * sizeof(struct tg3_tx_buffer_desc));
5968                 printk("DEBUG: NIC TXD(%d)[%08x:%08x:%08x:%08x]\n",
5969                        i,
5970                        readl(txd + 0x0), readl(txd + 0x4),
5971                        readl(txd + 0x8), readl(txd + 0xc));
5972         }
5973
5974         /* NIC side RX descriptors. */
5975         for (i = 0; i < 6; i++) {
5976                 unsigned long rxd;
5977
5978                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_BUFFER_DESC
5979                         + (i * sizeof(struct tg3_rx_buffer_desc));
5980                 printk("DEBUG: NIC RXD_STD(%d)[0][%08x:%08x:%08x:%08x]\n",
5981                        i,
5982                        readl(rxd + 0x0), readl(rxd + 0x4),
5983                        readl(rxd + 0x8), readl(rxd + 0xc));
5984                 rxd += (4 * sizeof(u32));
5985                 printk("DEBUG: NIC RXD_STD(%d)[1][%08x:%08x:%08x:%08x]\n",
5986                        i,
5987                        readl(rxd + 0x0), readl(rxd + 0x4),
5988                        readl(rxd + 0x8), readl(rxd + 0xc));
5989         }
5990
5991         for (i = 0; i < 6; i++) {
5992                 unsigned long rxd;
5993
5994                 rxd = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_RX_JUMBO_BUFFER_DESC
5995                         + (i * sizeof(struct tg3_rx_buffer_desc));
5996                 printk("DEBUG: NIC RXD_JUMBO(%d)[0][%08x:%08x:%08x:%08x]\n",
5997                        i,
5998                        readl(rxd + 0x0), readl(rxd + 0x4),
5999                        readl(rxd + 0x8), readl(rxd + 0xc));
6000                 rxd += (4 * sizeof(u32));
6001                 printk("DEBUG: NIC RXD_JUMBO(%d)[1][%08x:%08x:%08x:%08x]\n",
6002                        i,
6003                        readl(rxd + 0x0), readl(rxd + 0x4),
6004                        readl(rxd + 0x8), readl(rxd + 0xc));
6005         }
6006 }
6007 #endif
6008
6009 static struct net_device_stats *tg3_get_stats(struct net_device *);
6010 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *);
6011
6012 static int tg3_close(struct net_device *dev)
6013 {
6014         struct tg3 *tp = netdev_priv(dev);
6015
6016         netif_stop_queue(dev);
6017
6018         del_timer_sync(&tp->timer);
6019
6020         spin_lock_irq(&tp->lock);
6021         spin_lock(&tp->tx_lock);
6022 #if 0
6023         tg3_dump_state(tp);
6024 #endif
6025
6026         tg3_disable_ints(tp);
6027
6028         tg3_halt(tp);
6029         tg3_free_rings(tp);
6030         tp->tg3_flags &=
6031                 ~(TG3_FLAG_INIT_COMPLETE |
6032                   TG3_FLAG_GOT_SERDES_FLOWCTL);
6033         netif_carrier_off(tp->dev);
6034
6035         spin_unlock(&tp->tx_lock);
6036         spin_unlock_irq(&tp->lock);
6037
6038         free_irq(dev->irq, dev);
6039
6040         memcpy(&tp->net_stats_prev, tg3_get_stats(tp->dev),
6041                sizeof(tp->net_stats_prev));
6042         memcpy(&tp->estats_prev, tg3_get_estats(tp),
6043                sizeof(tp->estats_prev));
6044
6045         tg3_free_consistent(tp);
6046
6047         return 0;
6048 }
6049
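/* Fold a 64-bit high/low hardware counter into an unsigned long.  On 32-bit
 * hosts only the low word fits, so the high word is dropped.
 */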
6050 static inline unsigned long get_stat64(tg3_stat64_t *val)
6051 {
6052         unsigned long ret;
6053
6054 #if (BITS_PER_LONG == 32)
6055         ret = val->low;
6056 #else
6057         ret = ((u64)val->high << 32) | ((u64)val->low);
6058 #endif
6059         return ret;
6060 }
6061
6062 static unsigned long calc_crc_errors(struct tg3 *tp)
6063 {
6064         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6065
6066         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
6067             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
6068              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
6069                 unsigned long flags;
6070                 u32 val;
6071
6072                 spin_lock_irqsave(&tp->lock, flags);
6073                 if (!tg3_readphy(tp, 0x1e, &val)) {
6074                         tg3_writephy(tp, 0x1e, val | 0x8000);
6075                         tg3_readphy(tp, 0x14, &val);
6076                 } else
6077                         val = 0;
6078                 spin_unlock_irqrestore(&tp->lock, flags);
6079
6080                 tp->phy_crc_errors += val;
6081
6082                 return tp->phy_crc_errors;
6083         }
6084
6085         return get_stat64(&hw_stats->rx_fcs_errors);
6086 }
6087
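/* Each ethtool statistic is the value saved in estats_prev at the last
 * tg3_close() plus whatever the hardware has accumulated since.
 */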
6088 #define ESTAT_ADD(member) \
6089         estats->member =        old_estats->member + \
6090                                 get_stat64(&hw_stats->member)
6091
6092 static struct tg3_ethtool_stats *tg3_get_estats(struct tg3 *tp)
6093 {
6094         struct tg3_ethtool_stats *estats = &tp->estats;
6095         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
6096         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6097
6098         if (!hw_stats)
6099                 return old_estats;
6100
6101         ESTAT_ADD(rx_octets);
6102         ESTAT_ADD(rx_fragments);
6103         ESTAT_ADD(rx_ucast_packets);
6104         ESTAT_ADD(rx_mcast_packets);
6105         ESTAT_ADD(rx_bcast_packets);
6106         ESTAT_ADD(rx_fcs_errors);
6107         ESTAT_ADD(rx_align_errors);
6108         ESTAT_ADD(rx_xon_pause_rcvd);
6109         ESTAT_ADD(rx_xoff_pause_rcvd);
6110         ESTAT_ADD(rx_mac_ctrl_rcvd);
6111         ESTAT_ADD(rx_xoff_entered);
6112         ESTAT_ADD(rx_frame_too_long_errors);
6113         ESTAT_ADD(rx_jabbers);
6114         ESTAT_ADD(rx_undersize_packets);
6115         ESTAT_ADD(rx_in_length_errors);
6116         ESTAT_ADD(rx_out_length_errors);
6117         ESTAT_ADD(rx_64_or_less_octet_packets);
6118         ESTAT_ADD(rx_65_to_127_octet_packets);
6119         ESTAT_ADD(rx_128_to_255_octet_packets);
6120         ESTAT_ADD(rx_256_to_511_octet_packets);
6121         ESTAT_ADD(rx_512_to_1023_octet_packets);
6122         ESTAT_ADD(rx_1024_to_1522_octet_packets);
6123         ESTAT_ADD(rx_1523_to_2047_octet_packets);
6124         ESTAT_ADD(rx_2048_to_4095_octet_packets);
6125         ESTAT_ADD(rx_4096_to_8191_octet_packets);
6126         ESTAT_ADD(rx_8192_to_9022_octet_packets);
6127
6128         ESTAT_ADD(tx_octets);
6129         ESTAT_ADD(tx_collisions);
6130         ESTAT_ADD(tx_xon_sent);
6131         ESTAT_ADD(tx_xoff_sent);
6132         ESTAT_ADD(tx_flow_control);
6133         ESTAT_ADD(tx_mac_errors);
6134         ESTAT_ADD(tx_single_collisions);
6135         ESTAT_ADD(tx_mult_collisions);
6136         ESTAT_ADD(tx_deferred);
6137         ESTAT_ADD(tx_excessive_collisions);
6138         ESTAT_ADD(tx_late_collisions);
6139         ESTAT_ADD(tx_collide_2times);
6140         ESTAT_ADD(tx_collide_3times);
6141         ESTAT_ADD(tx_collide_4times);
6142         ESTAT_ADD(tx_collide_5times);
6143         ESTAT_ADD(tx_collide_6times);
6144         ESTAT_ADD(tx_collide_7times);
6145         ESTAT_ADD(tx_collide_8times);
6146         ESTAT_ADD(tx_collide_9times);
6147         ESTAT_ADD(tx_collide_10times);
6148         ESTAT_ADD(tx_collide_11times);
6149         ESTAT_ADD(tx_collide_12times);
6150         ESTAT_ADD(tx_collide_13times);
6151         ESTAT_ADD(tx_collide_14times);
6152         ESTAT_ADD(tx_collide_15times);
6153         ESTAT_ADD(tx_ucast_packets);
6154         ESTAT_ADD(tx_mcast_packets);
6155         ESTAT_ADD(tx_bcast_packets);
6156         ESTAT_ADD(tx_carrier_sense_errors);
6157         ESTAT_ADD(tx_discards);
6158         ESTAT_ADD(tx_errors);
6159
6160         ESTAT_ADD(dma_writeq_full);
6161         ESTAT_ADD(dma_write_prioq_full);
6162         ESTAT_ADD(rxbds_empty);
6163         ESTAT_ADD(rx_discards);
6164         ESTAT_ADD(rx_errors);
6165         ESTAT_ADD(rx_threshold_hit);
6166
6167         ESTAT_ADD(dma_readq_full);
6168         ESTAT_ADD(dma_read_prioq_full);
6169         ESTAT_ADD(tx_comp_queue_full);
6170
6171         ESTAT_ADD(ring_set_send_prod_index);
6172         ESTAT_ADD(ring_status_update);
6173         ESTAT_ADD(nic_irqs);
6174         ESTAT_ADD(nic_avoided_irqs);
6175         ESTAT_ADD(nic_tx_threshold_hit);
6176
6177         return estats;
6178 }
6179
6180 static struct net_device_stats *tg3_get_stats(struct net_device *dev)
6181 {
6182         struct tg3 *tp = netdev_priv(dev);
6183         struct net_device_stats *stats = &tp->net_stats;
6184         struct net_device_stats *old_stats = &tp->net_stats_prev;
6185         struct tg3_hw_stats *hw_stats = tp->hw_stats;
6186
6187         if (!hw_stats)
6188                 return old_stats;
6189
6190         stats->rx_packets = old_stats->rx_packets +
6191                 get_stat64(&hw_stats->rx_ucast_packets) +
6192                 get_stat64(&hw_stats->rx_mcast_packets) +
6193                 get_stat64(&hw_stats->rx_bcast_packets);
6194
6195         stats->tx_packets = old_stats->tx_packets +
6196                 get_stat64(&hw_stats->tx_ucast_packets) +
6197                 get_stat64(&hw_stats->tx_mcast_packets) +
6198                 get_stat64(&hw_stats->tx_bcast_packets);
6199
6200         stats->rx_bytes = old_stats->rx_bytes +
6201                 get_stat64(&hw_stats->rx_octets);
6202         stats->tx_bytes = old_stats->tx_bytes +
6203                 get_stat64(&hw_stats->tx_octets);
6204
6205         stats->rx_errors = old_stats->rx_errors +
6206                 get_stat64(&hw_stats->rx_errors) +
6207                 get_stat64(&hw_stats->rx_discards);
6208         stats->tx_errors = old_stats->tx_errors +
6209                 get_stat64(&hw_stats->tx_errors) +
6210                 get_stat64(&hw_stats->tx_mac_errors) +
6211                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
6212                 get_stat64(&hw_stats->tx_discards);
6213
6214         stats->multicast = old_stats->multicast +
6215                 get_stat64(&hw_stats->rx_mcast_packets);
6216         stats->collisions = old_stats->collisions +
6217                 get_stat64(&hw_stats->tx_collisions);
6218
6219         stats->rx_length_errors = old_stats->rx_length_errors +
6220                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
6221                 get_stat64(&hw_stats->rx_undersize_packets);
6222
6223         stats->rx_over_errors = old_stats->rx_over_errors +
6224                 get_stat64(&hw_stats->rxbds_empty);
6225         stats->rx_frame_errors = old_stats->rx_frame_errors +
6226                 get_stat64(&hw_stats->rx_align_errors);
6227         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
6228                 get_stat64(&hw_stats->tx_discards);
6229         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
6230                 get_stat64(&hw_stats->tx_carrier_sense_errors);
6231
6232         stats->rx_crc_errors = old_stats->rx_crc_errors +
6233                 calc_crc_errors(tp);
6234
6235         return stats;
6236 }
6237
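/* Bit-reflected CRC-32 of 'buf' using polynomial 0xedb88320 (the standard
 * Ethernet CRC processed LSB first), computed one bit at a time.  It is
 * used below to hash multicast addresses into the MAC hash registers.
 */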
6238 static inline u32 calc_crc(unsigned char *buf, int len)
6239 {
6240         u32 reg;
6241         u32 tmp;
6242         int j, k;
6243
6244         reg = 0xffffffff;
6245
6246         for (j = 0; j < len; j++) {
6247                 reg ^= buf[j];
6248
6249                 for (k = 0; k < 8; k++) {
6250                         tmp = reg & 0x01;
6251
6252                         reg >>= 1;
6253
6254                         if (tmp) {
6255                                 reg ^= 0xedb88320;
6256                         }
6257                 }
6258         }
6259
6260         return ~reg;
6261 }
6262
6263 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
6264 {
6265         /* accept or reject all multicast frames */
6266         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
6267         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
6268         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
6269         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
6270 }
6271
6272 static void __tg3_set_rx_mode(struct net_device *dev)
6273 {
6274         struct tg3 *tp = netdev_priv(dev);
6275         u32 rx_mode;
6276
6277         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
6278                                   RX_MODE_KEEP_VLAN_TAG);
6279
6280         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
6281          * flag clear.
6282          */
6283 #if TG3_VLAN_TAG_USED
6284         if (!tp->vlgrp &&
6285             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6286                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6287 #else
6288         /* By definition, VLAN is always disabled in this
6289          * case.
6290          */
6291         if (!(tp->tg3_flags & TG3_FLAG_ENABLE_ASF))
6292                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
6293 #endif
6294
6295         if (dev->flags & IFF_PROMISC) {
6296                 /* Promiscuous mode. */
6297                 rx_mode |= RX_MODE_PROMISC;
6298         } else if (dev->flags & IFF_ALLMULTI) {
6299                 /* Accept all multicast. */
6300                 tg3_set_multi (tp, 1);
6301         } else if (dev->mc_count < 1) {
6302                 /* Reject all multicast. */
6303                 tg3_set_multi (tp, 0);
6304         } else {
6305                 /* Accept one or more multicast addresses. */
6306                 struct dev_mc_list *mclist;
6307                 unsigned int i;
6308                 u32 mc_filter[4] = { 0, };
6309                 u32 regidx;
6310                 u32 bit;
6311                 u32 crc;
6312
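                /* The low 7 bits of the complemented CRC pick one of 128
                 * hash-filter bits: bits 6:5 select which of the four
                 * MAC_HASH_REG_* registers, bits 4:0 the bit within it.
                 */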
6313                 for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
6314                      i++, mclist = mclist->next) {
6315
6316                         crc = calc_crc (mclist->dmi_addr, ETH_ALEN);
6317                         bit = ~crc & 0x7f;
6318                         regidx = (bit & 0x60) >> 5;
6319                         bit &= 0x1f;
6320                         mc_filter[regidx] |= (1 << bit);
6321                 }
6322
6323                 tw32(MAC_HASH_REG_0, mc_filter[0]);
6324                 tw32(MAC_HASH_REG_1, mc_filter[1]);
6325                 tw32(MAC_HASH_REG_2, mc_filter[2]);
6326                 tw32(MAC_HASH_REG_3, mc_filter[3]);
6327         }
6328
6329         if (rx_mode != tp->rx_mode) {
6330                 tp->rx_mode = rx_mode;
6331                 tw32_f(MAC_RX_MODE, rx_mode);
6332                 udelay(10);
6333         }
6334 }
6335
6336 static void tg3_set_rx_mode(struct net_device *dev)
6337 {
6338         struct tg3 *tp = netdev_priv(dev);
6339
6340         spin_lock_irq(&tp->lock);
6341         spin_lock(&tp->tx_lock);
6342         __tg3_set_rx_mode(dev);
6343         spin_unlock(&tp->tx_lock);
6344         spin_unlock_irq(&tp->lock);
6345 }
6346
6347 #define TG3_REGDUMP_LEN         (32 * 1024)
6348
6349 static int tg3_get_regs_len(struct net_device *dev)
6350 {
6351         return TG3_REGDUMP_LEN;
6352 }
6353
6354 static void tg3_get_regs(struct net_device *dev,
6355                 struct ethtool_regs *regs, void *_p)
6356 {
6357         u32 *p = _p;
6358         struct tg3 *tp = netdev_priv(dev);
6359         u8 *orig_p = _p;
6360         int i;
6361
6362         regs->version = 0;
6363
6364         memset(p, 0, TG3_REGDUMP_LEN);
6365
6366         spin_lock_irq(&tp->lock);
6367         spin_lock(&tp->tx_lock);
6368
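     /* Each register block is copied to the same offset it occupies in
      * the chip's register space, so the ethtool dump is offset-indexed
      * rather than packed; the buffer was zeroed above, so unread gaps
      * stay zero.
      */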
6369 #define __GET_REG32(reg)        (*(p)++ = tr32(reg))
6370 #define GET_REG32_LOOP(base,len)                \
6371 do {    p = (u32 *)(orig_p + (base));           \
6372         for (i = 0; i < len; i += 4)            \
6373                 __GET_REG32((base) + i);        \
6374 } while (0)
6375 #define GET_REG32_1(reg)                        \
6376 do {    p = (u32 *)(orig_p + (reg));            \
6377         __GET_REG32((reg));                     \
6378 } while (0)
6379
6380         GET_REG32_LOOP(TG3PCI_VENDOR, 0xb0);
6381         GET_REG32_LOOP(MAILBOX_INTERRUPT_0, 0x200);
6382         GET_REG32_LOOP(MAC_MODE, 0x4f0);
6383         GET_REG32_LOOP(SNDDATAI_MODE, 0xe0);
6384         GET_REG32_1(SNDDATAC_MODE);
6385         GET_REG32_LOOP(SNDBDS_MODE, 0x80);
6386         GET_REG32_LOOP(SNDBDI_MODE, 0x48);
6387         GET_REG32_1(SNDBDC_MODE);
6388         GET_REG32_LOOP(RCVLPC_MODE, 0x20);
6389         GET_REG32_LOOP(RCVLPC_SELLST_BASE, 0x15c);
6390         GET_REG32_LOOP(RCVDBDI_MODE, 0x0c);
6391         GET_REG32_LOOP(RCVDBDI_JUMBO_BD, 0x3c);
6392         GET_REG32_LOOP(RCVDBDI_BD_PROD_IDX_0, 0x44);
6393         GET_REG32_1(RCVDCC_MODE);
6394         GET_REG32_LOOP(RCVBDI_MODE, 0x20);
6395         GET_REG32_LOOP(RCVCC_MODE, 0x14);
6396         GET_REG32_LOOP(RCVLSC_MODE, 0x08);
6397         GET_REG32_1(MBFREE_MODE);
6398         GET_REG32_LOOP(HOSTCC_MODE, 0x100);
6399         GET_REG32_LOOP(MEMARB_MODE, 0x10);
6400         GET_REG32_LOOP(BUFMGR_MODE, 0x58);
6401         GET_REG32_LOOP(RDMAC_MODE, 0x08);
6402         GET_REG32_LOOP(WDMAC_MODE, 0x08);
6403         GET_REG32_LOOP(RX_CPU_BASE, 0x280);
6404         GET_REG32_LOOP(TX_CPU_BASE, 0x280);
6405         GET_REG32_LOOP(GRCMBOX_INTERRUPT_0, 0x110);
6406         GET_REG32_LOOP(FTQ_RESET, 0x120);
6407         GET_REG32_LOOP(MSGINT_MODE, 0x0c);
6408         GET_REG32_1(DMAC_MODE);
6409         GET_REG32_LOOP(GRC_MODE, 0x4c);
6410         if (tp->tg3_flags & TG3_FLAG_NVRAM)
6411                 GET_REG32_LOOP(NVRAM_CMD, 0x24);
6412
6413 #undef __GET_REG32
6414 #undef GET_REG32_LOOP
6415 #undef GET_REG32_1
6416
6417         spin_unlock(&tp->tx_lock);
6418         spin_unlock_irq(&tp->lock);
6419 }
6420
6421 static int tg3_get_eeprom_len(struct net_device *dev)
6422 {
6423         struct tg3 *tp = netdev_priv(dev);
6424
6425         return tp->nvram_size;
6426 }
6427
6428 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val);
6429
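     /* ethtool EEPROM read: since tg3_nvram_read() only fetches whole
      * 32-bit words at aligned offsets, handle an unaligned head, then
      * the aligned middle, then an unaligned tail.
      */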
6430 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6431 {
6432         struct tg3 *tp = netdev_priv(dev);
6433         int ret;
6434         u8  *pd;
6435         u32 i, offset, len, val, b_offset, b_count;
6436
6437         offset = eeprom->offset;
6438         len = eeprom->len;
6439         eeprom->len = 0;
6440
6441         eeprom->magic = TG3_EEPROM_MAGIC;
6442
6443         if (offset & 3) {
6444                 /* adjustments to start on required 4 byte boundary */
6445                 b_offset = offset & 3;
6446                 b_count = 4 - b_offset;
6447                 if (b_count > len) {
6448                         /* i.e. offset=1 len=2 */
6449                         b_count = len;
6450                 }
6451                 ret = tg3_nvram_read(tp, offset-b_offset, &val);
6452                 if (ret)
6453                         return ret;
6454                 val = cpu_to_le32(val);
6455                 memcpy(data, ((char*)&val) + b_offset, b_count);
6456                 len -= b_count;
6457                 offset += b_count;
6458                 eeprom->len += b_count;
6459         }
6460
6461         /* read bytes up to the last 4 byte boundary */
6462         pd = &data[eeprom->len];
6463         for (i = 0; i < (len - (len & 3)); i += 4) {
6464                 ret = tg3_nvram_read(tp, offset + i, &val);
6465                 if (ret) {
6466                         eeprom->len += i;
6467                         return ret;
6468                 }
6469                 val = cpu_to_le32(val);
6470                 memcpy(pd + i, &val, 4);
6471         }
6472         eeprom->len += i;
6473
6474         if (len & 3) {
6475                 /* read last bytes not ending on 4 byte boundary */
6476                 pd = &data[eeprom->len];
6477                 b_count = len & 3;
6478                 b_offset = offset + len - b_count;
6479                 ret = tg3_nvram_read(tp, b_offset, &val);
6480                 if (ret)
6481                         return ret;
6482                 val = cpu_to_le32(val);
6483                 memcpy(pd, ((char*)&val), b_count);
6484                 eeprom->len += b_count;
6485         }
6486         return 0;
6487 }
6488
6489 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf); 
6490
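     /* ethtool EEPROM write: round the request out to whole 32-bit
      * words by reading back the bordering words, merge the new bytes
      * into a bounce buffer, then hand the aligned block to
      * tg3_nvram_write_block().
      */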
6491 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
6492 {
6493         struct tg3 *tp = netdev_priv(dev);
6494         int ret;
6495         u32 offset, len, b_offset, odd_len, start, end;
6496         u8 *buf;
6497
6498         if (eeprom->magic != TG3_EEPROM_MAGIC)
6499                 return -EINVAL;
6500
6501         offset = eeprom->offset;
6502         len = eeprom->len;
6503
6504         if ((b_offset = (offset & 3))) {
6505                 /* adjustments to start on required 4 byte boundary */
6506                 ret = tg3_nvram_read(tp, offset-b_offset, &start);
6507                 if (ret)
6508                         return ret;
6509                 start = cpu_to_le32(start);
6510                 len += b_offset;
6511                 offset &= ~3;
6512         }
6513
6514         odd_len = 0;
6515         if ((len & 3) && ((len > 4) || (b_offset == 0))) {
6516                 /* adjustments to end on required 4 byte boundary */
6517                 odd_len = 1;
6518                 len = (len + 3) & ~3;
6519                 ret = tg3_nvram_read(tp, offset+len-4, &end);
6520                 if (ret)
6521                         return ret;
6522                 end = cpu_to_le32(end);
6523         }
6524
6525         buf = data;
6526         if (b_offset || odd_len) {
6527                 buf = kmalloc(len, GFP_KERNEL);
6528                 if (!buf)
6529                         return -ENOMEM;
6530                 if (b_offset)
6531                         memcpy(buf, &start, 4);
6532                 if (odd_len)
6533                         memcpy(buf+len-4, &end, 4);
6534                 memcpy(buf + b_offset, data, eeprom->len);
6535         }
6536
6537         ret = tg3_nvram_write_block(tp, offset, len, buf);
6538
6539         if (buf != data)
6540                 kfree(buf);
6541
6542         return ret;
6543 }
6544
6545 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6546 {
6547         struct tg3 *tp = netdev_priv(dev);
6548   
6549         cmd->supported = (SUPPORTED_Autoneg);
6550
6551         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
6552                 cmd->supported |= (SUPPORTED_1000baseT_Half |
6553                                    SUPPORTED_1000baseT_Full);
6554
6555         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES))
6556                 cmd->supported |= (SUPPORTED_100baseT_Half |
6557                                   SUPPORTED_100baseT_Full |
6558                                   SUPPORTED_10baseT_Half |
6559                                   SUPPORTED_10baseT_Full |
6560                                   SUPPORTED_MII);
6561         else
6562                 cmd->supported |= SUPPORTED_FIBRE;
6563   
6564         cmd->advertising = tp->link_config.advertising;
6565         if (netif_running(dev)) {
6566                 cmd->speed = tp->link_config.active_speed;
6567                 cmd->duplex = tp->link_config.active_duplex;
6568         }
6569         cmd->port = 0;
6570         cmd->phy_address = PHY_ADDR;
6571         cmd->transceiver = 0;
6572         cmd->autoneg = tp->link_config.autoneg;
6573         cmd->maxtxpkt = 0;
6574         cmd->maxrxpkt = 0;
6575         return 0;
6576 }
6577   
6578 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
6579 {
6580         struct tg3 *tp = netdev_priv(dev);
6581   
6582         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
6583                 /* These are the only valid advertisement bits allowed.  */
6584                 if (cmd->autoneg == AUTONEG_ENABLE &&
6585                     (cmd->advertising & ~(ADVERTISED_1000baseT_Half |
6586                                           ADVERTISED_1000baseT_Full |
6587                                           ADVERTISED_Autoneg |
6588                                           ADVERTISED_FIBRE)))
6589                         return -EINVAL;
6590         }
6591
6592         spin_lock_irq(&tp->lock);
6593         spin_lock(&tp->tx_lock);
6594
6595         tp->link_config.autoneg = cmd->autoneg;
6596         if (cmd->autoneg == AUTONEG_ENABLE) {
6597                 tp->link_config.advertising = cmd->advertising;
6598                 tp->link_config.speed = SPEED_INVALID;
6599                 tp->link_config.duplex = DUPLEX_INVALID;
6600         } else {
6601                 tp->link_config.advertising = 0;
6602                 tp->link_config.speed = cmd->speed;
6603                 tp->link_config.duplex = cmd->duplex;
6604         }
6605   
6606         if (netif_running(dev))
6607                 tg3_setup_phy(tp, 1);
6608
6609         spin_unlock(&tp->tx_lock);
6610         spin_unlock_irq(&tp->lock);
6611   
6612         return 0;
6613 }
6614   
6615 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6616 {
6617         struct tg3 *tp = netdev_priv(dev);
6618   
6619         strcpy(info->driver, DRV_MODULE_NAME);
6620         strcpy(info->version, DRV_MODULE_VERSION);
6621         strcpy(info->bus_info, pci_name(tp->pdev));
6622 }
6623   
6624 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6625 {
6626         struct tg3 *tp = netdev_priv(dev);
6627   
6628         wol->supported = WAKE_MAGIC;
6629         wol->wolopts = 0;
6630         if (tp->tg3_flags & TG3_FLAG_WOL_ENABLE)
6631                 wol->wolopts = WAKE_MAGIC;
6632         memset(&wol->sopass, 0, sizeof(wol->sopass));
6633 }
6634   
6635 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6636 {
6637         struct tg3 *tp = netdev_priv(dev);
6638   
6639         if (wol->wolopts & ~WAKE_MAGIC)
6640                 return -EINVAL;
6641         if ((wol->wolopts & WAKE_MAGIC) &&
6642             tp->tg3_flags2 & TG3_FLG2_PHY_SERDES &&
6643             !(tp->tg3_flags & TG3_FLAG_SERDES_WOL_CAP))
6644                 return -EINVAL;
6645   
6646         spin_lock_irq(&tp->lock);
6647         if (wol->wolopts & WAKE_MAGIC)
6648                 tp->tg3_flags |= TG3_FLAG_WOL_ENABLE;
6649         else
6650                 tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
6651         spin_unlock_irq(&tp->lock);
6652   
6653         return 0;
6654 }
6655   
6656 static u32 tg3_get_msglevel(struct net_device *dev)
6657 {
6658         struct tg3 *tp = netdev_priv(dev);
6659         return tp->msg_enable;
6660 }
6661   
6662 static void tg3_set_msglevel(struct net_device *dev, u32 value)
6663 {
6664         struct tg3 *tp = netdev_priv(dev);
6665         tp->msg_enable = value;
6666 }
6667   
6668 #if TG3_TSO_SUPPORT != 0
6669 static int tg3_set_tso(struct net_device *dev, u32 value)
6670 {
6671         struct tg3 *tp = netdev_priv(dev);
6672
6673         if (!(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)) {
6674                 if (value)
6675                         return -EINVAL;
6676                 return 0;
6677         }
6678         return ethtool_op_set_tso(dev, value);
6679 }
6680 #endif
6681   
6682 static int tg3_nway_reset(struct net_device *dev)
6683 {
6684         struct tg3 *tp = netdev_priv(dev);
6685         u32 bmcr;
6686         int r;
6687   
6688         if (!netif_running(dev))
6689                 return -EAGAIN;
6690
6691         spin_lock_irq(&tp->lock);
6692         r = -EINVAL;
6693         tg3_readphy(tp, MII_BMCR, &bmcr);
6694         if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
6695             (bmcr & BMCR_ANENABLE)) {
6696                 tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART);
6697                 r = 0;
6698         }
6699         spin_unlock_irq(&tp->lock);
6700   
6701         return r;
6702 }
6703   
6704 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6705 {
6706         struct tg3 *tp = netdev_priv(dev);
6707   
6708         ering->rx_max_pending = TG3_RX_RING_SIZE - 1;
6709         ering->rx_mini_max_pending = 0;
6710         ering->rx_jumbo_max_pending = TG3_RX_JUMBO_RING_SIZE - 1;
6711
6712         ering->rx_pending = tp->rx_pending;
6713         ering->rx_mini_pending = 0;
6714         ering->rx_jumbo_pending = tp->rx_jumbo_pending;
6715         ering->tx_pending = tp->tx_pending;
6716 }
6717   
6718 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6719 {
6720         struct tg3 *tp = netdev_priv(dev);
6721   
6722         if ((ering->rx_pending > TG3_RX_RING_SIZE - 1) ||
6723             (ering->rx_jumbo_pending > TG3_RX_JUMBO_RING_SIZE - 1) ||
6724             (ering->tx_pending > TG3_TX_RING_SIZE - 1))
6725                 return -EINVAL;
6726   
6727         if (netif_running(dev))
6728                 tg3_netif_stop(tp);
6729
6730         spin_lock_irq(&tp->lock);
6731         spin_lock(&tp->tx_lock);
6732   
6733         tp->rx_pending = ering->rx_pending;
6734
6735         if ((tp->tg3_flags2 & TG3_FLG2_MAX_RXPEND_64) &&
6736             tp->rx_pending > 63)
6737                 tp->rx_pending = 63;
6738         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
6739         tp->tx_pending = ering->tx_pending;
6740
6741         if (netif_running(dev)) {
6742                 tg3_halt(tp);
6743                 tg3_init_hw(tp);
6744                 tg3_netif_start(tp);
6745         }
6746
6747         spin_unlock(&tp->tx_lock);
6748         spin_unlock_irq(&tp->lock);
6749   
6750         return 0;
6751 }
6752   
6753 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6754 {
6755         struct tg3 *tp = netdev_priv(dev);
6756   
6757         epause->autoneg = (tp->tg3_flags & TG3_FLAG_PAUSE_AUTONEG) != 0;
6758         epause->rx_pause = (tp->tg3_flags & TG3_FLAG_RX_PAUSE) != 0;
6759         epause->tx_pause = (tp->tg3_flags & TG3_FLAG_TX_PAUSE) != 0;
6760 }
6761   
6762 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
6763 {
6764         struct tg3 *tp = netdev_priv(dev);
6765   
6766         if (netif_running(dev))
6767                 tg3_netif_stop(tp);
6768
6769         spin_lock_irq(&tp->lock);
6770         spin_lock(&tp->tx_lock);
6771         if (epause->autoneg)
6772                 tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
6773         else
6774                 tp->tg3_flags &= ~TG3_FLAG_PAUSE_AUTONEG;
6775         if (epause->rx_pause)
6776                 tp->tg3_flags |= TG3_FLAG_RX_PAUSE;
6777         else
6778                 tp->tg3_flags &= ~TG3_FLAG_RX_PAUSE;
6779         if (epause->tx_pause)
6780                 tp->tg3_flags |= TG3_FLAG_TX_PAUSE;
6781         else
6782                 tp->tg3_flags &= ~TG3_FLAG_TX_PAUSE;
6783
6784         if (netif_running(dev)) {
6785                 tg3_halt(tp);
6786                 tg3_init_hw(tp);
6787                 tg3_netif_start(tp);
6788         }
6789         spin_unlock(&tp->tx_lock);
6790         spin_unlock_irq(&tp->lock);
6791   
6792         return 0;
6793 }
6794   
6795 static u32 tg3_get_rx_csum(struct net_device *dev)
6796 {
6797         struct tg3 *tp = netdev_priv(dev);
6798         return (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0;
6799 }
6800   
6801 static int tg3_set_rx_csum(struct net_device *dev, u32 data)
6802 {
6803         struct tg3 *tp = netdev_priv(dev);
6804   
6805         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6806                 if (data != 0)
6807                         return -EINVAL;
6808                 return 0;
6809         }
6810   
6811         spin_lock_irq(&tp->lock);
6812         if (data)
6813                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
6814         else
6815                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
6816         spin_unlock_irq(&tp->lock);
6817   
6818         return 0;
6819 }
6820   
6821 static int tg3_set_tx_csum(struct net_device *dev, u32 data)
6822 {
6823         struct tg3 *tp = netdev_priv(dev);
6824   
6825         if (tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) {
6826                 if (data != 0)
6827                         return -EINVAL;
6828                 return 0;
6829         }
6830   
6831         if (data)
6832                 dev->features |= NETIF_F_IP_CSUM;
6833         else
6834                 dev->features &= ~NETIF_F_IP_CSUM;
6835
6836         return 0;
6837 }
6838
6839 static int tg3_get_stats_count (struct net_device *dev)
6840 {
6841         return TG3_NUM_STATS;
6842 }
6843
6844 static void tg3_get_strings (struct net_device *dev, u32 stringset, u8 *buf)
6845 {
6846         switch (stringset) {
6847         case ETH_SS_STATS:
6848                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
6849                 break;
6850         default:
6851                 WARN_ON(1);     /* we need a WARN() */
6852                 break;
6853         }
6854 }
6855
6856 static void tg3_get_ethtool_stats (struct net_device *dev,
6857                                    struct ethtool_stats *estats, u64 *tmp_stats)
6858 {
6859         struct tg3 *tp = netdev_priv(dev);
6860         memcpy(tmp_stats, tg3_get_estats(tp), sizeof(tp->estats));
6861 }
6862
6863 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6864 {
6865         struct mii_ioctl_data *data = if_mii(ifr);
6866         struct tg3 *tp = netdev_priv(dev);
6867         int err;
6868
6869         switch(cmd) {
6870         case SIOCGMIIPHY:
6871                 data->phy_id = PHY_ADDR;
6872
6873                 /* fallthru */
6874         case SIOCGMIIREG: {
6875                 u32 mii_regval;
6876
6877                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6878                         break;                  /* We have no PHY */
6879
6880                 spin_lock_irq(&tp->lock);
6881                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
6882                 spin_unlock_irq(&tp->lock);
6883
6884                 data->val_out = mii_regval;
6885
6886                 return err;
6887         }
6888
6889         case SIOCSMIIREG:
6890                 if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
6891                         break;                  /* We have no PHY */
6892
6893                 if (!capable(CAP_NET_ADMIN))
6894                         return -EPERM;
6895
6896                 spin_lock_irq(&tp->lock);
6897                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
6898                 spin_unlock_irq(&tp->lock);
6899
6900                 return err;
6901
6902         default:
6903                 /* do nothing */
6904                 break;
6905         }
6906         return -EOPNOTSUPP;
6907 }
6908
6909 #if TG3_VLAN_TAG_USED
6910 static void tg3_vlan_rx_register(struct net_device *dev, struct vlan_group *grp)
6911 {
6912         struct tg3 *tp = netdev_priv(dev);
6913
6914         spin_lock_irq(&tp->lock);
6915         spin_lock(&tp->tx_lock);
6916
6917         tp->vlgrp = grp;
6918
6919         /* Update RX_MODE_KEEP_VLAN_TAG bit in RX_MODE register. */
6920         __tg3_set_rx_mode(dev);
6921
6922         spin_unlock(&tp->tx_lock);
6923         spin_unlock_irq(&tp->lock);
6924 }
6925
6926 static void tg3_vlan_rx_kill_vid(struct net_device *dev, unsigned short vid)
6927 {
6928         struct tg3 *tp = netdev_priv(dev);
6929
6930         spin_lock_irq(&tp->lock);
6931         spin_lock(&tp->tx_lock);
6932         if (tp->vlgrp)
6933                 tp->vlgrp->vlan_devices[vid] = NULL;
6934         spin_unlock(&tp->tx_lock);
6935         spin_unlock_irq(&tp->lock);
6936 }
6937 #endif
6938
6939 static struct ethtool_ops tg3_ethtool_ops = {
6940         .get_settings           = tg3_get_settings,
6941         .set_settings           = tg3_set_settings,
6942         .get_drvinfo            = tg3_get_drvinfo,
6943         .get_regs_len           = tg3_get_regs_len,
6944         .get_regs               = tg3_get_regs,
6945         .get_wol                = tg3_get_wol,
6946         .set_wol                = tg3_set_wol,
6947         .get_msglevel           = tg3_get_msglevel,
6948         .set_msglevel           = tg3_set_msglevel,
6949         .nway_reset             = tg3_nway_reset,
6950         .get_link               = ethtool_op_get_link,
6951         .get_eeprom_len         = tg3_get_eeprom_len,
6952         .get_eeprom             = tg3_get_eeprom,
6953         .set_eeprom             = tg3_set_eeprom,
6954         .get_ringparam          = tg3_get_ringparam,
6955         .set_ringparam          = tg3_set_ringparam,
6956         .get_pauseparam         = tg3_get_pauseparam,
6957         .set_pauseparam         = tg3_set_pauseparam,
6958         .get_rx_csum            = tg3_get_rx_csum,
6959         .set_rx_csum            = tg3_set_rx_csum,
6960         .get_tx_csum            = ethtool_op_get_tx_csum,
6961         .set_tx_csum            = tg3_set_tx_csum,
6962         .get_sg                 = ethtool_op_get_sg,
6963         .set_sg                 = ethtool_op_set_sg,
6964 #if TG3_TSO_SUPPORT != 0
6965         .get_tso                = ethtool_op_get_tso,
6966         .set_tso                = tg3_set_tso,
6967 #endif
6968         .get_strings            = tg3_get_strings,
6969         .get_stats_count        = tg3_get_stats_count,
6970         .get_ethtool_stats      = tg3_get_ethtool_stats,
6971 };
6972
6973 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
6974 {
6975         u32 cursize, val;
6976
6977         tp->nvram_size = EEPROM_CHIP_SIZE;
6978
6979         if (tg3_nvram_read(tp, 0, &val) != 0)
6980                 return;
6981
6982         if (swab32(val) != TG3_EEPROM_MAGIC)
6983                 return;
6984
6985         /*
6986          * Size the chip by reading offsets at increasing powers of two.
6987          * When we encounter our validation signature, we know the addressing
6988          * has wrapped around, and thus have our chip size.
6989          */
6990         cursize = 0x800;
6991
6992         while (cursize < tp->nvram_size) {
6993                 if (tg3_nvram_read(tp, cursize, &val) != 0)
6994                         return;
6995
6996                 if (swab32(val) == TG3_EEPROM_MAGIC)
6997                         break;
6998
6999                 cursize <<= 1;
7000         }
7001
7002         tp->nvram_size = cursize;
7003 }
7004                 
7005 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
7006 {
7007         u32 val;
7008
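             /* The NVRAM size in kilobytes lives in the upper 16 bits
              * of the word at offset 0xf0; fall back to the 128KB
              * default if it is zero or unreadable.
              */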
7009         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
7010                 if (val != 0) {
7011                         tp->nvram_size = (val >> 16) * 1024;
7012                         return;
7013                 }
7014         }
7015         tp->nvram_size = 0x20000;
7016 }
7017
7018 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
7019 {
7020         u32 nvcfg1;
7021
7022         nvcfg1 = tr32(NVRAM_CFG1);
7023         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
7024                 tp->tg3_flags2 |= TG3_FLG2_FLASH;
7025         }
7026         else {
7027                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
7028                 tw32(NVRAM_CFG1, nvcfg1);
7029         }
7030
7031         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7032                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
7033                         case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
7034                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7035                                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7036                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7037                                 break;
7038                         case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
7039                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7040                                 tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
7041                                 break;
7042                         case FLASH_VENDOR_ATMEL_EEPROM:
7043                                 tp->nvram_jedecnum = JEDEC_ATMEL;
7044                                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
7045                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7046                                 break;
7047                         case FLASH_VENDOR_ST:
7048                                 tp->nvram_jedecnum = JEDEC_ST;
7049                                 tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
7050                                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7051                                 break;
7052                         case FLASH_VENDOR_SAIFUN:
7053                                 tp->nvram_jedecnum = JEDEC_SAIFUN;
7054                                 tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
7055                                 break;
7056                         case FLASH_VENDOR_SST_SMALL:
7057                         case FLASH_VENDOR_SST_LARGE:
7058                                 tp->nvram_jedecnum = JEDEC_SST;
7059                                 tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
7060                                 break;
7061                 }
7062         }
7063         else {
7064                 tp->nvram_jedecnum = JEDEC_ATMEL;
7065                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
7066                 tp->tg3_flags |= TG3_FLAG_NVRAM_BUFFERED;
7067         }
7068 }
7069
7070 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
7071 static void __devinit tg3_nvram_init(struct tg3 *tp)
7072 {
7073         int j;
7074
7075         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X)
7076                 return;
7077
7078         tw32_f(GRC_EEPROM_ADDR,
7079              (EEPROM_ADDR_FSM_RESET |
7080               (EEPROM_DEFAULT_CLOCK_PERIOD <<
7081                EEPROM_ADDR_CLKPERD_SHIFT)));
7082
7083         /* XXX schedule_timeout() ... */
7084         for (j = 0; j < 100; j++)
7085                 udelay(10);
7086
7087         /* Enable seeprom accesses. */
7088         tw32_f(GRC_LOCAL_CTRL,
7089              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
7090         udelay(100);
7091
7092         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
7093             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
7094                 tp->tg3_flags |= TG3_FLAG_NVRAM;
7095
7096                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7097                         u32 nvaccess = tr32(NVRAM_ACCESS);
7098
7099                         tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7100                 }
7101
7102                 tg3_get_nvram_info(tp);
7103                 tg3_get_nvram_size(tp);
7104
7105                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7106                         u32 nvaccess = tr32(NVRAM_ACCESS);
7107
7108                         tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7109                 }
7110
7111         } else {
7112                 tp->tg3_flags &= ~(TG3_FLAG_NVRAM | TG3_FLAG_NVRAM_BUFFERED);
7113
7114                 tg3_get_eeprom_size(tp);
7115         }
7116 }
7117
7118 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
7119                                         u32 offset, u32 *val)
7120 {
7121         u32 tmp;
7122         int i;
7123
7124         if (offset > EEPROM_ADDR_ADDR_MASK ||
7125             (offset % 4) != 0)
7126                 return -EINVAL;
7127
7128         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
7129                                         EEPROM_ADDR_DEVID_MASK |
7130                                         EEPROM_ADDR_READ);
7131         tw32(GRC_EEPROM_ADDR,
7132              tmp |
7133              (0 << EEPROM_ADDR_DEVID_SHIFT) |
7134              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
7135               EEPROM_ADDR_ADDR_MASK) |
7136              EEPROM_ADDR_READ | EEPROM_ADDR_START);
7137
7138         for (i = 0; i < 10000; i++) {
7139                 tmp = tr32(GRC_EEPROM_ADDR);
7140
7141                 if (tmp & EEPROM_ADDR_COMPLETE)
7142                         break;
7143                 udelay(100);
7144         }
7145         if (!(tmp & EEPROM_ADDR_COMPLETE))
7146                 return -EBUSY;
7147
7148         *val = tr32(GRC_EEPROM_DATA);
7149         return 0;
7150 }
7151
7152 #define NVRAM_CMD_TIMEOUT 10000
7153
7154 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
7155 {
7156         int i;
7157
7158         tw32(NVRAM_CMD, nvram_cmd);
7159         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
7160                 udelay(10);
7161                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
7162                         udelay(10);
7163                         break;
7164                 }
7165         }
7166         if (i == NVRAM_CMD_TIMEOUT) {
7167                 return -EBUSY;
7168         }
7169         return 0;
7170 }
7171
7172 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
7173 {
7174         int ret;
7175
7176         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7177                 printk(KERN_ERR PFX "Attempt to do nvram_read on Sun 570X\n");
7178                 return -EINVAL;
7179         }
7180
7181         if (!(tp->tg3_flags & TG3_FLAG_NVRAM))
7182                 return tg3_nvram_read_using_eeprom(tp, offset, val);
7183
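             /* Buffered Atmel (AT45DB-style) flash is addressed by page
              * number and byte-within-page rather than by a flat byte
              * offset, so split the linear offset into those two fields.
              */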
7184         if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) &&
7185                 (tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7186                 (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7187
7188                 offset = ((offset / tp->nvram_pagesize) <<
7189                           ATMEL_AT45DB0X1B_PAGE_POS) +
7190                         (offset % tp->nvram_pagesize);
7191         }
7192
7193         if (offset > NVRAM_ADDR_MSK)
7194                 return -EINVAL;
7195
7196         tg3_nvram_lock(tp);
7197
7198         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7199                 u32 nvaccess = tr32(NVRAM_ACCESS);
7200
7201                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7202         }
7203
7204         tw32(NVRAM_ADDR, offset);
7205         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
7206                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
7207
7208         if (ret == 0)
7209                 *val = swab32(tr32(NVRAM_RDDATA));
7210
7211         tg3_nvram_unlock(tp);
7212
7213         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7214                 u32 nvaccess = tr32(NVRAM_ACCESS);
7215
7216                 tw32_f(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7217         }
7218
7219         return ret;
7220 }
7221
7222 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
7223                                     u32 offset, u32 len, u8 *buf)
7224 {
7225         int i, j, rc = 0;
7226         u32 val;
7227
7228         for (i = 0; i < len; i += 4) {
7229                 u32 addr, data;
7230
7231                 addr = offset + i;
7232
7233                 memcpy(&data, buf + i, 4);
7234
7235                 tw32(GRC_EEPROM_DATA, cpu_to_le32(data));
7236
7237                 val = tr32(GRC_EEPROM_ADDR);
7238                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
7239
7240                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
7241                         EEPROM_ADDR_READ);
7242                 tw32(GRC_EEPROM_ADDR, val |
7243                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
7244                         (addr & EEPROM_ADDR_ADDR_MASK) |
7245                         EEPROM_ADDR_START |
7246                         EEPROM_ADDR_WRITE);
7247                 
7248                 for (j = 0; j < 10000; j++) {
7249                         val = tr32(GRC_EEPROM_ADDR);
7250
7251                         if (val & EEPROM_ADDR_COMPLETE)
7252                                 break;
7253                         udelay(100);
7254                 }
7255                 if (!(val & EEPROM_ADDR_COMPLETE)) {
7256                         rc = -EBUSY;
7257                         break;
7258                 }
7259         }
7260
7261         return rc;
7262 }
7263
7264 /* offset and length are dword aligned */
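     /* Unbuffered flash must be rewritten a full page at a time: read
      * the target page into a bounce buffer, merge in the new data,
      * then issue write-enable, erase the page, write-enable again and
      * program it back word by word.
      */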
7265 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
7266                 u8 *buf)
7267 {
7268         int ret = 0;
7269         u32 pagesize = tp->nvram_pagesize;
7270         u32 pagemask = pagesize - 1;
7271         u32 nvram_cmd;
7272         u8 *tmp;
7273
7274         tmp = kmalloc(pagesize, GFP_KERNEL);
7275         if (tmp == NULL)
7276                 return -ENOMEM;
7277
7278         while (len) {
7279                 int j;
7280                 u32 phy_addr, page_off, size, nvaccess;
7281
7282                 phy_addr = offset & ~pagemask;
7283         
7284                 for (j = 0; j < pagesize; j += 4) {
7285                         if ((ret = tg3_nvram_read(tp, phy_addr + j,
7286                                                 (u32 *) (tmp + j))))
7287                                 break;
7288                 }
7289                 if (ret)
7290                         break;
7291
7292                 page_off = offset & pagemask;
7293                 size = pagesize;
7294                 if (len < size)
7295                         size = len;
7296
7297                 len -= size;
7298
7299                 memcpy(tmp + page_off, buf, size);
7300
7301                 offset = offset + (pagesize - page_off);
7302
7303                 nvaccess = tr32(NVRAM_ACCESS);
7304                 tw32_f(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7305
7306                 /*
7307                  * Before we can erase the flash page, we need
7308                  * to issue a special "write enable" command.
7309                  */
7310                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7311
7312                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7313                         break;
7314
7315                 /* Erase the target page */
7316                 tw32(NVRAM_ADDR, phy_addr);
7317
7318                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
7319                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
7320
7321                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7322                         break;
7323
7324                 /* Issue another write enable to start the write. */
7325                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7326
7327                 if (tg3_nvram_exec_cmd(tp, nvram_cmd))
7328                         break;
7329
7330                 for (j = 0; j < pagesize; j += 4) {
7331                         u32 data;
7332
7333                         data = *((u32 *) (tmp + j));
7334                         tw32(NVRAM_WRDATA, cpu_to_be32(data));
7335
7336                         tw32(NVRAM_ADDR, phy_addr + j);
7337
7338                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
7339                                 NVRAM_CMD_WR;
7340
7341                         if (j == 0)
7342                                 nvram_cmd |= NVRAM_CMD_FIRST;
7343                         else if (j == (pagesize - 4))
7344                                 nvram_cmd |= NVRAM_CMD_LAST;
7345
7346                         if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7347                                 break;
7348                 }
7349                 if (ret)
7350                         break;
7351         }
7352
7353         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
7354         tg3_nvram_exec_cmd(tp, nvram_cmd);
7355
7356         kfree(tmp);
7357
7358         return ret;
7359 }
7360
7361 /* offset and length are dword aligned */
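     /* Buffered flash and seeprom parts are programmed one 32-bit word
      * at a time; NVRAM_CMD_FIRST/NVRAM_CMD_LAST mark page and transfer
      * boundaries, and ST parts need a write-enable before each FIRST
      * word.
      */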
7362 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
7363                 u8 *buf)
7364 {
7365         int i, ret = 0;
7366
7367         for (i = 0; i < len; i += 4, offset += 4) {
7368                 u32 data, page_off, phy_addr, nvram_cmd;
7369
7370                 memcpy(&data, buf + i, 4);
7371                 tw32(NVRAM_WRDATA, cpu_to_be32(data));
7372
7373                 page_off = offset % tp->nvram_pagesize;
7374
7375                 if ((tp->tg3_flags2 & TG3_FLG2_FLASH) &&
7376                         (tp->nvram_jedecnum == JEDEC_ATMEL)) {
7377
7378                         phy_addr = ((offset / tp->nvram_pagesize) <<
7379                                     ATMEL_AT45DB0X1B_PAGE_POS) + page_off;
7380                 }
7381                 else {
7382                         phy_addr = offset;
7383                 }
7384
7385                 tw32(NVRAM_ADDR, phy_addr);
7386
7387                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
7388
7389                 if ((page_off == 0) || (i == 0))
7390                         nvram_cmd |= NVRAM_CMD_FIRST;
7391                 else if (page_off == (tp->nvram_pagesize - 4))
7392                         nvram_cmd |= NVRAM_CMD_LAST;
7393
7394                 if (i == (len - 4))
7395                         nvram_cmd |= NVRAM_CMD_LAST;
7396
7397                 if ((tp->nvram_jedecnum == JEDEC_ST) &&
7398                         (nvram_cmd & NVRAM_CMD_FIRST)) {
7399
7400                         if ((ret = tg3_nvram_exec_cmd(tp,
7401                                 NVRAM_CMD_WREN | NVRAM_CMD_GO |
7402                                 NVRAM_CMD_DONE)))
7403
7404                                 break;
7405                 }
7406                 if (!(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7407                         /* We always do complete word writes to eeprom. */
7408                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
7409                 }
7410
7411                 if ((ret = tg3_nvram_exec_cmd(tp, nvram_cmd)))
7412                         break;
7413         }
7414         return ret;
7415 }
7416
7417 /* offset and length are dword aligned */
7418 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
7419 {
7420         int ret;
7421
7422         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7423                 printk(KERN_ERR PFX "Attempt to do nvram_write on Sun 570X\n");
7424                 return -EINVAL;
7425         }
7426
7427         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7428                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
7429                        GRC_LCLCTRL_GPIO_OE1);
7430                 udelay(40);
7431         }
7432
7433         if (!(tp->tg3_flags & TG3_FLAG_NVRAM)) {
7434                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
7435         }
7436         else {
7437                 u32 grc_mode;
7438
7439                 tg3_nvram_lock(tp);
7440
7441                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7442                         u32 nvaccess = tr32(NVRAM_ACCESS);
7443
7444                         tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
7445
7446                         tw32(NVRAM_WRITE1, 0x406);
7447                 }
7448
7449                 grc_mode = tr32(GRC_MODE);
7450                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
7451
7452                 if ((tp->tg3_flags & TG3_FLAG_NVRAM_BUFFERED) ||
7453                         !(tp->tg3_flags2 & TG3_FLG2_FLASH)) {
7454
7455                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
7456                                 buf);
7457                 }
7458                 else {
7459                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
7460                                 buf);
7461                 }
7462
7463                 grc_mode = tr32(GRC_MODE);
7464                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
7465
7466                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7467                         u32 nvaccess = tr32(NVRAM_ACCESS);
7468
7469                         tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
7470                 }
7471                 tg3_nvram_unlock(tp);
7472         }
7473
7474         if (tp->tg3_flags & TG3_FLAG_EEPROM_WRITE_PROT) {
7475                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
7476                        GRC_LCLCTRL_GPIO_OE1 | GRC_LCLCTRL_GPIO_OUTPUT1);
7477                 udelay(40);
7478         }
7479
7480         return ret;
7481 }
7482
7483 struct subsys_tbl_ent {
7484         u16 subsys_vendor, subsys_devid;
7485         u32 phy_id;
7486 };
7487
7488 static struct subsys_tbl_ent subsys_id_to_phy_id[] = {
7489         /* Broadcom boards. */
7490         { PCI_VENDOR_ID_BROADCOM, 0x1644, PHY_ID_BCM5401 }, /* BCM95700A6 */
7491         { PCI_VENDOR_ID_BROADCOM, 0x0001, PHY_ID_BCM5701 }, /* BCM95701A5 */
7492         { PCI_VENDOR_ID_BROADCOM, 0x0002, PHY_ID_BCM8002 }, /* BCM95700T6 */
7493         { PCI_VENDOR_ID_BROADCOM, 0x0003, 0 },              /* BCM95700A9 */
7494         { PCI_VENDOR_ID_BROADCOM, 0x0005, PHY_ID_BCM5701 }, /* BCM95701T1 */
7495         { PCI_VENDOR_ID_BROADCOM, 0x0006, PHY_ID_BCM5701 }, /* BCM95701T8 */
7496         { PCI_VENDOR_ID_BROADCOM, 0x0007, 0 },              /* BCM95701A7 */
7497         { PCI_VENDOR_ID_BROADCOM, 0x0008, PHY_ID_BCM5701 }, /* BCM95701A10 */
7498         { PCI_VENDOR_ID_BROADCOM, 0x8008, PHY_ID_BCM5701 }, /* BCM95701A12 */
7499         { PCI_VENDOR_ID_BROADCOM, 0x0009, PHY_ID_BCM5703 }, /* BCM95703Ax1 */
7500         { PCI_VENDOR_ID_BROADCOM, 0x8009, PHY_ID_BCM5703 }, /* BCM95703Ax2 */
7501
7502         /* 3com boards. */
7503         { PCI_VENDOR_ID_3COM, 0x1000, PHY_ID_BCM5401 }, /* 3C996T */
7504         { PCI_VENDOR_ID_3COM, 0x1006, PHY_ID_BCM5701 }, /* 3C996BT */
7505         { PCI_VENDOR_ID_3COM, 0x1004, 0 },              /* 3C996SX */
7506         { PCI_VENDOR_ID_3COM, 0x1007, PHY_ID_BCM5701 }, /* 3C1000T */
7507         { PCI_VENDOR_ID_3COM, 0x1008, PHY_ID_BCM5701 }, /* 3C940BR01 */
7508
7509         /* DELL boards. */
7510         { PCI_VENDOR_ID_DELL, 0x00d1, PHY_ID_BCM5401 }, /* VIPER */
7511         { PCI_VENDOR_ID_DELL, 0x0106, PHY_ID_BCM5401 }, /* JAGUAR */
7512         { PCI_VENDOR_ID_DELL, 0x0109, PHY_ID_BCM5411 }, /* MERLOT */
7513         { PCI_VENDOR_ID_DELL, 0x010a, PHY_ID_BCM5411 }, /* SLIM_MERLOT */
7514
7515         /* Compaq boards. */
7516         { PCI_VENDOR_ID_COMPAQ, 0x007c, PHY_ID_BCM5701 }, /* BANSHEE */
7517         { PCI_VENDOR_ID_COMPAQ, 0x009a, PHY_ID_BCM5701 }, /* BANSHEE_2 */
7518         { PCI_VENDOR_ID_COMPAQ, 0x007d, 0 },              /* CHANGELING */
7519         { PCI_VENDOR_ID_COMPAQ, 0x0085, PHY_ID_BCM5701 }, /* NC7780 */
7520         { PCI_VENDOR_ID_COMPAQ, 0x0099, PHY_ID_BCM5701 }, /* NC7780_2 */
7521
7522         /* IBM boards. */
7523         { PCI_VENDOR_ID_IBM, 0x0281, 0 } /* IBM??? */
7524 };
7525
7526 static inline struct subsys_tbl_ent *lookup_by_subsys(struct tg3 *tp)
7527 {
7528         int i;
7529
7530         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
7531                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
7532                      tp->pdev->subsystem_vendor) &&
7533                     (subsys_id_to_phy_id[i].subsys_devid ==
7534                      tp->pdev->subsystem_device))
7535                         return &subsys_id_to_phy_id[i];
7536         }
7537         return NULL;
7538 }
7539
7540 static int __devinit tg3_phy_probe(struct tg3 *tp)
7541 {
7542         u32 eeprom_phy_id, hw_phy_id_1, hw_phy_id_2;
7543         u32 hw_phy_id, hw_phy_id_masked;
7544         u32 val;
7545         int eeprom_signature_found, eeprom_phy_serdes, err;
7546
7547         tp->phy_id = PHY_ID_INVALID;
7548         eeprom_phy_id = PHY_ID_INVALID;
7549         eeprom_phy_serdes = 0;
7550         eeprom_signature_found = 0;
7551         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7552         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7553                 u32 nic_cfg, led_cfg;
7554                 u32 nic_phy_id, ver, cfg2 = 0;
7555
7556                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7557                 tp->nic_sram_data_cfg = nic_cfg;
7558
7559                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
7560                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
7561                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7562                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7563                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703) &&
7564                     (ver > 0) && (ver < 0x100))
7565                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
7566
7567                 eeprom_signature_found = 1;
7568
7569                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
7570                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
7571                         eeprom_phy_serdes = 1;
7572
7573                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
7574                 if (nic_phy_id != 0) {
7575                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
7576                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
7577
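                             /* Pack the SRAM PHY ID words into the same
                              * layout produced from MII_PHYSID1/2 below
                              * so either value can land in tp->phy_id.
                              */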
7578                         eeprom_phy_id  = (id1 >> 16) << 10;
7579                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
7580                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
7581                 } else
7582                         eeprom_phy_id = 0;
7583
7584                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750) {
7585                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
7586                                     SHASTA_EXT_LED_MODE_MASK);
7587                 } else
7588                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
7589
7590                 switch (led_cfg) {
7591                 default:
7592                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
7593                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7594                         break;
7595
7596                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
7597                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7598                         break;
7599
7600                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
7601                         tp->led_ctrl = LED_CTRL_MODE_MAC;
7602                         break;
7603
7604                 case SHASTA_EXT_LED_SHARED:
7605                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
7606                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7607                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
7608                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7609                                                  LED_CTRL_MODE_PHY_2);
7610                         break;
7611
7612                 case SHASTA_EXT_LED_MAC:
7613                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
7614                         break;
7615
7616                 case SHASTA_EXT_LED_COMBO:
7617                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
7618                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
7619                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
7620                                                  LED_CTRL_MODE_PHY_2);
7621                         break;
7622
7623                 }
7624
7625                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
7626                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
7627                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
7628                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
7629
7630                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700) &&
7631                     (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) &&
7632                     (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP))
7633                         tp->tg3_flags |= TG3_FLAG_EEPROM_WRITE_PROT;
7634
7635                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
7636                         tp->tg3_flags |= TG3_FLAG_ENABLE_ASF;
7637                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7638                                 tp->tg3_flags2 |= TG3_FLG2_ASF_NEW_HANDSHAKE;
7639                 }
7640                 if (nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL)
7641                         tp->tg3_flags |= TG3_FLAG_SERDES_WOL_CAP;
7642
7643                 if (cfg2 & (1 << 17))
7644                         tp->tg3_flags2 |= TG3_FLG2_CAPACITIVE_COUPLING;
7645
7646                 /* SerDes signal pre-emphasis in register 0x590 is
7647                  * set by the bootcode if bit 18 is set. */
7648                 if (cfg2 & (1 << 18))
7649                         tp->tg3_flags2 |= TG3_FLG2_SERDES_PREEMPHASIS;
7650         }
7651
7652         /* Reading the PHY ID register can conflict with ASF
7653          * firmware access to the PHY hardware.
7654          */
7655         err = 0;
7656         if (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) {
7657                 hw_phy_id = hw_phy_id_masked = PHY_ID_INVALID;
7658         } else {
7659                 /* Now read the physical PHY_ID from the chip and verify
7660                  * that it is sane.  If it doesn't look good, we fall back
7661                  * to the PHY_ID found in the eeprom area and, failing
7662                  * that, the hard-coded subsystem-ID table.
7663                  */
7664                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
7665                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
7666
7667                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
7668                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
7669                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
7670
7671                 hw_phy_id_masked = hw_phy_id & PHY_ID_MASK;
7672         }
7673
7674         if (!err && KNOWN_PHY_ID(hw_phy_id_masked)) {
7675                 tp->phy_id = hw_phy_id;
7676                 if (hw_phy_id_masked == PHY_ID_BCM8002)
7677                         tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7678         } else {
7679                 if (eeprom_signature_found) {
7680                         tp->phy_id = eeprom_phy_id;
7681                         if (eeprom_phy_serdes)
7682                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7683                 } else {
7684                         struct subsys_tbl_ent *p;
7685
7686                         /* No eeprom signature?  Try the hardcoded
7687                          * subsys device table.
7688                          */
7689                         p = lookup_by_subsys(tp);
7690                         if (!p)
7691                                 return -ENODEV;
7692
7693                         tp->phy_id = p->phy_id;
7694                         if (!tp->phy_id ||
7695                             tp->phy_id == PHY_ID_BCM8002)
7696                                 tp->tg3_flags2 |= TG3_FLG2_PHY_SERDES;
7697                 }
7698         }
7699
7700         if (!(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) &&
7701             !(tp->tg3_flags & TG3_FLAG_ENABLE_ASF)) {
7702                 u32 bmsr, adv_reg, tg3_ctrl;
7703
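                     /* BMSR's link-status bit is latched-low, so read
                      * it twice to get the current link state.
                      */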
7704                 tg3_readphy(tp, MII_BMSR, &bmsr);
7705                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
7706                     (bmsr & BMSR_LSTATUS))
7707                         goto skip_phy_reset;
7708                     
7709                 err = tg3_phy_reset(tp);
7710                 if (err)
7711                         return err;
7712
7713                 adv_reg = (ADVERTISE_10HALF | ADVERTISE_10FULL |
7714                            ADVERTISE_100HALF | ADVERTISE_100FULL |
7715                            ADVERTISE_CSMA | ADVERTISE_PAUSE_CAP);
7716                 tg3_ctrl = 0;
7717                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY)) {
7718                         tg3_ctrl = (MII_TG3_CTRL_ADV_1000_HALF |
7719                                     MII_TG3_CTRL_ADV_1000_FULL);
7720                         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
7721                             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
7722                                 tg3_ctrl |= (MII_TG3_CTRL_AS_MASTER |
7723                                              MII_TG3_CTRL_ENABLE_AS_MASTER);
7724                 }
7725
7726                 if (!tg3_copper_is_advertising_all(tp)) {
7727                         tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7728
7729                         if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7730                                 tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7731
7732                         tg3_writephy(tp, MII_BMCR,
7733                                      BMCR_ANENABLE | BMCR_ANRESTART);
7734                 }
7735                 tg3_phy_set_wirespeed(tp);
7736
7737                 tg3_writephy(tp, MII_ADVERTISE, adv_reg);
7738                 if (!(tp->tg3_flags & TG3_FLAG_10_100_ONLY))
7739                         tg3_writephy(tp, MII_TG3_CTRL, tg3_ctrl);
7740         }
7741
7742 skip_phy_reset:
7743         if ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401) {
7744                 err = tg3_init_5401phy_dsp(tp);
7745                 if (err)
7746                         return err;
7747         }
7748
7749         if (!err && ((tp->phy_id & PHY_ID_MASK) == PHY_ID_BCM5401)) {
7750                 err = tg3_init_5401phy_dsp(tp);
7751         }
7752
7753         if (!eeprom_signature_found)
7754                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
7755
7756         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
7757                 tp->link_config.advertising =
7758                         (ADVERTISED_1000baseT_Half |
7759                          ADVERTISED_1000baseT_Full |
7760                          ADVERTISED_Autoneg |
7761                          ADVERTISED_FIBRE);
7762         if (tp->tg3_flags & TG3_FLAG_10_100_ONLY)
7763                 tp->link_config.advertising &=
7764                         ~(ADVERTISED_1000baseT_Half |
7765                           ADVERTISED_1000baseT_Full);
7766
7767         return err;
7768 }
7769
7770 static void __devinit tg3_read_partno(struct tg3 *tp)
7771 {
7772         unsigned char vpd_data[256];
7773         int i;
7774
7775         if (tp->tg3_flags2 & TG3_FLG2_SUN_570X) {
7776                 /* Sun decided not to put the necessary bits in the
7777                  * NVRAM of their onboard tg3 parts :(
7778                  */
7779                 strcpy(tp->board_part_number, "Sun 570X");
7780                 return;
7781         }
7782
7783         for (i = 0; i < 256; i += 4) {
7784                 u32 tmp;
7785
7786                 if (tg3_nvram_read(tp, 0x100 + i, &tmp))
7787                         goto out_not_found;
7788
7789                 vpd_data[i + 0] = ((tmp >>  0) & 0xff);
7790                 vpd_data[i + 1] = ((tmp >>  8) & 0xff);
7791                 vpd_data[i + 2] = ((tmp >> 16) & 0xff);
7792                 vpd_data[i + 3] = ((tmp >> 24) & 0xff);
7793         }
7794
7795         /* Now parse and find the part number. */
7796         for (i = 0; i < 256; ) {
7797                 unsigned char val = vpd_data[i];
7798                 int block_end;
7799
7800                 if (val == 0x82 || val == 0x91) {
7801                         i = (i + 3 +
7802                              (vpd_data[i + 1] +
7803                               (vpd_data[i + 2] << 8)));
7804                         continue;
7805                 }
7806
7807                 if (val != 0x90)
7808                         goto out_not_found;
7809
7810                 block_end = (i + 3 +
7811                              (vpd_data[i + 1] +
7812                               (vpd_data[i + 2] << 8)));
7813                 i += 3;
7814                 while (i < block_end) {
7815                         if (vpd_data[i + 0] == 'P' &&
7816                             vpd_data[i + 1] == 'N') {
7817                                 int partno_len = vpd_data[i + 2];
7818
7819                                 if (partno_len > 24)
7820                                         goto out_not_found;
7821
7822                                 memcpy(tp->board_part_number,
7823                                        &vpd_data[i + 3],
7824                                        partno_len);
7825
7826                                 /* Success. */
7827                                 return;
7828                         }
                             /* Not a "PN" keyword; advance to the next VPD entry. */
                             i += 3 + vpd_data[i + 2];
7829                 }
7830
7831                 /* Part number not found. */
7832                 goto out_not_found;
7833         }
7834
7835 out_not_found:
7836         strcpy(tp->board_part_number, "none");
7837 }
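
     /* For reference, the VPD image parsed above follows the PCI VPD
      * resource format.  A minimal, hypothetical layout looks like:
      *
      *   0x82 len_lo len_hi "Broadcom ..."     Identifier String resource
      *   0x90 len_lo len_hi                    VPD-R (read-only) resource
      *        'P' 'N' 0x08 "BCM95704"          part number keyword entry
      *        ...                              further keyword entries
      *   0x78                                  end tag (small resource)
      *
      * The parser skips 0x82/0x91 resources, requires a 0x90 block, walks
      * its keyword entries (two-byte keyword, one-byte length, payload),
      * and copies out the "PN" payload.
      */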
7838
7839 #ifdef CONFIG_SPARC64
7840 static int __devinit tg3_is_sun_570X(struct tg3 *tp)
7841 {
7842         struct pci_dev *pdev = tp->pdev;
7843         struct pcidev_cookie *pcp = pdev->sysdata;
7844
7845         if (pcp != NULL) {
7846                 int node = pcp->prom_node;
7847                 u32 venid;
7848                 int err;
7849
7850                 err = prom_getproperty(node, "subsystem-vendor-id",
7851                                        (char *) &venid, sizeof(venid));
7852                 if (err == 0 || err == -1)
7853                         return 0;
7854                 if (venid == PCI_VENDOR_ID_SUN)
7855                         return 1;
7856         }
7857         return 0;
7858 }
7859 #endif
7860
7861 static int __devinit tg3_get_invariants(struct tg3 *tp)
7862 {
7863         static struct pci_device_id write_reorder_chipsets[] = {
7864                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7865                              PCI_DEVICE_ID_INTEL_82801AA_8) },
7866                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7867                              PCI_DEVICE_ID_INTEL_82801AB_8) },
7868                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7869                              PCI_DEVICE_ID_INTEL_82801BA_11) },
7870                 { PCI_DEVICE(PCI_VENDOR_ID_INTEL,
7871                              PCI_DEVICE_ID_INTEL_82801BA_6) },
7872                 { PCI_DEVICE(PCI_VENDOR_ID_AMD,
7873                              PCI_DEVICE_ID_AMD_FE_GATE_700C) },
7874                 { },
7875         };
7876         u32 misc_ctrl_reg;
7877         u32 cacheline_sz_reg;
7878         u32 pci_state_reg, grc_misc_cfg;
7879         u32 val;
7880         u16 pci_cmd;
7881         int err;
7882
7883 #ifdef CONFIG_SPARC64
7884         if (tg3_is_sun_570X(tp))
7885                 tp->tg3_flags2 |= TG3_FLG2_SUN_570X;
7886 #endif
7887
7888         /* If we have an AMD 762 or Intel ICH/ICH0/ICH2 chipset, write
7889          * reordering to the mailbox registers done by the host
7890          * controller can cause major troubles.  We read back from
7891          * every mailbox register write to force the writes to be
7892          * posted to the chip in order.
7893          */
7894         if (pci_dev_present(write_reorder_chipsets))
7895                 tp->tg3_flags |= TG3_FLAG_MBOX_WRITE_REORDER;
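
             /* A minimal sketch of what TG3_FLAG_MBOX_WRITE_REORDER implies
              * for mailbox accesses elsewhere in the driver (hypothetical
              * helper; "mbox" stands for a mapped mailbox register):
              *
              *      writel(val, mbox);
              *      if (tp->tg3_flags & TG3_FLAG_MBOX_WRITE_REORDER)
              *              readl(mbox);
              *
              * The read back flushes the posted write and defeats write
              * reordering in the host controller, so mailbox writes reach
              * the chip in program order.
              */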
7896
7897         /* Force memory write invalidate off.  If we leave it on,
7898          * then on 5700_BX chips we have to enable a workaround.
7899          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
7900          * to match the cacheline size.  The Broadcom driver has this
7901          * workaround but turns MWI off all the time, so it never uses
7902          * it.  This seems to suggest that the workaround is insufficient.
7903          */
7904         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7905         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
7906         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7907
7908         /* It is absolutely critical that TG3PCI_MISC_HOST_CTRL
7909          * has the register indirect write enable bit set before
7910          * we try to access any of the MMIO registers.  It is also
7911          * critical that the PCI-X hw workaround situation is decided
7912          * before that as well.
7913          */
7914         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7915                               &misc_ctrl_reg);
7916
7917         tp->pci_chip_rev_id = (misc_ctrl_reg >>
7918                                MISC_HOST_CTRL_CHIPREV_SHIFT);
7919
7920         /* Initialize misc host control in PCI block. */
7921         tp->misc_host_ctrl |= (misc_ctrl_reg &
7922                                MISC_HOST_CTRL_CHIPREV);
7923         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7924                                tp->misc_host_ctrl);
7925
7926         pci_read_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7927                               &cacheline_sz_reg);
7928
7929         tp->pci_cacheline_sz = (cacheline_sz_reg >>  0) & 0xff;
7930         tp->pci_lat_timer    = (cacheline_sz_reg >>  8) & 0xff;
7931         tp->pci_hdr_type     = (cacheline_sz_reg >> 16) & 0xff;
7932         tp->pci_bist         = (cacheline_sz_reg >> 24) & 0xff;
7933
7934         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) ||
7935             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750))
7936                 tp->tg3_flags2 |= TG3_FLG2_5705_PLUS;
7937
7938         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
7939                 tp->tg3_flags2 |= TG3_FLG2_HW_TSO;
7940
7941         if (pci_find_capability(tp->pdev, PCI_CAP_ID_EXP) != 0)
7942                 tp->tg3_flags2 |= TG3_FLG2_PCI_EXPRESS;
7943
7944         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
7945             tp->pci_lat_timer < 64) {
7946                 tp->pci_lat_timer = 64;
7947
7948                 cacheline_sz_reg  = ((tp->pci_cacheline_sz & 0xff) <<  0);
7949                 cacheline_sz_reg |= ((tp->pci_lat_timer    & 0xff) <<  8);
7950                 cacheline_sz_reg |= ((tp->pci_hdr_type     & 0xff) << 16);
7951                 cacheline_sz_reg |= ((tp->pci_bist         & 0xff) << 24);
7952
7953                 pci_write_config_dword(tp->pdev, TG3PCI_CACHELINESZ,
7954                                        cacheline_sz_reg);
7955         }
7956
7957         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
7958                               &pci_state_reg);
7959
7960         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0) {
7961                 tp->tg3_flags |= TG3_FLAG_PCIX_MODE;
7962
7963                 /* If this is a 5700 BX chipset, and we are in PCI-X
7964                  * mode, enable register write workaround.
7965                  *
7966                  * The workaround is to use indirect register accesses
7967                  * for all chip writes not to mailbox registers.
7968                  */
7969                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
7970                         u32 pm_reg;
7971                         u16 pci_cmd;
7972
7973                         tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
7974
7975                         /* The chip can have its power management PCI config
7976                          * space registers clobbered due to this bug.
7977                          * So explicitly force the chip into D0 here.
7978                          */
7979                         pci_read_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7980                                               &pm_reg);
7981                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
7982                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
7983                         pci_write_config_dword(tp->pdev, TG3PCI_PM_CTRL_STAT,
7984                                                pm_reg);
7985
7986                         /* Also, force SERR#/PERR# in PCI command. */
7987                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
7988                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
7989                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
7990                 }
7991         }
7992
7993         /* Back to back register writes can cause problems on this chip,
7994          * the workaround is to read back all reg writes except those to
7995          * mailbox regs.  See tg3_write_indirect_reg32().
7996          *
7997          * PCI Express 5750_A0 rev chips need this workaround too.
7998          */
7999         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8000             ((tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) &&
8001              tp->pci_chip_rev_id == CHIPREV_ID_5750_A0))
8002                 tp->tg3_flags |= TG3_FLAG_5701_REG_WRITE_BUG;
8003
8004         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
8005                 tp->tg3_flags |= TG3_FLAG_PCI_HIGH_SPEED;
8006         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
8007                 tp->tg3_flags |= TG3_FLAG_PCI_32BIT;
8008
8009         /* Chip-specific fixup from Broadcom driver */
8010         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
8011             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
8012                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
8013                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
8014         }
8015
8016         /* Force the chip into D0. */
8017         err = tg3_set_power_state(tp, 0);
8018         if (err) {
8019                 printk(KERN_ERR PFX "(%s) transition to D0 failed\n",
8020                        pci_name(tp->pdev));
8021                 return err;
8022         }
8023
8024         /* 5700 B0 chips do not support checksumming correctly due
8025          * to hardware bugs.
8026          */
8027         if (tp->pci_chip_rev_id == CHIPREV_ID_5700_B0)
8028                 tp->tg3_flags |= TG3_FLAG_BROKEN_CHECKSUMS;
8029
8030         /* Pseudo-header checksum is done by hardware logic and not
8031          * the offload processors, so make the chip do the pseudo-
8032          * header checksums on receive.  For transmit it is more
8033          * convenient to do the pseudo-header checksum in software
8034          * as Linux does that on transmit for us in all cases.
8035          */
8036         tp->tg3_flags |= TG3_FLAG_NO_TX_PSEUDO_CSUM;
8037         tp->tg3_flags &= ~TG3_FLAG_NO_RX_PSEUDO_CSUM;
8038
8039         /* Derive initial jumbo mode from MTU assigned in
8040          * ether_setup() via the alloc_etherdev() call
8041          */
8042         if (tp->dev->mtu > ETH_DATA_LEN)
8043                 tp->tg3_flags |= TG3_FLAG_JUMBO_ENABLE;
8044
8045         /* Determine WakeOnLan speed to use. */
8046         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8047             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
8048             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
8049             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
8050                 tp->tg3_flags &= ~(TG3_FLAG_WOL_SPEED_100MB);
8051         } else {
8052                 tp->tg3_flags |= TG3_FLAG_WOL_SPEED_100MB;
8053         }
8054
8055         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
8056         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) ||
8057             ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
8058              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
8059              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)))
8060                 tp->tg3_flags2 |= TG3_FLG2_NO_ETH_WIRE_SPEED;
8061
8062         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
8063             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
8064                 tp->tg3_flags2 |= TG3_FLG2_PHY_ADC_BUG;
8065         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
8066                 tp->tg3_flags2 |= TG3_FLG2_PHY_5704_A0_BUG;
8067
8068         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8069             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
8070                 tp->tg3_flags2 |= TG3_FLG2_PHY_BER_BUG;
8071
8072         /* Only 5701 and later support tagged irq status mode.
8073          * Also, 5788 chips cannot use tagged irq status.
8074          *
8075          * However, since we are using NAPI, avoid tagged irq status
8076          * because the interrupt condition is more difficult to
8077          * fully clear in that mode.
8078          */
8079         tp->coalesce_mode = 0;
8080
8081         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
8082             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
8083                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
8084
8085         /* Initialize MAC MI mode, polling disabled. */
8086         tw32_f(MAC_MI_MODE, tp->mi_mode);
8087         udelay(80);
8088
8089         /* Initialize data/descriptor byte/word swapping. */
8090         val = tr32(GRC_MODE);
8091         val &= GRC_MODE_HOST_STACKUP;
8092         tw32(GRC_MODE, val | tp->grc_mode);
8093
8094         tg3_switch_clocks(tp);
8095
8096         /* Clear this out for sanity. */
8097         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
8098
8099         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
8100                               &pci_state_reg);
8101         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
8102             (tp->tg3_flags & TG3_FLAG_PCIX_TARGET_HWBUG) == 0) {
8103                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
8104
8105                 if (chiprevid == CHIPREV_ID_5701_A0 ||
8106                     chiprevid == CHIPREV_ID_5701_B0 ||
8107                     chiprevid == CHIPREV_ID_5701_B2 ||
8108                     chiprevid == CHIPREV_ID_5701_B5) {
8109                         void __iomem *sram_base;
8110
8111                         /* Write some dummy words into the SRAM status block
8112                          * area, see if it reads back correctly.  If the return
8113                          * value is bad, force enable the PCIX workaround.
8114                          */
8115                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
8116
8117                         writel(0x00000000, sram_base);
8118                         writel(0x00000000, sram_base + 4);
8119                         writel(0xffffffff, sram_base + 4);
8120                         if (readl(sram_base) != 0x00000000)
8121                                 tp->tg3_flags |= TG3_FLAG_PCIX_TARGET_HWBUG;
8122                 }
8123         }
8124
8125         udelay(50);
8126         tg3_nvram_init(tp);
8127
8128         grc_misc_cfg = tr32(GRC_MISC_CFG);
8129         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
8130
8131         /* Broadcom's driver says that CIOBE multisplit has a bug */
8132 #if 0
8133         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8134             grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5704CIOBE) {
8135                 tp->tg3_flags |= TG3_FLAG_SPLIT_MODE;
8136                 tp->split_mode_max_reqs = SPLIT_MODE_5704_MAX_REQ;
8137         }
8138 #endif
8139         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8140             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
8141              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
8142                 tp->tg3_flags2 |= TG3_FLG2_IS_5788;
8143
8144         /* these are limited to 10/100 only */
8145         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
8146              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
8147             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8148              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8149              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
8150               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
8151               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
8152             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
8153              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
8154               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F)))
8155                 tp->tg3_flags |= TG3_FLAG_10_100_ONLY;
8156
8157         err = tg3_phy_probe(tp);
8158         if (err) {
8159                 printk(KERN_ERR PFX "(%s) phy probe failed, err %d\n",
8160                        pci_name(tp->pdev), err);
8161                 /* ... but do not return immediately ... */
8162         }
8163
8164         tg3_read_partno(tp);
8165
8166         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES) {
8167                 tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8168         } else {
8169                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8170                         tp->tg3_flags |= TG3_FLAG_USE_MI_INTERRUPT;
8171                 else
8172                         tp->tg3_flags &= ~TG3_FLAG_USE_MI_INTERRUPT;
8173         }
8174
8175         /* 5700 {AX,BX} chips have a broken status block link
8176          * change bit implementation, so we must use the
8177          * status register in those cases.
8178          */
8179         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
8180                 tp->tg3_flags |= TG3_FLAG_USE_LINKCHG_REG;
8181         else
8182                 tp->tg3_flags &= ~TG3_FLAG_USE_LINKCHG_REG;
8183
8184         /* The led_ctrl is set during tg3_phy_probe; here we might
8185          * have to force the link status polling mechanism based
8186          * upon subsystem IDs.
8187          */
8188         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
8189             !(tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)) {
8190                 tp->tg3_flags |= (TG3_FLAG_USE_MI_INTERRUPT |
8191                                   TG3_FLAG_USE_LINKCHG_REG);
8192         }
8193
8194         /* For all SERDES we poll the MAC status register. */
8195         if (tp->tg3_flags2 & TG3_FLG2_PHY_SERDES)
8196                 tp->tg3_flags |= TG3_FLAG_POLL_SERDES;
8197         else
8198                 tp->tg3_flags &= ~TG3_FLAG_POLL_SERDES;
8199
8200         /* 5700 BX chips need to have their TX producer index mailboxes
8201          * written twice to work around a bug.
8202          */
8203         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX)
8204                 tp->tg3_flags |= TG3_FLAG_TXD_MBOX_HWBUG;
8205         else
8206                 tp->tg3_flags &= ~TG3_FLAG_TXD_MBOX_HWBUG;
8207
8208         /* It seems all chips can get confused if TX buffers
8209          * straddle the 4GB address boundary in some cases.
8210          */
8211         tp->dev->hard_start_xmit = tg3_start_xmit;
8212
8213         tp->rx_offset = 2;
8214         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
8215             (tp->tg3_flags & TG3_FLAG_PCIX_MODE) != 0)
8216                 tp->rx_offset = 0;
8217
8218         /* By default, disable wake-on-lan.  User can change this
8219          * using ETHTOOL_SWOL.
8220          */
8221         tp->tg3_flags &= ~TG3_FLAG_WOL_ENABLE;
8222
8223         return err;
8224 }
8225
8226 #ifdef CONFIG_SPARC64
8227 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
8228 {
8229         struct net_device *dev = tp->dev;
8230         struct pci_dev *pdev = tp->pdev;
8231         struct pcidev_cookie *pcp = pdev->sysdata;
8232
8233         if (pcp != NULL) {
8234                 int node = pcp->prom_node;
8235
8236                 if (prom_getproplen(node, "local-mac-address") == 6) {
8237                         prom_getproperty(node, "local-mac-address",
8238                                          dev->dev_addr, 6);
8239                         return 0;
8240                 }
8241         }
8242         return -ENODEV;
8243 }
8244
8245 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
8246 {
8247         struct net_device *dev = tp->dev;
8248
8249         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
8250         return 0;
8251 }
8252 #endif
8253
8254 static int __devinit tg3_get_device_address(struct tg3 *tp)
8255 {
8256         struct net_device *dev = tp->dev;
8257         u32 hi, lo, mac_offset;
8258
8259 #ifdef CONFIG_SPARC64
8260         if (!tg3_get_macaddr_sparc(tp))
8261                 return 0;
8262 #endif
8263
8264         mac_offset = 0x7c;
8265         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
8266             !(tp->tg3_flags2 & TG3_FLG2_SUN_570X)) {
8267                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
8268                         mac_offset = 0xcc;
8269                 if (tg3_nvram_lock(tp))
8270                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
8271                 else
8272                         tg3_nvram_unlock(tp);
8273         }
8274
8275         /* First try to get it from MAC address mailbox. */
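             /* The mailbox contents are considered valid only when the
              * upper 16 bits of the first word read back as 0x484b
              * (ASCII "HK").
              */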
8276         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
8277         if ((hi >> 16) == 0x484b) {
8278                 dev->dev_addr[0] = (hi >>  8) & 0xff;
8279                 dev->dev_addr[1] = (hi >>  0) & 0xff;
8280
8281                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
8282                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8283                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8284                 dev->dev_addr[4] = (lo >>  8) & 0xff;
8285                 dev->dev_addr[5] = (lo >>  0) & 0xff;
8286         }
8287         /* Next, try NVRAM. */
8288         else if (!(tp->tg3_flags2 & TG3_FLG2_SUN_570X) &&
8289                  !tg3_nvram_read(tp, mac_offset + 0, &hi) &&
8290                  !tg3_nvram_read(tp, mac_offset + 4, &lo)) {
8291                 dev->dev_addr[0] = ((hi >> 16) & 0xff);
8292                 dev->dev_addr[1] = ((hi >> 24) & 0xff);
8293                 dev->dev_addr[2] = ((lo >>  0) & 0xff);
8294                 dev->dev_addr[3] = ((lo >>  8) & 0xff);
8295                 dev->dev_addr[4] = ((lo >> 16) & 0xff);
8296                 dev->dev_addr[5] = ((lo >> 24) & 0xff);
8297         }
8298         /* Finally just fetch it out of the MAC control regs. */
8299         else {
8300                 hi = tr32(MAC_ADDR_0_HIGH);
8301                 lo = tr32(MAC_ADDR_0_LOW);
8302
8303                 dev->dev_addr[5] = lo & 0xff;
8304                 dev->dev_addr[4] = (lo >> 8) & 0xff;
8305                 dev->dev_addr[3] = (lo >> 16) & 0xff;
8306                 dev->dev_addr[2] = (lo >> 24) & 0xff;
8307                 dev->dev_addr[1] = hi & 0xff;
8308                 dev->dev_addr[0] = (hi >> 8) & 0xff;
8309         }
8310
8311         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
8312 #ifdef CONFIG_SPARC64
8313                 if (!tg3_get_default_macaddr_sparc(tp))
8314                         return 0;
8315 #endif
8316                 return -EINVAL;
8317         }
8318         return 0;
8319 }
8320
8321 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
8322 {
8323         struct tg3_internal_buffer_desc test_desc;
8324         u32 sram_dma_descs;
8325         int i, ret;
8326
8327         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
8328
8329         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
8330         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
8331         tw32(RDMAC_STATUS, 0);
8332         tw32(WDMAC_STATUS, 0);
8333
8334         tw32(BUFMGR_MODE, 0);
8335         tw32(FTQ_RESET, 0);
8336
8337         test_desc.addr_hi = ((u64) buf_dma) >> 32;
8338         test_desc.addr_lo = buf_dma & 0xffffffff;
8339         test_desc.nic_mbuf = 0x00002100;
8340         test_desc.len = size;
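             /* The 0x2100 value appears to be the NIC SRAM address of the
              * DMA staging buffer; tg3_test_dma()'s disabled verification
              * reads NIC memory back at the same 0x2100 offset.
              */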
8341
8342         /*
8343          * HP ZX1 was seeing test failures for 5701 cards running at 33MHz
8344          * the *second* time the tg3 driver was loaded after an
8345          * initial scan.
8346          *
8347          * Broadcom tells me:
8348          *   ...the DMA engine is connected to the GRC block and a DMA
8349          *   reset may affect the GRC block in some unpredictable way...
8350          *   The behavior of resets to individual blocks has not been tested.
8351          *
8352          * Broadcom noted the GRC reset will also reset all sub-components.
8353          */
8354         if (to_device) {
8355                 test_desc.cqid_sqid = (13 << 8) | 2;
8356
8357                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
8358                 udelay(40);
8359         } else {
8360                 test_desc.cqid_sqid = (16 << 8) | 7;
8361
8362                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
8363                 udelay(40);
8364         }
8365         test_desc.flags = 0x00000005;
8366
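             /* Copy the test descriptor into NIC SRAM one word at a time
              * through the PCI memory-window registers in config space.
              */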
8367         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
8368                 u32 val;
8369
8370                 val = *(((u32 *)&test_desc) + i);
8371                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
8372                                        sram_dma_descs + (i * sizeof(u32)));
8373                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
8374         }
8375         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
8376
8377         if (to_device) {
8378                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
8379         } else {
8380                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
8381         }
8382
8383         ret = -ENODEV;
8384         for (i = 0; i < 40; i++) {
8385                 u32 val;
8386
8387                 if (to_device)
8388                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
8389                 else
8390                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
8391                 if ((val & 0xffff) == sram_dma_descs) {
8392                         ret = 0;
8393                         break;
8394                 }
8395
8396                 udelay(100);
8397         }
8398
8399         return ret;
8400 }
8401
8402 #define TEST_BUFFER_SIZE        0x400
8403
8404 static int __devinit tg3_test_dma(struct tg3 *tp)
8405 {
8406         dma_addr_t buf_dma;
8407         u32 *buf;
8408         int ret;
8409
8410         buf = pci_alloc_consistent(tp->pdev, TEST_BUFFER_SIZE, &buf_dma);
8411         if (!buf) {
8412                 ret = -ENOMEM;
8413                 goto out_nofree;
8414         }
8415
8416         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
8417                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
8418
8419 #ifndef CONFIG_X86
8420         {
8421                 u8 byte;
8422                 int cacheline_size;
8423                 pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
8424
8425                 if (byte == 0)
8426                         cacheline_size = 1024;
8427                 else
8428                         cacheline_size = (int) byte * 4;
8429
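                     /* PCI_CACHE_LINE_SIZE is in units of 32-bit words, so
                      * a value of 16 means a 64-byte cache line; 0 means
                      * the size is not set and is treated as 1024 above.
                      */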
8430                 switch (cacheline_size) {
8431                 case 16:
8432                 case 32:
8433                 case 64:
8434                 case 128:
8435                         if ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8436                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS)) {
8437                                 tp->dma_rwctrl |=
8438                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX;
8439                                 break;
8440                         } else if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8441                                 tp->dma_rwctrl &=
8442                                         ~(DMA_RWCTRL_PCI_WRITE_CMD);
8443                                 tp->dma_rwctrl |=
8444                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
8445                                 break;
8446                         }
8447                         /* fallthrough */
8448                 case 256:
8449                         if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE) &&
8450                             !(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8451                                 tp->dma_rwctrl |=
8452                                         DMA_RWCTRL_WRITE_BNDRY_256;
8453                         else if (!(tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS))
8454                                 tp->dma_rwctrl |=
8455                                         DMA_RWCTRL_WRITE_BNDRY_256_PCIX;
8456                 }
8457         }
8458 #endif
8459
8460         if (tp->tg3_flags2 & TG3_FLG2_PCI_EXPRESS) {
8461                 /* DMA read watermark not used on PCIE */
8462                 tp->dma_rwctrl |= 0x00180000;
8463         } else if (!(tp->tg3_flags & TG3_FLAG_PCIX_MODE)) {
8464                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
8465                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
8466                         tp->dma_rwctrl |= 0x003f0000;
8467                 else
8468                         tp->dma_rwctrl |= 0x003f000f;
8469         } else {
8470                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8471                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
8472                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
8473
8474                         if (ccval == 0x6 || ccval == 0x7)
8475                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
8476
8477                         /* Set bit 23 to re-enable the PCI-X hw bug fix */
8478                         tp->dma_rwctrl |= 0x009f0000;
8479                 } else {
8480                         tp->dma_rwctrl |= 0x001b000f;
8481                 }
8482         }
8483
8484         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
8485             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8486                 tp->dma_rwctrl &= 0xfffffff0;
8487
8488         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8489             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
8490                 /* Remove this if it causes problems for some boards. */
8491                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
8492
8493                 /* On 5700/5701 chips, we need to set this bit.
8494                  * Otherwise the chip will issue cacheline transactions
8495                  * to streamable DMA memory with not all the byte
8496                  * enables turned on.  This is an error on several
8497                  * RISC PCI controllers, in particular sparc64.
8498                  *
8499                  * On 5703/5704 chips, this bit has been reassigned
8500                  * a different meaning.  In particular, it is used
8501                  * on those chips to enable a PCI-X workaround.
8502                  */
8503                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
8504         }
8505
8506         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8507
8508 #if 0
8509         /* Unneeded, already done by tg3_get_invariants.  */
8510         tg3_switch_clocks(tp);
8511 #endif
8512
8513         ret = 0;
8514         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
8515             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
8516                 goto out;
8517
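
             /* On 5700/5701, run a write/read DMA loop against a known
              * pattern.  If data comes back corrupted while the DMA write
              * boundary is still disabled, fall back to a 16-byte write
              * boundary and retry; any other corruption is a hard failure.
              */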
8518         while (1) {
8519                 u32 *p = buf, i;
8520
8521                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
8522                         p[i] = i;
8523
8524                 /* Send the buffer to the chip. */
8525                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
8526                 if (ret) {
8527                         printk(KERN_ERR "tg3_test_dma() write of test buffer failed, err %d\n", ret);
8528                         break;
8529                 }
8530
8531 #if 0
8532                 /* validate data reached card RAM correctly. */
8533                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8534                         u32 val;
8535                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
8536                         if (le32_to_cpu(val) != p[i]) {
8537                                 printk(KERN_ERR "  tg3_test_dma()  Card buffer corrupted on write! (%d != %d)\n", val, i);
8538                                 /* ret = -ENODEV here? */
8539                         }
8540                         p[i] = 0;
8541                 }
8542 #endif
8543                 /* Now read it back. */
8544                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
8545                 if (ret) {
8546                         printk(KERN_ERR "tg3_test_dma() read back of test buffer failed, err %d\n", ret);
8547
8548                         break;
8549                 }
8550
8551                 /* Verify it. */
8552                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
8553                         if (p[i] == i)
8554                                 continue;
8555
8556                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) ==
8557                             DMA_RWCTRL_WRITE_BNDRY_DISAB) {
8558                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
8559                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8560                                 break;
8561                         } else {
8562                                 printk(KERN_ERR "tg3_test_dma() buffer corrupted on read back! (%d != %d)\n", p[i], i);
8563                                 ret = -ENODEV;
8564                                 goto out;
8565                         }
8566                 }
8567
8568                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
8569                         /* Success. */
8570                         ret = 0;
8571                         break;
8572                 }
8573         }
8574
8575 out:
8576         pci_free_consistent(tp->pdev, TEST_BUFFER_SIZE, buf, buf_dma);
8577 out_nofree:
8578         return ret;
8579 }
8580
8581 static void __devinit tg3_init_link_config(struct tg3 *tp)
8582 {
8583         tp->link_config.advertising =
8584                 (ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |
8585                  ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |
8586                  ADVERTISED_1000baseT_Half | ADVERTISED_1000baseT_Full |
8587                  ADVERTISED_Autoneg | ADVERTISED_MII);
8588         tp->link_config.speed = SPEED_INVALID;
8589         tp->link_config.duplex = DUPLEX_INVALID;
8590         tp->link_config.autoneg = AUTONEG_ENABLE;
8591         netif_carrier_off(tp->dev);
8592         tp->link_config.active_speed = SPEED_INVALID;
8593         tp->link_config.active_duplex = DUPLEX_INVALID;
8594         tp->link_config.phy_is_low_power = 0;
8595         tp->link_config.orig_speed = SPEED_INVALID;
8596         tp->link_config.orig_duplex = DUPLEX_INVALID;
8597         tp->link_config.orig_autoneg = AUTONEG_INVALID;
8598 }
8599
8600 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
8601 {
8602         tp->bufmgr_config.mbuf_read_dma_low_water =
8603                 DEFAULT_MB_RDMA_LOW_WATER;
8604         tp->bufmgr_config.mbuf_mac_rx_low_water =
8605                 DEFAULT_MB_MACRX_LOW_WATER;
8606         tp->bufmgr_config.mbuf_high_water =
8607                 DEFAULT_MB_HIGH_WATER;
8608
8609         tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
8610                 DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
8611         tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
8612                 DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
8613         tp->bufmgr_config.mbuf_high_water_jumbo =
8614                 DEFAULT_MB_HIGH_WATER_JUMBO;
8615
8616         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
8617         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
8618 }
8619
8620 static char * __devinit tg3_phy_string(struct tg3 *tp)
8621 {
8622         switch (tp->phy_id & PHY_ID_MASK) {
8623         case PHY_ID_BCM5400:    return "5400";
8624         case PHY_ID_BCM5401:    return "5401";
8625         case PHY_ID_BCM5411:    return "5411";
8626         case PHY_ID_BCM5701:    return "5701";
8627         case PHY_ID_BCM5703:    return "5703";
8628         case PHY_ID_BCM5704:    return "5704";
8629         case PHY_ID_BCM5705:    return "5705";
8630         case PHY_ID_BCM5750:    return "5750";
8631         case PHY_ID_BCM8002:    return "8002/serdes";
8632         case 0:                 return "serdes";
8633         default:                return "unknown";
8634         }
8635 }
8636
8637 static struct pci_dev * __devinit tg3_find_5704_peer(struct tg3 *tp)
8638 {
8639         struct pci_dev *peer;
8640         unsigned int func, devnr = tp->pdev->devfn & ~7;
8641
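             /* devfn packs the device (slot) number in its upper bits and
              * the function number in its low three bits, so devnr is
              * function 0 of this same device; scan functions 0-7 for the
              * other port.
              */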
8642         for (func = 0; func < 8; func++) {
8643                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
8644                 if (peer && peer != tp->pdev)
8645                         break;
8646                 pci_dev_put(peer);
8647         }
8648         if (!peer || peer == tp->pdev)
8649                 BUG();
8650
8651         /*
8652          * We don't need to keep the refcount elevated; there's no way
8653          * to remove one half of this device without removing the other.
8654          */
8655         pci_dev_put(peer);
8656
8657         return peer;
8658 }
8659
8660 static int __devinit tg3_init_one(struct pci_dev *pdev,
8661                                   const struct pci_device_id *ent)
8662 {
8663         static int tg3_version_printed = 0;
8664         unsigned long tg3reg_base, tg3reg_len;
8665         struct net_device *dev;
8666         struct tg3 *tp;
8667         int i, err, pci_using_dac, pm_cap;
8668
8669         if (tg3_version_printed++ == 0)
8670                 printk(KERN_INFO "%s", version);
8671
8672         err = pci_enable_device(pdev);
8673         if (err) {
8674                 printk(KERN_ERR PFX "Cannot enable PCI device, "
8675                        "aborting.\n");
8676                 return err;
8677         }
8678
8679         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
8680                 printk(KERN_ERR PFX "Cannot find proper PCI device "
8681                        "base address, aborting.\n");
8682                 err = -ENODEV;
8683                 goto err_out_disable_pdev;
8684         }
8685
8686         err = pci_request_regions(pdev, DRV_MODULE_NAME);
8687         if (err) {
8688                 printk(KERN_ERR PFX "Cannot obtain PCI resources, "
8689                        "aborting.\n");
8690                 goto err_out_disable_pdev;
8691         }
8692
8693         pci_set_master(pdev);
8694
8695         /* Find power-management capability. */
8696         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
8697         if (pm_cap == 0) {
8698                 printk(KERN_ERR PFX "Cannot find PowerManagement capability, "
8699                        "aborting.\n");
8700                 err = -EIO;
8701                 goto err_out_free_res;
8702         }
8703
8704         /* Configure DMA attributes. */
8705         err = pci_set_dma_mask(pdev, 0xffffffffffffffffULL);
8706         if (!err) {
8707                 pci_using_dac = 1;
8708                 err = pci_set_consistent_dma_mask(pdev, 0xffffffffffffffffULL);
8709                 if (err < 0) {
8710                         printk(KERN_ERR PFX "Unable to obtain 64 bit DMA "
8711                                "for consistent allocations\n");
8712                         goto err_out_free_res;
8713                 }
8714         } else {
8715                 err = pci_set_dma_mask(pdev, 0xffffffffULL);
8716                 if (err) {
8717                         printk(KERN_ERR PFX "No usable DMA configuration, "
8718                                "aborting.\n");
8719                         goto err_out_free_res;
8720                 }
8721                 pci_using_dac = 0;
8722         }
8723
8724         tg3reg_base = pci_resource_start(pdev, 0);
8725         tg3reg_len = pci_resource_len(pdev, 0);
8726
8727         dev = alloc_etherdev(sizeof(*tp));
8728         if (!dev) {
8729                 printk(KERN_ERR PFX "Etherdev alloc failed, aborting.\n");
8730                 err = -ENOMEM;
8731                 goto err_out_free_res;
8732         }
8733
8734         SET_MODULE_OWNER(dev);
8735         SET_NETDEV_DEV(dev, &pdev->dev);
8736
8737         if (pci_using_dac)
8738                 dev->features |= NETIF_F_HIGHDMA;
8739         dev->features |= NETIF_F_LLTX;
8740 #if TG3_VLAN_TAG_USED
8741         dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
8742         dev->vlan_rx_register = tg3_vlan_rx_register;
8743         dev->vlan_rx_kill_vid = tg3_vlan_rx_kill_vid;
8744 #endif
8745
8746         tp = netdev_priv(dev);
8747         tp->pdev = pdev;
8748         tp->dev = dev;
8749         tp->pm_cap = pm_cap;
8750         tp->mac_mode = TG3_DEF_MAC_MODE;
8751         tp->rx_mode = TG3_DEF_RX_MODE;
8752         tp->tx_mode = TG3_DEF_TX_MODE;
8753         tp->mi_mode = MAC_MI_MODE_BASE;
8754         if (tg3_debug > 0)
8755                 tp->msg_enable = tg3_debug;
8756         else
8757                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
8758
8759         /* The word/byte swap controls here control register access byte
8760          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
8761          * setting below.
8762          */
8763         tp->misc_host_ctrl =
8764                 MISC_HOST_CTRL_MASK_PCI_INT |
8765                 MISC_HOST_CTRL_WORD_SWAP |
8766                 MISC_HOST_CTRL_INDIR_ACCESS |
8767                 MISC_HOST_CTRL_PCISTATE_RW;
8768
8769         /* The NONFRM (non-frame) byte/word swap controls take effect
8770          * on descriptor entries, anything which isn't packet data.
8771          *
8772          * The StrongARM chips on the board (one for tx, one for rx)
8773          * are running in big-endian mode.
8774          */
8775         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
8776                         GRC_MODE_WSWAP_NONFRM_DATA);
8777 #ifdef __BIG_ENDIAN
8778         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
8779 #endif
8780         spin_lock_init(&tp->lock);
8781         spin_lock_init(&tp->tx_lock);
8782         spin_lock_init(&tp->indirect_lock);
8783         INIT_WORK(&tp->reset_task, tg3_reset_task, tp);
8784
8785         tp->regs = ioremap_nocache(tg3reg_base, tg3reg_len);
8786         if (!tp->regs) {
8787                 printk(KERN_ERR PFX "Cannot map device registers, "
8788                        "aborting.\n");
8789                 err = -ENOMEM;
8790                 goto err_out_free_dev;
8791         }
8792
8793         tg3_init_link_config(tp);
8794
8795         tg3_init_bufmgr_config(tp);
8796
8797         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
8798         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
8799         tp->tx_pending = TG3_DEF_TX_RING_PENDING;
8800
8801         dev->open = tg3_open;
8802         dev->stop = tg3_close;
8803         dev->get_stats = tg3_get_stats;
8804         dev->set_multicast_list = tg3_set_rx_mode;
8805         dev->set_mac_address = tg3_set_mac_addr;
8806         dev->do_ioctl = tg3_ioctl;
8807         dev->tx_timeout = tg3_tx_timeout;
8808         dev->poll = tg3_poll;
8809         dev->ethtool_ops = &tg3_ethtool_ops;
8810         dev->weight = 64;
8811         dev->watchdog_timeo = TG3_TX_TIMEOUT;
8812         dev->change_mtu = tg3_change_mtu;
8813         dev->irq = pdev->irq;
8814 #ifdef CONFIG_NET_POLL_CONTROLLER
8815         dev->poll_controller = tg3_poll_controller;
8816 #endif
8817
8818         err = tg3_get_invariants(tp);
8819         if (err) {
8820                 printk(KERN_ERR PFX "Problem fetching invariants of chip, "
8821                        "aborting.\n");
8822                 goto err_out_iounmap;
8823         }
8824
8825         if (tp->tg3_flags2 & TG3_FLG2_5705_PLUS) {
8826                 tp->bufmgr_config.mbuf_read_dma_low_water =
8827                         DEFAULT_MB_RDMA_LOW_WATER_5705;
8828                 tp->bufmgr_config.mbuf_mac_rx_low_water =
8829                         DEFAULT_MB_MACRX_LOW_WATER_5705;
8830                 tp->bufmgr_config.mbuf_high_water =
8831                         DEFAULT_MB_HIGH_WATER_5705;
8832         }
8833
8834 #if TG3_TSO_SUPPORT != 0
8835         if (tp->tg3_flags2 & TG3_FLG2_HW_TSO) {
8836                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8837         }
8838         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
8839             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
8840             tp->pci_chip_rev_id == CHIPREV_ID_5705_A0 ||
8841             (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0) {
8842                 tp->tg3_flags2 &= ~TG3_FLG2_TSO_CAPABLE;
8843         } else {
8844                 tp->tg3_flags2 |= TG3_FLG2_TSO_CAPABLE;
8845         }
8846
8847         /* TSO is off by default, user can enable using ethtool.  */
8848 #if 0
8849         if (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE)
8850                 dev->features |= NETIF_F_TSO;
8851 #endif
8852
8853 #endif
8854
8855         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
8856             !(tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) &&
8857             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
8858                 tp->tg3_flags2 |= TG3_FLG2_MAX_RXPEND_64;
8859                 tp->rx_pending = 63;
8860         }
8861
8862         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8863                 tp->pdev_peer = tg3_find_5704_peer(tp);
8864
8865         err = tg3_get_device_address(tp);
8866         if (err) {
8867                 printk(KERN_ERR PFX "Could not obtain valid ethernet address, "
8868                        "aborting.\n");
8869                 goto err_out_iounmap;
8870         }
8871
8872         /*
8873          * Reset chip in case UNDI or EFI driver did not shut it down.
8874          * The DMA self test will enable WDMAC and we'll see (spurious)
8875          * pending DMA on the PCI bus at that point.
8876          */
8877         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
8878             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
8879                 pci_save_state(tp->pdev);
8880                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
8881                 tg3_halt(tp);
8882         }
8883
8884         err = tg3_test_dma(tp);
8885         if (err) {
8886                 printk(KERN_ERR PFX "DMA engine test failed, aborting.\n");
8887                 goto err_out_iounmap;
8888         }
8889
8890         /* Tigon3 can do ipv4 only... and some chips have buggy
8891          * checksumming.
8892          */
8893         if ((tp->tg3_flags & TG3_FLAG_BROKEN_CHECKSUMS) == 0) {
8894                 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
8895                 tp->tg3_flags |= TG3_FLAG_RX_CHECKSUMS;
8896         } else
8897                 tp->tg3_flags &= ~TG3_FLAG_RX_CHECKSUMS;
8898
8899         if (tp->tg3_flags2 & TG3_FLG2_IS_5788)
8900                 dev->features &= ~NETIF_F_HIGHDMA;
8901
8902         /* flow control autonegotiation is default behavior */
8903         tp->tg3_flags |= TG3_FLAG_PAUSE_AUTONEG;
8904
8905         err = register_netdev(dev);
8906         if (err) {
8907                 printk(KERN_ERR PFX "Cannot register net device, "
8908                        "aborting.\n");
8909                 goto err_out_iounmap;
8910         }
8911
8912         pci_set_drvdata(pdev, dev);
8913
8914         /* Now that we have fully set up the chip, save away a snapshot
8915          * of the PCI config space.  We need to restore this after
8916          * GRC_MISC_CFG core clock resets and some resume events.
8917          */
8918         pci_save_state(tp->pdev);
8919
8920         printk(KERN_INFO "%s: Tigon3 [partno(%s) rev %04x PHY(%s)] (PCI%s:%s:%s) %sBaseT Ethernet ",
8921                dev->name,
8922                tp->board_part_number,
8923                tp->pci_chip_rev_id,
8924                tg3_phy_string(tp),
8925                ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "X" : ""),
8926                ((tp->tg3_flags & TG3_FLAG_PCI_HIGH_SPEED) ?
8927                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "133MHz" : "66MHz") :
8928                 ((tp->tg3_flags & TG3_FLAG_PCIX_MODE) ? "100MHz" : "33MHz")),
8929                ((tp->tg3_flags & TG3_FLAG_PCI_32BIT) ? "32-bit" : "64-bit"),
8930                (tp->tg3_flags & TG3_FLAG_10_100_ONLY) ? "10/100" : "10/100/1000");
8931
8932         for (i = 0; i < 6; i++)
8933                 printk("%2.2x%c", dev->dev_addr[i],
8934                        i == 5 ? '\n' : ':');
8935
8936         printk(KERN_INFO "%s: RXcsums[%d] LinkChgREG[%d] "
8937                "MIirq[%d] ASF[%d] Split[%d] WireSpeed[%d] "
8938                "TSOcap[%d] \n",
8939                dev->name,
8940                (tp->tg3_flags & TG3_FLAG_RX_CHECKSUMS) != 0,
8941                (tp->tg3_flags & TG3_FLAG_USE_LINKCHG_REG) != 0,
8942                (tp->tg3_flags & TG3_FLAG_USE_MI_INTERRUPT) != 0,
8943                (tp->tg3_flags & TG3_FLAG_ENABLE_ASF) != 0,
8944                (tp->tg3_flags & TG3_FLAG_SPLIT_MODE) != 0,
8945                (tp->tg3_flags2 & TG3_FLG2_NO_ETH_WIRE_SPEED) == 0,
8946                (tp->tg3_flags2 & TG3_FLG2_TSO_CAPABLE) != 0);
8947
8948         return 0;
8949
8950 err_out_iounmap:
8951         iounmap(tp->regs);
8952
8953 err_out_free_dev:
8954         free_netdev(dev);
8955
8956 err_out_free_res:
8957         pci_release_regions(pdev);
8958
8959 err_out_disable_pdev:
8960         pci_disable_device(pdev);
8961         pci_set_drvdata(pdev, NULL);
8962         return err;
8963 }
8964
8965 static void __devexit tg3_remove_one(struct pci_dev *pdev)
8966 {
8967         struct net_device *dev = pci_get_drvdata(pdev);
8968
8969         if (dev) {
8970                 struct tg3 *tp = netdev_priv(dev);
8971
8972                 unregister_netdev(dev);
8973                 iounmap(tp->regs);
8974                 free_netdev(dev);
8975                 pci_release_regions(pdev);
8976                 pci_disable_device(pdev);
8977                 pci_set_drvdata(pdev, NULL);
8978         }
8979 }
8980
8981 static int tg3_suspend(struct pci_dev *pdev, pm_message_t state)
8982 {
8983         struct net_device *dev = pci_get_drvdata(pdev);
8984         struct tg3 *tp = netdev_priv(dev);
8985         int err;
8986
8987         if (!netif_running(dev))
8988                 return 0;
8989
8990         tg3_netif_stop(tp);
8991
8992         del_timer_sync(&tp->timer);
8993
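             /* Lock ordering in this path: tp->lock is always taken before
              * tp->tx_lock (tg3_resume() below follows the same order).
              */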
8994         spin_lock_irq(&tp->lock);
8995         spin_lock(&tp->tx_lock);
8996         tg3_disable_ints(tp);
8997         spin_unlock(&tp->tx_lock);
8998         spin_unlock_irq(&tp->lock);
8999
9000         netif_device_detach(dev);
9001
9002         spin_lock_irq(&tp->lock);
9003         spin_lock(&tp->tx_lock);
9004         tg3_halt(tp);
9005         spin_unlock(&tp->tx_lock);
9006         spin_unlock_irq(&tp->lock);
9007
9008         err = tg3_set_power_state(tp, pci_choose_state(pdev, state));
9009         if (err) {
9010                 spin_lock_irq(&tp->lock);
9011                 spin_lock(&tp->tx_lock);
9012
9013                 tg3_init_hw(tp);
9014
9015                 tp->timer.expires = jiffies + tp->timer_offset;
9016                 add_timer(&tp->timer);
9017
9018                 netif_device_attach(dev);
9019                 tg3_netif_start(tp);
9020
9021                 spin_unlock(&tp->tx_lock);
9022                 spin_unlock_irq(&tp->lock);
9023         }
9024
9025         return err;
9026 }
9027
9028 static int tg3_resume(struct pci_dev *pdev)
9029 {
9030         struct net_device *dev = pci_get_drvdata(pdev);
9031         struct tg3 *tp = netdev_priv(dev);
9032         int err;
9033
9034         if (!netif_running(dev))
9035                 return 0;
9036
9037         pci_restore_state(tp->pdev);
9038
9039         err = tg3_set_power_state(tp, 0);
9040         if (err)
9041                 return err;
9042
9043         netif_device_attach(dev);
9044
9045         spin_lock_irq(&tp->lock);
9046         spin_lock(&tp->tx_lock);
9047
9048         tg3_init_hw(tp);
9049
9050         tp->timer.expires = jiffies + tp->timer_offset;
9051         add_timer(&tp->timer);
9052
9053         tg3_enable_ints(tp);
9054
9055         tg3_netif_start(tp);
9056
9057         spin_unlock(&tp->tx_lock);
9058         spin_unlock_irq(&tp->lock);
9059
9060         return 0;
9061 }
9062
9063 static struct pci_driver tg3_driver = {
9064         .name           = DRV_MODULE_NAME,
9065         .id_table       = tg3_pci_tbl,
9066         .probe          = tg3_init_one,
9067         .remove         = __devexit_p(tg3_remove_one),
9068         .suspend        = tg3_suspend,
9069         .resume         = tg3_resume
9070 };
9071
9072 static int __init tg3_init(void)
9073 {
9074         return pci_module_init(&tg3_driver);
9075 }
9076
9077 static void __exit tg3_cleanup(void)
9078 {
9079         pci_unregister_driver(&tg3_driver);
9080 }
9081
9082 module_init(tg3_init);
9083 module_exit(tg3_cleanup);