/*
 * tg3.c: Broadcom Tigon3 ethernet driver.
 *
 * Copyright (C) 2001, 2002, 2003, 2004 David S. Miller (davem@redhat.com)
 * Copyright (C) 2001, 2002, 2003 Jeff Garzik (jgarzik@pobox.com)
 * Copyright (C) 2004 Sun Microsystems Inc.
 * Copyright (C) 2005-2012 Broadcom Corporation.
 *
 * Firmware is:
 *      Derived from proprietary unpublished source code,
 *      Copyright (C) 2000-2003 Broadcom Corporation.
 *
 *      Permission is hereby granted for the distribution of this firmware
 *      data in hexadecimal or equivalent format, provided this copyright
 *      notice is accompanying it.
 */


#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/stringify.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/compiler.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/in.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/ioport.h>
#include <linux/pci.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/ethtool.h>
#include <linux/mdio.h>
#include <linux/mii.h>
#include <linux/phy.h>
#include <linux/brcmphy.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/tcp.h>
#include <linux/workqueue.h>
#include <linux/prefetch.h>
#include <linux/dma-mapping.h>
#include <linux/firmware.h>

#include <net/checksum.h>
#include <net/ip.h>

#include <asm/system.h>
#include <linux/io.h>
#include <asm/byteorder.h>
#include <linux/uaccess.h>

#ifdef CONFIG_SPARC
#include <asm/idprom.h>
#include <asm/prom.h>
#endif

#define BAR_0   0
#define BAR_2   2

#include "tg3.h"

/* Functions & macros to verify TG3_FLAGS types */

static inline int _tg3_flag(enum TG3_FLAGS flag, unsigned long *bits)
{
        return test_bit(flag, bits);
}

static inline void _tg3_flag_set(enum TG3_FLAGS flag, unsigned long *bits)
{
        set_bit(flag, bits);
}

static inline void _tg3_flag_clear(enum TG3_FLAGS flag, unsigned long *bits)
{
        clear_bit(flag, bits);
}

#define tg3_flag(tp, flag)                              \
        _tg3_flag(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_set(tp, flag)                          \
        _tg3_flag_set(TG3_FLAG_##flag, (tp)->tg3_flags)
#define tg3_flag_clear(tp, flag)                        \
        _tg3_flag_clear(TG3_FLAG_##flag, (tp)->tg3_flags)

#define DRV_MODULE_NAME         "tg3"
#define TG3_MAJ_NUM                     3
#define TG3_MIN_NUM                     122
#define DRV_MODULE_VERSION      \
        __stringify(TG3_MAJ_NUM) "." __stringify(TG3_MIN_NUM)
#define DRV_MODULE_RELDATE      "December 7, 2011"

#define RESET_KIND_SHUTDOWN     0
#define RESET_KIND_INIT         1
#define RESET_KIND_SUSPEND      2

#define TG3_DEF_RX_MODE         0
#define TG3_DEF_TX_MODE         0
#define TG3_DEF_MSG_ENABLE        \
        (NETIF_MSG_DRV          | \
         NETIF_MSG_PROBE        | \
         NETIF_MSG_LINK         | \
         NETIF_MSG_TIMER        | \
         NETIF_MSG_IFDOWN       | \
         NETIF_MSG_IFUP         | \
         NETIF_MSG_RX_ERR       | \
         NETIF_MSG_TX_ERR)

#define TG3_GRC_LCLCTL_PWRSW_DELAY      100

/* length of time before we decide the hardware is borked,
 * and dev->tx_timeout() should be called to fix the problem
 */

#define TG3_TX_TIMEOUT                  (5 * HZ)

/* hardware minimum and maximum for a single frame's data payload */
#define TG3_MIN_MTU                     60
#define TG3_MAX_MTU(tp) \
        (tg3_flag(tp, JUMBO_CAPABLE) ? 9000 : 1500)

/* These numbers seem to be hard coded in the NIC firmware somehow.
 * You can't change the ring sizes, but you can change where you place
 * them in the NIC onboard memory.
 */
#define TG3_RX_STD_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_STD_MAX_SIZE_5717 : TG3_RX_STD_MAX_SIZE_5700)
#define TG3_DEF_RX_RING_PENDING         200
#define TG3_RX_JMB_RING_SIZE(tp) \
        (tg3_flag(tp, LRG_PROD_RING_CAP) ? \
         TG3_RX_JMB_MAX_SIZE_5717 : TG3_RX_JMB_MAX_SIZE_5700)
#define TG3_DEF_RX_JUMBO_RING_PENDING   100

/* Do not place this n-ring entries value into the tp struct itself,
 * we really want to expose these constants to GCC so that modulo et
 * al.  operations are done with shifts and masks instead of with
 * hw multiply/modulo instructions.  Another solution would be to
 * replace things like '% foo' with '& (foo - 1)'.
 */

#define TG3_TX_RING_SIZE                512
#define TG3_DEF_TX_RING_PENDING         (TG3_TX_RING_SIZE - 1)

#define TG3_RX_STD_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * TG3_RX_STD_RING_SIZE(tp))
#define TG3_RX_JMB_RING_BYTES(tp) \
        (sizeof(struct tg3_ext_rx_buffer_desc) * TG3_RX_JMB_RING_SIZE(tp))
#define TG3_RX_RCB_RING_BYTES(tp) \
        (sizeof(struct tg3_rx_buffer_desc) * (tp->rx_ret_ring_mask + 1))
#define TG3_TX_RING_BYTES       (sizeof(struct tg3_tx_buffer_desc) * \
                                 TG3_TX_RING_SIZE)
#define NEXT_TX(N)              (((N) + 1) & (TG3_TX_RING_SIZE - 1))
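
/* Editor's note: TG3_TX_RING_SIZE is a power of two, so the AND in
 * NEXT_TX() is exactly the '% foo' -> '& (foo - 1)' rewrite described
 * above; with a 512-entry ring, ((511 + 1) & 511) == 0 == ((511 + 1) % 512).
 */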

#define TG3_DMA_BYTE_ENAB               64

#define TG3_RX_STD_DMA_SZ               1536
#define TG3_RX_JMB_DMA_SZ               9046

#define TG3_RX_DMA_TO_MAP_SZ(x)         ((x) + TG3_DMA_BYTE_ENAB)

#define TG3_RX_STD_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_STD_DMA_SZ)
#define TG3_RX_JMB_MAP_SZ               TG3_RX_DMA_TO_MAP_SZ(TG3_RX_JMB_DMA_SZ)

#define TG3_RX_STD_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_STD_RING_SIZE(tp))

#define TG3_RX_JMB_BUFF_RING_SIZE(tp) \
        (sizeof(struct ring_info) * TG3_RX_JMB_RING_SIZE(tp))

/* Due to a hardware bug, the 5701 can only DMA to memory addresses
 * that are at least dword aligned when used in PCIX mode.  The driver
 * works around this bug by double copying the packet.  This workaround
 * is built into the normal double copy length check for efficiency.
 *
 * However, the double copy is only necessary on those architectures
 * where unaligned memory accesses are inefficient.  For those architectures
 * where unaligned memory accesses incur little penalty, we can reintegrate
 * the 5701 in the normal rx path.  Doing so saves a device structure
 * dereference by hardcoding the double copy threshold in place.
 */
#define TG3_RX_COPY_THRESHOLD           256
#if NET_IP_ALIGN == 0 || defined(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)
        #define TG3_RX_COPY_THRESH(tp)  TG3_RX_COPY_THRESHOLD
#else
        #define TG3_RX_COPY_THRESH(tp)  ((tp)->rx_copy_thresh)
#endif
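
/* Editor's sketch (hypothetical helper names, not the driver's actual
 * rx path): the threshold selected above is consulted per received
 * frame, roughly:
 *
 *      if (len <= TG3_RX_COPY_THRESH(tp))
 *              copy_into_new_skb();       <- double copy; also covers the
 *      else                                  5701 PCIX alignment bug
 *              pass_up_mapped_buffer();
 */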

#if (NET_IP_ALIGN != 0)
#define TG3_RX_OFFSET(tp)       ((tp)->rx_offset)
#else
#define TG3_RX_OFFSET(tp)       (NET_SKB_PAD)
#endif

/* minimum number of free TX descriptors required to wake up TX process */
#define TG3_TX_WAKEUP_THRESH(tnapi)             ((tnapi)->tx_pending / 4)
#define TG3_TX_BD_DMA_MAX_2K            2048
#define TG3_TX_BD_DMA_MAX_4K            4096

#define TG3_RAW_IP_ALIGN 2

#define TG3_FW_UPDATE_TIMEOUT_SEC       5
#define TG3_FW_UPDATE_FREQ_SEC          (TG3_FW_UPDATE_TIMEOUT_SEC / 2)

#define FIRMWARE_TG3            "tigon/tg3.bin"
#define FIRMWARE_TG3TSO         "tigon/tg3_tso.bin"
#define FIRMWARE_TG3TSO5        "tigon/tg3_tso5.bin"

static char version[] __devinitdata =
        DRV_MODULE_NAME ".c:v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")";

MODULE_AUTHOR("David S. Miller (davem@redhat.com) and Jeff Garzik (jgarzik@pobox.com)");
MODULE_DESCRIPTION("Broadcom Tigon3 ethernet driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FIRMWARE_TG3);
MODULE_FIRMWARE(FIRMWARE_TG3TSO);
MODULE_FIRMWARE(FIRMWARE_TG3TSO5);

static int tg3_debug = -1;      /* -1 == use TG3_DEF_MSG_ENABLE as value */
module_param(tg3_debug, int, 0);
MODULE_PARM_DESC(tg3_debug, "Tigon3 bitmapped debugging message enable value");
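
/* Editor's note: the bitmap is built from the standard NETIF_MSG_* values
 * in <linux/netdevice.h>; assuming those values (DRV=0x1, PROBE=0x2,
 * LINK=0x4, ...), loading the module with tg3_debug=0x7 enables only
 * driver, probe, and link messages.
 */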

static DEFINE_PCI_DEVICE_TABLE(tg3_pci_tbl) = {
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5700)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5701)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702FE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705M_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703X)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5702A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5703A3)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5782)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5789)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5901_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5704S_2)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5705F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5721)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5722)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5751F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5752M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5753F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5754M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5755M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5756)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5786)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5787F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5714S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5715S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5780S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5906M)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5784)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5764)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5723)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_TIGON3_5761E)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761S)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5761SE)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_G)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5785_F)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57780)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57760)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57790)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57788)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5717)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5718)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57781)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57785)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57761)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57765)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57791)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_57795)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5719)},
        {PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, TG3PCI_DEVICE_TIGON3_5720)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9DXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_SYSKONNECT, PCI_DEVICE_ID_SYSKONNECT_9MXX)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1000)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1001)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC1003)},
        {PCI_DEVICE(PCI_VENDOR_ID_ALTIMA, PCI_DEVICE_ID_ALTIMA_AC9100)},
        {PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_TIGON3)},
        {PCI_DEVICE(0x10cf, 0x11a2)}, /* Fujitsu 1000base-SX with BCM5703SKHB */
        {}
};

MODULE_DEVICE_TABLE(pci, tg3_pci_tbl);

static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_stats_keys[] = {
        { "rx_octets" },
        { "rx_fragments" },
        { "rx_ucast_packets" },
        { "rx_mcast_packets" },
        { "rx_bcast_packets" },
        { "rx_fcs_errors" },
        { "rx_align_errors" },
        { "rx_xon_pause_rcvd" },
        { "rx_xoff_pause_rcvd" },
        { "rx_mac_ctrl_rcvd" },
        { "rx_xoff_entered" },
        { "rx_frame_too_long_errors" },
        { "rx_jabbers" },
        { "rx_undersize_packets" },
        { "rx_in_length_errors" },
        { "rx_out_length_errors" },
        { "rx_64_or_less_octet_packets" },
        { "rx_65_to_127_octet_packets" },
        { "rx_128_to_255_octet_packets" },
        { "rx_256_to_511_octet_packets" },
        { "rx_512_to_1023_octet_packets" },
        { "rx_1024_to_1522_octet_packets" },
        { "rx_1523_to_2047_octet_packets" },
        { "rx_2048_to_4095_octet_packets" },
        { "rx_4096_to_8191_octet_packets" },
        { "rx_8192_to_9022_octet_packets" },

        { "tx_octets" },
        { "tx_collisions" },

        { "tx_xon_sent" },
        { "tx_xoff_sent" },
        { "tx_flow_control" },
        { "tx_mac_errors" },
        { "tx_single_collisions" },
        { "tx_mult_collisions" },
        { "tx_deferred" },
        { "tx_excessive_collisions" },
        { "tx_late_collisions" },
        { "tx_collide_2times" },
        { "tx_collide_3times" },
        { "tx_collide_4times" },
        { "tx_collide_5times" },
        { "tx_collide_6times" },
        { "tx_collide_7times" },
        { "tx_collide_8times" },
        { "tx_collide_9times" },
        { "tx_collide_10times" },
        { "tx_collide_11times" },
        { "tx_collide_12times" },
        { "tx_collide_13times" },
        { "tx_collide_14times" },
        { "tx_collide_15times" },
        { "tx_ucast_packets" },
        { "tx_mcast_packets" },
        { "tx_bcast_packets" },
        { "tx_carrier_sense_errors" },
        { "tx_discards" },
        { "tx_errors" },

        { "dma_writeq_full" },
        { "dma_write_prioq_full" },
        { "rxbds_empty" },
        { "rx_discards" },
        { "rx_errors" },
        { "rx_threshold_hit" },

        { "dma_readq_full" },
        { "dma_read_prioq_full" },
        { "tx_comp_queue_full" },

        { "ring_set_send_prod_index" },
        { "ring_status_update" },
        { "nic_irqs" },
        { "nic_avoided_irqs" },
        { "nic_tx_threshold_hit" },

        { "mbuf_lwm_thresh_hit" },
};

#define TG3_NUM_STATS   ARRAY_SIZE(ethtool_stats_keys)


static const struct {
        const char string[ETH_GSTRING_LEN];
} ethtool_test_keys[] = {
        { "nvram test        (online) " },
        { "link test         (online) " },
        { "register test     (offline)" },
        { "memory test       (offline)" },
        { "mac loopback test (offline)" },
        { "phy loopback test (offline)" },
        { "ext loopback test (offline)" },
        { "interrupt test    (offline)" },
};

#define TG3_NUM_TEST    ARRAY_SIZE(ethtool_test_keys)


static void tg3_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
}

static u32 tg3_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off);
}

static void tg3_ape_write32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->aperegs + off);
}

static u32 tg3_ape_read32(struct tg3 *tp, u32 off)
{
        return readl(tp->aperegs + off);
}

static void tg3_write_indirect_reg32(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_write_flush_reg32(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off);
        readl(tp->regs + off);
}

static u32 tg3_read_indirect_reg32(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

static void tg3_write_indirect_mbox(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (off == (MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW)) {
                pci_write_config_dword(tp->pdev, TG3PCI_RCV_RET_RING_CON_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }
        if (off == TG3_RX_STD_PROD_IDX_REG) {
                pci_write_config_dword(tp->pdev, TG3PCI_STD_RING_PROD_IDX +
                                       TG3_64BIT_REG_LOW, val);
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_DATA, val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);

        /* In indirect mode when disabling interrupts, we also need
         * to clear the interrupt bit in the GRC local ctrl register.
         */
        if ((off == (MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW)) &&
            (val == 0x1)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MISC_LOCAL_CTRL,
                                       tp->grc_local_ctrl|GRC_LCLCTRL_CLEARINT);
        }
}

static u32 tg3_read_indirect_mbox(struct tg3 *tp, u32 off)
{
        unsigned long flags;
        u32 val;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        pci_write_config_dword(tp->pdev, TG3PCI_REG_BASE_ADDR, off + 0x5600);
        pci_read_config_dword(tp->pdev, TG3PCI_REG_DATA, &val);
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
        return val;
}

/* usec_wait specifies the wait time in usec when writing to certain registers
 * where it is unsafe to read back the register without some delay.
 * GRC_LOCAL_CTRL is one example if the GPIOs are toggled to switch power.
 * TG3PCI_CLOCK_CTRL is another example if the clock frequencies are changed.
 */
static void _tw32_flush(struct tg3 *tp, u32 off, u32 val, u32 usec_wait)
{
        if (tg3_flag(tp, PCIX_TARGET_HWBUG) || tg3_flag(tp, ICH_WORKAROUND))
                /* Non-posted methods */
                tp->write32(tp, off, val);
        else {
                /* Posted method */
                tg3_write32(tp, off, val);
                if (usec_wait)
                        udelay(usec_wait);
                tp->read32(tp, off);
        }
        /* Wait again after the read for the posted method to guarantee that
         * the wait time is met.
         */
        if (usec_wait)
                udelay(usec_wait);
}

static inline void tw32_mailbox_flush(struct tg3 *tp, u32 off, u32 val)
{
        tp->write32_mbox(tp, off, val);
        if (!tg3_flag(tp, MBOX_WRITE_REORDER) && !tg3_flag(tp, ICH_WORKAROUND))
                tp->read32_mbox(tp, off);
}

static void tg3_write32_tx_mbox(struct tg3 *tp, u32 off, u32 val)
{
        void __iomem *mbox = tp->regs + off;
        writel(val, mbox);
        if (tg3_flag(tp, TXD_MBOX_HWBUG))
                writel(val, mbox);
        if (tg3_flag(tp, MBOX_WRITE_REORDER))
                readl(mbox);
}

static u32 tg3_read32_mbox_5906(struct tg3 *tp, u32 off)
{
        return readl(tp->regs + off + GRCMBOX_BASE);
}

static void tg3_write32_mbox_5906(struct tg3 *tp, u32 off, u32 val)
{
        writel(val, tp->regs + off + GRCMBOX_BASE);
}

#define tw32_mailbox(reg, val)          tp->write32_mbox(tp, reg, val)
#define tw32_mailbox_f(reg, val)        tw32_mailbox_flush(tp, (reg), (val))
#define tw32_rx_mbox(reg, val)          tp->write32_rx_mbox(tp, reg, val)
#define tw32_tx_mbox(reg, val)          tp->write32_tx_mbox(tp, reg, val)
#define tr32_mailbox(reg)               tp->read32_mbox(tp, reg)

#define tw32(reg, val)                  tp->write32(tp, reg, val)
#define tw32_f(reg, val)                _tw32_flush(tp, (reg), (val), 0)
#define tw32_wait_f(reg, val, us)       _tw32_flush(tp, (reg), (val), (us))
#define tr32(reg)                       tp->read32(tp, reg)
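
/* Usage sketch (editor's example): power-switch GPIO toggles go through
 * the flushing, delayed variant, in line with the usec_wait comment
 * above, e.g.:
 *
 *      tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
 *                  TG3_GRC_LCLCTL_PWRSW_DELAY);
 */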

static void tg3_write_mem(struct tg3 *tp, u32 off, u32 val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC))
                return;

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                tw32_f(TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_read_mem(struct tg3 *tp, u32 off, u32 *val)
{
        unsigned long flags;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 &&
            (off >= NIC_SRAM_STATS_BLK) && (off < NIC_SRAM_TX_BUFFER_DESC)) {
                *val = 0;
                return;
        }

        spin_lock_irqsave(&tp->indirect_lock, flags);
        if (tg3_flag(tp, SRAM_USE_CONFIG)) {
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, off);
                pci_read_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);

                /* Always leave this as zero. */
                pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
        } else {
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, off);
                *val = tr32(TG3PCI_MEM_WIN_DATA);

                /* Always leave this as zero. */
                tw32_f(TG3PCI_MEM_WIN_BASE_ADDR, 0);
        }
        spin_unlock_irqrestore(&tp->indirect_lock, flags);
}

static void tg3_ape_lock_init(struct tg3 *tp)
{
        int i;
        u32 regbase, bit;

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                regbase = TG3_APE_LOCK_GRANT;
        else
                regbase = TG3_APE_PER_LOCK_GRANT;

        /* Make sure the driver doesn't hold any stale locks. */
        for (i = TG3_APE_LOCK_PHY0; i <= TG3_APE_LOCK_GPIO; i++) {
                switch (i) {
                case TG3_APE_LOCK_PHY0:
                case TG3_APE_LOCK_PHY1:
                case TG3_APE_LOCK_PHY2:
                case TG3_APE_LOCK_PHY3:
                        bit = APE_LOCK_GRANT_DRIVER;
                        break;
                default:
                        if (!tp->pci_fn)
                                bit = APE_LOCK_GRANT_DRIVER;
                        else
                                bit = 1 << tp->pci_fn;
                }
                tg3_ape_write32(tp, regbase + 4 * i, bit);
        }

}

static int tg3_ape_lock(struct tg3 *tp, int locknum)
{
        int i, off;
        int ret = 0;
        u32 status, req, gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return 0;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return 0;
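                /* else: fall through, GPIO uses the same request bit as GRC/MEM */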
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_REQ_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return -EINVAL;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
                req = TG3_APE_LOCK_REQ;
                gnt = TG3_APE_LOCK_GRANT;
        } else {
                req = TG3_APE_PER_LOCK_REQ;
                gnt = TG3_APE_PER_LOCK_GRANT;
        }

        off = 4 * locknum;

        tg3_ape_write32(tp, req + off, bit);

        /* Wait for up to 1 millisecond to acquire lock. */
        for (i = 0; i < 100; i++) {
                status = tg3_ape_read32(tp, gnt + off);
                if (status == bit)
                        break;
                udelay(10);
        }

        if (status != bit) {
                /* Revoke the lock request. */
                tg3_ape_write32(tp, gnt + off, bit);
                ret = -EBUSY;
        }

        return ret;
}

static void tg3_ape_unlock(struct tg3 *tp, int locknum)
{
        u32 gnt, bit;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (locknum) {
        case TG3_APE_LOCK_GPIO:
                if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                        return;
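                /* else: fall through, GPIO is released like GRC/MEM */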
        case TG3_APE_LOCK_GRC:
        case TG3_APE_LOCK_MEM:
                if (!tp->pci_fn)
                        bit = APE_LOCK_GRANT_DRIVER;
                else
                        bit = 1 << tp->pci_fn;
                break;
        default:
                return;
        }

        if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
                gnt = TG3_APE_LOCK_GRANT;
        else
                gnt = TG3_APE_PER_LOCK_GRANT;

        tg3_ape_write32(tp, gnt + 4 * locknum, bit);
}

static void tg3_ape_send_event(struct tg3 *tp, u32 event)
{
        int i;
        u32 apedata;

        /* NCSI does not support APE events */
        if (tg3_flag(tp, APE_HAS_NCSI))
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
        if (apedata != APE_SEG_SIG_MAGIC)
                return;

        apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
        if (!(apedata & APE_FW_STATUS_READY))
                return;

        /* Wait for up to 1 millisecond for APE to service previous event. */
        for (i = 0; i < 10; i++) {
                if (tg3_ape_lock(tp, TG3_APE_LOCK_MEM))
                        return;

                apedata = tg3_ape_read32(tp, TG3_APE_EVENT_STATUS);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        tg3_ape_write32(tp, TG3_APE_EVENT_STATUS,
                                        event | APE_EVENT_STATUS_EVENT_PENDING);

                tg3_ape_unlock(tp, TG3_APE_LOCK_MEM);

                if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                        break;

                udelay(100);
        }

        if (!(apedata & APE_EVENT_STATUS_EVENT_PENDING))
                tg3_ape_write32(tp, TG3_APE_EVENT, APE_EVENT_1);
}

static void tg3_ape_driver_state_change(struct tg3 *tp, int kind)
{
        u32 event;
        u32 apedata;

        if (!tg3_flag(tp, ENABLE_APE))
                return;

        switch (kind) {
        case RESET_KIND_INIT:
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG,
                                APE_HOST_SEG_SIG_MAGIC);
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_LEN,
                                APE_HOST_SEG_LEN_MAGIC);
                apedata = tg3_ape_read32(tp, TG3_APE_HOST_INIT_COUNT);
                tg3_ape_write32(tp, TG3_APE_HOST_INIT_COUNT, ++apedata);
                tg3_ape_write32(tp, TG3_APE_HOST_DRIVER_ID,
                        APE_HOST_DRIVER_ID_MAGIC(TG3_MAJ_NUM, TG3_MIN_NUM));
                tg3_ape_write32(tp, TG3_APE_HOST_BEHAVIOR,
                                APE_HOST_BEHAV_NO_PHYLOCK);
                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE,
                                    TG3_APE_HOST_DRVR_STATE_START);

                event = APE_EVENT_STATUS_STATE_START;
                break;
        case RESET_KIND_SHUTDOWN:
                /* With the interface we are currently using,
                 * APE does not track driver state.  Wiping
                 * out the HOST SEGMENT SIGNATURE forces
                 * the APE to assume OS absent status.
                 */
                tg3_ape_write32(tp, TG3_APE_HOST_SEG_SIG, 0x0);

                if (device_may_wakeup(&tp->pdev->dev) &&
                    tg3_flag(tp, WOL_ENABLE)) {
                        tg3_ape_write32(tp, TG3_APE_HOST_WOL_SPEED,
                                            TG3_APE_HOST_WOL_SPEED_AUTO);
                        apedata = TG3_APE_HOST_DRVR_STATE_WOL;
                } else
                        apedata = TG3_APE_HOST_DRVR_STATE_UNLOAD;

                tg3_ape_write32(tp, TG3_APE_HOST_DRVR_STATE, apedata);

                event = APE_EVENT_STATUS_STATE_UNLOAD;
                break;
        case RESET_KIND_SUSPEND:
                event = APE_EVENT_STATUS_STATE_SUSPEND;
                break;
        default:
                return;
        }

        event |= APE_EVENT_STATUS_DRIVER_EVNT | APE_EVENT_STATUS_STATE_CHNGE;

        tg3_ape_send_event(tp, event);
}

static void tg3_disable_ints(struct tg3 *tp)
{
        int i;

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT));
        for (i = 0; i < tp->irq_max; i++)
                tw32_mailbox_f(tp->napi[i].int_mbox, 0x00000001);
}

static void tg3_enable_ints(struct tg3 *tp)
{
        int i;

        tp->irq_sync = 0;
        wmb();

        tw32(TG3PCI_MISC_HOST_CTRL,
             (tp->misc_host_ctrl & ~MISC_HOST_CTRL_MASK_PCI_INT));

        tp->coal_now = tp->coalesce_mode | HOSTCC_MODE_ENABLE;
        for (i = 0; i < tp->irq_cnt; i++) {
                struct tg3_napi *tnapi = &tp->napi[i];

                tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
                if (tg3_flag(tp, 1SHOT_MSI))
                        tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);

                tp->coal_now |= tnapi->coal_now;
        }

        /* Force an initial interrupt */
        if (!tg3_flag(tp, TAGGED_STATUS) &&
            (tp->napi[0].hw_status->status & SD_STATUS_UPDATED))
                tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
        else
                tw32(HOSTCC_MODE, tp->coal_now);

        tp->coal_now &= ~(tp->napi[0].coal_now | tp->napi[1].coal_now);
}

static inline unsigned int tg3_has_work(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;
        struct tg3_hw_status *sblk = tnapi->hw_status;
        unsigned int work_exists = 0;

        /* check for phy events */
        if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
                if (sblk->status & SD_STATUS_LINK_CHG)
                        work_exists = 1;
        }
        /* check for RX/TX work to do */
        if (sblk->idx[0].tx_consumer != tnapi->tx_cons ||
            *(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
                work_exists = 1;

        return work_exists;
}

/* tg3_int_reenable
 *  similar to tg3_enable_ints, but it accurately determines whether there
 *  is new work pending and can return without flushing the PIO write
 *  which reenables interrupts
 */
static void tg3_int_reenable(struct tg3_napi *tnapi)
{
        struct tg3 *tp = tnapi->tp;

        tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
        mmiowb();

        /* When doing tagged status, this work check is unnecessary.
         * The last_tag we write above tells the chip which piece of
         * work we've completed.
         */
        if (!tg3_flag(tp, TAGGED_STATUS) && tg3_has_work(tnapi))
                tw32(HOSTCC_MODE, tp->coalesce_mode |
                     HOSTCC_MODE_ENABLE | tnapi->coal_now);
}

static void tg3_switch_clocks(struct tg3 *tp)
{
        u32 clock_ctrl;
        u32 orig_clock_ctrl;

        if (tg3_flag(tp, CPMU_PRESENT) || tg3_flag(tp, 5780_CLASS))
                return;

        clock_ctrl = tr32(TG3PCI_CLOCK_CTRL);

        orig_clock_ctrl = clock_ctrl;
        clock_ctrl &= (CLOCK_CTRL_FORCE_CLKRUN |
                       CLOCK_CTRL_CLKRUN_OENABLE |
                       0x1f);
        tp->pci_clock_ctrl = clock_ctrl;

        if (tg3_flag(tp, 5705_PLUS)) {
                if (orig_clock_ctrl & CLOCK_CTRL_625_CORE) {
                        tw32_wait_f(TG3PCI_CLOCK_CTRL,
                                    clock_ctrl | CLOCK_CTRL_625_CORE, 40);
                }
        } else if ((orig_clock_ctrl & CLOCK_CTRL_44MHZ_CORE) != 0) {
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl |
                            (CLOCK_CTRL_44MHZ_CORE | CLOCK_CTRL_ALTCLK),
                            40);
                tw32_wait_f(TG3PCI_CLOCK_CTRL,
                            clock_ctrl | (CLOCK_CTRL_ALTCLK),
                            40);
        }
        tw32_wait_f(TG3PCI_CLOCK_CTRL, clock_ctrl, 40);
}

#define PHY_BUSY_LOOPS  5000

static int tg3_readphy(struct tg3 *tp, int reg, u32 *val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        *val = 0x0;

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (MI_COM_CMD_READ | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);

                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0) {
                *val = frame_val & MI_COM_DATA_MASK;
                ret = 0;
        }

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_writephy(struct tg3 *tp, int reg, u32 val)
{
        u32 frame_val;
        unsigned int loops;
        int ret;

        if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
            (reg == MII_CTRL1000 || reg == MII_TG3_AUX_CTRL))
                return 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE,
                     (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
                udelay(80);
        }

        frame_val  = ((tp->phy_addr << MI_COM_PHY_ADDR_SHIFT) &
                      MI_COM_PHY_ADDR_MASK);
        frame_val |= ((reg << MI_COM_REG_ADDR_SHIFT) &
                      MI_COM_REG_ADDR_MASK);
        frame_val |= (val & MI_COM_DATA_MASK);
        frame_val |= (MI_COM_CMD_WRITE | MI_COM_START);

        tw32_f(MAC_MI_COM, frame_val);

        loops = PHY_BUSY_LOOPS;
        while (loops != 0) {
                udelay(10);
                frame_val = tr32(MAC_MI_COM);
                if ((frame_val & MI_COM_BUSY) == 0) {
                        udelay(5);
                        frame_val = tr32(MAC_MI_COM);
                        break;
                }
                loops -= 1;
        }

        ret = -EBUSY;
        if (loops != 0)
                ret = 0;

        if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
                tw32_f(MAC_MI_MODE, tp->mi_mode);
                udelay(80);
        }

        return ret;
}

static int tg3_phy_cl45_write(struct tg3 *tp, u32 devad, u32 addr, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phy_cl45_read(struct tg3 *tp, u32 devad, u32 addr, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL, devad);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_ADDRESS, addr);
        if (err)
                goto done;

        err = tg3_writephy(tp, MII_TG3_MMD_CTRL,
                           MII_TG3_MMD_CTRL_DATA_NOINC | devad);
        if (err)
                goto done;

        err = tg3_readphy(tp, MII_TG3_MMD_ADDRESS, val);

done:
        return err;
}

static int tg3_phydsp_read(struct tg3 *tp, u32 reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phydsp_write(struct tg3 *tp, u32 reg, u32 val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_DSP_ADDRESS, reg);
        if (!err)
                err = tg3_writephy(tp, MII_TG3_DSP_RW_PORT, val);

        return err;
}

static int tg3_phy_auxctl_read(struct tg3 *tp, int reg, u32 *val)
{
        int err;

        err = tg3_writephy(tp, MII_TG3_AUX_CTRL,
                           (reg << MII_TG3_AUXCTL_MISC_RDSEL_SHIFT) |
                           MII_TG3_AUXCTL_SHDWSEL_MISC);
        if (!err)
                err = tg3_readphy(tp, MII_TG3_AUX_CTRL, val);

        return err;
}

static int tg3_phy_auxctl_write(struct tg3 *tp, int reg, u32 set)
{
        if (reg == MII_TG3_AUXCTL_SHDWSEL_MISC)
                set |= MII_TG3_AUXCTL_MISC_WREN;

        return tg3_writephy(tp, MII_TG3_AUX_CTRL, set | reg);
}

#define TG3_PHY_AUXCTL_SMDSP_ENABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_SMDSP_ENA | \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)

#define TG3_PHY_AUXCTL_SMDSP_DISABLE(tp) \
        tg3_phy_auxctl_write((tp), MII_TG3_AUXCTL_SHDWSEL_AUXCTL, \
                             MII_TG3_AUXCTL_ACTL_TX_6DB)
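
/* Usage sketch (editor's example): DSP reads and writes elsewhere in
 * the driver are bracketed by this pair, roughly:
 *
 *      err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
 *      if (!err) {
 *              tg3_phydsp_write(tp, reg, val);
 *              TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
 *      }
 */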

static int tg3_bmcr_reset(struct tg3 *tp)
{
        u32 phy_control;
        int limit, err;

        /* OK, reset it, and poll the BMCR_RESET bit until it
         * clears or we time out.
         */
        phy_control = BMCR_RESET;
        err = tg3_writephy(tp, MII_BMCR, phy_control);
        if (err != 0)
                return -EBUSY;

        limit = 5000;
        while (limit--) {
                err = tg3_readphy(tp, MII_BMCR, &phy_control);
                if (err != 0)
                        return -EBUSY;

                if ((phy_control & BMCR_RESET) == 0) {
                        udelay(40);
                        break;
                }
                udelay(10);
        }
        if (limit < 0)
                return -EBUSY;

        return 0;
}

static int tg3_mdio_read(struct mii_bus *bp, int mii_id, int reg)
{
        struct tg3 *tp = bp->priv;
        u32 val;

        spin_lock_bh(&tp->lock);

        if (tg3_readphy(tp, reg, &val))
                val = -EIO;

        spin_unlock_bh(&tp->lock);

        return val;
}

static int tg3_mdio_write(struct mii_bus *bp, int mii_id, int reg, u16 val)
{
        struct tg3 *tp = bp->priv;
        u32 ret = 0;

        spin_lock_bh(&tp->lock);

        if (tg3_writephy(tp, reg, val))
                ret = -EIO;

        spin_unlock_bh(&tp->lock);

        return ret;
}

static int tg3_mdio_reset(struct mii_bus *bp)
{
        return 0;
}

static void tg3_mdio_config_5785(struct tg3 *tp)
{
        u32 val;
        struct phy_device *phydev;

        phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
        switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
        case PHY_ID_BCM50610:
        case PHY_ID_BCM50610M:
                val = MAC_PHYCFG2_50610_LED_MODES;
                break;
        case PHY_ID_BCMAC131:
                val = MAC_PHYCFG2_AC131_LED_MODES;
                break;
        case PHY_ID_RTL8211C:
                val = MAC_PHYCFG2_RTL8211C_LED_MODES;
                break;
        case PHY_ID_RTL8201E:
                val = MAC_PHYCFG2_RTL8201E_LED_MODES;
                break;
        default:
                return;
        }

        if (phydev->interface != PHY_INTERFACE_MODE_RGMII) {
                tw32(MAC_PHYCFG2, val);

                val = tr32(MAC_PHYCFG1);
                val &= ~(MAC_PHYCFG1_RGMII_INT |
                         MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK);
                val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT;
                tw32(MAC_PHYCFG1, val);

                return;
        }

        if (!tg3_flag(tp, RGMII_INBAND_DISABLE))
                val |= MAC_PHYCFG2_EMODE_MASK_MASK |
                       MAC_PHYCFG2_FMODE_MASK_MASK |
                       MAC_PHYCFG2_GMODE_MASK_MASK |
                       MAC_PHYCFG2_ACT_MASK_MASK   |
                       MAC_PHYCFG2_QUAL_MASK_MASK |
                       MAC_PHYCFG2_INBAND_ENABLE;

        tw32(MAC_PHYCFG2, val);

        val = tr32(MAC_PHYCFG1);
        val &= ~(MAC_PHYCFG1_RXCLK_TO_MASK | MAC_PHYCFG1_TXCLK_TO_MASK |
                 MAC_PHYCFG1_RGMII_EXT_RX_DEC | MAC_PHYCFG1_RGMII_SND_STAT_EN);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_PHYCFG1_RGMII_EXT_RX_DEC;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_PHYCFG1_RGMII_SND_STAT_EN;
        }
        val |= MAC_PHYCFG1_RXCLK_TIMEOUT | MAC_PHYCFG1_TXCLK_TIMEOUT |
               MAC_PHYCFG1_RGMII_INT | MAC_PHYCFG1_TXC_DRV;
        tw32(MAC_PHYCFG1, val);

        val = tr32(MAC_EXT_RGMII_MODE);
        val &= ~(MAC_RGMII_MODE_RX_INT_B |
                 MAC_RGMII_MODE_RX_QUALITY |
                 MAC_RGMII_MODE_RX_ACTIVITY |
                 MAC_RGMII_MODE_RX_ENG_DET |
                 MAC_RGMII_MODE_TX_ENABLE |
                 MAC_RGMII_MODE_TX_LOWPWR |
                 MAC_RGMII_MODE_TX_RESET);
        if (!tg3_flag(tp, RGMII_INBAND_DISABLE)) {
                if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
                        val |= MAC_RGMII_MODE_RX_INT_B |
                               MAC_RGMII_MODE_RX_QUALITY |
                               MAC_RGMII_MODE_RX_ACTIVITY |
                               MAC_RGMII_MODE_RX_ENG_DET;
                if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
                        val |= MAC_RGMII_MODE_TX_ENABLE |
                               MAC_RGMII_MODE_TX_LOWPWR |
                               MAC_RGMII_MODE_TX_RESET;
        }
        tw32(MAC_EXT_RGMII_MODE, val);
}

static void tg3_mdio_start(struct tg3 *tp)
{
        tp->mi_mode &= ~MAC_MI_MODE_AUTO_POLL;
        tw32_f(MAC_MI_MODE, tp->mi_mode);
        udelay(80);

        if (tg3_flag(tp, MDIOBUS_INITED) &&
            GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
                tg3_mdio_config_5785(tp);
}

static int tg3_mdio_init(struct tg3 *tp)
{
        int i;
        u32 reg;
        struct phy_device *phydev;

        if (tg3_flag(tp, 5717_PLUS)) {
                u32 is_serdes;

                tp->phy_addr = tp->pci_fn + 1;

                if (tp->pci_chip_rev_id != CHIPREV_ID_5717_A0)
                        is_serdes = tr32(SG_DIG_STATUS) & SG_DIG_IS_SERDES;
                else
                        is_serdes = tr32(TG3_CPMU_PHY_STRAP) &
                                    TG3_CPMU_PHY_STRAP_IS_SERDES;
                if (is_serdes)
                        tp->phy_addr += 7;
        } else
                tp->phy_addr = TG3_PHY_MII_ADDR;

        tg3_mdio_start(tp);

        if (!tg3_flag(tp, USE_PHYLIB) || tg3_flag(tp, MDIOBUS_INITED))
                return 0;

        tp->mdio_bus = mdiobus_alloc();
        if (tp->mdio_bus == NULL)
                return -ENOMEM;

        tp->mdio_bus->name     = "tg3 mdio bus";
        snprintf(tp->mdio_bus->id, MII_BUS_ID_SIZE, "%x",
                 (tp->pdev->bus->number << 8) | tp->pdev->devfn);
        tp->mdio_bus->priv     = tp;
        tp->mdio_bus->parent   = &tp->pdev->dev;
        tp->mdio_bus->read     = &tg3_mdio_read;
        tp->mdio_bus->write    = &tg3_mdio_write;
        tp->mdio_bus->reset    = &tg3_mdio_reset;
        tp->mdio_bus->phy_mask = ~(1 << TG3_PHY_MII_ADDR);
        tp->mdio_bus->irq      = &tp->mdio_irq[0];

        for (i = 0; i < PHY_MAX_ADDR; i++)
                tp->mdio_bus->irq[i] = PHY_POLL;

        /* The bus registration will look for all the PHYs on the mdio bus.
         * Unfortunately, it does not ensure the PHY is powered up before
         * accessing the PHY ID registers.  A chip reset is the
         * quickest way to bring the device back to an operational state.
1349          */
1350         if (tg3_readphy(tp, MII_BMCR, &reg) || (reg & BMCR_PDOWN))
1351                 tg3_bmcr_reset(tp);
1352
1353         i = mdiobus_register(tp->mdio_bus);
1354         if (i) {
1355                 dev_warn(&tp->pdev->dev, "mdiobus_reg failed (0x%x)\n", i);
1356                 mdiobus_free(tp->mdio_bus);
1357                 return i;
1358         }
1359
1360         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1361
1362         if (!phydev || !phydev->drv) {
1363                 dev_warn(&tp->pdev->dev, "No PHY devices\n");
1364                 mdiobus_unregister(tp->mdio_bus);
1365                 mdiobus_free(tp->mdio_bus);
1366                 return -ENODEV;
1367         }
1368
1369         switch (phydev->drv->phy_id & phydev->drv->phy_id_mask) {
1370         case PHY_ID_BCM57780:
1371                 phydev->interface = PHY_INTERFACE_MODE_GMII;
1372                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1373                 break;
1374         case PHY_ID_BCM50610:
1375         case PHY_ID_BCM50610M:
1376                 phydev->dev_flags |= PHY_BRCM_CLEAR_RGMII_MODE |
1377                                      PHY_BRCM_RX_REFCLK_UNUSED |
1378                                      PHY_BRCM_DIS_TXCRXC_NOENRGY |
1379                                      PHY_BRCM_AUTO_PWRDWN_ENABLE;
1380                 if (tg3_flag(tp, RGMII_INBAND_DISABLE))
1381                         phydev->dev_flags |= PHY_BRCM_STD_IBND_DISABLE;
1382                 if (tg3_flag(tp, RGMII_EXT_IBND_RX_EN))
1383                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_RX_ENABLE;
1384                 if (tg3_flag(tp, RGMII_EXT_IBND_TX_EN))
1385                         phydev->dev_flags |= PHY_BRCM_EXT_IBND_TX_ENABLE;
1386                 /* fallthru */
1387         case PHY_ID_RTL8211C:
1388                 phydev->interface = PHY_INTERFACE_MODE_RGMII;
1389                 break;
1390         case PHY_ID_RTL8201E:
1391         case PHY_ID_BCMAC131:
1392                 phydev->interface = PHY_INTERFACE_MODE_MII;
1393                 phydev->dev_flags |= PHY_BRCM_AUTO_PWRDWN_ENABLE;
1394                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
1395                 break;
1396         }
1397
1398         tg3_flag_set(tp, MDIOBUS_INITED);
1399
1400         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
1401                 tg3_mdio_config_5785(tp);
1402
1403         return 0;
1404 }
1405
1406 static void tg3_mdio_fini(struct tg3 *tp)
1407 {
1408         if (tg3_flag(tp, MDIOBUS_INITED)) {
1409                 tg3_flag_clear(tp, MDIOBUS_INITED);
1410                 mdiobus_unregister(tp->mdio_bus);
1411                 mdiobus_free(tp->mdio_bus);
1412         }
1413 }
1414
1415 /* tp->lock is held. */
1416 static inline void tg3_generate_fw_event(struct tg3 *tp)
1417 {
1418         u32 val;
1419
1420         val = tr32(GRC_RX_CPU_EVENT);
1421         val |= GRC_RX_CPU_DRIVER_EVENT;
1422         tw32_f(GRC_RX_CPU_EVENT, val);
1423
1424         tp->last_event_jiffies = jiffies;
1425 }
1426
1427 #define TG3_FW_EVENT_TIMEOUT_USEC 2500
1428
1429 /* tp->lock is held. */
1430 static void tg3_wait_for_event_ack(struct tg3 *tp)
1431 {
1432         int i;
1433         unsigned int delay_cnt;
1434         long time_remain;
1435
1436         /* If enough time has passed, no wait is necessary. */
1437         time_remain = (long)(tp->last_event_jiffies + 1 +
1438                       usecs_to_jiffies(TG3_FW_EVENT_TIMEOUT_USEC)) -
1439                       (long)jiffies;
1440         if (time_remain < 0)
1441                 return;
1442
1443         /* Check if we can shorten the wait time. */
1444         delay_cnt = jiffies_to_usecs(time_remain);
1445         if (delay_cnt > TG3_FW_EVENT_TIMEOUT_USEC)
1446                 delay_cnt = TG3_FW_EVENT_TIMEOUT_USEC;
1447         delay_cnt = (delay_cnt >> 3) + 1;
1448
1449         for (i = 0; i < delay_cnt; i++) {
1450                 if (!(tr32(GRC_RX_CPU_EVENT) & GRC_RX_CPU_DRIVER_EVENT))
1451                         break;
1452                 udelay(8);
1453         }
1454 }
1455
1456 /* tp->lock is held. */
1457 static void tg3_phy_gather_ump_data(struct tg3 *tp, u32 *data)
1458 {
1459         u32 reg, val;
1460
1461         val = 0;
1462         if (!tg3_readphy(tp, MII_BMCR, &reg))
1463                 val = reg << 16;
1464         if (!tg3_readphy(tp, MII_BMSR, &reg))
1465                 val |= (reg & 0xffff);
1466         *data++ = val;
1467
1468         val = 0;
1469         if (!tg3_readphy(tp, MII_ADVERTISE, &reg))
1470                 val = reg << 16;
1471         if (!tg3_readphy(tp, MII_LPA, &reg))
1472                 val |= (reg & 0xffff);
1473         *data++ = val;
1474
1475         val = 0;
1476         if (!(tp->phy_flags & TG3_PHYFLG_MII_SERDES)) {
1477                 if (!tg3_readphy(tp, MII_CTRL1000, &reg))
1478                         val = reg << 16;
1479                 if (!tg3_readphy(tp, MII_STAT1000, &reg))
1480                         val |= (reg & 0xffff);
1481         }
1482         *data++ = val;
1483
1484         if (!tg3_readphy(tp, MII_PHYADDR, &reg))
1485                 val = reg << 16;
1486         else
1487                 val = 0;
1488         *data++ = val;
1489 }
1490
1491 /* tp->lock is held. */
1492 static void tg3_ump_link_report(struct tg3 *tp)
1493 {
1494         u32 data[4];
1495
1496         if (!tg3_flag(tp, 5780_CLASS) || !tg3_flag(tp, ENABLE_ASF))
1497                 return;
1498
1499         tg3_phy_gather_ump_data(tp, data);
1500
1501         tg3_wait_for_event_ack(tp);
1502
1503         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_LINK_UPDATE);
1504         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 14);
1505         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x0, data[0]);
1506         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x4, data[1]);
1507         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0x8, data[2]);
1508         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX + 0xc, data[3]);
1509
1510         tg3_generate_fw_event(tp);
1511 }
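/*
 * The sequence above is the general driver-to-firmware command
 * pattern used in this file, e.g. (with tp->lock held):
 *
 *	tg3_wait_for_event_ack(tp);
 *	tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
 *	tg3_generate_fw_event(tp);
 *
 * which is the core of what tg3_stop_fw() below does to pause the
 * firmware (it then waits once more for the ACK).
 */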
1512
1513 /* tp->lock is held. */
1514 static void tg3_stop_fw(struct tg3 *tp)
1515 {
1516         if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
1517                 /* Wait for the RX CPU to ACK the previous event. */
1518                 tg3_wait_for_event_ack(tp);
1519
1520                 tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX, FWCMD_NICDRV_PAUSE_FW);
1521
1522                 tg3_generate_fw_event(tp);
1523
1524                 /* Wait for the RX CPU to ACK this event. */
1525                 tg3_wait_for_event_ack(tp);
1526         }
1527 }
1528
1529 /* tp->lock is held. */
1530 static void tg3_write_sig_pre_reset(struct tg3 *tp, int kind)
1531 {
1532         tg3_write_mem(tp, NIC_SRAM_FIRMWARE_MBOX,
1533                       NIC_SRAM_FIRMWARE_MBOX_MAGIC1);
1534
1535         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1536                 switch (kind) {
1537                 case RESET_KIND_INIT:
1538                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1539                                       DRV_STATE_START);
1540                         break;
1541
1542                 case RESET_KIND_SHUTDOWN:
1543                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1544                                       DRV_STATE_UNLOAD);
1545                         break;
1546
1547                 case RESET_KIND_SUSPEND:
1548                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1549                                       DRV_STATE_SUSPEND);
1550                         break;
1551
1552                 default:
1553                         break;
1554                 }
1555         }
1556
1557         if (kind == RESET_KIND_INIT ||
1558             kind == RESET_KIND_SUSPEND)
1559                 tg3_ape_driver_state_change(tp, kind);
1560 }
1561
1562 /* tp->lock is held. */
1563 static void tg3_write_sig_post_reset(struct tg3 *tp, int kind)
1564 {
1565         if (tg3_flag(tp, ASF_NEW_HANDSHAKE)) {
1566                 switch (kind) {
1567                 case RESET_KIND_INIT:
1568                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1569                                       DRV_STATE_START_DONE);
1570                         break;
1571
1572                 case RESET_KIND_SHUTDOWN:
1573                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1574                                       DRV_STATE_UNLOAD_DONE);
1575                         break;
1576
1577                 default:
1578                         break;
1579                 }
1580         }
1581
1582         if (kind == RESET_KIND_SHUTDOWN)
1583                 tg3_ape_driver_state_change(tp, kind);
1584 }
1585
1586 /* tp->lock is held. */
1587 static void tg3_write_sig_legacy(struct tg3 *tp, int kind)
1588 {
1589         if (tg3_flag(tp, ENABLE_ASF)) {
1590                 switch (kind) {
1591                 case RESET_KIND_INIT:
1592                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1593                                       DRV_STATE_START);
1594                         break;
1595
1596                 case RESET_KIND_SHUTDOWN:
1597                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1598                                       DRV_STATE_UNLOAD);
1599                         break;
1600
1601                 case RESET_KIND_SUSPEND:
1602                         tg3_write_mem(tp, NIC_SRAM_FW_DRV_STATE_MBOX,
1603                                       DRV_STATE_SUSPEND);
1604                         break;
1605
1606                 default:
1607                         break;
1608                 }
1609         }
1610 }
1611
1612 static int tg3_poll_fw(struct tg3 *tp)
1613 {
1614         int i;
1615         u32 val;
1616
1617         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
1618                 /* Wait up to 20ms for init done. */
1619                 for (i = 0; i < 200; i++) {
1620                         if (tr32(VCPU_STATUS) & VCPU_STATUS_INIT_DONE)
1621                                 return 0;
1622                         udelay(100);
1623                 }
1624                 return -ENODEV;
1625         }
1626
1627         /* Wait for firmware initialization to complete. */
1628         for (i = 0; i < 100000; i++) {
1629                 tg3_read_mem(tp, NIC_SRAM_FIRMWARE_MBOX, &val);
1630                 if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
1631                         break;
1632                 udelay(10);
1633         }
1634
1635         /* The chip might not be fitted with firmware; some Sun onboard
1636          * parts are configured that way.  So don't treat a timeout of
1637          * the loop above as an error, but do report the lack of running
1638          * firmware once.
1639          */
1640         if (i >= 100000 && !tg3_flag(tp, NO_FWARE_REPORTED)) {
1641                 tg3_flag_set(tp, NO_FWARE_REPORTED);
1642
1643                 netdev_info(tp->dev, "No firmware running\n");
1644         }
1645
1646         if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
1647                 /* The 57765 A0 needs a little more
1648                  * time to do some important work.
1649                  */
1650                 mdelay(10);
1651         }
1652
1653         return 0;
1654 }
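/*
 * The generic poll above gives the bootcode up to 100000 * 10 usec
 * = 1 second to write ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1 back into the
 * firmware mailbox; the 5906 instead signals completion through the
 * VCPU_STATUS_INIT_DONE bit and is given a 20 ms budget.
 */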
1655
1656 static void tg3_link_report(struct tg3 *tp)
1657 {
1658         if (!netif_carrier_ok(tp->dev)) {
1659                 netif_info(tp, link, tp->dev, "Link is down\n");
1660                 tg3_ump_link_report(tp);
1661         } else if (netif_msg_link(tp)) {
1662                 netdev_info(tp->dev, "Link is up at %d Mbps, %s duplex\n",
1663                             (tp->link_config.active_speed == SPEED_1000 ?
1664                              1000 :
1665                              (tp->link_config.active_speed == SPEED_100 ?
1666                               100 : 10)),
1667                             (tp->link_config.active_duplex == DUPLEX_FULL ?
1668                              "full" : "half"));
1669
1670                 netdev_info(tp->dev, "Flow control is %s for TX and %s for RX\n",
1671                             (tp->link_config.active_flowctrl & FLOW_CTRL_TX) ?
1672                             "on" : "off",
1673                             (tp->link_config.active_flowctrl & FLOW_CTRL_RX) ?
1674                             "on" : "off");
1675
1676                 if (tp->phy_flags & TG3_PHYFLG_EEE_CAP)
1677                         netdev_info(tp->dev, "EEE is %s\n",
1678                                     tp->setlpicnt ? "enabled" : "disabled");
1679
1680                 tg3_ump_link_report(tp);
1681         }
1682 }
1683
1684 static u16 tg3_advert_flowctrl_1000X(u8 flow_ctrl)
1685 {
1686         u16 miireg;
1687
1688         if ((flow_ctrl & FLOW_CTRL_TX) && (flow_ctrl & FLOW_CTRL_RX))
1689                 miireg = ADVERTISE_1000XPAUSE;
1690         else if (flow_ctrl & FLOW_CTRL_TX)
1691                 miireg = ADVERTISE_1000XPSE_ASYM;
1692         else if (flow_ctrl & FLOW_CTRL_RX)
1693                 miireg = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1694         else
1695                 miireg = 0;
1696
1697         return miireg;
1698 }
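/*
 * Pause advertisement encoding produced above for 1000BASE-X
 * autoneg:
 *
 *	requested flow control		advertised bits
 *	TX and RX			ADVERTISE_1000XPAUSE
 *	TX only				ADVERTISE_1000XPSE_ASYM
 *	RX only				ADVERTISE_1000XPAUSE |
 *					ADVERTISE_1000XPSE_ASYM
 *	none				0
 */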
1699
1700 static u8 tg3_resolve_flowctrl_1000X(u16 lcladv, u16 rmtadv)
1701 {
1702         u8 cap = 0;
1703
1704         if (lcladv & rmtadv & ADVERTISE_1000XPAUSE) {
1705                 cap = FLOW_CTRL_TX | FLOW_CTRL_RX;
1706         } else if (lcladv & rmtadv & ADVERTISE_1000XPSE_ASYM) {
1707                 if (lcladv & ADVERTISE_1000XPAUSE)
1708                         cap = FLOW_CTRL_RX;
1709                 if (rmtadv & ADVERTISE_1000XPAUSE)
1710                         cap = FLOW_CTRL_TX;
1711         }
1712
1713         return cap;
1714 }
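/*
 * Resolution above: a symmetric pause match enables flow control in
 * both directions.  Otherwise, when both ends advertise asymmetric
 * pause, the direction depends on which side also advertised
 * symmetric pause: the local side yields FLOW_CTRL_RX, the remote
 * side FLOW_CTRL_TX.
 */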
1715
1716 static void tg3_setup_flow_control(struct tg3 *tp, u32 lcladv, u32 rmtadv)
1717 {
1718         u8 autoneg;
1719         u8 flowctrl = 0;
1720         u32 old_rx_mode = tp->rx_mode;
1721         u32 old_tx_mode = tp->tx_mode;
1722
1723         if (tg3_flag(tp, USE_PHYLIB))
1724                 autoneg = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]->autoneg;
1725         else
1726                 autoneg = tp->link_config.autoneg;
1727
1728         if (autoneg == AUTONEG_ENABLE && tg3_flag(tp, PAUSE_AUTONEG)) {
1729                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
1730                         flowctrl = tg3_resolve_flowctrl_1000X(lcladv, rmtadv);
1731                 else
1732                         flowctrl = mii_resolve_flowctrl_fdx(lcladv, rmtadv);
1733         } else
1734                 flowctrl = tp->link_config.flowctrl;
1735
1736         tp->link_config.active_flowctrl = flowctrl;
1737
1738         if (flowctrl & FLOW_CTRL_RX)
1739                 tp->rx_mode |= RX_MODE_FLOW_CTRL_ENABLE;
1740         else
1741                 tp->rx_mode &= ~RX_MODE_FLOW_CTRL_ENABLE;
1742
1743         if (old_rx_mode != tp->rx_mode)
1744                 tw32_f(MAC_RX_MODE, tp->rx_mode);
1745
1746         if (flowctrl & FLOW_CTRL_TX)
1747                 tp->tx_mode |= TX_MODE_FLOW_CTRL_ENABLE;
1748         else
1749                 tp->tx_mode &= ~TX_MODE_FLOW_CTRL_ENABLE;
1750
1751         if (old_tx_mode != tp->tx_mode)
1752                 tw32_f(MAC_TX_MODE, tp->tx_mode);
1753 }
1754
1755 static void tg3_adjust_link(struct net_device *dev)
1756 {
1757         u8 oldflowctrl, linkmesg = 0;
1758         u32 mac_mode, lcl_adv, rmt_adv;
1759         struct tg3 *tp = netdev_priv(dev);
1760         struct phy_device *phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1761
1762         spin_lock_bh(&tp->lock);
1763
1764         mac_mode = tp->mac_mode & ~(MAC_MODE_PORT_MODE_MASK |
1765                                     MAC_MODE_HALF_DUPLEX);
1766
1767         oldflowctrl = tp->link_config.active_flowctrl;
1768
1769         if (phydev->link) {
1770                 lcl_adv = 0;
1771                 rmt_adv = 0;
1772
1773                 if (phydev->speed == SPEED_100 || phydev->speed == SPEED_10)
1774                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1775                 else if (phydev->speed == SPEED_1000 ||
1776                          GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785)
1777                         mac_mode |= MAC_MODE_PORT_MODE_GMII;
1778                 else
1779                         mac_mode |= MAC_MODE_PORT_MODE_MII;
1780
1781                 if (phydev->duplex == DUPLEX_HALF)
1782                         mac_mode |= MAC_MODE_HALF_DUPLEX;
1783                 else {
1784                         lcl_adv = mii_advertise_flowctrl(
1785                                   tp->link_config.flowctrl);
1786
1787                         if (phydev->pause)
1788                                 rmt_adv = LPA_PAUSE_CAP;
1789                         if (phydev->asym_pause)
1790                                 rmt_adv |= LPA_PAUSE_ASYM;
1791                 }
1792
1793                 tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
1794         } else
1795                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
1796
1797         if (mac_mode != tp->mac_mode) {
1798                 tp->mac_mode = mac_mode;
1799                 tw32_f(MAC_MODE, tp->mac_mode);
1800                 udelay(40);
1801         }
1802
1803         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
1804                 if (phydev->speed == SPEED_10)
1805                         tw32(MAC_MI_STAT,
1806                              MAC_MI_STAT_10MBPS_MODE |
1807                              MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1808                 else
1809                         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
1810         }
1811
1812         if (phydev->speed == SPEED_1000 && phydev->duplex == DUPLEX_HALF)
1813                 tw32(MAC_TX_LENGTHS,
1814                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1815                       (6 << TX_LENGTHS_IPG_SHIFT) |
1816                       (0xff << TX_LENGTHS_SLOT_TIME_SHIFT)));
1817         else
1818                 tw32(MAC_TX_LENGTHS,
1819                      ((2 << TX_LENGTHS_IPG_CRS_SHIFT) |
1820                       (6 << TX_LENGTHS_IPG_SHIFT) |
1821                       (32 << TX_LENGTHS_SLOT_TIME_SHIFT)));
1822
1823         if (phydev->link != tp->old_link ||
1824             phydev->speed != tp->link_config.active_speed ||
1825             phydev->duplex != tp->link_config.active_duplex ||
1826             oldflowctrl != tp->link_config.active_flowctrl)
1827                 linkmesg = 1;
1828
1829         tp->old_link = phydev->link;
1830         tp->link_config.active_speed = phydev->speed;
1831         tp->link_config.active_duplex = phydev->duplex;
1832
1833         spin_unlock_bh(&tp->lock);
1834
1835         if (linkmesg)
1836                 tg3_link_report(tp);
1837 }
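/*
 * tg3_adjust_link() is the link-change callback handed to phylib by
 * phy_connect() in tg3_phy_init() below.  phylib calls it from its
 * own state machine whenever link, speed or duplex changes, which
 * is why it takes tp->lock itself and defers tg3_link_report()
 * until after the lock is dropped.
 */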
1838
1839 static int tg3_phy_init(struct tg3 *tp)
1840 {
1841         struct phy_device *phydev;
1842
1843         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED)
1844                 return 0;
1845
1846         /* Bring the PHY back to a known state. */
1847         tg3_bmcr_reset(tp);
1848
1849         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1850
1851         /* Attach the MAC to the PHY. */
1852         phydev = phy_connect(tp->dev, dev_name(&phydev->dev), tg3_adjust_link,
1853                              phydev->dev_flags, phydev->interface);
1854         if (IS_ERR(phydev)) {
1855                 dev_err(&tp->pdev->dev, "Could not attach to PHY\n");
1856                 return PTR_ERR(phydev);
1857         }
1858
1859         /* Mask with MAC supported features. */
1860         switch (phydev->interface) {
1861         case PHY_INTERFACE_MODE_GMII:
1862         case PHY_INTERFACE_MODE_RGMII:
1863                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
1864                         phydev->supported &= (PHY_GBIT_FEATURES |
1865                                               SUPPORTED_Pause |
1866                                               SUPPORTED_Asym_Pause);
1867                         break;
1868                 }
1869                 /* fallthru */
1870         case PHY_INTERFACE_MODE_MII:
1871                 phydev->supported &= (PHY_BASIC_FEATURES |
1872                                       SUPPORTED_Pause |
1873                                       SUPPORTED_Asym_Pause);
1874                 break;
1875         default:
1876                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1877                 return -EINVAL;
1878         }
1879
1880         tp->phy_flags |= TG3_PHYFLG_IS_CONNECTED;
1881
1882         phydev->advertising = phydev->supported;
1883
1884         return 0;
1885 }
1886
1887 static void tg3_phy_start(struct tg3 *tp)
1888 {
1889         struct phy_device *phydev;
1890
1891         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1892                 return;
1893
1894         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
1895
1896         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
1897                 tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
1898                 phydev->speed = tp->link_config.speed;
1899                 phydev->duplex = tp->link_config.duplex;
1900                 phydev->autoneg = tp->link_config.autoneg;
1901                 phydev->advertising = tp->link_config.advertising;
1902         }
1903
1904         phy_start(phydev);
1905
1906         phy_start_aneg(phydev);
1907 }
1908
1909 static void tg3_phy_stop(struct tg3 *tp)
1910 {
1911         if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
1912                 return;
1913
1914         phy_stop(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1915 }
1916
1917 static void tg3_phy_fini(struct tg3 *tp)
1918 {
1919         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
1920                 phy_disconnect(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
1921                 tp->phy_flags &= ~TG3_PHYFLG_IS_CONNECTED;
1922         }
1923 }
1924
1925 static int tg3_phy_set_extloopbk(struct tg3 *tp)
1926 {
1927         int err;
1928         u32 val;
1929
1930         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
1931                 return 0;
1932
1933         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
1934                 /* Cannot do read-modify-write on 5401 */
1935                 err = tg3_phy_auxctl_write(tp,
1936                                            MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
1937                                            MII_TG3_AUXCTL_ACTL_EXTLOOPBK |
1938                                            0x4c20);
1939                 goto done;
1940         }
1941
1942         err = tg3_phy_auxctl_read(tp,
1943                                   MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
1944         if (err)
1945                 return err;
1946
1947         val |= MII_TG3_AUXCTL_ACTL_EXTLOOPBK;
1948         err = tg3_phy_auxctl_write(tp,
1949                                    MII_TG3_AUXCTL_SHDWSEL_AUXCTL, val);
1950
1951 done:
1952         return err;
1953 }
1954
1955 static void tg3_phy_fet_toggle_apd(struct tg3 *tp, bool enable)
1956 {
1957         u32 phytest;
1958
1959         if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
1960                 u32 phy;
1961
1962                 tg3_writephy(tp, MII_TG3_FET_TEST,
1963                              phytest | MII_TG3_FET_SHADOW_EN);
1964                 if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXSTAT2, &phy)) {
1965                         if (enable)
1966                                 phy |= MII_TG3_FET_SHDW_AUXSTAT2_APD;
1967                         else
1968                                 phy &= ~MII_TG3_FET_SHDW_AUXSTAT2_APD;
1969                         tg3_writephy(tp, MII_TG3_FET_SHDW_AUXSTAT2, phy);
1970                 }
1971                 tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
1972         }
1973 }
1974
1975 static void tg3_phy_toggle_apd(struct tg3 *tp, bool enable)
1976 {
1977         u32 reg;
1978
1979         if (!tg3_flag(tp, 5705_PLUS) ||
1980             (tg3_flag(tp, 5717_PLUS) &&
1981              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
1982                 return;
1983
1984         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
1985                 tg3_phy_fet_toggle_apd(tp, enable);
1986                 return;
1987         }
1988
1989         reg = MII_TG3_MISC_SHDW_WREN |
1990               MII_TG3_MISC_SHDW_SCR5_SEL |
1991               MII_TG3_MISC_SHDW_SCR5_LPED |
1992               MII_TG3_MISC_SHDW_SCR5_DLPTLM |
1993               MII_TG3_MISC_SHDW_SCR5_SDTL |
1994               MII_TG3_MISC_SHDW_SCR5_C125OE;
1995         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 || !enable)
1996                 reg |= MII_TG3_MISC_SHDW_SCR5_DLLAPD;
1997
1998         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
1999
2000
2001         reg = MII_TG3_MISC_SHDW_WREN |
2002               MII_TG3_MISC_SHDW_APD_SEL |
2003               MII_TG3_MISC_SHDW_APD_WKTM_84MS;
2004         if (enable)
2005                 reg |= MII_TG3_MISC_SHDW_APD_ENABLE;
2006
2007         tg3_writephy(tp, MII_TG3_MISC_SHDW, reg);
2008 }
2009
2010 static void tg3_phy_toggle_automdix(struct tg3 *tp, int enable)
2011 {
2012         u32 phy;
2013
2014         if (!tg3_flag(tp, 5705_PLUS) ||
2015             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
2016                 return;
2017
2018         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2019                 u32 ephy;
2020
2021                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &ephy)) {
2022                         u32 reg = MII_TG3_FET_SHDW_MISCCTRL;
2023
2024                         tg3_writephy(tp, MII_TG3_FET_TEST,
2025                                      ephy | MII_TG3_FET_SHADOW_EN);
2026                         if (!tg3_readphy(tp, reg, &phy)) {
2027                                 if (enable)
2028                                         phy |= MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2029                                 else
2030                                         phy &= ~MII_TG3_FET_SHDW_MISCCTRL_MDIX;
2031                                 tg3_writephy(tp, reg, phy);
2032                         }
2033                         tg3_writephy(tp, MII_TG3_FET_TEST, ephy);
2034                 }
2035         } else {
2036                 int ret;
2037
2038                 ret = tg3_phy_auxctl_read(tp,
2039                                           MII_TG3_AUXCTL_SHDWSEL_MISC, &phy);
2040                 if (!ret) {
2041                         if (enable)
2042                                 phy |= MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2043                         else
2044                                 phy &= ~MII_TG3_AUXCTL_MISC_FORCE_AMDIX;
2045                         tg3_phy_auxctl_write(tp,
2046                                              MII_TG3_AUXCTL_SHDWSEL_MISC, phy);
2047                 }
2048         }
2049 }
2050
2051 static void tg3_phy_set_wirespeed(struct tg3 *tp)
2052 {
2053         int ret;
2054         u32 val;
2055
2056         if (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED)
2057                 return;
2058
2059         ret = tg3_phy_auxctl_read(tp, MII_TG3_AUXCTL_SHDWSEL_MISC, &val);
2060         if (!ret)
2061                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_MISC,
2062                                      val | MII_TG3_AUXCTL_MISC_WIRESPD_EN);
2063 }
2064
2065 static void tg3_phy_apply_otp(struct tg3 *tp)
2066 {
2067         u32 otp, phy;
2068
2069         if (!tp->phy_otp)
2070                 return;
2071
2072         otp = tp->phy_otp;
2073
2074         if (TG3_PHY_AUXCTL_SMDSP_ENABLE(tp))
2075                 return;
2076
2077         phy = ((otp & TG3_OTP_AGCTGT_MASK) >> TG3_OTP_AGCTGT_SHIFT);
2078         phy |= MII_TG3_DSP_TAP1_AGCTGT_DFLT;
2079         tg3_phydsp_write(tp, MII_TG3_DSP_TAP1, phy);
2080
2081         phy = ((otp & TG3_OTP_HPFFLTR_MASK) >> TG3_OTP_HPFFLTR_SHIFT) |
2082               ((otp & TG3_OTP_HPFOVER_MASK) >> TG3_OTP_HPFOVER_SHIFT);
2083         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH0, phy);
2084
2085         phy = ((otp & TG3_OTP_LPFDIS_MASK) >> TG3_OTP_LPFDIS_SHIFT);
2086         phy |= MII_TG3_DSP_AADJ1CH3_ADCCKADJ;
2087         tg3_phydsp_write(tp, MII_TG3_DSP_AADJ1CH3, phy);
2088
2089         phy = ((otp & TG3_OTP_VDAC_MASK) >> TG3_OTP_VDAC_SHIFT);
2090         tg3_phydsp_write(tp, MII_TG3_DSP_EXP75, phy);
2091
2092         phy = ((otp & TG3_OTP_10BTAMP_MASK) >> TG3_OTP_10BTAMP_SHIFT);
2093         tg3_phydsp_write(tp, MII_TG3_DSP_EXP96, phy);
2094
2095         phy = ((otp & TG3_OTP_ROFF_MASK) >> TG3_OTP_ROFF_SHIFT) |
2096               ((otp & TG3_OTP_RCOFF_MASK) >> TG3_OTP_RCOFF_SHIFT);
2097         tg3_phydsp_write(tp, MII_TG3_DSP_EXP97, phy);
2098
2099         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2100 }
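/*
 * Note the bracketing pattern above: the DSP registers may only be
 * written between a successful TG3_PHY_AUXCTL_SMDSP_ENABLE() and
 * the matching TG3_PHY_AUXCTL_SMDSP_DISABLE().  The same
 * enable/write/disable bracket recurs in the EEE and PHY reset
 * paths below.
 */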
2101
2102 static void tg3_phy_eee_adjust(struct tg3 *tp, u32 current_link_up)
2103 {
2104         u32 val;
2105
2106         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
2107                 return;
2108
2109         tp->setlpicnt = 0;
2110
2111         if (tp->link_config.autoneg == AUTONEG_ENABLE &&
2112             current_link_up == 1 &&
2113             tp->link_config.active_duplex == DUPLEX_FULL &&
2114             (tp->link_config.active_speed == SPEED_100 ||
2115              tp->link_config.active_speed == SPEED_1000)) {
2116                 u32 eeectl;
2117
2118                 if (tp->link_config.active_speed == SPEED_1000)
2119                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_16_5_US;
2120                 else
2121                         eeectl = TG3_CPMU_EEE_CTRL_EXIT_36_US;
2122
2123                 tw32(TG3_CPMU_EEE_CTRL, eeectl);
2124
2125                 tg3_phy_cl45_read(tp, MDIO_MMD_AN,
2126                                   TG3_CL45_D7_EEERES_STAT, &val);
2127
2128                 if (val == TG3_CL45_D7_EEERES_STAT_LP_1000T ||
2129                     val == TG3_CL45_D7_EEERES_STAT_LP_100TX)
2130                         tp->setlpicnt = 2;
2131         }
2132
2133         if (!tp->setlpicnt) {
2134                 if (current_link_up == 1 &&
2135                    !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2136                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, 0x0000);
2137                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2138                 }
2139
2140                 val = tr32(TG3_CPMU_EEE_MODE);
2141                 tw32(TG3_CPMU_EEE_MODE, val & ~TG3_CPMU_EEEMD_LPI_ENABLE);
2142         }
2143 }
2144
2145 static void tg3_phy_eee_enable(struct tg3 *tp)
2146 {
2147         u32 val;
2148
2149         if (tp->link_config.active_speed == SPEED_1000 &&
2150             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2151              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2152              tg3_flag(tp, 57765_CLASS)) &&
2153             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2154                 val = MII_TG3_DSP_TAP26_ALNOKO |
2155                       MII_TG3_DSP_TAP26_RMRXSTO;
2156                 tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
2157                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2158         }
2159
2160         val = tr32(TG3_CPMU_EEE_MODE);
2161         tw32(TG3_CPMU_EEE_MODE, val | TG3_CPMU_EEEMD_LPI_ENABLE);
2162 }
2163
2164 static int tg3_wait_macro_done(struct tg3 *tp)
2165 {
2166         int limit = 100;
2167
2168         while (limit--) {
2169                 u32 tmp32;
2170
2171                 if (!tg3_readphy(tp, MII_TG3_DSP_CONTROL, &tmp32)) {
2172                         if ((tmp32 & 0x1000) == 0)
2173                                 break;
2174                 }
2175         }
2176         if (limit < 0)
2177                 return -EBUSY;
2178
2179         return 0;
2180 }
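/*
 * With the while (limit--) idiom above, limit is -1 only when all
 * 100 polls expired without bit 12 of MII_TG3_DSP_CONTROL (the DSP
 * busy flag) clearing, so the limit < 0 test cleanly separates a
 * timeout from an early break.
 */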
2181
2182 static int tg3_phy_write_and_check_testpat(struct tg3 *tp, int *resetp)
2183 {
2184         static const u32 test_pat[4][6] = {
2185         { 0x00005555, 0x00000005, 0x00002aaa, 0x0000000a, 0x00003456, 0x00000003 },
2186         { 0x00002aaa, 0x0000000a, 0x00003333, 0x00000003, 0x0000789a, 0x00000005 },
2187         { 0x00005a5a, 0x00000005, 0x00002a6a, 0x0000000a, 0x00001bcd, 0x00000003 },
2188         { 0x00002a5a, 0x0000000a, 0x000033c3, 0x00000003, 0x00002ef1, 0x00000005 }
2189         };
2190         int chan;
2191
2192         for (chan = 0; chan < 4; chan++) {
2193                 int i;
2194
2195                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2196                              (chan * 0x2000) | 0x0200);
2197                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2198
2199                 for (i = 0; i < 6; i++)
2200                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT,
2201                                      test_pat[chan][i]);
2202
2203                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2204                 if (tg3_wait_macro_done(tp)) {
2205                         *resetp = 1;
2206                         return -EBUSY;
2207                 }
2208
2209                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2210                              (chan * 0x2000) | 0x0200);
2211                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0082);
2212                 if (tg3_wait_macro_done(tp)) {
2213                         *resetp = 1;
2214                         return -EBUSY;
2215                 }
2216
2217                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0802);
2218                 if (tg3_wait_macro_done(tp)) {
2219                         *resetp = 1;
2220                         return -EBUSY;
2221                 }
2222
2223                 for (i = 0; i < 6; i += 2) {
2224                         u32 low, high;
2225
2226                         if (tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &low) ||
2227                             tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &high) ||
2228                             tg3_wait_macro_done(tp)) {
2229                                 *resetp = 1;
2230                                 return -EBUSY;
2231                         }
2232                         low &= 0x7fff;
2233                         high &= 0x000f;
2234                         if (low != test_pat[chan][i] ||
2235                             high != test_pat[chan][i+1]) {
2236                                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000b);
2237                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4001);
2238                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x4005);
2239
2240                                 return -EBUSY;
2241                         }
2242                 }
2243         }
2244
2245         return 0;
2246 }
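/*
 * The test above exercises each of the four gigabit channels in
 * turn: the six-word pattern is written at DSP address
 * (chan * 0x2000) | 0x0200, read back through the same RW port, and
 * compared after masking (low &= 0x7fff, high &= 0x000f).  A macro
 * timeout sets *resetp so the caller re-resets the PHY before
 * retrying; a data mismatch simply fails the attempt with -EBUSY.
 */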
2247
2248 static int tg3_phy_reset_chanpat(struct tg3 *tp)
2249 {
2250         int chan;
2251
2252         for (chan = 0; chan < 4; chan++) {
2253                 int i;
2254
2255                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
2256                              (chan * 0x2000) | 0x0200);
2257                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0002);
2258                 for (i = 0; i < 6; i++)
2259                         tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x0000);
2260                 tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0202);
2261                 if (tg3_wait_macro_done(tp))
2262                         return -EBUSY;
2263         }
2264
2265         return 0;
2266 }
2267
2268 static int tg3_phy_reset_5703_4_5(struct tg3 *tp)
2269 {
2270         u32 reg32, phy9_orig;
2271         int retries, do_phy_reset, err;
2272
2273         retries = 10;
2274         do_phy_reset = 1;
2275         do {
2276                 if (do_phy_reset) {
2277                         err = tg3_bmcr_reset(tp);
2278                         if (err)
2279                                 return err;
2280                         do_phy_reset = 0;
2281                 }
2282
2283                 /* Disable transmitter and interrupt.  */
2284                 if (tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32))
2285                         continue;
2286
2287                 reg32 |= 0x3000;
2288                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2289
2290                 /* Set full-duplex, 1000 Mbps.  */
2291                 tg3_writephy(tp, MII_BMCR,
2292                              BMCR_FULLDPLX | BMCR_SPEED1000);
2293
2294                 /* Set to master mode.  */
2295                 if (tg3_readphy(tp, MII_CTRL1000, &phy9_orig))
2296                         continue;
2297
2298                 tg3_writephy(tp, MII_CTRL1000,
2299                              CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
2300
2301                 err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
2302                 if (err)
2303                         return err;
2304
2305                 /* Block the PHY control access.  */
2306                 tg3_phydsp_write(tp, 0x8005, 0x0800);
2307
2308                 err = tg3_phy_write_and_check_testpat(tp, &do_phy_reset);
2309                 if (!err)
2310                         break;
2311         } while (--retries);
2312
2313         err = tg3_phy_reset_chanpat(tp);
2314         if (err)
2315                 return err;
2316
2317         tg3_phydsp_write(tp, 0x8005, 0x0000);
2318
2319         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x8200);
2320         tg3_writephy(tp, MII_TG3_DSP_CONTROL, 0x0000);
2321
2322         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2323
2324         tg3_writephy(tp, MII_CTRL1000, phy9_orig);
2325
2326         if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &reg32)) {
2327                 reg32 &= ~0x3000;
2328                 tg3_writephy(tp, MII_TG3_EXT_CTRL, reg32);
2329         } else if (!err)
2330                 err = -EBUSY;
2331
2332         return err;
2333 }
2334
2335 /* Unconditionally reset the tigon3 PHY and reapply the
2336  * chip-specific workarounds that a reset clears.
2337  */
2338 static int tg3_phy_reset(struct tg3 *tp)
2339 {
2340         u32 val, cpmuctrl;
2341         int err;
2342
2343         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2344                 val = tr32(GRC_MISC_CFG);
2345                 tw32_f(GRC_MISC_CFG, val & ~GRC_MISC_CFG_EPHY_IDDQ);
2346                 udelay(40);
2347         }
2348         err  = tg3_readphy(tp, MII_BMSR, &val);
2349         err |= tg3_readphy(tp, MII_BMSR, &val);
2350         if (err != 0)
2351                 return -EBUSY;
2352
2353         if (netif_running(tp->dev) && netif_carrier_ok(tp->dev)) {
2354                 netif_carrier_off(tp->dev);
2355                 tg3_link_report(tp);
2356         }
2357
2358         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
2359             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2360             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
2361                 err = tg3_phy_reset_5703_4_5(tp);
2362                 if (err)
2363                         return err;
2364                 goto out;
2365         }
2366
2367         cpmuctrl = 0;
2368         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
2369             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
2370                 cpmuctrl = tr32(TG3_CPMU_CTRL);
2371                 if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY)
2372                         tw32(TG3_CPMU_CTRL,
2373                              cpmuctrl & ~CPMU_CTRL_GPHY_10MB_RXONLY);
2374         }
2375
2376         err = tg3_bmcr_reset(tp);
2377         if (err)
2378                 return err;
2379
2380         if (cpmuctrl & CPMU_CTRL_GPHY_10MB_RXONLY) {
2381                 val = MII_TG3_DSP_EXP8_AEDW | MII_TG3_DSP_EXP8_REJ2MHz;
2382                 tg3_phydsp_write(tp, MII_TG3_DSP_EXP8, val);
2383
2384                 tw32(TG3_CPMU_CTRL, cpmuctrl);
2385         }
2386
2387         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2388             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2389                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2390                 if ((val & CPMU_LSPD_1000MB_MACCLK_MASK) ==
2391                     CPMU_LSPD_1000MB_MACCLK_12_5) {
2392                         val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2393                         udelay(40);
2394                         tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2395                 }
2396         }
2397
2398         if (tg3_flag(tp, 5717_PLUS) &&
2399             (tp->phy_flags & TG3_PHYFLG_MII_SERDES))
2400                 return 0;
2401
2402         tg3_phy_apply_otp(tp);
2403
2404         if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
2405                 tg3_phy_toggle_apd(tp, true);
2406         else
2407                 tg3_phy_toggle_apd(tp, false);
2408
2409 out:
2410         if ((tp->phy_flags & TG3_PHYFLG_ADC_BUG) &&
2411             !TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2412                 tg3_phydsp_write(tp, 0x201f, 0x2aaa);
2413                 tg3_phydsp_write(tp, 0x000a, 0x0323);
2414                 TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2415         }
2416
2417         if (tp->phy_flags & TG3_PHYFLG_5704_A0_BUG) {
2418                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2419                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
2420         }
2421
2422         if (tp->phy_flags & TG3_PHYFLG_BER_BUG) {
2423                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2424                         tg3_phydsp_write(tp, 0x000a, 0x310b);
2425                         tg3_phydsp_write(tp, 0x201f, 0x9506);
2426                         tg3_phydsp_write(tp, 0x401f, 0x14e2);
2427                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2428                 }
2429         } else if (tp->phy_flags & TG3_PHYFLG_JITTER_BUG) {
2430                 if (!TG3_PHY_AUXCTL_SMDSP_ENABLE(tp)) {
2431                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS, 0x000a);
2432                         if (tp->phy_flags & TG3_PHYFLG_ADJUST_TRIM) {
2433                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x110b);
2434                                 tg3_writephy(tp, MII_TG3_TEST1,
2435                                              MII_TG3_TEST1_TRIM_EN | 0x4);
2436                         } else
2437                                 tg3_writephy(tp, MII_TG3_DSP_RW_PORT, 0x010b);
2438
2439                         TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
2440                 }
2441         }
2442
2443         /* Set the Extended packet length bit (bit 14) on all chips
2444          * that support jumbo frames. */
2445         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
2446                 /* Cannot do read-modify-write on 5401 */
2447                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
2448         } else if (tg3_flag(tp, JUMBO_CAPABLE)) {
2449                 /* Set bit 14 with read-modify-write to preserve other bits */
2450                 err = tg3_phy_auxctl_read(tp,
2451                                           MII_TG3_AUXCTL_SHDWSEL_AUXCTL, &val);
2452                 if (!err)
2453                         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL,
2454                                            val | MII_TG3_AUXCTL_ACTL_EXTPKTLEN);
2455         }
2456
2457         /* Set PHY register 0x10 bit 0 to high FIFO elasticity to
2458          * support jumbo frame transmission.
2459          */
2460         if (tg3_flag(tp, JUMBO_CAPABLE)) {
2461                 if (!tg3_readphy(tp, MII_TG3_EXT_CTRL, &val))
2462                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
2463                                      val | MII_TG3_EXT_CTRL_FIFO_ELASTIC);
2464         }
2465
2466         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2467                 /* adjust output voltage */
2468                 tg3_writephy(tp, MII_TG3_FET_PTEST, 0x12);
2469         }
2470
2471         tg3_phy_toggle_automdix(tp, 1);
2472         tg3_phy_set_wirespeed(tp);
2473         return 0;
2474 }
2475
2476 #define TG3_GPIO_MSG_DRVR_PRES           0x00000001
2477 #define TG3_GPIO_MSG_NEED_VAUX           0x00000002
2478 #define TG3_GPIO_MSG_MASK                (TG3_GPIO_MSG_DRVR_PRES | \
2479                                           TG3_GPIO_MSG_NEED_VAUX)
2480 #define TG3_GPIO_MSG_ALL_DRVR_PRES_MASK \
2481         ((TG3_GPIO_MSG_DRVR_PRES << 0) | \
2482          (TG3_GPIO_MSG_DRVR_PRES << 4) | \
2483          (TG3_GPIO_MSG_DRVR_PRES << 8) | \
2484          (TG3_GPIO_MSG_DRVR_PRES << 12))
2485
2486 #define TG3_GPIO_MSG_ALL_NEED_VAUX_MASK \
2487         ((TG3_GPIO_MSG_NEED_VAUX << 0) | \
2488          (TG3_GPIO_MSG_NEED_VAUX << 4) | \
2489          (TG3_GPIO_MSG_NEED_VAUX << 8) | \
2490          (TG3_GPIO_MSG_NEED_VAUX << 12))
2491
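/*
 * Each PCI function owns one 4-bit nibble of the shared GPIO status
 * word, so function n shifts its TG3_GPIO_MSG_* bits left by 4 * n
 * (plus TG3_APE_GPIO_MSG_SHIFT when the word lives in the APE
 * register).  For example, function 2 reporting both driver
 * presence and a Vaux requirement contributes
 * (TG3_GPIO_MSG_DRVR_PRES | TG3_GPIO_MSG_NEED_VAUX) << 8.
 */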
2492 static inline u32 tg3_set_function_status(struct tg3 *tp, u32 newstat)
2493 {
2494         u32 status, shift;
2495
2496         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2497             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2498                 status = tg3_ape_read32(tp, TG3_APE_GPIO_MSG);
2499         else
2500                 status = tr32(TG3_CPMU_DRV_STATUS);
2501
2502         shift = TG3_APE_GPIO_MSG_SHIFT + 4 * tp->pci_fn;
2503         status &= ~(TG3_GPIO_MSG_MASK << shift);
2504         status |= (newstat << shift);
2505
2506         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2507             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
2508                 tg3_ape_write32(tp, TG3_APE_GPIO_MSG, status);
2509         else
2510                 tw32(TG3_CPMU_DRV_STATUS, status);
2511
2512         return status >> TG3_APE_GPIO_MSG_SHIFT;
2513 }
2514
2515 static inline int tg3_pwrsrc_switch_to_vmain(struct tg3 *tp)
2516 {
2517         if (!tg3_flag(tp, IS_NIC))
2518                 return 0;
2519
2520         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2521             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2522             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2523                 if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2524                         return -EIO;
2525
2526                 tg3_set_function_status(tp, TG3_GPIO_MSG_DRVR_PRES);
2527
2528                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2529                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2530
2531                 tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2532         } else {
2533                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl,
2534                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2535         }
2536
2537         return 0;
2538 }
2539
2540 static void tg3_pwrsrc_die_with_vmain(struct tg3 *tp)
2541 {
2542         u32 grc_local_ctrl;
2543
2544         if (!tg3_flag(tp, IS_NIC) ||
2545             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2546             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)
2547                 return;
2548
2549         grc_local_ctrl = tp->grc_local_ctrl | GRC_LCLCTRL_GPIO_OE1;
2550
2551         tw32_wait_f(GRC_LOCAL_CTRL,
2552                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2553                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2554
2555         tw32_wait_f(GRC_LOCAL_CTRL,
2556                     grc_local_ctrl,
2557                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2558
2559         tw32_wait_f(GRC_LOCAL_CTRL,
2560                     grc_local_ctrl | GRC_LCLCTRL_GPIO_OUTPUT1,
2561                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2562 }
2563
2564 static void tg3_pwrsrc_switch_to_vaux(struct tg3 *tp)
2565 {
2566         if (!tg3_flag(tp, IS_NIC))
2567                 return;
2568
2569         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2570             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
2571                 tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2572                             (GRC_LCLCTRL_GPIO_OE0 |
2573                              GRC_LCLCTRL_GPIO_OE1 |
2574                              GRC_LCLCTRL_GPIO_OE2 |
2575                              GRC_LCLCTRL_GPIO_OUTPUT0 |
2576                              GRC_LCLCTRL_GPIO_OUTPUT1),
2577                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2578         } else if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
2579                    tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
2580                 /* The 5761 non-e device swaps GPIO 0 and GPIO 2. */
2581                 u32 grc_local_ctrl = GRC_LCLCTRL_GPIO_OE0 |
2582                                      GRC_LCLCTRL_GPIO_OE1 |
2583                                      GRC_LCLCTRL_GPIO_OE2 |
2584                                      GRC_LCLCTRL_GPIO_OUTPUT0 |
2585                                      GRC_LCLCTRL_GPIO_OUTPUT1 |
2586                                      tp->grc_local_ctrl;
2587                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2588                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2589
2590                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT2;
2591                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2592                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2593
2594                 grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT0;
2595                 tw32_wait_f(GRC_LOCAL_CTRL, grc_local_ctrl,
2596                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2597         } else {
2598                 u32 no_gpio2;
2599                 u32 grc_local_ctrl = 0;
2600
2601                 /* Workaround to prevent drawing too much current. */
2602                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
2603                         grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
2604                         tw32_wait_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl |
2605                                     grc_local_ctrl,
2606                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2607                 }
2608
2609                 /* On 5753 and variants, GPIO2 cannot be used. */
2610                 no_gpio2 = tp->nic_sram_data_cfg &
2611                            NIC_SRAM_DATA_CFG_NO_GPIO2;
2612
2613                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
2614                                   GRC_LCLCTRL_GPIO_OE1 |
2615                                   GRC_LCLCTRL_GPIO_OE2 |
2616                                   GRC_LCLCTRL_GPIO_OUTPUT1 |
2617                                   GRC_LCLCTRL_GPIO_OUTPUT2;
2618                 if (no_gpio2) {
2619                         grc_local_ctrl &= ~(GRC_LCLCTRL_GPIO_OE2 |
2620                                             GRC_LCLCTRL_GPIO_OUTPUT2);
2621                 }
2622                 tw32_wait_f(GRC_LOCAL_CTRL,
2623                             tp->grc_local_ctrl | grc_local_ctrl,
2624                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2625
2626                 grc_local_ctrl |= GRC_LCLCTRL_GPIO_OUTPUT0;
2627
2628                 tw32_wait_f(GRC_LOCAL_CTRL,
2629                             tp->grc_local_ctrl | grc_local_ctrl,
2630                             TG3_GRC_LCLCTL_PWRSW_DELAY);
2631
2632                 if (!no_gpio2) {
2633                         grc_local_ctrl &= ~GRC_LCLCTRL_GPIO_OUTPUT2;
2634                         tw32_wait_f(GRC_LOCAL_CTRL,
2635                                     tp->grc_local_ctrl | grc_local_ctrl,
2636                                     TG3_GRC_LCLCTL_PWRSW_DELAY);
2637                 }
2638         }
2639 }
2640
2641 static void tg3_frob_aux_power_5717(struct tg3 *tp, bool wol_enable)
2642 {
2643         u32 msg = 0;
2644
2645         /* Serialize power state transitions */
2646         if (tg3_ape_lock(tp, TG3_APE_LOCK_GPIO))
2647                 return;
2648
2649         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE) || wol_enable)
2650                 msg = TG3_GPIO_MSG_NEED_VAUX;
2651
2652         msg = tg3_set_function_status(tp, msg);
2653
2654         if (msg & TG3_GPIO_MSG_ALL_DRVR_PRES_MASK)
2655                 goto done;
2656
2657         if (msg & TG3_GPIO_MSG_ALL_NEED_VAUX_MASK)
2658                 tg3_pwrsrc_switch_to_vaux(tp);
2659         else
2660                 tg3_pwrsrc_die_with_vmain(tp);
2661
2662 done:
2663         tg3_ape_unlock(tp, TG3_APE_LOCK_GPIO);
2664 }
2665
2666 static void tg3_frob_aux_power(struct tg3 *tp, bool include_wol)
2667 {
2668         bool need_vaux = false;
2669
2670         /* The GPIOs do something completely different on 57765-class chips. */
2671         if (!tg3_flag(tp, IS_NIC) || tg3_flag(tp, 57765_CLASS))
2672                 return;
2673
2674         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
2675             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
2676             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
2677                 tg3_frob_aux_power_5717(tp, include_wol ?
2678                                         tg3_flag(tp, WOL_ENABLE) != 0 : 0);
2679                 return;
2680         }
2681
2682         if (tp->pdev_peer && tp->pdev_peer != tp->pdev) {
2683                 struct net_device *dev_peer;
2684
2685                 dev_peer = pci_get_drvdata(tp->pdev_peer);
2686
2687                 /* remove_one() may have been run on the peer. */
2688                 if (dev_peer) {
2689                         struct tg3 *tp_peer = netdev_priv(dev_peer);
2690
2691                         if (tg3_flag(tp_peer, INIT_COMPLETE))
2692                                 return;
2693
2694                         if ((include_wol && tg3_flag(tp_peer, WOL_ENABLE)) ||
2695                             tg3_flag(tp_peer, ENABLE_ASF))
2696                                 need_vaux = true;
2697                 }
2698         }
2699
2700         if ((include_wol && tg3_flag(tp, WOL_ENABLE)) ||
2701             tg3_flag(tp, ENABLE_ASF))
2702                 need_vaux = true;
2703
2704         if (need_vaux)
2705                 tg3_pwrsrc_switch_to_vaux(tp);
2706         else
2707                 tg3_pwrsrc_die_with_vmain(tp);
2708 }
2709
2710 static int tg3_5700_link_polarity(struct tg3 *tp, u32 speed)
2711 {
2712         if (tp->led_ctrl == LED_CTRL_MODE_PHY_2)
2713                 return 1;
2714         else if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411) {
2715                 if (speed != SPEED_10)
2716                         return 1;
2717         } else if (speed == SPEED_10)
2718                 return 1;
2719
2720         return 0;
2721 }
2722
2723 static void tg3_power_down_phy(struct tg3 *tp, bool do_low_power)
2724 {
2725         u32 val;
2726
2727         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
2728                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
2729                         u32 sg_dig_ctrl = tr32(SG_DIG_CTRL);
2730                         u32 serdes_cfg = tr32(MAC_SERDES_CFG);
2731
2732                         sg_dig_ctrl |=
2733                                 SG_DIG_USING_HW_AUTONEG | SG_DIG_SOFT_RESET;
2734                         tw32(SG_DIG_CTRL, sg_dig_ctrl);
2735                         tw32(MAC_SERDES_CFG, serdes_cfg | (1 << 15));
2736                 }
2737                 return;
2738         }
2739
2740         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
2741                 tg3_bmcr_reset(tp);
2742                 val = tr32(GRC_MISC_CFG);
2743                 tw32_f(GRC_MISC_CFG, val | GRC_MISC_CFG_EPHY_IDDQ);
2744                 udelay(40);
2745                 return;
2746         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
2747                 u32 phytest;
2748                 if (!tg3_readphy(tp, MII_TG3_FET_TEST, &phytest)) {
2749                         u32 phy;
2750
2751                         tg3_writephy(tp, MII_ADVERTISE, 0);
2752                         tg3_writephy(tp, MII_BMCR,
2753                                      BMCR_ANENABLE | BMCR_ANRESTART);
2754
2755                         tg3_writephy(tp, MII_TG3_FET_TEST,
2756                                      phytest | MII_TG3_FET_SHADOW_EN);
2757                         if (!tg3_readphy(tp, MII_TG3_FET_SHDW_AUXMODE4, &phy)) {
2758                                 phy |= MII_TG3_FET_SHDW_AUXMODE4_SBPD;
2759                                 tg3_writephy(tp,
2760                                              MII_TG3_FET_SHDW_AUXMODE4,
2761                                              phy);
2762                         }
2763                         tg3_writephy(tp, MII_TG3_FET_TEST, phytest);
2764                 }
2765                 return;
2766         } else if (do_low_power) {
2767                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
2768                              MII_TG3_EXT_CTRL_FORCE_LED_OFF);
2769
2770                 val = MII_TG3_AUXCTL_PCTL_100TX_LPWR |
2771                       MII_TG3_AUXCTL_PCTL_SPR_ISOLATE |
2772                       MII_TG3_AUXCTL_PCTL_VREG_11V;
2773                 tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, val);
2774         }
2775
2776         /* The PHY should not be powered down on some chips because
2777          * of bugs.
2778          */
2779         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
2780             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
2781             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 &&
2782              (tp->phy_flags & TG3_PHYFLG_MII_SERDES)))
2783                 return;
2784
2785         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX ||
2786             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5761_AX) {
2787                 val = tr32(TG3_CPMU_LSPD_1000MB_CLK);
2788                 val &= ~CPMU_LSPD_1000MB_MACCLK_MASK;
2789                 val |= CPMU_LSPD_1000MB_MACCLK_12_5;
2790                 tw32_f(TG3_CPMU_LSPD_1000MB_CLK, val);
2791         }
2792
2793         tg3_writephy(tp, MII_BMCR, BMCR_PDOWN);
2794 }
2795
2796 /* tp->lock is held. */
2797 static int tg3_nvram_lock(struct tg3 *tp)
2798 {
2799         if (tg3_flag(tp, NVRAM)) {
2800                 int i;
2801
2802                 if (tp->nvram_lock_cnt == 0) {
2803                         tw32(NVRAM_SWARB, SWARB_REQ_SET1);
2804                         for (i = 0; i < 8000; i++) {
2805                                 if (tr32(NVRAM_SWARB) & SWARB_GNT1)
2806                                         break;
2807                                 udelay(20);
2808                         }
2809                         if (i == 8000) {
2810                                 tw32(NVRAM_SWARB, SWARB_REQ_CLR1);
2811                                 return -ENODEV;
2812                         }
2813                 }
2814                 tp->nvram_lock_cnt++;
2815         }
2816         return 0;
2817 }
2818
2819 /* tp->lock is held. */
2820 static void tg3_nvram_unlock(struct tg3 *tp)
2821 {
2822         if (tg3_flag(tp, NVRAM)) {
2823                 if (tp->nvram_lock_cnt > 0)
2824                         tp->nvram_lock_cnt--;
2825                 if (tp->nvram_lock_cnt == 0)
2826                         tw32_f(NVRAM_SWARB, SWARB_REQ_CLR1);
2827         }
2828 }
2829
2830 /* tp->lock is held. */
2831 static void tg3_enable_nvram_access(struct tg3 *tp)
2832 {
2833         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2834                 u32 nvaccess = tr32(NVRAM_ACCESS);
2835
2836                 tw32(NVRAM_ACCESS, nvaccess | ACCESS_ENABLE);
2837         }
2838 }
2839
2840 /* tp->lock is held. */
2841 static void tg3_disable_nvram_access(struct tg3 *tp)
2842 {
2843         if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM)) {
2844                 u32 nvaccess = tr32(NVRAM_ACCESS);
2845
2846                 tw32(NVRAM_ACCESS, nvaccess & ~ACCESS_ENABLE);
2847         }
2848 }
2849
2850 static int tg3_nvram_read_using_eeprom(struct tg3 *tp,
2851                                         u32 offset, u32 *val)
2852 {
2853         u32 tmp;
2854         int i;
2855
2856         if (offset > EEPROM_ADDR_ADDR_MASK || (offset % 4) != 0)
2857                 return -EINVAL;
2858
2859         tmp = tr32(GRC_EEPROM_ADDR) & ~(EEPROM_ADDR_ADDR_MASK |
2860                                         EEPROM_ADDR_DEVID_MASK |
2861                                         EEPROM_ADDR_READ);
2862         tw32(GRC_EEPROM_ADDR,
2863              tmp |
2864              (0 << EEPROM_ADDR_DEVID_SHIFT) |
2865              ((offset << EEPROM_ADDR_ADDR_SHIFT) &
2866               EEPROM_ADDR_ADDR_MASK) |
2867              EEPROM_ADDR_READ | EEPROM_ADDR_START);
2868
2869         for (i = 0; i < 1000; i++) {
2870                 tmp = tr32(GRC_EEPROM_ADDR);
2871
2872                 if (tmp & EEPROM_ADDR_COMPLETE)
2873                         break;
2874                 msleep(1);
2875         }
2876         if (!(tmp & EEPROM_ADDR_COMPLETE))
2877                 return -EBUSY;
2878
2879         tmp = tr32(GRC_EEPROM_DATA);
2880
2881         /*
2882          * The data will always be opposite the native endian
2883          * format.  Perform a blind byteswap to compensate.
2884          */
2885         *val = swab32(tmp);
2886
2887         return 0;
2888 }
2889
2890 #define NVRAM_CMD_TIMEOUT 10000
2891
2892 static int tg3_nvram_exec_cmd(struct tg3 *tp, u32 nvram_cmd)
2893 {
2894         int i;
2895
2896         tw32(NVRAM_CMD, nvram_cmd);
2897         for (i = 0; i < NVRAM_CMD_TIMEOUT; i++) {
2898                 udelay(10);
2899                 if (tr32(NVRAM_CMD) & NVRAM_CMD_DONE) {
2900                         udelay(10);
2901                         break;
2902                 }
2903         }
2904
2905         if (i == NVRAM_CMD_TIMEOUT)
2906                 return -EBUSY;
2907
2908         return 0;
2909 }
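/*
 * NVRAM_CMD_TIMEOUT of 10000 iterations at udelay(10) gives each
 * NVRAM command roughly a 100 ms completion budget before the
 * -EBUSY path above is taken.
 */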
2910
2911 static u32 tg3_nvram_phys_addr(struct tg3 *tp, u32 addr)
2912 {
2913         if (tg3_flag(tp, NVRAM) &&
2914             tg3_flag(tp, NVRAM_BUFFERED) &&
2915             tg3_flag(tp, FLASH) &&
2916             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2917             (tp->nvram_jedecnum == JEDEC_ATMEL))
2918
2919                 addr = ((addr / tp->nvram_pagesize) <<
2920                         ATMEL_AT45DB0X1B_PAGE_POS) +
2921                        (addr % tp->nvram_pagesize);
2922
2923         return addr;
2924 }
2925
2926 static u32 tg3_nvram_logical_addr(struct tg3 *tp, u32 addr)
2927 {
2928         if (tg3_flag(tp, NVRAM) &&
2929             tg3_flag(tp, NVRAM_BUFFERED) &&
2930             tg3_flag(tp, FLASH) &&
2931             !tg3_flag(tp, NO_NVRAM_ADDR_TRANS) &&
2932             (tp->nvram_jedecnum == JEDEC_ATMEL))
2933
2934                 addr = ((addr >> ATMEL_AT45DB0X1B_PAGE_POS) *
2935                         tp->nvram_pagesize) +
2936                        (addr & ((1 << ATMEL_AT45DB0X1B_PAGE_POS) - 1));
2937
2938         return addr;
2939 }
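/* Worked example, assuming the 264-byte page size the driver configures
 * for these Atmel parts: linear offset 600 is page 2, byte 72, so
 * tg3_nvram_phys_addr() yields (2 << 9) + 72 = 1096, and
 * tg3_nvram_logical_addr() maps 1096 back to (2 * 264) + 72 = 600.
 */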
2940
2941 /* NOTE: Data read in from NVRAM is byteswapped according to
2942  * the byteswapping settings for all other register accesses.
2943  * tg3 devices are BE devices, so on a BE machine, the data
2944  * returned will be exactly as it is seen in NVRAM.  On a LE
2945  * machine, the 32-bit value will be byteswapped.
2946  */
2947 static int tg3_nvram_read(struct tg3 *tp, u32 offset, u32 *val)
2948 {
2949         int ret;
2950
2951         if (!tg3_flag(tp, NVRAM))
2952                 return tg3_nvram_read_using_eeprom(tp, offset, val);
2953
2954         offset = tg3_nvram_phys_addr(tp, offset);
2955
2956         if (offset > NVRAM_ADDR_MSK)
2957                 return -EINVAL;
2958
2959         ret = tg3_nvram_lock(tp);
2960         if (ret)
2961                 return ret;
2962
2963         tg3_enable_nvram_access(tp);
2964
2965         tw32(NVRAM_ADDR, offset);
2966         ret = tg3_nvram_exec_cmd(tp, NVRAM_CMD_RD | NVRAM_CMD_GO |
2967                 NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_DONE);
2968
2969         if (ret == 0)
2970                 *val = tr32(NVRAM_RDDATA);
2971
2972         tg3_disable_nvram_access(tp);
2973
2974         tg3_nvram_unlock(tp);
2975
2976         return ret;
2977 }
2978
2979 /* Ensures NVRAM data is in bytestream format. */
2980 static int tg3_nvram_read_be32(struct tg3 *tp, u32 offset, __be32 *val)
2981 {
2982         u32 v;
2983         int res = tg3_nvram_read(tp, offset, &v);
2984         if (!res)
2985                 *val = cpu_to_be32(v);
2986         return res;
2987 }
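/* Minimal usage sketch: filling a byte buffer so that it matches the raw
 * NVRAM byte order on both LE and BE hosts, exactly as
 * tg3_nvram_write_block_unbuffered() does below when reading back a page:
 *
 *	ret = tg3_nvram_read_be32(tp, phy_addr + j, (__be32 *)(tmp + j));
 */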
2988
2989 static int tg3_nvram_write_block_using_eeprom(struct tg3 *tp,
2990                                     u32 offset, u32 len, u8 *buf)
2991 {
2992         int i, j, rc = 0;
2993         u32 val;
2994
2995         for (i = 0; i < len; i += 4) {
2996                 u32 addr;
2997                 __be32 data;
2998
2999                 addr = offset + i;
3000
3001                 memcpy(&data, buf + i, 4);
3002
3003                 /*
3004                  * The SEEPROM interface expects the data to always be in the
3005                  * opposite of the native endian format.  We accomplish this by reversing
3006                  * all the operations that would have been performed on the
3007                  * data from a call to tg3_nvram_read_be32().
3008                  */
3009                 tw32(GRC_EEPROM_DATA, swab32(be32_to_cpu(data)));
3010
3011                 val = tr32(GRC_EEPROM_ADDR);
3012                 tw32(GRC_EEPROM_ADDR, val | EEPROM_ADDR_COMPLETE);
3013
3014                 val &= ~(EEPROM_ADDR_ADDR_MASK | EEPROM_ADDR_DEVID_MASK |
3015                         EEPROM_ADDR_READ);
3016                 tw32(GRC_EEPROM_ADDR, val |
3017                         (0 << EEPROM_ADDR_DEVID_SHIFT) |
3018                         (addr & EEPROM_ADDR_ADDR_MASK) |
3019                         EEPROM_ADDR_START |
3020                         EEPROM_ADDR_WRITE);
3021
3022                 for (j = 0; j < 1000; j++) {
3023                         val = tr32(GRC_EEPROM_ADDR);
3024
3025                         if (val & EEPROM_ADDR_COMPLETE)
3026                                 break;
3027                         msleep(1);
3028                 }
3029                 if (!(val & EEPROM_ADDR_COMPLETE)) {
3030                         rc = -EBUSY;
3031                         break;
3032                 }
3033         }
3034
3035         return rc;
3036 }
3037
3038 /* offset and length are dword aligned.  Unbuffered flash parts can only
 * be programmed a full page at a time, so each page touched is read back,
 * merged with the new data, erased, and then rewritten in full.
 */
3039 static int tg3_nvram_write_block_unbuffered(struct tg3 *tp, u32 offset, u32 len,
3040                 u8 *buf)
3041 {
3042         int ret = 0;
3043         u32 pagesize = tp->nvram_pagesize;
3044         u32 pagemask = pagesize - 1;
3045         u32 nvram_cmd;
3046         u8 *tmp;
3047
3048         tmp = kmalloc(pagesize, GFP_KERNEL);
3049         if (tmp == NULL)
3050                 return -ENOMEM;
3051
3052         while (len) {
3053                 int j;
3054                 u32 phy_addr, page_off, size;
3055
3056                 phy_addr = offset & ~pagemask;
3057
3058                 for (j = 0; j < pagesize; j += 4) {
3059                         ret = tg3_nvram_read_be32(tp, phy_addr + j,
3060                                                   (__be32 *) (tmp + j));
3061                         if (ret)
3062                                 break;
3063                 }
3064                 if (ret)
3065                         break;
3066
3067                 page_off = offset & pagemask;
3068                 size = pagesize;
3069                 if (len < size)
3070                         size = len;
3071
3072                 len -= size;
3073
3074                 memcpy(tmp + page_off, buf, size);
3075
3076                 offset = offset + (pagesize - page_off);
3077
3078                 tg3_enable_nvram_access(tp);
3079
3080                 /*
3081                  * Before we can erase the flash page, we need
3082                  * to issue a special "write enable" command.
3083                  */
3084                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3085
3086                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3087                 if (ret)
                        break;
3088
3089                 /* Erase the target page */
3090                 tw32(NVRAM_ADDR, phy_addr);
3091
3092                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR |
3093                         NVRAM_CMD_FIRST | NVRAM_CMD_LAST | NVRAM_CMD_ERASE;
3094
3095                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3096                 if (ret)
                        break;
3097
3098                 /* Issue another write enable to start the write. */
3099                 nvram_cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3100
3101                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3102                 if (ret)
                        break;
3103
3104                 for (j = 0; j < pagesize; j += 4) {
3105                         __be32 data;
3106
3107                         data = *((__be32 *) (tmp + j));
3108
3109                         tw32(NVRAM_WRDATA, be32_to_cpu(data));
3110
3111                         tw32(NVRAM_ADDR, phy_addr + j);
3112
3113                         nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE |
3114                                 NVRAM_CMD_WR;
3115
3116                         if (j == 0)
3117                                 nvram_cmd |= NVRAM_CMD_FIRST;
3118                         else if (j == (pagesize - 4))
3119                                 nvram_cmd |= NVRAM_CMD_LAST;
3120
3121                         ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3122                         if (ret)
3123                                 break;
3124                 }
3125                 if (ret)
3126                         break;
3127         }
3128
3129         nvram_cmd = NVRAM_CMD_WRDI | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3130         tg3_nvram_exec_cmd(tp, nvram_cmd);
3131
3132         kfree(tmp);
3133
3134         return ret;
3135 }
3136
3137 /* offset and length are dword aligned.  NVRAM_CMD_FIRST marks the first
 * word of a page (or of the whole transfer) and NVRAM_CMD_LAST the last word
 * of a page or of the buffer, so the controller can frame each burst.
 */
3138 static int tg3_nvram_write_block_buffered(struct tg3 *tp, u32 offset, u32 len,
3139                 u8 *buf)
3140 {
3141         int i, ret = 0;
3142
3143         for (i = 0; i < len; i += 4, offset += 4) {
3144                 u32 page_off, phy_addr, nvram_cmd;
3145                 __be32 data;
3146
3147                 memcpy(&data, buf + i, 4);
3148                 tw32(NVRAM_WRDATA, be32_to_cpu(data));
3149
3150                 page_off = offset % tp->nvram_pagesize;
3151
3152                 phy_addr = tg3_nvram_phys_addr(tp, offset);
3153
3154                 nvram_cmd = NVRAM_CMD_GO | NVRAM_CMD_DONE | NVRAM_CMD_WR;
3155
3156                 if (page_off == 0 || i == 0)
3157                         nvram_cmd |= NVRAM_CMD_FIRST;
3158                 if (page_off == (tp->nvram_pagesize - 4))
3159                         nvram_cmd |= NVRAM_CMD_LAST;
3160
3161                 if (i == (len - 4))
3162                         nvram_cmd |= NVRAM_CMD_LAST;
3163
3164                 if ((nvram_cmd & NVRAM_CMD_FIRST) ||
3165                     !tg3_flag(tp, FLASH) ||
3166                     !tg3_flag(tp, 57765_PLUS))
3167                         tw32(NVRAM_ADDR, phy_addr);
3168
3169                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5752 &&
3170                     !tg3_flag(tp, 5755_PLUS) &&
3171                     (tp->nvram_jedecnum == JEDEC_ST) &&
3172                     (nvram_cmd & NVRAM_CMD_FIRST)) {
3173                         u32 cmd;
3174
3175                         cmd = NVRAM_CMD_WREN | NVRAM_CMD_GO | NVRAM_CMD_DONE;
3176                         ret = tg3_nvram_exec_cmd(tp, cmd);
3177                         if (ret)
3178                                 break;
3179                 }
3180                 if (!tg3_flag(tp, FLASH)) {
3181                         /* We always do complete word writes to eeprom. */
3182                         nvram_cmd |= (NVRAM_CMD_FIRST | NVRAM_CMD_LAST);
3183                 }
3184
3185                 ret = tg3_nvram_exec_cmd(tp, nvram_cmd);
3186                 if (ret)
3187                         break;
3188         }
3189         return ret;
3190 }
3191
3192 /* offset and length are dword aligned */
3193 static int tg3_nvram_write_block(struct tg3 *tp, u32 offset, u32 len, u8 *buf)
3194 {
3195         int ret;
3196
3197         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3198                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl &
3199                        ~GRC_LCLCTRL_GPIO_OUTPUT1);
3200                 udelay(40);
3201         }
3202
3203         if (!tg3_flag(tp, NVRAM)) {
3204                 ret = tg3_nvram_write_block_using_eeprom(tp, offset, len, buf);
3205         } else {
3206                 u32 grc_mode;
3207
3208                 ret = tg3_nvram_lock(tp);
3209                 if (ret)
3210                         return ret;
3211
3212                 tg3_enable_nvram_access(tp);
3213                 if (tg3_flag(tp, 5750_PLUS) && !tg3_flag(tp, PROTECTED_NVRAM))
3214                         tw32(NVRAM_WRITE1, 0x406);
3215
3216                 grc_mode = tr32(GRC_MODE);
3217                 tw32(GRC_MODE, grc_mode | GRC_MODE_NVRAM_WR_ENABLE);
3218
3219                 if (tg3_flag(tp, NVRAM_BUFFERED) || !tg3_flag(tp, FLASH)) {
3220                         ret = tg3_nvram_write_block_buffered(tp, offset, len,
3221                                 buf);
3222                 } else {
3223                         ret = tg3_nvram_write_block_unbuffered(tp, offset, len,
3224                                 buf);
3225                 }
3226
3227                 grc_mode = tr32(GRC_MODE);
3228                 tw32(GRC_MODE, grc_mode & ~GRC_MODE_NVRAM_WR_ENABLE);
3229
3230                 tg3_disable_nvram_access(tp);
3231                 tg3_nvram_unlock(tp);
3232         }
3233
3234         if (tg3_flag(tp, EEPROM_WRITE_PROT)) {
3235                 tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
3236                 udelay(40);
3237         }
3238
3239         return ret;
3240 }
3241
3242 #define RX_CPU_SCRATCH_BASE     0x30000
3243 #define RX_CPU_SCRATCH_SIZE     0x04000
3244 #define TX_CPU_SCRATCH_BASE     0x34000
3245 #define TX_CPU_SCRATCH_SIZE     0x04000
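/* Each on-chip CPU gets a 16 kB (0x4000-byte) scratch window in NIC-local
 * memory: the RX CPU at 0x30000-0x33fff and the TX CPU at 0x34000-0x37fff.
 */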
3246
3247 /* tp->lock is held. */
3248 static int tg3_halt_cpu(struct tg3 *tp, u32 offset)
3249 {
3250         int i;
3251
3252         BUG_ON(offset == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS));
3253
3254         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3255                 u32 val = tr32(GRC_VCPU_EXT_CTRL);
3256
3257                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_HALT_CPU);
3258                 return 0;
3259         }
3260         if (offset == RX_CPU_BASE) {
3261                 for (i = 0; i < 10000; i++) {
3262                         tw32(offset + CPU_STATE, 0xffffffff);
3263                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3264                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3265                                 break;
3266                 }
3267
3268                 tw32(offset + CPU_STATE, 0xffffffff);
3269                 tw32_f(offset + CPU_MODE,  CPU_MODE_HALT);
3270                 udelay(10);
3271         } else {
3272                 for (i = 0; i < 10000; i++) {
3273                         tw32(offset + CPU_STATE, 0xffffffff);
3274                         tw32(offset + CPU_MODE,  CPU_MODE_HALT);
3275                         if (tr32(offset + CPU_MODE) & CPU_MODE_HALT)
3276                                 break;
3277                 }
3278         }
3279
3280         if (i >= 10000) {
3281                 netdev_err(tp->dev, "%s timed out, %s CPU\n",
3282                            __func__, offset == RX_CPU_BASE ? "RX" : "TX");
3283                 return -ENODEV;
3284         }
3285
3286         /* Clear firmware's nvram arbitration. */
3287         if (tg3_flag(tp, NVRAM))
3288                 tw32(NVRAM_SWARB, SWARB_REQ_CLR0);
3289         return 0;
3290 }
3291
3292 struct fw_info {
3293         unsigned int fw_base;
3294         unsigned int fw_len;
3295         const __be32 *fw_data;
3296 };
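/* As consumed by tg3_load_5701_a0_firmware_fix() and tg3_load_tso_firmware()
 * below, the firmware blob is three big-endian header dwords followed by
 * the image:
 *
 *	fw_data[0]	version
 *	fw_data[1]	fw_base (load/start address)
 *	fw_data[2]	stated length
 *	fw_data[3...]	image; fw_len = tp->fw->size - 12 header bytes
 */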
3297
3298 /* tp->lock is held. */
3299 static int tg3_load_firmware_cpu(struct tg3 *tp, u32 cpu_base,
3300                                  u32 cpu_scratch_base, int cpu_scratch_size,
3301                                  struct fw_info *info)
3302 {
3303         int err, lock_err, i;
3304         void (*write_op)(struct tg3 *, u32, u32);
3305
3306         if (cpu_base == TX_CPU_BASE && tg3_flag(tp, 5705_PLUS)) {
3307                 netdev_err(tp->dev,
3308                            "%s: Trying to load TX cpu firmware, but 5705-class devices have no TX cpu\n",
3309                            __func__);
3310                 return -EINVAL;
3311         }
3312
3313         if (tg3_flag(tp, 5705_PLUS))
3314                 write_op = tg3_write_mem;
3315         else
3316                 write_op = tg3_write_indirect_reg32;
3317
3318         /* It is possible that bootcode is still loading at this point.
3319          * Get the nvram lock before halting the cpu.
3320          */
3321         lock_err = tg3_nvram_lock(tp);
3322         err = tg3_halt_cpu(tp, cpu_base);
3323         if (!lock_err)
3324                 tg3_nvram_unlock(tp);
3325         if (err)
3326                 goto out;
3327
3328         for (i = 0; i < cpu_scratch_size; i += sizeof(u32))
3329                 write_op(tp, cpu_scratch_base + i, 0);
3330         tw32(cpu_base + CPU_STATE, 0xffffffff);
3331         tw32(cpu_base + CPU_MODE, tr32(cpu_base+CPU_MODE)|CPU_MODE_HALT);
3332         for (i = 0; i < (info->fw_len / sizeof(u32)); i++)
3333                 write_op(tp, (cpu_scratch_base +
3334                               (info->fw_base & 0xffff) +
3335                               (i * sizeof(u32))),
3336                               be32_to_cpu(info->fw_data[i]));
3337
3338         err = 0;
3339
3340 out:
3341         return err;
3342 }
3343
3344 /* tp->lock is held. */
3345 static int tg3_load_5701_a0_firmware_fix(struct tg3 *tp)
3346 {
3347         struct fw_info info;
3348         const __be32 *fw_data;
3349         int err, i;
3350
3351         fw_data = (void *)tp->fw->data;
3352
3353         /* The firmware blob starts with version numbers, followed by
3354          * the start address and length.  We use the complete length:
3355          * length = end_address_of_bss - start_address_of_text.  The
3356          * remainder is the image, loaded contiguously from the start
3357          * address. */
3358
3359         info.fw_base = be32_to_cpu(fw_data[1]);
3360         info.fw_len = tp->fw->size - 12;
3361         info.fw_data = &fw_data[3];
3362
3363         err = tg3_load_firmware_cpu(tp, RX_CPU_BASE,
3364                                     RX_CPU_SCRATCH_BASE, RX_CPU_SCRATCH_SIZE,
3365                                     &info);
3366         if (err)
3367                 return err;
3368
3369         err = tg3_load_firmware_cpu(tp, TX_CPU_BASE,
3370                                     TX_CPU_SCRATCH_BASE, TX_CPU_SCRATCH_SIZE,
3371                                     &info);
3372         if (err)
3373                 return err;
3374
3375         /* Now startup only the RX cpu. */
3376         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3377         tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3378
3379         for (i = 0; i < 5; i++) {
3380                 if (tr32(RX_CPU_BASE + CPU_PC) == info.fw_base)
3381                         break;
3382                 tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3383                 tw32(RX_CPU_BASE + CPU_MODE,  CPU_MODE_HALT);
3384                 tw32_f(RX_CPU_BASE + CPU_PC, info.fw_base);
3385                 udelay(1000);
3386         }
3387         if (i >= 5) {
3388                 netdev_err(tp->dev, "%s failed to set RX CPU PC: is %08x, "
3389                            "should be %08x\n", __func__,
3390                            tr32(RX_CPU_BASE + CPU_PC), info.fw_base);
3391                 return -ENODEV;
3392         }
3393         tw32(RX_CPU_BASE + CPU_STATE, 0xffffffff);
3394         tw32_f(RX_CPU_BASE + CPU_MODE,  0x00000000);
3395
3396         return 0;
3397 }
3398
3399 /* tp->lock is held. */
3400 static int tg3_load_tso_firmware(struct tg3 *tp)
3401 {
3402         struct fw_info info;
3403         const __be32 *fw_data;
3404         unsigned long cpu_base, cpu_scratch_base, cpu_scratch_size;
3405         int err, i;
3406
3407         if (tg3_flag(tp, HW_TSO_1) ||
3408             tg3_flag(tp, HW_TSO_2) ||
3409             tg3_flag(tp, HW_TSO_3))
3410                 return 0;
3411
3412         fw_data = (void *)tp->fw->data;
3413
3414         /* The firmware blob starts with version numbers, followed by
3415          * the start address and length.  We use the complete length:
3416          * length = end_address_of_bss - start_address_of_text.  The
3417          * remainder is the image, loaded contiguously from the start
3418          * address. */
3419
3420         info.fw_base = be32_to_cpu(fw_data[1]);
3421         cpu_scratch_size = tp->fw_len;
3422         info.fw_len = tp->fw->size - 12;
3423         info.fw_data = &fw_data[3];
3424
3425         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
3426                 cpu_base = RX_CPU_BASE;
3427                 cpu_scratch_base = NIC_SRAM_MBUF_POOL_BASE5705;
3428         } else {
3429                 cpu_base = TX_CPU_BASE;
3430                 cpu_scratch_base = TX_CPU_SCRATCH_BASE;
3431                 cpu_scratch_size = TX_CPU_SCRATCH_SIZE;
3432         }
3433
3434         err = tg3_load_firmware_cpu(tp, cpu_base,
3435                                     cpu_scratch_base, cpu_scratch_size,
3436                                     &info);
3437         if (err)
3438                 return err;
3439
3440         /* Now startup the cpu. */
3441         tw32(cpu_base + CPU_STATE, 0xffffffff);
3442         tw32_f(cpu_base + CPU_PC, info.fw_base);
3443
3444         for (i = 0; i < 5; i++) {
3445                 if (tr32(cpu_base + CPU_PC) == info.fw_base)
3446                         break;
3447                 tw32(cpu_base + CPU_STATE, 0xffffffff);
3448                 tw32(cpu_base + CPU_MODE,  CPU_MODE_HALT);
3449                 tw32_f(cpu_base + CPU_PC, info.fw_base);
3450                 udelay(1000);
3451         }
3452         if (i >= 5) {
3453                 netdev_err(tp->dev,
3454                            "%s failed to set CPU PC: is %08x, should be %08x\n",
3455                            __func__, tr32(cpu_base + CPU_PC), info.fw_base);
3456                 return -ENODEV;
3457         }
3458         tw32(cpu_base + CPU_STATE, 0xffffffff);
3459         tw32_f(cpu_base + CPU_MODE,  0x00000000);
3460         return 0;
3461 }
3462
3463
3464 /* tp->lock is held. */
3465 static void __tg3_set_mac_addr(struct tg3 *tp, int skip_mac_1)
3466 {
3467         u32 addr_high, addr_low;
3468         int i;
3469
3470         addr_high = ((tp->dev->dev_addr[0] << 8) |
3471                      tp->dev->dev_addr[1]);
3472         addr_low = ((tp->dev->dev_addr[2] << 24) |
3473                     (tp->dev->dev_addr[3] << 16) |
3474                     (tp->dev->dev_addr[4] <<  8) |
3475                     (tp->dev->dev_addr[5] <<  0));
3476         for (i = 0; i < 4; i++) {
3477                 if (i == 1 && skip_mac_1)
3478                         continue;
3479                 tw32(MAC_ADDR_0_HIGH + (i * 8), addr_high);
3480                 tw32(MAC_ADDR_0_LOW + (i * 8), addr_low);
3481         }
3482
3483         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
3484             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
3485                 for (i = 0; i < 12; i++) {
3486                         tw32(MAC_EXTADDR_0_HIGH + (i * 8), addr_high);
3487                         tw32(MAC_EXTADDR_0_LOW + (i * 8), addr_low);
3488                 }
3489         }
3490
3491         addr_high = (tp->dev->dev_addr[0] +
3492                      tp->dev->dev_addr[1] +
3493                      tp->dev->dev_addr[2] +
3494                      tp->dev->dev_addr[3] +
3495                      tp->dev->dev_addr[4] +
3496                      tp->dev->dev_addr[5]) &
3497                 TX_BACKOFF_SEED_MASK;
3498         tw32(MAC_TX_BACKOFF_SEED, addr_high);
3499 }
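/* Packing example with a made-up address 00:10:18:aa:bb:cc: addr_high
 * holds bytes 0-1 (0x00000010) and addr_low bytes 2-5 (0x18aabbcc),
 * written to all four MAC_ADDR_* register pairs above.
 */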
3500
3501 static void tg3_enable_register_access(struct tg3 *tp)
3502 {
3503         /*
3504          * Make sure register accesses (indirect or otherwise) will function
3505          * correctly.
3506          */
3507         pci_write_config_dword(tp->pdev,
3508                                TG3PCI_MISC_HOST_CTRL, tp->misc_host_ctrl);
3509 }
3510
3511 static int tg3_power_up(struct tg3 *tp)
3512 {
3513         int err;
3514
3515         tg3_enable_register_access(tp);
3516
3517         err = pci_set_power_state(tp->pdev, PCI_D0);
3518         if (!err) {
3519                 /* Switch out of Vaux if it is a NIC */
3520                 tg3_pwrsrc_switch_to_vmain(tp);
3521         } else {
3522                 netdev_err(tp->dev, "Transition to D0 failed\n");
3523         }
3524
3525         return err;
3526 }
3527
3528 static int tg3_setup_phy(struct tg3 *, int);
3529
3530 static int tg3_power_down_prepare(struct tg3 *tp)
3531 {
3532         u32 misc_host_ctrl;
3533         bool device_should_wake, do_low_power;
3534
3535         tg3_enable_register_access(tp);
3536
3537         /* Restore the CLKREQ setting. */
3538         if (tg3_flag(tp, CLKREQ_BUG)) {
3539                 u16 lnkctl;
3540
3541                 pci_read_config_word(tp->pdev,
3542                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3543                                      &lnkctl);
3544                 lnkctl |= PCI_EXP_LNKCTL_CLKREQ_EN;
3545                 pci_write_config_word(tp->pdev,
3546                                       pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
3547                                       lnkctl);
3548         }
3549
3550         misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
3551         tw32(TG3PCI_MISC_HOST_CTRL,
3552              misc_host_ctrl | MISC_HOST_CTRL_MASK_PCI_INT);
3553
3554         device_should_wake = device_may_wakeup(&tp->pdev->dev) &&
3555                              tg3_flag(tp, WOL_ENABLE);
3556
3557         if (tg3_flag(tp, USE_PHYLIB)) {
3558                 do_low_power = false;
3559                 if ((tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) &&
3560                     !(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3561                         struct phy_device *phydev;
3562                         u32 phyid, advertising;
3563
3564                         phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
3565
3566                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3567
3568                         tp->link_config.speed = phydev->speed;
3569                         tp->link_config.duplex = phydev->duplex;
3570                         tp->link_config.autoneg = phydev->autoneg;
3571                         tp->link_config.advertising = phydev->advertising;
3572
3573                         advertising = ADVERTISED_TP |
3574                                       ADVERTISED_Pause |
3575                                       ADVERTISED_Autoneg |
3576                                       ADVERTISED_10baseT_Half;
3577
3578                         if (tg3_flag(tp, ENABLE_ASF) || device_should_wake) {
3579                                 if (tg3_flag(tp, WOL_SPEED_100MB))
3580                                         advertising |=
3581                                                 ADVERTISED_100baseT_Half |
3582                                                 ADVERTISED_100baseT_Full |
3583                                                 ADVERTISED_10baseT_Full;
3584                                 else
3585                                         advertising |= ADVERTISED_10baseT_Full;
3586                         }
3587
3588                         phydev->advertising = advertising;
3589
3590                         phy_start_aneg(phydev);
3591
3592                         phyid = phydev->drv->phy_id & phydev->drv->phy_id_mask;
3593                         if (phyid != PHY_ID_BCMAC131) {
3594                                 phyid &= PHY_BCM_OUI_MASK;
3595                                 if (phyid == PHY_BCM_OUI_1 ||
3596                                     phyid == PHY_BCM_OUI_2 ||
3597                                     phyid == PHY_BCM_OUI_3)
3598                                         do_low_power = true;
3599                         }
3600                 }
3601         } else {
3602                 do_low_power = true;
3603
3604                 if (!(tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER))
3605                         tp->phy_flags |= TG3_PHYFLG_IS_LOW_POWER;
3606
3607                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
3608                         tg3_setup_phy(tp, 0);
3609         }
3610
3611         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3612                 u32 val;
3613
3614                 val = tr32(GRC_VCPU_EXT_CTRL);
3615                 tw32(GRC_VCPU_EXT_CTRL, val | GRC_VCPU_EXT_CTRL_DISABLE_WOL);
3616         } else if (!tg3_flag(tp, ENABLE_ASF)) {
3617                 int i;
3618                 u32 val;
3619
3620                 for (i = 0; i < 200; i++) {
3621                         tg3_read_mem(tp, NIC_SRAM_FW_ASF_STATUS_MBOX, &val);
3622                         if (val == ~NIC_SRAM_FIRMWARE_MBOX_MAGIC1)
3623                                 break;
3624                         msleep(1);
3625                 }
3626         }
3627         if (tg3_flag(tp, WOL_CAP))
3628                 tg3_write_mem(tp, NIC_SRAM_WOL_MBOX, WOL_SIGNATURE |
3629                                                      WOL_DRV_STATE_SHUTDOWN |
3630                                                      WOL_DRV_WOL |
3631                                                      WOL_SET_MAGIC_PKT);
3632
3633         if (device_should_wake) {
3634                 u32 mac_mode;
3635
3636                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
3637                         if (do_low_power &&
3638                             !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
3639                                 tg3_phy_auxctl_write(tp,
3640                                                MII_TG3_AUXCTL_SHDWSEL_PWRCTL,
3641                                                MII_TG3_AUXCTL_PCTL_WOL_EN |
3642                                                MII_TG3_AUXCTL_PCTL_100TX_LPWR |
3643                                                MII_TG3_AUXCTL_PCTL_CL_AB_TXDAC);
3644                                 udelay(40);
3645                         }
3646
3647                         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
3648                                 mac_mode = MAC_MODE_PORT_MODE_GMII;
3649                         else
3650                                 mac_mode = MAC_MODE_PORT_MODE_MII;
3651
3652                         mac_mode |= tp->mac_mode & MAC_MODE_LINK_POLARITY;
3653                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
3654                             ASIC_REV_5700) {
3655                                 u32 speed = tg3_flag(tp, WOL_SPEED_100MB) ?
3656                                              SPEED_100 : SPEED_10;
3657                                 if (tg3_5700_link_polarity(tp, speed))
3658                                         mac_mode |= MAC_MODE_LINK_POLARITY;
3659                                 else
3660                                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
3661                         }
3662                 } else {
3663                         mac_mode = MAC_MODE_PORT_MODE_TBI;
3664                 }
3665
3666                 if (!tg3_flag(tp, 5750_PLUS))
3667                         tw32(MAC_LED_CTRL, tp->led_ctrl);
3668
3669                 mac_mode |= MAC_MODE_MAGIC_PKT_ENABLE;
3670                 if ((tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS)) &&
3671                     (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)))
3672                         mac_mode |= MAC_MODE_KEEP_FRAME_IN_WOL;
3673
3674                 if (tg3_flag(tp, ENABLE_APE))
3675                         mac_mode |= MAC_MODE_APE_TX_EN |
3676                                     MAC_MODE_APE_RX_EN |
3677                                     MAC_MODE_TDE_ENABLE;
3678
3679                 tw32_f(MAC_MODE, mac_mode);
3680                 udelay(100);
3681
3682                 tw32_f(MAC_RX_MODE, RX_MODE_ENABLE);
3683                 udelay(10);
3684         }
3685
3686         if (!tg3_flag(tp, WOL_SPEED_100MB) &&
3687             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3688              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
3689                 u32 base_val;
3690
3691                 base_val = tp->pci_clock_ctrl;
3692                 base_val |= (CLOCK_CTRL_RXCLK_DISABLE |
3693                              CLOCK_CTRL_TXCLK_DISABLE);
3694
3695                 tw32_wait_f(TG3PCI_CLOCK_CTRL, base_val | CLOCK_CTRL_ALTCLK |
3696                             CLOCK_CTRL_PWRDOWN_PLL133, 40);
3697         } else if (tg3_flag(tp, 5780_CLASS) ||
3698                    tg3_flag(tp, CPMU_PRESENT) ||
3699                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
3700                 /* do nothing */
3701         } else if (!(tg3_flag(tp, 5750_PLUS) && tg3_flag(tp, ENABLE_ASF))) {
3702                 u32 newbits1, newbits2;
3703
3704                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3705                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3706                         newbits1 = (CLOCK_CTRL_RXCLK_DISABLE |
3707                                     CLOCK_CTRL_TXCLK_DISABLE |
3708                                     CLOCK_CTRL_ALTCLK);
3709                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3710                 } else if (tg3_flag(tp, 5705_PLUS)) {
3711                         newbits1 = CLOCK_CTRL_625_CORE;
3712                         newbits2 = newbits1 | CLOCK_CTRL_ALTCLK;
3713                 } else {
3714                         newbits1 = CLOCK_CTRL_ALTCLK;
3715                         newbits2 = newbits1 | CLOCK_CTRL_44MHZ_CORE;
3716                 }
3717
3718                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits1,
3719                             40);
3720
3721                 tw32_wait_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl | newbits2,
3722                             40);
3723
3724                 if (!tg3_flag(tp, 5705_PLUS)) {
3725                         u32 newbits3;
3726
3727                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
3728                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
3729                                 newbits3 = (CLOCK_CTRL_RXCLK_DISABLE |
3730                                             CLOCK_CTRL_TXCLK_DISABLE |
3731                                             CLOCK_CTRL_44MHZ_CORE);
3732                         } else {
3733                                 newbits3 = CLOCK_CTRL_44MHZ_CORE;
3734                         }
3735
3736                         tw32_wait_f(TG3PCI_CLOCK_CTRL,
3737                                     tp->pci_clock_ctrl | newbits3, 40);
3738                 }
3739         }
3740
3741         if (!(device_should_wake) && !tg3_flag(tp, ENABLE_ASF))
3742                 tg3_power_down_phy(tp, do_low_power);
3743
3744         tg3_frob_aux_power(tp, true);
3745
3746         /* Workaround for unstable PLL clock */
3747         if ((GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX) ||
3748             (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX)) {
3749                 u32 val = tr32(0x7d00);
3750
3751                 val &= ~((1 << 16) | (1 << 4) | (1 << 2) | (1 << 1) | 1);
3752                 tw32(0x7d00, val);
3753                 if (!tg3_flag(tp, ENABLE_ASF)) {
3754                         int err;
3755
3756                         err = tg3_nvram_lock(tp);
3757                         tg3_halt_cpu(tp, RX_CPU_BASE);
3758                         if (!err)
3759                                 tg3_nvram_unlock(tp);
3760                 }
3761         }
3762
3763         tg3_write_sig_post_reset(tp, RESET_KIND_SHUTDOWN);
3764
3765         return 0;
3766 }
3767
3768 static void tg3_power_down(struct tg3 *tp)
3769 {
3770         tg3_power_down_prepare(tp);
3771
3772         pci_wake_from_d3(tp->pdev, tg3_flag(tp, WOL_ENABLE));
3773         pci_set_power_state(tp->pdev, PCI_D3hot);
3774 }
3775
3776 static void tg3_aux_stat_to_speed_duplex(struct tg3 *tp, u32 val, u16 *speed, u8 *duplex)
3777 {
3778         switch (val & MII_TG3_AUX_STAT_SPDMASK) {
3779         case MII_TG3_AUX_STAT_10HALF:
3780                 *speed = SPEED_10;
3781                 *duplex = DUPLEX_HALF;
3782                 break;
3783
3784         case MII_TG3_AUX_STAT_10FULL:
3785                 *speed = SPEED_10;
3786                 *duplex = DUPLEX_FULL;
3787                 break;
3788
3789         case MII_TG3_AUX_STAT_100HALF:
3790                 *speed = SPEED_100;
3791                 *duplex = DUPLEX_HALF;
3792                 break;
3793
3794         case MII_TG3_AUX_STAT_100FULL:
3795                 *speed = SPEED_100;
3796                 *duplex = DUPLEX_FULL;
3797                 break;
3798
3799         case MII_TG3_AUX_STAT_1000HALF:
3800                 *speed = SPEED_1000;
3801                 *duplex = DUPLEX_HALF;
3802                 break;
3803
3804         case MII_TG3_AUX_STAT_1000FULL:
3805                 *speed = SPEED_1000;
3806                 *duplex = DUPLEX_FULL;
3807                 break;
3808
3809         default:
3810                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
3811                         *speed = (val & MII_TG3_AUX_STAT_100) ? SPEED_100 :
3812                                  SPEED_10;
3813                         *duplex = (val & MII_TG3_AUX_STAT_FULL) ? DUPLEX_FULL :
3814                                   DUPLEX_HALF;
3815                         break;
3816                 }
3817                 *speed = SPEED_UNKNOWN;
3818                 *duplex = DUPLEX_UNKNOWN;
3819                 break;
3820         }
3821 }
3822
3823 static int tg3_phy_autoneg_cfg(struct tg3 *tp, u32 advertise, u32 flowctrl)
3824 {
3825         int err = 0;
3826         u32 val, new_adv;
3827
3828         new_adv = ADVERTISE_CSMA;
3829         new_adv |= ethtool_adv_to_mii_adv_t(advertise) & ADVERTISE_ALL;
3830         new_adv |= mii_advertise_flowctrl(flowctrl);
3831
3832         err = tg3_writephy(tp, MII_ADVERTISE, new_adv);
3833         if (err)
3834                 goto done;
3835
3836         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
3837                 new_adv = ethtool_adv_to_mii_ctrl1000_t(advertise);
3838
3839                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
3840                     tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)
3841                         new_adv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
3842
3843                 err = tg3_writephy(tp, MII_CTRL1000, new_adv);
3844                 if (err)
3845                         goto done;
3846         }
3847
3848         if (!(tp->phy_flags & TG3_PHYFLG_EEE_CAP))
3849                 goto done;
3850
3851         tw32(TG3_CPMU_EEE_MODE,
3852              tr32(TG3_CPMU_EEE_MODE) & ~TG3_CPMU_EEEMD_LPI_ENABLE);
3853
3854         err = TG3_PHY_AUXCTL_SMDSP_ENABLE(tp);
3855         if (!err) {
3856                 u32 err2;
3857
3858                 val = 0;
3859                 /* Advertise 100-BaseTX EEE ability */
3860                 if (advertise & ADVERTISED_100baseT_Full)
3861                         val |= MDIO_AN_EEE_ADV_100TX;
3862                 /* Advertise 1000-BaseT EEE ability */
3863                 if (advertise & ADVERTISED_1000baseT_Full)
3864                         val |= MDIO_AN_EEE_ADV_1000T;
3865                 err = tg3_phy_cl45_write(tp, MDIO_MMD_AN, MDIO_AN_EEE_ADV, val);
3866                 if (err)
3867                         val = 0;
3868
3869                 switch (GET_ASIC_REV(tp->pci_chip_rev_id)) {
3870                 case ASIC_REV_5717:
3871                 case ASIC_REV_57765:
3872                 case ASIC_REV_57766:
3873                 case ASIC_REV_5719:
3874                         /* If we advertised any EEE abilities above... */
3875                         if (val)
3876                                 val = MII_TG3_DSP_TAP26_ALNOKO |
3877                                       MII_TG3_DSP_TAP26_RMRXSTO |
3878                                       MII_TG3_DSP_TAP26_OPCSINPT;
3879                         tg3_phydsp_write(tp, MII_TG3_DSP_TAP26, val);
3880                         /* Fall through */
3881                 case ASIC_REV_5720:
3882                         if (!tg3_phydsp_read(tp, MII_TG3_DSP_CH34TP2, &val))
3883                                 tg3_phydsp_write(tp, MII_TG3_DSP_CH34TP2, val |
3884                                                  MII_TG3_DSP_CH34TP2_HIBW01);
3885                 }
3886
3887                 err2 = TG3_PHY_AUXCTL_SMDSP_DISABLE(tp);
3888                 if (!err)
3889                         err = err2;
3890         }
3891
3892 done:
3893         return err;
3894 }
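/* For reference, the ethtool-to-MII translation above is mechanical:
 * e.g. ADVERTISED_100baseT_Full becomes ADVERTISE_100FULL in
 * MII_ADVERTISE, and ADVERTISED_1000baseT_Full becomes ADVERTISE_1000FULL
 * in MII_CTRL1000 via ethtool_adv_to_mii_ctrl1000_t().
 */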
3895
3896 static void tg3_phy_copper_begin(struct tg3 *tp)
3897 {
3898         if (tp->link_config.autoneg == AUTONEG_ENABLE ||
3899             (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
3900                 u32 adv, fc;
3901
3902                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) {
3903                         adv = ADVERTISED_10baseT_Half |
3904                               ADVERTISED_10baseT_Full;
3905                         if (tg3_flag(tp, WOL_SPEED_100MB))
3906                                 adv |= ADVERTISED_100baseT_Half |
3907                                        ADVERTISED_100baseT_Full;
3908
3909                         fc = FLOW_CTRL_TX | FLOW_CTRL_RX;
3910                 } else {
3911                         adv = tp->link_config.advertising;
3912                         if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
3913                                 adv &= ~(ADVERTISED_1000baseT_Half |
3914                                          ADVERTISED_1000baseT_Full);
3915
3916                         fc = tp->link_config.flowctrl;
3917                 }
3918
3919                 tg3_phy_autoneg_cfg(tp, adv, fc);
3920
3921                 tg3_writephy(tp, MII_BMCR,
3922                              BMCR_ANENABLE | BMCR_ANRESTART);
3923         } else {
3924                 int i;
3925                 u32 bmcr, orig_bmcr;
3926
3927                 tp->link_config.active_speed = tp->link_config.speed;
3928                 tp->link_config.active_duplex = tp->link_config.duplex;
3929
3930                 bmcr = 0;
3931                 switch (tp->link_config.speed) {
3932                 default:
3933                 case SPEED_10:
3934                         break;
3935
3936                 case SPEED_100:
3937                         bmcr |= BMCR_SPEED100;
3938                         break;
3939
3940                 case SPEED_1000:
3941                         bmcr |= BMCR_SPEED1000;
3942                         break;
3943                 }
3944
3945                 if (tp->link_config.duplex == DUPLEX_FULL)
3946                         bmcr |= BMCR_FULLDPLX;
3947
3948                 if (!tg3_readphy(tp, MII_BMCR, &orig_bmcr) &&
3949                     (bmcr != orig_bmcr)) {
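                        /* Drop the link first (BMCR_LOOPBACK isolates the
                         * PHY from the wire), wait for link-down, then
                         * apply the forced speed/duplex settings below.
                         */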
3950                         tg3_writephy(tp, MII_BMCR, BMCR_LOOPBACK);
3951                         for (i = 0; i < 1500; i++) {
3952                                 u32 tmp;
3953
3954                                 udelay(10);
3955                                 if (tg3_readphy(tp, MII_BMSR, &tmp) ||
3956                                     tg3_readphy(tp, MII_BMSR, &tmp))
3957                                         continue;
3958                                 if (!(tmp & BMSR_LSTATUS)) {
3959                                         udelay(40);
3960                                         break;
3961                                 }
3962                         }
3963                         tg3_writephy(tp, MII_BMCR, bmcr);
3964                         udelay(40);
3965                 }
3966         }
3967 }
3968
3969 static int tg3_init_5401phy_dsp(struct tg3 *tp)
3970 {
3971         int err;
3972
3973         /* Turn off tap power management. */
3974         /* Set Extended packet length bit */
3975         err = tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_AUXCTL, 0x4c20);
3976
3977         err |= tg3_phydsp_write(tp, 0x0012, 0x1804);
3978         err |= tg3_phydsp_write(tp, 0x0013, 0x1204);
3979         err |= tg3_phydsp_write(tp, 0x8006, 0x0132);
3980         err |= tg3_phydsp_write(tp, 0x8006, 0x0232);
3981         err |= tg3_phydsp_write(tp, 0x201f, 0x0a20);
3982
3983         udelay(40);
3984
3985         return err;
3986 }
3987
3988 static bool tg3_phy_copper_an_config_ok(struct tg3 *tp, u32 *lcladv)
3989 {
3990         u32 advmsk, tgtadv, advertising;
3991
3992         advertising = tp->link_config.advertising;
3993         tgtadv = ethtool_adv_to_mii_adv_t(advertising) & ADVERTISE_ALL;
3994
3995         advmsk = ADVERTISE_ALL;
3996         if (tp->link_config.active_duplex == DUPLEX_FULL) {
3997                 tgtadv |= mii_advertise_flowctrl(tp->link_config.flowctrl);
3998                 advmsk |= ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
3999         }
4000
4001         if (tg3_readphy(tp, MII_ADVERTISE, lcladv))
4002                 return false;
4003
4004         if ((*lcladv & advmsk) != tgtadv)
4005                 return false;
4006
4007         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4008                 u32 tg3_ctrl;
4009
4010                 tgtadv = ethtool_adv_to_mii_ctrl1000_t(advertising);
4011
4012                 if (tg3_readphy(tp, MII_CTRL1000, &tg3_ctrl))
4013                         return false;
4014
4015                 if (tgtadv &&
4016                     (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4017                      tp->pci_chip_rev_id == CHIPREV_ID_5701_B0)) {
4018                         tgtadv |= CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER;
4019                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL |
4020                                      CTL1000_AS_MASTER | CTL1000_ENABLE_MASTER);
4021                 } else {
4022                         tg3_ctrl &= (ADVERTISE_1000HALF | ADVERTISE_1000FULL);
4023                 }
4024
4025                 if (tg3_ctrl != tgtadv)
4026                         return false;
4027         }
4028
4029         return true;
4030 }
4031
4032 static bool tg3_phy_copper_fetch_rmtadv(struct tg3 *tp, u32 *rmtadv)
4033 {
4034         u32 lpeth = 0;
4035
4036         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY)) {
4037                 u32 val;
4038
4039                 if (tg3_readphy(tp, MII_STAT1000, &val))
4040                         return false;
4041
4042                 lpeth = mii_stat1000_to_ethtool_lpa_t(val);
4043         }
4044
4045         if (tg3_readphy(tp, MII_LPA, rmtadv))
4046                 return false;
4047
4048         lpeth |= mii_lpa_to_ethtool_lpa_t(*rmtadv);
4049         tp->link_config.rmt_adv = lpeth;
4050
4051         return true;
4052 }
4053
4054 static int tg3_setup_copper_phy(struct tg3 *tp, int force_reset)
4055 {
4056         int current_link_up;
4057         u32 bmsr, val;
4058         u32 lcl_adv, rmt_adv;
4059         u16 current_speed;
4060         u8 current_duplex;
4061         int i, err;
4062
4063         tw32(MAC_EVENT, 0);
4064
4065         tw32_f(MAC_STATUS,
4066              (MAC_STATUS_SYNC_CHANGED |
4067               MAC_STATUS_CFG_CHANGED |
4068               MAC_STATUS_MI_COMPLETION |
4069               MAC_STATUS_LNKSTATE_CHANGED));
4070         udelay(40);
4071
4072         if ((tp->mi_mode & MAC_MI_MODE_AUTO_POLL) != 0) {
4073                 tw32_f(MAC_MI_MODE,
4074                      (tp->mi_mode & ~MAC_MI_MODE_AUTO_POLL));
4075                 udelay(80);
4076         }
4077
4078         tg3_phy_auxctl_write(tp, MII_TG3_AUXCTL_SHDWSEL_PWRCTL, 0);
4079
4080         /* Some third-party PHYs need to be reset on link going
4081          * down.
4082          */
4083         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
4084              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
4085              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) &&
4086             netif_carrier_ok(tp->dev)) {
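                /* BMSR latches link-down events; read it back-to-back
                 * (here and in the polls below) so bmsr reflects the
                 * current link state rather than a stale latched one.
                 */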
4087                 tg3_readphy(tp, MII_BMSR, &bmsr);
4088                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4089                     !(bmsr & BMSR_LSTATUS))
4090                         force_reset = 1;
4091         }
4092         if (force_reset)
4093                 tg3_phy_reset(tp);
4094
4095         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
4096                 tg3_readphy(tp, MII_BMSR, &bmsr);
4097                 if (tg3_readphy(tp, MII_BMSR, &bmsr) ||
4098                     !tg3_flag(tp, INIT_COMPLETE))
4099                         bmsr = 0;
4100
4101                 if (!(bmsr & BMSR_LSTATUS)) {
4102                         err = tg3_init_5401phy_dsp(tp);
4103                         if (err)
4104                                 return err;
4105
4106                         tg3_readphy(tp, MII_BMSR, &bmsr);
4107                         for (i = 0; i < 1000; i++) {
4108                                 udelay(10);
4109                                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4110                                     (bmsr & BMSR_LSTATUS)) {
4111                                         udelay(40);
4112                                         break;
4113                                 }
4114                         }
4115
4116                         if ((tp->phy_id & TG3_PHY_ID_REV_MASK) ==
4117                             TG3_PHY_REV_BCM5401_B0 &&
4118                             !(bmsr & BMSR_LSTATUS) &&
4119                             tp->link_config.active_speed == SPEED_1000) {
4120                                 err = tg3_phy_reset(tp);
4121                                 if (!err)
4122                                         err = tg3_init_5401phy_dsp(tp);
4123                                 if (err)
4124                                         return err;
4125                         }
4126                 }
4127         } else if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
4128                    tp->pci_chip_rev_id == CHIPREV_ID_5701_B0) {
4129                 /* 5701 {A0,B0} CRC bug workaround */
4130                 tg3_writephy(tp, 0x15, 0x0a75);
4131                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4132                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8d68);
4133                 tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x8c68);
4134         }
4135
4136         /* Clear pending interrupts... */
4137         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4138         tg3_readphy(tp, MII_TG3_ISTAT, &val);
4139
4140         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT)
4141                 tg3_writephy(tp, MII_TG3_IMASK, ~MII_TG3_INT_LINKCHG);
4142         else if (!(tp->phy_flags & TG3_PHYFLG_IS_FET))
4143                 tg3_writephy(tp, MII_TG3_IMASK, ~0);
4144
4145         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
4146             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
4147                 if (tp->led_ctrl == LED_CTRL_MODE_PHY_1)
4148                         tg3_writephy(tp, MII_TG3_EXT_CTRL,
4149                                      MII_TG3_EXT_CTRL_LNK3_LED_MODE);
4150                 else
4151                         tg3_writephy(tp, MII_TG3_EXT_CTRL, 0);
4152         }
4153
4154         current_link_up = 0;
4155         current_speed = SPEED_UNKNOWN;
4156         current_duplex = DUPLEX_UNKNOWN;
4157         tp->phy_flags &= ~TG3_PHYFLG_MDIX_STATE;
4158         tp->link_config.rmt_adv = 0;
4159
4160         if (tp->phy_flags & TG3_PHYFLG_CAPACITIVE_COUPLING) {
4161                 err = tg3_phy_auxctl_read(tp,
4162                                           MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4163                                           &val);
4164                 if (!err && !(val & (1 << 10))) {
4165                         tg3_phy_auxctl_write(tp,
4166                                              MII_TG3_AUXCTL_SHDWSEL_MISCTEST,
4167                                              val | (1 << 10));
4168                         goto relink;
4169                 }
4170         }
4171
4172         bmsr = 0;
4173         for (i = 0; i < 100; i++) {
4174                 tg3_readphy(tp, MII_BMSR, &bmsr);
4175                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
4176                     (bmsr & BMSR_LSTATUS))
4177                         break;
4178                 udelay(40);
4179         }
4180
4181         if (bmsr & BMSR_LSTATUS) {
4182                 u32 aux_stat, bmcr;
4183
4184                 tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat);
4185                 for (i = 0; i < 2000; i++) {
4186                         udelay(10);
4187                         if (!tg3_readphy(tp, MII_TG3_AUX_STAT, &aux_stat) &&
4188                             aux_stat)
4189                                 break;
4190                 }
4191
4192                 tg3_aux_stat_to_speed_duplex(tp, aux_stat,
4193                                              &current_speed,
4194                                              &current_duplex);
4195
4196                 bmcr = 0;
4197                 for (i = 0; i < 200; i++) {
4198                         tg3_readphy(tp, MII_BMCR, &bmcr);
4199                         if (tg3_readphy(tp, MII_BMCR, &bmcr))
4200                                 continue;
4201                         if (bmcr && bmcr != 0x7fff)
4202                                 break;
4203                         udelay(10);
4204                 }
4205
4206                 lcl_adv = 0;
4207                 rmt_adv = 0;
4208
4209                 tp->link_config.active_speed = current_speed;
4210                 tp->link_config.active_duplex = current_duplex;
4211
4212                 if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4213                         if ((bmcr & BMCR_ANENABLE) &&
4214                             tg3_phy_copper_an_config_ok(tp, &lcl_adv) &&
4215                             tg3_phy_copper_fetch_rmtadv(tp, &rmt_adv))
4216                                 current_link_up = 1;
4217                 } else {
4218                         if (!(bmcr & BMCR_ANENABLE) &&
4219                             tp->link_config.speed == current_speed &&
4220                             tp->link_config.duplex == current_duplex &&
4221                             tp->link_config.flowctrl ==
4222                             tp->link_config.active_flowctrl) {
4223                                 current_link_up = 1;
4224                         }
4225                 }
4226
4227                 if (current_link_up == 1 &&
4228                     tp->link_config.active_duplex == DUPLEX_FULL) {
4229                         u32 reg, bit;
4230
4231                         if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
4232                                 reg = MII_TG3_FET_GEN_STAT;
4233                                 bit = MII_TG3_FET_GEN_STAT_MDIXSTAT;
4234                         } else {
4235                                 reg = MII_TG3_EXT_STAT;
4236                                 bit = MII_TG3_EXT_STAT_MDIX;
4237                         }
4238
4239                         if (!tg3_readphy(tp, reg, &val) && (val & bit))
4240                                 tp->phy_flags |= TG3_PHYFLG_MDIX_STATE;
4241
4242                         tg3_setup_flow_control(tp, lcl_adv, rmt_adv);
4243                 }
4244         }
4245
4246 relink:
4247         if (current_link_up == 0 || (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)) {
4248                 tg3_phy_copper_begin(tp);
4249
4250                 tg3_readphy(tp, MII_BMSR, &bmsr);
4251                 if ((!tg3_readphy(tp, MII_BMSR, &bmsr) && (bmsr & BMSR_LSTATUS)) ||
4252                     (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
4253                         current_link_up = 1;
4254         }
4255
4256         tp->mac_mode &= ~MAC_MODE_PORT_MODE_MASK;
4257         if (current_link_up == 1) {
4258                 if (tp->link_config.active_speed == SPEED_100 ||
4259                     tp->link_config.active_speed == SPEED_10)
4260                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4261                 else
4262                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4263         } else if (tp->phy_flags & TG3_PHYFLG_IS_FET)
4264                 tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
4265         else
4266                 tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
4267
4268         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
4269         if (tp->link_config.active_duplex == DUPLEX_HALF)
4270                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
4271
4272         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
4273                 if (current_link_up == 1 &&
4274                     tg3_5700_link_polarity(tp, tp->link_config.active_speed))
4275                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
4276                 else
4277                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
4278         }
4279
4280         /* ??? Without this setting Netgear GA302T PHY does not
4281          * ??? send/receive packets...
4282          */
4283         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5411 &&
4284             tp->pci_chip_rev_id == CHIPREV_ID_5700_ALTIMA) {
4285                 tp->mi_mode |= MAC_MI_MODE_AUTO_POLL;
4286                 tw32_f(MAC_MI_MODE, tp->mi_mode);
4287                 udelay(80);
4288         }
4289
4290         tw32_f(MAC_MODE, tp->mac_mode);
4291         udelay(40);
4292
4293         tg3_phy_eee_adjust(tp, current_link_up);
4294
4295         if (tg3_flag(tp, USE_LINKCHG_REG)) {
4296                 /* Polled via timer. */
4297                 tw32_f(MAC_EVENT, 0);
4298         } else {
4299                 tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
4300         }
4301         udelay(40);
4302
4303         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 &&
4304             current_link_up == 1 &&
4305             tp->link_config.active_speed == SPEED_1000 &&
4306             (tg3_flag(tp, PCIX_MODE) || tg3_flag(tp, PCI_HIGH_SPEED))) {
4307                 udelay(120);
4308                 tw32_f(MAC_STATUS,
4309                      (MAC_STATUS_SYNC_CHANGED |
4310                       MAC_STATUS_CFG_CHANGED));
4311                 udelay(40);
4312                 tg3_write_mem(tp,
4313                               NIC_SRAM_FIRMWARE_MBOX,
4314                               NIC_SRAM_FIRMWARE_MBOX_MAGIC2);
4315         }
4316
4317         /* Prevent send BD corruption: disable CLKREQ while the link runs at 10 or 100 Mbps. */
4318         if (tg3_flag(tp, CLKREQ_BUG)) {
4319                 u16 oldlnkctl, newlnkctl;
4320
4321                 pci_read_config_word(tp->pdev,
4322                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
4323                                      &oldlnkctl);
4324                 if (tp->link_config.active_speed == SPEED_100 ||
4325                     tp->link_config.active_speed == SPEED_10)
4326                         newlnkctl = oldlnkctl & ~PCI_EXP_LNKCTL_CLKREQ_EN;
4327                 else
4328                         newlnkctl = oldlnkctl | PCI_EXP_LNKCTL_CLKREQ_EN;
4329                 if (newlnkctl != oldlnkctl)
4330                         pci_write_config_word(tp->pdev,
4331                                               pci_pcie_cap(tp->pdev) +
4332                                               PCI_EXP_LNKCTL, newlnkctl);
4333         }
4334
4335         if (current_link_up != netif_carrier_ok(tp->dev)) {
4336                 if (current_link_up)
4337                         netif_carrier_on(tp->dev);
4338                 else
4339                         netif_carrier_off(tp->dev);
4340                 tg3_link_report(tp);
4341         }
4342
4343         return 0;
4344 }
4345
4346 struct tg3_fiber_aneginfo {
4347         int state;
4348 #define ANEG_STATE_UNKNOWN              0
4349 #define ANEG_STATE_AN_ENABLE            1
4350 #define ANEG_STATE_RESTART_INIT         2
4351 #define ANEG_STATE_RESTART              3
4352 #define ANEG_STATE_DISABLE_LINK_OK      4
4353 #define ANEG_STATE_ABILITY_DETECT_INIT  5
4354 #define ANEG_STATE_ABILITY_DETECT       6
4355 #define ANEG_STATE_ACK_DETECT_INIT      7
4356 #define ANEG_STATE_ACK_DETECT           8
4357 #define ANEG_STATE_COMPLETE_ACK_INIT    9
4358 #define ANEG_STATE_COMPLETE_ACK         10
4359 #define ANEG_STATE_IDLE_DETECT_INIT     11
4360 #define ANEG_STATE_IDLE_DETECT          12
4361 #define ANEG_STATE_LINK_OK              13
4362 #define ANEG_STATE_NEXT_PAGE_WAIT_INIT  14
4363 #define ANEG_STATE_NEXT_PAGE_WAIT       15
4364
4365         u32 flags;
4366 #define MR_AN_ENABLE            0x00000001
4367 #define MR_RESTART_AN           0x00000002
4368 #define MR_AN_COMPLETE          0x00000004
4369 #define MR_PAGE_RX              0x00000008
4370 #define MR_NP_LOADED            0x00000010
4371 #define MR_TOGGLE_TX            0x00000020
4372 #define MR_LP_ADV_FULL_DUPLEX   0x00000040
4373 #define MR_LP_ADV_HALF_DUPLEX   0x00000080
4374 #define MR_LP_ADV_SYM_PAUSE     0x00000100
4375 #define MR_LP_ADV_ASYM_PAUSE    0x00000200
4376 #define MR_LP_ADV_REMOTE_FAULT1 0x00000400
4377 #define MR_LP_ADV_REMOTE_FAULT2 0x00000800
4378 #define MR_LP_ADV_NEXT_PAGE     0x00001000
4379 #define MR_TOGGLE_RX            0x00002000
4380 #define MR_NP_RX                0x00004000
4381
4382 #define MR_LINK_OK              0x80000000
4383
4384         unsigned long link_time, cur_time;
4385
4386         u32 ability_match_cfg;
4387         int ability_match_count;
4388
4389         char ability_match, idle_match, ack_match;
4390
4391         u32 txconfig, rxconfig;
4392 #define ANEG_CFG_NP             0x00000080
4393 #define ANEG_CFG_ACK            0x00000040
4394 #define ANEG_CFG_RF2            0x00000020
4395 #define ANEG_CFG_RF1            0x00000010
4396 #define ANEG_CFG_PS2            0x00000001
4397 #define ANEG_CFG_PS1            0x00008000
4398 #define ANEG_CFG_HD             0x00004000
4399 #define ANEG_CFG_FD             0x00002000
4400 #define ANEG_CFG_INVAL          0x00001f06
4401
4402 };
4403 #define ANEG_OK         0
4404 #define ANEG_DONE       1
4405 #define ANEG_TIMER_ENAB 2
4406 #define ANEG_FAILED     -1
4407
4408 #define ANEG_STATE_SETTLE_TIME  10000
4409
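/* Software autonegotiation state machine for 1000BASE-X fiber links,
 * loosely modelled on the IEEE 802.3 Clause 37 arbitration process.
 * fiber_autoneg() below cranks it roughly once per microsecond; the
 * return value is ANEG_DONE or ANEG_FAILED when arbitration finishes,
 * ANEG_TIMER_ENAB while a settle timer is running, else ANEG_OK.
 */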
4410 static int tg3_fiber_aneg_smachine(struct tg3 *tp,
4411                                    struct tg3_fiber_aneginfo *ap)
4412 {
4413         u16 flowctrl;
4414         unsigned long delta;
4415         u32 rx_cfg_reg;
4416         int ret;
4417
4418         if (ap->state == ANEG_STATE_UNKNOWN) {
4419                 ap->rxconfig = 0;
4420                 ap->link_time = 0;
4421                 ap->cur_time = 0;
4422                 ap->ability_match_cfg = 0;
4423                 ap->ability_match_count = 0;
4424                 ap->ability_match = 0;
4425                 ap->idle_match = 0;
4426                 ap->ack_match = 0;
4427         }
4428         ap->cur_time++;
4429
4430         if (tr32(MAC_STATUS) & MAC_STATUS_RCVD_CFG) {
4431                 rx_cfg_reg = tr32(MAC_RX_AUTO_NEG);
4432
4433                 if (rx_cfg_reg != ap->ability_match_cfg) {
4434                         ap->ability_match_cfg = rx_cfg_reg;
4435                         ap->ability_match = 0;
4436                         ap->ability_match_count = 0;
4437                 } else {
4438                         if (++ap->ability_match_count > 1) {
4439                                 ap->ability_match = 1;
4440                                 ap->ability_match_cfg = rx_cfg_reg;
4441                         }
4442                 }
4443                 if (rx_cfg_reg & ANEG_CFG_ACK)
4444                         ap->ack_match = 1;
4445                 else
4446                         ap->ack_match = 0;
4447
4448                 ap->idle_match = 0;
4449         } else {
4450                 ap->idle_match = 1;
4451                 ap->ability_match_cfg = 0;
4452                 ap->ability_match_count = 0;
4453                 ap->ability_match = 0;
4454                 ap->ack_match = 0;
4455
4456                 rx_cfg_reg = 0;
4457         }
4458
4459         ap->rxconfig = rx_cfg_reg;
4460         ret = ANEG_OK;
4461
4462         switch (ap->state) {
4463         case ANEG_STATE_UNKNOWN:
4464                 if (ap->flags & (MR_AN_ENABLE | MR_RESTART_AN))
4465                         ap->state = ANEG_STATE_AN_ENABLE;
4466
4467                 /* fallthru */
4468         case ANEG_STATE_AN_ENABLE:
4469                 ap->flags &= ~(MR_AN_COMPLETE | MR_PAGE_RX);
4470                 if (ap->flags & MR_AN_ENABLE) {
4471                         ap->link_time = 0;
4472                         ap->cur_time = 0;
4473                         ap->ability_match_cfg = 0;
4474                         ap->ability_match_count = 0;
4475                         ap->ability_match = 0;
4476                         ap->idle_match = 0;
4477                         ap->ack_match = 0;
4478
4479                         ap->state = ANEG_STATE_RESTART_INIT;
4480                 } else {
4481                         ap->state = ANEG_STATE_DISABLE_LINK_OK;
4482                 }
4483                 break;
4484
4485         case ANEG_STATE_RESTART_INIT:
4486                 ap->link_time = ap->cur_time;
4487                 ap->flags &= ~(MR_NP_LOADED);
4488                 ap->txconfig = 0;
4489                 tw32(MAC_TX_AUTO_NEG, 0);
4490                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4491                 tw32_f(MAC_MODE, tp->mac_mode);
4492                 udelay(40);
4493
4494                 ret = ANEG_TIMER_ENAB;
4495                 ap->state = ANEG_STATE_RESTART;
4496
4497                 /* fallthru */
4498         case ANEG_STATE_RESTART:
4499                 delta = ap->cur_time - ap->link_time;
4500                 if (delta > ANEG_STATE_SETTLE_TIME)
4501                         ap->state = ANEG_STATE_ABILITY_DETECT_INIT;
4502                 else
4503                         ret = ANEG_TIMER_ENAB;
4504                 break;
4505
4506         case ANEG_STATE_DISABLE_LINK_OK:
4507                 ret = ANEG_DONE;
4508                 break;
4509
4510         case ANEG_STATE_ABILITY_DETECT_INIT:
4511                 ap->flags &= ~(MR_TOGGLE_TX);
4512                 ap->txconfig = ANEG_CFG_FD;
4513                 flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4514                 if (flowctrl & ADVERTISE_1000XPAUSE)
4515                         ap->txconfig |= ANEG_CFG_PS1;
4516                 if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4517                         ap->txconfig |= ANEG_CFG_PS2;
4518                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4519                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4520                 tw32_f(MAC_MODE, tp->mac_mode);
4521                 udelay(40);
4522
4523                 ap->state = ANEG_STATE_ABILITY_DETECT;
4524                 break;
4525
4526         case ANEG_STATE_ABILITY_DETECT:
4527                 if (ap->ability_match != 0 && ap->rxconfig != 0)
4528                         ap->state = ANEG_STATE_ACK_DETECT_INIT;
4529                 break;
4530
4531         case ANEG_STATE_ACK_DETECT_INIT:
4532                 ap->txconfig |= ANEG_CFG_ACK;
4533                 tw32(MAC_TX_AUTO_NEG, ap->txconfig);
4534                 tp->mac_mode |= MAC_MODE_SEND_CONFIGS;
4535                 tw32_f(MAC_MODE, tp->mac_mode);
4536                 udelay(40);
4537
4538                 ap->state = ANEG_STATE_ACK_DETECT;
4539
4540                 /* fallthru */
4541         case ANEG_STATE_ACK_DETECT:
4542                 if (ap->ack_match != 0) {
4543                         if ((ap->rxconfig & ~ANEG_CFG_ACK) ==
4544                             (ap->ability_match_cfg & ~ANEG_CFG_ACK)) {
4545                                 ap->state = ANEG_STATE_COMPLETE_ACK_INIT;
4546                         } else {
4547                                 ap->state = ANEG_STATE_AN_ENABLE;
4548                         }
4549                 } else if (ap->ability_match != 0 &&
4550                            ap->rxconfig == 0) {
4551                         ap->state = ANEG_STATE_AN_ENABLE;
4552                 }
4553                 break;
4554
4555         case ANEG_STATE_COMPLETE_ACK_INIT:
4556                 if (ap->rxconfig & ANEG_CFG_INVAL) {
4557                         ret = ANEG_FAILED;
4558                         break;
4559                 }
4560                 ap->flags &= ~(MR_LP_ADV_FULL_DUPLEX |
4561                                MR_LP_ADV_HALF_DUPLEX |
4562                                MR_LP_ADV_SYM_PAUSE |
4563                                MR_LP_ADV_ASYM_PAUSE |
4564                                MR_LP_ADV_REMOTE_FAULT1 |
4565                                MR_LP_ADV_REMOTE_FAULT2 |
4566                                MR_LP_ADV_NEXT_PAGE |
4567                                MR_TOGGLE_RX |
4568                                MR_NP_RX);
4569                 if (ap->rxconfig & ANEG_CFG_FD)
4570                         ap->flags |= MR_LP_ADV_FULL_DUPLEX;
4571                 if (ap->rxconfig & ANEG_CFG_HD)
4572                         ap->flags |= MR_LP_ADV_HALF_DUPLEX;
4573                 if (ap->rxconfig & ANEG_CFG_PS1)
4574                         ap->flags |= MR_LP_ADV_SYM_PAUSE;
4575                 if (ap->rxconfig & ANEG_CFG_PS2)
4576                         ap->flags |= MR_LP_ADV_ASYM_PAUSE;
4577                 if (ap->rxconfig & ANEG_CFG_RF1)
4578                         ap->flags |= MR_LP_ADV_REMOTE_FAULT1;
4579                 if (ap->rxconfig & ANEG_CFG_RF2)
4580                         ap->flags |= MR_LP_ADV_REMOTE_FAULT2;
4581                 if (ap->rxconfig & ANEG_CFG_NP)
4582                         ap->flags |= MR_LP_ADV_NEXT_PAGE;
4583
4584                 ap->link_time = ap->cur_time;
4585
4586                 ap->flags ^= (MR_TOGGLE_TX);
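		/* Bit 0x0008 of the received config word presumably
		 * carries the link partner's next-page Toggle bit; the
		 * driver defines no named constant for it.
		 */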
4587                 if (ap->rxconfig & 0x0008)
4588                         ap->flags |= MR_TOGGLE_RX;
4589                 if (ap->rxconfig & ANEG_CFG_NP)
4590                         ap->flags |= MR_NP_RX;
4591                 ap->flags |= MR_PAGE_RX;
4592
4593                 ap->state = ANEG_STATE_COMPLETE_ACK;
4594                 ret = ANEG_TIMER_ENAB;
4595                 break;
4596
4597         case ANEG_STATE_COMPLETE_ACK:
4598                 if (ap->ability_match != 0 &&
4599                     ap->rxconfig == 0) {
4600                         ap->state = ANEG_STATE_AN_ENABLE;
4601                         break;
4602                 }
4603                 delta = ap->cur_time - ap->link_time;
4604                 if (delta > ANEG_STATE_SETTLE_TIME) {
4605                         if (!(ap->flags & (MR_LP_ADV_NEXT_PAGE))) {
4606                                 ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4607                         } else {
4608                                 if ((ap->txconfig & ANEG_CFG_NP) == 0 &&
4609                                     !(ap->flags & MR_NP_RX)) {
4610                                         ap->state = ANEG_STATE_IDLE_DETECT_INIT;
4611                                 } else {
4612                                         ret = ANEG_FAILED;
4613                                 }
4614                         }
4615                 }
4616                 break;
4617
4618         case ANEG_STATE_IDLE_DETECT_INIT:
4619                 ap->link_time = ap->cur_time;
4620                 tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4621                 tw32_f(MAC_MODE, tp->mac_mode);
4622                 udelay(40);
4623
4624                 ap->state = ANEG_STATE_IDLE_DETECT;
4625                 ret = ANEG_TIMER_ENAB;
4626                 break;
4627
4628         case ANEG_STATE_IDLE_DETECT:
4629                 if (ap->ability_match != 0 &&
4630                     ap->rxconfig == 0) {
4631                         ap->state = ANEG_STATE_AN_ENABLE;
4632                         break;
4633                 }
4634                 delta = ap->cur_time - ap->link_time;
4635                 if (delta > ANEG_STATE_SETTLE_TIME) {
4636                         /* XXX another gem from the Broadcom driver :( */
4637                         ap->state = ANEG_STATE_LINK_OK;
4638                 }
4639                 break;
4640
4641         case ANEG_STATE_LINK_OK:
4642                 ap->flags |= (MR_AN_COMPLETE | MR_LINK_OK);
4643                 ret = ANEG_DONE;
4644                 break;
4645
4646         case ANEG_STATE_NEXT_PAGE_WAIT_INIT:
4647                 /* ??? unimplemented */
4648                 break;
4649
4650         case ANEG_STATE_NEXT_PAGE_WAIT:
4651                 /* ??? unimplemented */
4652                 break;
4653
4654         default:
4655                 ret = ANEG_FAILED;
4656                 break;
4657         }
4658
4659         return ret;
4660 }
4661
4662 static int fiber_autoneg(struct tg3 *tp, u32 *txflags, u32 *rxflags)
4663 {
4664         int res = 0;
4665         struct tg3_fiber_aneginfo aninfo;
4666         int status = ANEG_FAILED;
4667         unsigned int tick;
4668         u32 tmp;
4669
4670         tw32_f(MAC_TX_AUTO_NEG, 0);
4671
4672         tmp = tp->mac_mode & ~MAC_MODE_PORT_MODE_MASK;
4673         tw32_f(MAC_MODE, tmp | MAC_MODE_PORT_MODE_GMII);
4674         udelay(40);
4675
4676         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_SEND_CONFIGS);
4677         udelay(40);
4678
4679         memset(&aninfo, 0, sizeof(aninfo));
4680         aninfo.flags |= MR_AN_ENABLE;
4681         aninfo.state = ANEG_STATE_UNKNOWN;
4682         aninfo.cur_time = 0;
4683         tick = 0;
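	/* Run the state machine for up to ~195 ms (one 1 us tick per
	 * loop iteration, ignoring loop overhead).
	 */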
4684         while (++tick < 195000) {
4685                 status = tg3_fiber_aneg_smachine(tp, &aninfo);
4686                 if (status == ANEG_DONE || status == ANEG_FAILED)
4687                         break;
4688
4689                 udelay(1);
4690         }
4691
4692         tp->mac_mode &= ~MAC_MODE_SEND_CONFIGS;
4693         tw32_f(MAC_MODE, tp->mac_mode);
4694         udelay(40);
4695
4696         *txflags = aninfo.txconfig;
4697         *rxflags = aninfo.flags;
4698
4699         if (status == ANEG_DONE &&
4700             (aninfo.flags & (MR_AN_COMPLETE | MR_LINK_OK |
4701                              MR_LP_ADV_FULL_DUPLEX)))
4702                 res = 1;
4703
4704         return res;
4705 }
4706
4707 static void tg3_init_bcm8002(struct tg3 *tp)
4708 {
4709         u32 mac_status = tr32(MAC_STATUS);
4710         int i;
4711
4712         /* Reset when initializing for the first time or when we have a link. */
4713         if (tg3_flag(tp, INIT_COMPLETE) &&
4714             !(mac_status & MAC_STATUS_PCS_SYNCED))
4715                 return;
4716
4717         /* Set PLL lock range. */
4718         tg3_writephy(tp, 0x16, 0x8007);
4719
4720         /* SW reset */
4721         tg3_writephy(tp, MII_BMCR, BMCR_RESET);
4722
4723         /* Wait for reset to complete. */
4724         /* XXX schedule_timeout() ... */
4725         for (i = 0; i < 500; i++)
4726                 udelay(10);
4727
4728         /* Config mode; select PMA/Ch 1 regs. */
4729         tg3_writephy(tp, 0x10, 0x8411);
4730
4731         /* Enable auto-lock and comdet, select txclk for tx. */
4732         tg3_writephy(tp, 0x11, 0x0a10);
4733
4734         tg3_writephy(tp, 0x18, 0x00a0);
4735         tg3_writephy(tp, 0x16, 0x41ff);
4736
4737         /* Assert and deassert POR. */
4738         tg3_writephy(tp, 0x13, 0x0400);
4739         udelay(40);
4740         tg3_writephy(tp, 0x13, 0x0000);
4741
4742         tg3_writephy(tp, 0x11, 0x0a50);
4743         udelay(40);
4744         tg3_writephy(tp, 0x11, 0x0a10);
4745
4746         /* Wait for signal to stabilize */
4747         /* XXX schedule_timeout() ... */
4748         for (i = 0; i < 15000; i++)
4749                 udelay(10);
4750
4751         /* Deselect the channel register so we can read the PHYID
4752          * later.
4753          */
4754         tg3_writephy(tp, 0x10, 0x8011);
4755 }
4756
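/* Fiber link setup using the hardware autoneg (SG_DIG) block.  When
 * autonegotiation is disabled, any running hardware autoneg is torn
 * down and the link is considered up as soon as the PCS reports sync.
 * When autonegotiation is enabled, the SG_DIG control register is
 * (re)programmed as needed and the result is taken from SG_DIG_STATUS,
 * falling back to parallel detection when the partner sends no config
 * code words.  Returns nonzero when the link is up.
 */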
4757 static int tg3_setup_fiber_hw_autoneg(struct tg3 *tp, u32 mac_status)
4758 {
4759         u16 flowctrl;
4760         u32 sg_dig_ctrl, sg_dig_status;
4761         u32 serdes_cfg, expected_sg_dig_ctrl;
4762         int workaround, port_a;
4763         int current_link_up;
4764
4765         serdes_cfg = 0;
4766         expected_sg_dig_ctrl = 0;
4767         workaround = 0;
4768         port_a = 1;
4769         current_link_up = 0;
4770
4771         if (tp->pci_chip_rev_id != CHIPREV_ID_5704_A0 &&
4772             tp->pci_chip_rev_id != CHIPREV_ID_5704_A1) {
4773                 workaround = 1;
4774                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
4775                         port_a = 0;
4776
4777                 /* preserve bits 0-11,13,14 for signal pre-emphasis */
4778                 /* preserve bits 20-23 for voltage regulator */
4779                 serdes_cfg = tr32(MAC_SERDES_CFG) & 0x00f06fff;
4780         }
4781
4782         sg_dig_ctrl = tr32(SG_DIG_CTRL);
4783
4784         if (tp->link_config.autoneg != AUTONEG_ENABLE) {
4785                 if (sg_dig_ctrl & SG_DIG_USING_HW_AUTONEG) {
4786                         if (workaround) {
4787                                 u32 val = serdes_cfg;
4788
4789                                 if (port_a)
4790                                         val |= 0xc010000;
4791                                 else
4792                                         val |= 0x4010000;
4793                                 tw32_f(MAC_SERDES_CFG, val);
4794                         }
4795
4796                         tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4797                 }
4798                 if (mac_status & MAC_STATUS_PCS_SYNCED) {
4799                         tg3_setup_flow_control(tp, 0, 0);
4800                         current_link_up = 1;
4801                 }
4802                 goto out;
4803         }
4804
4805         /* Want auto-negotiation.  */
4806         expected_sg_dig_ctrl = SG_DIG_USING_HW_AUTONEG | SG_DIG_COMMON_SETUP;
4807
4808         flowctrl = tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
4809         if (flowctrl & ADVERTISE_1000XPAUSE)
4810                 expected_sg_dig_ctrl |= SG_DIG_PAUSE_CAP;
4811         if (flowctrl & ADVERTISE_1000XPSE_ASYM)
4812                 expected_sg_dig_ctrl |= SG_DIG_ASYM_PAUSE;
4813
4814         if (sg_dig_ctrl != expected_sg_dig_ctrl) {
4815                 if ((tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT) &&
4816                     tp->serdes_counter &&
4817                     ((mac_status & (MAC_STATUS_PCS_SYNCED |
4818                                     MAC_STATUS_RCVD_CFG)) ==
4819                      MAC_STATUS_PCS_SYNCED)) {
4820                         tp->serdes_counter--;
4821                         current_link_up = 1;
4822                         goto out;
4823                 }
4824 restart_autoneg:
4825                 if (workaround)
4826                         tw32_f(MAC_SERDES_CFG, serdes_cfg | 0xc011000);
4827                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl | SG_DIG_SOFT_RESET);
4828                 udelay(5);
4829                 tw32_f(SG_DIG_CTRL, expected_sg_dig_ctrl);
4830
4831                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4832                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4833         } else if (mac_status & (MAC_STATUS_PCS_SYNCED |
4834                                  MAC_STATUS_SIGNAL_DET)) {
4835                 sg_dig_status = tr32(SG_DIG_STATUS);
4836                 mac_status = tr32(MAC_STATUS);
4837
4838                 if ((sg_dig_status & SG_DIG_AUTONEG_COMPLETE) &&
4839                     (mac_status & MAC_STATUS_PCS_SYNCED)) {
4840                         u32 local_adv = 0, remote_adv = 0;
4841
4842                         if (sg_dig_ctrl & SG_DIG_PAUSE_CAP)
4843                                 local_adv |= ADVERTISE_1000XPAUSE;
4844                         if (sg_dig_ctrl & SG_DIG_ASYM_PAUSE)
4845                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4846
4847                         if (sg_dig_status & SG_DIG_PARTNER_PAUSE_CAPABLE)
4848                                 remote_adv |= LPA_1000XPAUSE;
4849                         if (sg_dig_status & SG_DIG_PARTNER_ASYM_PAUSE)
4850                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4851
4852                         tp->link_config.rmt_adv =
4853                                            mii_adv_to_ethtool_adv_x(remote_adv);
4854
4855                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4856                         current_link_up = 1;
4857                         tp->serdes_counter = 0;
4858                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4859                 } else if (!(sg_dig_status & SG_DIG_AUTONEG_COMPLETE)) {
4860                         if (tp->serdes_counter)
4861                                 tp->serdes_counter--;
4862                         else {
4863                                 if (workaround) {
4864                                         u32 val = serdes_cfg;
4865
4866                                         if (port_a)
4867                                                 val |= 0xc010000;
4868                                         else
4869                                                 val |= 0x4010000;
4870
4871                                         tw32_f(MAC_SERDES_CFG, val);
4872                                 }
4873
4874                                 tw32_f(SG_DIG_CTRL, SG_DIG_COMMON_SETUP);
4875                                 udelay(40);
4876
4877                                 /* Parallel link detection: the link is
4878                                  * up only if we have PCS sync and are
4879                                  * not receiving config code words. */
4880                                 mac_status = tr32(MAC_STATUS);
4881                                 if ((mac_status & MAC_STATUS_PCS_SYNCED) &&
4882                                     !(mac_status & MAC_STATUS_RCVD_CFG)) {
4883                                         tg3_setup_flow_control(tp, 0, 0);
4884                                         current_link_up = 1;
4885                                         tp->phy_flags |=
4886                                                 TG3_PHYFLG_PARALLEL_DETECT;
4887                                         tp->serdes_counter =
4888                                                 SERDES_PARALLEL_DET_TIMEOUT;
4889                                 } else
4890                                         goto restart_autoneg;
4891                         }
4892                 }
4893         } else {
4894                 tp->serdes_counter = SERDES_AN_TIMEOUT_5704S;
4895                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
4896         }
4897
4898 out:
4899         return current_link_up;
4900 }
4901
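/* Fiber link setup without the hardware autoneg block: run the
 * software state machine via fiber_autoneg() when autonegotiation is
 * enabled, otherwise force a 1000 Mbps full-duplex link.  Returns
 * nonzero when the link is up.
 */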
4902 static int tg3_setup_fiber_by_hand(struct tg3 *tp, u32 mac_status)
4903 {
4904         int current_link_up = 0;
4905
4906         if (!(mac_status & MAC_STATUS_PCS_SYNCED))
4907                 goto out;
4908
4909         if (tp->link_config.autoneg == AUTONEG_ENABLE) {
4910                 u32 txflags, rxflags;
4911                 int i;
4912
4913                 if (fiber_autoneg(tp, &txflags, &rxflags)) {
4914                         u32 local_adv = 0, remote_adv = 0;
4915
4916                         if (txflags & ANEG_CFG_PS1)
4917                                 local_adv |= ADVERTISE_1000XPAUSE;
4918                         if (txflags & ANEG_CFG_PS2)
4919                                 local_adv |= ADVERTISE_1000XPSE_ASYM;
4920
4921                         if (rxflags & MR_LP_ADV_SYM_PAUSE)
4922                                 remote_adv |= LPA_1000XPAUSE;
4923                         if (rxflags & MR_LP_ADV_ASYM_PAUSE)
4924                                 remote_adv |= LPA_1000XPAUSE_ASYM;
4925
4926                         tp->link_config.rmt_adv =
4927                                            mii_adv_to_ethtool_adv_x(remote_adv);
4928
4929                         tg3_setup_flow_control(tp, local_adv, remote_adv);
4930
4931                         current_link_up = 1;
4932                 }
4933                 for (i = 0; i < 30; i++) {
4934                         udelay(20);
4935                         tw32_f(MAC_STATUS,
4936                                (MAC_STATUS_SYNC_CHANGED |
4937                                 MAC_STATUS_CFG_CHANGED));
4938                         udelay(40);
4939                         if ((tr32(MAC_STATUS) &
4940                              (MAC_STATUS_SYNC_CHANGED |
4941                               MAC_STATUS_CFG_CHANGED)) == 0)
4942                                 break;
4943                 }
4944
4945                 mac_status = tr32(MAC_STATUS);
4946                 if (current_link_up == 0 &&
4947                     (mac_status & MAC_STATUS_PCS_SYNCED) &&
4948                     !(mac_status & MAC_STATUS_RCVD_CFG))
4949                         current_link_up = 1;
4950         } else {
4951                 tg3_setup_flow_control(tp, 0, 0);
4952
4953                 /* Forcing 1000FD link up. */
4954                 current_link_up = 1;
4955
4956                 tw32_f(MAC_MODE, (tp->mac_mode | MAC_MODE_SEND_CONFIGS));
4957                 udelay(40);
4958
4959                 tw32_f(MAC_MODE, tp->mac_mode);
4960                 udelay(40);
4961         }
4962
4963 out:
4964         return current_link_up;
4965 }
4966
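/* Top-level link setup for TBI (1000BASE-X) fiber ports.  Bails out
 * early when software autoneg is in use and the link is already up and
 * clean; otherwise reprograms the MAC for TBI mode, runs hardware or
 * by-hand autonegotiation, and updates the LEDs and carrier state to
 * match the result.
 */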
4967 static int tg3_setup_fiber_phy(struct tg3 *tp, int force_reset)
4968 {
4969         u32 orig_pause_cfg;
4970         u16 orig_active_speed;
4971         u8 orig_active_duplex;
4972         u32 mac_status;
4973         int current_link_up;
4974         int i;
4975
4976         orig_pause_cfg = tp->link_config.active_flowctrl;
4977         orig_active_speed = tp->link_config.active_speed;
4978         orig_active_duplex = tp->link_config.active_duplex;
4979
4980         if (!tg3_flag(tp, HW_AUTONEG) &&
4981             netif_carrier_ok(tp->dev) &&
4982             tg3_flag(tp, INIT_COMPLETE)) {
4983                 mac_status = tr32(MAC_STATUS);
4984                 mac_status &= (MAC_STATUS_PCS_SYNCED |
4985                                MAC_STATUS_SIGNAL_DET |
4986                                MAC_STATUS_CFG_CHANGED |
4987                                MAC_STATUS_RCVD_CFG);
4988                 if (mac_status == (MAC_STATUS_PCS_SYNCED |
4989                                    MAC_STATUS_SIGNAL_DET)) {
4990                         tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
4991                                             MAC_STATUS_CFG_CHANGED));
4992                         return 0;
4993                 }
4994         }
4995
4996         tw32_f(MAC_TX_AUTO_NEG, 0);
4997
4998         tp->mac_mode &= ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
4999         tp->mac_mode |= MAC_MODE_PORT_MODE_TBI;
5000         tw32_f(MAC_MODE, tp->mac_mode);
5001         udelay(40);
5002
5003         if (tp->phy_id == TG3_PHY_ID_BCM8002)
5004                 tg3_init_bcm8002(tp);
5005
5006         /* Enable link change events even when polling the serdes. */
5007         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5008         udelay(40);
5009
5010         current_link_up = 0;
5011         tp->link_config.rmt_adv = 0;
5012         mac_status = tr32(MAC_STATUS);
5013
5014         if (tg3_flag(tp, HW_AUTONEG))
5015                 current_link_up = tg3_setup_fiber_hw_autoneg(tp, mac_status);
5016         else
5017                 current_link_up = tg3_setup_fiber_by_hand(tp, mac_status);
5018
5019         tp->napi[0].hw_status->status =
5020                 (SD_STATUS_UPDATED |
5021                  (tp->napi[0].hw_status->status & ~SD_STATUS_LINK_CHG));
5022
5023         for (i = 0; i < 100; i++) {
5024                 tw32_f(MAC_STATUS, (MAC_STATUS_SYNC_CHANGED |
5025                                     MAC_STATUS_CFG_CHANGED));
5026                 udelay(5);
5027                 if ((tr32(MAC_STATUS) & (MAC_STATUS_SYNC_CHANGED |
5028                                          MAC_STATUS_CFG_CHANGED |
5029                                          MAC_STATUS_LNKSTATE_CHANGED)) == 0)
5030                         break;
5031         }
5032
5033         mac_status = tr32(MAC_STATUS);
5034         if ((mac_status & MAC_STATUS_PCS_SYNCED) == 0) {
5035                 current_link_up = 0;
5036                 if (tp->link_config.autoneg == AUTONEG_ENABLE &&
5037                     tp->serdes_counter == 0) {
5038                         tw32_f(MAC_MODE, (tp->mac_mode |
5039                                           MAC_MODE_SEND_CONFIGS));
5040                         udelay(1);
5041                         tw32_f(MAC_MODE, tp->mac_mode);
5042                 }
5043         }
5044
5045         if (current_link_up == 1) {
5046                 tp->link_config.active_speed = SPEED_1000;
5047                 tp->link_config.active_duplex = DUPLEX_FULL;
5048                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5049                                     LED_CTRL_LNKLED_OVERRIDE |
5050                                     LED_CTRL_1000MBPS_ON));
5051         } else {
5052                 tp->link_config.active_speed = SPEED_UNKNOWN;
5053                 tp->link_config.active_duplex = DUPLEX_UNKNOWN;
5054                 tw32(MAC_LED_CTRL, (tp->led_ctrl |
5055                                     LED_CTRL_LNKLED_OVERRIDE |
5056                                     LED_CTRL_TRAFFIC_OVERRIDE));
5057         }
5058
5059         if (current_link_up != netif_carrier_ok(tp->dev)) {
5060                 if (current_link_up)
5061                         netif_carrier_on(tp->dev);
5062                 else
5063                         netif_carrier_off(tp->dev);
5064                 tg3_link_report(tp);
5065         } else {
5066                 u32 now_pause_cfg = tp->link_config.active_flowctrl;
5067                 if (orig_pause_cfg != now_pause_cfg ||
5068                     orig_active_speed != tp->link_config.active_speed ||
5069                     orig_active_duplex != tp->link_config.active_duplex)
5070                         tg3_link_report(tp);
5071         }
5072
5073         return 0;
5074 }
5075
5076 static int tg3_setup_fiber_mii_phy(struct tg3 *tp, int force_reset)
5077 {
5078         int current_link_up, err = 0;
5079         u32 bmsr, bmcr;
5080         u16 current_speed;
5081         u8 current_duplex;
5082         u32 local_adv, remote_adv;
5083
5084         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
5085         tw32_f(MAC_MODE, tp->mac_mode);
5086         udelay(40);
5087
5088         tw32(MAC_EVENT, 0);
5089
5090         tw32_f(MAC_STATUS,
5091              (MAC_STATUS_SYNC_CHANGED |
5092               MAC_STATUS_CFG_CHANGED |
5093               MAC_STATUS_MI_COMPLETION |
5094               MAC_STATUS_LNKSTATE_CHANGED));
5095         udelay(40);
5096
5097         if (force_reset)
5098                 tg3_phy_reset(tp);
5099
5100         current_link_up = 0;
5101         current_speed = SPEED_UNKNOWN;
5102         current_duplex = DUPLEX_UNKNOWN;
5103         tp->link_config.rmt_adv = 0;
5104
5105         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5106         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5107         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
5108                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5109                         bmsr |= BMSR_LSTATUS;
5110                 else
5111                         bmsr &= ~BMSR_LSTATUS;
5112         }
5113
5114         err |= tg3_readphy(tp, MII_BMCR, &bmcr);
5115
5116         if ((tp->link_config.autoneg == AUTONEG_ENABLE) && !force_reset &&
5117             (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5118                 /* do nothing, just check for link up at the end */
5119         } else if (tp->link_config.autoneg == AUTONEG_ENABLE) {
5120                 u32 adv, newadv;
5121
5122                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5123                 newadv = adv & ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF |
5124                                  ADVERTISE_1000XPAUSE |
5125                                  ADVERTISE_1000XPSE_ASYM |
5126                                  ADVERTISE_SLCT);
5127
5128                 newadv |= tg3_advert_flowctrl_1000X(tp->link_config.flowctrl);
5129                 newadv |= ethtool_adv_to_mii_adv_x(tp->link_config.advertising);
5130
5131                 if ((newadv != adv) || !(bmcr & BMCR_ANENABLE)) {
5132                         tg3_writephy(tp, MII_ADVERTISE, newadv);
5133                         bmcr |= BMCR_ANENABLE | BMCR_ANRESTART;
5134                         tg3_writephy(tp, MII_BMCR, bmcr);
5135
5136                         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5137                         tp->serdes_counter = SERDES_AN_TIMEOUT_5714S;
5138                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5139
5140                         return err;
5141                 }
5142         } else {
5143                 u32 new_bmcr;
5144
5145                 bmcr &= ~BMCR_SPEED1000;
5146                 new_bmcr = bmcr & ~(BMCR_ANENABLE | BMCR_FULLDPLX);
5147
5148                 if (tp->link_config.duplex == DUPLEX_FULL)
5149                         new_bmcr |= BMCR_FULLDPLX;
5150
5151                 if (new_bmcr != bmcr) {
5152                         /* BMCR_SPEED1000 is a reserved bit that needs
5153                          * to be set on write.
5154                          */
5155                         new_bmcr |= BMCR_SPEED1000;
5156
5157                         /* Force a linkdown */
5158                         if (netif_carrier_ok(tp->dev)) {
5159                                 u32 adv;
5160
5161                                 err |= tg3_readphy(tp, MII_ADVERTISE, &adv);
5162                                 adv &= ~(ADVERTISE_1000XFULL |
5163                                          ADVERTISE_1000XHALF |
5164                                          ADVERTISE_SLCT);
5165                                 tg3_writephy(tp, MII_ADVERTISE, adv);
5166                                 tg3_writephy(tp, MII_BMCR, bmcr |
5167                                                            BMCR_ANRESTART |
5168                                                            BMCR_ANENABLE);
5169                                 udelay(10);
5170                                 netif_carrier_off(tp->dev);
5171                         }
5172                         tg3_writephy(tp, MII_BMCR, new_bmcr);
5173                         bmcr = new_bmcr;
5174                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5175                         err |= tg3_readphy(tp, MII_BMSR, &bmsr);
5176                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
5177                             ASIC_REV_5714) {
5178                                 if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
5179                                         bmsr |= BMSR_LSTATUS;
5180                                 else
5181                                         bmsr &= ~BMSR_LSTATUS;
5182                         }
5183                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5184                 }
5185         }
5186
5187         if (bmsr & BMSR_LSTATUS) {
5188                 current_speed = SPEED_1000;
5189                 current_link_up = 1;
5190                 if (bmcr & BMCR_FULLDPLX)
5191                         current_duplex = DUPLEX_FULL;
5192                 else
5193                         current_duplex = DUPLEX_HALF;
5194
5195                 local_adv = 0;
5196                 remote_adv = 0;
5197
5198                 if (bmcr & BMCR_ANENABLE) {
5199                         u32 common;
5200
5201                         err |= tg3_readphy(tp, MII_ADVERTISE, &local_adv);
5202                         err |= tg3_readphy(tp, MII_LPA, &remote_adv);
5203                         common = local_adv & remote_adv;
5204                         if (common & (ADVERTISE_1000XHALF |
5205                                       ADVERTISE_1000XFULL)) {
5206                                 if (common & ADVERTISE_1000XFULL)
5207                                         current_duplex = DUPLEX_FULL;
5208                                 else
5209                                         current_duplex = DUPLEX_HALF;
5210
5211                                 tp->link_config.rmt_adv =
5212                                            mii_adv_to_ethtool_adv_x(remote_adv);
5213                         } else if (!tg3_flag(tp, 5780_CLASS)) {
5214                                 /* Link is up via parallel detect */
5215                         } else {
5216                                 current_link_up = 0;
5217                         }
5218                 }
5219         }
5220
5221         if (current_link_up == 1 && current_duplex == DUPLEX_FULL)
5222                 tg3_setup_flow_control(tp, local_adv, remote_adv);
5223
5224         tp->mac_mode &= ~MAC_MODE_HALF_DUPLEX;
5225         if (tp->link_config.active_duplex == DUPLEX_HALF)
5226                 tp->mac_mode |= MAC_MODE_HALF_DUPLEX;
5227
5228         tw32_f(MAC_MODE, tp->mac_mode);
5229         udelay(40);
5230
5231         tw32_f(MAC_EVENT, MAC_EVENT_LNKSTATE_CHANGED);
5232
5233         tp->link_config.active_speed = current_speed;
5234         tp->link_config.active_duplex = current_duplex;
5235
5236         if (current_link_up != netif_carrier_ok(tp->dev)) {
5237                 if (current_link_up)
5238                         netif_carrier_on(tp->dev);
5239                 else {
5240                         netif_carrier_off(tp->dev);
5241                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5242                 }
5243                 tg3_link_report(tp);
5244         }
5245         return err;
5246 }
5247
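/* Called periodically (once tp->serdes_counter expires) to handle
 * parallel detection on MII serdes ports: force the link up when we
 * see signal detect without config code words, and hand control back
 * to autonegotiation once the partner starts sending config code
 * words again.
 */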
5248 static void tg3_serdes_parallel_detect(struct tg3 *tp)
5249 {
5250         if (tp->serdes_counter) {
5251                 /* Give autoneg time to complete. */
5252                 tp->serdes_counter--;
5253                 return;
5254         }
5255
5256         if (!netif_carrier_ok(tp->dev) &&
5257             (tp->link_config.autoneg == AUTONEG_ENABLE)) {
5258                 u32 bmcr;
5259
5260                 tg3_readphy(tp, MII_BMCR, &bmcr);
5261                 if (bmcr & BMCR_ANENABLE) {
5262                         u32 phy1, phy2;
5263
5264                         /* Select shadow register 0x1f */
5265                         tg3_writephy(tp, MII_TG3_MISC_SHDW, 0x7c00);
5266                         tg3_readphy(tp, MII_TG3_MISC_SHDW, &phy1);
5267
5268                         /* Select expansion interrupt status register */
5269                         tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5270                                          MII_TG3_DSP_EXP1_INT_STAT);
5271                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5272                         tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5273
5274                         if ((phy1 & 0x10) && !(phy2 & 0x20)) {
5275                                 /* We have signal detect and not receiving
5276                                  * config code words, link is up by parallel
5277                                  * detection.
5278                                  */
5279
5280                                 bmcr &= ~BMCR_ANENABLE;
5281                                 bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
5282                                 tg3_writephy(tp, MII_BMCR, bmcr);
5283                                 tp->phy_flags |= TG3_PHYFLG_PARALLEL_DETECT;
5284                         }
5285                 }
5286         } else if (netif_carrier_ok(tp->dev) &&
5287                    (tp->link_config.autoneg == AUTONEG_ENABLE) &&
5288                    (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT)) {
5289                 u32 phy2;
5290
5291                 /* Select expansion interrupt status register */
5292                 tg3_writephy(tp, MII_TG3_DSP_ADDRESS,
5293                                  MII_TG3_DSP_EXP1_INT_STAT);
5294                 tg3_readphy(tp, MII_TG3_DSP_RW_PORT, &phy2);
5295                 if (phy2 & 0x20) {
5296                         u32 bmcr;
5297
5298                         /* Config code words received, turn on autoneg. */
5299                         tg3_readphy(tp, MII_BMCR, &bmcr);
5300                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANENABLE);
5301
5302                         tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
5303
5304                 }
5305         }
5306 }
5307
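/* Dispatch link setup to the copper, fiber, or fiber-MII handler
 * based on PHY type, then apply post-link housekeeping: the 5784-AX
 * clock prescaler, transmit slot time and IPG, statistics coalescing,
 * and the ASPM power-management threshold workaround.
 */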
5308 static int tg3_setup_phy(struct tg3 *tp, int force_reset)
5309 {
5310         u32 val;
5311         int err;
5312
5313         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
5314                 err = tg3_setup_fiber_phy(tp, force_reset);
5315         else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
5316                 err = tg3_setup_fiber_mii_phy(tp, force_reset);
5317         else
5318                 err = tg3_setup_copper_phy(tp, force_reset);
5319
5320         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
5321                 u32 scale;
5322
5323                 val = tr32(TG3_CPMU_CLCK_STAT) & CPMU_CLCK_STAT_MAC_CLCK_MASK;
5324                 if (val == CPMU_CLCK_STAT_MAC_CLCK_62_5)
5325                         scale = 65;
5326                 else if (val == CPMU_CLCK_STAT_MAC_CLCK_6_25)
5327                         scale = 6;
5328                 else
5329                         scale = 12;
5330
5331                 val = tr32(GRC_MISC_CFG) & ~GRC_MISC_CFG_PRESCALAR_MASK;
5332                 val |= (scale << GRC_MISC_CFG_PRESCALAR_SHIFT);
5333                 tw32(GRC_MISC_CFG, val);
5334         }
5335
5336         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
5337               (6 << TX_LENGTHS_IPG_SHIFT);
5338         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
5339                 val |= tr32(MAC_TX_LENGTHS) &
5340                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
5341                         TX_LENGTHS_CNT_DWN_VAL_MSK);
5342
5343         if (tp->link_config.active_speed == SPEED_1000 &&
5344             tp->link_config.active_duplex == DUPLEX_HALF)
5345                 tw32(MAC_TX_LENGTHS, val |
5346                      (0xff << TX_LENGTHS_SLOT_TIME_SHIFT));
5347         else
5348                 tw32(MAC_TX_LENGTHS, val |
5349                      (32 << TX_LENGTHS_SLOT_TIME_SHIFT));
5350
5351         if (!tg3_flag(tp, 5705_PLUS)) {
5352                 if (netif_carrier_ok(tp->dev)) {
5353                         tw32(HOSTCC_STAT_COAL_TICKS,
5354                              tp->coal.stats_block_coalesce_usecs);
5355                 } else {
5356                         tw32(HOSTCC_STAT_COAL_TICKS, 0);
5357                 }
5358         }
5359
5360         if (tg3_flag(tp, ASPM_WORKAROUND)) {
5361                 val = tr32(PCIE_PWR_MGMT_THRESH);
5362                 if (!netif_carrier_ok(tp->dev))
5363                         val = (val & ~PCIE_PWR_MGMT_L1_THRESH_MSK) |
5364                               tp->pwrmgmt_thresh;
5365                 else
5366                         val |= PCIE_PWR_MGMT_L1_THRESH_MSK;
5367                 tw32(PCIE_PWR_MGMT_THRESH, val);
5368         }
5369
5370         return err;
5371 }
5372
5373 static inline int tg3_irq_sync(struct tg3 *tp)
5374 {
5375         return tp->irq_sync;
5376 }
5377
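/* Copy 'len' bytes of registers starting at offset 'off' into the
 * register-dump buffer, at the same offset within the buffer.
 */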
5378 static inline void tg3_rd32_loop(struct tg3 *tp, u32 *dst, u32 off, u32 len)
5379 {
5380         int i;
5381
5382         dst = (u32 *)((u8 *)dst + off);
5383         for (i = 0; i < len; i += sizeof(u32))
5384                 *dst++ = tr32(off + i);
5385 }
5386
5387 static void tg3_dump_legacy_regs(struct tg3 *tp, u32 *regs)
5388 {
5389         tg3_rd32_loop(tp, regs, TG3PCI_VENDOR, 0xb0);
5390         tg3_rd32_loop(tp, regs, MAILBOX_INTERRUPT_0, 0x200);
5391         tg3_rd32_loop(tp, regs, MAC_MODE, 0x4f0);
5392         tg3_rd32_loop(tp, regs, SNDDATAI_MODE, 0xe0);
5393         tg3_rd32_loop(tp, regs, SNDDATAC_MODE, 0x04);
5394         tg3_rd32_loop(tp, regs, SNDBDS_MODE, 0x80);
5395         tg3_rd32_loop(tp, regs, SNDBDI_MODE, 0x48);
5396         tg3_rd32_loop(tp, regs, SNDBDC_MODE, 0x04);
5397         tg3_rd32_loop(tp, regs, RCVLPC_MODE, 0x20);
5398         tg3_rd32_loop(tp, regs, RCVLPC_SELLST_BASE, 0x15c);
5399         tg3_rd32_loop(tp, regs, RCVDBDI_MODE, 0x0c);
5400         tg3_rd32_loop(tp, regs, RCVDBDI_JUMBO_BD, 0x3c);
5401         tg3_rd32_loop(tp, regs, RCVDBDI_BD_PROD_IDX_0, 0x44);
5402         tg3_rd32_loop(tp, regs, RCVDCC_MODE, 0x04);
5403         tg3_rd32_loop(tp, regs, RCVBDI_MODE, 0x20);
5404         tg3_rd32_loop(tp, regs, RCVCC_MODE, 0x14);
5405         tg3_rd32_loop(tp, regs, RCVLSC_MODE, 0x08);
5406         tg3_rd32_loop(tp, regs, MBFREE_MODE, 0x08);
5407         tg3_rd32_loop(tp, regs, HOSTCC_MODE, 0x100);
5408
5409         if (tg3_flag(tp, SUPPORT_MSIX))
5410                 tg3_rd32_loop(tp, regs, HOSTCC_RXCOL_TICKS_VEC1, 0x180);
5411
5412         tg3_rd32_loop(tp, regs, MEMARB_MODE, 0x10);
5413         tg3_rd32_loop(tp, regs, BUFMGR_MODE, 0x58);
5414         tg3_rd32_loop(tp, regs, RDMAC_MODE, 0x08);
5415         tg3_rd32_loop(tp, regs, WDMAC_MODE, 0x08);
5416         tg3_rd32_loop(tp, regs, RX_CPU_MODE, 0x04);
5417         tg3_rd32_loop(tp, regs, RX_CPU_STATE, 0x04);
5418         tg3_rd32_loop(tp, regs, RX_CPU_PGMCTR, 0x04);
5419         tg3_rd32_loop(tp, regs, RX_CPU_HWBKPT, 0x04);
5420
5421         if (!tg3_flag(tp, 5705_PLUS)) {
5422                 tg3_rd32_loop(tp, regs, TX_CPU_MODE, 0x04);
5423                 tg3_rd32_loop(tp, regs, TX_CPU_STATE, 0x04);
5424                 tg3_rd32_loop(tp, regs, TX_CPU_PGMCTR, 0x04);
5425         }
5426
5427         tg3_rd32_loop(tp, regs, GRCMBOX_INTERRUPT_0, 0x110);
5428         tg3_rd32_loop(tp, regs, FTQ_RESET, 0x120);
5429         tg3_rd32_loop(tp, regs, MSGINT_MODE, 0x0c);
5430         tg3_rd32_loop(tp, regs, DMAC_MODE, 0x04);
5431         tg3_rd32_loop(tp, regs, GRC_MODE, 0x4c);
5432
5433         if (tg3_flag(tp, NVRAM))
5434                 tg3_rd32_loop(tp, regs, NVRAM_CMD, 0x24);
5435 }
5436
5437 static void tg3_dump_state(struct tg3 *tp)
5438 {
5439         int i;
5440         u32 *regs;
5441
5442         regs = kzalloc(TG3_REG_BLK_SIZE, GFP_ATOMIC);
5443         if (!regs) {
5444                 netdev_err(tp->dev, "Failed allocating register dump buffer\n");
5445                 return;
5446         }
5447
5448         if (tg3_flag(tp, PCI_EXPRESS)) {
5449                 /* Read up to but not including private PCI registers */
5450                 for (i = 0; i < TG3_PCIE_TLDLPL_PORT; i += sizeof(u32))
5451                         regs[i / sizeof(u32)] = tr32(i);
5452         } else
5453                 tg3_dump_legacy_regs(tp, regs);
5454
5455         for (i = 0; i < TG3_REG_BLK_SIZE / sizeof(u32); i += 4) {
5456                 if (!regs[i + 0] && !regs[i + 1] &&
5457                     !regs[i + 2] && !regs[i + 3])
5458                         continue;
5459
5460                 netdev_err(tp->dev, "0x%08x: 0x%08x, 0x%08x, 0x%08x, 0x%08x\n",
5461                            i * 4,
5462                            regs[i + 0], regs[i + 1], regs[i + 2], regs[i + 3]);
5463         }
5464
5465         kfree(regs);
5466
5467         for (i = 0; i < tp->irq_cnt; i++) {
5468                 struct tg3_napi *tnapi = &tp->napi[i];
5469
5470                 /* SW status block */
5471                 netdev_err(tp->dev,
5472                          "%d: Host status block [%08x:%08x:(%04x:%04x:%04x):(%04x:%04x)]\n",
5473                            i,
5474                            tnapi->hw_status->status,
5475                            tnapi->hw_status->status_tag,
5476                            tnapi->hw_status->rx_jumbo_consumer,
5477                            tnapi->hw_status->rx_consumer,
5478                            tnapi->hw_status->rx_mini_consumer,
5479                            tnapi->hw_status->idx[0].rx_producer,
5480                            tnapi->hw_status->idx[0].tx_consumer);
5481
5482                 netdev_err(tp->dev,
5483                 "%d: NAPI info [%08x:%08x:(%04x:%04x:%04x):%04x:(%04x:%04x:%04x:%04x)]\n",
5484                            i,
5485                            tnapi->last_tag, tnapi->last_irq_tag,
5486                            tnapi->tx_prod, tnapi->tx_cons, tnapi->tx_pending,
5487                            tnapi->rx_rcb_ptr,
5488                            tnapi->prodring.rx_std_prod_idx,
5489                            tnapi->prodring.rx_std_cons_idx,
5490                            tnapi->prodring.rx_jmb_prod_idx,
5491                            tnapi->prodring.rx_jmb_cons_idx);
5492         }
5493 }
5494
5495 /* This is called whenever we suspect that the system chipset is re-
5496  * ordering the sequence of MMIO to the tx send mailbox. The symptom
5497  * is bogus tx completions. We try to recover by setting the
5498  * TG3_FLAG_MBOX_WRITE_REORDER flag and resetting the chip later
5499  * in the workqueue.
5500  */
5501 static void tg3_tx_recover(struct tg3 *tp)
5502 {
5503         BUG_ON(tg3_flag(tp, MBOX_WRITE_REORDER) ||
5504                tp->write32_tx_mbox == tg3_write_indirect_mbox);
5505
5506         netdev_warn(tp->dev,
5507                     "The system may be re-ordering memory-mapped I/O "
5508                     "cycles to the network device, attempting to recover. "
5509                     "Please report the problem to the driver maintainer "
5510                     "and include system chipset information.\n");
5511
5512         spin_lock(&tp->lock);
5513         tg3_flag_set(tp, TX_RECOVERY_PENDING);
5514         spin_unlock(&tp->lock);
5515 }
5516
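/* Free descriptors = pending minus in flight, where the in-flight
 * count (tx_prod - tx_cons) & (TG3_TX_RING_SIZE - 1) is safe across
 * index wraparound because the ring size is a power of two and the
 * indices are unsigned.  E.g. with a 512-entry ring, prod = 3 and
 * cons = 510 gives (3 - 510) & 511 = 5 descriptors in flight.
 */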
5517 static inline u32 tg3_tx_avail(struct tg3_napi *tnapi)
5518 {
5519         /* Tell compiler to fetch tx indices from memory. */
5520         barrier();
5521         return tnapi->tx_pending -
5522                ((tnapi->tx_prod - tnapi->tx_cons) & (TG3_TX_RING_SIZE - 1));
5523 }
5524
5525 /* Tigon3 never reports partial packet sends.  So we do not
5526  * need special logic to handle SKBs that have not had all
5527  * of their frags sent yet, like SunGEM does.
5528  */
5529 static void tg3_tx(struct tg3_napi *tnapi)
5530 {
5531         struct tg3 *tp = tnapi->tp;
5532         u32 hw_idx = tnapi->hw_status->idx[0].tx_consumer;
5533         u32 sw_idx = tnapi->tx_cons;
5534         struct netdev_queue *txq;
5535         int index = tnapi - tp->napi;
5536         unsigned int pkts_compl = 0, bytes_compl = 0;
5537
5538         if (tg3_flag(tp, ENABLE_TSS))
5539                 index--;
5540
5541         txq = netdev_get_tx_queue(tp->dev, index);
5542
5543         while (sw_idx != hw_idx) {
5544                 struct tg3_tx_ring_info *ri = &tnapi->tx_buffers[sw_idx];
5545                 struct sk_buff *skb = ri->skb;
5546                 int i, tx_bug = 0;
5547
5548                 if (unlikely(skb == NULL)) {
5549                         tg3_tx_recover(tp);
5550                         return;
5551                 }
5552
5553                 pci_unmap_single(tp->pdev,
5554                                  dma_unmap_addr(ri, mapping),
5555                                  skb_headlen(skb),
5556                                  PCI_DMA_TODEVICE);
5557
5558                 ri->skb = NULL;
5559
5560                 while (ri->fragmented) {
5561                         ri->fragmented = false;
5562                         sw_idx = NEXT_TX(sw_idx);
5563                         ri = &tnapi->tx_buffers[sw_idx];
5564                 }
5565
5566                 sw_idx = NEXT_TX(sw_idx);
5567
5568                 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
5569                         ri = &tnapi->tx_buffers[sw_idx];
5570                         if (unlikely(ri->skb != NULL || sw_idx == hw_idx))
5571                                 tx_bug = 1;
5572
5573                         pci_unmap_page(tp->pdev,
5574                                        dma_unmap_addr(ri, mapping),
5575                                        skb_frag_size(&skb_shinfo(skb)->frags[i]),
5576                                        PCI_DMA_TODEVICE);
5577
5578                         while (ri->fragmented) {
5579                                 ri->fragmented = false;
5580                                 sw_idx = NEXT_TX(sw_idx);
5581                                 ri = &tnapi->tx_buffers[sw_idx];
5582                         }
5583
5584                         sw_idx = NEXT_TX(sw_idx);
5585                 }
5586
5587                 pkts_compl++;
5588                 bytes_compl += skb->len;
5589
5590                 dev_kfree_skb(skb);
5591
5592                 if (unlikely(tx_bug)) {
5593                         tg3_tx_recover(tp);
5594                         return;
5595                 }
5596         }
5597
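	/* Report completions to the stack's byte queue limits (BQL) so
	 * it can bound the amount of data queued on this tx queue.
	 */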
5598         netdev_tx_completed_queue(txq, pkts_compl, bytes_compl);
5599
5600         tnapi->tx_cons = sw_idx;
5601
5602         /* Need to make the tx_cons update visible to tg3_start_xmit()
5603          * before checking for netif_queue_stopped().  Without the
5604          * memory barrier, there is a small possibility that tg3_start_xmit()
5605          * will miss it and cause the queue to be stopped forever.
5606          */
5607         smp_mb();
5608
5609         if (unlikely(netif_tx_queue_stopped(txq) &&
5610                      (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))) {
5611                 __netif_tx_lock(txq, smp_processor_id());
5612                 if (netif_tx_queue_stopped(txq) &&
5613                     (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi)))
5614                         netif_tx_wake_queue(txq);
5615                 __netif_tx_unlock(txq);
5616         }
5617 }
5618
5619 static void tg3_rx_data_free(struct tg3 *tp, struct ring_info *ri, u32 map_sz)
5620 {
5621         if (!ri->data)
5622                 return;
5623
5624         pci_unmap_single(tp->pdev, dma_unmap_addr(ri, mapping),
5625                          map_sz, PCI_DMA_FROMDEVICE);
5626         kfree(ri->data);
5627         ri->data = NULL;
5628 }
5629
5630 /* Returns the size of the data buffer mapped for DMA, or < 0 on error.
5631  *
5632  * We only need to fill in the address because the other members
5633  * of the RX descriptor are invariant, see tg3_init_rings.
5634  *
5635  * Note the purposeful asymmetry of cpu vs. chip accesses.  For
5636  * posting buffers we only dirty the first cache line of the RX
5637  * descriptor (containing the address).  Whereas for the RX status
5638  * buffers the cpu only reads the last cacheline of the RX descriptor
5639  * (to fetch the error flags, vlan tag, checksum, and opaque cookie).
5640  */
5641 static int tg3_alloc_rx_data(struct tg3 *tp, struct tg3_rx_prodring_set *tpr,
5642                             u32 opaque_key, u32 dest_idx_unmasked)
5643 {
5644         struct tg3_rx_buffer_desc *desc;
5645         struct ring_info *map;
5646         u8 *data;
5647         dma_addr_t mapping;
5648         int skb_size, data_size, dest_idx;
5649
5650         switch (opaque_key) {
5651         case RXD_OPAQUE_RING_STD:
5652                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5653                 desc = &tpr->rx_std[dest_idx];
5654                 map = &tpr->rx_std_buffers[dest_idx];
5655                 data_size = tp->rx_pkt_map_sz;
5656                 break;
5657
5658         case RXD_OPAQUE_RING_JUMBO:
5659                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5660                 desc = &tpr->rx_jmb[dest_idx].std;
5661                 map = &tpr->rx_jmb_buffers[dest_idx];
5662                 data_size = TG3_RX_JMB_MAP_SZ;
5663                 break;
5664
5665         default:
5666                 return -EINVAL;
5667         }
5668
5669         /* Do not overwrite any of the map or rp information
5670          * until we are sure we can commit to a new buffer.
5671          *
5672          * Callers depend upon this behavior and assume that
5673          * we leave everything unchanged if we fail.
5674          */
5675         skb_size = SKB_DATA_ALIGN(data_size + TG3_RX_OFFSET(tp)) +
5676                    SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
5677         data = kmalloc(skb_size, GFP_ATOMIC);
5678         if (!data)
5679                 return -ENOMEM;
5680
5681         mapping = pci_map_single(tp->pdev,
5682                                  data + TG3_RX_OFFSET(tp),
5683                                  data_size,
5684                                  PCI_DMA_FROMDEVICE);
5685         if (pci_dma_mapping_error(tp->pdev, mapping)) {
5686                 kfree(data);
5687                 return -EIO;
5688         }
5689
5690         map->data = data;
5691         dma_unmap_addr_set(map, mapping, mapping);
5692
5693         desc->addr_hi = ((u64)mapping >> 32);
5694         desc->addr_lo = ((u64)mapping & 0xffffffff);
5695
5696         return data_size;
5697 }
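/* Worked example of the size computation above, with illustrative
 * numbers rather than values taken from any specific chip: assuming
 * TG3_RX_OFFSET(tp) == 64 and a standard-ring map size of 1536 bytes,
 *
 *   skb_size = SKB_DATA_ALIGN(1536 + 64) +
 *              SKB_DATA_ALIGN(sizeof(struct skb_shared_info));
 *
 * The first term covers the DMA area plus headroom; the second reserves
 * the tail room that build_skb() needs for the shared info block, which
 * lets tg3_rx() later wrap this buffer in an skb without copying.
 */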
5698
5699 /* We only need to move over in the address because the other
5700  * members of the RX descriptor are invariant.  See notes above
5701  * tg3_alloc_rx_data for full details.
5702  */
5703 static void tg3_recycle_rx(struct tg3_napi *tnapi,
5704                            struct tg3_rx_prodring_set *dpr,
5705                            u32 opaque_key, int src_idx,
5706                            u32 dest_idx_unmasked)
5707 {
5708         struct tg3 *tp = tnapi->tp;
5709         struct tg3_rx_buffer_desc *src_desc, *dest_desc;
5710         struct ring_info *src_map, *dest_map;
5711         struct tg3_rx_prodring_set *spr = &tp->napi[0].prodring;
5712         int dest_idx;
5713
5714         switch (opaque_key) {
5715         case RXD_OPAQUE_RING_STD:
5716                 dest_idx = dest_idx_unmasked & tp->rx_std_ring_mask;
5717                 dest_desc = &dpr->rx_std[dest_idx];
5718                 dest_map = &dpr->rx_std_buffers[dest_idx];
5719                 src_desc = &spr->rx_std[src_idx];
5720                 src_map = &spr->rx_std_buffers[src_idx];
5721                 break;
5722
5723         case RXD_OPAQUE_RING_JUMBO:
5724                 dest_idx = dest_idx_unmasked & tp->rx_jmb_ring_mask;
5725                 dest_desc = &dpr->rx_jmb[dest_idx].std;
5726                 dest_map = &dpr->rx_jmb_buffers[dest_idx];
5727                 src_desc = &spr->rx_jmb[src_idx].std;
5728                 src_map = &spr->rx_jmb_buffers[src_idx];
5729                 break;
5730
5731         default:
5732                 return;
5733         }
5734
5735         dest_map->data = src_map->data;
5736         dma_unmap_addr_set(dest_map, mapping,
5737                            dma_unmap_addr(src_map, mapping));
5738         dest_desc->addr_hi = src_desc->addr_hi;
5739         dest_desc->addr_lo = src_desc->addr_lo;
5740
5741         /* Ensure that the update to the data pointer happens after the
5742          * physical addresses have been transferred to the new BD location.
5743          */
5744         smp_wmb();
5745
5746         src_map->data = NULL;
5747 }
5748
5749 /* The RX ring scheme is composed of multiple rings which post fresh
5750  * buffers to the chip, and one special ring the chip uses to report
5751  * status back to the host.
5752  *
5753  * The special ring reports the status of received packets to the
5754  * host.  The chip does not write into the original descriptor the
5755  * RX buffer was obtained from.  The chip simply takes the original
5756  * descriptor as provided by the host, updates the status and length
5757  * field, then writes this into the next status ring entry.
5758  *
5759  * Each ring the host uses to post buffers to the chip is described
5760  * by a TG3_BDINFO entry in the chip's SRAM area.  When a packet arrives,
5761  * it is first placed into the on-chip ram.  When the packet's length
5762  * is known, it walks down the TG3_BDINFO entries to select the ring.
5763  * Each TG3_BDINFO specifies a MAXLEN field, and the first TG3_BDINFO
5764  * whose MAXLEN covers the new packet's length is chosen.
5765  *
5766  * The "separate ring for rx status" scheme may sound queer, but it makes
5767  * sense from a cache coherency perspective.  If only the host writes
5768  * to the buffer post rings, and only the chip writes to the rx status
5769  * rings, then cache lines never move beyond shared-modified state.
5770  * If both the host and chip were to write into the same ring, cache line
5771  * eviction could occur since both entities want it in an exclusive state.
5772  */
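/* Rough picture of the scheme just described (sketch only):
 *
 *   host writes                         chip writes
 *   -----------                         -----------
 *   std producer ring   --\
 *   jumbo producer ring --+--> chip --> return (status) ring --> host reads
 *
 * The host advances its rings through the tw32_rx_mbox() producer
 * mailboxes; the chip-written return ring is consumed below and
 * acknowledged through tnapi->consmbox.
 */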
5773 static int tg3_rx(struct tg3_napi *tnapi, int budget)
5774 {
5775         struct tg3 *tp = tnapi->tp;
5776         u32 work_mask, rx_std_posted = 0;
5777         u32 std_prod_idx, jmb_prod_idx;
5778         u32 sw_idx = tnapi->rx_rcb_ptr;
5779         u16 hw_idx;
5780         int received;
5781         struct tg3_rx_prodring_set *tpr = &tnapi->prodring;
5782
5783         hw_idx = *(tnapi->rx_rcb_prod_idx);
5784         /*
5785          * We need to order the read of hw_idx and the read of
5786          * the opaque cookie.
5787          */
5788         rmb();
5789         work_mask = 0;
5790         received = 0;
5791         std_prod_idx = tpr->rx_std_prod_idx;
5792         jmb_prod_idx = tpr->rx_jmb_prod_idx;
5793         while (sw_idx != hw_idx && budget > 0) {
5794                 struct ring_info *ri;
5795                 struct tg3_rx_buffer_desc *desc = &tnapi->rx_rcb[sw_idx];
5796                 unsigned int len;
5797                 struct sk_buff *skb;
5798                 dma_addr_t dma_addr;
5799                 u32 opaque_key, desc_idx, *post_ptr;
5800                 u8 *data;
5801
5802                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
5803                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
5804                 if (opaque_key == RXD_OPAQUE_RING_STD) {
5805                         ri = &tp->napi[0].prodring.rx_std_buffers[desc_idx];
5806                         dma_addr = dma_unmap_addr(ri, mapping);
5807                         data = ri->data;
5808                         post_ptr = &std_prod_idx;
5809                         rx_std_posted++;
5810                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
5811                         ri = &tp->napi[0].prodring.rx_jmb_buffers[desc_idx];
5812                         dma_addr = dma_unmap_addr(ri, mapping);
5813                         data = ri->data;
5814                         post_ptr = &jmb_prod_idx;
5815                 } else
5816                         goto next_pkt_nopost;
5817
5818                 work_mask |= opaque_key;
5819
5820                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
5821                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII)) {
5822                 drop_it:
5823                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5824                                        desc_idx, *post_ptr);
5825                 drop_it_no_recycle:
5826                         /* Other statistics are kept track of by the card. */
5827                         tp->rx_dropped++;
5828                         goto next_pkt;
5829                 }
5830
5831                 prefetch(data + TG3_RX_OFFSET(tp));
5832                 len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT) -
5833                       ETH_FCS_LEN;
5834
5835                 if (len > TG3_RX_COPY_THRESH(tp)) {
5836                         int skb_size;
5837
5838                         skb_size = tg3_alloc_rx_data(tp, tpr, opaque_key,
5839                                                     *post_ptr);
5840                         if (skb_size < 0)
5841                                 goto drop_it;
5842
5843                         pci_unmap_single(tp->pdev, dma_addr, skb_size,
5844                                          PCI_DMA_FROMDEVICE);
5845
5846                         skb = build_skb(data);
5847                         if (!skb) {
5848                                 kfree(data);
5849                                 goto drop_it_no_recycle;
5850                         }
5851                         skb_reserve(skb, TG3_RX_OFFSET(tp));
5852                         /* Ensure that the update to the data happens
5853                          * after the usage of the old DMA mapping.
5854                          */
5855                         smp_wmb();
5856
5857                         ri->data = NULL;
5858
5859                 } else {
5860                         tg3_recycle_rx(tnapi, tpr, opaque_key,
5861                                        desc_idx, *post_ptr);
5862
5863                         skb = netdev_alloc_skb(tp->dev,
5864                                                len + TG3_RAW_IP_ALIGN);
5865                         if (skb == NULL)
5866                                 goto drop_it_no_recycle;
5867
5868                         skb_reserve(skb, TG3_RAW_IP_ALIGN);
5869                         pci_dma_sync_single_for_cpu(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5870                         memcpy(skb->data,
5871                                data + TG3_RX_OFFSET(tp),
5872                                len);
5873                         pci_dma_sync_single_for_device(tp->pdev, dma_addr, len, PCI_DMA_FROMDEVICE);
5874                 }
5875
5876                 skb_put(skb, len);
5877                 if ((tp->dev->features & NETIF_F_RXCSUM) &&
5878                     (desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
5879                     (((desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
5880                       >> RXD_TCPCSUM_SHIFT) == 0xffff))
5881                         skb->ip_summed = CHECKSUM_UNNECESSARY;
5882                 else
5883                         skb_checksum_none_assert(skb);
5884
5885                 skb->protocol = eth_type_trans(skb, tp->dev);
5886
5887                 if (len > (tp->dev->mtu + ETH_HLEN) &&
5888                     skb->protocol != htons(ETH_P_8021Q)) {
5889                         dev_kfree_skb(skb);
5890                         goto drop_it_no_recycle;
5891                 }
5892
5893                 if (desc->type_flags & RXD_FLAG_VLAN &&
5894                     !(tp->rx_mode & RX_MODE_KEEP_VLAN_TAG))
5895                         __vlan_hwaccel_put_tag(skb,
5896                                                desc->err_vlan & RXD_VLAN_MASK);
5897
5898                 napi_gro_receive(&tnapi->napi, skb);
5899
5900                 received++;
5901                 budget--;
5902
5903 next_pkt:
5904                 (*post_ptr)++;
5905
5906                 if (unlikely(rx_std_posted >= tp->rx_std_max_post)) {
5907                         tpr->rx_std_prod_idx = std_prod_idx &
5908                                                tp->rx_std_ring_mask;
5909                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5910                                      tpr->rx_std_prod_idx);
5911                         work_mask &= ~RXD_OPAQUE_RING_STD;
5912                         rx_std_posted = 0;
5913                 }
5914 next_pkt_nopost:
5915                 sw_idx++;
5916                 sw_idx &= tp->rx_ret_ring_mask;
5917
5918                 /* Refresh hw_idx to see if there is new work */
5919                 if (sw_idx == hw_idx) {
5920                         hw_idx = *(tnapi->rx_rcb_prod_idx);
5921                         rmb();
5922                 }
5923         }
5924
5925         /* ACK the status ring. */
5926         tnapi->rx_rcb_ptr = sw_idx;
5927         tw32_rx_mbox(tnapi->consmbox, sw_idx);
5928
5929         /* Refill RX ring(s). */
5930         if (!tg3_flag(tp, ENABLE_RSS)) {
5931                 /* Sync BD data before updating mailbox */
5932                 wmb();
5933
5934                 if (work_mask & RXD_OPAQUE_RING_STD) {
5935                         tpr->rx_std_prod_idx = std_prod_idx &
5936                                                tp->rx_std_ring_mask;
5937                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
5938                                      tpr->rx_std_prod_idx);
5939                 }
5940                 if (work_mask & RXD_OPAQUE_RING_JUMBO) {
5941                         tpr->rx_jmb_prod_idx = jmb_prod_idx &
5942                                                tp->rx_jmb_ring_mask;
5943                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
5944                                      tpr->rx_jmb_prod_idx);
5945                 }
5946                 mmiowb();
5947         } else if (work_mask) {
5948                 /* rx_std_buffers[] and rx_jmb_buffers[] entries must be
5949                  * updated before the producer indices can be updated.
5950                  */
5951                 smp_wmb();
5952
5953                 tpr->rx_std_prod_idx = std_prod_idx & tp->rx_std_ring_mask;
5954                 tpr->rx_jmb_prod_idx = jmb_prod_idx & tp->rx_jmb_ring_mask;
5955
5956                 if (tnapi != &tp->napi[1])
5957                         napi_schedule(&tp->napi[1].napi);
5958         }
5959
5960         return received;
5961 }
5962
5963 static void tg3_poll_link(struct tg3 *tp)
5964 {
5965         /* handle link change and other phy events */
5966         if (!(tg3_flag(tp, USE_LINKCHG_REG) || tg3_flag(tp, POLL_SERDES))) {
5967                 struct tg3_hw_status *sblk = tp->napi[0].hw_status;
5968
5969                 if (sblk->status & SD_STATUS_LINK_CHG) {
5970                         sblk->status = SD_STATUS_UPDATED |
5971                                        (sblk->status & ~SD_STATUS_LINK_CHG);
5972                         spin_lock(&tp->lock);
5973                         if (tg3_flag(tp, USE_PHYLIB)) {
5974                                 tw32_f(MAC_STATUS,
5975                                      (MAC_STATUS_SYNC_CHANGED |
5976                                       MAC_STATUS_CFG_CHANGED |
5977                                       MAC_STATUS_MI_COMPLETION |
5978                                       MAC_STATUS_LNKSTATE_CHANGED));
5979                                 udelay(40);
5980                         } else
5981                                 tg3_setup_phy(tp, 0);
5982                         spin_unlock(&tp->lock);
5983                 }
5984         }
5985 }
5986
5987 static int tg3_rx_prodring_xfer(struct tg3 *tp,
5988                                 struct tg3_rx_prodring_set *dpr,
5989                                 struct tg3_rx_prodring_set *spr)
5990 {
5991         u32 si, di, cpycnt, src_prod_idx;
5992         int i, err = 0;
5993
5994         while (1) {
5995                 src_prod_idx = spr->rx_std_prod_idx;
5996
5997                 /* Make sure updates to the rx_std_buffers[] entries and the
5998                  * standard producer index are seen in the correct order.
5999                  */
6000                 smp_rmb();
6001
6002                 if (spr->rx_std_cons_idx == src_prod_idx)
6003                         break;
6004
6005                 if (spr->rx_std_cons_idx < src_prod_idx)
6006                         cpycnt = src_prod_idx - spr->rx_std_cons_idx;
6007                 else
6008                         cpycnt = tp->rx_std_ring_mask + 1 -
6009                                  spr->rx_std_cons_idx;
6010
6011                 cpycnt = min(cpycnt,
6012                              tp->rx_std_ring_mask + 1 - dpr->rx_std_prod_idx);
6013
6014                 si = spr->rx_std_cons_idx;
6015                 di = dpr->rx_std_prod_idx;
6016
6017                 for (i = di; i < di + cpycnt; i++) {
6018                         if (dpr->rx_std_buffers[i].data) {
6019                                 cpycnt = i - di;
6020                                 err = -ENOSPC;
6021                                 break;
6022                         }
6023                 }
6024
6025                 if (!cpycnt)
6026                         break;
6027
6028                 /* Ensure that updates to the rx_std_buffers ring and the
6029                  * shadowed hardware producer ring from tg3_recycle_rx() are
6030                  * ordered correctly WRT the buffer check above.
6031                  */
6032                 smp_rmb();
6033
6034                 memcpy(&dpr->rx_std_buffers[di],
6035                        &spr->rx_std_buffers[si],
6036                        cpycnt * sizeof(struct ring_info));
6037
6038                 for (i = 0; i < cpycnt; i++, di++, si++) {
6039                         struct tg3_rx_buffer_desc *sbd, *dbd;
6040                         sbd = &spr->rx_std[si];
6041                         dbd = &dpr->rx_std[di];
6042                         dbd->addr_hi = sbd->addr_hi;
6043                         dbd->addr_lo = sbd->addr_lo;
6044                 }
6045
6046                 spr->rx_std_cons_idx = (spr->rx_std_cons_idx + cpycnt) &
6047                                        tp->rx_std_ring_mask;
6048                 dpr->rx_std_prod_idx = (dpr->rx_std_prod_idx + cpycnt) &
6049                                        tp->rx_std_ring_mask;
6050         }
6051
6052         while (1) {
6053                 src_prod_idx = spr->rx_jmb_prod_idx;
6054
6055                 /* Make sure updates to the rx_jmb_buffers[] entries and
6056                  * the jumbo producer index are seen in the correct order.
6057                  */
6058                 smp_rmb();
6059
6060                 if (spr->rx_jmb_cons_idx == src_prod_idx)
6061                         break;
6062
6063                 if (spr->rx_jmb_cons_idx < src_prod_idx)
6064                         cpycnt = src_prod_idx - spr->rx_jmb_cons_idx;
6065                 else
6066                         cpycnt = tp->rx_jmb_ring_mask + 1 -
6067                                  spr->rx_jmb_cons_idx;
6068
6069                 cpycnt = min(cpycnt,
6070                              tp->rx_jmb_ring_mask + 1 - dpr->rx_jmb_prod_idx);
6071
6072                 si = spr->rx_jmb_cons_idx;
6073                 di = dpr->rx_jmb_prod_idx;
6074
6075                 for (i = di; i < di + cpycnt; i++) {
6076                         if (dpr->rx_jmb_buffers[i].data) {
6077                                 cpycnt = i - di;
6078                                 err = -ENOSPC;
6079                                 break;
6080                         }
6081                 }
6082
6083                 if (!cpycnt)
6084                         break;
6085
6086                 /* Ensure that updates to the rx_jmb_buffers ring and the
6087                  * shadowed hardware producer ring from tg3_recycle_rx() are
6088                  * ordered correctly WRT the buffer check above.
6089                  */
6090                 smp_rmb();
6091
6092                 memcpy(&dpr->rx_jmb_buffers[di],
6093                        &spr->rx_jmb_buffers[si],
6094                        cpycnt * sizeof(struct ring_info));
6095
6096                 for (i = 0; i < cpycnt; i++, di++, si++) {
6097                         struct tg3_rx_buffer_desc *sbd, *dbd;
6098                         sbd = &spr->rx_jmb[si].std;
6099                         dbd = &dpr->rx_jmb[di].std;
6100                         dbd->addr_hi = sbd->addr_hi;
6101                         dbd->addr_lo = sbd->addr_lo;
6102                 }
6103
6104                 spr->rx_jmb_cons_idx = (spr->rx_jmb_cons_idx + cpycnt) &
6105                                        tp->rx_jmb_ring_mask;
6106                 dpr->rx_jmb_prod_idx = (dpr->rx_jmb_prod_idx + cpycnt) &
6107                                        tp->rx_jmb_ring_mask;
6108         }
6109
6110         return err;
6111 }
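/* Worked example for the standard-ring copy loop above, with
 * illustrative numbers: given a ring mask of 511, a source consumer
 * index of 510, and a source producer index of 4, the consumer sits
 * "ahead" of the producer, so
 *
 *   cpycnt = tp->rx_std_ring_mask + 1 - spr->rx_std_cons_idx;   // == 2
 *
 * copies only the two entries up to the ring wrap; the next loop pass
 * restarts at index 0 and transfers the remaining four.  The min()
 * against the destination producer index keeps the copy from
 * overrunning dpr in the same way.
 */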
6112
6113 static int tg3_poll_work(struct tg3_napi *tnapi, int work_done, int budget)
6114 {
6115         struct tg3 *tp = tnapi->tp;
6116
6117         /* run TX completion thread */
6118         if (tnapi->hw_status->idx[0].tx_consumer != tnapi->tx_cons) {
6119                 tg3_tx(tnapi);
6120                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6121                         return work_done;
6122         }
6123
6124         /* run RX thread, within the bounds set by NAPI.
6125          * All RX "locking" is done by ensuring outside
6126          * code synchronizes with tg3->napi.poll()
6127          */
6128         if (*(tnapi->rx_rcb_prod_idx) != tnapi->rx_rcb_ptr)
6129                 work_done += tg3_rx(tnapi, budget - work_done);
6130
6131         if (tg3_flag(tp, ENABLE_RSS) && tnapi == &tp->napi[1]) {
6132                 struct tg3_rx_prodring_set *dpr = &tp->napi[0].prodring;
6133                 int i, err = 0;
6134                 u32 std_prod_idx = dpr->rx_std_prod_idx;
6135                 u32 jmb_prod_idx = dpr->rx_jmb_prod_idx;
6136
6137                 for (i = 1; i < tp->irq_cnt; i++)
6138                         err |= tg3_rx_prodring_xfer(tp, dpr,
6139                                                     &tp->napi[i].prodring);
6140
6141                 wmb();
6142
6143                 if (std_prod_idx != dpr->rx_std_prod_idx)
6144                         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG,
6145                                      dpr->rx_std_prod_idx);
6146
6147                 if (jmb_prod_idx != dpr->rx_jmb_prod_idx)
6148                         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG,
6149                                      dpr->rx_jmb_prod_idx);
6150
6151                 mmiowb();
6152
6153                 if (err)
6154                         tw32_f(HOSTCC_MODE, tp->coal_now);
6155         }
6156
6157         return work_done;
6158 }
6159
6160 static inline void tg3_reset_task_schedule(struct tg3 *tp)
6161 {
6162         if (!test_and_set_bit(TG3_FLAG_RESET_TASK_PENDING, tp->tg3_flags))
6163                 schedule_work(&tp->reset_task);
6164 }
6165
6166 static inline void tg3_reset_task_cancel(struct tg3 *tp)
6167 {
6168         cancel_work_sync(&tp->reset_task);
6169         tg3_flag_clear(tp, RESET_TASK_PENDING);
6170         tg3_flag_clear(tp, TX_RECOVERY_PENDING);
6171 }
6172
6173 static int tg3_poll_msix(struct napi_struct *napi, int budget)
6174 {
6175         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6176         struct tg3 *tp = tnapi->tp;
6177         int work_done = 0;
6178         struct tg3_hw_status *sblk = tnapi->hw_status;
6179
6180         while (1) {
6181                 work_done = tg3_poll_work(tnapi, work_done, budget);
6182
6183                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6184                         goto tx_recovery;
6185
6186                 if (unlikely(work_done >= budget))
6187                         break;
6188
6189                 /* tnapi->last_tag is written to the interrupt mailbox below
6190                  * to tell the hw how much work has been processed,
6191                  * so we must read it before checking for more work.
6192                  */
6193                 tnapi->last_tag = sblk->status_tag;
6194                 tnapi->last_irq_tag = tnapi->last_tag;
6195                 rmb();
6196
6197                 /* check for RX/TX work to do */
6198                 if (likely(sblk->idx[0].tx_consumer == tnapi->tx_cons &&
6199                            *(tnapi->rx_rcb_prod_idx) == tnapi->rx_rcb_ptr)) {
6200                         napi_complete(napi);
6201                         /* Reenable interrupts. */
6202                         tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
6203                         mmiowb();
6204                         break;
6205                 }
6206         }
6207
6208         return work_done;
6209
6210 tx_recovery:
6211         /* work_done is guaranteed to be less than budget. */
6212         napi_complete(napi);
6213         tg3_reset_task_schedule(tp);
6214         return work_done;
6215 }
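/* Condensed sketch of the tagged-status handshake used above
 * (illustrative only).  Writing the last processed tag back to the
 * interrupt mailbox tells the chip how far the driver has caught up:
 *
 *   tnapi->last_tag = sblk->status_tag;    // snapshot before re-check
 *   rmb();                                 // order tag read vs. ring reads
 *   ...confirm no new tx/rx work...
 *   tw32_mailbox(tnapi->int_mbox, tnapi->last_tag << 24);
 *
 * If the chip advanced status_tag in the meantime, the mailbox write
 * immediately re-arms an interrupt instead of losing the event.
 */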
6216
6217 static void tg3_process_error(struct tg3 *tp)
6218 {
6219         u32 val;
6220         bool real_error = false;
6221
6222         if (tg3_flag(tp, ERROR_PROCESSED))
6223                 return;
6224
6225         /* Check Flow Attention register */
6226         val = tr32(HOSTCC_FLOW_ATTN);
6227         if (val & ~HOSTCC_FLOW_ATTN_MBUF_LWM) {
6228                 netdev_err(tp->dev, "FLOW Attention error.  Resetting chip.\n");
6229                 real_error = true;
6230         }
6231
6232         if (tr32(MSGINT_STATUS) & ~MSGINT_STATUS_MSI_REQ) {
6233                 netdev_err(tp->dev, "MSI Status error.  Resetting chip.\n");
6234                 real_error = true;
6235         }
6236
6237         if (tr32(RDMAC_STATUS) || tr32(WDMAC_STATUS)) {
6238                 netdev_err(tp->dev, "DMA Status error.  Resetting chip.\n");
6239                 real_error = true;
6240         }
6241
6242         if (!real_error)
6243                 return;
6244
6245         tg3_dump_state(tp);
6246
6247         tg3_flag_set(tp, ERROR_PROCESSED);
6248         tg3_reset_task_schedule(tp);
6249 }
6250
6251 static int tg3_poll(struct napi_struct *napi, int budget)
6252 {
6253         struct tg3_napi *tnapi = container_of(napi, struct tg3_napi, napi);
6254         struct tg3 *tp = tnapi->tp;
6255         int work_done = 0;
6256         struct tg3_hw_status *sblk = tnapi->hw_status;
6257
6258         while (1) {
6259                 if (sblk->status & SD_STATUS_ERROR)
6260                         tg3_process_error(tp);
6261
6262                 tg3_poll_link(tp);
6263
6264                 work_done = tg3_poll_work(tnapi, work_done, budget);
6265
6266                 if (unlikely(tg3_flag(tp, TX_RECOVERY_PENDING)))
6267                         goto tx_recovery;
6268
6269                 if (unlikely(work_done >= budget))
6270                         break;
6271
6272                 if (tg3_flag(tp, TAGGED_STATUS)) {
6273                         /* tp->last_tag is used in tg3_int_reenable() below
6274                          * to tell the hw how much work has been processed,
6275                          * so we must read it before checking for more work.
6276                          */
6277                         tnapi->last_tag = sblk->status_tag;
6278                         tnapi->last_irq_tag = tnapi->last_tag;
6279                         rmb();
6280                 } else
6281                         sblk->status &= ~SD_STATUS_UPDATED;
6282
6283                 if (likely(!tg3_has_work(tnapi))) {
6284                         napi_complete(napi);
6285                         tg3_int_reenable(tnapi);
6286                         break;
6287                 }
6288         }
6289
6290         return work_done;
6291
6292 tx_recovery:
6293         /* work_done is guaranteed to be less than budget. */
6294         napi_complete(napi);
6295         tg3_reset_task_schedule(tp);
6296         return work_done;
6297 }
6298
6299 static void tg3_napi_disable(struct tg3 *tp)
6300 {
6301         int i;
6302
6303         for (i = tp->irq_cnt - 1; i >= 0; i--)
6304                 napi_disable(&tp->napi[i].napi);
6305 }
6306
6307 static void tg3_napi_enable(struct tg3 *tp)
6308 {
6309         int i;
6310
6311         for (i = 0; i < tp->irq_cnt; i++)
6312                 napi_enable(&tp->napi[i].napi);
6313 }
6314
6315 static void tg3_napi_init(struct tg3 *tp)
6316 {
6317         int i;
6318
6319         netif_napi_add(tp->dev, &tp->napi[0].napi, tg3_poll, 64);
6320         for (i = 1; i < tp->irq_cnt; i++)
6321                 netif_napi_add(tp->dev, &tp->napi[i].napi, tg3_poll_msix, 64);
6322 }
6323
6324 static void tg3_napi_fini(struct tg3 *tp)
6325 {
6326         int i;
6327
6328         for (i = 0; i < tp->irq_cnt; i++)
6329                 netif_napi_del(&tp->napi[i].napi);
6330 }
6331
6332 static inline void tg3_netif_stop(struct tg3 *tp)
6333 {
6334         tp->dev->trans_start = jiffies; /* prevent tx timeout */
6335         tg3_napi_disable(tp);
6336         netif_tx_disable(tp->dev);
6337 }
6338
6339 static inline void tg3_netif_start(struct tg3 *tp)
6340 {
6341         /* NOTE: unconditional netif_tx_wake_all_queues is only
6342          * appropriate so long as all callers are assured to
6343          * have free tx slots (such as after tg3_init_hw)
6344          */
6345         netif_tx_wake_all_queues(tp->dev);
6346
6347         tg3_napi_enable(tp);
6348         tp->napi[0].hw_status->status |= SD_STATUS_UPDATED;
6349         tg3_enable_ints(tp);
6350 }
6351
6352 static void tg3_irq_quiesce(struct tg3 *tp)
6353 {
6354         int i;
6355
6356         BUG_ON(tp->irq_sync);
6357
6358         tp->irq_sync = 1;
6359         smp_mb();
6360
6361         for (i = 0; i < tp->irq_cnt; i++)
6362                 synchronize_irq(tp->napi[i].irq_vec);
6363 }
6364
6365 /* Fully shut down all tg3 driver activity elsewhere in the system.
6366  * If irq_sync is non-zero, the IRQ handlers must be quiesced as well.
6367  * Most of the time this is not necessary, except when shutting down
6368  * the device.
6369  */
6370 static inline void tg3_full_lock(struct tg3 *tp, int irq_sync)
6371 {
6372         spin_lock_bh(&tp->lock);
6373         if (irq_sync)
6374                 tg3_irq_quiesce(tp);
6375 }
6376
6377 static inline void tg3_full_unlock(struct tg3 *tp)
6378 {
6379         spin_unlock_bh(&tp->lock);
6380 }
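/* Typical usage sketch, mirroring the callers elsewhere in this file
 * (illustrative, not new driver logic):
 *
 *   tg3_full_lock(tp, 1);      // also quiesces and syncs all IRQ vectors
 *   ...reconfigure the hardware...
 *   tg3_full_unlock(tp);
 *
 * Passing irq_sync == 0 takes only tp->lock, which suffices when the
 * caller cannot race with the interrupt handlers.
 */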
6381
6382 /* One-shot MSI handler - Chip automatically disables interrupt
6383  * after sending MSI so driver doesn't have to do it.
6384  */
6385 static irqreturn_t tg3_msi_1shot(int irq, void *dev_id)
6386 {
6387         struct tg3_napi *tnapi = dev_id;
6388         struct tg3 *tp = tnapi->tp;
6389
6390         prefetch(tnapi->hw_status);
6391         if (tnapi->rx_rcb)
6392                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6393
6394         if (likely(!tg3_irq_sync(tp)))
6395                 napi_schedule(&tnapi->napi);
6396
6397         return IRQ_HANDLED;
6398 }
6399
6400 /* MSI ISR - No need to check for interrupt sharing and no need to
6401  * flush status block and interrupt mailbox. PCI ordering rules
6402  * guarantee that MSI will arrive after the status block.
6403  */
6404 static irqreturn_t tg3_msi(int irq, void *dev_id)
6405 {
6406         struct tg3_napi *tnapi = dev_id;
6407         struct tg3 *tp = tnapi->tp;
6408
6409         prefetch(tnapi->hw_status);
6410         if (tnapi->rx_rcb)
6411                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6412         /*
6413          * Writing any value to intr-mbox-0 clears PCI INTA# and
6414          * chip-internal interrupt pending events.
6415          * Writing non-zero to intr-mbox-0 additionally tells the
6416          * NIC to stop sending us irqs, engaging "in-intr-handler"
6417          * event coalescing.
6418          */
6419         tw32_mailbox(tnapi->int_mbox, 0x00000001);
6420         if (likely(!tg3_irq_sync(tp)))
6421                 napi_schedule(&tnapi->napi);
6422
6423         return IRQ_RETVAL(1);
6424 }
6425
6426 static irqreturn_t tg3_interrupt(int irq, void *dev_id)
6427 {
6428         struct tg3_napi *tnapi = dev_id;
6429         struct tg3 *tp = tnapi->tp;
6430         struct tg3_hw_status *sblk = tnapi->hw_status;
6431         unsigned int handled = 1;
6432
6433         /* In INTx mode, it is possible for the interrupt to arrive at
6434          * the CPU before the status block posted prior to the interrupt is
6435          * visible.  Reading the PCI State register will confirm whether the
6436          * interrupt is ours and will flush the status block.
6437          */
6438         if (unlikely(!(sblk->status & SD_STATUS_UPDATED))) {
6439                 if (tg3_flag(tp, CHIP_RESETTING) ||
6440                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6441                         handled = 0;
6442                         goto out;
6443                 }
6444         }
6445
6446         /*
6447          * Writing any value to intr-mbox-0 clears PCI INTA# and
6448          * chip-internal interrupt pending events.
6449          * Writing non-zero to intr-mbox-0 additionally tells the
6450          * NIC to stop sending us irqs, engaging "in-intr-handler"
6451          * event coalescing.
6452          *
6453          * Flush the mailbox to de-assert the IRQ immediately to prevent
6454          * spurious interrupts.  The flush impacts performance but
6455          * excessive spurious interrupts can be worse in some cases.
6456          */
6457         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6458         if (tg3_irq_sync(tp))
6459                 goto out;
6460         sblk->status &= ~SD_STATUS_UPDATED;
6461         if (likely(tg3_has_work(tnapi))) {
6462                 prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6463                 napi_schedule(&tnapi->napi);
6464         } else {
6465                 /* No work, shared interrupt perhaps?  re-enable
6466                  * interrupts, and flush that PCI write
6467                  */
6468                 tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW,
6469                                0x00000000);
6470         }
6471 out:
6472         return IRQ_RETVAL(handled);
6473 }
6474
6475 static irqreturn_t tg3_interrupt_tagged(int irq, void *dev_id)
6476 {
6477         struct tg3_napi *tnapi = dev_id;
6478         struct tg3 *tp = tnapi->tp;
6479         struct tg3_hw_status *sblk = tnapi->hw_status;
6480         unsigned int handled = 1;
6481
6482         /* In INTx mode, it is possible for the interrupt to arrive at
6483          * the CPU before the status block posted prior to the interrupt is
6484          * visible.  Reading the PCI State register will confirm whether the
6485          * interrupt is ours and will flush the status block.
6486          */
6487         if (unlikely(sblk->status_tag == tnapi->last_irq_tag)) {
6488                 if (tg3_flag(tp, CHIP_RESETTING) ||
6489                     (tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6490                         handled = 0;
6491                         goto out;
6492                 }
6493         }
6494
6495         /*
6496          * Writing any value to intr-mbox-0 clears PCI INTA# and
6497          * chip-internal interrupt pending events.
6498          * Writing non-zero to intr-mbox-0 additionally tells the
6499          * NIC to stop sending us irqs, engaging "in-intr-handler"
6500          * event coalescing.
6501          *
6502          * Flush the mailbox to de-assert the IRQ immediately to prevent
6503          * spurious interrupts.  The flush impacts performance but
6504          * excessive spurious interrupts can be worse in some cases.
6505          */
6506         tw32_mailbox_f(MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW, 0x00000001);
6507
6508         /*
6509          * In a shared interrupt configuration, sometimes other devices'
6510          * interrupts will scream.  We record the current status tag here
6511          * so that the above check can report that the screaming interrupts
6512          * are unhandled.  Eventually they will be silenced.
6513          */
6514         tnapi->last_irq_tag = sblk->status_tag;
6515
6516         if (tg3_irq_sync(tp))
6517                 goto out;
6518
6519         prefetch(&tnapi->rx_rcb[tnapi->rx_rcb_ptr]);
6520
6521         napi_schedule(&tnapi->napi);
6522
6523 out:
6524         return IRQ_RETVAL(handled);
6525 }
6526
6527 /* ISR for interrupt test */
6528 static irqreturn_t tg3_test_isr(int irq, void *dev_id)
6529 {
6530         struct tg3_napi *tnapi = dev_id;
6531         struct tg3 *tp = tnapi->tp;
6532         struct tg3_hw_status *sblk = tnapi->hw_status;
6533
6534         if ((sblk->status & SD_STATUS_UPDATED) ||
6535             !(tr32(TG3PCI_PCISTATE) & PCISTATE_INT_NOT_ACTIVE)) {
6536                 tg3_disable_ints(tp);
6537                 return IRQ_RETVAL(1);
6538         }
6539         return IRQ_RETVAL(0);
6540 }
6541
6542 #ifdef CONFIG_NET_POLL_CONTROLLER
6543 static void tg3_poll_controller(struct net_device *dev)
6544 {
6545         int i;
6546         struct tg3 *tp = netdev_priv(dev);
6547
6548         for (i = 0; i < tp->irq_cnt; i++)
6549                 tg3_interrupt(tp->napi[i].irq_vec, &tp->napi[i]);
6550 }
6551 #endif
6552
6553 static void tg3_tx_timeout(struct net_device *dev)
6554 {
6555         struct tg3 *tp = netdev_priv(dev);
6556
6557         if (netif_msg_tx_err(tp)) {
6558                 netdev_err(dev, "transmit timed out, resetting\n");
6559                 tg3_dump_state(tp);
6560         }
6561
6562         tg3_reset_task_schedule(tp);
6563 }
6564
6565 /* Test for DMA buffers crossing any 4GB boundaries: 4G, 8G, etc */
6566 static inline int tg3_4g_overflow_test(dma_addr_t mapping, int len)
6567 {
6568         u32 base = (u32) mapping & 0xffffffff;
6569
6570         return (base > 0xffffdcc0) && (base + len + 8 < base);
6571 }
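/* Worked example for the test above, with illustrative addresses: a
 * buffer mapped at base == 0xffffff00 with len == 512 ends past the
 * 4GB boundary, so the 32-bit sum wraps:
 *
 *   base + len + 8 == 0x00000108 < base;    // u32 wraparound
 *
 * which is exactly the boundary crossing the buggy DMA engines choke
 * on.  The base > 0xffffdcc0 pre-check cheaply rejects buffers that
 * end too far below the boundary to ever wrap.
 */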
6572
6573 /* Test for DMA addresses > 40-bit */
6574 static inline int tg3_40bit_overflow_test(struct tg3 *tp, dma_addr_t mapping,
6575                                           int len)
6576 {
6577 #if defined(CONFIG_HIGHMEM) && (BITS_PER_LONG == 64)
6578         if (tg3_flag(tp, 40BIT_DMA_BUG))
6579                 return ((u64) mapping + len) > DMA_BIT_MASK(40);
6580         return 0;
6581 #else
6582         return 0;
6583 #endif
6584 }
6585
6586 static inline void tg3_tx_set_bd(struct tg3_tx_buffer_desc *txbd,
6587                                  dma_addr_t mapping, u32 len, u32 flags,
6588                                  u32 mss, u32 vlan)
6589 {
6590         txbd->addr_hi = ((u64) mapping >> 32);
6591         txbd->addr_lo = ((u64) mapping & 0xffffffff);
6592         txbd->len_flags = (len << TXD_LEN_SHIFT) | (flags & 0x0000ffff);
6593         txbd->vlan_tag = (mss << TXD_MSS_SHIFT) | (vlan << TXD_VLAN_TAG_SHIFT);
6594 }
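/* Illustrative field layout produced by the helper above for a
 * hypothetical 1514-byte, untagged, non-TSO frame that ends a packet:
 *
 *   txbd->len_flags = (1514 << TXD_LEN_SHIFT) | TXD_FLAG_END;
 *   txbd->vlan_tag  = (0 << TXD_MSS_SHIFT) | (0 << TXD_VLAN_TAG_SHIFT);
 *
 * The flags mask of 0x0000ffff confines the flag bits to the low half
 * of len_flags, with the length packed above them; mss and the VLAN
 * tag share vlan_tag analogously.
 */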
6595
6596 static bool tg3_tx_frag_set(struct tg3_napi *tnapi, u32 *entry, u32 *budget,
6597                             dma_addr_t map, u32 len, u32 flags,
6598                             u32 mss, u32 vlan)
6599 {
6600         struct tg3 *tp = tnapi->tp;
6601         bool hwbug = false;
6602
6603         if (tg3_flag(tp, SHORT_DMA_BUG) && len <= 8)
6604                 hwbug = true;
6605
6606         if (tg3_4g_overflow_test(map, len))
6607                 hwbug = true;
6608
6609         if (tg3_40bit_overflow_test(tp, map, len))
6610                 hwbug = true;
6611
6612         if (tp->dma_limit) {
6613                 u32 prvidx = *entry;
6614                 u32 tmp_flag = flags & ~TXD_FLAG_END;
6615                 while (len > tp->dma_limit && *budget) {
6616                         u32 frag_len = tp->dma_limit;
6617                         len -= tp->dma_limit;
6618
6619                         /* Avoid the 8-byte DMA problem */
6620                         if (len <= 8) {
6621                                 len += tp->dma_limit / 2;
6622                                 frag_len = tp->dma_limit / 2;
6623                         }
6624
6625                         tnapi->tx_buffers[*entry].fragmented = true;
6626
6627                         tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6628                                       frag_len, tmp_flag, mss, vlan);
6629                         *budget -= 1;
6630                         prvidx = *entry;
6631                         *entry = NEXT_TX(*entry);
6632
6633                         map += frag_len;
6634                 }
6635
6636                 if (len) {
6637                         if (*budget) {
6638                                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6639                                               len, flags, mss, vlan);
6640                                 *budget -= 1;
6641                                 *entry = NEXT_TX(*entry);
6642                         } else {
6643                                 hwbug = true;
6644                                 tnapi->tx_buffers[prvidx].fragmented = false;
6645                         }
6646                 }
6647         } else {
6648                 tg3_tx_set_bd(&tnapi->tx_ring[*entry], map,
6649                               len, flags, mss, vlan);
6650                 *entry = NEXT_TX(*entry);
6651         }
6652
6653         return hwbug;
6654 }
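/* Worked example of the splitting loop above, with illustrative
 * numbers: for tp->dma_limit == 4096 and an 8200-byte mapping, the
 * first pass emits a 4096-byte BD, leaving 4104 bytes.  The second
 * pass would leave an 8-byte remainder, which trips the short-DMA
 * workaround, so it emits dma_limit / 2 == 2048 bytes instead and
 * leaves 2056.  The tail BD then carries those 2056 bytes with the
 * caller's original flags (including TXD_FLAG_END).  Every BD except
 * the last is marked ->fragmented so tg3_tx() and tg3_tx_skb_unmap()
 * know to walk past the extra ring entries when unmapping.
 */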
6655
6656 static void tg3_tx_skb_unmap(struct tg3_napi *tnapi, u32 entry, int last)
6657 {
6658         int i;
6659         struct sk_buff *skb;
6660         struct tg3_tx_ring_info *txb = &tnapi->tx_buffers[entry];
6661
6662         skb = txb->skb;
6663         txb->skb = NULL;
6664
6665         pci_unmap_single(tnapi->tp->pdev,
6666                          dma_unmap_addr(txb, mapping),
6667                          skb_headlen(skb),
6668                          PCI_DMA_TODEVICE);
6669
6670         while (txb->fragmented) {
6671                 txb->fragmented = false;
6672                 entry = NEXT_TX(entry);
6673                 txb = &tnapi->tx_buffers[entry];
6674         }
6675
6676         for (i = 0; i <= last; i++) {
6677                 const skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6678
6679                 entry = NEXT_TX(entry);
6680                 txb = &tnapi->tx_buffers[entry];
6681
6682                 pci_unmap_page(tnapi->tp->pdev,
6683                                dma_unmap_addr(txb, mapping),
6684                                skb_frag_size(frag), PCI_DMA_TODEVICE);
6685
6686                 while (txb->fragmented) {
6687                         txb->fragmented = false;
6688                         entry = NEXT_TX(entry);
6689                         txb = &tnapi->tx_buffers[entry];
6690                 }
6691         }
6692 }
6693
6694 /* Workaround 4GB and 40-bit hardware DMA bugs. */
6695 static int tigon3_dma_hwbug_workaround(struct tg3_napi *tnapi,
6696                                        struct sk_buff **pskb,
6697                                        u32 *entry, u32 *budget,
6698                                        u32 base_flags, u32 mss, u32 vlan)
6699 {
6700         struct tg3 *tp = tnapi->tp;
6701         struct sk_buff *new_skb, *skb = *pskb;
6702         dma_addr_t new_addr = 0;
6703         int ret = 0;
6704
6705         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
6706                 new_skb = skb_copy(skb, GFP_ATOMIC);
6707         else {
6708                 int more_headroom = 4 - ((unsigned long)skb->data & 3);
6709
6710                 new_skb = skb_copy_expand(skb,
6711                                           skb_headroom(skb) + more_headroom,
6712                                           skb_tailroom(skb), GFP_ATOMIC);
6713         }
6714
6715         if (!new_skb) {
6716                 ret = -1;
6717         } else {
6718                 /* New SKB is guaranteed to be linear. */
6719                 new_addr = pci_map_single(tp->pdev, new_skb->data, new_skb->len,
6720                                           PCI_DMA_TODEVICE);
6721                 /* Make sure the mapping succeeded */
6722                 if (pci_dma_mapping_error(tp->pdev, new_addr)) {
6723                         dev_kfree_skb(new_skb);
6724                         ret = -1;
6725                 } else {
6726                         u32 save_entry = *entry;
6727
6728                         base_flags |= TXD_FLAG_END;
6729
6730                         tnapi->tx_buffers[*entry].skb = new_skb;
6731                         dma_unmap_addr_set(&tnapi->tx_buffers[*entry],
6732                                            mapping, new_addr);
6733
6734                         if (tg3_tx_frag_set(tnapi, entry, budget, new_addr,
6735                                             new_skb->len, base_flags,
6736                                             mss, vlan)) {
6737                                 tg3_tx_skb_unmap(tnapi, save_entry, -1);
6738                                 dev_kfree_skb(new_skb);
6739                                 ret = -1;
6740                         }
6741                 }
6742         }
6743
6744         dev_kfree_skb(skb);
6745         *pskb = new_skb;
6746         return ret;
6747 }
6748
6749 static netdev_tx_t tg3_start_xmit(struct sk_buff *, struct net_device *);
6750
6751 /* Use GSO to work around a rare TSO bug that may be triggered when the
6752  * TSO header is greater than 80 bytes.
6753  */
6754 static int tg3_tso_bug(struct tg3 *tp, struct sk_buff *skb)
6755 {
6756         struct sk_buff *segs, *nskb;
6757         u32 frag_cnt_est = skb_shinfo(skb)->gso_segs * 3;
6758
6759         /* Estimate the number of fragments in the worst case */
6760         if (unlikely(tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)) {
6761                 netif_stop_queue(tp->dev);
6762
6763                 /* netif_tx_stop_queue() must be done before checking
6764                  * the tx index in tg3_tx_avail() below, because in
6765                  * tg3_tx(), we update tx index before checking for
6766                  * netif_tx_queue_stopped().
6767                  */
6768                 smp_mb();
6769                 if (tg3_tx_avail(&tp->napi[0]) <= frag_cnt_est)
6770                         return NETDEV_TX_BUSY;
6771
6772                 netif_wake_queue(tp->dev);
6773         }
6774
6775         segs = skb_gso_segment(skb, tp->dev->features & ~NETIF_F_TSO);
6776         if (IS_ERR(segs))
6777                 goto tg3_tso_bug_end;
6778
6779         do {
6780                 nskb = segs;
6781                 segs = segs->next;
6782                 nskb->next = NULL;
6783                 tg3_start_xmit(nskb, tp->dev);
6784         } while (segs);
6785
6786 tg3_tso_bug_end:
6787         dev_kfree_skb(skb);
6788
6789         return NETDEV_TX_OK;
6790 }
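/* Condensed sketch of the workaround above (illustrative): a TSO skb
 * whose headers exceed 80 bytes is segmented in software and each
 * resulting MTU-sized skb is fed back through the normal xmit path:
 *
 *   segs = skb_gso_segment(skb, features & ~NETIF_F_TSO);
 *   for each nskb on the segs list:
 *           tg3_start_xmit(nskb, tp->dev);   // plain, non-TSO frames
 *
 * The gso_segs * 3 figure is a conservative worst-case descriptor
 * count per segment, checked up front so the loop cannot stall on a
 * full ring halfway through a packet.
 */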
6791
6792 /* hard_start_xmit for devices that have the 4G bug and/or 40-bit bug and
6793  * support TG3_FLAG_HW_TSO_1 or firmware TSO only.
6794  */
6795 static netdev_tx_t tg3_start_xmit(struct sk_buff *skb, struct net_device *dev)
6796 {
6797         struct tg3 *tp = netdev_priv(dev);
6798         u32 len, entry, base_flags, mss, vlan = 0;
6799         u32 budget;
6800         int i = -1, would_hit_hwbug;
6801         dma_addr_t mapping;
6802         struct tg3_napi *tnapi;
6803         struct netdev_queue *txq;
6804         unsigned int last;
6805
6806         txq = netdev_get_tx_queue(dev, skb_get_queue_mapping(skb));
6807         tnapi = &tp->napi[skb_get_queue_mapping(skb)];
6808         if (tg3_flag(tp, ENABLE_TSS))
6809                 tnapi++;
6810
6811         budget = tg3_tx_avail(tnapi);
6812
6813         /* We are running in BH disabled context with netif_tx_lock
6814          * and TX reclaim runs via tp->napi.poll inside of a software
6815          * interrupt.  Furthermore, IRQ processing runs lockless so we have
6816          * no IRQ context deadlocks to worry about either.  Rejoice!
6817          */
6818         if (unlikely(budget <= (skb_shinfo(skb)->nr_frags + 1))) {
6819                 if (!netif_tx_queue_stopped(txq)) {
6820                         netif_tx_stop_queue(txq);
6821
6822                         /* This is a hard error, log it. */
6823                         netdev_err(dev,
6824                                    "BUG! Tx Ring full when queue awake!\n");
6825                 }
6826                 return NETDEV_TX_BUSY;
6827         }
6828
6829         entry = tnapi->tx_prod;
6830         base_flags = 0;
6831         if (skb->ip_summed == CHECKSUM_PARTIAL)
6832                 base_flags |= TXD_FLAG_TCPUDP_CSUM;
6833
6834         mss = skb_shinfo(skb)->gso_size;
6835         if (mss) {
6836                 struct iphdr *iph;
6837                 u32 tcp_opt_len, hdr_len;
6838
6839                 if (skb_header_cloned(skb) &&
6840                     pskb_expand_head(skb, 0, 0, GFP_ATOMIC))
6841                         goto drop;
6842
6843                 iph = ip_hdr(skb);
6844                 tcp_opt_len = tcp_optlen(skb);
6845
6846                 hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb) - ETH_HLEN;
6847
6848                 if (!skb_is_gso_v6(skb)) {
6849                         iph->check = 0;
6850                         iph->tot_len = htons(mss + hdr_len);
6851                 }
6852
6853                 if (unlikely((ETH_HLEN + hdr_len) > 80) &&
6854                     tg3_flag(tp, TSO_BUG))
6855                         return tg3_tso_bug(tp, skb);
6856
6857                 base_flags |= (TXD_FLAG_CPU_PRE_DMA |
6858                                TXD_FLAG_CPU_POST_DMA);
6859
6860                 if (tg3_flag(tp, HW_TSO_1) ||
6861                     tg3_flag(tp, HW_TSO_2) ||
6862                     tg3_flag(tp, HW_TSO_3)) {
6863                         tcp_hdr(skb)->check = 0;
6864                         base_flags &= ~TXD_FLAG_TCPUDP_CSUM;
6865                 } else
6866                         tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
6867                                                                  iph->daddr, 0,
6868                                                                  IPPROTO_TCP,
6869                                                                  0);
6870
6871                 if (tg3_flag(tp, HW_TSO_3)) {
6872                         mss |= (hdr_len & 0xc) << 12;
6873                         if (hdr_len & 0x10)
6874                                 base_flags |= 0x00000010;
6875                         base_flags |= (hdr_len & 0x3e0) << 5;
6876                 } else if (tg3_flag(tp, HW_TSO_2))
6877                         mss |= hdr_len << 9;
6878                 else if (tg3_flag(tp, HW_TSO_1) ||
6879                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
6880                         if (tcp_opt_len || iph->ihl > 5) {
6881                                 int tsflags;
6882
6883                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6884                                 mss |= (tsflags << 11);
6885                         }
6886                 } else {
6887                         if (tcp_opt_len || iph->ihl > 5) {
6888                                 int tsflags;
6889
6890                                 tsflags = (iph->ihl - 5) + (tcp_opt_len >> 2);
6891                                 base_flags |= tsflags << 12;
6892                         }
6893                 }
6894         }
6895
6896         if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
6897             !mss && skb->len > VLAN_ETH_FRAME_LEN)
6898                 base_flags |= TXD_FLAG_JMB_PKT;
6899
6900         if (vlan_tx_tag_present(skb)) {
6901                 base_flags |= TXD_FLAG_VLAN;
6902                 vlan = vlan_tx_tag_get(skb);
6903         }
6904
6905         len = skb_headlen(skb);
6906
6907         mapping = pci_map_single(tp->pdev, skb->data, len, PCI_DMA_TODEVICE);
6908         if (pci_dma_mapping_error(tp->pdev, mapping))
6909                 goto drop;
6910
6911
6912         tnapi->tx_buffers[entry].skb = skb;
6913         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping, mapping);
6914
6915         would_hit_hwbug = 0;
6916
6917         if (tg3_flag(tp, 5701_DMA_BUG))
6918                 would_hit_hwbug = 1;
6919
6920         if (tg3_tx_frag_set(tnapi, &entry, &budget, mapping, len, base_flags |
6921                           ((skb_shinfo(skb)->nr_frags == 0) ? TXD_FLAG_END : 0),
6922                             mss, vlan)) {
6923                 would_hit_hwbug = 1;
6924         } else if (skb_shinfo(skb)->nr_frags > 0) {
6925                 u32 tmp_mss = mss;
6926
6927                 if (!tg3_flag(tp, HW_TSO_1) &&
6928                     !tg3_flag(tp, HW_TSO_2) &&
6929                     !tg3_flag(tp, HW_TSO_3))
6930                         tmp_mss = 0;
6931
6932                 /* Now loop through additional data
6933                  * fragments, and queue them.
6934                  */
6935                 last = skb_shinfo(skb)->nr_frags - 1;
6936                 for (i = 0; i <= last; i++) {
6937                         skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
6938
6939                         len = skb_frag_size(frag);
6940                         mapping = skb_frag_dma_map(&tp->pdev->dev, frag, 0,
6941                                                    len, DMA_TO_DEVICE);
6942
6943                         tnapi->tx_buffers[entry].skb = NULL;
6944                         dma_unmap_addr_set(&tnapi->tx_buffers[entry], mapping,
6945                                            mapping);
6946                         if (dma_mapping_error(&tp->pdev->dev, mapping))
6947                                 goto dma_error;
6948
6949                         if (!budget ||
6950                             tg3_tx_frag_set(tnapi, &entry, &budget, mapping,
6951                                             len, base_flags |
6952                                             ((i == last) ? TXD_FLAG_END : 0),
6953                                             tmp_mss, vlan)) {
6954                                 would_hit_hwbug = 1;
6955                                 break;
6956                         }
6957                 }
6958         }
6959
6960         if (would_hit_hwbug) {
6961                 tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, i);
6962
6963                 /* If the workaround fails due to memory/mapping
6964                  * failure, silently drop this packet.
6965                  */
6966                 entry = tnapi->tx_prod;
6967                 budget = tg3_tx_avail(tnapi);
6968                 if (tigon3_dma_hwbug_workaround(tnapi, &skb, &entry, &budget,
6969                                                 base_flags, mss, vlan))
6970                         goto drop_nofree;
6971         }
6972
6973         skb_tx_timestamp(skb);
6974         netdev_tx_sent_queue(txq, skb->len);
6975
6976         /* Sync BD data before updating mailbox */
6977         wmb();
6978
6979         /* Packets are ready, update Tx producer idx local and on card. */
6980         tw32_tx_mbox(tnapi->prodmbox, entry);
6981
6982         tnapi->tx_prod = entry;
6983         if (unlikely(tg3_tx_avail(tnapi) <= (MAX_SKB_FRAGS + 1))) {
6984                 netif_tx_stop_queue(txq);
6985
6986                 /* netif_tx_stop_queue() must be done before checking
6987                  * the tx index in tg3_tx_avail() below, because in
6988                  * tg3_tx(), we update tx index before checking for
6989                  * netif_tx_queue_stopped().
6990                  */
6991                 smp_mb();
6992                 if (tg3_tx_avail(tnapi) > TG3_TX_WAKEUP_THRESH(tnapi))
6993                         netif_tx_wake_queue(txq);
6994         }
6995
6996         mmiowb();
6997         return NETDEV_TX_OK;
6998
6999 dma_error:
7000         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod, --i);
7001         tnapi->tx_buffers[tnapi->tx_prod].skb = NULL;
7002 drop:
7003         dev_kfree_skb(skb);
7004 drop_nofree:
7005         tp->tx_dropped++;
7006         return NETDEV_TX_OK;
7007 }
7008
7009 static void tg3_mac_loopback(struct tg3 *tp, bool enable)
7010 {
7011         if (enable) {
7012                 tp->mac_mode &= ~(MAC_MODE_HALF_DUPLEX |
7013                                   MAC_MODE_PORT_MODE_MASK);
7014
7015                 tp->mac_mode |= MAC_MODE_PORT_INT_LPBACK;
7016
7017                 if (!tg3_flag(tp, 5705_PLUS))
7018                         tp->mac_mode |= MAC_MODE_LINK_POLARITY;
7019
7020                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
7021                         tp->mac_mode |= MAC_MODE_PORT_MODE_MII;
7022                 else
7023                         tp->mac_mode |= MAC_MODE_PORT_MODE_GMII;
7024         } else {
7025                 tp->mac_mode &= ~MAC_MODE_PORT_INT_LPBACK;
7026
7027                 if (tg3_flag(tp, 5705_PLUS) ||
7028                     (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) ||
7029                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
7030                         tp->mac_mode &= ~MAC_MODE_LINK_POLARITY;
7031         }
7032
7033         tw32(MAC_MODE, tp->mac_mode);
7034         udelay(40);
7035 }
7036
7037 static int tg3_phy_lpbk_set(struct tg3 *tp, u32 speed, bool extlpbk)
7038 {
7039         u32 val, bmcr, mac_mode, ptest = 0;
7040
7041         tg3_phy_toggle_apd(tp, false);
7042         tg3_phy_toggle_automdix(tp, 0);
7043
7044         if (extlpbk && tg3_phy_set_extloopbk(tp))
7045                 return -EIO;
7046
7047         bmcr = BMCR_FULLDPLX;
7048         switch (speed) {
7049         case SPEED_10:
7050                 break;
7051         case SPEED_100:
7052                 bmcr |= BMCR_SPEED100;
7053                 break;
7054         case SPEED_1000:
7055         default:
7056                 if (tp->phy_flags & TG3_PHYFLG_IS_FET) {
7057                         speed = SPEED_100;
7058                         bmcr |= BMCR_SPEED100;
7059                 } else {
7060                         speed = SPEED_1000;
7061                         bmcr |= BMCR_SPEED1000;
7062                 }
7063         }
7064
7065         if (extlpbk) {
7066                 if (!(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
7067                         tg3_readphy(tp, MII_CTRL1000, &val);
7068                         val |= CTL1000_AS_MASTER |
7069                                CTL1000_ENABLE_MASTER;
7070                         tg3_writephy(tp, MII_CTRL1000, val);
7071                 } else {
7072                         ptest = MII_TG3_FET_PTEST_TRIM_SEL |
7073                                 MII_TG3_FET_PTEST_TRIM_2;
7074                         tg3_writephy(tp, MII_TG3_FET_PTEST, ptest);
7075                 }
7076         } else
7077                 bmcr |= BMCR_LOOPBACK;
7078
7079         tg3_writephy(tp, MII_BMCR, bmcr);
7080
7081         /* The write needs to be flushed for the FETs */
7082         if (tp->phy_flags & TG3_PHYFLG_IS_FET)
7083                 tg3_readphy(tp, MII_BMCR, &bmcr);
7084
7085         udelay(40);
7086
7087         if ((tp->phy_flags & TG3_PHYFLG_IS_FET) &&
7088             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
7089                 tg3_writephy(tp, MII_TG3_FET_PTEST, ptest |
7090                              MII_TG3_FET_PTEST_FRC_TX_LINK |
7091                              MII_TG3_FET_PTEST_FRC_TX_LOCK);
7092
7093                 /* The write needs to be flushed for the AC131 */
7094                 tg3_readphy(tp, MII_TG3_FET_PTEST, &val);
7095         }
7096
7097         /* Reset to prevent intermittently losing the first rx packet */
7098         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
7099             tg3_flag(tp, 5780_CLASS)) {
7100                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
7101                 udelay(10);
7102                 tw32_f(MAC_RX_MODE, tp->rx_mode);
7103         }
7104
7105         mac_mode = tp->mac_mode &
7106                    ~(MAC_MODE_PORT_MODE_MASK | MAC_MODE_HALF_DUPLEX);
7107         if (speed == SPEED_1000)
7108                 mac_mode |= MAC_MODE_PORT_MODE_GMII;
7109         else
7110                 mac_mode |= MAC_MODE_PORT_MODE_MII;
7111
7112         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700) {
7113                 u32 masked_phy_id = tp->phy_id & TG3_PHY_ID_MASK;
7114
7115                 if (masked_phy_id == TG3_PHY_ID_BCM5401)
7116                         mac_mode &= ~MAC_MODE_LINK_POLARITY;
7117                 else if (masked_phy_id == TG3_PHY_ID_BCM5411)
7118                         mac_mode |= MAC_MODE_LINK_POLARITY;
7119
7120                 tg3_writephy(tp, MII_TG3_EXT_CTRL,
7121                              MII_TG3_EXT_CTRL_LNK3_LED_MODE);
7122         }
7123
7124         tw32(MAC_MODE, mac_mode);
7125         udelay(40);
7126
7127         return 0;
7128 }
7129
7130 static void tg3_set_loopback(struct net_device *dev, netdev_features_t features)
7131 {
7132         struct tg3 *tp = netdev_priv(dev);
7133
7134         if (features & NETIF_F_LOOPBACK) {
7135                 if (tp->mac_mode & MAC_MODE_PORT_INT_LPBACK)
7136                         return;
7137
7138                 spin_lock_bh(&tp->lock);
7139                 tg3_mac_loopback(tp, true);
7140                 netif_carrier_on(tp->dev);
7141                 spin_unlock_bh(&tp->lock);
7142                 netdev_info(dev, "Internal MAC loopback mode enabled.\n");
7143         } else {
7144                 if (!(tp->mac_mode & MAC_MODE_PORT_INT_LPBACK))
7145                         return;
7146
7147                 spin_lock_bh(&tp->lock);
7148                 tg3_mac_loopback(tp, false);
7149                 /* Force link status check */
7150                 tg3_setup_phy(tp, 1);
7151                 spin_unlock_bh(&tp->lock);
7152                 netdev_info(dev, "Internal MAC loopback mode disabled.\n");
7153         }
7154 }
7155
7156 static netdev_features_t tg3_fix_features(struct net_device *dev,
7157         netdev_features_t features)
7158 {
7159         struct tg3 *tp = netdev_priv(dev);
7160
7161         if (dev->mtu > ETH_DATA_LEN && tg3_flag(tp, 5780_CLASS))
7162                 features &= ~NETIF_F_ALL_TSO;
7163
7164         return features;
7165 }
7166
7167 static int tg3_set_features(struct net_device *dev, netdev_features_t features)
7168 {
7169         netdev_features_t changed = dev->features ^ features;
7170
7171         if ((changed & NETIF_F_LOOPBACK) && netif_running(dev))
7172                 tg3_set_loopback(dev, features);
7173
7174         return 0;
7175 }
7176
7177 static void tg3_rx_prodring_free(struct tg3 *tp,
7178                                  struct tg3_rx_prodring_set *tpr)
7179 {
7180         int i;
7181
7182         if (tpr != &tp->napi[0].prodring) {
7183                 for (i = tpr->rx_std_cons_idx; i != tpr->rx_std_prod_idx;
7184                      i = (i + 1) & tp->rx_std_ring_mask)
7185                         tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7186                                         tp->rx_pkt_map_sz);
7187
7188                 if (tg3_flag(tp, JUMBO_CAPABLE)) {
7189                         for (i = tpr->rx_jmb_cons_idx;
7190                              i != tpr->rx_jmb_prod_idx;
7191                              i = (i + 1) & tp->rx_jmb_ring_mask) {
7192                                 tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7193                                                 TG3_RX_JMB_MAP_SZ);
7194                         }
7195                 }
7196
7197                 return;
7198         }
7199
7200         for (i = 0; i <= tp->rx_std_ring_mask; i++)
7201                 tg3_rx_data_free(tp, &tpr->rx_std_buffers[i],
7202                                 tp->rx_pkt_map_sz);
7203
7204         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7205                 for (i = 0; i <= tp->rx_jmb_ring_mask; i++)
7206                         tg3_rx_data_free(tp, &tpr->rx_jmb_buffers[i],
7207                                         TG3_RX_JMB_MAP_SZ);
7208         }
7209 }
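/* Note the (i + 1) & ring_mask stepping in the partial free above: the
 * ring sizes are powers of two, so masking is a cheap modulo for walking
 * from the consumer index to the producer index.  For example, with a
 * 512-entry standard ring (mask 0x1ff), index 511 + 1 == 512 wraps back
 * to 0.
 */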
7210
7211 /* Initialize rx rings for packet processing.
7212  *
7213  * The chip has been shut down and the driver detached from
7214  * the networking stack, so no interrupts or new tx packets will
7215  * end up in the driver.  tp->{tx,}lock are held and thus
7216  * we may not sleep.
7217  */
7218 static int tg3_rx_prodring_alloc(struct tg3 *tp,
7219                                  struct tg3_rx_prodring_set *tpr)
7220 {
7221         u32 i, rx_pkt_dma_sz;
7222
7223         tpr->rx_std_cons_idx = 0;
7224         tpr->rx_std_prod_idx = 0;
7225         tpr->rx_jmb_cons_idx = 0;
7226         tpr->rx_jmb_prod_idx = 0;
7227
7228         if (tpr != &tp->napi[0].prodring) {
7229                 memset(&tpr->rx_std_buffers[0], 0,
7230                        TG3_RX_STD_BUFF_RING_SIZE(tp));
7231                 if (tpr->rx_jmb_buffers)
7232                         memset(&tpr->rx_jmb_buffers[0], 0,
7233                                TG3_RX_JMB_BUFF_RING_SIZE(tp));
7234                 goto done;
7235         }
7236
7237         /* Zero out all descriptors. */
7238         memset(tpr->rx_std, 0, TG3_RX_STD_RING_BYTES(tp));
7239
7240         rx_pkt_dma_sz = TG3_RX_STD_DMA_SZ;
7241         if (tg3_flag(tp, 5780_CLASS) &&
7242             tp->dev->mtu > ETH_DATA_LEN)
7243                 rx_pkt_dma_sz = TG3_RX_JMB_DMA_SZ;
7244         tp->rx_pkt_map_sz = TG3_RX_DMA_TO_MAP_SZ(rx_pkt_dma_sz);
7245
7246         /* Initialize the invariants of the rings; we only set this
7247          * stuff once.  This works because the card never writes
7248          * into the rx buffer posting rings.
7249          */
7250         for (i = 0; i <= tp->rx_std_ring_mask; i++) {
7251                 struct tg3_rx_buffer_desc *rxd;
7252
7253                 rxd = &tpr->rx_std[i];
7254                 rxd->idx_len = rx_pkt_dma_sz << RXD_LEN_SHIFT;
7255                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT);
7256                 rxd->opaque = (RXD_OPAQUE_RING_STD |
7257                                (i << RXD_OPAQUE_INDEX_SHIFT));
7258         }
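        /* The opaque cookies programmed above are how the completion
         * path finds a buffer again: tg3_rx() splits the cookie from
         * the returned descriptor back into a ring id and a slot index,
         * roughly like this (sketch):
         *
         *    opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
         *    desc_idx   = desc->opaque & RXD_OPAQUE_INDEX_MASK;
         */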
7259
7260         /* Now allocate fresh SKBs for each rx ring. */
7261         for (i = 0; i < tp->rx_pending; i++) {
7262                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_STD, i) < 0) {
7263                         netdev_warn(tp->dev,
7264                                     "Using a smaller RX standard ring. Only "
7265                                     "%d out of %d buffers were allocated "
7266                                     "successfully\n", i, tp->rx_pending);
7267                         if (i == 0)
7268                                 goto initfail;
7269                         tp->rx_pending = i;
7270                         break;
7271                 }
7272         }
7273
7274         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
7275                 goto done;
7276
7277         memset(tpr->rx_jmb, 0, TG3_RX_JMB_RING_BYTES(tp));
7278
7279         if (!tg3_flag(tp, JUMBO_RING_ENABLE))
7280                 goto done;
7281
7282         for (i = 0; i <= tp->rx_jmb_ring_mask; i++) {
7283                 struct tg3_rx_buffer_desc *rxd;
7284
7285                 rxd = &tpr->rx_jmb[i].std;
7286                 rxd->idx_len = TG3_RX_JMB_DMA_SZ << RXD_LEN_SHIFT;
7287                 rxd->type_flags = (RXD_FLAG_END << RXD_FLAGS_SHIFT) |
7288                                   RXD_FLAG_JUMBO;
7289                 rxd->opaque = (RXD_OPAQUE_RING_JUMBO |
7290                                (i << RXD_OPAQUE_INDEX_SHIFT));
7291         }
7292
7293         for (i = 0; i < tp->rx_jumbo_pending; i++) {
7294                 if (tg3_alloc_rx_data(tp, tpr, RXD_OPAQUE_RING_JUMBO, i) < 0) {
7295                         netdev_warn(tp->dev,
7296                                     "Using a smaller RX jumbo ring. Only %d "
7297                                     "out of %d buffers were allocated "
7298                                     "successfully\n", i, tp->rx_jumbo_pending);
7299                         if (i == 0)
7300                                 goto initfail;
7301                         tp->rx_jumbo_pending = i;
7302                         break;
7303                 }
7304         }
7305
7306 done:
7307         return 0;
7308
7309 initfail:
7310         tg3_rx_prodring_free(tp, tpr);
7311         return -ENOMEM;
7312 }
7313
7314 static void tg3_rx_prodring_fini(struct tg3 *tp,
7315                                  struct tg3_rx_prodring_set *tpr)
7316 {
7317         kfree(tpr->rx_std_buffers);
7318         tpr->rx_std_buffers = NULL;
7319         kfree(tpr->rx_jmb_buffers);
7320         tpr->rx_jmb_buffers = NULL;
7321         if (tpr->rx_std) {
7322                 dma_free_coherent(&tp->pdev->dev, TG3_RX_STD_RING_BYTES(tp),
7323                                   tpr->rx_std, tpr->rx_std_mapping);
7324                 tpr->rx_std = NULL;
7325         }
7326         if (tpr->rx_jmb) {
7327                 dma_free_coherent(&tp->pdev->dev, TG3_RX_JMB_RING_BYTES(tp),
7328                                   tpr->rx_jmb, tpr->rx_jmb_mapping);
7329                 tpr->rx_jmb = NULL;
7330         }
7331 }
7332
7333 static int tg3_rx_prodring_init(struct tg3 *tp,
7334                                 struct tg3_rx_prodring_set *tpr)
7335 {
7336         tpr->rx_std_buffers = kzalloc(TG3_RX_STD_BUFF_RING_SIZE(tp),
7337                                       GFP_KERNEL);
7338         if (!tpr->rx_std_buffers)
7339                 return -ENOMEM;
7340
7341         tpr->rx_std = dma_alloc_coherent(&tp->pdev->dev,
7342                                          TG3_RX_STD_RING_BYTES(tp),
7343                                          &tpr->rx_std_mapping,
7344                                          GFP_KERNEL);
7345         if (!tpr->rx_std)
7346                 goto err_out;
7347
7348         if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS)) {
7349                 tpr->rx_jmb_buffers = kzalloc(TG3_RX_JMB_BUFF_RING_SIZE(tp),
7350                                               GFP_KERNEL);
7351                 if (!tpr->rx_jmb_buffers)
7352                         goto err_out;
7353
7354                 tpr->rx_jmb = dma_alloc_coherent(&tp->pdev->dev,
7355                                                  TG3_RX_JMB_RING_BYTES(tp),
7356                                                  &tpr->rx_jmb_mapping,
7357                                                  GFP_KERNEL);
7358                 if (!tpr->rx_jmb)
7359                         goto err_out;
7360         }
7361
7362         return 0;
7363
7364 err_out:
7365         tg3_rx_prodring_fini(tp, tpr);
7366         return -ENOMEM;
7367 }
7368
7369 /* Free up pending packets in all rx/tx rings.
7370  *
7371  * The chip has been shut down and the driver detached from
7372  * the networking stack, so no interrupts or new tx packets will
7373  * end up in the driver.  tp->{tx,}lock is not held and we are not
7374  * in an interrupt context and thus may sleep.
7375  */
7376 static void tg3_free_rings(struct tg3 *tp)
7377 {
7378         int i, j;
7379
7380         for (j = 0; j < tp->irq_cnt; j++) {
7381                 struct tg3_napi *tnapi = &tp->napi[j];
7382
7383                 tg3_rx_prodring_free(tp, &tnapi->prodring);
7384
7385                 if (!tnapi->tx_buffers)
7386                         continue;
7387
7388                 for (i = 0; i < TG3_TX_RING_SIZE; i++) {
7389                         struct sk_buff *skb = tnapi->tx_buffers[i].skb;
7390
7391                         if (!skb)
7392                                 continue;
7393
7394                         tg3_tx_skb_unmap(tnapi, i,
7395                                          skb_shinfo(skb)->nr_frags - 1);
7396
7397                         dev_kfree_skb_any(skb);
7398                 }
7399                 netdev_tx_reset_queue(netdev_get_tx_queue(tp->dev, j));
7400         }
7401 }
7402
7403 /* Initialize tx/rx rings for packet processing.
7404  *
7405  * The chip has been shut down and the driver detached from
7406  * the networking stack, so no interrupts or new tx packets will
7407  * end up in the driver.  tp->{tx,}lock are held and thus
7408  * we may not sleep.
7409  */
7410 static int tg3_init_rings(struct tg3 *tp)
7411 {
7412         int i;
7413
7414         /* Free up all the SKBs. */
7415         tg3_free_rings(tp);
7416
7417         for (i = 0; i < tp->irq_cnt; i++) {
7418                 struct tg3_napi *tnapi = &tp->napi[i];
7419
7420                 tnapi->last_tag = 0;
7421                 tnapi->last_irq_tag = 0;
7422                 tnapi->hw_status->status = 0;
7423                 tnapi->hw_status->status_tag = 0;
7424                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7425
7426                 tnapi->tx_prod = 0;
7427                 tnapi->tx_cons = 0;
7428                 if (tnapi->tx_ring)
7429                         memset(tnapi->tx_ring, 0, TG3_TX_RING_BYTES);
7430
7431                 tnapi->rx_rcb_ptr = 0;
7432                 if (tnapi->rx_rcb)
7433                         memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7434
7435                 if (tg3_rx_prodring_alloc(tp, &tnapi->prodring)) {
7436                         tg3_free_rings(tp);
7437                         return -ENOMEM;
7438                 }
7439         }
7440
7441         return 0;
7442 }
7443
7444 /*
7445  * Must not be invoked with interrupt sources disabled and
7446  * the hardware shut down.
7447  */
7448 static void tg3_free_consistent(struct tg3 *tp)
7449 {
7450         int i;
7451
7452         for (i = 0; i < tp->irq_cnt; i++) {
7453                 struct tg3_napi *tnapi = &tp->napi[i];
7454
7455                 if (tnapi->tx_ring) {
7456                         dma_free_coherent(&tp->pdev->dev, TG3_TX_RING_BYTES,
7457                                 tnapi->tx_ring, tnapi->tx_desc_mapping);
7458                         tnapi->tx_ring = NULL;
7459                 }
7460
7461                 kfree(tnapi->tx_buffers);
7462                 tnapi->tx_buffers = NULL;
7463
7464                 if (tnapi->rx_rcb) {
7465                         dma_free_coherent(&tp->pdev->dev,
7466                                           TG3_RX_RCB_RING_BYTES(tp),
7467                                           tnapi->rx_rcb,
7468                                           tnapi->rx_rcb_mapping);
7469                         tnapi->rx_rcb = NULL;
7470                 }
7471
7472                 tg3_rx_prodring_fini(tp, &tnapi->prodring);
7473
7474                 if (tnapi->hw_status) {
7475                         dma_free_coherent(&tp->pdev->dev, TG3_HW_STATUS_SIZE,
7476                                           tnapi->hw_status,
7477                                           tnapi->status_mapping);
7478                         tnapi->hw_status = NULL;
7479                 }
7480         }
7481
7482         if (tp->hw_stats) {
7483                 dma_free_coherent(&tp->pdev->dev, sizeof(struct tg3_hw_stats),
7484                                   tp->hw_stats, tp->stats_mapping);
7485                 tp->hw_stats = NULL;
7486         }
7487 }
7488
7489 /*
7490  * Must not be invoked with interrupt sources disabled and
7491  * the hardware shut down.  Can sleep.
7492  */
7493 static int tg3_alloc_consistent(struct tg3 *tp)
7494 {
7495         int i;
7496
7497         tp->hw_stats = dma_alloc_coherent(&tp->pdev->dev,
7498                                           sizeof(struct tg3_hw_stats),
7499                                           &tp->stats_mapping,
7500                                           GFP_KERNEL);
7501         if (!tp->hw_stats)
7502                 goto err_out;
7503
7504         memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
7505
7506         for (i = 0; i < tp->irq_cnt; i++) {
7507                 struct tg3_napi *tnapi = &tp->napi[i];
7508                 struct tg3_hw_status *sblk;
7509
7510                 tnapi->hw_status = dma_alloc_coherent(&tp->pdev->dev,
7511                                                       TG3_HW_STATUS_SIZE,
7512                                                       &tnapi->status_mapping,
7513                                                       GFP_KERNEL);
7514                 if (!tnapi->hw_status)
7515                         goto err_out;
7516
7517                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7518                 sblk = tnapi->hw_status;
7519
7520                 if (tg3_rx_prodring_init(tp, &tnapi->prodring))
7521                         goto err_out;
7522
7523                 /* If multivector TSS is enabled, vector 0 does not handle
7524                  * tx interrupts.  Don't allocate any resources for it.
7525                  */
7526                 if ((!i && !tg3_flag(tp, ENABLE_TSS)) ||
7527                     (i && tg3_flag(tp, ENABLE_TSS))) {
7528                         tnapi->tx_buffers = kzalloc(
7529                                                sizeof(struct tg3_tx_ring_info) *
7530                                                TG3_TX_RING_SIZE, GFP_KERNEL);
7531                         if (!tnapi->tx_buffers)
7532                                 goto err_out;
7533
7534                         tnapi->tx_ring = dma_alloc_coherent(&tp->pdev->dev,
7535                                                             TG3_TX_RING_BYTES,
7536                                                         &tnapi->tx_desc_mapping,
7537                                                             GFP_KERNEL);
7538                         if (!tnapi->tx_ring)
7539                                 goto err_out;
7540                 }
7541
7542                 /*
7543                  * When RSS is enabled, the status block format changes
7544                  * slightly.  The "rx_jumbo_consumer", "reserved",
7545                  * and "rx_mini_consumer" members get mapped to the
7546                  * other three rx return ring producer indexes.
7547                  */
7548                 switch (i) {
7549                 default:
7550                         tnapi->rx_rcb_prod_idx = &sblk->idx[0].rx_producer;
7551                         break;
7552                 case 2:
7553                         tnapi->rx_rcb_prod_idx = &sblk->rx_jumbo_consumer;
7554                         break;
7555                 case 3:
7556                         tnapi->rx_rcb_prod_idx = &sblk->reserved;
7557                         break;
7558                 case 4:
7559                         tnapi->rx_rcb_prod_idx = &sblk->rx_mini_consumer;
7560                         break;
7561                 }
7562
7563                 /*
7564                  * If multivector RSS is enabled, vector 0 does not handle
7565                  * rx or tx interrupts.  Don't allocate any resources for it.
7566                  */
7567                 if (!i && tg3_flag(tp, ENABLE_RSS))
7568                         continue;
7569
7570                 tnapi->rx_rcb = dma_alloc_coherent(&tp->pdev->dev,
7571                                                    TG3_RX_RCB_RING_BYTES(tp),
7572                                                    &tnapi->rx_rcb_mapping,
7573                                                    GFP_KERNEL);
7574                 if (!tnapi->rx_rcb)
7575                         goto err_out;
7576
7577                 memset(tnapi->rx_rcb, 0, TG3_RX_RCB_RING_BYTES(tp));
7578         }
7579
7580         return 0;
7581
7582 err_out:
7583         tg3_free_consistent(tp);
7584         return -ENOMEM;
7585 }
7586
7587 #define MAX_WAIT_CNT 1000
7588
7589 /* To stop a block, clear the enable bit and poll till it
7590  * clears.  tp->lock is held.
7591  */
7592 static int tg3_stop_block(struct tg3 *tp, unsigned long ofs, u32 enable_bit, int silent)
7593 {
7594         unsigned int i;
7595         u32 val;
7596
7597         if (tg3_flag(tp, 5705_PLUS)) {
7598                 switch (ofs) {
7599                 case RCVLSC_MODE:
7600                 case DMAC_MODE:
7601                 case MBFREE_MODE:
7602                 case BUFMGR_MODE:
7603                 case MEMARB_MODE:
7604                         /* We can't enable/disable these bits of the
7605                          * 5705/5750; just say success.
7606                          */
7607                         return 0;
7608
7609                 default:
7610                         break;
7611                 }
7612         }
7613
7614         val = tr32(ofs);
7615         val &= ~enable_bit;
7616         tw32_f(ofs, val);
7617
7618         for (i = 0; i < MAX_WAIT_CNT; i++) {
7619                 udelay(100);
7620                 val = tr32(ofs);
7621                 if ((val & enable_bit) == 0)
7622                         break;
7623         }
7624
7625         if (i == MAX_WAIT_CNT && !silent) {
7626                 dev_err(&tp->pdev->dev,
7627                         "tg3_stop_block timed out, ofs=%lx enable_bit=%x\n",
7628                         ofs, enable_bit);
7629                 return -ENODEV;
7630         }
7631
7632         return 0;
7633 }
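/* With MAX_WAIT_CNT == 1000 and a 100us poll interval, tg3_stop_block()
 * gives a block roughly 100ms to quiesce before reporting -ENODEV.
 * Typical use, as in tg3_abort_hw() below (sketch):
 *
 *    err = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
 */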
7634
7635 /* tp->lock is held. */
7636 static int tg3_abort_hw(struct tg3 *tp, int silent)
7637 {
7638         int i, err;
7639
7640         tg3_disable_ints(tp);
7641
7642         tp->rx_mode &= ~RX_MODE_ENABLE;
7643         tw32_f(MAC_RX_MODE, tp->rx_mode);
7644         udelay(10);
7645
7646         err  = tg3_stop_block(tp, RCVBDI_MODE, RCVBDI_MODE_ENABLE, silent);
7647         err |= tg3_stop_block(tp, RCVLPC_MODE, RCVLPC_MODE_ENABLE, silent);
7648         err |= tg3_stop_block(tp, RCVLSC_MODE, RCVLSC_MODE_ENABLE, silent);
7649         err |= tg3_stop_block(tp, RCVDBDI_MODE, RCVDBDI_MODE_ENABLE, silent);
7650         err |= tg3_stop_block(tp, RCVDCC_MODE, RCVDCC_MODE_ENABLE, silent);
7651         err |= tg3_stop_block(tp, RCVCC_MODE, RCVCC_MODE_ENABLE, silent);
7652
7653         err |= tg3_stop_block(tp, SNDBDS_MODE, SNDBDS_MODE_ENABLE, silent);
7654         err |= tg3_stop_block(tp, SNDBDI_MODE, SNDBDI_MODE_ENABLE, silent);
7655         err |= tg3_stop_block(tp, SNDDATAI_MODE, SNDDATAI_MODE_ENABLE, silent);
7656         err |= tg3_stop_block(tp, RDMAC_MODE, RDMAC_MODE_ENABLE, silent);
7657         err |= tg3_stop_block(tp, SNDDATAC_MODE, SNDDATAC_MODE_ENABLE, silent);
7658         err |= tg3_stop_block(tp, DMAC_MODE, DMAC_MODE_ENABLE, silent);
7659         err |= tg3_stop_block(tp, SNDBDC_MODE, SNDBDC_MODE_ENABLE, silent);
7660
7661         tp->mac_mode &= ~MAC_MODE_TDE_ENABLE;
7662         tw32_f(MAC_MODE, tp->mac_mode);
7663         udelay(40);
7664
7665         tp->tx_mode &= ~TX_MODE_ENABLE;
7666         tw32_f(MAC_TX_MODE, tp->tx_mode);
7667
7668         for (i = 0; i < MAX_WAIT_CNT; i++) {
7669                 udelay(100);
7670                 if (!(tr32(MAC_TX_MODE) & TX_MODE_ENABLE))
7671                         break;
7672         }
7673         if (i >= MAX_WAIT_CNT) {
7674                 dev_err(&tp->pdev->dev,
7675                         "%s timed out, TX_MODE_ENABLE will not clear "
7676                         "MAC_TX_MODE=%08x\n", __func__, tr32(MAC_TX_MODE));
7677                 err |= -ENODEV;
7678         }
7679
7680         err |= tg3_stop_block(tp, HOSTCC_MODE, HOSTCC_MODE_ENABLE, silent);
7681         err |= tg3_stop_block(tp, WDMAC_MODE, WDMAC_MODE_ENABLE, silent);
7682         err |= tg3_stop_block(tp, MBFREE_MODE, MBFREE_MODE_ENABLE, silent);
7683
7684         tw32(FTQ_RESET, 0xffffffff);
7685         tw32(FTQ_RESET, 0x00000000);
7686
7687         err |= tg3_stop_block(tp, BUFMGR_MODE, BUFMGR_MODE_ENABLE, silent);
7688         err |= tg3_stop_block(tp, MEMARB_MODE, MEMARB_MODE_ENABLE, silent);
7689
7690         for (i = 0; i < tp->irq_cnt; i++) {
7691                 struct tg3_napi *tnapi = &tp->napi[i];
7692                 if (tnapi->hw_status)
7693                         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
7694         }
7695
7696         return err;
7697 }
7698
7699 /* Save PCI command register before chip reset */
7700 static void tg3_save_pci_state(struct tg3 *tp)
7701 {
7702         pci_read_config_word(tp->pdev, PCI_COMMAND, &tp->pci_cmd);
7703 }
7704
7705 /* Restore PCI state after chip reset */
7706 static void tg3_restore_pci_state(struct tg3 *tp)
7707 {
7708         u32 val;
7709
7710         /* Re-enable indirect register accesses. */
7711         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
7712                                tp->misc_host_ctrl);
7713
7714         /* Set MAX PCI retry to zero. */
7715         val = (PCISTATE_ROM_ENABLE | PCISTATE_ROM_RETRY_ENABLE);
7716         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
7717             tg3_flag(tp, PCIX_MODE))
7718                 val |= PCISTATE_RETRY_SAME_DMA;
7719         /* Allow reads and writes to the APE register and memory space. */
7720         if (tg3_flag(tp, ENABLE_APE))
7721                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
7722                        PCISTATE_ALLOW_APE_SHMEM_WR |
7723                        PCISTATE_ALLOW_APE_PSPACE_WR;
7724         pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, val);
7725
7726         pci_write_config_word(tp->pdev, PCI_COMMAND, tp->pci_cmd);
7727
7728         if (!tg3_flag(tp, PCI_EXPRESS)) {
7729                 pci_write_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
7730                                       tp->pci_cacheline_sz);
7731                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
7732                                       tp->pci_lat_timer);
7733         }
7734
7735         /* Make sure PCI-X relaxed ordering bit is clear. */
7736         if (tg3_flag(tp, PCIX_MODE)) {
7737                 u16 pcix_cmd;
7738
7739                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7740                                      &pcix_cmd);
7741                 pcix_cmd &= ~PCI_X_CMD_ERO;
7742                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
7743                                       pcix_cmd);
7744         }
7745
7746         if (tg3_flag(tp, 5780_CLASS)) {
7747
7748                 /* Chip reset on 5780 will reset the MSI enable bit,
7749                  * so we need to restore it.
7750                  */
7751                 if (tg3_flag(tp, USING_MSI)) {
7752                         u16 ctrl;
7753
7754                         pci_read_config_word(tp->pdev,
7755                                              tp->msi_cap + PCI_MSI_FLAGS,
7756                                              &ctrl);
7757                         pci_write_config_word(tp->pdev,
7758                                               tp->msi_cap + PCI_MSI_FLAGS,
7759                                               ctrl | PCI_MSI_FLAGS_ENABLE);
7760                         val = tr32(MSGINT_MODE);
7761                         tw32(MSGINT_MODE, val | MSGINT_MODE_ENABLE);
7762                 }
7763         }
7764 }
7765
7766 /* tp->lock is held. */
7767 static int tg3_chip_reset(struct tg3 *tp)
7768 {
7769         u32 val;
7770         void (*write_op)(struct tg3 *, u32, u32);
7771         int i, err;
7772
7773         tg3_nvram_lock(tp);
7774
7775         tg3_ape_lock(tp, TG3_APE_LOCK_GRC);
7776
7777         /* No matching tg3_nvram_unlock() after this because
7778          * chip reset below will undo the nvram lock.
7779          */
7780         tp->nvram_lock_cnt = 0;
7781
7782         /* GRC_MISC_CFG core clock reset will clear the memory
7783          * enable bit in PCI register 4 and the MSI enable bit
7784          * on some chips, so we save relevant registers here.
7785          */
7786         tg3_save_pci_state(tp);
7787
7788         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
7789             tg3_flag(tp, 5755_PLUS))
7790                 tw32(GRC_FASTBOOT_PC, 0);
7791
7792         /*
7793          * We must avoid the readl() that normally takes place.
7794          * It locks machines, causes machine checks, and other
7795          * fun things.  So, temporarily disable the 5701
7796          * hardware workaround, while we do the reset.
7797          */
7798         write_op = tp->write32;
7799         if (write_op == tg3_write_flush_reg32)
7800                 tp->write32 = tg3_write32;
7801
7802         /* Prevent the irq handler from reading or writing PCI registers
7803          * during chip reset when the memory enable bit in the PCI command
7804          * register may be cleared.  The chip does not generate interrupts
7805          * at this time, but the irq handler may still be called due to irq
7806          * sharing or irqpoll.
7807          */
7808         tg3_flag_set(tp, CHIP_RESETTING);
7809         for (i = 0; i < tp->irq_cnt; i++) {
7810                 struct tg3_napi *tnapi = &tp->napi[i];
7811                 if (tnapi->hw_status) {
7812                         tnapi->hw_status->status = 0;
7813                         tnapi->hw_status->status_tag = 0;
7814                 }
7815                 tnapi->last_tag = 0;
7816                 tnapi->last_irq_tag = 0;
7817         }
7818         smp_mb();
7819
7820         for (i = 0; i < tp->irq_cnt; i++)
7821                 synchronize_irq(tp->napi[i].irq_vec);
7822
7823         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
7824                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
7825                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
7826         }
7827
7828         /* do the reset */
7829         val = GRC_MISC_CFG_CORECLK_RESET;
7830
7831         if (tg3_flag(tp, PCI_EXPRESS)) {
7832                 /* Force PCIe 1.0a mode */
7833                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7834                     !tg3_flag(tp, 57765_PLUS) &&
7835                     tr32(TG3_PCIE_PHY_TSTCTL) ==
7836                     (TG3_PCIE_PHY_TSTCTL_PCIE10 | TG3_PCIE_PHY_TSTCTL_PSCRAM))
7837                         tw32(TG3_PCIE_PHY_TSTCTL, TG3_PCIE_PHY_TSTCTL_PSCRAM);
7838
7839                 if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0) {
7840                         tw32(GRC_MISC_CFG, (1 << 29));
7841                         val |= (1 << 29);
7842                 }
7843         }
7844
7845         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
7846                 tw32(VCPU_STATUS, tr32(VCPU_STATUS) | VCPU_STATUS_DRV_RESET);
7847                 tw32(GRC_VCPU_EXT_CTRL,
7848                      tr32(GRC_VCPU_EXT_CTRL) & ~GRC_VCPU_EXT_CTRL_HALT_CPU);
7849         }
7850
7851         /* Manage gphy power for all CPMU-absent PCIe devices. */
7852         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, CPMU_PRESENT))
7853                 val |= GRC_MISC_CFG_KEEP_GPHY_POWER;
7854
7855         tw32(GRC_MISC_CFG, val);
7856
7857         /* restore 5701 hardware bug workaround write method */
7858         tp->write32 = write_op;
7859
7860         /* Unfortunately, we have to delay before the PCI read back.
7861          * Some 575X chips will not even respond to a PCI cfg access
7862          * when the reset command is given to the chip.
7863          *
7864          * How do these hardware designers expect things to work
7865          * properly if the PCI write is posted for a long period
7866          * of time?  It is always necessary to have some method by
7867          * which a register read back can occur to push out the
7868          * write that does the reset.
7869          *
7870          * For most tg3 variants the trick below has worked.
7871          * Ho hum...
7872          */
7873         udelay(120);
7874
7875         /* Flush PCI posted writes.  The normal MMIO registers
7876          * are inaccessible at this time, so this is the only
7877          * reliable way to do it (actually, this is no longer
7878          * the case, see above).  I tried to use indirect
7879          * register read/write but this upset some 5701 variants.
7880          */
7881         pci_read_config_dword(tp->pdev, PCI_COMMAND, &val);
7882
7883         udelay(120);
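        /* The config-space read above is also the driver's generic
         * posted-write flush idiom: a PCI write may sit in a bridge's
         * posting queue until a read on the same path forces it out.
         * The MMIO flavor (roughly what tw32_f() expands to) is:
         *
         *    tw32(reg, val);    <- posted, may linger in a queue
         *    tr32(reg);         <- read back pushes the write out
         */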
7884
7885         if (tg3_flag(tp, PCI_EXPRESS) && pci_pcie_cap(tp->pdev)) {
7886                 u16 val16;
7887
7888                 if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A0) {
7889                         int i;
7890                         u32 cfg_val;
7891
7892                         /* Wait for link training to complete.  */
7893                         for (i = 0; i < 5000; i++)
7894                                 udelay(100);
7895
7896                         pci_read_config_dword(tp->pdev, 0xc4, &cfg_val);
7897                         pci_write_config_dword(tp->pdev, 0xc4,
7898                                                cfg_val | (1 << 15));
7899                 }
7900
7901                 /* Clear the "no snoop" and "relaxed ordering" bits. */
7902                 pci_read_config_word(tp->pdev,
7903                                      pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7904                                      &val16);
7905                 val16 &= ~(PCI_EXP_DEVCTL_RELAX_EN |
7906                            PCI_EXP_DEVCTL_NOSNOOP_EN);
7907                 /*
7908                  * Older PCIe devices only support the 128 byte
7909                  * MPS setting.  Enforce the restriction.
7910                  */
7911                 if (!tg3_flag(tp, CPMU_PRESENT))
7912                         val16 &= ~PCI_EXP_DEVCTL_PAYLOAD;
7913                 pci_write_config_word(tp->pdev,
7914                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVCTL,
7915                                       val16);
7916
7917                 /* Clear error status */
7918                 pci_write_config_word(tp->pdev,
7919                                       pci_pcie_cap(tp->pdev) + PCI_EXP_DEVSTA,
7920                                       PCI_EXP_DEVSTA_CED |
7921                                       PCI_EXP_DEVSTA_NFED |
7922                                       PCI_EXP_DEVSTA_FED |
7923                                       PCI_EXP_DEVSTA_URD);
7924         }
7925
7926         tg3_restore_pci_state(tp);
7927
7928         tg3_flag_clear(tp, CHIP_RESETTING);
7929         tg3_flag_clear(tp, ERROR_PROCESSED);
7930
7931         val = 0;
7932         if (tg3_flag(tp, 5780_CLASS))
7933                 val = tr32(MEMARB_MODE);
7934         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
7935
7936         if (tp->pci_chip_rev_id == CHIPREV_ID_5750_A3) {
7937                 tg3_stop_fw(tp);
7938                 tw32(0x5000, 0x400);
7939         }
7940
7941         tw32(GRC_MODE, tp->grc_mode);
7942
7943         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0) {
7944                 val = tr32(0xc4);
7945
7946                 tw32(0xc4, val | (1 << 15));
7947         }
7948
7949         if ((tp->nic_sram_data_cfg & NIC_SRAM_DATA_CFG_MINI_PCI) != 0 &&
7950             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
7951                 tp->pci_clock_ctrl |= CLOCK_CTRL_CLKRUN_OENABLE;
7952                 if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A0)
7953                         tp->pci_clock_ctrl |= CLOCK_CTRL_FORCE_CLKRUN;
7954                 tw32(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
7955         }
7956
7957         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
7958                 tp->mac_mode = MAC_MODE_PORT_MODE_TBI;
7959                 val = tp->mac_mode;
7960         } else if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
7961                 tp->mac_mode = MAC_MODE_PORT_MODE_GMII;
7962                 val = tp->mac_mode;
7963         } else
7964                 val = 0;
7965
7966         tw32_f(MAC_MODE, val);
7967         udelay(40);
7968
7969         tg3_ape_unlock(tp, TG3_APE_LOCK_GRC);
7970
7971         err = tg3_poll_fw(tp);
7972         if (err)
7973                 return err;
7974
7975         tg3_mdio_start(tp);
7976
7977         if (tg3_flag(tp, PCI_EXPRESS) &&
7978             tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
7979             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
7980             !tg3_flag(tp, 57765_PLUS)) {
7981                 val = tr32(0x7c00);
7982
7983                 tw32(0x7c00, val | (1 << 25));
7984         }
7985
7986         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
7987                 val = tr32(TG3_CPMU_CLCK_ORIDE);
7988                 tw32(TG3_CPMU_CLCK_ORIDE, val & ~CPMU_CLCK_ORIDE_MAC_ORIDE_EN);
7989         }
7990
7991         /* Reprobe ASF enable state.  */
7992         tg3_flag_clear(tp, ENABLE_ASF);
7993         tg3_flag_clear(tp, ASF_NEW_HANDSHAKE);
7994         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
7995         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
7996                 u32 nic_cfg;
7997
7998                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
7999                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
8000                         tg3_flag_set(tp, ENABLE_ASF);
8001                         tp->last_event_jiffies = jiffies;
8002                         if (tg3_flag(tp, 5750_PLUS))
8003                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
8004                 }
8005         }
8006
8007         return 0;
8008 }
8009
8010 static void tg3_get_nstats(struct tg3 *, struct rtnl_link_stats64 *);
8011 static void tg3_get_estats(struct tg3 *, struct tg3_ethtool_stats *);
8012
8013 /* tp->lock is held. */
8014 static int tg3_halt(struct tg3 *tp, int kind, int silent)
8015 {
8016         int err;
8017
8018         tg3_stop_fw(tp);
8019
8020         tg3_write_sig_pre_reset(tp, kind);
8021
8022         tg3_abort_hw(tp, silent);
8023         err = tg3_chip_reset(tp);
8024
8025         __tg3_set_mac_addr(tp, 0);
8026
8027         tg3_write_sig_legacy(tp, kind);
8028         tg3_write_sig_post_reset(tp, kind);
8029
8030         if (tp->hw_stats) {
8031                 /* Save the stats across chip resets... */
8032                 tg3_get_nstats(tp, &tp->net_stats_prev);
8033                 tg3_get_estats(tp, &tp->estats_prev);
8034
8035                 /* And make sure the next sample is new data */
8036                 memset(tp->hw_stats, 0, sizeof(struct tg3_hw_stats));
8037         }
8038
8039         if (err)
8040                 return err;
8041
8042         return 0;
8043 }
8044
8045 static int tg3_set_mac_addr(struct net_device *dev, void *p)
8046 {
8047         struct tg3 *tp = netdev_priv(dev);
8048         struct sockaddr *addr = p;
8049         int err = 0, skip_mac_1 = 0;
8050
8051         if (!is_valid_ether_addr(addr->sa_data))
8052                 return -EADDRNOTAVAIL;
8053
8054         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
8055
8056         if (!netif_running(dev))
8057                 return 0;
8058
8059         if (tg3_flag(tp, ENABLE_ASF)) {
8060                 u32 addr0_high, addr0_low, addr1_high, addr1_low;
8061
8062                 addr0_high = tr32(MAC_ADDR_0_HIGH);
8063                 addr0_low = tr32(MAC_ADDR_0_LOW);
8064                 addr1_high = tr32(MAC_ADDR_1_HIGH);
8065                 addr1_low = tr32(MAC_ADDR_1_LOW);
8066
8067                 /* Skip MAC addr 1 if ASF is using it. */
8068                 if ((addr0_high != addr1_high || addr0_low != addr1_low) &&
8069                     !(addr1_high == 0 && addr1_low == 0))
8070                         skip_mac_1 = 1;
8071         }
8072         spin_lock_bh(&tp->lock);
8073         __tg3_set_mac_addr(tp, skip_mac_1);
8074         spin_unlock_bh(&tp->lock);
8075
8076         return err;
8077 }
8078
8079 /* tp->lock is held. */
8080 static void tg3_set_bdinfo(struct tg3 *tp, u32 bdinfo_addr,
8081                            dma_addr_t mapping, u32 maxlen_flags,
8082                            u32 nic_addr)
8083 {
8084         tg3_write_mem(tp,
8085                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH),
8086                       ((u64) mapping >> 32));
8087         tg3_write_mem(tp,
8088                       (bdinfo_addr + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW),
8089                       ((u64) mapping & 0xffffffff));
8090         tg3_write_mem(tp,
8091                       (bdinfo_addr + TG3_BDINFO_MAXLEN_FLAGS),
8092                        maxlen_flags);
8093
8094         if (!tg3_flag(tp, 5705_PLUS))
8095                 tg3_write_mem(tp,
8096                               (bdinfo_addr + TG3_BDINFO_NIC_ADDR),
8097                               nic_addr);
8098 }
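/* For reference: a BDINFO block in NIC SRAM is four consecutive 32-bit
 * words -- host ring address high (+0x0) and low (+0x4), maxlen/flags
 * (+0x8) and, on pre-5705 parts, the NIC-local ring address (+0xc) --
 * which is why the ring setup code below steps through SRAM in
 * TG3_BDINFO_SIZE increments.
 */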
8099
8100 static void __tg3_set_coalesce(struct tg3 *tp, struct ethtool_coalesce *ec)
8101 {
8102         int i;
8103
8104         if (!tg3_flag(tp, ENABLE_TSS)) {
8105                 tw32(HOSTCC_TXCOL_TICKS, ec->tx_coalesce_usecs);
8106                 tw32(HOSTCC_TXMAX_FRAMES, ec->tx_max_coalesced_frames);
8107                 tw32(HOSTCC_TXCOAL_MAXF_INT, ec->tx_max_coalesced_frames_irq);
8108         } else {
8109                 tw32(HOSTCC_TXCOL_TICKS, 0);
8110                 tw32(HOSTCC_TXMAX_FRAMES, 0);
8111                 tw32(HOSTCC_TXCOAL_MAXF_INT, 0);
8112         }
8113
8114         if (!tg3_flag(tp, ENABLE_RSS)) {
8115                 tw32(HOSTCC_RXCOL_TICKS, ec->rx_coalesce_usecs);
8116                 tw32(HOSTCC_RXMAX_FRAMES, ec->rx_max_coalesced_frames);
8117                 tw32(HOSTCC_RXCOAL_MAXF_INT, ec->rx_max_coalesced_frames_irq);
8118         } else {
8119                 tw32(HOSTCC_RXCOL_TICKS, 0);
8120                 tw32(HOSTCC_RXMAX_FRAMES, 0);
8121                 tw32(HOSTCC_RXCOAL_MAXF_INT, 0);
8122         }
8123
8124         if (!tg3_flag(tp, 5705_PLUS)) {
8125                 u32 val = ec->stats_block_coalesce_usecs;
8126
8127                 tw32(HOSTCC_RXCOAL_TICK_INT, ec->rx_coalesce_usecs_irq);
8128                 tw32(HOSTCC_TXCOAL_TICK_INT, ec->tx_coalesce_usecs_irq);
8129
8130                 if (!netif_carrier_ok(tp->dev))
8131                         val = 0;
8132
8133                 tw32(HOSTCC_STAT_COAL_TICKS, val);
8134         }
8135
8136         for (i = 0; i < tp->irq_cnt - 1; i++) {
8137                 u32 reg;
8138
8139                 reg = HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18;
8140                 tw32(reg, ec->rx_coalesce_usecs);
8141                 reg = HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18;
8142                 tw32(reg, ec->rx_max_coalesced_frames);
8143                 reg = HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18;
8144                 tw32(reg, ec->rx_max_coalesced_frames_irq);
8145
8146                 if (tg3_flag(tp, ENABLE_TSS)) {
8147                         reg = HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18;
8148                         tw32(reg, ec->tx_coalesce_usecs);
8149                         reg = HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18;
8150                         tw32(reg, ec->tx_max_coalesced_frames);
8151                         reg = HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18;
8152                         tw32(reg, ec->tx_max_coalesced_frames_irq);
8153                 }
8154         }
8155
8156         for (; i < tp->irq_max - 1; i++) {
8157                 tw32(HOSTCC_RXCOL_TICKS_VEC1 + i * 0x18, 0);
8158                 tw32(HOSTCC_RXMAX_FRAMES_VEC1 + i * 0x18, 0);
8159                 tw32(HOSTCC_RXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8160
8161                 if (tg3_flag(tp, ENABLE_TSS)) {
8162                         tw32(HOSTCC_TXCOL_TICKS_VEC1 + i * 0x18, 0);
8163                         tw32(HOSTCC_TXMAX_FRAMES_VEC1 + i * 0x18, 0);
8164                         tw32(HOSTCC_TXCOAL_MAXF_INT_VEC1 + i * 0x18, 0);
8165                 }
8166         }
8167 }
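/* The per-vector host coalescing registers above sit at a fixed 0x18
 * stride from their _VEC1 base, so vector n (n >= 1) is programmed at,
 * e.g. (sketch):
 *
 *    tw32(HOSTCC_RXCOL_TICKS_VEC1 + (n - 1) * 0x18, usecs);
 */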
8168
8169 /* tp->lock is held. */
8170 static void tg3_rings_reset(struct tg3 *tp)
8171 {
8172         int i;
8173         u32 stblk, txrcb, rxrcb, limit;
8174         struct tg3_napi *tnapi = &tp->napi[0];
8175
8176         /* Disable all transmit rings but the first. */
8177         if (!tg3_flag(tp, 5705_PLUS))
8178                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 16;
8179         else if (tg3_flag(tp, 5717_PLUS))
8180                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 4;
8181         else if (tg3_flag(tp, 57765_CLASS))
8182                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE * 2;
8183         else
8184                 limit = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8185
8186         for (txrcb = NIC_SRAM_SEND_RCB + TG3_BDINFO_SIZE;
8187              txrcb < limit; txrcb += TG3_BDINFO_SIZE)
8188                 tg3_write_mem(tp, txrcb + TG3_BDINFO_MAXLEN_FLAGS,
8189                               BDINFO_FLAGS_DISABLED);
8190
8191
8192         /* Disable all receive return rings but the first. */
8193         if (tg3_flag(tp, 5717_PLUS))
8194                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 17;
8195         else if (!tg3_flag(tp, 5705_PLUS))
8196                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 16;
8197         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8198                  tg3_flag(tp, 57765_CLASS))
8199                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE * 4;
8200         else
8201                 limit = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8202
8203         for (rxrcb = NIC_SRAM_RCV_RET_RCB + TG3_BDINFO_SIZE;
8204              rxrcb < limit; rxrcb += TG3_BDINFO_SIZE)
8205                 tg3_write_mem(tp, rxrcb + TG3_BDINFO_MAXLEN_FLAGS,
8206                               BDINFO_FLAGS_DISABLED);
8207
8208         /* Disable interrupts */
8209         tw32_mailbox_f(tp->napi[0].int_mbox, 1);
8210         tp->napi[0].chk_msi_cnt = 0;
8211         tp->napi[0].last_rx_cons = 0;
8212         tp->napi[0].last_tx_cons = 0;
8213
8214         /* Zero mailbox registers. */
8215         if (tg3_flag(tp, SUPPORT_MSIX)) {
8216                 for (i = 1; i < tp->irq_max; i++) {
8217                         tp->napi[i].tx_prod = 0;
8218                         tp->napi[i].tx_cons = 0;
8219                         if (tg3_flag(tp, ENABLE_TSS))
8220                                 tw32_mailbox(tp->napi[i].prodmbox, 0);
8221                         tw32_rx_mbox(tp->napi[i].consmbox, 0);
8222                         tw32_mailbox_f(tp->napi[i].int_mbox, 1);
8223                         tp->napi[i].chk_msi_cnt = 0;
8224                         tp->napi[i].last_rx_cons = 0;
8225                         tp->napi[i].last_tx_cons = 0;
8226                 }
8227                 if (!tg3_flag(tp, ENABLE_TSS))
8228                         tw32_mailbox(tp->napi[0].prodmbox, 0);
8229         } else {
8230                 tp->napi[0].tx_prod = 0;
8231                 tp->napi[0].tx_cons = 0;
8232                 tw32_mailbox(tp->napi[0].prodmbox, 0);
8233                 tw32_rx_mbox(tp->napi[0].consmbox, 0);
8234         }
8235
8236         /* Make sure the NIC-based send BD rings are disabled. */
8237         if (!tg3_flag(tp, 5705_PLUS)) {
8238                 u32 mbox = MAILBOX_SNDNIC_PROD_IDX_0 + TG3_64BIT_REG_LOW;
8239                 for (i = 0; i < 16; i++)
8240                         tw32_tx_mbox(mbox + i * 8, 0);
8241         }
8242
8243         txrcb = NIC_SRAM_SEND_RCB;
8244         rxrcb = NIC_SRAM_RCV_RET_RCB;
8245
8246         /* Clear status block in ram. */
8247         memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8248
8249         /* Set status block DMA address */
8250         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8251              ((u64) tnapi->status_mapping >> 32));
8252         tw32(HOSTCC_STATUS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8253              ((u64) tnapi->status_mapping & 0xffffffff));
8254
8255         if (tnapi->tx_ring) {
8256                 tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8257                                (TG3_TX_RING_SIZE <<
8258                                 BDINFO_FLAGS_MAXLEN_SHIFT),
8259                                NIC_SRAM_TX_BUFFER_DESC);
8260                 txrcb += TG3_BDINFO_SIZE;
8261         }
8262
8263         if (tnapi->rx_rcb) {
8264                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8265                                (tp->rx_ret_ring_mask + 1) <<
8266                                 BDINFO_FLAGS_MAXLEN_SHIFT, 0);
8267                 rxrcb += TG3_BDINFO_SIZE;
8268         }
8269
8270         stblk = HOSTCC_STATBLCK_RING1;
8271
8272         for (i = 1, tnapi++; i < tp->irq_cnt; i++, tnapi++) {
8273                 u64 mapping = (u64)tnapi->status_mapping;
8274                 tw32(stblk + TG3_64BIT_REG_HIGH, mapping >> 32);
8275                 tw32(stblk + TG3_64BIT_REG_LOW, mapping & 0xffffffff);
8276
8277                 /* Clear status block in ram. */
8278                 memset(tnapi->hw_status, 0, TG3_HW_STATUS_SIZE);
8279
8280                 if (tnapi->tx_ring) {
8281                         tg3_set_bdinfo(tp, txrcb, tnapi->tx_desc_mapping,
8282                                        (TG3_TX_RING_SIZE <<
8283                                         BDINFO_FLAGS_MAXLEN_SHIFT),
8284                                        NIC_SRAM_TX_BUFFER_DESC);
8285                         txrcb += TG3_BDINFO_SIZE;
8286                 }
8287
8288                 tg3_set_bdinfo(tp, rxrcb, tnapi->rx_rcb_mapping,
8289                                ((tp->rx_ret_ring_mask + 1) <<
8290                                 BDINFO_FLAGS_MAXLEN_SHIFT), 0);
8291
8292                 stblk += 8;
8293                 rxrcb += TG3_BDINFO_SIZE;
8294         }
8295 }
8296
8297 static void tg3_setup_rxbd_thresholds(struct tg3 *tp)
8298 {
8299         u32 val, bdcache_maxcnt, host_rep_thresh, nic_rep_thresh;
8300
8301         if (!tg3_flag(tp, 5750_PLUS) ||
8302             tg3_flag(tp, 5780_CLASS) ||
8303             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
8304             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
8305             tg3_flag(tp, 57765_PLUS))
8306                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5700;
8307         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
8308                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787)
8309                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5755;
8310         else
8311                 bdcache_maxcnt = TG3_SRAM_RX_STD_BDCACHE_SIZE_5906;
8312
8313         nic_rep_thresh = min(bdcache_maxcnt / 2, tp->rx_std_max_post);
8314         host_rep_thresh = max_t(u32, tp->rx_pending / 8, 1);
8315
8316         val = min(nic_rep_thresh, host_rep_thresh);
8317         tw32(RCVBDI_STD_THRESH, val);
8318
8319         if (tg3_flag(tp, 57765_PLUS))
8320                 tw32(STD_REPLENISH_LWM, bdcache_maxcnt);
8321
8322         if (!tg3_flag(tp, JUMBO_CAPABLE) || tg3_flag(tp, 5780_CLASS))
8323                 return;
8324
8325         bdcache_maxcnt = TG3_SRAM_RX_JMB_BDCACHE_SIZE_5700;
8326
8327         host_rep_thresh = max_t(u32, tp->rx_jumbo_pending / 8, 1);
8328
8329         val = min(bdcache_maxcnt / 2, host_rep_thresh);
8330         tw32(RCVBDI_JUMBO_THRESH, val);
8331
8332         if (tg3_flag(tp, 57765_PLUS))
8333                 tw32(JMB_REPLENISH_LWM, bdcache_maxcnt);
8334 }
8335
8336 static inline u32 calc_crc(unsigned char *buf, int len)
8337 {
8338         u32 reg;
8339         u32 tmp;
8340         int j, k;
8341
8342         reg = 0xffffffff;
8343
8344         for (j = 0; j < len; j++) {
8345                 reg ^= buf[j];
8346
8347                 for (k = 0; k < 8; k++) {
8348                         tmp = reg & 0x01;
8349
8350                         reg >>= 1;
8351
8352                         if (tmp)
8353                                 reg ^= 0xedb88320;
8354                 }
8355         }
8356
8357         return ~reg;
8358 }
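/* calc_crc() is the standard reflected CRC-32 (polynomial 0xedb88320,
 * initial value 0xffffffff, final complement) -- the same CRC Ethernet
 * itself uses.  __tg3_set_rx_mode() below folds it into the 128-bit
 * multicast hash: the low 7 bits of ~crc pick one of 128 filter bits
 * spread across MAC_HASH_REG_0..3 (sketch):
 *
 *    crc    = calc_crc(ha->addr, ETH_ALEN);
 *    bit    = ~crc & 0x7f;           (7-bit hash value, 0..127)
 *    regidx = (bit & 0x60) >> 5;     (which of the four registers)
 *    bit   &= 0x1f;                  (bit position within it)
 */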
8359
8360 static void tg3_set_multi(struct tg3 *tp, unsigned int accept_all)
8361 {
8362         /* accept or reject all multicast frames */
8363         tw32(MAC_HASH_REG_0, accept_all ? 0xffffffff : 0);
8364         tw32(MAC_HASH_REG_1, accept_all ? 0xffffffff : 0);
8365         tw32(MAC_HASH_REG_2, accept_all ? 0xffffffff : 0);
8366         tw32(MAC_HASH_REG_3, accept_all ? 0xffffffff : 0);
8367 }
8368
8369 static void __tg3_set_rx_mode(struct net_device *dev)
8370 {
8371         struct tg3 *tp = netdev_priv(dev);
8372         u32 rx_mode;
8373
8374         rx_mode = tp->rx_mode & ~(RX_MODE_PROMISC |
8375                                   RX_MODE_KEEP_VLAN_TAG);
8376
8377 #if !defined(CONFIG_VLAN_8021Q) && !defined(CONFIG_VLAN_8021Q_MODULE)
8378         /* When ASF is in use, we always keep the RX_MODE_KEEP_VLAN_TAG
8379          * flag clear.
8380          */
8381         if (!tg3_flag(tp, ENABLE_ASF))
8382                 rx_mode |= RX_MODE_KEEP_VLAN_TAG;
8383 #endif
8384
8385         if (dev->flags & IFF_PROMISC) {
8386                 /* Promiscuous mode. */
8387                 rx_mode |= RX_MODE_PROMISC;
8388         } else if (dev->flags & IFF_ALLMULTI) {
8389                 /* Accept all multicast. */
8390                 tg3_set_multi(tp, 1);
8391         } else if (netdev_mc_empty(dev)) {
8392                 /* Reject all multicast. */
8393                 tg3_set_multi(tp, 0);
8394         } else {
8395                 /* Accept one or more multicast(s). */
8396                 struct netdev_hw_addr *ha;
8397                 u32 mc_filter[4] = { 0, };
8398                 u32 regidx;
8399                 u32 bit;
8400                 u32 crc;
8401
8402                 netdev_for_each_mc_addr(ha, dev) {
8403                         crc = calc_crc(ha->addr, ETH_ALEN);
8404                         bit = ~crc & 0x7f;
8405                         regidx = (bit & 0x60) >> 5;
8406                         bit &= 0x1f;
8407                         mc_filter[regidx] |= (1 << bit);
8408                 }
8409
8410                 tw32(MAC_HASH_REG_0, mc_filter[0]);
8411                 tw32(MAC_HASH_REG_1, mc_filter[1]);
8412                 tw32(MAC_HASH_REG_2, mc_filter[2]);
8413                 tw32(MAC_HASH_REG_3, mc_filter[3]);
8414         }
8415
8416         if (rx_mode != tp->rx_mode) {
8417                 tp->rx_mode = rx_mode;
8418                 tw32_f(MAC_RX_MODE, rx_mode);
8419                 udelay(10);
8420         }
8421 }
8422
8423 static void tg3_rss_init_dflt_indir_tbl(struct tg3 *tp)
8424 {
8425         int i;
8426
8427         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
8428                 tp->rss_ind_tbl[i] =
8429                         ethtool_rxfh_indir_default(i, tp->irq_cnt - 1);
8430 }
8431
8432 static void tg3_rss_check_indir_tbl(struct tg3 *tp)
8433 {
8434         int i;
8435
8436         if (!tg3_flag(tp, SUPPORT_MSIX))
8437                 return;
8438
8439         if (tp->irq_cnt <= 2) {
8440                 memset(&tp->rss_ind_tbl[0], 0, sizeof(tp->rss_ind_tbl));
8441                 return;
8442         }
8443
8444         /* Validate table against current IRQ count */
8445         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++) {
8446                 if (tp->rss_ind_tbl[i] >= tp->irq_cnt - 1)
8447                         break;
8448         }
8449
8450         if (i != TG3_RSS_INDIR_TBL_SIZE)
8451                 tg3_rss_init_dflt_indir_tbl(tp);
8452 }
8453
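     /* Each 32-bit MAC_RSS_INDIR_TBL_* register packs eight 4-bit ring
      * indices, with the first entry in the most significant nibble, so
      * the 128-entry table occupies sixteen consecutive registers.
      */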
8454 static void tg3_rss_write_indir_tbl(struct tg3 *tp)
8455 {
8456         int i = 0;
8457         u32 reg = MAC_RSS_INDIR_TBL_0;
8458
8459         while (i < TG3_RSS_INDIR_TBL_SIZE) {
8460                 u32 val = tp->rss_ind_tbl[i];
8461                 i++;
8462                 for (; i % 8; i++) {
8463                         val <<= 4;
8464                         val |= tp->rss_ind_tbl[i];
8465                 }
8466                 tw32(reg, val);
8467                 reg += 4;
8468         }
8469 }
8470
8471 /* tp->lock is held. */
8472 static int tg3_reset_hw(struct tg3 *tp, int reset_phy)
8473 {
8474         u32 val, rdmac_mode;
8475         int i, err, limit;
8476         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
8477
8478         tg3_disable_ints(tp);
8479
8480         tg3_stop_fw(tp);
8481
8482         tg3_write_sig_pre_reset(tp, RESET_KIND_INIT);
8483
8484         if (tg3_flag(tp, INIT_COMPLETE))
8485                 tg3_abort_hw(tp, 1);
8486
8487         /* Enable MAC control of LPI */
8488         if (tp->phy_flags & TG3_PHYFLG_EEE_CAP) {
8489                 tw32_f(TG3_CPMU_EEE_LNKIDL_CTRL,
8490                        TG3_CPMU_EEE_LNKIDL_PCIE_NL0 |
8491                        TG3_CPMU_EEE_LNKIDL_UART_IDL);
8492
8493                 tw32_f(TG3_CPMU_EEE_CTRL,
8494                        TG3_CPMU_EEE_CTRL_EXIT_20_1_US);
8495
8496                 val = TG3_CPMU_EEEMD_ERLY_L1_XIT_DET |
8497                       TG3_CPMU_EEEMD_LPI_IN_TX |
8498                       TG3_CPMU_EEEMD_LPI_IN_RX |
8499                       TG3_CPMU_EEEMD_EEE_ENABLE;
8500
8501                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8502                         val |= TG3_CPMU_EEEMD_SND_IDX_DET_EN;
8503
8504                 if (tg3_flag(tp, ENABLE_APE))
8505                         val |= TG3_CPMU_EEEMD_APE_TX_DET_EN;
8506
8507                 tw32_f(TG3_CPMU_EEE_MODE, val);
8508
8509                 tw32_f(TG3_CPMU_EEE_DBTMR1,
8510                        TG3_CPMU_DBTMR1_PCIEXIT_2047US |
8511                        TG3_CPMU_DBTMR1_LNKIDLE_2047US);
8512
8513                 tw32_f(TG3_CPMU_EEE_DBTMR2,
8514                        TG3_CPMU_DBTMR2_APE_TX_2047US |
8515                        TG3_CPMU_DBTMR2_TXIDXEQ_2047US);
8516         }
8517
8518         if (reset_phy)
8519                 tg3_phy_reset(tp);
8520
8521         err = tg3_chip_reset(tp);
8522         if (err)
8523                 return err;
8524
8525         tg3_write_sig_legacy(tp, RESET_KIND_INIT);
8526
8527         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX) {
8528                 val = tr32(TG3_CPMU_CTRL);
8529                 val &= ~(CPMU_CTRL_LINK_AWARE_MODE | CPMU_CTRL_LINK_IDLE_MODE);
8530                 tw32(TG3_CPMU_CTRL, val);
8531
8532                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8533                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8534                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8535                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8536
8537                 val = tr32(TG3_CPMU_LNK_AWARE_PWRMD);
8538                 val &= ~CPMU_LNK_AWARE_MACCLK_MASK;
8539                 val |= CPMU_LNK_AWARE_MACCLK_6_25;
8540                 tw32(TG3_CPMU_LNK_AWARE_PWRMD, val);
8541
8542                 val = tr32(TG3_CPMU_HST_ACC);
8543                 val &= ~CPMU_HST_ACC_MACCLK_MASK;
8544                 val |= CPMU_HST_ACC_MACCLK_6_25;
8545                 tw32(TG3_CPMU_HST_ACC, val);
8546         }
8547
8548         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
8549                 val = tr32(PCIE_PWR_MGMT_THRESH) & ~PCIE_PWR_MGMT_L1_THRESH_MSK;
8550                 val |= PCIE_PWR_MGMT_EXT_ASPM_TMR_EN |
8551                        PCIE_PWR_MGMT_L1_THRESH_4MS;
8552                 tw32(PCIE_PWR_MGMT_THRESH, val);
8553
8554                 val = tr32(TG3_PCIE_EIDLE_DELAY) & ~TG3_PCIE_EIDLE_DELAY_MASK;
8555                 tw32(TG3_PCIE_EIDLE_DELAY, val | TG3_PCIE_EIDLE_DELAY_13_CLKS);
8556
8557                 tw32(TG3_CORR_ERR_STAT, TG3_CORR_ERR_STAT_CLEAR);
8558
8559                 val = tr32(TG3_PCIE_LNKCTL) & ~TG3_PCIE_LNKCTL_L1_PLL_PD_EN;
8560                 tw32(TG3_PCIE_LNKCTL, val | TG3_PCIE_LNKCTL_L1_PLL_PD_DIS);
8561         }
8562
8563         if (tg3_flag(tp, L1PLLPD_EN)) {
8564                 u32 grc_mode = tr32(GRC_MODE);
8565
8566                 /* Access the lower 1K of PL PCIE block registers. */
8567                 val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8568                 tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8569
8570                 val = tr32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1);
8571                 tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL1,
8572                      val | TG3_PCIE_PL_LO_PHYCTL1_L1PLLPD_EN);
8573
8574                 tw32(GRC_MODE, grc_mode);
8575         }
8576
8577         if (tg3_flag(tp, 57765_CLASS)) {
8578                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0) {
8579                         u32 grc_mode = tr32(GRC_MODE);
8580
8581                         /* Access the lower 1K of PL PCIE block registers. */
8582                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8583                         tw32(GRC_MODE, val | GRC_MODE_PCIE_PL_SEL);
8584
8585                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8586                                    TG3_PCIE_PL_LO_PHYCTL5);
8587                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_PL_LO_PHYCTL5,
8588                              val | TG3_PCIE_PL_LO_PHYCTL5_DIS_L2CLKREQ);
8589
8590                         tw32(GRC_MODE, grc_mode);
8591                 }
8592
8593                 if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_57765_AX) {
8594                         u32 grc_mode = tr32(GRC_MODE);
8595
8596                         /* Access the lower 1K of DL PCIE block registers. */
8597                         val = grc_mode & ~GRC_MODE_PCIE_PORT_MASK;
8598                         tw32(GRC_MODE, val | GRC_MODE_PCIE_DL_SEL);
8599
8600                         val = tr32(TG3_PCIE_TLDLPL_PORT +
8601                                    TG3_PCIE_DL_LO_FTSMAX);
8602                         val &= ~TG3_PCIE_DL_LO_FTSMAX_MSK;
8603                         tw32(TG3_PCIE_TLDLPL_PORT + TG3_PCIE_DL_LO_FTSMAX,
8604                              val | TG3_PCIE_DL_LO_FTSMAX_VAL);
8605
8606                         tw32(GRC_MODE, grc_mode);
8607                 }
8608
8609                 val = tr32(TG3_CPMU_LSPD_10MB_CLK);
8610                 val &= ~CPMU_LSPD_10MB_MACCLK_MASK;
8611                 val |= CPMU_LSPD_10MB_MACCLK_6_25;
8612                 tw32(TG3_CPMU_LSPD_10MB_CLK, val);
8613         }
8614
8615         /* This works around an issue with Athlon chipsets on
8616          * B3 tigon3 silicon.  This bit has no effect on any
8617          * other revision.  But do not set this on PCI Express
8618          * chips and don't even touch the clocks if the CPMU is present.
8619          */
8620         if (!tg3_flag(tp, CPMU_PRESENT)) {
8621                 if (!tg3_flag(tp, PCI_EXPRESS))
8622                         tp->pci_clock_ctrl |= CLOCK_CTRL_DELAY_PCI_GRANT;
8623                 tw32_f(TG3PCI_CLOCK_CTRL, tp->pci_clock_ctrl);
8624         }
8625
8626         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0 &&
8627             tg3_flag(tp, PCIX_MODE)) {
8628                 val = tr32(TG3PCI_PCISTATE);
8629                 val |= PCISTATE_RETRY_SAME_DMA;
8630                 tw32(TG3PCI_PCISTATE, val);
8631         }
8632
8633         if (tg3_flag(tp, ENABLE_APE)) {
8634                 /* Allow reads and writes to the
8635                  * APE register and memory space.
8636                  */
8637                 val = tr32(TG3PCI_PCISTATE);
8638                 val |= PCISTATE_ALLOW_APE_CTLSPC_WR |
8639                        PCISTATE_ALLOW_APE_SHMEM_WR |
8640                        PCISTATE_ALLOW_APE_PSPACE_WR;
8641                 tw32(TG3PCI_PCISTATE, val);
8642         }
8643
8644         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_BX) {
8645                 /* Enable some hw fixes.  */
8646                 val = tr32(TG3PCI_MSI_DATA);
8647                 val |= (1 << 26) | (1 << 28) | (1 << 29);
8648                 tw32(TG3PCI_MSI_DATA, val);
8649         }
8650
8651         /* Descriptor ring init may make accesses to the
8652          * NIC SRAM area to set up the TX descriptors, so we
8653          * can only do this after the hardware has been
8654          * successfully reset.
8655          */
8656         err = tg3_init_rings(tp);
8657         if (err)
8658                 return err;
8659
8660         if (tg3_flag(tp, 57765_PLUS)) {
8661                 val = tr32(TG3PCI_DMA_RW_CTRL) &
8662                       ~DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
8663                 if (tp->pci_chip_rev_id == CHIPREV_ID_57765_A0)
8664                         val &= ~DMA_RWCTRL_CRDRDR_RDMA_MRRS_MSK;
8665                 if (!tg3_flag(tp, 57765_CLASS) &&
8666                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717)
8667                         val |= DMA_RWCTRL_TAGGED_STAT_WA;
8668                 tw32(TG3PCI_DMA_RW_CTRL, val | tp->dma_rwctrl);
8669         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5784 &&
8670                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5761) {
8671                 /* This value is determined during the probe time DMA
8672                  * engine test, tg3_test_dma.
8673                  */
8674                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
8675         }
8676
8677         tp->grc_mode &= ~(GRC_MODE_HOST_SENDBDS |
8678                           GRC_MODE_4X_NIC_SEND_RINGS |
8679                           GRC_MODE_NO_TX_PHDR_CSUM |
8680                           GRC_MODE_NO_RX_PHDR_CSUM);
8681         tp->grc_mode |= GRC_MODE_HOST_SENDBDS;
8682
8683         /* Pseudo-header checksum is done by hardware logic and not
8684          * the offload processors, so make the chip do the pseudo-
8685          * header checksums on receive.  For transmit it is more
8686          * convenient to do the pseudo-header checksum in software
8687          * as Linux does that on transmit for us in all cases.
8688          */
8689         tp->grc_mode |= GRC_MODE_NO_TX_PHDR_CSUM;
8690
8691         tw32(GRC_MODE,
8692              tp->grc_mode |
8693              (GRC_MODE_IRQ_ON_MAC_ATTN | GRC_MODE_HOST_STACKUP));
8694
8695         /* Set up the timer prescaler register.  Clock is always 66 MHz. */
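             /* A prescaler value of N divides the clock by N + 1, so 65
              * presumably yields a 1 MHz (1 usec) tick from the 66 MHz clock.
              */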
8696         val = tr32(GRC_MISC_CFG);
8697         val &= ~0xff;
8698         val |= (65 << GRC_MISC_CFG_PRESCALAR_SHIFT);
8699         tw32(GRC_MISC_CFG, val);
8700
8701         /* Initialize MBUF/DESC pool. */
8702         if (tg3_flag(tp, 5750_PLUS)) {
8703                 /* Do nothing.  */
8704         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5705) {
8705                 tw32(BUFMGR_MB_POOL_ADDR, NIC_SRAM_MBUF_POOL_BASE);
8706                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
8707                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE64);
8708                 else
8709                         tw32(BUFMGR_MB_POOL_SIZE, NIC_SRAM_MBUF_POOL_SIZE96);
8710                 tw32(BUFMGR_DMA_DESC_POOL_ADDR, NIC_SRAM_DMA_DESC_POOL_BASE);
8711                 tw32(BUFMGR_DMA_DESC_POOL_SIZE, NIC_SRAM_DMA_DESC_POOL_SIZE);
8712         } else if (tg3_flag(tp, TSO_CAPABLE)) {
8713                 int fw_len;
8714
8715                 fw_len = tp->fw_len;
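                     /* Round the firmware length up to a 128-byte boundary
                      * so the mbuf pool carved out after the TSO firmware
                      * image starts on an aligned address.
                      */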
8716                 fw_len = (fw_len + (0x80 - 1)) & ~(0x80 - 1);
8717                 tw32(BUFMGR_MB_POOL_ADDR,
8718                      NIC_SRAM_MBUF_POOL_BASE5705 + fw_len);
8719                 tw32(BUFMGR_MB_POOL_SIZE,
8720                      NIC_SRAM_MBUF_POOL_SIZE5705 - fw_len - 0xa00);
8721         }
8722
8723         if (tp->dev->mtu <= ETH_DATA_LEN) {
8724                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8725                      tp->bufmgr_config.mbuf_read_dma_low_water);
8726                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8727                      tp->bufmgr_config.mbuf_mac_rx_low_water);
8728                 tw32(BUFMGR_MB_HIGH_WATER,
8729                      tp->bufmgr_config.mbuf_high_water);
8730         } else {
8731                 tw32(BUFMGR_MB_RDMA_LOW_WATER,
8732                      tp->bufmgr_config.mbuf_read_dma_low_water_jumbo);
8733                 tw32(BUFMGR_MB_MACRX_LOW_WATER,
8734                      tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo);
8735                 tw32(BUFMGR_MB_HIGH_WATER,
8736                      tp->bufmgr_config.mbuf_high_water_jumbo);
8737         }
8738         tw32(BUFMGR_DMA_LOW_WATER,
8739              tp->bufmgr_config.dma_low_water);
8740         tw32(BUFMGR_DMA_HIGH_WATER,
8741              tp->bufmgr_config.dma_high_water);
8742
8743         val = BUFMGR_MODE_ENABLE | BUFMGR_MODE_ATTN_ENABLE;
8744         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
8745                 val |= BUFMGR_MODE_NO_TX_UNDERRUN;
8746         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
8747             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8748             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0)
8749                 val |= BUFMGR_MODE_MBLOW_ATTN_ENAB;
8750         tw32(BUFMGR_MODE, val);
8751         for (i = 0; i < 2000; i++) {
8752                 if (tr32(BUFMGR_MODE) & BUFMGR_MODE_ENABLE)
8753                         break;
8754                 udelay(10);
8755         }
8756         if (i >= 2000) {
8757                 netdev_err(tp->dev, "%s cannot enable BUFMGR\n", __func__);
8758                 return -ENODEV;
8759         }
8760
8761         if (tp->pci_chip_rev_id == CHIPREV_ID_5906_A1)
8762                 tw32(ISO_PKT_TX, (tr32(ISO_PKT_TX) & ~0x3) | 0x2);
8763
8764         tg3_setup_rxbd_thresholds(tp);
8765
8766         /* Initialize TG3_BDINFO's at:
8767          *  RCVDBDI_STD_BD:     standard eth size rx ring
8768          *  RCVDBDI_JUMBO_BD:   jumbo frame rx ring
8769          *  RCVDBDI_MINI_BD:    small frame rx ring (??? does not work)
8770          *
8771          * like so:
8772          *  TG3_BDINFO_HOST_ADDR:       high/low parts of DMA address of ring
8773          *  TG3_BDINFO_MAXLEN_FLAGS:    (rx max buffer size << 16) |
8774          *                              ring attribute flags
8775          *  TG3_BDINFO_NIC_ADDR:        location of descriptors in nic SRAM
8776          *
8777          * Standard receive ring @ NIC_SRAM_RX_BUFFER_DESC, 512 entries.
8778          * Jumbo receive ring @ NIC_SRAM_RX_JUMBO_BUFFER_DESC, 256 entries.
8779          *
8780          * The size of each ring is fixed in the firmware, but the location is
8781          * configurable.
8782          */
8783         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8784              ((u64) tpr->rx_std_mapping >> 32));
8785         tw32(RCVDBDI_STD_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8786              ((u64) tpr->rx_std_mapping & 0xffffffff));
8787         if (!tg3_flag(tp, 5717_PLUS))
8788                 tw32(RCVDBDI_STD_BD + TG3_BDINFO_NIC_ADDR,
8789                      NIC_SRAM_RX_BUFFER_DESC);
8790
8791         /* Disable the mini ring */
8792         if (!tg3_flag(tp, 5705_PLUS))
8793                 tw32(RCVDBDI_MINI_BD + TG3_BDINFO_MAXLEN_FLAGS,
8794                      BDINFO_FLAGS_DISABLED);
8795
8796         /* Program the jumbo buffer descriptor ring control
8797          * blocks on those devices that have them.
8798          */
8799         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
8800             (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))) {
8801
8802                 if (tg3_flag(tp, JUMBO_RING_ENABLE)) {
8803                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_HIGH,
8804                              ((u64) tpr->rx_jmb_mapping >> 32));
8805                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_HOST_ADDR + TG3_64BIT_REG_LOW,
8806                              ((u64) tpr->rx_jmb_mapping & 0xffffffff));
8807                         val = TG3_RX_JMB_RING_SIZE(tp) <<
8808                               BDINFO_FLAGS_MAXLEN_SHIFT;
8809                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8810                              val | BDINFO_FLAGS_USE_EXT_RECV);
8811                         if (!tg3_flag(tp, USE_JUMBO_BDFLAG) ||
8812                             tg3_flag(tp, 57765_CLASS))
8813                                 tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_NIC_ADDR,
8814                                      NIC_SRAM_RX_JUMBO_BUFFER_DESC);
8815                 } else {
8816                         tw32(RCVDBDI_JUMBO_BD + TG3_BDINFO_MAXLEN_FLAGS,
8817                              BDINFO_FLAGS_DISABLED);
8818                 }
8819
8820                 if (tg3_flag(tp, 57765_PLUS)) {
8821                         val = TG3_RX_STD_RING_SIZE(tp);
8822                         val <<= BDINFO_FLAGS_MAXLEN_SHIFT;
8823                         val |= (TG3_RX_STD_DMA_SZ << 2);
8824                 } else
8825                         val = TG3_RX_STD_DMA_SZ << BDINFO_FLAGS_MAXLEN_SHIFT;
8826         } else
8827                 val = TG3_RX_STD_MAX_SIZE_5700 << BDINFO_FLAGS_MAXLEN_SHIFT;
8828
8829         tw32(RCVDBDI_STD_BD + TG3_BDINFO_MAXLEN_FLAGS, val);
8830
8831         tpr->rx_std_prod_idx = tp->rx_pending;
8832         tw32_rx_mbox(TG3_RX_STD_PROD_IDX_REG, tpr->rx_std_prod_idx);
8833
8834         tpr->rx_jmb_prod_idx =
8835                 tg3_flag(tp, JUMBO_RING_ENABLE) ? tp->rx_jumbo_pending : 0;
8836         tw32_rx_mbox(TG3_RX_JMB_PROD_IDX_REG, tpr->rx_jmb_prod_idx);
8837
8838         tg3_rings_reset(tp);
8839
8840         /* Initialize MAC address and backoff seed. */
8841         __tg3_set_mac_addr(tp, 0);
8842
8843         /* MTU + ethernet header + FCS + optional VLAN tag */
8844         tw32(MAC_RX_MTU_SIZE,
8845              tp->dev->mtu + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
8846
8847         /* The slot time is changed by tg3_setup_phy if we
8848          * run at gigabit with half duplex.
8849          */
8850         val = (2 << TX_LENGTHS_IPG_CRS_SHIFT) |
8851               (6 << TX_LENGTHS_IPG_SHIFT) |
8852               (32 << TX_LENGTHS_SLOT_TIME_SHIFT);
8853
8854         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8855                 val |= tr32(MAC_TX_LENGTHS) &
8856                        (TX_LENGTHS_JMB_FRM_LEN_MSK |
8857                         TX_LENGTHS_CNT_DWN_VAL_MSK);
8858
8859         tw32(MAC_TX_LENGTHS, val);
8860
8861         /* Receive rules. */
8862         tw32(MAC_RCV_RULE_CFG, RCV_RULE_CFG_DEFAULT_CLASS);
8863         tw32(RCVLPC_CONFIG, 0x0181);
8864
8865         /* Calculate the RDMAC_MODE setting early; we need it to determine
8866          * the RCVLPC_STATE_ENABLE mask.
8867          */
8868         rdmac_mode = (RDMAC_MODE_ENABLE | RDMAC_MODE_TGTABORT_ENAB |
8869                       RDMAC_MODE_MSTABORT_ENAB | RDMAC_MODE_PARITYERR_ENAB |
8870                       RDMAC_MODE_ADDROFLOW_ENAB | RDMAC_MODE_FIFOOFLOW_ENAB |
8871                       RDMAC_MODE_FIFOURUN_ENAB | RDMAC_MODE_FIFOOREAD_ENAB |
8872                       RDMAC_MODE_LNGREAD_ENAB);
8873
8874         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717)
8875                 rdmac_mode |= RDMAC_MODE_MULT_DMA_RD_DIS;
8876
8877         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8878             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8879             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8880                 rdmac_mode |= RDMAC_MODE_BD_SBD_CRPT_ENAB |
8881                               RDMAC_MODE_MBUF_RBD_CRPT_ENAB |
8882                               RDMAC_MODE_MBUF_SBD_CRPT_ENAB;
8883
8884         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
8885             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
8886                 if (tg3_flag(tp, TSO_CAPABLE) &&
8887                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
8888                         rdmac_mode |= RDMAC_MODE_FIFO_SIZE_128;
8889                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
8890                            !tg3_flag(tp, IS_5788)) {
8891                         rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8892                 }
8893         }
8894
8895         if (tg3_flag(tp, PCI_EXPRESS))
8896                 rdmac_mode |= RDMAC_MODE_FIFO_LONG_BURST;
8897
8898         if (tg3_flag(tp, HW_TSO_1) ||
8899             tg3_flag(tp, HW_TSO_2) ||
8900             tg3_flag(tp, HW_TSO_3))
8901                 rdmac_mode |= RDMAC_MODE_IPV4_LSO_EN;
8902
8903         if (tg3_flag(tp, 57765_PLUS) ||
8904             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8905             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
8906                 rdmac_mode |= RDMAC_MODE_IPV6_LSO_EN;
8907
8908         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
8909                 rdmac_mode |= tr32(RDMAC_MODE) & RDMAC_MODE_H2BNC_VLAN_DET;
8910
8911         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
8912             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
8913             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
8914             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
8915             tg3_flag(tp, 57765_PLUS)) {
8916                 val = tr32(TG3_RDMA_RSRVCTRL_REG);
8917                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8918                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8919                         val &= ~(TG3_RDMA_RSRVCTRL_TXMRGN_MASK |
8920                                  TG3_RDMA_RSRVCTRL_FIFO_LWM_MASK |
8921                                  TG3_RDMA_RSRVCTRL_FIFO_HWM_MASK);
8922                         val |= TG3_RDMA_RSRVCTRL_TXMRGN_320B |
8923                                TG3_RDMA_RSRVCTRL_FIFO_LWM_1_5K |
8924                                TG3_RDMA_RSRVCTRL_FIFO_HWM_1_5K;
8925                 }
8926                 tw32(TG3_RDMA_RSRVCTRL_REG,
8927                      val | TG3_RDMA_RSRVCTRL_FIFO_OFLW_FIX);
8928         }
8929
8930         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
8931             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
8932                 val = tr32(TG3_LSO_RD_DMA_CRPTEN_CTRL);
8933                 tw32(TG3_LSO_RD_DMA_CRPTEN_CTRL, val |
8934                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_BD_4K |
8935                      TG3_LSO_RD_DMA_CRPTEN_CTRL_BLEN_LSO_4K);
8936         }
8937
8938         /* Receive/send statistics. */
8939         if (tg3_flag(tp, 5750_PLUS)) {
8940                 val = tr32(RCVLPC_STATS_ENABLE);
8941                 val &= ~RCVLPC_STATSENAB_DACK_FIX;
8942                 tw32(RCVLPC_STATS_ENABLE, val);
8943         } else if ((rdmac_mode & RDMAC_MODE_FIFO_SIZE_128) &&
8944                    tg3_flag(tp, TSO_CAPABLE)) {
8945                 val = tr32(RCVLPC_STATS_ENABLE);
8946                 val &= ~RCVLPC_STATSENAB_LNGBRST_RFIX;
8947                 tw32(RCVLPC_STATS_ENABLE, val);
8948         } else {
8949                 tw32(RCVLPC_STATS_ENABLE, 0xffffff);
8950         }
8951         tw32(RCVLPC_STATSCTRL, RCVLPC_STATSCTRL_ENABLE);
8952         tw32(SNDDATAI_STATSENAB, 0xffffff);
8953         tw32(SNDDATAI_STATSCTRL,
8954              (SNDDATAI_SCTRL_ENABLE |
8955               SNDDATAI_SCTRL_FASTUPD));
8956
8957         /* Set up the host coalescing engine. */
8958         tw32(HOSTCC_MODE, 0);
8959         for (i = 0; i < 2000; i++) {
8960                 if (!(tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE))
8961                         break;
8962                 udelay(10);
8963         }
8964
8965         __tg3_set_coalesce(tp, &tp->coal);
8966
8967         if (!tg3_flag(tp, 5705_PLUS)) {
8968                 /* Status/statistics block address.  See tg3_timer,
8969                  * the tg3_periodic_fetch_stats call there, and
8970                  * tg3_get_stats to see how this works for 5705/5750 chips.
8971                  */
8972                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_HIGH,
8973                      ((u64) tp->stats_mapping >> 32));
8974                 tw32(HOSTCC_STATS_BLK_HOST_ADDR + TG3_64BIT_REG_LOW,
8975                      ((u64) tp->stats_mapping & 0xffffffff));
8976                 tw32(HOSTCC_STATS_BLK_NIC_ADDR, NIC_SRAM_STATS_BLK);
8977
8978                 tw32(HOSTCC_STATUS_BLK_NIC_ADDR, NIC_SRAM_STATUS_BLK);
8979
8980                 /* Clear statistics and status block memory areas */
8981                 for (i = NIC_SRAM_STATS_BLK;
8982                      i < NIC_SRAM_STATUS_BLK + TG3_HW_STATUS_SIZE;
8983                      i += sizeof(u32)) {
8984                         tg3_write_mem(tp, i, 0);
8985                         udelay(40);
8986                 }
8987         }
8988
8989         tw32(HOSTCC_MODE, HOSTCC_MODE_ENABLE | tp->coalesce_mode);
8990
8991         tw32(RCVCC_MODE, RCVCC_MODE_ENABLE | RCVCC_MODE_ATTN_ENABLE);
8992         tw32(RCVLPC_MODE, RCVLPC_MODE_ENABLE);
8993         if (!tg3_flag(tp, 5705_PLUS))
8994                 tw32(RCVLSC_MODE, RCVLSC_MODE_ENABLE | RCVLSC_MODE_ATTN_ENABLE);
8995
8996         if (tp->phy_flags & TG3_PHYFLG_MII_SERDES) {
8997                 tp->phy_flags &= ~TG3_PHYFLG_PARALLEL_DETECT;
8998                 /* reset to prevent losing 1st rx packet intermittently */
8999                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9000                 udelay(10);
9001         }
9002
9003         tp->mac_mode |= MAC_MODE_TXSTAT_ENABLE | MAC_MODE_RXSTAT_ENABLE |
9004                         MAC_MODE_TDE_ENABLE | MAC_MODE_RDE_ENABLE |
9005                         MAC_MODE_FHDE_ENABLE;
9006         if (tg3_flag(tp, ENABLE_APE))
9007                 tp->mac_mode |= MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
9008         if (!tg3_flag(tp, 5705_PLUS) &&
9009             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9010             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
9011                 tp->mac_mode |= MAC_MODE_LINK_POLARITY;
9012         tw32_f(MAC_MODE, tp->mac_mode | MAC_MODE_RXSTAT_CLEAR | MAC_MODE_TXSTAT_CLEAR);
9013         udelay(40);
9014
9015         /* tp->grc_local_ctrl is partially set up during tg3_get_invariants().
9016          * If TG3_FLAG_IS_NIC is zero, we should read the
9017          * register to preserve the GPIO settings for LOMs. The GPIOs,
9018          * whether used as inputs or outputs, are set by boot code after
9019          * reset.
9020          */
9021         if (!tg3_flag(tp, IS_NIC)) {
9022                 u32 gpio_mask;
9023
9024                 gpio_mask = GRC_LCLCTRL_GPIO_OE0 | GRC_LCLCTRL_GPIO_OE1 |
9025                             GRC_LCLCTRL_GPIO_OE2 | GRC_LCLCTRL_GPIO_OUTPUT0 |
9026                             GRC_LCLCTRL_GPIO_OUTPUT1 | GRC_LCLCTRL_GPIO_OUTPUT2;
9027
9028                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
9029                         gpio_mask |= GRC_LCLCTRL_GPIO_OE3 |
9030                                      GRC_LCLCTRL_GPIO_OUTPUT3;
9031
9032                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
9033                         gpio_mask |= GRC_LCLCTRL_GPIO_UART_SEL;
9034
9035                 tp->grc_local_ctrl &= ~gpio_mask;
9036                 tp->grc_local_ctrl |= tr32(GRC_LOCAL_CTRL) & gpio_mask;
9037
9038                 /* GPIO1 must be driven high for eeprom write protect */
9039                 if (tg3_flag(tp, EEPROM_WRITE_PROT))
9040                         tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
9041                                                GRC_LCLCTRL_GPIO_OUTPUT1);
9042         }
9043         tw32_f(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9044         udelay(100);
9045
9046         if (tg3_flag(tp, USING_MSIX)) {
9047                 val = tr32(MSGINT_MODE);
9048                 val |= MSGINT_MODE_ENABLE;
9049                 if (tp->irq_cnt > 1)
9050                         val |= MSGINT_MODE_MULTIVEC_EN;
9051                 if (!tg3_flag(tp, 1SHOT_MSI))
9052                         val |= MSGINT_MODE_ONE_SHOT_DISABLE;
9053                 tw32(MSGINT_MODE, val);
9054         }
9055
9056         if (!tg3_flag(tp, 5705_PLUS)) {
9057                 tw32_f(DMAC_MODE, DMAC_MODE_ENABLE);
9058                 udelay(40);
9059         }
9060
9061         val = (WDMAC_MODE_ENABLE | WDMAC_MODE_TGTABORT_ENAB |
9062                WDMAC_MODE_MSTABORT_ENAB | WDMAC_MODE_PARITYERR_ENAB |
9063                WDMAC_MODE_ADDROFLOW_ENAB | WDMAC_MODE_FIFOOFLOW_ENAB |
9064                WDMAC_MODE_FIFOURUN_ENAB | WDMAC_MODE_FIFOOREAD_ENAB |
9065                WDMAC_MODE_LNGREAD_ENAB);
9066
9067         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
9068             tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
9069                 if (tg3_flag(tp, TSO_CAPABLE) &&
9070                     (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 ||
9071                      tp->pci_chip_rev_id == CHIPREV_ID_5705_A2)) {
9072                         /* nothing */
9073                 } else if (!(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH) &&
9074                            !tg3_flag(tp, IS_5788)) {
9075                         val |= WDMAC_MODE_RX_ACCEL;
9076                 }
9077         }
9078
9079         /* Enable host coalescing bug fix */
9080         if (tg3_flag(tp, 5755_PLUS))
9081                 val |= WDMAC_MODE_STATUS_TAG_FIX;
9082
9083         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
9084                 val |= WDMAC_MODE_BURST_ALL_DATA;
9085
9086         tw32_f(WDMAC_MODE, val);
9087         udelay(40);
9088
9089         if (tg3_flag(tp, PCIX_MODE)) {
9090                 u16 pcix_cmd;
9091
9092                 pci_read_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9093                                      &pcix_cmd);
9094                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703) {
9095                         pcix_cmd &= ~PCI_X_CMD_MAX_READ;
9096                         pcix_cmd |= PCI_X_CMD_READ_2K;
9097                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
9098                         pcix_cmd &= ~(PCI_X_CMD_MAX_SPLIT | PCI_X_CMD_MAX_READ);
9099                         pcix_cmd |= PCI_X_CMD_READ_2K;
9100                 }
9101                 pci_write_config_word(tp->pdev, tp->pcix_cap + PCI_X_CMD,
9102                                       pcix_cmd);
9103         }
9104
9105         tw32_f(RDMAC_MODE, rdmac_mode);
9106         udelay(40);
9107
9108         tw32(RCVDCC_MODE, RCVDCC_MODE_ENABLE | RCVDCC_MODE_ATTN_ENABLE);
9109         if (!tg3_flag(tp, 5705_PLUS))
9110                 tw32(MBFREE_MODE, MBFREE_MODE_ENABLE);
9111
9112         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
9113                 tw32(SNDDATAC_MODE,
9114                      SNDDATAC_MODE_ENABLE | SNDDATAC_MODE_CDELAY);
9115         else
9116                 tw32(SNDDATAC_MODE, SNDDATAC_MODE_ENABLE);
9117
9118         tw32(SNDBDC_MODE, SNDBDC_MODE_ENABLE | SNDBDC_MODE_ATTN_ENABLE);
9119         tw32(RCVBDI_MODE, RCVBDI_MODE_ENABLE | RCVBDI_MODE_RCB_ATTN_ENAB);
9120         val = RCVDBDI_MODE_ENABLE | RCVDBDI_MODE_INV_RING_SZ;
9121         if (tg3_flag(tp, LRG_PROD_RING_CAP))
9122                 val |= RCVDBDI_MODE_LRG_RING_SZ;
9123         tw32(RCVDBDI_MODE, val);
9124         tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE);
9125         if (tg3_flag(tp, HW_TSO_1) ||
9126             tg3_flag(tp, HW_TSO_2) ||
9127             tg3_flag(tp, HW_TSO_3))
9128                 tw32(SNDDATAI_MODE, SNDDATAI_MODE_ENABLE | 0x8);
9129         val = SNDBDI_MODE_ENABLE | SNDBDI_MODE_ATTN_ENABLE;
9130         if (tg3_flag(tp, ENABLE_TSS))
9131                 val |= SNDBDI_MODE_MULTI_TXQ_EN;
9132         tw32(SNDBDI_MODE, val);
9133         tw32(SNDBDS_MODE, SNDBDS_MODE_ENABLE | SNDBDS_MODE_ATTN_ENABLE);
9134
9135         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9136                 err = tg3_load_5701_a0_firmware_fix(tp);
9137                 if (err)
9138                         return err;
9139         }
9140
9141         if (tg3_flag(tp, TSO_CAPABLE)) {
9142                 err = tg3_load_tso_firmware(tp);
9143                 if (err)
9144                         return err;
9145         }
9146
9147         tp->tx_mode = TX_MODE_ENABLE;
9148
9149         if (tg3_flag(tp, 5755_PLUS) ||
9150             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
9151                 tp->tx_mode |= TX_MODE_MBUF_LOCKUP_FIX;
9152
9153         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9154                 val = TX_MODE_JMB_FRM_LEN | TX_MODE_CNT_DN_MODE;
9155                 tp->tx_mode &= ~val;
9156                 tp->tx_mode |= tr32(MAC_TX_MODE) & val;
9157         }
9158
9159         tw32_f(MAC_TX_MODE, tp->tx_mode);
9160         udelay(100);
9161
9162         if (tg3_flag(tp, ENABLE_RSS)) {
9163                 tg3_rss_write_indir_tbl(tp);
9164
9165                 /* Setup the "secret" hash key. */
9166                 tw32(MAC_RSS_HASH_KEY_0, 0x5f865437);
9167                 tw32(MAC_RSS_HASH_KEY_1, 0xe4ac62cc);
9168                 tw32(MAC_RSS_HASH_KEY_2, 0x50103a45);
9169                 tw32(MAC_RSS_HASH_KEY_3, 0x36621985);
9170                 tw32(MAC_RSS_HASH_KEY_4, 0xbf14c0e8);
9171                 tw32(MAC_RSS_HASH_KEY_5, 0x1bc27a1e);
9172                 tw32(MAC_RSS_HASH_KEY_6, 0x84f4b556);
9173                 tw32(MAC_RSS_HASH_KEY_7, 0x094ea6fe);
9174                 tw32(MAC_RSS_HASH_KEY_8, 0x7dda01e7);
9175                 tw32(MAC_RSS_HASH_KEY_9, 0xc04d7481);
9176         }
9177
9178         tp->rx_mode = RX_MODE_ENABLE;
9179         if (tg3_flag(tp, 5755_PLUS))
9180                 tp->rx_mode |= RX_MODE_IPV6_CSUM_ENABLE;
9181
9182         if (tg3_flag(tp, ENABLE_RSS))
9183                 tp->rx_mode |= RX_MODE_RSS_ENABLE |
9184                                RX_MODE_RSS_ITBL_HASH_BITS_7 |
9185                                RX_MODE_RSS_IPV6_HASH_EN |
9186                                RX_MODE_RSS_TCP_IPV6_HASH_EN |
9187                                RX_MODE_RSS_IPV4_HASH_EN |
9188                                RX_MODE_RSS_TCP_IPV4_HASH_EN;
9189
9190         tw32_f(MAC_RX_MODE, tp->rx_mode);
9191         udelay(10);
9192
9193         tw32(MAC_LED_CTRL, tp->led_ctrl);
9194
9195         tw32(MAC_MI_STAT, MAC_MI_STAT_LNKSTAT_ATTN_ENAB);
9196         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9197                 tw32_f(MAC_RX_MODE, RX_MODE_RESET);
9198                 udelay(10);
9199         }
9200         tw32_f(MAC_RX_MODE, tp->rx_mode);
9201         udelay(10);
9202
9203         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
9204                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) &&
9205                         !(tp->phy_flags & TG3_PHYFLG_SERDES_PREEMPHASIS)) {
9206                         /* Set drive transmission level to 1.2V  */
9207                         /* only if the signal pre-emphasis bit is not set  */
9208                         val = tr32(MAC_SERDES_CFG);
9209                         val &= 0xfffff000;
9210                         val |= 0x880;
9211                         tw32(MAC_SERDES_CFG, val);
9212                 }
9213                 if (tp->pci_chip_rev_id == CHIPREV_ID_5703_A1)
9214                         tw32(MAC_SERDES_CFG, 0x616000);
9215         }
9216
9217         /* Prevent chip from dropping frames when flow control
9218          * is enabled.
9219          */
9220         if (tg3_flag(tp, 57765_CLASS))
9221                 val = 1;
9222         else
9223                 val = 2;
9224         tw32_f(MAC_LOW_WMARK_MAX_RX_FRAME, val);
9225
9226         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 &&
9227             (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
9228                 /* Use hardware link auto-negotiation */
9229                 tg3_flag_set(tp, HW_AUTONEG);
9230         }
9231
9232         if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9233             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
9234                 u32 tmp;
9235
9236                 tmp = tr32(SERDES_RX_CTRL);
9237                 tw32(SERDES_RX_CTRL, tmp | SERDES_RX_SIG_DETECT);
9238                 tp->grc_local_ctrl &= ~GRC_LCLCTRL_USE_EXT_SIG_DETECT;
9239                 tp->grc_local_ctrl |= GRC_LCLCTRL_USE_SIG_DETECT;
9240                 tw32(GRC_LOCAL_CTRL, tp->grc_local_ctrl);
9241         }
9242
9243         if (!tg3_flag(tp, USE_PHYLIB)) {
9244                 if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
9245                         tp->phy_flags &= ~TG3_PHYFLG_IS_LOW_POWER;
9246
9247                 err = tg3_setup_phy(tp, 0);
9248                 if (err)
9249                         return err;
9250
9251                 if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
9252                     !(tp->phy_flags & TG3_PHYFLG_IS_FET)) {
9253                         u32 tmp;
9254
9255                         /* Clear CRC stats. */
9256                         if (!tg3_readphy(tp, MII_TG3_TEST1, &tmp)) {
9257                                 tg3_writephy(tp, MII_TG3_TEST1,
9258                                              tmp | MII_TG3_TEST1_CRC_EN);
9259                                 tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &tmp);
9260                         }
9261                 }
9262         }
9263
9264         __tg3_set_rx_mode(tp->dev);
9265
9266         /* Initialize receive rules. */
9267         tw32(MAC_RCV_RULE_0,  0xc2000000 & RCV_RULE_DISABLE_MASK);
9268         tw32(MAC_RCV_VALUE_0, 0xffffffff & RCV_RULE_DISABLE_MASK);
9269         tw32(MAC_RCV_RULE_1,  0x86000004 & RCV_RULE_DISABLE_MASK);
9270         tw32(MAC_RCV_VALUE_1, 0xffffffff & RCV_RULE_DISABLE_MASK);
9271
9272         if (tg3_flag(tp, 5705_PLUS) && !tg3_flag(tp, 5780_CLASS))
9273                 limit = 8;
9274         else
9275                 limit = 16;
9276         if (tg3_flag(tp, ENABLE_ASF))
9277                 limit -= 4;
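             /* Each case below deliberately falls through, so every rule
              * slot from limit - 1 down to 4 is cleared (slots 3 and 2 are
              * left untouched, and rules 0 and 1 were programmed above).
              * When ASF is enabled, the top four slots are left for the
              * firmware's use.
              */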
9278         switch (limit) {
9279         case 16:
9280                 tw32(MAC_RCV_RULE_15,  0); tw32(MAC_RCV_VALUE_15,  0);
9281         case 15:
9282                 tw32(MAC_RCV_RULE_14,  0); tw32(MAC_RCV_VALUE_14,  0);
9283         case 14:
9284                 tw32(MAC_RCV_RULE_13,  0); tw32(MAC_RCV_VALUE_13,  0);
9285         case 13:
9286                 tw32(MAC_RCV_RULE_12,  0); tw32(MAC_RCV_VALUE_12,  0);
9287         case 12:
9288                 tw32(MAC_RCV_RULE_11,  0); tw32(MAC_RCV_VALUE_11,  0);
9289         case 11:
9290                 tw32(MAC_RCV_RULE_10,  0); tw32(MAC_RCV_VALUE_10,  0);
9291         case 10:
9292                 tw32(MAC_RCV_RULE_9,  0); tw32(MAC_RCV_VALUE_9,  0);
9293         case 9:
9294                 tw32(MAC_RCV_RULE_8,  0); tw32(MAC_RCV_VALUE_8,  0);
9295         case 8:
9296                 tw32(MAC_RCV_RULE_7,  0); tw32(MAC_RCV_VALUE_7,  0);
9297         case 7:
9298                 tw32(MAC_RCV_RULE_6,  0); tw32(MAC_RCV_VALUE_6,  0);
9299         case 6:
9300                 tw32(MAC_RCV_RULE_5,  0); tw32(MAC_RCV_VALUE_5,  0);
9301         case 5:
9302                 tw32(MAC_RCV_RULE_4,  0); tw32(MAC_RCV_VALUE_4,  0);
9303         case 4:
9304                 /* tw32(MAC_RCV_RULE_3,  0); tw32(MAC_RCV_VALUE_3,  0); */
9305         case 3:
9306                 /* tw32(MAC_RCV_RULE_2,  0); tw32(MAC_RCV_VALUE_2,  0); */
9307         case 2:
9308         case 1:
9309
9310         default:
9311                 break;
9312         }
9313
9314         if (tg3_flag(tp, ENABLE_APE))
9315                 /* Write our heartbeat update interval to APE. */
9316                 tg3_ape_write32(tp, TG3_APE_HOST_HEARTBEAT_INT_MS,
9317                                 APE_HOST_HEARTBEAT_INT_DISABLE);
9318
9319         tg3_write_sig_post_reset(tp, RESET_KIND_INIT);
9320
9321         return 0;
9322 }
9323
9324 /* Called at device open time to get the chip ready for
9325  * packet processing.  Invoked with tp->lock held.
9326  */
9327 static int tg3_init_hw(struct tg3 *tp, int reset_phy)
9328 {
9329         tg3_switch_clocks(tp);
9330
9331         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
9332
9333         return tg3_reset_hw(tp, reset_phy);
9334 }
9335
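     /* Fold a 32-bit hardware statistics register into a 64-bit high/low
      * software counter.  The MAC statistics registers appear to clear on
      * read, so each read is a delta; a carry into the high word is
      * detected when the low word wraps below the value just added.
      */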
9336 #define TG3_STAT_ADD32(PSTAT, REG) \
9337 do {    u32 __val = tr32(REG); \
9338         (PSTAT)->low += __val; \
9339         if ((PSTAT)->low < __val) \
9340                 (PSTAT)->high += 1; \
9341 } while (0)
9342
9343 static void tg3_periodic_fetch_stats(struct tg3 *tp)
9344 {
9345         struct tg3_hw_stats *sp = tp->hw_stats;
9346
9347         if (!netif_carrier_ok(tp->dev))
9348                 return;
9349
9350         TG3_STAT_ADD32(&sp->tx_octets, MAC_TX_STATS_OCTETS);
9351         TG3_STAT_ADD32(&sp->tx_collisions, MAC_TX_STATS_COLLISIONS);
9352         TG3_STAT_ADD32(&sp->tx_xon_sent, MAC_TX_STATS_XON_SENT);
9353         TG3_STAT_ADD32(&sp->tx_xoff_sent, MAC_TX_STATS_XOFF_SENT);
9354         TG3_STAT_ADD32(&sp->tx_mac_errors, MAC_TX_STATS_MAC_ERRORS);
9355         TG3_STAT_ADD32(&sp->tx_single_collisions, MAC_TX_STATS_SINGLE_COLLISIONS);
9356         TG3_STAT_ADD32(&sp->tx_mult_collisions, MAC_TX_STATS_MULT_COLLISIONS);
9357         TG3_STAT_ADD32(&sp->tx_deferred, MAC_TX_STATS_DEFERRED);
9358         TG3_STAT_ADD32(&sp->tx_excessive_collisions, MAC_TX_STATS_EXCESSIVE_COL);
9359         TG3_STAT_ADD32(&sp->tx_late_collisions, MAC_TX_STATS_LATE_COL);
9360         TG3_STAT_ADD32(&sp->tx_ucast_packets, MAC_TX_STATS_UCAST);
9361         TG3_STAT_ADD32(&sp->tx_mcast_packets, MAC_TX_STATS_MCAST);
9362         TG3_STAT_ADD32(&sp->tx_bcast_packets, MAC_TX_STATS_BCAST);
9363
9364         TG3_STAT_ADD32(&sp->rx_octets, MAC_RX_STATS_OCTETS);
9365         TG3_STAT_ADD32(&sp->rx_fragments, MAC_RX_STATS_FRAGMENTS);
9366         TG3_STAT_ADD32(&sp->rx_ucast_packets, MAC_RX_STATS_UCAST);
9367         TG3_STAT_ADD32(&sp->rx_mcast_packets, MAC_RX_STATS_MCAST);
9368         TG3_STAT_ADD32(&sp->rx_bcast_packets, MAC_RX_STATS_BCAST);
9369         TG3_STAT_ADD32(&sp->rx_fcs_errors, MAC_RX_STATS_FCS_ERRORS);
9370         TG3_STAT_ADD32(&sp->rx_align_errors, MAC_RX_STATS_ALIGN_ERRORS);
9371         TG3_STAT_ADD32(&sp->rx_xon_pause_rcvd, MAC_RX_STATS_XON_PAUSE_RECVD);
9372         TG3_STAT_ADD32(&sp->rx_xoff_pause_rcvd, MAC_RX_STATS_XOFF_PAUSE_RECVD);
9373         TG3_STAT_ADD32(&sp->rx_mac_ctrl_rcvd, MAC_RX_STATS_MAC_CTRL_RECVD);
9374         TG3_STAT_ADD32(&sp->rx_xoff_entered, MAC_RX_STATS_XOFF_ENTERED);
9375         TG3_STAT_ADD32(&sp->rx_frame_too_long_errors, MAC_RX_STATS_FRAME_TOO_LONG);
9376         TG3_STAT_ADD32(&sp->rx_jabbers, MAC_RX_STATS_JABBERS);
9377         TG3_STAT_ADD32(&sp->rx_undersize_packets, MAC_RX_STATS_UNDERSIZE);
9378
9379         TG3_STAT_ADD32(&sp->rxbds_empty, RCVLPC_NO_RCV_BD_CNT);
9380         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9381             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0 &&
9382             tp->pci_chip_rev_id != CHIPREV_ID_5720_A0) {
9383                 TG3_STAT_ADD32(&sp->rx_discards, RCVLPC_IN_DISCARDS_CNT);
9384         } else {
9385                 u32 val = tr32(HOSTCC_FLOW_ATTN);
9386                 val = (val & HOSTCC_FLOW_ATTN_MBUF_LWM) ? 1 : 0;
9387                 if (val) {
9388                         tw32(HOSTCC_FLOW_ATTN, HOSTCC_FLOW_ATTN_MBUF_LWM);
9389                         sp->rx_discards.low += val;
9390                         if (sp->rx_discards.low < val)
9391                                 sp->rx_discards.high += 1;
9392                 }
9393                 sp->mbuf_lwm_thresh_hit = sp->rx_discards;
9394         }
9395         TG3_STAT_ADD32(&sp->rx_errors, RCVLPC_IN_ERRORS_CNT);
9396 }
9397
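     /* Work around chips that can occasionally drop an MSI: if a napi
      * context has work pending but its consumer indices still have not
      * advanced by the second successive timer tick, invoke the handler
      * directly.
      */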
9398 static void tg3_chk_missed_msi(struct tg3 *tp)
9399 {
9400         u32 i;
9401
9402         for (i = 0; i < tp->irq_cnt; i++) {
9403                 struct tg3_napi *tnapi = &tp->napi[i];
9404
9405                 if (tg3_has_work(tnapi)) {
9406                         if (tnapi->last_rx_cons == tnapi->rx_rcb_ptr &&
9407                             tnapi->last_tx_cons == tnapi->tx_cons) {
9408                                 if (tnapi->chk_msi_cnt < 1) {
9409                                         tnapi->chk_msi_cnt++;
9410                                         return;
9411                                 }
9412                                 tg3_msi(0, tnapi);
9413                         }
9414                 }
9415                 tnapi->chk_msi_cnt = 0;
9416                 tnapi->last_rx_cons = tnapi->rx_rcb_ptr;
9417                 tnapi->last_tx_cons = tnapi->tx_cons;
9418         }
9419 }
9420
9421 static void tg3_timer(unsigned long __opaque)
9422 {
9423         struct tg3 *tp = (struct tg3 *) __opaque;
9424
9425         if (tp->irq_sync || tg3_flag(tp, RESET_TASK_PENDING))
9426                 goto restart_timer;
9427
9428         spin_lock(&tp->lock);
9429
9430         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
9431             tg3_flag(tp, 57765_CLASS))
9432                 tg3_chk_missed_msi(tp);
9433
9434         if (!tg3_flag(tp, TAGGED_STATUS)) {
9435                 /* All of this garbage is because, when using non-tagged
9436                  * IRQ status, the mailbox/status_block protocol the chip
9437                  * uses with the CPU is race prone.
9438                  */
9439                 if (tp->napi[0].hw_status->status & SD_STATUS_UPDATED) {
9440                         tw32(GRC_LOCAL_CTRL,
9441                              tp->grc_local_ctrl | GRC_LCLCTRL_SETINT);
9442                 } else {
9443                         tw32(HOSTCC_MODE, tp->coalesce_mode |
9444                              HOSTCC_MODE_ENABLE | HOSTCC_MODE_NOW);
9445                 }
9446
9447                 if (!(tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
9448                         spin_unlock(&tp->lock);
9449                         tg3_reset_task_schedule(tp);
9450                         goto restart_timer;
9451                 }
9452         }
9453
9454         /* This part only runs once per second. */
9455         if (!--tp->timer_counter) {
9456                 if (tg3_flag(tp, 5705_PLUS))
9457                         tg3_periodic_fetch_stats(tp);
9458
9459                 if (tp->setlpicnt && !--tp->setlpicnt)
9460                         tg3_phy_eee_enable(tp);
9461
9462                 if (tg3_flag(tp, USE_LINKCHG_REG)) {
9463                         u32 mac_stat;
9464                         int phy_event;
9465
9466                         mac_stat = tr32(MAC_STATUS);
9467
9468                         phy_event = 0;
9469                         if (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) {
9470                                 if (mac_stat & MAC_STATUS_MI_INTERRUPT)
9471                                         phy_event = 1;
9472                         } else if (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)
9473                                 phy_event = 1;
9474
9475                         if (phy_event)
9476                                 tg3_setup_phy(tp, 0);
9477                 } else if (tg3_flag(tp, POLL_SERDES)) {
9478                         u32 mac_stat = tr32(MAC_STATUS);
9479                         int need_setup = 0;
9480
9481                         if (netif_carrier_ok(tp->dev) &&
9482                             (mac_stat & MAC_STATUS_LNKSTATE_CHANGED)) {
9483                                 need_setup = 1;
9484                         }
9485                         if (!netif_carrier_ok(tp->dev) &&
9486                             (mac_stat & (MAC_STATUS_PCS_SYNCED |
9487                                          MAC_STATUS_SIGNAL_DET))) {
9488                                 need_setup = 1;
9489                         }
9490                         if (need_setup) {
9491                                 if (!tp->serdes_counter) {
9492                                         tw32_f(MAC_MODE,
9493                                              (tp->mac_mode &
9494                                               ~MAC_MODE_PORT_MODE_MASK));
9495                                         udelay(40);
9496                                         tw32_f(MAC_MODE, tp->mac_mode);
9497                                         udelay(40);
9498                                 }
9499                                 tg3_setup_phy(tp, 0);
9500                         }
9501                 } else if ((tp->phy_flags & TG3_PHYFLG_MII_SERDES) &&
9502                            tg3_flag(tp, 5780_CLASS)) {
9503                         tg3_serdes_parallel_detect(tp);
9504                 }
9505
9506                 tp->timer_counter = tp->timer_multiplier;
9507         }
9508
9509         /* Heartbeat is only sent once every 2 seconds.
9510          *
9511          * The heartbeat is to tell the ASF firmware that the host
9512          * driver is still alive.  In the event that the OS crashes,
9513          * ASF needs to reset the hardware to free up the FIFO space
9514          * that may be filled with rx packets destined for the host.
9515          * If the FIFO is full, ASF will no longer function properly.
9516          *
9517          * Unintended resets have been reported on real-time kernels,
9518          * where the timer doesn't run on time.  Netpoll will also have
9519          * the same problem.
9520          *
9521          * The new FWCMD_NICDRV_ALIVE3 command tells the ASF firmware
9522          * to check the ring condition when the heartbeat is expiring
9523          * before doing the reset.  This will prevent most unintended
9524          * resets.
9525          */
9526         if (!--tp->asf_counter) {
9527                 if (tg3_flag(tp, ENABLE_ASF) && !tg3_flag(tp, ENABLE_APE)) {
9528                         tg3_wait_for_event_ack(tp);
9529
9530                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_MBOX,
9531                                       FWCMD_NICDRV_ALIVE3);
9532                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_LEN_MBOX, 4);
9533                         tg3_write_mem(tp, NIC_SRAM_FW_CMD_DATA_MBOX,
9534                                       TG3_FW_UPDATE_TIMEOUT_SEC);
9535
9536                         tg3_generate_fw_event(tp);
9537                 }
9538                 tp->asf_counter = tp->asf_multiplier;
9539         }
9540
9541         spin_unlock(&tp->lock);
9542
9543 restart_timer:
9544         tp->timer.expires = jiffies + tp->timer_offset;
9545         add_timer(&tp->timer);
9546 }
9547
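     /* The timer normally fires once per second; chips using non-tagged
      * status, and those that need the missed-MSI check above, run it at
      * 10 Hz instead.  timer_multiplier converts the faster rate back to
      * the once-per-second work, and asf_multiplier scales the ASF
      * heartbeat to TG3_FW_UPDATE_FREQ_SEC seconds.
      */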
9548 static void __devinit tg3_timer_init(struct tg3 *tp)
9549 {
9550         if (tg3_flag(tp, TAGGED_STATUS) &&
9551             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5717 &&
9552             !tg3_flag(tp, 57765_CLASS))
9553                 tp->timer_offset = HZ;
9554         else
9555                 tp->timer_offset = HZ / 10;
9556
9557         BUG_ON(tp->timer_offset > HZ);
9558
9559         tp->timer_multiplier = (HZ / tp->timer_offset);
9560         tp->asf_multiplier = (HZ / tp->timer_offset) *
9561                              TG3_FW_UPDATE_FREQ_SEC;
9562
9563         init_timer(&tp->timer);
9564         tp->timer.data = (unsigned long) tp;
9565         tp->timer.function = tg3_timer;
9566 }
9567
9568 static void tg3_timer_start(struct tg3 *tp)
9569 {
9570         tp->asf_counter   = tp->asf_multiplier;
9571         tp->timer_counter = tp->timer_multiplier;
9572
9573         tp->timer.expires = jiffies + tp->timer_offset;
9574         add_timer(&tp->timer);
9575 }
9576
9577 static void tg3_timer_stop(struct tg3 *tp)
9578 {
9579         del_timer_sync(&tp->timer);
9580 }
9581
9582 /* Restart hardware after configuration changes, self-test, etc.
9583  * Invoked with tp->lock held.
9584  */
9585 static int tg3_restart_hw(struct tg3 *tp, int reset_phy)
9586         __releases(tp->lock)
9587         __acquires(tp->lock)
9588 {
9589         int err;
9590
9591         err = tg3_init_hw(tp, reset_phy);
9592         if (err) {
9593                 netdev_err(tp->dev,
9594                            "Failed to re-initialize device, aborting\n");
9595                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9596                 tg3_full_unlock(tp);
9597                 tg3_timer_stop(tp);
9598                 tp->irq_sync = 0;
9599                 tg3_napi_enable(tp);
9600                 dev_close(tp->dev);
9601                 tg3_full_lock(tp, 0);
9602         }
9603         return err;
9604 }
9605
9606 static void tg3_reset_task(struct work_struct *work)
9607 {
9608         struct tg3 *tp = container_of(work, struct tg3, reset_task);
9609         int err;
9610
9611         tg3_full_lock(tp, 0);
9612
9613         if (!netif_running(tp->dev)) {
9614                 tg3_flag_clear(tp, RESET_TASK_PENDING);
9615                 tg3_full_unlock(tp);
9616                 return;
9617         }
9618
9619         tg3_full_unlock(tp);
9620
9621         tg3_phy_stop(tp);
9622
9623         tg3_netif_stop(tp);
9624
9625         tg3_full_lock(tp, 1);
9626
9627         if (tg3_flag(tp, TX_RECOVERY_PENDING)) {
9628                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
9629                 tp->write32_rx_mbox = tg3_write_flush_reg32;
9630                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
9631                 tg3_flag_clear(tp, TX_RECOVERY_PENDING);
9632         }
9633
9634         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
9635         err = tg3_init_hw(tp, 1);
9636         if (err)
9637                 goto out;
9638
9639         tg3_netif_start(tp);
9640
9641 out:
9642         tg3_full_unlock(tp);
9643
9644         if (!err)
9645                 tg3_phy_start(tp);
9646
9647         tg3_flag_clear(tp, RESET_TASK_PENDING);
9648 }
9649
9650 static int tg3_request_irq(struct tg3 *tp, int irq_num)
9651 {
9652         irq_handler_t fn;
9653         unsigned long flags;
9654         char *name;
9655         struct tg3_napi *tnapi = &tp->napi[irq_num];
9656
9657         if (tp->irq_cnt == 1)
9658                 name = tp->dev->name;
9659         else {
9660                 name = &tnapi->irq_lbl[0];
9661                 snprintf(name, IFNAMSIZ, "%s-%d", tp->dev->name, irq_num);
9662                 name[IFNAMSIZ-1] = 0;
9663         }
9664
9665         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9666                 fn = tg3_msi;
9667                 if (tg3_flag(tp, 1SHOT_MSI))
9668                         fn = tg3_msi_1shot;
9669                 flags = 0;
9670         } else {
9671                 fn = tg3_interrupt;
9672                 if (tg3_flag(tp, TAGGED_STATUS))
9673                         fn = tg3_interrupt_tagged;
9674                 flags = IRQF_SHARED;
9675         }
9676
9677         return request_irq(tnapi->irq_vec, fn, flags, name, tnapi);
9678 }
9679
9680 static int tg3_test_interrupt(struct tg3 *tp)
9681 {
9682         struct tg3_napi *tnapi = &tp->napi[0];
9683         struct net_device *dev = tp->dev;
9684         int err, i, intr_ok = 0;
9685         u32 val;
9686
9687         if (!netif_running(dev))
9688                 return -ENODEV;
9689
9690         tg3_disable_ints(tp);
9691
9692         free_irq(tnapi->irq_vec, tnapi);
9693
9694         /*
9695          * Turn off MSI one shot mode.  Otherwise this test has no
9696          * observable way to know whether the interrupt was delivered.
9697          */
9698         if (tg3_flag(tp, 57765_PLUS)) {
9699                 val = tr32(MSGINT_MODE) | MSGINT_MODE_ONE_SHOT_DISABLE;
9700                 tw32(MSGINT_MODE, val);
9701         }
9702
9703         err = request_irq(tnapi->irq_vec, tg3_test_isr,
9704                           IRQF_SHARED, dev->name, tnapi);
9705         if (err)
9706                 return err;
9707
9708         tnapi->hw_status->status &= ~SD_STATUS_UPDATED;
9709         tg3_enable_ints(tp);
9710
9711         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
9712                tnapi->coal_now);
9713
9714         for (i = 0; i < 5; i++) {
9715                 u32 int_mbox, misc_host_ctrl;
9716
9717                 int_mbox = tr32_mailbox(tnapi->int_mbox);
9718                 misc_host_ctrl = tr32(TG3PCI_MISC_HOST_CTRL);
9719
9720                 if ((int_mbox != 0) ||
9721                     (misc_host_ctrl & MISC_HOST_CTRL_MASK_PCI_INT)) {
9722                         intr_ok = 1;
9723                         break;
9724                 }
9725
9726                 if (tg3_flag(tp, 57765_PLUS) &&
9727                     tnapi->hw_status->status_tag != tnapi->last_tag)
9728                         tw32_mailbox_f(tnapi->int_mbox, tnapi->last_tag << 24);
9729
9730                 msleep(10);
9731         }
9732
9733         tg3_disable_ints(tp);
9734
9735         free_irq(tnapi->irq_vec, tnapi);
9736
9737         err = tg3_request_irq(tp, 0);
9738
9739         if (err)
9740                 return err;
9741
9742         if (intr_ok) {
9743                 /* Re-enable MSI one-shot mode. */
9744                 if (tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, 1SHOT_MSI)) {
9745                         val = tr32(MSGINT_MODE) & ~MSGINT_MODE_ONE_SHOT_DISABLE;
9746                         tw32(MSGINT_MODE, val);
9747                 }
9748                 return 0;
9749         }
9750
9751         return -EIO;
9752 }
9753
9754 /* Returns 0 if the MSI test succeeds, or if the MSI test fails but
9755  * INTx mode is successfully restored.
9756  */
9757 static int tg3_test_msi(struct tg3 *tp)
9758 {
9759         int err;
9760         u16 pci_cmd;
9761
9762         if (!tg3_flag(tp, USING_MSI))
9763                 return 0;
9764
9765         /* Turn off SERR reporting in case MSI terminates with Master
9766          * Abort.
9767          */
9768         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
9769         pci_write_config_word(tp->pdev, PCI_COMMAND,
9770                               pci_cmd & ~PCI_COMMAND_SERR);
9771
9772         err = tg3_test_interrupt(tp);
9773
9774         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
9775
9776         if (!err)
9777                 return 0;
9778
9779         /* other failures */
9780         if (err != -EIO)
9781                 return err;
9782
9783         /* MSI test failed, go back to INTx mode */
9784         netdev_warn(tp->dev, "No interrupt was generated using MSI. Switching "
9785                     "to INTx mode. Please report this failure to the PCI "
9786                     "maintainer and include system chipset information\n");
9787
9788         free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9789
9790         pci_disable_msi(tp->pdev);
9791
9792         tg3_flag_clear(tp, USING_MSI);
9793         tp->napi[0].irq_vec = tp->pdev->irq;
9794
9795         err = tg3_request_irq(tp, 0);
9796         if (err)
9797                 return err;
9798
9799         /* Need to reset the chip because the MSI cycle may have terminated
9800          * with Master Abort.
9801          */
9802         tg3_full_lock(tp, 1);
9803
9804         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
9805         err = tg3_init_hw(tp, 1);
9806
9807         tg3_full_unlock(tp);
9808
9809         if (err)
9810                 free_irq(tp->napi[0].irq_vec, &tp->napi[0]);
9811
9812         return err;
9813 }
9814
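/* Load the firmware image named by tp->fw_needed and sanity-check the
 * length advertised in its header against the size of the file we were
 * actually handed.
 */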
9815 static int tg3_request_firmware(struct tg3 *tp)
9816 {
9817         const __be32 *fw_data;
9818
9819         if (request_firmware(&tp->fw, tp->fw_needed, &tp->pdev->dev)) {
9820                 netdev_err(tp->dev, "Failed to load firmware \"%s\"\n",
9821                            tp->fw_needed);
9822                 return -ENOENT;
9823         }
9824
9825         fw_data = (void *)tp->fw->data;
9826
9827         /* Firmware blob starts with version numbers, followed by
9828          * the start address and the _full_ length including BSS sections
9829          * (which must be at least as long as the actual data, of course).
9830          */
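        /* Sketch of the header layout implied by the comment above
         * (only fw_data[2] is consumed here; words 0 and 1 are
         * inferred, not read, at this point):
         *   fw_data[0]  firmware version
         *   fw_data[1]  start (load) address
         *   fw_data[2]  full image length in bytes, BSS included
         */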
9831
9832         tp->fw_len = be32_to_cpu(fw_data[2]);   /* includes bss */
9833         if (tp->fw_len < (tp->fw->size - 12)) {
9834                 netdev_err(tp->dev, "bogus length %d in \"%s\"\n",
9835                            tp->fw_len, tp->fw_needed);
9836                 release_firmware(tp->fw);
9837                 tp->fw = NULL;
9838                 return -EINVAL;
9839         }
9840
9841         /* We no longer need firmware; we have it. */
9842         tp->fw_needed = NULL;
9843         return 0;
9844 }
9845
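/* Try to switch the device to MSI-X.  We ask for one vector per online
 * CPU, plus one extra when there is more than one CPU since vector 0
 * then services only link and other non-data interrupts, and settle for
 * however many vectors the kernel actually grants.  With multiple
 * vectors we enable RSS, and on 5719/5720 also TSS.
 */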
9846 static bool tg3_enable_msix(struct tg3 *tp)
9847 {
9848         int i, rc;
9849         struct msix_entry msix_ent[tp->irq_max];
9850
9851         tp->irq_cnt = num_online_cpus();
9852         if (tp->irq_cnt > 1) {
9853                 /* We want as many rx rings enabled as there are cpus.
9854                  * In multiqueue MSI-X mode, the first MSI-X vector
9855                  * only deals with link interrupts, etc, so we add
9856                  * one to the number of vectors we are requesting.
9857                  */
9858                 tp->irq_cnt = min_t(unsigned, tp->irq_cnt + 1, tp->irq_max);
9859         }
9860
9861         for (i = 0; i < tp->irq_max; i++) {
9862                 msix_ent[i].entry  = i;
9863                 msix_ent[i].vector = 0;
9864         }
9865
9866         rc = pci_enable_msix(tp->pdev, msix_ent, tp->irq_cnt);
9867         if (rc < 0) {
9868                 return false;
9869         } else if (rc != 0) {
9870                 if (pci_enable_msix(tp->pdev, msix_ent, rc))
9871                         return false;
9872                 netdev_notice(tp->dev, "Requested %d MSI-X vectors, received %d\n",
9873                               tp->irq_cnt, rc);
9874                 tp->irq_cnt = rc;
9875         }
9876
9877         for (i = 0; i < tp->irq_max; i++)
9878                 tp->napi[i].irq_vec = msix_ent[i].vector;
9879
9880         netif_set_real_num_tx_queues(tp->dev, 1);
9881         rc = tp->irq_cnt > 1 ? tp->irq_cnt - 1 : 1;
9882         if (netif_set_real_num_rx_queues(tp->dev, rc)) {
9883                 pci_disable_msix(tp->pdev);
9884                 return false;
9885         }
9886
9887         if (tp->irq_cnt > 1) {
9888                 tg3_flag_set(tp, ENABLE_RSS);
9889
9890                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
9891                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
9892                         tg3_flag_set(tp, ENABLE_TSS);
9893                         netif_set_real_num_tx_queues(tp->dev, tp->irq_cnt - 1);
9894                 }
9895         }
9896
9897         return true;
9898 }
9899
9900 static void tg3_ints_init(struct tg3 *tp)
9901 {
9902         if ((tg3_flag(tp, SUPPORT_MSI) || tg3_flag(tp, SUPPORT_MSIX)) &&
9903             !tg3_flag(tp, TAGGED_STATUS)) {
9904                 /* All MSI-supporting chips should support tagged
9905                  * status; warn and fall back if this one does not.
9906                  */
9907                 netdev_warn(tp->dev,
9908                             "MSI without TAGGED_STATUS? Not using MSI\n");
9909                 goto defcfg;
9910         }
9911
9912         if (tg3_flag(tp, SUPPORT_MSIX) && tg3_enable_msix(tp))
9913                 tg3_flag_set(tp, USING_MSIX);
9914         else if (tg3_flag(tp, SUPPORT_MSI) && pci_enable_msi(tp->pdev) == 0)
9915                 tg3_flag_set(tp, USING_MSI);
9916
9917         if (tg3_flag(tp, USING_MSI) || tg3_flag(tp, USING_MSIX)) {
9918                 u32 msi_mode = tr32(MSGINT_MODE);
9919                 if (tg3_flag(tp, USING_MSIX) && tp->irq_cnt > 1)
9920                         msi_mode |= MSGINT_MODE_MULTIVEC_EN;
9921                 if (!tg3_flag(tp, 1SHOT_MSI))
9922                         msi_mode |= MSGINT_MODE_ONE_SHOT_DISABLE;
9923                 tw32(MSGINT_MODE, msi_mode | MSGINT_MODE_ENABLE);
9924         }
9925 defcfg:
9926         if (!tg3_flag(tp, USING_MSIX)) {
9927                 tp->irq_cnt = 1;
9928                 tp->napi[0].irq_vec = tp->pdev->irq;
9929                 netif_set_real_num_tx_queues(tp->dev, 1);
9930                 netif_set_real_num_rx_queues(tp->dev, 1);
9931         }
9932 }
9933
9934 static void tg3_ints_fini(struct tg3 *tp)
9935 {
9936         if (tg3_flag(tp, USING_MSIX))
9937                 pci_disable_msix(tp->pdev);
9938         else if (tg3_flag(tp, USING_MSI))
9939                 pci_disable_msi(tp->pdev);
9940         tg3_flag_clear(tp, USING_MSI);
9941         tg3_flag_clear(tp, USING_MSIX);
9942         tg3_flag_clear(tp, ENABLE_RSS);
9943         tg3_flag_clear(tp, ENABLE_TSS);
9944 }
9945
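/* Bring the interface up.  The ordering below matters: interrupts are
 * configured first so we know how many NAPI contexts and rings to
 * allocate, the hardware is initialized under the full lock, and MSI
 * delivery is verified (with a fallback to INTx) before the timer and
 * chip interrupts are finally enabled.
 */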
9946 static int tg3_open(struct net_device *dev)
9947 {
9948         struct tg3 *tp = netdev_priv(dev);
9949         int i, err;
9950
9951         if (tp->fw_needed) {
9952                 err = tg3_request_firmware(tp);
9953                 if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0) {
9954                         if (err)
9955                                 return err;
9956                 } else if (err) {
9957                         netdev_warn(tp->dev, "TSO capability disabled\n");
9958                         tg3_flag_clear(tp, TSO_CAPABLE);
9959                 } else if (!tg3_flag(tp, TSO_CAPABLE)) {
9960                         netdev_notice(tp->dev, "TSO capability restored\n");
9961                         tg3_flag_set(tp, TSO_CAPABLE);
9962                 }
9963         }
9964
9965         netif_carrier_off(tp->dev);
9966
9967         err = tg3_power_up(tp);
9968         if (err)
9969                 return err;
9970
9971         tg3_full_lock(tp, 0);
9972
9973         tg3_disable_ints(tp);
9974         tg3_flag_clear(tp, INIT_COMPLETE);
9975
9976         tg3_full_unlock(tp);
9977
9978         /*
9979          * Setup interrupts first so we know how
9980          * many NAPI resources to allocate
9981          */
9982         tg3_ints_init(tp);
9983
9984         tg3_rss_check_indir_tbl(tp);
9985
9986         /* The placement of this call is tied
9987          * to the setup and use of Host TX descriptors.
9988          */
9989         err = tg3_alloc_consistent(tp);
9990         if (err)
9991                 goto err_out1;
9992
9993         tg3_napi_init(tp);
9994
9995         tg3_napi_enable(tp);
9996
9997         for (i = 0; i < tp->irq_cnt; i++) {
9998                 struct tg3_napi *tnapi = &tp->napi[i];
9999                 err = tg3_request_irq(tp, i);
10000                 if (err) {
10001                         for (i--; i >= 0; i--) {
10002                                 tnapi = &tp->napi[i];
10003                                 free_irq(tnapi->irq_vec, tnapi);
10004                         }
10005                         goto err_out2;
10006                 }
10007         }
10008
10009         tg3_full_lock(tp, 0);
10010
10011         err = tg3_init_hw(tp, 1);
10012         if (err) {
10013                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10014                 tg3_free_rings(tp);
10015         }
10016
10017         tg3_full_unlock(tp);
10018
10019         if (err)
10020                 goto err_out3;
10021
10022         if (tg3_flag(tp, USING_MSI)) {
10023                 err = tg3_test_msi(tp);
10024
10025                 if (err) {
10026                         tg3_full_lock(tp, 0);
10027                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10028                         tg3_free_rings(tp);
10029                         tg3_full_unlock(tp);
10030
10031                         goto err_out2;
10032                 }
10033
10034                 if (!tg3_flag(tp, 57765_PLUS) && tg3_flag(tp, USING_MSI)) {
10035                         u32 val = tr32(PCIE_TRANSACTION_CFG);
10036
10037                         tw32(PCIE_TRANSACTION_CFG,
10038                              val | PCIE_TRANS_CFG_1SHOT_MSI);
10039                 }
10040         }
10041
10042         tg3_phy_start(tp);
10043
10044         tg3_full_lock(tp, 0);
10045
10046         tg3_timer_start(tp);
10047         tg3_flag_set(tp, INIT_COMPLETE);
10048         tg3_enable_ints(tp);
10049
10050         tg3_full_unlock(tp);
10051
10052         netif_tx_start_all_queues(dev);
10053
10054         /*
10055          * Restore the loopback feature if it was turned on while the
10056          * device was down; make sure it is configured properly now.
10057          */
10058         if (dev->features & NETIF_F_LOOPBACK)
10059                 tg3_set_loopback(dev, dev->features);
10060
10061         return 0;
10062
10063 err_out3:
10064         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10065                 struct tg3_napi *tnapi = &tp->napi[i];
10066                 free_irq(tnapi->irq_vec, tnapi);
10067         }
10068
10069 err_out2:
10070         tg3_napi_disable(tp);
10071         tg3_napi_fini(tp);
10072         tg3_free_consistent(tp);
10073
10074 err_out1:
10075         tg3_ints_fini(tp);
10076         tg3_frob_aux_power(tp, false);
10077         pci_set_power_state(tp->pdev, PCI_D3hot);
10078         return err;
10079 }
10080
10081 static int tg3_close(struct net_device *dev)
10082 {
10083         int i;
10084         struct tg3 *tp = netdev_priv(dev);
10085
10086         tg3_napi_disable(tp);
10087         tg3_reset_task_cancel(tp);
10088
10089         netif_tx_stop_all_queues(dev);
10090
10091         tg3_timer_stop(tp);
10092
10093         tg3_phy_stop(tp);
10094
10095         tg3_full_lock(tp, 1);
10096
10097         tg3_disable_ints(tp);
10098
10099         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10100         tg3_free_rings(tp);
10101         tg3_flag_clear(tp, INIT_COMPLETE);
10102
10103         tg3_full_unlock(tp);
10104
10105         for (i = tp->irq_cnt - 1; i >= 0; i--) {
10106                 struct tg3_napi *tnapi = &tp->napi[i];
10107                 free_irq(tnapi->irq_vec, tnapi);
10108         }
10109
10110         tg3_ints_fini(tp);
10111
10112         /* Clear stats across close / open calls */
10113         memset(&tp->net_stats_prev, 0, sizeof(tp->net_stats_prev));
10114         memset(&tp->estats_prev, 0, sizeof(tp->estats_prev));
10115
10116         tg3_napi_fini(tp);
10117
10118         tg3_free_consistent(tp);
10119
10120         tg3_power_down(tp);
10121
10122         netif_carrier_off(tp->dev);
10123
10124         return 0;
10125 }
10126
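/* Hardware counters are kept as two 32-bit halves; fold them into a
 * u64.  For example, high = 0x1, low = 0x2 yields 0x100000002.
 */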
10127 static inline u64 get_stat64(tg3_stat64_t *val)
10128 {
10129         return ((u64)val->high << 32) | ((u64)val->low);
10130 }
10131
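/* On 5700/5701 with a copper PHY, CRC errors are read from the PHY's
 * test register and accumulated in software (presumably because the
 * MAC's FCS counter is not usable on those chips); all other devices
 * report the MAC's rx_fcs_errors counter directly.
 */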
10132 static u64 tg3_calc_crc_errors(struct tg3 *tp)
10133 {
10134         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10135
10136         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
10137             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
10138              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)) {
10139                 u32 val;
10140
10141                 if (!tg3_readphy(tp, MII_TG3_TEST1, &val)) {
10142                         tg3_writephy(tp, MII_TG3_TEST1,
10143                                      val | MII_TG3_TEST1_CRC_EN);
10144                         tg3_readphy(tp, MII_TG3_RXR_COUNTERS, &val);
10145                 } else
10146                         val = 0;
10147
10148                 tp->phy_crc_errors += val;
10149
10150                 return tp->phy_crc_errors;
10151         }
10152
10153         return get_stat64(&hw_stats->rx_fcs_errors);
10154 }
10155
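/* ESTAT_ADD(foo) expands to
 *   estats->foo = old_estats->foo + get_stat64(&hw_stats->foo);
 * folding the totals saved before the last chip reset into the live
 * hardware counter, so the reported statistics stay monotonic.
 */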
10156 #define ESTAT_ADD(member) \
10157         estats->member =        old_estats->member + \
10158                                 get_stat64(&hw_stats->member)
10159
10160 static void tg3_get_estats(struct tg3 *tp, struct tg3_ethtool_stats *estats)
10161 {
10162         struct tg3_ethtool_stats *old_estats = &tp->estats_prev;
10163         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10164
10165         ESTAT_ADD(rx_octets);
10166         ESTAT_ADD(rx_fragments);
10167         ESTAT_ADD(rx_ucast_packets);
10168         ESTAT_ADD(rx_mcast_packets);
10169         ESTAT_ADD(rx_bcast_packets);
10170         ESTAT_ADD(rx_fcs_errors);
10171         ESTAT_ADD(rx_align_errors);
10172         ESTAT_ADD(rx_xon_pause_rcvd);
10173         ESTAT_ADD(rx_xoff_pause_rcvd);
10174         ESTAT_ADD(rx_mac_ctrl_rcvd);
10175         ESTAT_ADD(rx_xoff_entered);
10176         ESTAT_ADD(rx_frame_too_long_errors);
10177         ESTAT_ADD(rx_jabbers);
10178         ESTAT_ADD(rx_undersize_packets);
10179         ESTAT_ADD(rx_in_length_errors);
10180         ESTAT_ADD(rx_out_length_errors);
10181         ESTAT_ADD(rx_64_or_less_octet_packets);
10182         ESTAT_ADD(rx_65_to_127_octet_packets);
10183         ESTAT_ADD(rx_128_to_255_octet_packets);
10184         ESTAT_ADD(rx_256_to_511_octet_packets);
10185         ESTAT_ADD(rx_512_to_1023_octet_packets);
10186         ESTAT_ADD(rx_1024_to_1522_octet_packets);
10187         ESTAT_ADD(rx_1523_to_2047_octet_packets);
10188         ESTAT_ADD(rx_2048_to_4095_octet_packets);
10189         ESTAT_ADD(rx_4096_to_8191_octet_packets);
10190         ESTAT_ADD(rx_8192_to_9022_octet_packets);
10191
10192         ESTAT_ADD(tx_octets);
10193         ESTAT_ADD(tx_collisions);
10194         ESTAT_ADD(tx_xon_sent);
10195         ESTAT_ADD(tx_xoff_sent);
10196         ESTAT_ADD(tx_flow_control);
10197         ESTAT_ADD(tx_mac_errors);
10198         ESTAT_ADD(tx_single_collisions);
10199         ESTAT_ADD(tx_mult_collisions);
10200         ESTAT_ADD(tx_deferred);
10201         ESTAT_ADD(tx_excessive_collisions);
10202         ESTAT_ADD(tx_late_collisions);
10203         ESTAT_ADD(tx_collide_2times);
10204         ESTAT_ADD(tx_collide_3times);
10205         ESTAT_ADD(tx_collide_4times);
10206         ESTAT_ADD(tx_collide_5times);
10207         ESTAT_ADD(tx_collide_6times);
10208         ESTAT_ADD(tx_collide_7times);
10209         ESTAT_ADD(tx_collide_8times);
10210         ESTAT_ADD(tx_collide_9times);
10211         ESTAT_ADD(tx_collide_10times);
10212         ESTAT_ADD(tx_collide_11times);
10213         ESTAT_ADD(tx_collide_12times);
10214         ESTAT_ADD(tx_collide_13times);
10215         ESTAT_ADD(tx_collide_14times);
10216         ESTAT_ADD(tx_collide_15times);
10217         ESTAT_ADD(tx_ucast_packets);
10218         ESTAT_ADD(tx_mcast_packets);
10219         ESTAT_ADD(tx_bcast_packets);
10220         ESTAT_ADD(tx_carrier_sense_errors);
10221         ESTAT_ADD(tx_discards);
10222         ESTAT_ADD(tx_errors);
10223
10224         ESTAT_ADD(dma_writeq_full);
10225         ESTAT_ADD(dma_write_prioq_full);
10226         ESTAT_ADD(rxbds_empty);
10227         ESTAT_ADD(rx_discards);
10228         ESTAT_ADD(rx_errors);
10229         ESTAT_ADD(rx_threshold_hit);
10230
10231         ESTAT_ADD(dma_readq_full);
10232         ESTAT_ADD(dma_read_prioq_full);
10233         ESTAT_ADD(tx_comp_queue_full);
10234
10235         ESTAT_ADD(ring_set_send_prod_index);
10236         ESTAT_ADD(ring_status_update);
10237         ESTAT_ADD(nic_irqs);
10238         ESTAT_ADD(nic_avoided_irqs);
10239         ESTAT_ADD(nic_tx_threshold_hit);
10240
10241         ESTAT_ADD(mbuf_lwm_thresh_hit);
10242 }
10243
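/* Compose the standard rtnl stats from the MAC's hardware counters plus
 * the snapshot saved before the last reset; rx_dropped and tx_dropped
 * come from counters kept in software in struct tg3.
 */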
10244 static void tg3_get_nstats(struct tg3 *tp, struct rtnl_link_stats64 *stats)
10245 {
10246         struct rtnl_link_stats64 *old_stats = &tp->net_stats_prev;
10247         struct tg3_hw_stats *hw_stats = tp->hw_stats;
10248
10249         stats->rx_packets = old_stats->rx_packets +
10250                 get_stat64(&hw_stats->rx_ucast_packets) +
10251                 get_stat64(&hw_stats->rx_mcast_packets) +
10252                 get_stat64(&hw_stats->rx_bcast_packets);
10253
10254         stats->tx_packets = old_stats->tx_packets +
10255                 get_stat64(&hw_stats->tx_ucast_packets) +
10256                 get_stat64(&hw_stats->tx_mcast_packets) +
10257                 get_stat64(&hw_stats->tx_bcast_packets);
10258
10259         stats->rx_bytes = old_stats->rx_bytes +
10260                 get_stat64(&hw_stats->rx_octets);
10261         stats->tx_bytes = old_stats->tx_bytes +
10262                 get_stat64(&hw_stats->tx_octets);
10263
10264         stats->rx_errors = old_stats->rx_errors +
10265                 get_stat64(&hw_stats->rx_errors);
10266         stats->tx_errors = old_stats->tx_errors +
10267                 get_stat64(&hw_stats->tx_errors) +
10268                 get_stat64(&hw_stats->tx_mac_errors) +
10269                 get_stat64(&hw_stats->tx_carrier_sense_errors) +
10270                 get_stat64(&hw_stats->tx_discards);
10271
10272         stats->multicast = old_stats->multicast +
10273                 get_stat64(&hw_stats->rx_mcast_packets);
10274         stats->collisions = old_stats->collisions +
10275                 get_stat64(&hw_stats->tx_collisions);
10276
10277         stats->rx_length_errors = old_stats->rx_length_errors +
10278                 get_stat64(&hw_stats->rx_frame_too_long_errors) +
10279                 get_stat64(&hw_stats->rx_undersize_packets);
10280
10281         stats->rx_over_errors = old_stats->rx_over_errors +
10282                 get_stat64(&hw_stats->rxbds_empty);
10283         stats->rx_frame_errors = old_stats->rx_frame_errors +
10284                 get_stat64(&hw_stats->rx_align_errors);
10285         stats->tx_aborted_errors = old_stats->tx_aborted_errors +
10286                 get_stat64(&hw_stats->tx_discards);
10287         stats->tx_carrier_errors = old_stats->tx_carrier_errors +
10288                 get_stat64(&hw_stats->tx_carrier_sense_errors);
10289
10290         stats->rx_crc_errors = old_stats->rx_crc_errors +
10291                 tg3_calc_crc_errors(tp);
10292
10293         stats->rx_missed_errors = old_stats->rx_missed_errors +
10294                 get_stat64(&hw_stats->rx_discards);
10295
10296         stats->rx_dropped = tp->rx_dropped;
10297         stats->tx_dropped = tp->tx_dropped;
10298 }
10299
10300 static int tg3_get_regs_len(struct net_device *dev)
10301 {
10302         return TG3_REG_BLK_SIZE;
10303 }
10304
10305 static void tg3_get_regs(struct net_device *dev,
10306                 struct ethtool_regs *regs, void *_p)
10307 {
10308         struct tg3 *tp = netdev_priv(dev);
10309
10310         regs->version = 0;
10311
10312         memset(_p, 0, TG3_REG_BLK_SIZE);
10313
10314         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10315                 return;
10316
10317         tg3_full_lock(tp, 0);
10318
10319         tg3_dump_legacy_regs(tp, (u32 *)_p);
10320
10321         tg3_full_unlock(tp);
10322 }
10323
10324 static int tg3_get_eeprom_len(struct net_device *dev)
10325 {
10326         struct tg3 *tp = netdev_priv(dev);
10327
10328         return tp->nvram_size;
10329 }
10330
10331 static int tg3_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10332 {
10333         struct tg3 *tp = netdev_priv(dev);
10334         int ret;
10335         u8  *pd;
10336         u32 i, offset, len, b_offset, b_count;
10337         __be32 val;
10338
10339         if (tg3_flag(tp, NO_NVRAM))
10340                 return -EINVAL;
10341
10342         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10343                 return -EAGAIN;
10344
10345         offset = eeprom->offset;
10346         len = eeprom->len;
10347         eeprom->len = 0;
10348
10349         eeprom->magic = TG3_EEPROM_MAGIC;
10350
10351         if (offset & 3) {
10352                 /* adjustments to start on required 4 byte boundary */
10353                 b_offset = offset & 3;
10354                 b_count = 4 - b_offset;
10355                 if (b_count > len) {
10356                         /* i.e. offset=1 len=2 */
10357                         b_count = len;
10358                 }
10359                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &val);
10360                 if (ret)
10361                         return ret;
10362                 memcpy(data, ((char *)&val) + b_offset, b_count);
10363                 len -= b_count;
10364                 offset += b_count;
10365                 eeprom->len += b_count;
10366         }
10367
10368         /* read bytes up to the last 4 byte boundary */
10369         pd = &data[eeprom->len];
10370         for (i = 0; i < (len - (len & 3)); i += 4) {
10371                 ret = tg3_nvram_read_be32(tp, offset + i, &val);
10372                 if (ret) {
10373                         eeprom->len += i;
10374                         return ret;
10375                 }
10376                 memcpy(pd + i, &val, 4);
10377         }
10378         eeprom->len += i;
10379
10380         if (len & 3) {
10381                 /* read last bytes not ending on 4 byte boundary */
10382                 pd = &data[eeprom->len];
10383                 b_count = len & 3;
10384                 b_offset = offset + len - b_count;
10385                 ret = tg3_nvram_read_be32(tp, b_offset, &val);
10386                 if (ret)
10387                         return ret;
10388                 memcpy(pd, &val, b_count);
10389                 eeprom->len += b_count;
10390         }
10391         return 0;
10392 }
10393
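/* NVRAM is written in 4-byte words, so an unaligned or odd-length
 * request is widened to whole words: the head and tail words to be
 * preserved are read first and merged with the caller's data in a
 * bounce buffer before the block write.
 */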
10394 static int tg3_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom, u8 *data)
10395 {
10396         struct tg3 *tp = netdev_priv(dev);
10397         int ret;
10398         u32 offset, len, b_offset, odd_len;
10399         u8 *buf;
10400         __be32 start, end;
10401
10402         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
10403                 return -EAGAIN;
10404
10405         if (tg3_flag(tp, NO_NVRAM) ||
10406             eeprom->magic != TG3_EEPROM_MAGIC)
10407                 return -EINVAL;
10408
10409         offset = eeprom->offset;
10410         len = eeprom->len;
10411
10412         if ((b_offset = (offset & 3))) {
10413                 /* adjustments to start on required 4 byte boundary */
10414                 ret = tg3_nvram_read_be32(tp, offset-b_offset, &start);
10415                 if (ret)
10416                         return ret;
10417                 len += b_offset;
10418                 offset &= ~3;
10419                 if (len < 4)
10420                         len = 4;
10421         }
10422
10423         odd_len = 0;
10424         if (len & 3) {
10425                 /* adjustments to end on required 4 byte boundary */
10426                 odd_len = 1;
10427                 len = (len + 3) & ~3;
10428                 ret = tg3_nvram_read_be32(tp, offset+len-4, &end);
10429                 if (ret)
10430                         return ret;
10431         }
10432
10433         buf = data;
10434         if (b_offset || odd_len) {
10435                 buf = kmalloc(len, GFP_KERNEL);
10436                 if (!buf)
10437                         return -ENOMEM;
10438                 if (b_offset)
10439                         memcpy(buf, &start, 4);
10440                 if (odd_len)
10441                         memcpy(buf+len-4, &end, 4);
10442                 memcpy(buf + b_offset, data, eeprom->len);
10443         }
10444
10445         ret = tg3_nvram_write_block(tp, offset, len, buf);
10446
10447         if (buf != data)
10448                 kfree(buf);
10449
10450         return ret;
10451 }
10452
10453 static int tg3_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10454 {
10455         struct tg3 *tp = netdev_priv(dev);
10456
10457         if (tg3_flag(tp, USE_PHYLIB)) {
10458                 struct phy_device *phydev;
10459                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10460                         return -EAGAIN;
10461                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10462                 return phy_ethtool_gset(phydev, cmd);
10463         }
10464
10465         cmd->supported = (SUPPORTED_Autoneg);
10466
10467         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10468                 cmd->supported |= (SUPPORTED_1000baseT_Half |
10469                                    SUPPORTED_1000baseT_Full);
10470
10471         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10472                 cmd->supported |= (SUPPORTED_100baseT_Half |
10473                                   SUPPORTED_100baseT_Full |
10474                                   SUPPORTED_10baseT_Half |
10475                                   SUPPORTED_10baseT_Full |
10476                                   SUPPORTED_TP);
10477                 cmd->port = PORT_TP;
10478         } else {
10479                 cmd->supported |= SUPPORTED_FIBRE;
10480                 cmd->port = PORT_FIBRE;
10481         }
10482
10483         cmd->advertising = tp->link_config.advertising;
10484         if (tg3_flag(tp, PAUSE_AUTONEG)) {
10485                 if (tp->link_config.flowctrl & FLOW_CTRL_RX) {
10486                         if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10487                                 cmd->advertising |= ADVERTISED_Pause;
10488                         } else {
10489                                 cmd->advertising |= ADVERTISED_Pause |
10490                                                     ADVERTISED_Asym_Pause;
10491                         }
10492                 } else if (tp->link_config.flowctrl & FLOW_CTRL_TX) {
10493                         cmd->advertising |= ADVERTISED_Asym_Pause;
10494                 }
10495         }
10496         if (netif_running(dev) && netif_carrier_ok(dev)) {
10497                 ethtool_cmd_speed_set(cmd, tp->link_config.active_speed);
10498                 cmd->duplex = tp->link_config.active_duplex;
10499                 cmd->lp_advertising = tp->link_config.rmt_adv;
10500                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES)) {
10501                         if (tp->phy_flags & TG3_PHYFLG_MDIX_STATE)
10502                                 cmd->eth_tp_mdix = ETH_TP_MDI_X;
10503                         else
10504                                 cmd->eth_tp_mdix = ETH_TP_MDI;
10505                 }
10506         } else {
10507                 ethtool_cmd_speed_set(cmd, SPEED_UNKNOWN);
10508                 cmd->duplex = DUPLEX_UNKNOWN;
10509                 cmd->eth_tp_mdix = ETH_TP_MDI_INVALID;
10510         }
10511         cmd->phy_address = tp->phy_addr;
10512         cmd->transceiver = XCVR_INTERNAL;
10513         cmd->autoneg = tp->link_config.autoneg;
10514         cmd->maxtxpkt = 0;
10515         cmd->maxrxpkt = 0;
10516         return 0;
10517 }
10518
10519 static int tg3_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
10520 {
10521         struct tg3 *tp = netdev_priv(dev);
10522         u32 speed = ethtool_cmd_speed(cmd);
10523
10524         if (tg3_flag(tp, USE_PHYLIB)) {
10525                 struct phy_device *phydev;
10526                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10527                         return -EAGAIN;
10528                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10529                 return phy_ethtool_sset(phydev, cmd);
10530         }
10531
10532         if (cmd->autoneg != AUTONEG_ENABLE &&
10533             cmd->autoneg != AUTONEG_DISABLE)
10534                 return -EINVAL;
10535
10536         if (cmd->autoneg == AUTONEG_DISABLE &&
10537             cmd->duplex != DUPLEX_FULL &&
10538             cmd->duplex != DUPLEX_HALF)
10539                 return -EINVAL;
10540
10541         if (cmd->autoneg == AUTONEG_ENABLE) {
10542                 u32 mask = ADVERTISED_Autoneg |
10543                            ADVERTISED_Pause |
10544                            ADVERTISED_Asym_Pause;
10545
10546                 if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
10547                         mask |= ADVERTISED_1000baseT_Half |
10548                                 ADVERTISED_1000baseT_Full;
10549
10550                 if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
10551                         mask |= ADVERTISED_100baseT_Half |
10552                                 ADVERTISED_100baseT_Full |
10553                                 ADVERTISED_10baseT_Half |
10554                                 ADVERTISED_10baseT_Full |
10555                                 ADVERTISED_TP;
10556                 else
10557                         mask |= ADVERTISED_FIBRE;
10558
10559                 if (cmd->advertising & ~mask)
10560                         return -EINVAL;
10561
10562                 mask &= (ADVERTISED_1000baseT_Half |
10563                          ADVERTISED_1000baseT_Full |
10564                          ADVERTISED_100baseT_Half |
10565                          ADVERTISED_100baseT_Full |
10566                          ADVERTISED_10baseT_Half |
10567                          ADVERTISED_10baseT_Full);
10568
10569                 cmd->advertising &= mask;
10570         } else {
10571                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES) {
10572                         if (speed != SPEED_1000)
10573                                 return -EINVAL;
10574
10575                         if (cmd->duplex != DUPLEX_FULL)
10576                                 return -EINVAL;
10577                 } else {
10578                         if (speed != SPEED_100 &&
10579                             speed != SPEED_10)
10580                                 return -EINVAL;
10581                 }
10582         }
10583
10584         tg3_full_lock(tp, 0);
10585
10586         tp->link_config.autoneg = cmd->autoneg;
10587         if (cmd->autoneg == AUTONEG_ENABLE) {
10588                 tp->link_config.advertising = (cmd->advertising |
10589                                               ADVERTISED_Autoneg);
10590                 tp->link_config.speed = SPEED_UNKNOWN;
10591                 tp->link_config.duplex = DUPLEX_UNKNOWN;
10592         } else {
10593                 tp->link_config.advertising = 0;
10594                 tp->link_config.speed = speed;
10595                 tp->link_config.duplex = cmd->duplex;
10596         }
10597
10598         if (netif_running(dev))
10599                 tg3_setup_phy(tp, 1);
10600
10601         tg3_full_unlock(tp);
10602
10603         return 0;
10604 }
10605
10606 static void tg3_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
10607 {
10608         struct tg3 *tp = netdev_priv(dev);
10609
10610         strlcpy(info->driver, DRV_MODULE_NAME, sizeof(info->driver));
10611         strlcpy(info->version, DRV_MODULE_VERSION, sizeof(info->version));
10612         strlcpy(info->fw_version, tp->fw_ver, sizeof(info->fw_version));
10613         strlcpy(info->bus_info, pci_name(tp->pdev), sizeof(info->bus_info));
10614 }
10615
10616 static void tg3_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10617 {
10618         struct tg3 *tp = netdev_priv(dev);
10619
10620         if (tg3_flag(tp, WOL_CAP) && device_can_wakeup(&tp->pdev->dev))
10621                 wol->supported = WAKE_MAGIC;
10622         else
10623                 wol->supported = 0;
10624         wol->wolopts = 0;
10625         if (tg3_flag(tp, WOL_ENABLE) && device_can_wakeup(&tp->pdev->dev))
10626                 wol->wolopts = WAKE_MAGIC;
10627         memset(&wol->sopass, 0, sizeof(wol->sopass));
10628 }
10629
10630 static int tg3_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
10631 {
10632         struct tg3 *tp = netdev_priv(dev);
10633         struct device *dp = &tp->pdev->dev;
10634
10635         if (wol->wolopts & ~WAKE_MAGIC)
10636                 return -EINVAL;
10637         if ((wol->wolopts & WAKE_MAGIC) &&
10638             !(tg3_flag(tp, WOL_CAP) && device_can_wakeup(dp)))
10639                 return -EINVAL;
10640
10641         device_set_wakeup_enable(dp, wol->wolopts & WAKE_MAGIC);
10642
10643         spin_lock_bh(&tp->lock);
10644         if (device_may_wakeup(dp))
10645                 tg3_flag_set(tp, WOL_ENABLE);
10646         else
10647                 tg3_flag_clear(tp, WOL_ENABLE);
10648         spin_unlock_bh(&tp->lock);
10649
10650         return 0;
10651 }
10652
10653 static u32 tg3_get_msglevel(struct net_device *dev)
10654 {
10655         struct tg3 *tp = netdev_priv(dev);
10656         return tp->msg_enable;
10657 }
10658
10659 static void tg3_set_msglevel(struct net_device *dev, u32 value)
10660 {
10661         struct tg3 *tp = netdev_priv(dev);
10662         tp->msg_enable = value;
10663 }
10664
10665 static int tg3_nway_reset(struct net_device *dev)
10666 {
10667         struct tg3 *tp = netdev_priv(dev);
10668         int r;
10669
10670         if (!netif_running(dev))
10671                 return -EAGAIN;
10672
10673         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
10674                 return -EINVAL;
10675
10676         if (tg3_flag(tp, USE_PHYLIB)) {
10677                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
10678                         return -EAGAIN;
10679                 r = phy_start_aneg(tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR]);
10680         } else {
10681                 u32 bmcr;
10682
10683                 spin_lock_bh(&tp->lock);
10684                 r = -EINVAL;
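                /* BMCR is read twice and the first result discarded,
                 * presumably to flush a stale value; the original code
                 * does not document the reason.
                 */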
10685                 tg3_readphy(tp, MII_BMCR, &bmcr);
10686                 if (!tg3_readphy(tp, MII_BMCR, &bmcr) &&
10687                     ((bmcr & BMCR_ANENABLE) ||
10688                      (tp->phy_flags & TG3_PHYFLG_PARALLEL_DETECT))) {
10689                         tg3_writephy(tp, MII_BMCR, bmcr | BMCR_ANRESTART |
10690                                                    BMCR_ANENABLE);
10691                         r = 0;
10692                 }
10693                 spin_unlock_bh(&tp->lock);
10694         }
10695
10696         return r;
10697 }
10698
10699 static void tg3_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10700 {
10701         struct tg3 *tp = netdev_priv(dev);
10702
10703         ering->rx_max_pending = tp->rx_std_ring_mask;
10704         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10705                 ering->rx_jumbo_max_pending = tp->rx_jmb_ring_mask;
10706         else
10707                 ering->rx_jumbo_max_pending = 0;
10708
10709         ering->tx_max_pending = TG3_TX_RING_SIZE - 1;
10710
10711         ering->rx_pending = tp->rx_pending;
10712         if (tg3_flag(tp, JUMBO_RING_ENABLE))
10713                 ering->rx_jumbo_pending = tp->rx_jumbo_pending;
10714         else
10715                 ering->rx_jumbo_pending = 0;
10716
10717         ering->tx_pending = tp->napi[0].tx_pending;
10718 }
10719
10720 static int tg3_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
10721 {
10722         struct tg3 *tp = netdev_priv(dev);
10723         int i, irq_sync = 0, err = 0;
10724
10725         if ((ering->rx_pending > tp->rx_std_ring_mask) ||
10726             (ering->rx_jumbo_pending > tp->rx_jmb_ring_mask) ||
10727             (ering->tx_pending > TG3_TX_RING_SIZE - 1) ||
10728             (ering->tx_pending <= MAX_SKB_FRAGS) ||
10729             (tg3_flag(tp, TSO_BUG) &&
10730              (ering->tx_pending <= (MAX_SKB_FRAGS * 3))))
10731                 return -EINVAL;
10732
10733         if (netif_running(dev)) {
10734                 tg3_phy_stop(tp);
10735                 tg3_netif_stop(tp);
10736                 irq_sync = 1;
10737         }
10738
10739         tg3_full_lock(tp, irq_sync);
10740
10741         tp->rx_pending = ering->rx_pending;
10742
10743         if (tg3_flag(tp, MAX_RXPEND_64) &&
10744             tp->rx_pending > 63)
10745                 tp->rx_pending = 63;
10746         tp->rx_jumbo_pending = ering->rx_jumbo_pending;
10747
10748         for (i = 0; i < tp->irq_max; i++)
10749                 tp->napi[i].tx_pending = ering->tx_pending;
10750
10751         if (netif_running(dev)) {
10752                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10753                 err = tg3_restart_hw(tp, 1);
10754                 if (!err)
10755                         tg3_netif_start(tp);
10756         }
10757
10758         tg3_full_unlock(tp);
10759
10760         if (irq_sync && !err)
10761                 tg3_phy_start(tp);
10762
10763         return err;
10764 }
10765
10766 static void tg3_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10767 {
10768         struct tg3 *tp = netdev_priv(dev);
10769
10770         epause->autoneg = !!tg3_flag(tp, PAUSE_AUTONEG);
10771
10772         if (tp->link_config.flowctrl & FLOW_CTRL_RX)
10773                 epause->rx_pause = 1;
10774         else
10775                 epause->rx_pause = 0;
10776
10777         if (tp->link_config.flowctrl & FLOW_CTRL_TX)
10778                 epause->tx_pause = 1;
10779         else
10780                 epause->tx_pause = 0;
10781 }
10782
10783 static int tg3_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
10784 {
10785         struct tg3 *tp = netdev_priv(dev);
10786         int err = 0;
10787
10788         if (tg3_flag(tp, USE_PHYLIB)) {
10789                 u32 newadv;
10790                 struct phy_device *phydev;
10791
10792                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
10793
10794                 if (!(phydev->supported & SUPPORTED_Pause) ||
10795                     (!(phydev->supported & SUPPORTED_Asym_Pause) &&
10796                      (epause->rx_pause != epause->tx_pause)))
10797                         return -EINVAL;
10798
10799                 tp->link_config.flowctrl = 0;
10800                 if (epause->rx_pause) {
10801                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10802
10803                         if (epause->tx_pause) {
10804                                 tp->link_config.flowctrl |= FLOW_CTRL_TX;
10805                                 newadv = ADVERTISED_Pause;
10806                         } else
10807                                 newadv = ADVERTISED_Pause |
10808                                          ADVERTISED_Asym_Pause;
10809                 } else if (epause->tx_pause) {
10810                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10811                         newadv = ADVERTISED_Asym_Pause;
10812                 } else
10813                         newadv = 0;
10814
10815                 if (epause->autoneg)
10816                         tg3_flag_set(tp, PAUSE_AUTONEG);
10817                 else
10818                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10819
10820                 if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
10821                         u32 oldadv = phydev->advertising &
10822                                      (ADVERTISED_Pause | ADVERTISED_Asym_Pause);
10823                         if (oldadv != newadv) {
10824                                 phydev->advertising &=
10825                                         ~(ADVERTISED_Pause |
10826                                           ADVERTISED_Asym_Pause);
10827                                 phydev->advertising |= newadv;
10828                                 if (phydev->autoneg) {
10829                                         /*
10830                                          * Always renegotiate the link to
10831                                          * inform our link partner of our
10832                                          * flow control settings, even if the
10833                                          * flow control is forced.  Let
10834                                          * tg3_adjust_link() do the final
10835                                          * flow control setup.
10836                                          */
10837                                         return phy_start_aneg(phydev);
10838                                 }
10839                         }
10840
10841                         if (!epause->autoneg)
10842                                 tg3_setup_flow_control(tp, 0, 0);
10843                 } else {
10844                         tp->link_config.advertising &=
10845                                         ~(ADVERTISED_Pause |
10846                                           ADVERTISED_Asym_Pause);
10847                         tp->link_config.advertising |= newadv;
10848                 }
10849         } else {
10850                 int irq_sync = 0;
10851
10852                 if (netif_running(dev)) {
10853                         tg3_netif_stop(tp);
10854                         irq_sync = 1;
10855                 }
10856
10857                 tg3_full_lock(tp, irq_sync);
10858
10859                 if (epause->autoneg)
10860                         tg3_flag_set(tp, PAUSE_AUTONEG);
10861                 else
10862                         tg3_flag_clear(tp, PAUSE_AUTONEG);
10863                 if (epause->rx_pause)
10864                         tp->link_config.flowctrl |= FLOW_CTRL_RX;
10865                 else
10866                         tp->link_config.flowctrl &= ~FLOW_CTRL_RX;
10867                 if (epause->tx_pause)
10868                         tp->link_config.flowctrl |= FLOW_CTRL_TX;
10869                 else
10870                         tp->link_config.flowctrl &= ~FLOW_CTRL_TX;
10871
10872                 if (netif_running(dev)) {
10873                         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
10874                         err = tg3_restart_hw(tp, 1);
10875                         if (!err)
10876                                 tg3_netif_start(tp);
10877                 }
10878
10879                 tg3_full_unlock(tp);
10880         }
10881
10882         return err;
10883 }
10884
10885 static int tg3_get_sset_count(struct net_device *dev, int sset)
10886 {
10887         switch (sset) {
10888         case ETH_SS_TEST:
10889                 return TG3_NUM_TEST;
10890         case ETH_SS_STATS:
10891                 return TG3_NUM_STATS;
10892         default:
10893                 return -EOPNOTSUPP;
10894         }
10895 }
10896
10897 static int tg3_get_rxnfc(struct net_device *dev, struct ethtool_rxnfc *info,
10898                          u32 *rules __always_unused)
10899 {
10900         struct tg3 *tp = netdev_priv(dev);
10901
10902         if (!tg3_flag(tp, SUPPORT_MSIX))
10903                 return -EOPNOTSUPP;
10904
10905         switch (info->cmd) {
10906         case ETHTOOL_GRXRINGS:
10907                 if (netif_running(tp->dev))
10908                         info->data = tp->irq_cnt;
10909                 else {
10910                         info->data = num_online_cpus();
10911                         if (info->data > TG3_IRQ_MAX_VECS_RSS)
10912                                 info->data = TG3_IRQ_MAX_VECS_RSS;
10913                 }
10914
10915                 /* The first interrupt vector only
10916                  * handles link interrupts.
10917                  */
10918                 info->data -= 1;
10919                 return 0;
10920
10921         default:
10922                 return -EOPNOTSUPP;
10923         }
10924 }
10925
10926 static u32 tg3_get_rxfh_indir_size(struct net_device *dev)
10927 {
10928         u32 size = 0;
10929         struct tg3 *tp = netdev_priv(dev);
10930
10931         if (tg3_flag(tp, SUPPORT_MSIX))
10932                 size = TG3_RSS_INDIR_TBL_SIZE;
10933
10934         return size;
10935 }
10936
10937 static int tg3_get_rxfh_indir(struct net_device *dev, u32 *indir)
10938 {
10939         struct tg3 *tp = netdev_priv(dev);
10940         int i;
10941
10942         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10943                 indir[i] = tp->rss_ind_tbl[i];
10944
10945         return 0;
10946 }
10947
10948 static int tg3_set_rxfh_indir(struct net_device *dev, const u32 *indir)
10949 {
10950         struct tg3 *tp = netdev_priv(dev);
10951         size_t i;
10952
10953         for (i = 0; i < TG3_RSS_INDIR_TBL_SIZE; i++)
10954                 tp->rss_ind_tbl[i] = indir[i];
10955
10956         if (!netif_running(dev) || !tg3_flag(tp, ENABLE_RSS))
10957                 return 0;
10958
10959         /* It is legal to write the indirection
10960          * table while the device is running.
10961          */
10962         tg3_full_lock(tp, 0);
10963         tg3_rss_write_indir_tbl(tp);
10964         tg3_full_unlock(tp);
10965
10966         return 0;
10967 }
10968
10969 static void tg3_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10970 {
10971         switch (stringset) {
10972         case ETH_SS_STATS:
10973                 memcpy(buf, &ethtool_stats_keys, sizeof(ethtool_stats_keys));
10974                 break;
10975         case ETH_SS_TEST:
10976                 memcpy(buf, &ethtool_test_keys, sizeof(ethtool_test_keys));
10977                 break;
10978         default:
10979                 WARN_ON(1);     /* we need a WARN() */
10980                 break;
10981         }
10982 }
10983
10984 static int tg3_set_phys_id(struct net_device *dev,
10985                             enum ethtool_phys_id_state state)
10986 {
10987         struct tg3 *tp = netdev_priv(dev);
10988
10989         if (!netif_running(tp->dev))
10990                 return -EAGAIN;
10991
10992         switch (state) {
10993         case ETHTOOL_ID_ACTIVE:
10994                 return 1;       /* cycle on/off once per second */
10995
10996         case ETHTOOL_ID_ON:
10997                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
10998                      LED_CTRL_1000MBPS_ON |
10999                      LED_CTRL_100MBPS_ON |
11000                      LED_CTRL_10MBPS_ON |
11001                      LED_CTRL_TRAFFIC_OVERRIDE |
11002                      LED_CTRL_TRAFFIC_BLINK |
11003                      LED_CTRL_TRAFFIC_LED);
11004                 break;
11005
11006         case ETHTOOL_ID_OFF:
11007                 tw32(MAC_LED_CTRL, LED_CTRL_LNKLED_OVERRIDE |
11008                      LED_CTRL_TRAFFIC_OVERRIDE);
11009                 break;
11010
11011         case ETHTOOL_ID_INACTIVE:
11012                 tw32(MAC_LED_CTRL, tp->led_ctrl);
11013                 break;
11014         }
11015
11016         return 0;
11017 }
11018
11019 static void tg3_get_ethtool_stats(struct net_device *dev,
11020                                    struct ethtool_stats *estats, u64 *tmp_stats)
11021 {
11022         struct tg3 *tp = netdev_priv(dev);
11023
11024         if (tp->hw_stats)
11025                 tg3_get_estats(tp, (struct tg3_ethtool_stats *)tmp_stats);
11026         else
11027                 memset(tmp_stats, 0, sizeof(struct tg3_ethtool_stats));
11028 }
11029
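/* Read the VPD block.  On EEPROM-style NVRAM we look for an
 * extended-VPD directory entry, falling back to the fixed VPD offset;
 * otherwise the data is pulled through the PCI VPD capability, with up
 * to three attempts to complete short reads.
 */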
11030 static __be32 *tg3_vpd_readblock(struct tg3 *tp, u32 *vpdlen)
11031 {
11032         int i;
11033         __be32 *buf;
11034         u32 offset = 0, len = 0;
11035         u32 magic, val;
11036
11037         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &magic))
11038                 return NULL;
11039
11040         if (magic == TG3_EEPROM_MAGIC) {
11041                 for (offset = TG3_NVM_DIR_START;
11042                      offset < TG3_NVM_DIR_END;
11043                      offset += TG3_NVM_DIRENT_SIZE) {
11044                         if (tg3_nvram_read(tp, offset, &val))
11045                                 return NULL;
11046
11047                         if ((val >> TG3_NVM_DIRTYPE_SHIFT) ==
11048                             TG3_NVM_DIRTYPE_EXTVPD)
11049                                 break;
11050                 }
11051
11052                 if (offset != TG3_NVM_DIR_END) {
11053                         len = (val & TG3_NVM_DIRTYPE_LENMSK) * 4;
11054                         if (tg3_nvram_read(tp, offset + 4, &offset))
11055                                 return NULL;
11056
11057                         offset = tg3_nvram_logical_addr(tp, offset);
11058                 }
11059         }
11060
11061         if (!offset || !len) {
11062                 offset = TG3_NVM_VPD_OFF;
11063                 len = TG3_NVM_VPD_LEN;
11064         }
11065
11066         buf = kmalloc(len, GFP_KERNEL);
11067         if (buf == NULL)
11068                 return NULL;
11069
11070         if (magic == TG3_EEPROM_MAGIC) {
11071                 for (i = 0; i < len; i += 4) {
11072                         /* The data is in little-endian format in NVRAM.
11073                          * Use the big-endian read routines to preserve
11074                          * the byte order as it exists in NVRAM.
11075                          */
11076                         if (tg3_nvram_read_be32(tp, offset + i, &buf[i/4]))
11077                                 goto error;
11078                 }
11079         } else {
11080                 u8 *ptr;
11081                 ssize_t cnt;
11082                 unsigned int pos = 0;
11083
11084                 ptr = (u8 *)&buf[0];
11085                 for (i = 0; pos < len && i < 3; i++, pos += cnt, ptr += cnt) {
11086                         cnt = pci_read_vpd(tp->pdev, pos,
11087                                            len - pos, ptr);
11088                         if (cnt == -ETIMEDOUT || cnt == -EINTR)
11089                                 cnt = 0;
11090                         else if (cnt < 0)
11091                                 goto error;
11092                 }
11093                 if (pos != len)
11094                         goto error;
11095         }
11096
11097         *vpdlen = len;
11098
11099         return buf;
11100
11101 error:
11102         kfree(buf);
11103         return NULL;
11104 }
11105
11106 #define NVRAM_TEST_SIZE 0x100
11107 #define NVRAM_SELFBOOT_FORMAT1_0_SIZE   0x14
11108 #define NVRAM_SELFBOOT_FORMAT1_2_SIZE   0x18
11109 #define NVRAM_SELFBOOT_FORMAT1_3_SIZE   0x1c
11110 #define NVRAM_SELFBOOT_FORMAT1_4_SIZE   0x20
11111 #define NVRAM_SELFBOOT_FORMAT1_5_SIZE   0x24
11112 #define NVRAM_SELFBOOT_FORMAT1_6_SIZE   0x50
11113 #define NVRAM_SELFBOOT_HW_SIZE 0x20
11114 #define NVRAM_SELFBOOT_DATA_SIZE 0x1c
11115
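/* ethtool NVRAM self test.  The image type is identified by its magic
 * number and verified with the matching scheme: an 8-bit sum for
 * selfboot images (skipping the MBA word on rev 2), per-byte parity
 * bits for the hardware selfboot format, or, for full legacy images,
 * CRCs over the bootstrap and manufacturing blocks followed by the VPD
 * read-only checksum.
 */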
11116 static int tg3_test_nvram(struct tg3 *tp)
11117 {
11118         u32 csum, magic, len;
11119         __be32 *buf;
11120         int i, j, k, err = 0, size;
11121
11122         if (tg3_flag(tp, NO_NVRAM))
11123                 return 0;
11124
11125         if (tg3_nvram_read(tp, 0, &magic) != 0)
11126                 return -EIO;
11127
11128         if (magic == TG3_EEPROM_MAGIC)
11129                 size = NVRAM_TEST_SIZE;
11130         else if ((magic & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW) {
11131                 if ((magic & TG3_EEPROM_SB_FORMAT_MASK) ==
11132                     TG3_EEPROM_SB_FORMAT_1) {
11133                         switch (magic & TG3_EEPROM_SB_REVISION_MASK) {
11134                         case TG3_EEPROM_SB_REVISION_0:
11135                                 size = NVRAM_SELFBOOT_FORMAT1_0_SIZE;
11136                                 break;
11137                         case TG3_EEPROM_SB_REVISION_2:
11138                                 size = NVRAM_SELFBOOT_FORMAT1_2_SIZE;
11139                                 break;
11140                         case TG3_EEPROM_SB_REVISION_3:
11141                                 size = NVRAM_SELFBOOT_FORMAT1_3_SIZE;
11142                                 break;
11143                         case TG3_EEPROM_SB_REVISION_4:
11144                                 size = NVRAM_SELFBOOT_FORMAT1_4_SIZE;
11145                                 break;
11146                         case TG3_EEPROM_SB_REVISION_5:
11147                                 size = NVRAM_SELFBOOT_FORMAT1_5_SIZE;
11148                                 break;
11149                         case TG3_EEPROM_SB_REVISION_6:
11150                                 size = NVRAM_SELFBOOT_FORMAT1_6_SIZE;
11151                                 break;
11152                         default:
11153                                 return -EIO;
11154                         }
11155                 } else
11156                         return 0;
11157         } else if ((magic & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
11158                 size = NVRAM_SELFBOOT_HW_SIZE;
11159         else
11160                 return -EIO;
11161
11162         buf = kmalloc(size, GFP_KERNEL);
11163         if (buf == NULL)
11164                 return -ENOMEM;
11165
11166         err = -EIO;
11167         for (i = 0, j = 0; i < size; i += 4, j++) {
11168                 err = tg3_nvram_read_be32(tp, i, &buf[j]);
11169                 if (err)
11170                         break;
11171         }
11172         if (i < size)
11173                 goto out;
11174
11175         /* Selfboot format */
11176         magic = be32_to_cpu(buf[0]);
11177         if ((magic & TG3_EEPROM_MAGIC_FW_MSK) ==
11178             TG3_EEPROM_MAGIC_FW) {
11179                 u8 *buf8 = (u8 *) buf, csum8 = 0;
11180
11181                 if ((magic & TG3_EEPROM_SB_REVISION_MASK) ==
11182                     TG3_EEPROM_SB_REVISION_2) {
11183                         /* For rev 2, the csum doesn't include the MBA. */
11184                         for (i = 0; i < TG3_EEPROM_SB_F1R2_MBA_OFF; i++)
11185                                 csum8 += buf8[i];
11186                         for (i = TG3_EEPROM_SB_F1R2_MBA_OFF + 4; i < size; i++)
11187                                 csum8 += buf8[i];
11188                 } else {
11189                         for (i = 0; i < size; i++)
11190                                 csum8 += buf8[i];
11191                 }
11192
11193                 if (csum8 == 0) {
11194                         err = 0;
11195                         goto out;
11196                 }
11197
11198                 err = -EIO;
11199                 goto out;
11200         }
11201
11202         if ((magic & TG3_EEPROM_MAGIC_HW_MSK) ==
11203             TG3_EEPROM_MAGIC_HW) {
11204                 u8 data[NVRAM_SELFBOOT_DATA_SIZE];
11205                 u8 parity[NVRAM_SELFBOOT_DATA_SIZE];
11206                 u8 *buf8 = (u8 *) buf;
11207
11208                 /* Separate the parity bits and the data bytes.  */
11209                 for (i = 0, j = 0, k = 0; i < NVRAM_SELFBOOT_HW_SIZE; i++) {
11210                         if ((i == 0) || (i == 8)) {
11211                                 int l;
11212                                 u8 msk;
11213
11214                                 for (l = 0, msk = 0x80; l < 7; l++, msk >>= 1)
11215                                         parity[k++] = buf8[i] & msk;
11216                                 i++;
11217                         } else if (i == 16) {
11218                                 int l;
11219                                 u8 msk;
11220
11221                                 for (l = 0, msk = 0x20; l < 6; l++, msk >>= 1)
11222                                         parity[k++] = buf8[i] & msk;
11223                                 i++;
11224
11225                                 for (l = 0, msk = 0x80; l < 8; l++, msk >>= 1)
11226                                         parity[k++] = buf8[i] & msk;
11227                                 i++;
11228                         }
11229                         data[j++] = buf8[i];
11230                 }
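                /* Byte budget, as a worked example: of the 0x20 bytes
                 * in the block, bytes 0, 8, 16 and 17 carry the parity
                 * bits (7 + 7 + 6 + 8 = 28), leaving 32 - 4 = 28 = 0x1c
                 * data bytes -- exactly NVRAM_SELFBOOT_DATA_SIZE.  The
                 * loop below then checks odd parity: the set bits in
                 * each data byte plus its parity bit must total an odd
                 * number.
                 */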
11231
11232                 err = -EIO;
11233                 for (i = 0; i < NVRAM_SELFBOOT_DATA_SIZE; i++) {
11234                         u8 hw8 = hweight8(data[i]);
11235
11236                         if ((hw8 & 0x1) && parity[i])
11237                                 goto out;
11238                         else if (!(hw8 & 0x1) && !parity[i])
11239                                 goto out;
11240                 }
11241                 err = 0;
11242                 goto out;
11243         }
11244
11245         err = -EIO;
11246
11247         /* Bootstrap checksum at offset 0x10 */
11248         csum = calc_crc((unsigned char *) buf, 0x10);
11249         if (csum != le32_to_cpu(buf[0x10/4]))
11250                 goto out;
11251
11252         /* Manufacturing block starts at offset 0x74, checksum at 0xfc */
11253         csum = calc_crc((unsigned char *) &buf[0x74/4], 0x88);
11254         if (csum != le32_to_cpu(buf[0xfc/4]))
11255                 goto out;
11256
11257         kfree(buf);
11258
11259         buf = tg3_vpd_readblock(tp, &len);
11260         if (!buf)
11261                 return -ENOMEM;
11262
11263         i = pci_vpd_find_tag((u8 *)buf, 0, len, PCI_VPD_LRDT_RO_DATA);
11264         if (i > 0) {
11265                 j = pci_vpd_lrdt_size(&((u8 *)buf)[i]);
11266                 if (j < 0)
11267                         goto out;
11268
11269                 if (i + PCI_VPD_LRDT_TAG_SIZE + j > len)
11270                         goto out;
11271
11272                 i += PCI_VPD_LRDT_TAG_SIZE;
11273                 j = pci_vpd_find_info_keyword((u8 *)buf, i, j,
11274                                               PCI_VPD_RO_KEYWORD_CHKSUM);
11275                 if (j > 0) {
11276                         u8 csum8 = 0;
11277
11278                         j += PCI_VPD_INFO_FLD_HDR_SIZE;
11279
11280                         for (i = 0; i <= j; i++)
11281                                 csum8 += ((u8 *)buf)[i];
11282
11283                         if (csum8)
11284                                 goto out;
11285                 }
11286         }
11287
11288         err = 0;
11289
11290 out:
11291         kfree(buf);
11292         return err;
11293 }
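/* Informal map of the legacy (TG3_EEPROM_MAGIC) image as verified above:
 *
 *   0x00-0x0f   bootstrap region, CRC stored at offset 0x10
 *   0x74-0xfb   manufacturing block (0x88 bytes), CRC stored at 0xfc
 *   VPD         located via the PCI VPD large-resource tags; if the
 *               read-only section carries a checksum (RV) keyword, the
 *               bytes up to and including it must sum to zero.
 */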
11294
11295 #define TG3_SERDES_TIMEOUT_SEC  2
11296 #define TG3_COPPER_TIMEOUT_SEC  6
11297
11298 static int tg3_test_link(struct tg3 *tp)
11299 {
11300         int i, max;
11301
11302         if (!netif_running(tp->dev))
11303                 return -ENODEV;
11304
11305         if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
11306                 max = TG3_SERDES_TIMEOUT_SEC;
11307         else
11308                 max = TG3_COPPER_TIMEOUT_SEC;
11309
11310         for (i = 0; i < max; i++) {
11311                 if (netif_carrier_ok(tp->dev))
11312                         return 0;
11313
11314                 if (msleep_interruptible(1000))
11315                         break;
11316         }
11317
11318         return -EIO;
11319 }
11320
11321 /* Only test the commonly used registers */
11322 static int tg3_test_registers(struct tg3 *tp)
11323 {
11324         int i, is_5705, is_5750;
11325         u32 offset, read_mask, write_mask, val, save_val, read_val;
11326         static struct {
11327                 u16 offset;
11328                 u16 flags;
11329 #define TG3_FL_5705     0x1
11330 #define TG3_FL_NOT_5705 0x2
11331 #define TG3_FL_NOT_5788 0x4
11332 #define TG3_FL_NOT_5750 0x8
11333                 u32 read_mask;
11334                 u32 write_mask;
11335         } reg_tbl[] = {
11336                 /* MAC Control Registers */
11337                 { MAC_MODE, TG3_FL_NOT_5705,
11338                         0x00000000, 0x00ef6f8c },
11339                 { MAC_MODE, TG3_FL_5705,
11340                         0x00000000, 0x01ef6b8c },
11341                 { MAC_STATUS, TG3_FL_NOT_5705,
11342                         0x03800107, 0x00000000 },
11343                 { MAC_STATUS, TG3_FL_5705,
11344                         0x03800100, 0x00000000 },
11345                 { MAC_ADDR_0_HIGH, 0x0000,
11346                         0x00000000, 0x0000ffff },
11347                 { MAC_ADDR_0_LOW, 0x0000,
11348                         0x00000000, 0xffffffff },
11349                 { MAC_RX_MTU_SIZE, 0x0000,
11350                         0x00000000, 0x0000ffff },
11351                 { MAC_TX_MODE, 0x0000,
11352                         0x00000000, 0x00000070 },
11353                 { MAC_TX_LENGTHS, 0x0000,
11354                         0x00000000, 0x00003fff },
11355                 { MAC_RX_MODE, TG3_FL_NOT_5705,
11356                         0x00000000, 0x000007fc },
11357                 { MAC_RX_MODE, TG3_FL_5705,
11358                         0x00000000, 0x000007dc },
11359                 { MAC_HASH_REG_0, 0x0000,
11360                         0x00000000, 0xffffffff },
11361                 { MAC_HASH_REG_1, 0x0000,
11362                         0x00000000, 0xffffffff },
11363                 { MAC_HASH_REG_2, 0x0000,
11364                         0x00000000, 0xffffffff },
11365                 { MAC_HASH_REG_3, 0x0000,
11366                         0x00000000, 0xffffffff },
11367
11368                 /* Receive Data and Receive BD Initiator Control Registers. */
11369                 { RCVDBDI_JUMBO_BD+0, TG3_FL_NOT_5705,
11370                         0x00000000, 0xffffffff },
11371                 { RCVDBDI_JUMBO_BD+4, TG3_FL_NOT_5705,
11372                         0x00000000, 0xffffffff },
11373                 { RCVDBDI_JUMBO_BD+8, TG3_FL_NOT_5705,
11374                         0x00000000, 0x00000003 },
11375                 { RCVDBDI_JUMBO_BD+0xc, TG3_FL_NOT_5705,
11376                         0x00000000, 0xffffffff },
11377                 { RCVDBDI_STD_BD+0, 0x0000,
11378                         0x00000000, 0xffffffff },
11379                 { RCVDBDI_STD_BD+4, 0x0000,
11380                         0x00000000, 0xffffffff },
11381                 { RCVDBDI_STD_BD+8, 0x0000,
11382                         0x00000000, 0xffff0002 },
11383                 { RCVDBDI_STD_BD+0xc, 0x0000,
11384                         0x00000000, 0xffffffff },
11385
11386                 /* Receive BD Initiator Control Registers. */
11387                 { RCVBDI_STD_THRESH, TG3_FL_NOT_5705,
11388                         0x00000000, 0xffffffff },
11389                 { RCVBDI_STD_THRESH, TG3_FL_5705,
11390                         0x00000000, 0x000003ff },
11391                 { RCVBDI_JUMBO_THRESH, TG3_FL_NOT_5705,
11392                         0x00000000, 0xffffffff },
11393
11394                 /* Host Coalescing Control Registers. */
11395                 { HOSTCC_MODE, TG3_FL_NOT_5705,
11396                         0x00000000, 0x00000004 },
11397                 { HOSTCC_MODE, TG3_FL_5705,
11398                         0x00000000, 0x000000f6 },
11399                 { HOSTCC_RXCOL_TICKS, TG3_FL_NOT_5705,
11400                         0x00000000, 0xffffffff },
11401                 { HOSTCC_RXCOL_TICKS, TG3_FL_5705,
11402                         0x00000000, 0x000003ff },
11403                 { HOSTCC_TXCOL_TICKS, TG3_FL_NOT_5705,
11404                         0x00000000, 0xffffffff },
11405                 { HOSTCC_TXCOL_TICKS, TG3_FL_5705,
11406                         0x00000000, 0x000003ff },
11407                 { HOSTCC_RXMAX_FRAMES, TG3_FL_NOT_5705,
11408                         0x00000000, 0xffffffff },
11409                 { HOSTCC_RXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11410                         0x00000000, 0x000000ff },
11411                 { HOSTCC_TXMAX_FRAMES, TG3_FL_NOT_5705,
11412                         0x00000000, 0xffffffff },
11413                 { HOSTCC_TXMAX_FRAMES, TG3_FL_5705 | TG3_FL_NOT_5788,
11414                         0x00000000, 0x000000ff },
11415                 { HOSTCC_RXCOAL_TICK_INT, TG3_FL_NOT_5705,
11416                         0x00000000, 0xffffffff },
11417                 { HOSTCC_TXCOAL_TICK_INT, TG3_FL_NOT_5705,
11418                         0x00000000, 0xffffffff },
11419                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11420                         0x00000000, 0xffffffff },
11421                 { HOSTCC_RXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11422                         0x00000000, 0x000000ff },
11423                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_NOT_5705,
11424                         0x00000000, 0xffffffff },
11425                 { HOSTCC_TXCOAL_MAXF_INT, TG3_FL_5705 | TG3_FL_NOT_5788,
11426                         0x00000000, 0x000000ff },
11427                 { HOSTCC_STAT_COAL_TICKS, TG3_FL_NOT_5705,
11428                         0x00000000, 0xffffffff },
11429                 { HOSTCC_STATS_BLK_HOST_ADDR, TG3_FL_NOT_5705,
11430                         0x00000000, 0xffffffff },
11431                 { HOSTCC_STATS_BLK_HOST_ADDR+4, TG3_FL_NOT_5705,
11432                         0x00000000, 0xffffffff },
11433                 { HOSTCC_STATUS_BLK_HOST_ADDR, 0x0000,
11434                         0x00000000, 0xffffffff },
11435                 { HOSTCC_STATUS_BLK_HOST_ADDR+4, 0x0000,
11436                         0x00000000, 0xffffffff },
11437                 { HOSTCC_STATS_BLK_NIC_ADDR, 0x0000,
11438                         0xffffffff, 0x00000000 },
11439                 { HOSTCC_STATUS_BLK_NIC_ADDR, 0x0000,
11440                         0xffffffff, 0x00000000 },
11441
11442                 /* Buffer Manager Control Registers. */
11443                 { BUFMGR_MB_POOL_ADDR, TG3_FL_NOT_5750,
11444                         0x00000000, 0x007fff80 },
11445                 { BUFMGR_MB_POOL_SIZE, TG3_FL_NOT_5750,
11446                         0x00000000, 0x007fffff },
11447                 { BUFMGR_MB_RDMA_LOW_WATER, 0x0000,
11448                         0x00000000, 0x0000003f },
11449                 { BUFMGR_MB_MACRX_LOW_WATER, 0x0000,
11450                         0x00000000, 0x000001ff },
11451                 { BUFMGR_MB_HIGH_WATER, 0x0000,
11452                         0x00000000, 0x000001ff },
11453                 { BUFMGR_DMA_DESC_POOL_ADDR, TG3_FL_NOT_5705,
11454                         0xffffffff, 0x00000000 },
11455                 { BUFMGR_DMA_DESC_POOL_SIZE, TG3_FL_NOT_5705,
11456                         0xffffffff, 0x00000000 },
11457
11458                 /* Mailbox Registers */
11459                 { GRCMBOX_RCVSTD_PROD_IDX+4, 0x0000,
11460                         0x00000000, 0x000001ff },
11461                 { GRCMBOX_RCVJUMBO_PROD_IDX+4, TG3_FL_NOT_5705,
11462                         0x00000000, 0x000001ff },
11463                 { GRCMBOX_RCVRET_CON_IDX_0+4, 0x0000,
11464                         0x00000000, 0x000007ff },
11465                 { GRCMBOX_SNDHOST_PROD_IDX_0+4, 0x0000,
11466                         0x00000000, 0x000001ff },
11467
11468                 { 0xffff, 0x0000, 0x00000000, 0x00000000 },
11469         };
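        /* The 0xffff offset sentinel terminates the table above.  For each
         * entry, read_mask selects read-only bits, whose value must survive
         * any write, and write_mask selects read/write bits, which must
         * accept both all-zeros and all-ones.  E.g. for MAC_STATUS on
         * non-5705 parts, bits 0x03800107 are read-only and none are
         * writable.
         */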
11470
11471         is_5705 = is_5750 = 0;
11472         if (tg3_flag(tp, 5705_PLUS)) {
11473                 is_5705 = 1;
11474                 if (tg3_flag(tp, 5750_PLUS))
11475                         is_5750 = 1;
11476         }
11477
11478         for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
11479                 if (is_5705 && (reg_tbl[i].flags & TG3_FL_NOT_5705))
11480                         continue;
11481
11482                 if (!is_5705 && (reg_tbl[i].flags & TG3_FL_5705))
11483                         continue;
11484
11485                 if (tg3_flag(tp, IS_5788) &&
11486                     (reg_tbl[i].flags & TG3_FL_NOT_5788))
11487                         continue;
11488
11489                 if (is_5750 && (reg_tbl[i].flags & TG3_FL_NOT_5750))
11490                         continue;
11491
11492                 offset = (u32) reg_tbl[i].offset;
11493                 read_mask = reg_tbl[i].read_mask;
11494                 write_mask = reg_tbl[i].write_mask;
11495
11496                 /* Save the original register content */
11497                 save_val = tr32(offset);
11498
11499                 /* Determine the read-only value. */
11500                 read_val = save_val & read_mask;
11501
11502                 /* Write zero to the register, then make sure the read-only bits
11503                  * are not changed and the read/write bits are all zeros.
11504                  */
11505                 tw32(offset, 0);
11506
11507                 val = tr32(offset);
11508
11509                 /* Test the read-only and read/write bits. */
11510                 if (((val & read_mask) != read_val) || (val & write_mask))
11511                         goto out;
11512
11513                 /* Write ones to all the bits defined by read_mask and
11514                  * write_mask, then make sure the read-only bits are not
11515                  * changed and the read/write bits are all ones.
11516                  */
11517                 tw32(offset, read_mask | write_mask);
11518
11519                 val = tr32(offset);
11520
11521                 /* Test the read-only bits. */
11522                 if ((val & read_mask) != read_val)
11523                         goto out;
11524
11525                 /* Test the read/write bits. */
11526                 if ((val & write_mask) != write_mask)
11527                         goto out;
11528
11529                 tw32(offset, save_val);
11530         }
11531
11532         return 0;
11533
11534 out:
11535         if (netif_msg_hw(tp))
11536                 netdev_err(tp->dev,
11537                            "Register test failed at offset %x\n", offset);
11538         tw32(offset, save_val);
11539         return -EIO;
11540 }
11541
11542 static int tg3_do_mem_test(struct tg3 *tp, u32 offset, u32 len)
11543 {
11544         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0xaa55a55a };
11545         int i;
11546         u32 j;
11547
11548         for (i = 0; i < ARRAY_SIZE(test_pattern); i++) {
11549                 for (j = 0; j < len; j += 4) {
11550                         u32 val;
11551
11552                         tg3_write_mem(tp, offset + j, test_pattern[i]);
11553                         tg3_read_mem(tp, offset + j, &val);
11554                         if (val != test_pattern[i])
11555                                 return -EIO;
11556                 }
11557         }
11558         return 0;
11559 }
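/* The three patterns above exercise complementary failure modes:
 * all-zeros and all-ones catch stuck-at bits, while the alternating
 * 0xaa55a55a pattern helps catch shorted or coupled neighboring bits.
 */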
11560
11561 static int tg3_test_memory(struct tg3 *tp)
11562 {
11563         static struct mem_entry {
11564                 u32 offset;
11565                 u32 len;
11566         } mem_tbl_570x[] = {
11567                 { 0x00000000, 0x00b50},
11568                 { 0x00002000, 0x1c000},
11569                 { 0xffffffff, 0x00000}
11570         }, mem_tbl_5705[] = {
11571                 { 0x00000100, 0x0000c},
11572                 { 0x00000200, 0x00008},
11573                 { 0x00004000, 0x00800},
11574                 { 0x00006000, 0x01000},
11575                 { 0x00008000, 0x02000},
11576                 { 0x00010000, 0x0e000},
11577                 { 0xffffffff, 0x00000}
11578         }, mem_tbl_5755[] = {
11579                 { 0x00000200, 0x00008},
11580                 { 0x00004000, 0x00800},
11581                 { 0x00006000, 0x00800},
11582                 { 0x00008000, 0x02000},
11583                 { 0x00010000, 0x0c000},
11584                 { 0xffffffff, 0x00000}
11585         }, mem_tbl_5906[] = {
11586                 { 0x00000200, 0x00008},
11587                 { 0x00004000, 0x00400},
11588                 { 0x00006000, 0x00400},
11589                 { 0x00008000, 0x01000},
11590                 { 0x00010000, 0x01000},
11591                 { 0xffffffff, 0x00000}
11592         }, mem_tbl_5717[] = {
11593                 { 0x00000200, 0x00008},
11594                 { 0x00010000, 0x0a000},
11595                 { 0x00020000, 0x13c00},
11596                 { 0xffffffff, 0x00000}
11597         }, mem_tbl_57765[] = {
11598                 { 0x00000200, 0x00008},
11599                 { 0x00004000, 0x00800},
11600                 { 0x00006000, 0x09800},
11601                 { 0x00010000, 0x0a000},
11602                 { 0xffffffff, 0x00000}
11603         };
11604         struct mem_entry *mem_tbl;
11605         int err = 0;
11606         int i;
11607
11608         if (tg3_flag(tp, 5717_PLUS))
11609                 mem_tbl = mem_tbl_5717;
11610         else if (tg3_flag(tp, 57765_CLASS))
11611                 mem_tbl = mem_tbl_57765;
11612         else if (tg3_flag(tp, 5755_PLUS))
11613                 mem_tbl = mem_tbl_5755;
11614         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
11615                 mem_tbl = mem_tbl_5906;
11616         else if (tg3_flag(tp, 5705_PLUS))
11617                 mem_tbl = mem_tbl_5705;
11618         else
11619                 mem_tbl = mem_tbl_570x;
11620
11621         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
11622                 err = tg3_do_mem_test(tp, mem_tbl[i].offset, mem_tbl[i].len);
11623                 if (err)
11624                         break;
11625         }
11626
11627         return err;
11628 }
11629
11630 #define TG3_TSO_MSS             500
11631
11632 #define TG3_TSO_IP_HDR_LEN      20
11633 #define TG3_TSO_TCP_HDR_LEN     20
11634 #define TG3_TSO_TCP_OPT_LEN     12
11635
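/* Template for the TSO loopback packet, starting at the EtherType (it
 * is copied in at tx_data + ETH_ALEN * 2): 0x0800 (IPv4), a 20-byte
 * IPv4 header (DF set, TTL 64, protocol 6/TCP, 10.0.0.1 -> 10.0.0.2,
 * total length filled in later), and a 32-byte TCP header (data offset
 * 8, ACK set, 12 bytes of NOP/NOP/timestamp options).
 */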
11636 static const u8 tg3_tso_header[] = {
11637 0x08, 0x00,
11638 0x45, 0x00, 0x00, 0x00,
11639 0x00, 0x00, 0x40, 0x00,
11640 0x40, 0x06, 0x00, 0x00,
11641 0x0a, 0x00, 0x00, 0x01,
11642 0x0a, 0x00, 0x00, 0x02,
11643 0x0d, 0x00, 0xe0, 0x00,
11644 0x00, 0x00, 0x01, 0x00,
11645 0x00, 0x00, 0x02, 0x00,
11646 0x80, 0x10, 0x10, 0x00,
11647 0x14, 0x09, 0x00, 0x00,
11648 0x01, 0x01, 0x08, 0x0a,
11649 0x11, 0x11, 0x11, 0x11,
11650 0x11, 0x11, 0x11, 0x11,
11651 };
11652
11653 static int tg3_run_loopback(struct tg3 *tp, u32 pktsz, bool tso_loopback)
11654 {
11655         u32 rx_start_idx, rx_idx, tx_idx, opaque_key;
11656         u32 base_flags = 0, mss = 0, desc_idx, coal_now, data_off, val;
11657         u32 budget;
11658         struct sk_buff *skb;
11659         u8 *tx_data, *rx_data;
11660         dma_addr_t map;
11661         int num_pkts, tx_len, rx_len, i, err;
11662         struct tg3_rx_buffer_desc *desc;
11663         struct tg3_napi *tnapi, *rnapi;
11664         struct tg3_rx_prodring_set *tpr = &tp->napi[0].prodring;
11665
11666         tnapi = &tp->napi[0];
11667         rnapi = &tp->napi[0];
11668         if (tp->irq_cnt > 1) {
11669                 if (tg3_flag(tp, ENABLE_RSS))
11670                         rnapi = &tp->napi[1];
11671                 if (tg3_flag(tp, ENABLE_TSS))
11672                         tnapi = &tp->napi[1];
11673         }
11674         coal_now = tnapi->coal_now | rnapi->coal_now;
11675
11676         err = -EIO;
11677
11678         tx_len = pktsz;
11679         skb = netdev_alloc_skb(tp->dev, tx_len);
11680         if (!skb)
11681                 return -ENOMEM;
11682
11683         tx_data = skb_put(skb, tx_len);
11684         memcpy(tx_data, tp->dev->dev_addr, ETH_ALEN);
11685         memset(tx_data + ETH_ALEN, 0x0, 8);
11686
11687         tw32(MAC_RX_MTU_SIZE, tx_len + ETH_FCS_LEN);
11688
11689         if (tso_loopback) {
11690                 struct iphdr *iph = (struct iphdr *)&tx_data[ETH_HLEN];
11691
11692                 u32 hdr_len = TG3_TSO_IP_HDR_LEN + TG3_TSO_TCP_HDR_LEN +
11693                               TG3_TSO_TCP_OPT_LEN;
11694
11695                 memcpy(tx_data + ETH_ALEN * 2, tg3_tso_header,
11696                        sizeof(tg3_tso_header));
11697                 mss = TG3_TSO_MSS;
11698
11699                 val = tx_len - ETH_ALEN * 2 - sizeof(tg3_tso_header);
11700                 num_pkts = DIV_ROUND_UP(val, TG3_TSO_MSS);
11701
11702                 /* Set the total length field in the IP header */
11703                 iph->tot_len = htons((u16)(mss + hdr_len));
11704
11705                 base_flags = (TXD_FLAG_CPU_PRE_DMA |
11706                               TXD_FLAG_CPU_POST_DMA);
11707
11708                 if (tg3_flag(tp, HW_TSO_1) ||
11709                     tg3_flag(tp, HW_TSO_2) ||
11710                     tg3_flag(tp, HW_TSO_3)) {
11711                         struct tcphdr *th;
11712                         val = ETH_HLEN + TG3_TSO_IP_HDR_LEN;
11713                         th = (struct tcphdr *)&tx_data[val];
11714                         th->check = 0;
11715                 } else
11716                         base_flags |= TXD_FLAG_TCPUDP_CSUM;
11717
11718                 if (tg3_flag(tp, HW_TSO_3)) {
11719                         mss |= (hdr_len & 0xc) << 12;
11720                         if (hdr_len & 0x10)
11721                                 base_flags |= 0x00000010;
11722                         base_flags |= (hdr_len & 0x3e0) << 5;
11723                 } else if (tg3_flag(tp, HW_TSO_2))
11724                         mss |= hdr_len << 9;
11725                 else if (tg3_flag(tp, HW_TSO_1) ||
11726                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705) {
11727                         mss |= (TG3_TSO_TCP_OPT_LEN << 9);
11728                 } else {
11729                         base_flags |= (TG3_TSO_TCP_OPT_LEN << 10);
11730                 }
11731
11732                 data_off = ETH_ALEN * 2 + sizeof(tg3_tso_header);
11733         } else {
11734                 num_pkts = 1;
11735                 data_off = ETH_HLEN;
11736
11737                 if (tg3_flag(tp, USE_JUMBO_BDFLAG) &&
11738                     tx_len > VLAN_ETH_FRAME_LEN)
11739                         base_flags |= TXD_FLAG_JMB_PKT;
11740         }
11741
11742         for (i = data_off; i < tx_len; i++)
11743                 tx_data[i] = (u8) (i & 0xff);
11744
11745         map = pci_map_single(tp->pdev, skb->data, tx_len, PCI_DMA_TODEVICE);
11746         if (pci_dma_mapping_error(tp->pdev, map)) {
11747                 dev_kfree_skb(skb);
11748                 return -EIO;
11749         }
11750
11751         val = tnapi->tx_prod;
11752         tnapi->tx_buffers[val].skb = skb;
11753         dma_unmap_addr_set(&tnapi->tx_buffers[val], mapping, map);
11754
11755         tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11756                rnapi->coal_now);
11757
11758         udelay(10);
11759
11760         rx_start_idx = rnapi->hw_status->idx[0].rx_producer;
11761
11762         budget = tg3_tx_avail(tnapi);
11763         if (tg3_tx_frag_set(tnapi, &val, &budget, map, tx_len,
11764                             base_flags | TXD_FLAG_END, mss, 0)) {
11765                 tnapi->tx_buffers[val].skb = NULL;
11766                 dev_kfree_skb(skb);
11767                 return -EIO;
11768         }
11769
11770         tnapi->tx_prod++;
11771
11772         /* Sync BD data before updating mailbox */
11773         wmb();
11774
11775         tw32_tx_mbox(tnapi->prodmbox, tnapi->tx_prod);
11776         tr32_mailbox(tnapi->prodmbox);
11777
11778         udelay(10);
11779
11780                 /* 350 usec (35 polls x 10 usec) to allow enough time on some 10/100 Mbps devices. */
11781         for (i = 0; i < 35; i++) {
11782                 tw32_f(HOSTCC_MODE, tp->coalesce_mode | HOSTCC_MODE_ENABLE |
11783                        coal_now);
11784
11785                 udelay(10);
11786
11787                 tx_idx = tnapi->hw_status->idx[0].tx_consumer;
11788                 rx_idx = rnapi->hw_status->idx[0].rx_producer;
11789                 if ((tx_idx == tnapi->tx_prod) &&
11790                     (rx_idx == (rx_start_idx + num_pkts)))
11791                         break;
11792         }
11793
11794         tg3_tx_skb_unmap(tnapi, tnapi->tx_prod - 1, -1);
11795         dev_kfree_skb(skb);
11796
11797         if (tx_idx != tnapi->tx_prod)
11798                 goto out;
11799
11800         if (rx_idx != rx_start_idx + num_pkts)
11801                 goto out;
11802
11803         val = data_off;
11804         while (rx_idx != rx_start_idx) {
11805                 desc = &rnapi->rx_rcb[rx_start_idx++];
11806                 desc_idx = desc->opaque & RXD_OPAQUE_INDEX_MASK;
11807                 opaque_key = desc->opaque & RXD_OPAQUE_RING_MASK;
11808
11809                 if ((desc->err_vlan & RXD_ERR_MASK) != 0 &&
11810                     (desc->err_vlan != RXD_ERR_ODD_NIBBLE_RCVD_MII))
11811                         goto out;
11812
11813                 rx_len = ((desc->idx_len & RXD_LEN_MASK) >> RXD_LEN_SHIFT)
11814                          - ETH_FCS_LEN;
11815
11816                 if (!tso_loopback) {
11817                         if (rx_len != tx_len)
11818                                 goto out;
11819
11820                         if (pktsz <= TG3_RX_STD_DMA_SZ - ETH_FCS_LEN) {
11821                                 if (opaque_key != RXD_OPAQUE_RING_STD)
11822                                         goto out;
11823                         } else {
11824                                 if (opaque_key != RXD_OPAQUE_RING_JUMBO)
11825                                         goto out;
11826                         }
11827                 } else if ((desc->type_flags & RXD_FLAG_TCPUDP_CSUM) &&
11828                            (desc->ip_tcp_csum & RXD_TCPCSUM_MASK)
11829                             >> RXD_TCPCSUM_SHIFT != 0xffff) {
11830                         goto out;
11831                 }
11832
11833                 if (opaque_key == RXD_OPAQUE_RING_STD) {
11834                         rx_data = tpr->rx_std_buffers[desc_idx].data;
11835                         map = dma_unmap_addr(&tpr->rx_std_buffers[desc_idx],
11836                                              mapping);
11837                 } else if (opaque_key == RXD_OPAQUE_RING_JUMBO) {
11838                         rx_data = tpr->rx_jmb_buffers[desc_idx].data;
11839                         map = dma_unmap_addr(&tpr->rx_jmb_buffers[desc_idx],
11840                                              mapping);
11841                 } else
11842                         goto out;
11843
11844                 pci_dma_sync_single_for_cpu(tp->pdev, map, rx_len,
11845                                             PCI_DMA_FROMDEVICE);
11846
11847                 rx_data += TG3_RX_OFFSET(tp);
11848                 for (i = data_off; i < rx_len; i++, val++) {
11849                         if (*(rx_data + i) != (u8) (val & 0xff))
11850                                 goto out;
11851                 }
11852         }
11853
11854         err = 0;
11855
11856         /* tg3_free_rings will unmap and free the rx_data */
11857 out:
11858         return err;
11859 }
11860
11861 #define TG3_STD_LOOPBACK_FAILED         1
11862 #define TG3_JMB_LOOPBACK_FAILED         2
11863 #define TG3_TSO_LOOPBACK_FAILED         4
11864 #define TG3_LOOPBACK_FAILED \
11865         (TG3_STD_LOOPBACK_FAILED | \
11866          TG3_JMB_LOOPBACK_FAILED | \
11867          TG3_TSO_LOOPBACK_FAILED)
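/* tg3_test_loopback() reports through three u64 slots using the flag
 * bits above: data[0] covers MAC loopback, data[1] internal PHY
 * loopback, and data[2] (external-loopback runs only) external PHY
 * loopback.
 */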
11868
11869 static int tg3_test_loopback(struct tg3 *tp, u64 *data, bool do_extlpbk)
11870 {
11871         int err = -EIO;
11872         u32 eee_cap;
11873         u32 jmb_pkt_sz = 9000;
11874
11875         if (tp->dma_limit)
11876                 jmb_pkt_sz = tp->dma_limit - ETH_HLEN;
11877
11878         eee_cap = tp->phy_flags & TG3_PHYFLG_EEE_CAP;
11879         tp->phy_flags &= ~TG3_PHYFLG_EEE_CAP;
11880
11881         if (!netif_running(tp->dev)) {
11882                 data[0] = TG3_LOOPBACK_FAILED;
11883                 data[1] = TG3_LOOPBACK_FAILED;
11884                 if (do_extlpbk)
11885                         data[2] = TG3_LOOPBACK_FAILED;
11886                 goto done;
11887         }
11888
11889         err = tg3_reset_hw(tp, 1);
11890         if (err) {
11891                 data[0] = TG3_LOOPBACK_FAILED;
11892                 data[1] = TG3_LOOPBACK_FAILED;
11893                 if (do_extlpbk)
11894                         data[2] = TG3_LOOPBACK_FAILED;
11895                 goto done;
11896         }
11897
11898         if (tg3_flag(tp, ENABLE_RSS)) {
11899                 int i;
11900
11901                 /* Reroute all rx packets to the 1st queue */
11902                 for (i = MAC_RSS_INDIR_TBL_0;
11903                      i < MAC_RSS_INDIR_TBL_0 + TG3_RSS_INDIR_TBL_SIZE; i += 4)
11904                         tw32(i, 0x0);
11905         }
11906
11907         /* HW errata - MAC loopback fails in some cases on 5780.
11908          * Normal traffic and PHY loopback are not affected by
11909          * this errata.  Also, the MAC loopback test is deprecated
11910          * for all newer ASIC revisions.
11911          */
11912         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
11913             !tg3_flag(tp, CPMU_PRESENT)) {
11914                 tg3_mac_loopback(tp, true);
11915
11916                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11917                         data[0] |= TG3_STD_LOOPBACK_FAILED;
11918
11919                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11920                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11921                         data[0] |= TG3_JMB_LOOPBACK_FAILED;
11922
11923                 tg3_mac_loopback(tp, false);
11924         }
11925
11926         if (!(tp->phy_flags & TG3_PHYFLG_PHY_SERDES) &&
11927             !tg3_flag(tp, USE_PHYLIB)) {
11928                 int i;
11929
11930                 tg3_phy_lpbk_set(tp, 0, false);
11931
11932                 /* Wait for link */
11933                 for (i = 0; i < 100; i++) {
11934                         if (tr32(MAC_TX_STATUS) & TX_STATUS_LINK_UP)
11935                                 break;
11936                         mdelay(1);
11937                 }
11938
11939                 if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11940                         data[1] |= TG3_STD_LOOPBACK_FAILED;
11941                 if (tg3_flag(tp, TSO_CAPABLE) &&
11942                     tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11943                         data[1] |= TG3_TSO_LOOPBACK_FAILED;
11944                 if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11945                     tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11946                         data[1] |= TG3_JMB_LOOPBACK_FAILED;
11947
11948                 if (do_extlpbk) {
11949                         tg3_phy_lpbk_set(tp, 0, true);
11950
11951                         /* All link indications report up, but the hardware
11952                          * isn't really ready for about 20 msec.  Double it
11953                          * to be sure.
11954                          */
11955                         mdelay(40);
11956
11957                         if (tg3_run_loopback(tp, ETH_FRAME_LEN, false))
11958                                 data[2] |= TG3_STD_LOOPBACK_FAILED;
11959                         if (tg3_flag(tp, TSO_CAPABLE) &&
11960                             tg3_run_loopback(tp, ETH_FRAME_LEN, true))
11961                                 data[2] |= TG3_TSO_LOOPBACK_FAILED;
11962                         if (tg3_flag(tp, JUMBO_RING_ENABLE) &&
11963                             tg3_run_loopback(tp, jmb_pkt_sz + ETH_HLEN, false))
11964                                 data[2] |= TG3_JMB_LOOPBACK_FAILED;
11965                 }
11966
11967                 /* Re-enable gphy autopowerdown. */
11968                 if (tp->phy_flags & TG3_PHYFLG_ENABLE_APD)
11969                         tg3_phy_toggle_apd(tp, true);
11970         }
11971
11972         err = (data[0] | data[1] | data[2]) ? -EIO : 0;
11973
11974 done:
11975         tp->phy_flags |= eee_cap;
11976
11977         return err;
11978 }
11979
11980 static void tg3_self_test(struct net_device *dev, struct ethtool_test *etest,
11981                           u64 *data)
11982 {
11983         struct tg3 *tp = netdev_priv(dev);
11984         bool doextlpbk = etest->flags & ETH_TEST_FL_EXTERNAL_LB;
11985
11986         if ((tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER) &&
11987             tg3_power_up(tp)) {
11988                 etest->flags |= ETH_TEST_FL_FAILED;
11989                 memset(data, 1, sizeof(u64) * TG3_NUM_TEST);
11990                 return;
11991         }
11992
11993         memset(data, 0, sizeof(u64) * TG3_NUM_TEST);
11994
11995         if (tg3_test_nvram(tp) != 0) {
11996                 etest->flags |= ETH_TEST_FL_FAILED;
11997                 data[0] = 1;
11998         }
11999         if (!doextlpbk && tg3_test_link(tp)) {
12000                 etest->flags |= ETH_TEST_FL_FAILED;
12001                 data[1] = 1;
12002         }
12003         if (etest->flags & ETH_TEST_FL_OFFLINE) {
12004                 int err, err2 = 0, irq_sync = 0;
12005
12006                 if (netif_running(dev)) {
12007                         tg3_phy_stop(tp);
12008                         tg3_netif_stop(tp);
12009                         irq_sync = 1;
12010                 }
12011
12012                 tg3_full_lock(tp, irq_sync);
12013
12014                 tg3_halt(tp, RESET_KIND_SUSPEND, 1);
12015                 err = tg3_nvram_lock(tp);
12016                 tg3_halt_cpu(tp, RX_CPU_BASE);
12017                 if (!tg3_flag(tp, 5705_PLUS))
12018                         tg3_halt_cpu(tp, TX_CPU_BASE);
12019                 if (!err)
12020                         tg3_nvram_unlock(tp);
12021
12022                 if (tp->phy_flags & TG3_PHYFLG_MII_SERDES)
12023                         tg3_phy_reset(tp);
12024
12025                 if (tg3_test_registers(tp) != 0) {
12026                         etest->flags |= ETH_TEST_FL_FAILED;
12027                         data[2] = 1;
12028                 }
12029
12030                 if (tg3_test_memory(tp) != 0) {
12031                         etest->flags |= ETH_TEST_FL_FAILED;
12032                         data[3] = 1;
12033                 }
12034
12035                 if (doextlpbk)
12036                         etest->flags |= ETH_TEST_FL_EXTERNAL_LB_DONE;
12037
12038                 if (tg3_test_loopback(tp, &data[4], doextlpbk))
12039                         etest->flags |= ETH_TEST_FL_FAILED;
12040
12041                 tg3_full_unlock(tp);
12042
12043                 if (tg3_test_interrupt(tp) != 0) {
12044                         etest->flags |= ETH_TEST_FL_FAILED;
12045                         data[7] = 1;
12046                 }
12047
12048                 tg3_full_lock(tp, 0);
12049
12050                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12051                 if (netif_running(dev)) {
12052                         tg3_flag_set(tp, INIT_COMPLETE);
12053                         err2 = tg3_restart_hw(tp, 1);
12054                         if (!err2)
12055                                 tg3_netif_start(tp);
12056                 }
12057
12058                 tg3_full_unlock(tp);
12059
12060                 if (irq_sync && !err2)
12061                         tg3_phy_start(tp);
12062         }
12063         if (tp->phy_flags & TG3_PHYFLG_IS_LOW_POWER)
12064                 tg3_power_down(tp);
12065
12066 }
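/* The self-test above is reachable from userspace through ethtool's
 * standard test interface, e.g. (illustrative invocation):
 *
 *	ethtool -t eth0 offline
 *
 * An online run covers only the NVRAM and link tests (data[0] and
 * data[1]); an offline run additionally drives the register, memory,
 * loopback and interrupt tests, halting and restarting the device
 * around them.
 */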
12067
12068 static int tg3_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
12069 {
12070         struct mii_ioctl_data *data = if_mii(ifr);
12071         struct tg3 *tp = netdev_priv(dev);
12072         int err;
12073
12074         if (tg3_flag(tp, USE_PHYLIB)) {
12075                 struct phy_device *phydev;
12076                 if (!(tp->phy_flags & TG3_PHYFLG_IS_CONNECTED))
12077                         return -EAGAIN;
12078                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
12079                 return phy_mii_ioctl(phydev, ifr, cmd);
12080         }
12081
12082         switch (cmd) {
12083         case SIOCGMIIPHY:
12084                 data->phy_id = tp->phy_addr;
12085
12086                 /* fallthru */
12087         case SIOCGMIIREG: {
12088                 u32 mii_regval;
12089
12090                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12091                         break;                  /* We have no PHY */
12092
12093                 if (!netif_running(dev))
12094                         return -EAGAIN;
12095
12096                 spin_lock_bh(&tp->lock);
12097                 err = tg3_readphy(tp, data->reg_num & 0x1f, &mii_regval);
12098                 spin_unlock_bh(&tp->lock);
12099
12100                 data->val_out = mii_regval;
12101
12102                 return err;
12103         }
12104
12105         case SIOCSMIIREG:
12106                 if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
12107                         break;                  /* We have no PHY */
12108
12109                 if (!netif_running(dev))
12110                         return -EAGAIN;
12111
12112                 spin_lock_bh(&tp->lock);
12113                 err = tg3_writephy(tp, data->reg_num & 0x1f, data->val_in);
12114                 spin_unlock_bh(&tp->lock);
12115
12116                 return err;
12117
12118         default:
12119                 /* do nothing */
12120                 break;
12121         }
12122         return -EOPNOTSUPP;
12123 }
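/* Illustrative userspace sketch (not part of the driver) of the
 * SIOCGMIIREG path above, using the classic mii-tool idiom of storing
 * the mii data in place of ifr_data; fd is assumed to be an AF_INET
 * datagram socket:
 *
 *	struct ifreq ifr = { .ifr_name = "eth0" };
 *	struct mii_ioctl_data *mii = (struct mii_ioctl_data *)&ifr.ifr_data;
 *
 *	ioctl(fd, SIOCGMIIPHY, &ifr);		(fills mii->phy_id)
 *	mii->reg_num = MII_BMSR;
 *	ioctl(fd, SIOCGMIIREG, &ifr);		(result in mii->val_out)
 */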
12124
12125 static int tg3_get_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12126 {
12127         struct tg3 *tp = netdev_priv(dev);
12128
12129         memcpy(ec, &tp->coal, sizeof(*ec));
12130         return 0;
12131 }
12132
12133 static int tg3_set_coalesce(struct net_device *dev, struct ethtool_coalesce *ec)
12134 {
12135         struct tg3 *tp = netdev_priv(dev);
12136         u32 max_rxcoal_tick_int = 0, max_txcoal_tick_int = 0;
12137         u32 max_stat_coal_ticks = 0, min_stat_coal_ticks = 0;
12138
12139         if (!tg3_flag(tp, 5705_PLUS)) {
12140                 max_rxcoal_tick_int = MAX_RXCOAL_TICK_INT;
12141                 max_txcoal_tick_int = MAX_TXCOAL_TICK_INT;
12142                 max_stat_coal_ticks = MAX_STAT_COAL_TICKS;
12143                 min_stat_coal_ticks = MIN_STAT_COAL_TICKS;
12144         }
12145
12146         if ((ec->rx_coalesce_usecs > MAX_RXCOL_TICKS) ||
12147             (ec->tx_coalesce_usecs > MAX_TXCOL_TICKS) ||
12148             (ec->rx_max_coalesced_frames > MAX_RXMAX_FRAMES) ||
12149             (ec->tx_max_coalesced_frames > MAX_TXMAX_FRAMES) ||
12150             (ec->rx_coalesce_usecs_irq > max_rxcoal_tick_int) ||
12151             (ec->tx_coalesce_usecs_irq > max_txcoal_tick_int) ||
12152             (ec->rx_max_coalesced_frames_irq > MAX_RXCOAL_MAXF_INT) ||
12153             (ec->tx_max_coalesced_frames_irq > MAX_TXCOAL_MAXF_INT) ||
12154             (ec->stats_block_coalesce_usecs > max_stat_coal_ticks) ||
12155             (ec->stats_block_coalesce_usecs < min_stat_coal_ticks))
12156                 return -EINVAL;
12157
12158         /* No rx interrupts will be generated if both are zero */
12159         if ((ec->rx_coalesce_usecs == 0) &&
12160             (ec->rx_max_coalesced_frames == 0))
12161                 return -EINVAL;
12162
12163         /* No tx interrupts will be generated if both are zero */
12164         if ((ec->tx_coalesce_usecs == 0) &&
12165             (ec->tx_max_coalesced_frames == 0))
12166                 return -EINVAL;
12167
12168         /* Only copy relevant parameters, ignore all others. */
12169         tp->coal.rx_coalesce_usecs = ec->rx_coalesce_usecs;
12170         tp->coal.tx_coalesce_usecs = ec->tx_coalesce_usecs;
12171         tp->coal.rx_max_coalesced_frames = ec->rx_max_coalesced_frames;
12172         tp->coal.tx_max_coalesced_frames = ec->tx_max_coalesced_frames;
12173         tp->coal.rx_coalesce_usecs_irq = ec->rx_coalesce_usecs_irq;
12174         tp->coal.tx_coalesce_usecs_irq = ec->tx_coalesce_usecs_irq;
12175         tp->coal.rx_max_coalesced_frames_irq = ec->rx_max_coalesced_frames_irq;
12176         tp->coal.tx_max_coalesced_frames_irq = ec->tx_max_coalesced_frames_irq;
12177         tp->coal.stats_block_coalesce_usecs = ec->stats_block_coalesce_usecs;
12178
12179         if (netif_running(dev)) {
12180                 tg3_full_lock(tp, 0);
12181                 __tg3_set_coalesce(tp, &tp->coal);
12182                 tg3_full_unlock(tp);
12183         }
12184         return 0;
12185 }
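/* The parameters copied above map directly onto ethtool's coalescing
 * knobs, e.g. (illustrative):
 *
 *	ethtool -C eth0 rx-usecs 20 rx-frames 5
 *
 * sets tp->coal.rx_coalesce_usecs and rx_max_coalesced_frames.  Note
 * that at least one member of each usecs/frames pair must stay nonzero
 * per direction, or the request is rejected with -EINVAL.
 */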
12186
12187 static const struct ethtool_ops tg3_ethtool_ops = {
12188         .get_settings           = tg3_get_settings,
12189         .set_settings           = tg3_set_settings,
12190         .get_drvinfo            = tg3_get_drvinfo,
12191         .get_regs_len           = tg3_get_regs_len,
12192         .get_regs               = tg3_get_regs,
12193         .get_wol                = tg3_get_wol,
12194         .set_wol                = tg3_set_wol,
12195         .get_msglevel           = tg3_get_msglevel,
12196         .set_msglevel           = tg3_set_msglevel,
12197         .nway_reset             = tg3_nway_reset,
12198         .get_link               = ethtool_op_get_link,
12199         .get_eeprom_len         = tg3_get_eeprom_len,
12200         .get_eeprom             = tg3_get_eeprom,
12201         .set_eeprom             = tg3_set_eeprom,
12202         .get_ringparam          = tg3_get_ringparam,
12203         .set_ringparam          = tg3_set_ringparam,
12204         .get_pauseparam         = tg3_get_pauseparam,
12205         .set_pauseparam         = tg3_set_pauseparam,
12206         .self_test              = tg3_self_test,
12207         .get_strings            = tg3_get_strings,
12208         .set_phys_id            = tg3_set_phys_id,
12209         .get_ethtool_stats      = tg3_get_ethtool_stats,
12210         .get_coalesce           = tg3_get_coalesce,
12211         .set_coalesce           = tg3_set_coalesce,
12212         .get_sset_count         = tg3_get_sset_count,
12213         .get_rxnfc              = tg3_get_rxnfc,
12214         .get_rxfh_indir_size    = tg3_get_rxfh_indir_size,
12215         .get_rxfh_indir         = tg3_get_rxfh_indir,
12216         .set_rxfh_indir         = tg3_set_rxfh_indir,
12217 };
12218
12219 static struct rtnl_link_stats64 *tg3_get_stats64(struct net_device *dev,
12220                                                 struct rtnl_link_stats64 *stats)
12221 {
12222         struct tg3 *tp = netdev_priv(dev);
12223
12224         if (!tp->hw_stats)
12225                 return &tp->net_stats_prev;
12226
12227         spin_lock_bh(&tp->lock);
12228         tg3_get_nstats(tp, stats);
12229         spin_unlock_bh(&tp->lock);
12230
12231         return stats;
12232 }
12233
12234 static void tg3_set_rx_mode(struct net_device *dev)
12235 {
12236         struct tg3 *tp = netdev_priv(dev);
12237
12238         if (!netif_running(dev))
12239                 return;
12240
12241         tg3_full_lock(tp, 0);
12242         __tg3_set_rx_mode(dev);
12243         tg3_full_unlock(tp);
12244 }
12245
12246 static inline void tg3_set_mtu(struct net_device *dev, struct tg3 *tp,
12247                                int new_mtu)
12248 {
12249         dev->mtu = new_mtu;
12250
12251         if (new_mtu > ETH_DATA_LEN) {
12252                 if (tg3_flag(tp, 5780_CLASS)) {
12253                         netdev_update_features(dev);
12254                         tg3_flag_clear(tp, TSO_CAPABLE);
12255                 } else {
12256                         tg3_flag_set(tp, JUMBO_RING_ENABLE);
12257                 }
12258         } else {
12259                 if (tg3_flag(tp, 5780_CLASS)) {
12260                         tg3_flag_set(tp, TSO_CAPABLE);
12261                         netdev_update_features(dev);
12262                 }
12263                 tg3_flag_clear(tp, JUMBO_RING_ENABLE);
12264         }
12265 }
12266
12267 static int tg3_change_mtu(struct net_device *dev, int new_mtu)
12268 {
12269         struct tg3 *tp = netdev_priv(dev);
12270         int err, reset_phy = 0;
12271
12272         if (new_mtu < TG3_MIN_MTU || new_mtu > TG3_MAX_MTU(tp))
12273                 return -EINVAL;
12274
12275         if (!netif_running(dev)) {
12276                 /* We'll just catch it later when the
12277                  * device is brought up.
12278                  */
12279                 tg3_set_mtu(dev, tp, new_mtu);
12280                 return 0;
12281         }
12282
12283         tg3_phy_stop(tp);
12284
12285         tg3_netif_stop(tp);
12286
12287         tg3_full_lock(tp, 1);
12288
12289         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
12290
12291         tg3_set_mtu(dev, tp, new_mtu);
12292
12293         /* Reset the PHY, otherwise the read DMA engine will be left in a
12294          * mode that limits all DMA read requests to 256 bytes.
12295          */
12296         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
12297                 reset_phy = 1;
12298
12299         err = tg3_restart_hw(tp, reset_phy);
12300
12301         if (!err)
12302                 tg3_netif_start(tp);
12303
12304         tg3_full_unlock(tp);
12305
12306         if (!err)
12307                 tg3_phy_start(tp);
12308
12309         return err;
12310 }
12311
12312 static const struct net_device_ops tg3_netdev_ops = {
12313         .ndo_open               = tg3_open,
12314         .ndo_stop               = tg3_close,
12315         .ndo_start_xmit         = tg3_start_xmit,
12316         .ndo_get_stats64        = tg3_get_stats64,
12317         .ndo_validate_addr      = eth_validate_addr,
12318         .ndo_set_rx_mode        = tg3_set_rx_mode,
12319         .ndo_set_mac_address    = tg3_set_mac_addr,
12320         .ndo_do_ioctl           = tg3_ioctl,
12321         .ndo_tx_timeout         = tg3_tx_timeout,
12322         .ndo_change_mtu         = tg3_change_mtu,
12323         .ndo_fix_features       = tg3_fix_features,
12324         .ndo_set_features       = tg3_set_features,
12325 #ifdef CONFIG_NET_POLL_CONTROLLER
12326         .ndo_poll_controller    = tg3_poll_controller,
12327 #endif
12328 };
12329
12330 static void __devinit tg3_get_eeprom_size(struct tg3 *tp)
12331 {
12332         u32 cursize, val, magic;
12333
12334         tp->nvram_size = EEPROM_CHIP_SIZE;
12335
12336         if (tg3_nvram_read(tp, 0, &magic) != 0)
12337                 return;
12338
12339         if ((magic != TG3_EEPROM_MAGIC) &&
12340             ((magic & TG3_EEPROM_MAGIC_FW_MSK) != TG3_EEPROM_MAGIC_FW) &&
12341             ((magic & TG3_EEPROM_MAGIC_HW_MSK) != TG3_EEPROM_MAGIC_HW))
12342                 return;
12343
12344         /*
12345          * Size the chip by reading offsets at increasing powers of two.
12346          * When we encounter our validation signature, we know the addressing
12347          * has wrapped around, and thus have our chip size.
12348          */
12349         cursize = 0x10;
12350
12351         while (cursize < tp->nvram_size) {
12352                 if (tg3_nvram_read(tp, cursize, &val) != 0)
12353                         return;
12354
12355                 if (val == magic)
12356                         break;
12357
12358                 cursize <<= 1;
12359         }
12360
12361         tp->nvram_size = cursize;
12362 }
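/* Worked example of the probe above: reads at offsets 0x10, 0x20,
 * 0x40, ... return ordinary data until the address wraps past the end
 * of the part, at which point the read returns the magic value from
 * offset 0 again; cursize at that moment is the chip size.
 */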
12363
12364 static void __devinit tg3_get_nvram_size(struct tg3 *tp)
12365 {
12366         u32 val;
12367
12368         if (tg3_flag(tp, NO_NVRAM) || tg3_nvram_read(tp, 0, &val) != 0)
12369                 return;
12370
12371         /* Selfboot format */
12372         if (val != TG3_EEPROM_MAGIC) {
12373                 tg3_get_eeprom_size(tp);
12374                 return;
12375         }
12376
12377         if (tg3_nvram_read(tp, 0xf0, &val) == 0) {
12378                 if (val != 0) {
12379                         /* This is confusing.  We want to operate on the
12380                          * 16-bit value at offset 0xf2.  The tg3_nvram_read()
12381                          * call will read from NVRAM and byteswap the data
12382                          * according to the byteswapping settings for all
12383                          * other register accesses.  This ensures the data we
12384                          * want will always reside in the lower 16-bits.
12385                          * However, the data in NVRAM is in LE format, which
12386                          * means the data from the NVRAM read will always be
12387                          * opposite the endianness of the CPU.  The 16-bit
12388                          * byteswap then brings the data to CPU endianness.
12389                          */
12390                         tp->nvram_size = swab16((u16)(val & 0x0000ffff)) * 1024;
12391                         return;
12392                 }
12393         }
12394         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12395 }
12396
12397 static void __devinit tg3_get_nvram_info(struct tg3 *tp)
12398 {
12399         u32 nvcfg1;
12400
12401         nvcfg1 = tr32(NVRAM_CFG1);
12402         if (nvcfg1 & NVRAM_CFG1_FLASHIF_ENAB) {
12403                 tg3_flag_set(tp, FLASH);
12404         } else {
12405                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12406                 tw32(NVRAM_CFG1, nvcfg1);
12407         }
12408
12409         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
12410             tg3_flag(tp, 5780_CLASS)) {
12411                 switch (nvcfg1 & NVRAM_CFG1_VENDOR_MASK) {
12412                 case FLASH_VENDOR_ATMEL_FLASH_BUFFERED:
12413                         tp->nvram_jedecnum = JEDEC_ATMEL;
12414                         tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12415                         tg3_flag_set(tp, NVRAM_BUFFERED);
12416                         break;
12417                 case FLASH_VENDOR_ATMEL_FLASH_UNBUFFERED:
12418                         tp->nvram_jedecnum = JEDEC_ATMEL;
12419                         tp->nvram_pagesize = ATMEL_AT25F512_PAGE_SIZE;
12420                         break;
12421                 case FLASH_VENDOR_ATMEL_EEPROM:
12422                         tp->nvram_jedecnum = JEDEC_ATMEL;
12423                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12424                         tg3_flag_set(tp, NVRAM_BUFFERED);
12425                         break;
12426                 case FLASH_VENDOR_ST:
12427                         tp->nvram_jedecnum = JEDEC_ST;
12428                         tp->nvram_pagesize = ST_M45PEX0_PAGE_SIZE;
12429                         tg3_flag_set(tp, NVRAM_BUFFERED);
12430                         break;
12431                 case FLASH_VENDOR_SAIFUN:
12432                         tp->nvram_jedecnum = JEDEC_SAIFUN;
12433                         tp->nvram_pagesize = SAIFUN_SA25F0XX_PAGE_SIZE;
12434                         break;
12435                 case FLASH_VENDOR_SST_SMALL:
12436                 case FLASH_VENDOR_SST_LARGE:
12437                         tp->nvram_jedecnum = JEDEC_SST;
12438                         tp->nvram_pagesize = SST_25VF0X0_PAGE_SIZE;
12439                         break;
12440                 }
12441         } else {
12442                 tp->nvram_jedecnum = JEDEC_ATMEL;
12443                 tp->nvram_pagesize = ATMEL_AT45DB0X1B_PAGE_SIZE;
12444                 tg3_flag_set(tp, NVRAM_BUFFERED);
12445         }
12446 }
12447
12448 static void __devinit tg3_nvram_get_pagesize(struct tg3 *tp, u32 nvmcfg1)
12449 {
12450         switch (nvmcfg1 & NVRAM_CFG1_5752PAGE_SIZE_MASK) {
12451         case FLASH_5752PAGE_SIZE_256:
12452                 tp->nvram_pagesize = 256;
12453                 break;
12454         case FLASH_5752PAGE_SIZE_512:
12455                 tp->nvram_pagesize = 512;
12456                 break;
12457         case FLASH_5752PAGE_SIZE_1K:
12458                 tp->nvram_pagesize = 1024;
12459                 break;
12460         case FLASH_5752PAGE_SIZE_2K:
12461                 tp->nvram_pagesize = 2048;
12462                 break;
12463         case FLASH_5752PAGE_SIZE_4K:
12464                 tp->nvram_pagesize = 4096;
12465                 break;
12466         case FLASH_5752PAGE_SIZE_264:
12467                 tp->nvram_pagesize = 264;
12468                 break;
12469         case FLASH_5752PAGE_SIZE_528:
12470                 tp->nvram_pagesize = 528;
12471                 break;
12472         }
12473 }
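/* The 264- and 528-byte page sizes correspond to Atmel DataFlash
 * parts, whose native pages are a power of two plus 8 spare bytes
 * (256 + 8 and 512 + 8 respectively).
 */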
12474
12475 static void __devinit tg3_get_5752_nvram_info(struct tg3 *tp)
12476 {
12477         u32 nvcfg1;
12478
12479         nvcfg1 = tr32(NVRAM_CFG1);
12480
12481         /* NVRAM protection for TPM */
12482         if (nvcfg1 & (1 << 27))
12483                 tg3_flag_set(tp, PROTECTED_NVRAM);
12484
12485         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12486         case FLASH_5752VENDOR_ATMEL_EEPROM_64KHZ:
12487         case FLASH_5752VENDOR_ATMEL_EEPROM_376KHZ:
12488                 tp->nvram_jedecnum = JEDEC_ATMEL;
12489                 tg3_flag_set(tp, NVRAM_BUFFERED);
12490                 break;
12491         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12492                 tp->nvram_jedecnum = JEDEC_ATMEL;
12493                 tg3_flag_set(tp, NVRAM_BUFFERED);
12494                 tg3_flag_set(tp, FLASH);
12495                 break;
12496         case FLASH_5752VENDOR_ST_M45PE10:
12497         case FLASH_5752VENDOR_ST_M45PE20:
12498         case FLASH_5752VENDOR_ST_M45PE40:
12499                 tp->nvram_jedecnum = JEDEC_ST;
12500                 tg3_flag_set(tp, NVRAM_BUFFERED);
12501                 tg3_flag_set(tp, FLASH);
12502                 break;
12503         }
12504
12505         if (tg3_flag(tp, FLASH)) {
12506                 tg3_nvram_get_pagesize(tp, nvcfg1);
12507         } else {
12508                 /* For EEPROMs, set the pagesize to the maximum EEPROM size */
12509                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12510
12511                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12512                 tw32(NVRAM_CFG1, nvcfg1);
12513         }
12514 }
12515
12516 static void __devinit tg3_get_5755_nvram_info(struct tg3 *tp)
12517 {
12518         u32 nvcfg1, protect = 0;
12519
12520         nvcfg1 = tr32(NVRAM_CFG1);
12521
12522         /* NVRAM protection for TPM */
12523         if (nvcfg1 & (1 << 27)) {
12524                 tg3_flag_set(tp, PROTECTED_NVRAM);
12525                 protect = 1;
12526         }
12527
12528         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12529         switch (nvcfg1) {
12530         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12531         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12532         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12533         case FLASH_5755VENDOR_ATMEL_FLASH_5:
12534                 tp->nvram_jedecnum = JEDEC_ATMEL;
12535                 tg3_flag_set(tp, NVRAM_BUFFERED);
12536                 tg3_flag_set(tp, FLASH);
12537                 tp->nvram_pagesize = 264;
12538                 if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_1 ||
12539                     nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_5)
12540                         tp->nvram_size = (protect ? 0x3e200 :
12541                                           TG3_NVRAM_SIZE_512KB);
12542                 else if (nvcfg1 == FLASH_5755VENDOR_ATMEL_FLASH_2)
12543                         tp->nvram_size = (protect ? 0x1f200 :
12544                                           TG3_NVRAM_SIZE_256KB);
12545                 else
12546                         tp->nvram_size = (protect ? 0x1f200 :
12547                                           TG3_NVRAM_SIZE_128KB);
12548                 break;
12549         case FLASH_5752VENDOR_ST_M45PE10:
12550         case FLASH_5752VENDOR_ST_M45PE20:
12551         case FLASH_5752VENDOR_ST_M45PE40:
12552                 tp->nvram_jedecnum = JEDEC_ST;
12553                 tg3_flag_set(tp, NVRAM_BUFFERED);
12554                 tg3_flag_set(tp, FLASH);
12555                 tp->nvram_pagesize = 256;
12556                 if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE10)
12557                         tp->nvram_size = (protect ?
12558                                           TG3_NVRAM_SIZE_64KB :
12559                                           TG3_NVRAM_SIZE_128KB);
12560                 else if (nvcfg1 == FLASH_5752VENDOR_ST_M45PE20)
12561                         tp->nvram_size = (protect ?
12562                                           TG3_NVRAM_SIZE_64KB :
12563                                           TG3_NVRAM_SIZE_256KB);
12564                 else
12565                         tp->nvram_size = (protect ?
12566                                           TG3_NVRAM_SIZE_128KB :
12567                                           TG3_NVRAM_SIZE_512KB);
12568                 break;
12569         }
12570 }
12571
12572 static void __devinit tg3_get_5787_nvram_info(struct tg3 *tp)
12573 {
12574         u32 nvcfg1;
12575
12576         nvcfg1 = tr32(NVRAM_CFG1);
12577
12578         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12579         case FLASH_5787VENDOR_ATMEL_EEPROM_64KHZ:
12580         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12581         case FLASH_5787VENDOR_MICRO_EEPROM_64KHZ:
12582         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12583                 tp->nvram_jedecnum = JEDEC_ATMEL;
12584                 tg3_flag_set(tp, NVRAM_BUFFERED);
12585                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12586
12587                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12588                 tw32(NVRAM_CFG1, nvcfg1);
12589                 break;
12590         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12591         case FLASH_5755VENDOR_ATMEL_FLASH_1:
12592         case FLASH_5755VENDOR_ATMEL_FLASH_2:
12593         case FLASH_5755VENDOR_ATMEL_FLASH_3:
12594                 tp->nvram_jedecnum = JEDEC_ATMEL;
12595                 tg3_flag_set(tp, NVRAM_BUFFERED);
12596                 tg3_flag_set(tp, FLASH);
12597                 tp->nvram_pagesize = 264;
12598                 break;
12599         case FLASH_5752VENDOR_ST_M45PE10:
12600         case FLASH_5752VENDOR_ST_M45PE20:
12601         case FLASH_5752VENDOR_ST_M45PE40:
12602                 tp->nvram_jedecnum = JEDEC_ST;
12603                 tg3_flag_set(tp, NVRAM_BUFFERED);
12604                 tg3_flag_set(tp, FLASH);
12605                 tp->nvram_pagesize = 256;
12606                 break;
12607         }
12608 }
12609
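/* 5761 NVRAM decode.  When the part is TPM-protected, the usable
 * size comes from the NVRAM_ADDR_LOCKOUT register instead of the
 * part's nominal capacity.
 */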
12610 static void __devinit tg3_get_5761_nvram_info(struct tg3 *tp)
12611 {
12612         u32 nvcfg1, protect = 0;
12613
12614         nvcfg1 = tr32(NVRAM_CFG1);
12615
12616         /* NVRAM protection for TPM */
12617         if (nvcfg1 & (1 << 27)) {
12618                 tg3_flag_set(tp, PROTECTED_NVRAM);
12619                 protect = 1;
12620         }
12621
12622         nvcfg1 &= NVRAM_CFG1_5752VENDOR_MASK;
12623         switch (nvcfg1) {
12624         case FLASH_5761VENDOR_ATMEL_ADB021D:
12625         case FLASH_5761VENDOR_ATMEL_ADB041D:
12626         case FLASH_5761VENDOR_ATMEL_ADB081D:
12627         case FLASH_5761VENDOR_ATMEL_ADB161D:
12628         case FLASH_5761VENDOR_ATMEL_MDB021D:
12629         case FLASH_5761VENDOR_ATMEL_MDB041D:
12630         case FLASH_5761VENDOR_ATMEL_MDB081D:
12631         case FLASH_5761VENDOR_ATMEL_MDB161D:
12632                 tp->nvram_jedecnum = JEDEC_ATMEL;
12633                 tg3_flag_set(tp, NVRAM_BUFFERED);
12634                 tg3_flag_set(tp, FLASH);
12635                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12636                 tp->nvram_pagesize = 256;
12637                 break;
12638         case FLASH_5761VENDOR_ST_A_M45PE20:
12639         case FLASH_5761VENDOR_ST_A_M45PE40:
12640         case FLASH_5761VENDOR_ST_A_M45PE80:
12641         case FLASH_5761VENDOR_ST_A_M45PE16:
12642         case FLASH_5761VENDOR_ST_M_M45PE20:
12643         case FLASH_5761VENDOR_ST_M_M45PE40:
12644         case FLASH_5761VENDOR_ST_M_M45PE80:
12645         case FLASH_5761VENDOR_ST_M_M45PE16:
12646                 tp->nvram_jedecnum = JEDEC_ST;
12647                 tg3_flag_set(tp, NVRAM_BUFFERED);
12648                 tg3_flag_set(tp, FLASH);
12649                 tp->nvram_pagesize = 256;
12650                 break;
12651         }
12652
12653         if (protect) {
12654                 tp->nvram_size = tr32(NVRAM_ADDR_LOCKOUT);
12655         } else {
12656                 switch (nvcfg1) {
12657                 case FLASH_5761VENDOR_ATMEL_ADB161D:
12658                 case FLASH_5761VENDOR_ATMEL_MDB161D:
12659                 case FLASH_5761VENDOR_ST_A_M45PE16:
12660                 case FLASH_5761VENDOR_ST_M_M45PE16:
12661                         tp->nvram_size = TG3_NVRAM_SIZE_2MB;
12662                         break;
12663                 case FLASH_5761VENDOR_ATMEL_ADB081D:
12664                 case FLASH_5761VENDOR_ATMEL_MDB081D:
12665                 case FLASH_5761VENDOR_ST_A_M45PE80:
12666                 case FLASH_5761VENDOR_ST_M_M45PE80:
12667                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12668                         break;
12669                 case FLASH_5761VENDOR_ATMEL_ADB041D:
12670                 case FLASH_5761VENDOR_ATMEL_MDB041D:
12671                 case FLASH_5761VENDOR_ST_A_M45PE40:
12672                 case FLASH_5761VENDOR_ST_M_M45PE40:
12673                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12674                         break;
12675                 case FLASH_5761VENDOR_ATMEL_ADB021D:
12676                 case FLASH_5761VENDOR_ATMEL_MDB021D:
12677                 case FLASH_5761VENDOR_ST_A_M45PE20:
12678                 case FLASH_5761VENDOR_ST_M_M45PE20:
12679                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12680                         break;
12681                 }
12682         }
12683 }
12684
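/* The 5906 always uses a buffered Atmel EEPROM; nothing to probe. */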
12685 static void __devinit tg3_get_5906_nvram_info(struct tg3 *tp)
12686 {
12687         tp->nvram_jedecnum = JEDEC_ATMEL;
12688         tg3_flag_set(tp, NVRAM_BUFFERED);
12689         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12690 }
12691
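/* 57780-class NVRAM decode.  Unlike the older parts, an unknown
 * vendor code here means no NVRAM is fitted at all.
 */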
12692 static void __devinit tg3_get_57780_nvram_info(struct tg3 *tp)
12693 {
12694         u32 nvcfg1;
12695
12696         nvcfg1 = tr32(NVRAM_CFG1);
12697
12698         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12699         case FLASH_5787VENDOR_ATMEL_EEPROM_376KHZ:
12700         case FLASH_5787VENDOR_MICRO_EEPROM_376KHZ:
12701                 tp->nvram_jedecnum = JEDEC_ATMEL;
12702                 tg3_flag_set(tp, NVRAM_BUFFERED);
12703                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12704
12705                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12706                 tw32(NVRAM_CFG1, nvcfg1);
12707                 return;
12708         case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12709         case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12710         case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12711         case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12712         case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12713         case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12714         case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12715                 tp->nvram_jedecnum = JEDEC_ATMEL;
12716                 tg3_flag_set(tp, NVRAM_BUFFERED);
12717                 tg3_flag_set(tp, FLASH);
12718
12719                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12720                 case FLASH_5752VENDOR_ATMEL_FLASH_BUFFERED:
12721                 case FLASH_57780VENDOR_ATMEL_AT45DB011D:
12722                 case FLASH_57780VENDOR_ATMEL_AT45DB011B:
12723                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12724                         break;
12725                 case FLASH_57780VENDOR_ATMEL_AT45DB021D:
12726                 case FLASH_57780VENDOR_ATMEL_AT45DB021B:
12727                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12728                         break;
12729                 case FLASH_57780VENDOR_ATMEL_AT45DB041D:
12730                 case FLASH_57780VENDOR_ATMEL_AT45DB041B:
12731                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12732                         break;
12733                 }
12734                 break;
12735         case FLASH_5752VENDOR_ST_M45PE10:
12736         case FLASH_5752VENDOR_ST_M45PE20:
12737         case FLASH_5752VENDOR_ST_M45PE40:
12738                 tp->nvram_jedecnum = JEDEC_ST;
12739                 tg3_flag_set(tp, NVRAM_BUFFERED);
12740                 tg3_flag_set(tp, FLASH);
12741
12742                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12743                 case FLASH_5752VENDOR_ST_M45PE10:
12744                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12745                         break;
12746                 case FLASH_5752VENDOR_ST_M45PE20:
12747                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12748                         break;
12749                 case FLASH_5752VENDOR_ST_M45PE40:
12750                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12751                         break;
12752                 }
12753                 break;
12754         default:
12755                 tg3_flag_set(tp, NO_NVRAM);
12756                 return;
12757         }
12758
12759         tg3_nvram_get_pagesize(tp, nvcfg1);
12760         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12761                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12762 }
12763
12765 static void __devinit tg3_get_5717_nvram_info(struct tg3 *tp)
12766 {
12767         u32 nvcfg1;
12768
12769         nvcfg1 = tr32(NVRAM_CFG1);
12770
12771         switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12772         case FLASH_5717VENDOR_ATMEL_EEPROM:
12773         case FLASH_5717VENDOR_MICRO_EEPROM:
12774                 tp->nvram_jedecnum = JEDEC_ATMEL;
12775                 tg3_flag_set(tp, NVRAM_BUFFERED);
12776                 tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12777
12778                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12779                 tw32(NVRAM_CFG1, nvcfg1);
12780                 return;
12781         case FLASH_5717VENDOR_ATMEL_MDB011D:
12782         case FLASH_5717VENDOR_ATMEL_ADB011B:
12783         case FLASH_5717VENDOR_ATMEL_ADB011D:
12784         case FLASH_5717VENDOR_ATMEL_MDB021D:
12785         case FLASH_5717VENDOR_ATMEL_ADB021B:
12786         case FLASH_5717VENDOR_ATMEL_ADB021D:
12787         case FLASH_5717VENDOR_ATMEL_45USPT:
12788                 tp->nvram_jedecnum = JEDEC_ATMEL;
12789                 tg3_flag_set(tp, NVRAM_BUFFERED);
12790                 tg3_flag_set(tp, FLASH);
12791
12792                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12793                 case FLASH_5717VENDOR_ATMEL_MDB021D:
12794                         /* Detect size with tg3_nvram_get_size() */
12795                         break;
12796                 case FLASH_5717VENDOR_ATMEL_ADB021B:
12797                 case FLASH_5717VENDOR_ATMEL_ADB021D:
12798                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12799                         break;
12800                 default:
12801                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12802                         break;
12803                 }
12804                 break;
12805         case FLASH_5717VENDOR_ST_M_M25PE10:
12806         case FLASH_5717VENDOR_ST_A_M25PE10:
12807         case FLASH_5717VENDOR_ST_M_M45PE10:
12808         case FLASH_5717VENDOR_ST_A_M45PE10:
12809         case FLASH_5717VENDOR_ST_M_M25PE20:
12810         case FLASH_5717VENDOR_ST_A_M25PE20:
12811         case FLASH_5717VENDOR_ST_M_M45PE20:
12812         case FLASH_5717VENDOR_ST_A_M45PE20:
12813         case FLASH_5717VENDOR_ST_25USPT:
12814         case FLASH_5717VENDOR_ST_45USPT:
12815                 tp->nvram_jedecnum = JEDEC_ST;
12816                 tg3_flag_set(tp, NVRAM_BUFFERED);
12817                 tg3_flag_set(tp, FLASH);
12818
12819                 switch (nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK) {
12820                 case FLASH_5717VENDOR_ST_M_M25PE20:
12821                 case FLASH_5717VENDOR_ST_M_M45PE20:
12822                         /* Detect size with tg3_nvram_get_size() */
12823                         break;
12824                 case FLASH_5717VENDOR_ST_A_M25PE20:
12825                 case FLASH_5717VENDOR_ST_A_M45PE20:
12826                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12827                         break;
12828                 default:
12829                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12830                         break;
12831                 }
12832                 break;
12833         default:
12834                 tg3_flag_set(tp, NO_NVRAM);
12835                 return;
12836         }
12837
12838         tg3_nvram_get_pagesize(tp, nvcfg1);
12839         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12840                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12841 }
12842
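/* 5720 NVRAM decode, driven by the NVRAM pin-strap/vendor field. */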
12843 static void __devinit tg3_get_5720_nvram_info(struct tg3 *tp)
12844 {
12845         u32 nvcfg1, nvmpinstrp;
12846
12847         nvcfg1 = tr32(NVRAM_CFG1);
12848         nvmpinstrp = nvcfg1 & NVRAM_CFG1_5752VENDOR_MASK;
12849
12850         switch (nvmpinstrp) {
12851         case FLASH_5720_EEPROM_HD:
12852         case FLASH_5720_EEPROM_LD:
12853                 tp->nvram_jedecnum = JEDEC_ATMEL;
12854                 tg3_flag_set(tp, NVRAM_BUFFERED);
12855
12856                 nvcfg1 &= ~NVRAM_CFG1_COMPAT_BYPASS;
12857                 tw32(NVRAM_CFG1, nvcfg1);
12858                 if (nvmpinstrp == FLASH_5720_EEPROM_HD)
12859                         tp->nvram_pagesize = ATMEL_AT24C512_CHIP_SIZE;
12860                 else
12861                         tp->nvram_pagesize = ATMEL_AT24C02_CHIP_SIZE;
12862                 return;
12863         case FLASH_5720VENDOR_M_ATMEL_DB011D:
12864         case FLASH_5720VENDOR_A_ATMEL_DB011B:
12865         case FLASH_5720VENDOR_A_ATMEL_DB011D:
12866         case FLASH_5720VENDOR_M_ATMEL_DB021D:
12867         case FLASH_5720VENDOR_A_ATMEL_DB021B:
12868         case FLASH_5720VENDOR_A_ATMEL_DB021D:
12869         case FLASH_5720VENDOR_M_ATMEL_DB041D:
12870         case FLASH_5720VENDOR_A_ATMEL_DB041B:
12871         case FLASH_5720VENDOR_A_ATMEL_DB041D:
12872         case FLASH_5720VENDOR_M_ATMEL_DB081D:
12873         case FLASH_5720VENDOR_A_ATMEL_DB081D:
12874         case FLASH_5720VENDOR_ATMEL_45USPT:
12875                 tp->nvram_jedecnum = JEDEC_ATMEL;
12876                 tg3_flag_set(tp, NVRAM_BUFFERED);
12877                 tg3_flag_set(tp, FLASH);
12878
12879                 switch (nvmpinstrp) {
12880                 case FLASH_5720VENDOR_M_ATMEL_DB021D:
12881                 case FLASH_5720VENDOR_A_ATMEL_DB021B:
12882                 case FLASH_5720VENDOR_A_ATMEL_DB021D:
12883                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12884                         break;
12885                 case FLASH_5720VENDOR_M_ATMEL_DB041D:
12886                 case FLASH_5720VENDOR_A_ATMEL_DB041B:
12887                 case FLASH_5720VENDOR_A_ATMEL_DB041D:
12888                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12889                         break;
12890                 case FLASH_5720VENDOR_M_ATMEL_DB081D:
12891                 case FLASH_5720VENDOR_A_ATMEL_DB081D:
12892                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12893                         break;
12894                 default:
12895                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12896                         break;
12897                 }
12898                 break;
12899         case FLASH_5720VENDOR_M_ST_M25PE10:
12900         case FLASH_5720VENDOR_M_ST_M45PE10:
12901         case FLASH_5720VENDOR_A_ST_M25PE10:
12902         case FLASH_5720VENDOR_A_ST_M45PE10:
12903         case FLASH_5720VENDOR_M_ST_M25PE20:
12904         case FLASH_5720VENDOR_M_ST_M45PE20:
12905         case FLASH_5720VENDOR_A_ST_M25PE20:
12906         case FLASH_5720VENDOR_A_ST_M45PE20:
12907         case FLASH_5720VENDOR_M_ST_M25PE40:
12908         case FLASH_5720VENDOR_M_ST_M45PE40:
12909         case FLASH_5720VENDOR_A_ST_M25PE40:
12910         case FLASH_5720VENDOR_A_ST_M45PE40:
12911         case FLASH_5720VENDOR_M_ST_M25PE80:
12912         case FLASH_5720VENDOR_M_ST_M45PE80:
12913         case FLASH_5720VENDOR_A_ST_M25PE80:
12914         case FLASH_5720VENDOR_A_ST_M45PE80:
12915         case FLASH_5720VENDOR_ST_25USPT:
12916         case FLASH_5720VENDOR_ST_45USPT:
12917                 tp->nvram_jedecnum = JEDEC_ST;
12918                 tg3_flag_set(tp, NVRAM_BUFFERED);
12919                 tg3_flag_set(tp, FLASH);
12920
12921                 switch (nvmpinstrp) {
12922                 case FLASH_5720VENDOR_M_ST_M25PE20:
12923                 case FLASH_5720VENDOR_M_ST_M45PE20:
12924                 case FLASH_5720VENDOR_A_ST_M25PE20:
12925                 case FLASH_5720VENDOR_A_ST_M45PE20:
12926                         tp->nvram_size = TG3_NVRAM_SIZE_256KB;
12927                         break;
12928                 case FLASH_5720VENDOR_M_ST_M25PE40:
12929                 case FLASH_5720VENDOR_M_ST_M45PE40:
12930                 case FLASH_5720VENDOR_A_ST_M25PE40:
12931                 case FLASH_5720VENDOR_A_ST_M45PE40:
12932                         tp->nvram_size = TG3_NVRAM_SIZE_512KB;
12933                         break;
12934                 case FLASH_5720VENDOR_M_ST_M25PE80:
12935                 case FLASH_5720VENDOR_M_ST_M45PE80:
12936                 case FLASH_5720VENDOR_A_ST_M25PE80:
12937                 case FLASH_5720VENDOR_A_ST_M45PE80:
12938                         tp->nvram_size = TG3_NVRAM_SIZE_1MB;
12939                         break;
12940                 default:
12941                         tp->nvram_size = TG3_NVRAM_SIZE_128KB;
12942                         break;
12943                 }
12944                 break;
12945         default:
12946                 tg3_flag_set(tp, NO_NVRAM);
12947                 return;
12948         }
12949
12950         tg3_nvram_get_pagesize(tp, nvcfg1);
12951         if (tp->nvram_pagesize != 264 && tp->nvram_pagesize != 528)
12952                 tg3_flag_set(tp, NO_NVRAM_ADDR_TRANS);
12953 }
12954
12955 /* Chips other than 5700/5701 use the NVRAM for fetching info. */
12956 static void __devinit tg3_nvram_init(struct tg3 *tp)
12957 {
12958         tw32_f(GRC_EEPROM_ADDR,
12959              (EEPROM_ADDR_FSM_RESET |
12960               (EEPROM_DEFAULT_CLOCK_PERIOD <<
12961                EEPROM_ADDR_CLKPERD_SHIFT)));
12962
12963         msleep(1);
12964
12965         /* Enable seeprom accesses. */
12966         tw32_f(GRC_LOCAL_CTRL,
12967              tr32(GRC_LOCAL_CTRL) | GRC_LCLCTRL_AUTO_SEEPROM);
12968         udelay(100);
12969
12970         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
12971             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701) {
12972                 tg3_flag_set(tp, NVRAM);
12973
12974                 if (tg3_nvram_lock(tp)) {
12975                         netdev_warn(tp->dev,
12976                                     "Cannot get nvram lock, %s failed\n",
12977                                     __func__);
12978                         return;
12979                 }
12980                 tg3_enable_nvram_access(tp);
12981
12982                 tp->nvram_size = 0;
12983
12984                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
12985                         tg3_get_5752_nvram_info(tp);
12986                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
12987                         tg3_get_5755_nvram_info(tp);
12988                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
12989                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
12990                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
12991                         tg3_get_5787_nvram_info(tp);
12992                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761)
12993                         tg3_get_5761_nvram_info(tp);
12994                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
12995                         tg3_get_5906_nvram_info(tp);
12996                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
12997                          tg3_flag(tp, 57765_CLASS))
12998                         tg3_get_57780_nvram_info(tp);
12999                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13000                          GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
13001                         tg3_get_5717_nvram_info(tp);
13002                 else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13003                         tg3_get_5720_nvram_info(tp);
13004                 else
13005                         tg3_get_nvram_info(tp);
13006
13007                 if (tp->nvram_size == 0)
13008                         tg3_get_nvram_size(tp);
13009
13010                 tg3_disable_nvram_access(tp);
13011                 tg3_nvram_unlock(tp);
13012
13013         } else {
13014                 tg3_flag_clear(tp, NVRAM);
13015                 tg3_flag_clear(tp, NVRAM_BUFFERED);
13016
13017                 tg3_get_eeprom_size(tp);
13018         }
13019 }
13020
13021 struct subsys_tbl_ent {
13022         u16 subsys_vendor, subsys_devid;
13023         u32 phy_id;
13024 };
13025
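/* Map PCI subsystem IDs of known boards to the PHY they carry
 * (a zero phy_id marks a serdes board).
 */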
13026 static struct subsys_tbl_ent subsys_id_to_phy_id[] __devinitdata = {
13027         /* Broadcom boards. */
13028         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13029           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A6, TG3_PHY_ID_BCM5401 },
13030         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13031           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A5, TG3_PHY_ID_BCM5701 },
13032         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13033           TG3PCI_SUBDEVICE_ID_BROADCOM_95700T6, TG3_PHY_ID_BCM8002 },
13034         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13035           TG3PCI_SUBDEVICE_ID_BROADCOM_95700A9, 0 },
13036         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13037           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T1, TG3_PHY_ID_BCM5701 },
13038         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13039           TG3PCI_SUBDEVICE_ID_BROADCOM_95701T8, TG3_PHY_ID_BCM5701 },
13040         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13041           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A7, 0 },
13042         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13043           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A10, TG3_PHY_ID_BCM5701 },
13044         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13045           TG3PCI_SUBDEVICE_ID_BROADCOM_95701A12, TG3_PHY_ID_BCM5701 },
13046         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13047           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX1, TG3_PHY_ID_BCM5703 },
13048         { TG3PCI_SUBVENDOR_ID_BROADCOM,
13049           TG3PCI_SUBDEVICE_ID_BROADCOM_95703AX2, TG3_PHY_ID_BCM5703 },
13050
13051         /* 3com boards. */
13052         { TG3PCI_SUBVENDOR_ID_3COM,
13053           TG3PCI_SUBDEVICE_ID_3COM_3C996T, TG3_PHY_ID_BCM5401 },
13054         { TG3PCI_SUBVENDOR_ID_3COM,
13055           TG3PCI_SUBDEVICE_ID_3COM_3C996BT, TG3_PHY_ID_BCM5701 },
13056         { TG3PCI_SUBVENDOR_ID_3COM,
13057           TG3PCI_SUBDEVICE_ID_3COM_3C996SX, 0 },
13058         { TG3PCI_SUBVENDOR_ID_3COM,
13059           TG3PCI_SUBDEVICE_ID_3COM_3C1000T, TG3_PHY_ID_BCM5701 },
13060         { TG3PCI_SUBVENDOR_ID_3COM,
13061           TG3PCI_SUBDEVICE_ID_3COM_3C940BR01, TG3_PHY_ID_BCM5701 },
13062
13063         /* DELL boards. */
13064         { TG3PCI_SUBVENDOR_ID_DELL,
13065           TG3PCI_SUBDEVICE_ID_DELL_VIPER, TG3_PHY_ID_BCM5401 },
13066         { TG3PCI_SUBVENDOR_ID_DELL,
13067           TG3PCI_SUBDEVICE_ID_DELL_JAGUAR, TG3_PHY_ID_BCM5401 },
13068         { TG3PCI_SUBVENDOR_ID_DELL,
13069           TG3PCI_SUBDEVICE_ID_DELL_MERLOT, TG3_PHY_ID_BCM5411 },
13070         { TG3PCI_SUBVENDOR_ID_DELL,
13071           TG3PCI_SUBDEVICE_ID_DELL_SLIM_MERLOT, TG3_PHY_ID_BCM5411 },
13072
13073         /* Compaq boards. */
13074         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13075           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE, TG3_PHY_ID_BCM5701 },
13076         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13077           TG3PCI_SUBDEVICE_ID_COMPAQ_BANSHEE_2, TG3_PHY_ID_BCM5701 },
13078         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13079           TG3PCI_SUBDEVICE_ID_COMPAQ_CHANGELING, 0 },
13080         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13081           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780, TG3_PHY_ID_BCM5701 },
13082         { TG3PCI_SUBVENDOR_ID_COMPAQ,
13083           TG3PCI_SUBDEVICE_ID_COMPAQ_NC7780_2, TG3_PHY_ID_BCM5701 },
13084
13085         /* IBM boards. */
13086         { TG3PCI_SUBVENDOR_ID_IBM,
13087           TG3PCI_SUBDEVICE_ID_IBM_5703SAX2, 0 }
13088 };
13089
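/* Find this adapter's board in the subsystem-ID table above. */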
13090 static struct subsys_tbl_ent * __devinit tg3_lookup_by_subsys(struct tg3 *tp)
13091 {
13092         int i;
13093
13094         for (i = 0; i < ARRAY_SIZE(subsys_id_to_phy_id); i++) {
13095                 if ((subsys_id_to_phy_id[i].subsys_vendor ==
13096                      tp->pdev->subsystem_vendor) &&
13097                     (subsys_id_to_phy_id[i].subsys_devid ==
13098                      tp->pdev->subsystem_device))
13099                         return &subsys_id_to_phy_id[i];
13100         }
13101         return NULL;
13102 }
13103
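/* Read the configuration the bootcode left in NIC SRAM: PHY id and
 * type, LED mode, and WOL/ASF/APE capabilities.  Without a valid
 * SRAM signature, the defaults set here stand.
 */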
13104 static void __devinit tg3_get_eeprom_hw_cfg(struct tg3 *tp)
13105 {
13106         u32 val;
13107
13108         tp->phy_id = TG3_PHY_ID_INVALID;
13109         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13110
13111         /* Assume an onboard, WOL-capable device by default. */
13112         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13113         tg3_flag_set(tp, WOL_CAP);
13114
13115         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13116                 if (!(tr32(PCIE_TRANSACTION_CFG) & PCIE_TRANS_CFG_LOM)) {
13117                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13118                         tg3_flag_set(tp, IS_NIC);
13119                 }
13120                 val = tr32(VCPU_CFGSHDW);
13121                 if (val & VCPU_CFGSHDW_ASPM_DBNC)
13122                         tg3_flag_set(tp, ASPM_WORKAROUND);
13123                 if ((val & VCPU_CFGSHDW_WOL_ENABLE) &&
13124                     (val & VCPU_CFGSHDW_WOL_MAGPKT)) {
13125                         tg3_flag_set(tp, WOL_ENABLE);
13126                         device_set_wakeup_enable(&tp->pdev->dev, true);
13127                 }
13128                 goto done;
13129         }
13130
13131         tg3_read_mem(tp, NIC_SRAM_DATA_SIG, &val);
13132         if (val == NIC_SRAM_DATA_SIG_MAGIC) {
13133                 u32 nic_cfg, led_cfg;
13134                 u32 nic_phy_id, ver, cfg2 = 0, cfg4 = 0, eeprom_phy_id;
13135                 int eeprom_phy_serdes = 0;
13136
13137                 tg3_read_mem(tp, NIC_SRAM_DATA_CFG, &nic_cfg);
13138                 tp->nic_sram_data_cfg = nic_cfg;
13139
13140                 tg3_read_mem(tp, NIC_SRAM_DATA_VER, &ver);
13141                 ver >>= NIC_SRAM_DATA_VER_SHIFT;
13142                 if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
13143                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
13144                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5703 &&
13145                     (ver > 0) && (ver < 0x100))
13146                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_2, &cfg2);
13147
13148                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785)
13149                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_4, &cfg4);
13150
13151                 if ((nic_cfg & NIC_SRAM_DATA_CFG_PHY_TYPE_MASK) ==
13152                     NIC_SRAM_DATA_CFG_PHY_TYPE_FIBER)
13153                         eeprom_phy_serdes = 1;
13154
13155                 tg3_read_mem(tp, NIC_SRAM_DATA_PHY_ID, &nic_phy_id);
13156                 if (nic_phy_id != 0) {
13157                         u32 id1 = nic_phy_id & NIC_SRAM_DATA_PHY_ID1_MASK;
13158                         u32 id2 = nic_phy_id & NIC_SRAM_DATA_PHY_ID2_MASK;
13159
13160                         eeprom_phy_id  = (id1 >> 16) << 10;
13161                         eeprom_phy_id |= (id2 & 0xfc00) << 16;
13162                         eeprom_phy_id |= (id2 & 0x03ff) <<  0;
13163                 } else
13164                         eeprom_phy_id = 0;
13165
13166                 tp->phy_id = eeprom_phy_id;
13167                 if (eeprom_phy_serdes) {
13168                         if (!tg3_flag(tp, 5705_PLUS))
13169                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13170                         else
13171                                 tp->phy_flags |= TG3_PHYFLG_MII_SERDES;
13172                 }
13173
13174                 if (tg3_flag(tp, 5750_PLUS))
13175                         led_cfg = cfg2 & (NIC_SRAM_DATA_CFG_LED_MODE_MASK |
13176                                     SHASTA_EXT_LED_MODE_MASK);
13177                 else
13178                         led_cfg = nic_cfg & NIC_SRAM_DATA_CFG_LED_MODE_MASK;
13179
13180                 switch (led_cfg) {
13181                 default:
13182                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_1:
13183                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13184                         break;
13185
13186                 case NIC_SRAM_DATA_CFG_LED_MODE_PHY_2:
13187                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13188                         break;
13189
13190                 case NIC_SRAM_DATA_CFG_LED_MODE_MAC:
13191                         tp->led_ctrl = LED_CTRL_MODE_MAC;
13192
13193                         /* Default to PHY_1_MODE if 0 (MAC_MODE) is
13194                          * read on some older 5700/5701 bootcode.
13195                          */
13196                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
13197                             ASIC_REV_5700 ||
13198                             GET_ASIC_REV(tp->pci_chip_rev_id) ==
13199                             ASIC_REV_5701)
13200                                 tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13201
13202                         break;
13203
13204                 case SHASTA_EXT_LED_SHARED:
13205                         tp->led_ctrl = LED_CTRL_MODE_SHARED;
13206                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0 &&
13207                             tp->pci_chip_rev_id != CHIPREV_ID_5750_A1)
13208                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13209                                                  LED_CTRL_MODE_PHY_2);
13210                         break;
13211
13212                 case SHASTA_EXT_LED_MAC:
13213                         tp->led_ctrl = LED_CTRL_MODE_SHASTA_MAC;
13214                         break;
13215
13216                 case SHASTA_EXT_LED_COMBO:
13217                         tp->led_ctrl = LED_CTRL_MODE_COMBO;
13218                         if (tp->pci_chip_rev_id != CHIPREV_ID_5750_A0)
13219                                 tp->led_ctrl |= (LED_CTRL_MODE_PHY_1 |
13220                                                  LED_CTRL_MODE_PHY_2);
13221                         break;
13222
13223                 }
13224
13225                 if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
13226                      GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) &&
13227                     tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL)
13228                         tp->led_ctrl = LED_CTRL_MODE_PHY_2;
13229
13230                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5784_AX)
13231                         tp->led_ctrl = LED_CTRL_MODE_PHY_1;
13232
13233                 if (nic_cfg & NIC_SRAM_DATA_CFG_EEPROM_WP) {
13234                         tg3_flag_set(tp, EEPROM_WRITE_PROT);
13235                         if ((tp->pdev->subsystem_vendor ==
13236                              PCI_VENDOR_ID_ARIMA) &&
13237                             (tp->pdev->subsystem_device == 0x205a ||
13238                              tp->pdev->subsystem_device == 0x2063))
13239                                 tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13240                 } else {
13241                         tg3_flag_clear(tp, EEPROM_WRITE_PROT);
13242                         tg3_flag_set(tp, IS_NIC);
13243                 }
13244
13245                 if (nic_cfg & NIC_SRAM_DATA_CFG_ASF_ENABLE) {
13246                         tg3_flag_set(tp, ENABLE_ASF);
13247                         if (tg3_flag(tp, 5750_PLUS))
13248                                 tg3_flag_set(tp, ASF_NEW_HANDSHAKE);
13249                 }
13250
13251                 if ((nic_cfg & NIC_SRAM_DATA_CFG_APE_ENABLE) &&
13252                     tg3_flag(tp, 5750_PLUS))
13253                         tg3_flag_set(tp, ENABLE_APE);
13254
13255                 if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES &&
13256                     !(nic_cfg & NIC_SRAM_DATA_CFG_FIBER_WOL))
13257                         tg3_flag_clear(tp, WOL_CAP);
13258
13259                 if (tg3_flag(tp, WOL_CAP) &&
13260                     (nic_cfg & NIC_SRAM_DATA_CFG_WOL_ENABLE)) {
13261                         tg3_flag_set(tp, WOL_ENABLE);
13262                         device_set_wakeup_enable(&tp->pdev->dev, true);
13263                 }
13264
13265                 if (cfg2 & (1 << 17))
13266                         tp->phy_flags |= TG3_PHYFLG_CAPACITIVE_COUPLING;
13267
13268                 /* SerDes signal pre-emphasis in register 0x590 is set
13269                  * by the bootcode if bit 18 is set. */
13270                 if (cfg2 & (1 << 18))
13271                         tp->phy_flags |= TG3_PHYFLG_SERDES_PREEMPHASIS;
13272
13273                 if ((tg3_flag(tp, 57765_PLUS) ||
13274                      (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
13275                       GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX)) &&
13276                     (cfg2 & NIC_SRAM_DATA_CFG_2_APD_EN))
13277                         tp->phy_flags |= TG3_PHYFLG_ENABLE_APD;
13278
13279                 if (tg3_flag(tp, PCI_EXPRESS) &&
13280                     GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
13281                     !tg3_flag(tp, 57765_PLUS)) {
13282                         u32 cfg3;
13283
13284                         tg3_read_mem(tp, NIC_SRAM_DATA_CFG_3, &cfg3);
13285                         if (cfg3 & NIC_SRAM_ASPM_DEBOUNCE)
13286                                 tg3_flag_set(tp, ASPM_WORKAROUND);
13287                 }
13288
13289                 if (cfg4 & NIC_SRAM_RGMII_INBAND_DISABLE)
13290                         tg3_flag_set(tp, RGMII_INBAND_DISABLE);
13291                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_RX_EN)
13292                         tg3_flag_set(tp, RGMII_EXT_IBND_RX_EN);
13293                 if (cfg4 & NIC_SRAM_RGMII_EXT_IBND_TX_EN)
13294                         tg3_flag_set(tp, RGMII_EXT_IBND_TX_EN);
13295         }
13296 done:
13297         if (tg3_flag(tp, WOL_CAP))
13298                 device_set_wakeup_enable(&tp->pdev->dev,
13299                                          tg3_flag(tp, WOL_ENABLE));
13300         else
13301                 device_set_wakeup_capable(&tp->pdev->dev, false);
13302 }
13303
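/* Kick off an OTP controller command and poll up to 1 ms for
 * completion; returns -EBUSY on timeout.
 */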
13304 static int __devinit tg3_issue_otp_command(struct tg3 *tp, u32 cmd)
13305 {
13306         int i;
13307         u32 val;
13308
13309         tw32(OTP_CTRL, cmd | OTP_CTRL_OTP_CMD_START);
13310         tw32(OTP_CTRL, cmd);
13311
13312         /* Wait for up to 1 ms for command to execute. */
13313         for (i = 0; i < 100; i++) {
13314                 val = tr32(OTP_STATUS);
13315                 if (val & OTP_STATUS_CMD_DONE)
13316                         break;
13317                 udelay(10);
13318         }
13319
13320         return (val & OTP_STATUS_CMD_DONE) ? 0 : -EBUSY;
13321 }
13322
13323 /* Read the gphy configuration from the OTP region of the chip.  The gphy
13324  * configuration is a 32-bit value that straddles the alignment boundary.
13325  * We do two 32-bit reads and then shift and merge the results.
13326  */
13327 static u32 __devinit tg3_read_otp_phycfg(struct tg3 *tp)
13328 {
13329         u32 bhalf_otp, thalf_otp;
13330
13331         tw32(OTP_MODE, OTP_MODE_OTP_THRU_GRC);
13332
13333         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_INIT))
13334                 return 0;
13335
13336         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC1);
13337
13338         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13339                 return 0;
13340
13341         thalf_otp = tr32(OTP_READ_DATA);
13342
13343         tw32(OTP_ADDRESS, OTP_ADDRESS_MAGIC2);
13344
13345         if (tg3_issue_otp_command(tp, OTP_CTRL_OTP_CMD_READ))
13346                 return 0;
13347
13348         bhalf_otp = tr32(OTP_READ_DATA);
13349
13350         return ((thalf_otp & 0x0000ffff) << 16) | (bhalf_otp >> 16);
13351 }
13352
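/* Set the default link configuration: autonegotiate everything the
 * PHY type allows (TP modes for copper, fibre otherwise).
 */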
13353 static void __devinit tg3_phy_init_link_config(struct tg3 *tp)
13354 {
13355         u32 adv = ADVERTISED_Autoneg;
13356
13357         if (!(tp->phy_flags & TG3_PHYFLG_10_100_ONLY))
13358                 adv |= ADVERTISED_1000baseT_Half |
13359                        ADVERTISED_1000baseT_Full;
13360
13361         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
13362                 adv |= ADVERTISED_100baseT_Half |
13363                        ADVERTISED_100baseT_Full |
13364                        ADVERTISED_10baseT_Half |
13365                        ADVERTISED_10baseT_Full |
13366                        ADVERTISED_TP;
13367         else
13368                 adv |= ADVERTISED_FIBRE;
13369
13370         tp->link_config.advertising = adv;
13371         tp->link_config.speed = SPEED_UNKNOWN;
13372         tp->link_config.duplex = DUPLEX_UNKNOWN;
13373         tp->link_config.autoneg = AUTONEG_ENABLE;
13374         tp->link_config.active_speed = SPEED_UNKNOWN;
13375         tp->link_config.active_duplex = DUPLEX_UNKNOWN;
13376
13377         tp->old_link = -1;
13378 }
13379
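/* Identify the PHY and set up its initial configuration, falling
 * back to EEPROM data or the subsystem-ID table when the PHY
 * registers cannot be trusted.
 */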
13380 static int __devinit tg3_phy_probe(struct tg3 *tp)
13381 {
13382         u32 hw_phy_id_1, hw_phy_id_2;
13383         u32 hw_phy_id, hw_phy_id_masked;
13384         int err;
13385
13386         /* flow control autonegotiation is default behavior */
13387         tg3_flag_set(tp, PAUSE_AUTONEG);
13388         tp->link_config.flowctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
13389
13390         if (tg3_flag(tp, USE_PHYLIB))
13391                 return tg3_phy_init(tp);
13392
13393         /* Reading the PHY ID register can conflict with ASF
13394          * firmware access to the PHY hardware.
13395          */
13396         err = 0;
13397         if (tg3_flag(tp, ENABLE_ASF) || tg3_flag(tp, ENABLE_APE)) {
13398                 hw_phy_id = hw_phy_id_masked = TG3_PHY_ID_INVALID;
13399         } else {
13400                 /* Now read the physical PHY_ID from the chip and verify
13401                  * that it is sane.  If it doesn't look good, we fall back
13402                  * to the PHY_ID found in the eeprom area; failing that,
13403                  * to the hard-coded subsystem-ID table.
13404                  */
13405                 err |= tg3_readphy(tp, MII_PHYSID1, &hw_phy_id_1);
13406                 err |= tg3_readphy(tp, MII_PHYSID2, &hw_phy_id_2);
13407
13408                 hw_phy_id  = (hw_phy_id_1 & 0xffff) << 10;
13409                 hw_phy_id |= (hw_phy_id_2 & 0xfc00) << 16;
13410                 hw_phy_id |= (hw_phy_id_2 & 0x03ff) <<  0;
13411
13412                 hw_phy_id_masked = hw_phy_id & TG3_PHY_ID_MASK;
13413         }
13414
13415         if (!err && TG3_KNOWN_PHY_ID(hw_phy_id_masked)) {
13416                 tp->phy_id = hw_phy_id;
13417                 if (hw_phy_id_masked == TG3_PHY_ID_BCM8002)
13418                         tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13419                 else
13420                         tp->phy_flags &= ~TG3_PHYFLG_PHY_SERDES;
13421         } else {
13422                 if (tp->phy_id != TG3_PHY_ID_INVALID) {
13423                         /* Do nothing, phy ID already set up in
13424                          * tg3_get_eeprom_hw_cfg().
13425                          */
13426                 } else {
13427                         struct subsys_tbl_ent *p;
13428
13429                         /* No eeprom signature?  Try the hardcoded
13430                          * subsys device table.
13431                          */
13432                         p = tg3_lookup_by_subsys(tp);
13433                         if (!p)
13434                                 return -ENODEV;
13435
13436                         tp->phy_id = p->phy_id;
13437                         if (!tp->phy_id ||
13438                             tp->phy_id == TG3_PHY_ID_BCM8002)
13439                                 tp->phy_flags |= TG3_PHYFLG_PHY_SERDES;
13440                 }
13441         }
13442
13443         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13444             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13445              GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720 ||
13446              (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 &&
13447               tp->pci_chip_rev_id != CHIPREV_ID_5717_A0) ||
13448              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 &&
13449               tp->pci_chip_rev_id != CHIPREV_ID_57765_A0)))
13450                 tp->phy_flags |= TG3_PHYFLG_EEE_CAP;
13451
13452         tg3_phy_init_link_config(tp);
13453
13454         if (!(tp->phy_flags & TG3_PHYFLG_ANY_SERDES) &&
13455             !tg3_flag(tp, ENABLE_APE) &&
13456             !tg3_flag(tp, ENABLE_ASF)) {
13457                 u32 bmsr, dummy;
13458
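                /* MII_BMSR link status is latched; read it twice to
                 * get the current state.
                 */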
13459                 tg3_readphy(tp, MII_BMSR, &bmsr);
13460                 if (!tg3_readphy(tp, MII_BMSR, &bmsr) &&
13461                     (bmsr & BMSR_LSTATUS))
13462                         goto skip_phy_reset;
13463
13464                 err = tg3_phy_reset(tp);
13465                 if (err)
13466                         return err;
13467
13468                 tg3_phy_set_wirespeed(tp);
13469
13470                 if (!tg3_phy_copper_an_config_ok(tp, &dummy)) {
13471                         tg3_phy_autoneg_cfg(tp, tp->link_config.advertising,
13472                                             tp->link_config.flowctrl);
13473
13474                         tg3_writephy(tp, MII_BMCR,
13475                                      BMCR_ANENABLE | BMCR_ANRESTART);
13476                 }
13477         }
13478
13479 skip_phy_reset:
13480         if ((tp->phy_id & TG3_PHY_ID_MASK) == TG3_PHY_ID_BCM5401) {
13481                 err = tg3_init_5401phy_dsp(tp);
13482                 if (err)
13483                         return err;
13484
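                /* Initialize the 5401 DSP a second time. */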
13485                 err = tg3_init_5401phy_dsp(tp);
13486         }
13487
13488         return err;
13489 }
13490
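/* Extract the board part number (and, on Dell boards, the bootcode
 * version) from the PCI VPD, falling back to device-ID based names.
 */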
13491 static void __devinit tg3_read_vpd(struct tg3 *tp)
13492 {
13493         u8 *vpd_data;
13494         unsigned int block_end, rosize, len;
13495         u32 vpdlen;
13496         int j, i = 0;
13497
13498         vpd_data = (u8 *)tg3_vpd_readblock(tp, &vpdlen);
13499         if (!vpd_data)
13500                 goto out_no_vpd;
13501
13502         i = pci_vpd_find_tag(vpd_data, 0, vpdlen, PCI_VPD_LRDT_RO_DATA);
13503         if (i < 0)
13504                 goto out_not_found;
13505
13506         rosize = pci_vpd_lrdt_size(&vpd_data[i]);
13507         block_end = i + PCI_VPD_LRDT_TAG_SIZE + rosize;
13508         i += PCI_VPD_LRDT_TAG_SIZE;
13509
13510         if (block_end > vpdlen)
13511                 goto out_not_found;
13512
13513         j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13514                                       PCI_VPD_RO_KEYWORD_MFR_ID);
13515         if (j > 0) {
13516                 len = pci_vpd_info_field_size(&vpd_data[j]);
13517
13518                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13519                 if (j + len > block_end || len != 4 ||
13520                     memcmp(&vpd_data[j], "1028", 4))
13521                         goto partno;
13522
13523                 j = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13524                                               PCI_VPD_RO_KEYWORD_VENDOR0);
13525                 if (j < 0)
13526                         goto partno;
13527
13528                 len = pci_vpd_info_field_size(&vpd_data[j]);
13529
13530                 j += PCI_VPD_INFO_FLD_HDR_SIZE;
13531                 if (j + len > block_end)
13532                         goto partno;
13533
13534                 memcpy(tp->fw_ver, &vpd_data[j], len);
13535                 strncat(tp->fw_ver, " bc ", vpdlen - len - 1);
13536         }
13537
13538 partno:
13539         i = pci_vpd_find_info_keyword(vpd_data, i, rosize,
13540                                       PCI_VPD_RO_KEYWORD_PARTNO);
13541         if (i < 0)
13542                 goto out_not_found;
13543
13544         len = pci_vpd_info_field_size(&vpd_data[i]);
13545
13546         i += PCI_VPD_INFO_FLD_HDR_SIZE;
13547         if (len > TG3_BPN_SIZE ||
13548             (len + i) > vpdlen)
13549                 goto out_not_found;
13550
13551         memcpy(tp->board_part_number, &vpd_data[i], len);
13552
13553 out_not_found:
13554         kfree(vpd_data);
13555         if (tp->board_part_number[0])
13556                 return;
13557
13558 out_no_vpd:
13559         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
13560                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717)
13561                         strcpy(tp->board_part_number, "BCM5717");
13562                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718)
13563                         strcpy(tp->board_part_number, "BCM5718");
13564                 else
13565                         goto nomatch;
13566         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780) {
13567                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57780)
13568                         strcpy(tp->board_part_number, "BCM57780");
13569                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57760)
13570                         strcpy(tp->board_part_number, "BCM57760");
13571                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790)
13572                         strcpy(tp->board_part_number, "BCM57790");
13573                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57788)
13574                         strcpy(tp->board_part_number, "BCM57788");
13575                 else
13576                         goto nomatch;
13577         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765) {
13578                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761)
13579                         strcpy(tp->board_part_number, "BCM57761");
13580                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765)
13581                         strcpy(tp->board_part_number, "BCM57765");
13582                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781)
13583                         strcpy(tp->board_part_number, "BCM57781");
13584                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785)
13585                         strcpy(tp->board_part_number, "BCM57785");
13586                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791)
13587                         strcpy(tp->board_part_number, "BCM57791");
13588                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795)
13589                         strcpy(tp->board_part_number, "BCM57795");
13590                 else
13591                         goto nomatch;
13592         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766) {
13593                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762)
13594                         strcpy(tp->board_part_number, "BCM57762");
13595                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766)
13596                         strcpy(tp->board_part_number, "BCM57766");
13597                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782)
13598                         strcpy(tp->board_part_number, "BCM57782");
13599                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13600                         strcpy(tp->board_part_number, "BCM57786");
13601                 else
13602                         goto nomatch;
13603         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
13604                 strcpy(tp->board_part_number, "BCM95906");
13605         } else {
13606 nomatch:
13607                 strcpy(tp->board_part_number, "none");
13608         }
13609 }
13610
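/* A valid firmware image starts with a 0x0c000000-type header word
 * followed by a zero word.
 */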
13611 static int __devinit tg3_fw_img_is_valid(struct tg3 *tp, u32 offset)
13612 {
13613         u32 val;
13614
13615         if (tg3_nvram_read(tp, offset, &val) ||
13616             (val & 0xfc000000) != 0x0c000000 ||
13617             tg3_nvram_read(tp, offset + 4, &val) ||
13618             val != 0)
13619                 return 0;
13620
13621         return 1;
13622 }
13623
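/* Append the bootcode version from NVRAM to tp->fw_ver, handling
 * both the newer string format and the older major/minor word.
 */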
13624 static void __devinit tg3_read_bc_ver(struct tg3 *tp)
13625 {
13626         u32 val, offset, start, ver_offset;
13627         int i, dst_off;
13628         bool newver = false;
13629
13630         if (tg3_nvram_read(tp, 0xc, &offset) ||
13631             tg3_nvram_read(tp, 0x4, &start))
13632                 return;
13633
13634         offset = tg3_nvram_logical_addr(tp, offset);
13635
13636         if (tg3_nvram_read(tp, offset, &val))
13637                 return;
13638
13639         if ((val & 0xfc000000) == 0x0c000000) {
13640                 if (tg3_nvram_read(tp, offset + 4, &val))
13641                         return;
13642
13643                 if (val == 0)
13644                         newver = true;
13645         }
13646
13647         dst_off = strlen(tp->fw_ver);
13648
13649         if (newver) {
13650                 if (TG3_VER_SIZE - dst_off < 16 ||
13651                     tg3_nvram_read(tp, offset + 8, &ver_offset))
13652                         return;
13653
13654                 offset = offset + ver_offset - start;
13655                 for (i = 0; i < 16; i += 4) {
13656                         __be32 v;
13657                         if (tg3_nvram_read_be32(tp, offset + i, &v))
13658                                 return;
13659
13660                         memcpy(tp->fw_ver + dst_off + i, &v, sizeof(v));
13661                 }
13662         } else {
13663                 u32 major, minor;
13664
13665                 if (tg3_nvram_read(tp, TG3_NVM_PTREV_BCVER, &ver_offset))
13666                         return;
13667
13668                 major = (ver_offset & TG3_NVM_BCVER_MAJMSK) >>
13669                         TG3_NVM_BCVER_MAJSFT;
13670                 minor = ver_offset & TG3_NVM_BCVER_MINMSK;
13671                 snprintf(&tp->fw_ver[dst_off], TG3_VER_SIZE - dst_off,
13672                          "v%d.%02d", major, minor);
13673         }
13674 }
13675
13676 static void __devinit tg3_read_hwsb_ver(struct tg3 *tp)
13677 {
13678         u32 val, major, minor;
13679
13680         /* Use native endian representation */
13681         if (tg3_nvram_read(tp, TG3_NVM_HWSB_CFG1, &val))
13682                 return;
13683
13684         major = (val & TG3_NVM_HWSB_CFG1_MAJMSK) >>
13685                 TG3_NVM_HWSB_CFG1_MAJSFT;
13686         minor = (val & TG3_NVM_HWSB_CFG1_MINMSK) >>
13687                 TG3_NVM_HWSB_CFG1_MINSFT;
13688
13689         snprintf(&tp->fw_ver[0], TG3_VER_SIZE, "sb v%d.%02d", major, minor);
13690 }
13691
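/* Append the selfboot format-1 version, encoding the build number
 * as a trailing letter.
 */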
13692 static void __devinit tg3_read_sb_ver(struct tg3 *tp, u32 val)
13693 {
13694         u32 offset, major, minor, build;
13695
13696         strncat(tp->fw_ver, "sb", TG3_VER_SIZE - strlen(tp->fw_ver) - 1);
13697
13698         if ((val & TG3_EEPROM_SB_FORMAT_MASK) != TG3_EEPROM_SB_FORMAT_1)
13699                 return;
13700
13701         switch (val & TG3_EEPROM_SB_REVISION_MASK) {
13702         case TG3_EEPROM_SB_REVISION_0:
13703                 offset = TG3_EEPROM_SB_F1R0_EDH_OFF;
13704                 break;
13705         case TG3_EEPROM_SB_REVISION_2:
13706                 offset = TG3_EEPROM_SB_F1R2_EDH_OFF;
13707                 break;
13708         case TG3_EEPROM_SB_REVISION_3:
13709                 offset = TG3_EEPROM_SB_F1R3_EDH_OFF;
13710                 break;
13711         case TG3_EEPROM_SB_REVISION_4:
13712                 offset = TG3_EEPROM_SB_F1R4_EDH_OFF;
13713                 break;
13714         case TG3_EEPROM_SB_REVISION_5:
13715                 offset = TG3_EEPROM_SB_F1R5_EDH_OFF;
13716                 break;
13717         case TG3_EEPROM_SB_REVISION_6:
13718                 offset = TG3_EEPROM_SB_F1R6_EDH_OFF;
13719                 break;
13720         default:
13721                 return;
13722         }
13723
13724         if (tg3_nvram_read(tp, offset, &val))
13725                 return;
13726
13727         build = (val & TG3_EEPROM_SB_EDH_BLD_MASK) >>
13728                 TG3_EEPROM_SB_EDH_BLD_SHFT;
13729         major = (val & TG3_EEPROM_SB_EDH_MAJ_MASK) >>
13730                 TG3_EEPROM_SB_EDH_MAJ_SHFT;
13731         minor =  val & TG3_EEPROM_SB_EDH_MIN_MASK;
13732
13733         if (minor > 99 || build > 26)
13734                 return;
13735
13736         offset = strlen(tp->fw_ver);
13737         snprintf(&tp->fw_ver[offset], TG3_VER_SIZE - offset,
13738                  " v%d.%02d", major, minor);
13739
13740         if (build > 0) {
13741                 offset = strlen(tp->fw_ver);
13742                 if (offset < TG3_VER_SIZE - 1)
13743                         tp->fw_ver[offset] = 'a' + build - 1;
13744         }
13745 }
13746
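/* Locate the ASF management firmware image via the NVRAM directory
 * and append its version string.
 */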
13747 static void __devinit tg3_read_mgmtfw_ver(struct tg3 *tp)
13748 {
13749         u32 val, offset, start;
13750         int i, vlen;
13751
13752         for (offset = TG3_NVM_DIR_START;
13753              offset < TG3_NVM_DIR_END;
13754              offset += TG3_NVM_DIRENT_SIZE) {
13755                 if (tg3_nvram_read(tp, offset, &val))
13756                         return;
13757
13758                 if ((val >> TG3_NVM_DIRTYPE_SHIFT) == TG3_NVM_DIRTYPE_ASFINI)
13759                         break;
13760         }
13761
13762         if (offset == TG3_NVM_DIR_END)
13763                 return;
13764
13765         if (!tg3_flag(tp, 5705_PLUS))
13766                 start = 0x08000000;
13767         else if (tg3_nvram_read(tp, offset - 4, &start))
13768                 return;
13769
13770         if (tg3_nvram_read(tp, offset + 4, &offset) ||
13771             !tg3_fw_img_is_valid(tp, offset) ||
13772             tg3_nvram_read(tp, offset + 8, &val))
13773                 return;
13774
13775         offset += val - start;
13776
13777         vlen = strlen(tp->fw_ver);
13778
13779         tp->fw_ver[vlen++] = ',';
13780         tp->fw_ver[vlen++] = ' ';
13781
13782         for (i = 0; i < 4; i++) {
13783                 __be32 v;
13784                 if (tg3_nvram_read_be32(tp, offset, &v))
13785                         return;
13786
13787                 offset += sizeof(v);
13788
13789                 if (vlen > TG3_VER_SIZE - sizeof(v)) {
13790                         memcpy(&tp->fw_ver[vlen], &v, TG3_VER_SIZE - vlen);
13791                         break;
13792                 }
13793
13794                 memcpy(&tp->fw_ver[vlen], &v, sizeof(v));
13795                 vlen += sizeof(v);
13796         }
13797 }
13798
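/* Append the APE firmware version, distinguishing NCSI from DASH
 * images.
 */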
13799 static void __devinit tg3_read_dash_ver(struct tg3 *tp)
13800 {
13801         int vlen;
13802         u32 apedata;
13803         char *fwtype;
13804
13805         if (!tg3_flag(tp, ENABLE_APE) || !tg3_flag(tp, ENABLE_ASF))
13806                 return;
13807
13808         apedata = tg3_ape_read32(tp, TG3_APE_SEG_SIG);
13809         if (apedata != APE_SEG_SIG_MAGIC)
13810                 return;
13811
13812         apedata = tg3_ape_read32(tp, TG3_APE_FW_STATUS);
13813         if (!(apedata & APE_FW_STATUS_READY))
13814                 return;
13815
13816         apedata = tg3_ape_read32(tp, TG3_APE_FW_VERSION);
13817
13818         if (tg3_ape_read32(tp, TG3_APE_FW_FEATURES) & TG3_APE_FW_FEATURE_NCSI) {
13819                 tg3_flag_set(tp, APE_HAS_NCSI);
13820                 fwtype = "NCSI";
13821         } else {
13822                 fwtype = "DASH";
13823         }
13824
13825         vlen = strlen(tp->fw_ver);
13826
13827         snprintf(&tp->fw_ver[vlen], TG3_VER_SIZE - vlen, " %s v%d.%d.%d.%d",
13828                  fwtype,
13829                  (apedata & APE_FW_VERSION_MAJMSK) >> APE_FW_VERSION_MAJSFT,
13830                  (apedata & APE_FW_VERSION_MINMSK) >> APE_FW_VERSION_MINSFT,
13831                  (apedata & APE_FW_VERSION_REVMSK) >> APE_FW_VERSION_REVSFT,
13832                  (apedata & APE_FW_VERSION_BLDMSK));
13833 }
13834
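/* Build the complete firmware version string, dispatching on the
 * NVRAM signature to the bootcode, selfboot, or hardware-selfboot
 * readers.
 */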
13835 static void __devinit tg3_read_fw_ver(struct tg3 *tp)
13836 {
13837         u32 val;
13838         bool vpd_vers = false;
13839
13840         if (tp->fw_ver[0] != 0)
13841                 vpd_vers = true;
13842
13843         if (tg3_flag(tp, NO_NVRAM)) {
13844                 strcat(tp->fw_ver, "sb");
13845                 return;
13846         }
13847
13848         if (tg3_nvram_read(tp, 0, &val))
13849                 return;
13850
13851         if (val == TG3_EEPROM_MAGIC)
13852                 tg3_read_bc_ver(tp);
13853         else if ((val & TG3_EEPROM_MAGIC_FW_MSK) == TG3_EEPROM_MAGIC_FW)
13854                 tg3_read_sb_ver(tp, val);
13855         else if ((val & TG3_EEPROM_MAGIC_HW_MSK) == TG3_EEPROM_MAGIC_HW)
13856                 tg3_read_hwsb_ver(tp);
13857         else
13858                 return;
13859
13860         if (vpd_vers)
13861                 goto done;
13862
13863         if (tg3_flag(tp, ENABLE_APE)) {
13864                 if (tg3_flag(tp, ENABLE_ASF))
13865                         tg3_read_dash_ver(tp);
13866         } else if (tg3_flag(tp, ENABLE_ASF)) {
13867                 tg3_read_mgmtfw_ver(tp);
13868         }
13869
13870 done:
13871         tp->fw_ver[TG3_VER_SIZE - 1] = 0;
13872 }
13873
13874 static inline u32 tg3_rx_ret_ring_size(struct tg3 *tp)
13875 {
13876         if (tg3_flag(tp, LRG_PROD_RING_CAP))
13877                 return TG3_RX_RET_MAX_SIZE_5717;
13878         else if (tg3_flag(tp, JUMBO_CAPABLE) && !tg3_flag(tp, 5780_CLASS))
13879                 return TG3_RX_RET_MAX_SIZE_5700;
13880         else
13881                 return TG3_RX_RET_MAX_SIZE_5705;
13882 }
13883
13884 static DEFINE_PCI_DEVICE_TABLE(tg3_write_reorder_chipsets) = {
13885         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_FE_GATE_700C) },
13886         { PCI_DEVICE(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_8131_BRIDGE) },
13887         { PCI_DEVICE(PCI_VENDOR_ID_VIA, PCI_DEVICE_ID_VIA_8385_0) },
13888         { },
13889 };
13890
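/* The 5704 and 5714 are dual-port devices.  Find the pci_dev for the
 * other port; if the device is configured single-port, fall back to
 * tp->pdev itself.
 */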
13891 static struct pci_dev * __devinit tg3_find_peer(struct tg3 *tp)
13892 {
13893         struct pci_dev *peer;
13894         unsigned int func, devnr = tp->pdev->devfn & ~7;
13895
13896         for (func = 0; func < 8; func++) {
13897                 peer = pci_get_slot(tp->pdev->bus, devnr | func);
13898                 if (peer && peer != tp->pdev)
13899                         break;
13900                 pci_dev_put(peer);
13901         }
13902         /* 5704 can be configured in single-port mode; set peer to
13903          * tp->pdev in that case.
13904          */
13905         if (!peer) {
13906                 peer = tp->pdev;
13907                 return peer;
13908         }
13909
13910         /*
13911          * We don't need to keep the refcount elevated; there's no way
13912          * to remove one half of this device without removing the other.
13913          */
13914         pci_dev_put(peer);
13915
13916         return peer;
13917 }
13918
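/* Determine the chip revision ID, reading it from a product ID
 * register on devices that use the alternate location, and set the
 * chip family flags (5705_PLUS, 5750_PLUS, etc.) derived from it.
 */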
13919 static void __devinit tg3_detect_asic_rev(struct tg3 *tp, u32 misc_ctrl_reg)
13920 {
13921         tp->pci_chip_rev_id = misc_ctrl_reg >> MISC_HOST_CTRL_CHIPREV_SHIFT;
13922         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_USE_PROD_ID_REG) {
13923                 u32 reg;
13924
13925                 /* All devices that use the alternate
13926                  * ASIC REV location have a CPMU.
13927                  */
13928                 tg3_flag_set(tp, CPMU_PRESENT);
13929
13930                 if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
13931                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
13932                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
13933                     tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720)
13934                         reg = TG3PCI_GEN2_PRODID_ASICREV;
13935                 else if (tp->pdev->device == TG3PCI_DEVICE_TIGON3_57781 ||
13936                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57785 ||
13937                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57761 ||
13938                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57765 ||
13939                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
13940                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
13941                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57762 ||
13942                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57766 ||
13943                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57782 ||
13944                          tp->pdev->device == TG3PCI_DEVICE_TIGON3_57786)
13945                         reg = TG3PCI_GEN15_PRODID_ASICREV;
13946                 else
13947                         reg = TG3PCI_PRODID_ASICREV;
13948
13949                 pci_read_config_dword(tp->pdev, reg, &tp->pci_chip_rev_id);
13950         }
13951
13952         /* Wrong chip ID in 5752 A0. This code can be removed later
13953          * as A0 is not in production.
13954          */
13955         if (tp->pci_chip_rev_id == CHIPREV_ID_5752_A0_HW)
13956                 tp->pci_chip_rev_id = CHIPREV_ID_5752_A0;
13957
13958         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
13959             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
13960             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
13961                 tg3_flag_set(tp, 5717_PLUS);
13962
13963         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57765 ||
13964             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57766)
13965                 tg3_flag_set(tp, 57765_CLASS);
13966
13967         if (tg3_flag(tp, 57765_CLASS) || tg3_flag(tp, 5717_PLUS))
13968                 tg3_flag_set(tp, 57765_PLUS);
13969
13970         /* Intentionally exclude ASIC_REV_5906 */
13971         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
13972             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
13973             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
13974             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
13975             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
13976             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
13977             tg3_flag(tp, 57765_PLUS))
13978                 tg3_flag_set(tp, 5755_PLUS);
13979
13980         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780 ||
13981             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
13982                 tg3_flag_set(tp, 5780_CLASS);
13983
13984         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
13985             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
13986             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906 ||
13987             tg3_flag(tp, 5755_PLUS) ||
13988             tg3_flag(tp, 5780_CLASS))
13989                 tg3_flag_set(tp, 5750_PLUS);
13990
13991         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
13992             tg3_flag(tp, 5750_PLUS))
13993                 tg3_flag_set(tp, 5705_PLUS);
13994 }
13995
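/* Called once during probe to discover the chip's invariant
 * properties: bus configuration, required workarounds, register
 * access methods, TSO/MSI capabilities, and PHY quirks.
 */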
13996 static int __devinit tg3_get_invariants(struct tg3 *tp)
13997 {
13998         u32 misc_ctrl_reg;
13999         u32 pci_state_reg, grc_misc_cfg;
14000         u32 val;
14001         u16 pci_cmd;
14002         int err;
14003
14004         /* Force memory write invalidate off.  If we leave it on,
14005          * then on 5700_BX chips we have to enable a workaround.
14006          * The workaround is to set the TG3PCI_DMA_RW_CTRL boundary
14007          * to match the cacheline size.  The Broadcom driver have this
14008          * to match the cacheline size.  The Broadcom driver has this
14009          * workaround but turns MWI off all the time, so it never uses
14010          */
14011         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14012         pci_cmd &= ~PCI_COMMAND_INVALIDATE;
14013         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14014
14015         /* Important! -- Make sure register accesses are byteswapped
14016          * correctly.  Also, for those chips that require it, make
14017          * sure that indirect register accesses are enabled before
14018          * the first operation.
14019          */
14020         pci_read_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14021                               &misc_ctrl_reg);
14022         tp->misc_host_ctrl |= (misc_ctrl_reg &
14023                                MISC_HOST_CTRL_CHIPREV);
14024         pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14025                                tp->misc_host_ctrl);
14026
14027         tg3_detect_asic_rev(tp, misc_ctrl_reg);
14028
14029         /* If we have 5702/03 A1 or A2 on certain ICH chipsets,
14030          * we need to disable memory and use config. cycles
14031          * only to access all registers. The 5702/03 chips
14032          * can mistakenly decode the special cycles from the
14033          * ICH chipsets as memory write cycles, causing corruption
14034          * of register and memory space. Only certain ICH bridges
14035          * will drive special cycles with non-zero data during the
14036          * address phase which can fall within the 5703's address
14037          * address phase, which can fall within the 5703's address
14038          * non-zero address during special cycles. However, only
14039          * these ICH bridges are known to drive non-zero addresses
14040          * during special cycles.
14041          *
14042          * Since special cycles do not cross PCI bridges, we only
14043          * enable this workaround if the 5703 is on the secondary
14044          * bus of these ICH bridges.
14045          */
14046         if ((tp->pci_chip_rev_id == CHIPREV_ID_5703_A1) ||
14047             (tp->pci_chip_rev_id == CHIPREV_ID_5703_A2)) {
14048                 static struct tg3_dev_id {
14049                         u32     vendor;
14050                         u32     device;
14051                         u32     rev;
14052                 } ich_chipsets[] = {
14053                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AA_8,
14054                           PCI_ANY_ID },
14055                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801AB_8,
14056                           PCI_ANY_ID },
14057                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_11,
14058                           0xa },
14059                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82801BA_6,
14060                           PCI_ANY_ID },
14061                         { },
14062                 };
14063                 struct tg3_dev_id *pci_id = &ich_chipsets[0];
14064                 struct pci_dev *bridge = NULL;
14065
14066                 while (pci_id->vendor != 0) {
14067                         bridge = pci_get_device(pci_id->vendor, pci_id->device,
14068                                                 bridge);
14069                         if (!bridge) {
14070                                 pci_id++;
14071                                 continue;
14072                         }
14073                         if (pci_id->rev != PCI_ANY_ID) {
14074                                 if (bridge->revision > pci_id->rev)
14075                                         continue;
14076                         }
14077                         if (bridge->subordinate &&
14078                             (bridge->subordinate->number ==
14079                              tp->pdev->bus->number)) {
14080                                 tg3_flag_set(tp, ICH_WORKAROUND);
14081                                 pci_dev_put(bridge);
14082                                 break;
14083                         }
14084                 }
14085         }
14086
14087         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
14088                 static struct tg3_dev_id {
14089                         u32     vendor;
14090                         u32     device;
14091                 } bridge_chipsets[] = {
14092                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_0 },
14093                         { PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXH_1 },
14094                         { },
14095                 };
14096                 struct tg3_dev_id *pci_id = &bridge_chipsets[0];
14097                 struct pci_dev *bridge = NULL;
14098
14099                 while (pci_id->vendor != 0) {
14100                         bridge = pci_get_device(pci_id->vendor,
14101                                                 pci_id->device,
14102                                                 bridge);
14103                         if (!bridge) {
14104                                 pci_id++;
14105                                 continue;
14106                         }
14107                         if (bridge->subordinate &&
14108                             (bridge->subordinate->number <=
14109                              tp->pdev->bus->number) &&
14110                             (bridge->subordinate->subordinate >=
14111                              tp->pdev->bus->number)) {
14112                                 tg3_flag_set(tp, 5701_DMA_BUG);
14113                                 pci_dev_put(bridge);
14114                                 break;
14115                         }
14116                 }
14117         }
14118
14119         /* The EPB bridge inside 5714, 5715, and 5780 cannot support
14120          * DMA addresses > 40-bit. This bridge may have other additional
14121          * 57xx devices behind it in some 4-port NIC designs for example.
14122          * Any tg3 device found behind the bridge will also need the 40-bit
14123          * DMA workaround.
14124          */
14125         if (tg3_flag(tp, 5780_CLASS)) {
14126                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14127                 tp->msi_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_MSI);
14128         } else {
14129                 struct pci_dev *bridge = NULL;
14130
14131                 do {
14132                         bridge = pci_get_device(PCI_VENDOR_ID_SERVERWORKS,
14133                                                 PCI_DEVICE_ID_SERVERWORKS_EPB,
14134                                                 bridge);
14135                         if (bridge && bridge->subordinate &&
14136                             (bridge->subordinate->number <=
14137                              tp->pdev->bus->number) &&
14138                             (bridge->subordinate->subordinate >=
14139                              tp->pdev->bus->number)) {
14140                                 tg3_flag_set(tp, 40BIT_DMA_BUG);
14141                                 pci_dev_put(bridge);
14142                                 break;
14143                         }
14144                 } while (bridge);
14145         }
14146
14147         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14148             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714)
14149                 tp->pdev_peer = tg3_find_peer(tp);
14150
14151         /* Determine TSO capabilities */
14152         if (tp->pci_chip_rev_id == CHIPREV_ID_5719_A0)
14153                 ; /* Do nothing. HW bug. */
14154         else if (tg3_flag(tp, 57765_PLUS))
14155                 tg3_flag_set(tp, HW_TSO_3);
14156         else if (tg3_flag(tp, 5755_PLUS) ||
14157                  GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14158                 tg3_flag_set(tp, HW_TSO_2);
14159         else if (tg3_flag(tp, 5750_PLUS)) {
14160                 tg3_flag_set(tp, HW_TSO_1);
14161                 tg3_flag_set(tp, TSO_BUG);
14162                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 &&
14163                     tp->pci_chip_rev_id >= CHIPREV_ID_5750_C2)
14164                         tg3_flag_clear(tp, TSO_BUG);
14165         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14166                    GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14167                    tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) {
14168                 tg3_flag_set(tp, TSO_BUG);
14169                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705)
14170                         tp->fw_needed = FIRMWARE_TG3TSO5;
14171                 else
14172                         tp->fw_needed = FIRMWARE_TG3TSO;
14173         }
14174
14175         /* Selectively allow TSO based on operating conditions */
14176         if (tg3_flag(tp, HW_TSO_1) ||
14177             tg3_flag(tp, HW_TSO_2) ||
14178             tg3_flag(tp, HW_TSO_3) ||
14179             tp->fw_needed) {
14180                 /* For firmware TSO, assume ASF is disabled.
14181                  * We'll disable TSO later if we discover ASF
14182                  * is enabled in tg3_get_eeprom_hw_cfg().
14183                  */
14184                 tg3_flag_set(tp, TSO_CAPABLE);
14185         } else {
14186                 tg3_flag_clear(tp, TSO_CAPABLE);
14187                 tg3_flag_clear(tp, TSO_BUG);
14188                 tp->fw_needed = NULL;
14189         }
14190
14191         if (tp->pci_chip_rev_id == CHIPREV_ID_5701_A0)
14192                 tp->fw_needed = FIRMWARE_TG3;
14193
14194         tp->irq_max = 1;
14195
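        /* 5750 and newer chips support MSI (with exceptions cleared
         * below); 5755_PLUS chips and the 5906 can use one-shot MSI,
         * and 57765_PLUS chips also support MSI-X.
         */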
14196         if (tg3_flag(tp, 5750_PLUS)) {
14197                 tg3_flag_set(tp, SUPPORT_MSI);
14198                 if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_AX ||
14199                     GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5750_BX ||
14200                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714 &&
14201                      tp->pci_chip_rev_id <= CHIPREV_ID_5714_A2 &&
14202                      tp->pdev_peer == tp->pdev))
14203                         tg3_flag_clear(tp, SUPPORT_MSI);
14204
14205                 if (tg3_flag(tp, 5755_PLUS) ||
14206                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14207                         tg3_flag_set(tp, 1SHOT_MSI);
14208                 }
14209
14210                 if (tg3_flag(tp, 57765_PLUS)) {
14211                         tg3_flag_set(tp, SUPPORT_MSIX);
14212                         tp->irq_max = TG3_IRQ_MAX_VECS;
14213                         tg3_rss_init_dflt_indir_tbl(tp);
14214                 }
14215         }
14216
14217         if (tg3_flag(tp, 5755_PLUS))
14218                 tg3_flag_set(tp, SHORT_DMA_BUG);
14219
14220         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719)
14221                 tp->dma_limit = TG3_TX_BD_DMA_MAX_4K;
14222
14223         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14224             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14225             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14226                 tg3_flag_set(tp, LRG_PROD_RING_CAP);
14227
14228         if (tg3_flag(tp, 57765_PLUS) &&
14229             tp->pci_chip_rev_id != CHIPREV_ID_5719_A0)
14230                 tg3_flag_set(tp, USE_JUMBO_BDFLAG);
14231
14232         if (!tg3_flag(tp, 5705_PLUS) ||
14233             tg3_flag(tp, 5780_CLASS) ||
14234             tg3_flag(tp, USE_JUMBO_BDFLAG))
14235                 tg3_flag_set(tp, JUMBO_CAPABLE);
14236
14237         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14238                               &pci_state_reg);
14239
14240         if (pci_is_pcie(tp->pdev)) {
14241                 u16 lnkctl;
14242
14243                 tg3_flag_set(tp, PCI_EXPRESS);
14244
14245                 pci_read_config_word(tp->pdev,
14246                                      pci_pcie_cap(tp->pdev) + PCI_EXP_LNKCTL,
14247                                      &lnkctl);
14248                 if (lnkctl & PCI_EXP_LNKCTL_CLKREQ_EN) {
14249                         if (GET_ASIC_REV(tp->pci_chip_rev_id) ==
14250                             ASIC_REV_5906) {
14251                                 tg3_flag_clear(tp, HW_TSO_2);
14252                                 tg3_flag_clear(tp, TSO_CAPABLE);
14253                         }
14254                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14255                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
14256                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A0 ||
14257                             tp->pci_chip_rev_id == CHIPREV_ID_57780_A1)
14258                                 tg3_flag_set(tp, CLKREQ_BUG);
14259                 } else if (tp->pci_chip_rev_id == CHIPREV_ID_5717_A0) {
14260                         tg3_flag_set(tp, L1PLLPD_EN);
14261                 }
14262         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785) {
14263                 /* BCM5785 devices are effectively PCIe devices, and should
14264                  * follow PCIe codepaths, but do not have a PCIe capabilities
14265                  * section.
14266                  */
14267                 tg3_flag_set(tp, PCI_EXPRESS);
14268         } else if (!tg3_flag(tp, 5705_PLUS) ||
14269                    tg3_flag(tp, 5780_CLASS)) {
14270                 tp->pcix_cap = pci_find_capability(tp->pdev, PCI_CAP_ID_PCIX);
14271                 if (!tp->pcix_cap) {
14272                         dev_err(&tp->pdev->dev,
14273                                 "Cannot find PCI-X capability, aborting\n");
14274                         return -EIO;
14275                 }
14276
14277                 if (!(pci_state_reg & PCISTATE_CONV_PCI_MODE))
14278                         tg3_flag_set(tp, PCIX_MODE);
14279         }
14280
14281         /* If we have an AMD 762 or VIA K8T800 chipset, write
14282          * reordering to the mailbox registers done by the host
14283          * controller can cause major problems.  We read back from
14284          * every mailbox register write to force the writes to be
14285          * posted to the chip in order.
14286          */
14287         if (pci_dev_present(tg3_write_reorder_chipsets) &&
14288             !tg3_flag(tp, PCI_EXPRESS))
14289                 tg3_flag_set(tp, MBOX_WRITE_REORDER);
14290
14291         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE,
14292                              &tp->pci_cacheline_sz);
14293         pci_read_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14294                              &tp->pci_lat_timer);
14295         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14296             tp->pci_lat_timer < 64) {
14297                 tp->pci_lat_timer = 64;
14298                 pci_write_config_byte(tp->pdev, PCI_LATENCY_TIMER,
14299                                       tp->pci_lat_timer);
14300         }
14301
14302         /* Important! -- It is critical that the PCI-X hw workaround
14303          * situation is decided before the first MMIO register access.
14304          */
14305         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5700_BX) {
14306                 /* 5700 BX chips need to have their TX producer index
14307                  * mailboxes written twice to workaround a bug.
14308                  */
14309                 tg3_flag_set(tp, TXD_MBOX_HWBUG);
14310
14311                 /* If we are in PCI-X mode, enable register write workaround.
14312                  *
14313                  * The workaround is to use indirect register accesses
14314                  * for all chip writes not to mailbox registers.
14315                  */
14316                 if (tg3_flag(tp, PCIX_MODE)) {
14317                         u32 pm_reg;
14318
14319                         tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14320
14321                         /* The chip can have its power management PCI config
14322                          * space registers clobbered due to this bug.
14323                          * So explicitly force the chip into D0 here.
14324                          */
14325                         pci_read_config_dword(tp->pdev,
14326                                               tp->pm_cap + PCI_PM_CTRL,
14327                                               &pm_reg);
14328                         pm_reg &= ~PCI_PM_CTRL_STATE_MASK;
14329                         pm_reg |= PCI_PM_CTRL_PME_ENABLE | 0 /* D0 */;
14330                         pci_write_config_dword(tp->pdev,
14331                                                tp->pm_cap + PCI_PM_CTRL,
14332                                                pm_reg);
14333
14334                         /* Also, force SERR#/PERR# in PCI command. */
14335                         pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14336                         pci_cmd |= PCI_COMMAND_PARITY | PCI_COMMAND_SERR;
14337                         pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14338                 }
14339         }
14340
14341         if ((pci_state_reg & PCISTATE_BUS_SPEED_HIGH) != 0)
14342                 tg3_flag_set(tp, PCI_HIGH_SPEED);
14343         if ((pci_state_reg & PCISTATE_BUS_32BIT) != 0)
14344                 tg3_flag_set(tp, PCI_32BIT);
14345
14346         /* Chip-specific fixup from Broadcom driver */
14347         if ((tp->pci_chip_rev_id == CHIPREV_ID_5704_A0) &&
14348             (!(pci_state_reg & PCISTATE_RETRY_SAME_DMA))) {
14349                 pci_state_reg |= PCISTATE_RETRY_SAME_DMA;
14350                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE, pci_state_reg);
14351         }
14352
14353         /* Default fast path register access methods */
14354         tp->read32 = tg3_read32;
14355         tp->write32 = tg3_write32;
14356         tp->read32_mbox = tg3_read32;
14357         tp->write32_mbox = tg3_write32;
14358         tp->write32_tx_mbox = tg3_write32;
14359         tp->write32_rx_mbox = tg3_write32;
14360
14361         /* Various workaround register access methods */
14362         if (tg3_flag(tp, PCIX_TARGET_HWBUG))
14363                 tp->write32 = tg3_write_indirect_reg32;
14364         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 ||
14365                  (tg3_flag(tp, PCI_EXPRESS) &&
14366                   tp->pci_chip_rev_id == CHIPREV_ID_5750_A0)) {
14367                 /*
14368                  * Back to back register writes can cause problems on these
14369                  * chips, the workaround is to read back all reg writes
14370                  * except those to mailbox regs.
14371                  *
14372                  * See tg3_write_indirect_reg32().
14373                  */
14374                 tp->write32 = tg3_write_flush_reg32;
14375         }
14376
14377         if (tg3_flag(tp, TXD_MBOX_HWBUG) || tg3_flag(tp, MBOX_WRITE_REORDER)) {
14378                 tp->write32_tx_mbox = tg3_write32_tx_mbox;
14379                 if (tg3_flag(tp, MBOX_WRITE_REORDER))
14380                         tp->write32_rx_mbox = tg3_write_flush_reg32;
14381         }
14382
14383         if (tg3_flag(tp, ICH_WORKAROUND)) {
14384                 tp->read32 = tg3_read_indirect_reg32;
14385                 tp->write32 = tg3_write_indirect_reg32;
14386                 tp->read32_mbox = tg3_read_indirect_mbox;
14387                 tp->write32_mbox = tg3_write_indirect_mbox;
14388                 tp->write32_tx_mbox = tg3_write_indirect_mbox;
14389                 tp->write32_rx_mbox = tg3_write_indirect_mbox;
14390
14391                 iounmap(tp->regs);
14392                 tp->regs = NULL;
14393
14394                 pci_read_config_word(tp->pdev, PCI_COMMAND, &pci_cmd);
14395                 pci_cmd &= ~PCI_COMMAND_MEMORY;
14396                 pci_write_config_word(tp->pdev, PCI_COMMAND, pci_cmd);
14397         }
14398         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
14399                 tp->read32_mbox = tg3_read32_mbox_5906;
14400                 tp->write32_mbox = tg3_write32_mbox_5906;
14401                 tp->write32_tx_mbox = tg3_write32_mbox_5906;
14402                 tp->write32_rx_mbox = tg3_write32_mbox_5906;
14403         }
14404
14405         if (tp->write32 == tg3_write_indirect_reg32 ||
14406             (tg3_flag(tp, PCIX_MODE) &&
14407              (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14408               GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701)))
14409                 tg3_flag_set(tp, SRAM_USE_CONFIG);
14410
14411         /* The memory arbiter has to be enabled in order for SRAM accesses
14412          * to succeed.  Normally on powerup the tg3 chip firmware will make
14413          * sure it is enabled, but other entities such as system netboot
14414          * code might disable it.
14415          */
14416         val = tr32(MEMARB_MODE);
14417         tw32(MEMARB_MODE, val | MEMARB_MODE_ENABLE);
14418
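        /* Work out which PCI function this port is.  Some multi-port
         * chips report the function number via the PCI-X status
         * register or a CPMU status word in SRAM instead of devfn.
         */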
14419         tp->pci_fn = PCI_FUNC(tp->pdev->devfn) & 3;
14420         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14421             tg3_flag(tp, 5780_CLASS)) {
14422                 if (tg3_flag(tp, PCIX_MODE)) {
14423                         pci_read_config_dword(tp->pdev,
14424                                               tp->pcix_cap + PCI_X_STATUS,
14425                                               &val);
14426                         tp->pci_fn = val & 0x7;
14427                 }
14428         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717) {
14429                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14430                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14431                     NIC_SRAM_CPMUSTAT_SIG) {
14432                         tp->pci_fn = val & TG3_CPMU_STATUS_FMSK_5717;
14433                         tp->pci_fn = tp->pci_fn ? 1 : 0;
14434                 }
14435         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5719 ||
14436                    GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720) {
14437                 tg3_read_mem(tp, NIC_SRAM_CPMU_STATUS, &val);
14438                 if ((val & NIC_SRAM_CPMUSTAT_SIG_MSK) ==
14439                     NIC_SRAM_CPMUSTAT_SIG) {
14440                         tp->pci_fn = (val & TG3_CPMU_STATUS_FMSK_5719) >>
14441                                      TG3_CPMU_STATUS_FSHFT_5719;
14442                 }
14443         }
14444
14445         /* Get eeprom hw config before calling tg3_set_power_state().
14446          * In particular, the TG3_FLAG_IS_NIC flag must be
14447          * determined before calling tg3_set_power_state() so that
14448          * we know whether or not to switch out of Vaux power.
14449          * When the flag is set, it means that GPIO1 is used for eeprom
14450          * write protect and also implies that it is a LOM where GPIOs
14451          * are not used to switch power.
14452          */
14453         tg3_get_eeprom_hw_cfg(tp);
14454
14455         if (tp->fw_needed && tg3_flag(tp, ENABLE_ASF)) {
14456                 tg3_flag_clear(tp, TSO_CAPABLE);
14457                 tg3_flag_clear(tp, TSO_BUG);
14458                 tp->fw_needed = NULL;
14459         }
14460
14461         if (tg3_flag(tp, ENABLE_APE)) {
14462                 /* Allow reads and writes to the
14463                  * APE register and memory space.
14464                  */
14465                 pci_state_reg |= PCISTATE_ALLOW_APE_CTLSPC_WR |
14466                                  PCISTATE_ALLOW_APE_SHMEM_WR |
14467                                  PCISTATE_ALLOW_APE_PSPACE_WR;
14468                 pci_write_config_dword(tp->pdev, TG3PCI_PCISTATE,
14469                                        pci_state_reg);
14470
14471                 tg3_ape_lock_init(tp);
14472         }
14473
14474         /* Set up tp->grc_local_ctrl before calling
14475          * tg3_pwrsrc_switch_to_vmain().  GPIO1 driven high
14476          * will bring 5700's external PHY out of reset.
14477          * It is also used as eeprom write protect on LOMs.
14478          */
14479         tp->grc_local_ctrl = GRC_LCLCTRL_INT_ON_ATTN | GRC_LCLCTRL_AUTO_SEEPROM;
14480         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14481             tg3_flag(tp, EEPROM_WRITE_PROT))
14482                 tp->grc_local_ctrl |= (GRC_LCLCTRL_GPIO_OE1 |
14483                                        GRC_LCLCTRL_GPIO_OUTPUT1);
14484         /* Unused GPIO3 must be driven as output on 5752 because there
14485          * are no pull-up resistors on unused GPIO pins.
14486          */
14487         else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752)
14488                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE3;
14489
14490         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14491             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780 ||
14492             tg3_flag(tp, 57765_CLASS))
14493                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14494
14495         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
14496             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S) {
14497                 /* Turn off the debug UART. */
14498                 tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_UART_SEL;
14499                 if (tg3_flag(tp, IS_NIC))
14500                         /* Keep VMain power. */
14501                         tp->grc_local_ctrl |= GRC_LCLCTRL_GPIO_OE0 |
14502                                               GRC_LCLCTRL_GPIO_OUTPUT0;
14503         }
14504
14505         /* Switch out of Vaux if it is a NIC */
14506         tg3_pwrsrc_switch_to_vmain(tp);
14507
14508         /* Derive initial jumbo mode from MTU assigned in
14509          * ether_setup() via the alloc_etherdev() call
14510          */
14511         if (tp->dev->mtu > ETH_DATA_LEN && !tg3_flag(tp, 5780_CLASS))
14512                 tg3_flag_set(tp, JUMBO_RING_ENABLE);
14513
14514         /* Determine WakeOnLan speed to use. */
14515         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14516             tp->pci_chip_rev_id == CHIPREV_ID_5701_A0 ||
14517             tp->pci_chip_rev_id == CHIPREV_ID_5701_B0 ||
14518             tp->pci_chip_rev_id == CHIPREV_ID_5701_B2) {
14519                 tg3_flag_clear(tp, WOL_SPEED_100MB);
14520         } else {
14521                 tg3_flag_set(tp, WOL_SPEED_100MB);
14522         }
14523
14524         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14525                 tp->phy_flags |= TG3_PHYFLG_IS_FET;
14526
14527         /* A few boards don't want the Ethernet@WireSpeed PHY feature */
14528         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
14529             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14530              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A0) &&
14531              (tp->pci_chip_rev_id != CHIPREV_ID_5705_A1)) ||
14532             (tp->phy_flags & TG3_PHYFLG_IS_FET) ||
14533             (tp->phy_flags & TG3_PHYFLG_ANY_SERDES))
14534                 tp->phy_flags |= TG3_PHYFLG_NO_ETH_WIRE_SPEED;
14535
14536         if (GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5703_AX ||
14537             GET_CHIP_REV(tp->pci_chip_rev_id) == CHIPREV_5704_AX)
14538                 tp->phy_flags |= TG3_PHYFLG_ADC_BUG;
14539         if (tp->pci_chip_rev_id == CHIPREV_ID_5704_A0)
14540                 tp->phy_flags |= TG3_PHYFLG_5704_A0_BUG;
14541
14542         if (tg3_flag(tp, 5705_PLUS) &&
14543             !(tp->phy_flags & TG3_PHYFLG_IS_FET) &&
14544             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5785 &&
14545             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_57780 &&
14546             !tg3_flag(tp, 57765_PLUS)) {
14547                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755 ||
14548                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5787 ||
14549                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 ||
14550                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761) {
14551                         if (tp->pdev->device != PCI_DEVICE_ID_TIGON3_5756 &&
14552                             tp->pdev->device != PCI_DEVICE_ID_TIGON3_5722)
14553                                 tp->phy_flags |= TG3_PHYFLG_JITTER_BUG;
14554                         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5755M)
14555                                 tp->phy_flags |= TG3_PHYFLG_ADJUST_TRIM;
14556                 } else
14557                         tp->phy_flags |= TG3_PHYFLG_BER_BUG;
14558         }
14559
14560         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
14561             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) {
14562                 tp->phy_otp = tg3_read_otp_phycfg(tp);
14563                 if (tp->phy_otp == 0)
14564                         tp->phy_otp = TG3_OTP_DEFAULT;
14565         }
14566
14567         if (tg3_flag(tp, CPMU_PRESENT))
14568                 tp->mi_mode = MAC_MI_MODE_500KHZ_CONST;
14569         else
14570                 tp->mi_mode = MAC_MI_MODE_BASE;
14571
14572         tp->coalesce_mode = 0;
14573         if (GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_AX &&
14574             GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5700_BX)
14575                 tp->coalesce_mode |= HOSTCC_MODE_32BYTE;
14576
14577         /* Set these bits to enable statistics workaround. */
14578         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5717 ||
14579             tp->pci_chip_rev_id == CHIPREV_ID_5719_A0 ||
14580             tp->pci_chip_rev_id == CHIPREV_ID_5720_A0) {
14581                 tp->coalesce_mode |= HOSTCC_MODE_ATTN;
14582                 tp->grc_mode |= GRC_MODE_IRQ_ON_FLOW_ATTN;
14583         }
14584
14585         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
14586             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
14587                 tg3_flag_set(tp, USE_PHYLIB);
14588
14589         err = tg3_mdio_init(tp);
14590         if (err)
14591                 return err;
14592
14593         /* Initialize data/descriptor byte/word swapping. */
14594         val = tr32(GRC_MODE);
14595         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5720)
14596                 val &= (GRC_MODE_BYTE_SWAP_B2HRX_DATA |
14597                         GRC_MODE_WORD_SWAP_B2HRX_DATA |
14598                         GRC_MODE_B2HRX_ENABLE |
14599                         GRC_MODE_HTX2B_ENABLE |
14600                         GRC_MODE_HOST_STACKUP);
14601         else
14602                 val &= GRC_MODE_HOST_STACKUP;
14603
14604         tw32(GRC_MODE, val | tp->grc_mode);
14605
14606         tg3_switch_clocks(tp);
14607
14608         /* Clear this out for sanity. */
14609         tw32(TG3PCI_MEM_WIN_BASE_ADDR, 0);
14610
14611         pci_read_config_dword(tp->pdev, TG3PCI_PCISTATE,
14612                               &pci_state_reg);
14613         if ((pci_state_reg & PCISTATE_CONV_PCI_MODE) == 0 &&
14614             !tg3_flag(tp, PCIX_TARGET_HWBUG)) {
14615                 u32 chiprevid = GET_CHIP_REV_ID(tp->misc_host_ctrl);
14616
14617                 if (chiprevid == CHIPREV_ID_5701_A0 ||
14618                     chiprevid == CHIPREV_ID_5701_B0 ||
14619                     chiprevid == CHIPREV_ID_5701_B2 ||
14620                     chiprevid == CHIPREV_ID_5701_B5) {
14621                         void __iomem *sram_base;
14622
14623                         /* Write some dummy words into the SRAM status block
14624                          * area and see if they read back correctly.  If the return
14625                          * value is bad, force enable the PCIX workaround.
14626                          */
14627                         sram_base = tp->regs + NIC_SRAM_WIN_BASE + NIC_SRAM_STATS_BLK;
14628
14629                         writel(0x00000000, sram_base);
14630                         writel(0x00000000, sram_base + 4);
14631                         writel(0xffffffff, sram_base + 4);
14632                         if (readl(sram_base) != 0x00000000)
14633                                 tg3_flag_set(tp, PCIX_TARGET_HWBUG);
14634                 }
14635         }
14636
14637         udelay(50);
14638         tg3_nvram_init(tp);
14639
14640         grc_misc_cfg = tr32(GRC_MISC_CFG);
14641         grc_misc_cfg &= GRC_MISC_CFG_BOARD_ID_MASK;
14642
14643         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14644             (grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788 ||
14645              grc_misc_cfg == GRC_MISC_CFG_BOARD_ID_5788M))
14646                 tg3_flag_set(tp, IS_5788);
14647
14648         if (!tg3_flag(tp, IS_5788) &&
14649             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700)
14650                 tg3_flag_set(tp, TAGGED_STATUS);
14651         if (tg3_flag(tp, TAGGED_STATUS)) {
14652                 tp->coalesce_mode |= (HOSTCC_MODE_CLRTICK_RXBD |
14653                                       HOSTCC_MODE_CLRTICK_TXBD);
14654
14655                 tp->misc_host_ctrl |= MISC_HOST_CTRL_TAGGED_STATUS;
14656                 pci_write_config_dword(tp->pdev, TG3PCI_MISC_HOST_CTRL,
14657                                        tp->misc_host_ctrl);
14658         }
14659
14660         /* Preserve the APE MAC_MODE bits */
14661         if (tg3_flag(tp, ENABLE_APE))
14662                 tp->mac_mode = MAC_MODE_APE_TX_EN | MAC_MODE_APE_RX_EN;
14663         else
14664                 tp->mac_mode = 0;
14665
14666         /* these are limited to 10/100 only */
14667         if ((GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 &&
14668              (grc_misc_cfg == 0x8000 || grc_misc_cfg == 0x4000)) ||
14669             (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 &&
14670              tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14671              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901 ||
14672               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5901_2 ||
14673               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5705F)) ||
14674             (tp->pdev->vendor == PCI_VENDOR_ID_BROADCOM &&
14675              (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5751F ||
14676               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5753F ||
14677               tp->pdev->device == PCI_DEVICE_ID_TIGON3_5787F)) ||
14678             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57790 ||
14679             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57791 ||
14680             tp->pdev->device == TG3PCI_DEVICE_TIGON3_57795 ||
14681             (tp->phy_flags & TG3_PHYFLG_IS_FET))
14682                 tp->phy_flags |= TG3_PHYFLG_10_100_ONLY;
14683
14684         err = tg3_phy_probe(tp);
14685         if (err) {
14686                 dev_err(&tp->pdev->dev, "phy probe failed, err %d\n", err);
14687                 /* ... but do not return immediately ... */
14688                 tg3_mdio_fini(tp);
14689         }
14690
14691         tg3_read_vpd(tp);
14692         tg3_read_fw_ver(tp);
14693
14694         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES) {
14695                 tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14696         } else {
14697                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14698                         tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14699                 else
14700                         tp->phy_flags &= ~TG3_PHYFLG_USE_MI_INTERRUPT;
14701         }
14702
14703         /* 5700 {AX,BX} chips have a broken status block link
14704          * change bit implementation, so we must use the
14705          * status register in those cases.
14706          */
14707         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700)
14708                 tg3_flag_set(tp, USE_LINKCHG_REG);
14709         else
14710                 tg3_flag_clear(tp, USE_LINKCHG_REG);
14711
14712         /* The led_ctrl is set during tg3_phy_probe; here we might
14713          * have to force the link status polling mechanism based
14714          * upon subsystem IDs.
14715          */
14716         if (tp->pdev->subsystem_vendor == PCI_VENDOR_ID_DELL &&
14717             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14718             !(tp->phy_flags & TG3_PHYFLG_PHY_SERDES)) {
14719                 tp->phy_flags |= TG3_PHYFLG_USE_MI_INTERRUPT;
14720                 tg3_flag_set(tp, USE_LINKCHG_REG);
14721         }
14722
14723         /* For all SERDES we poll the MAC status register. */
14724         if (tp->phy_flags & TG3_PHYFLG_PHY_SERDES)
14725                 tg3_flag_set(tp, POLL_SERDES);
14726         else
14727                 tg3_flag_clear(tp, POLL_SERDES);
14728
14729         tp->rx_offset = NET_SKB_PAD + NET_IP_ALIGN;
14730         tp->rx_copy_thresh = TG3_RX_COPY_THRESHOLD;
14731         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701 &&
14732             tg3_flag(tp, PCIX_MODE)) {
14733                 tp->rx_offset = NET_SKB_PAD;
14734 #ifndef CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS
14735                 tp->rx_copy_thresh = ~(u16)0;
14736 #endif
14737         }
14738
14739         tp->rx_std_ring_mask = TG3_RX_STD_RING_SIZE(tp) - 1;
14740         tp->rx_jmb_ring_mask = TG3_RX_JMB_RING_SIZE(tp) - 1;
14741         tp->rx_ret_ring_mask = tg3_rx_ret_ring_size(tp) - 1;
14742
14743         tp->rx_std_max_post = tp->rx_std_ring_mask + 1;
14744
14745         /* Increment the rx prod index on the rx std ring by at most
14746          * 8 for these chips to work around hw errata.
14747          */
14748         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750 ||
14749             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5752 ||
14750             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5755)
14751                 tp->rx_std_max_post = 8;
14752
14753         if (tg3_flag(tp, ASPM_WORKAROUND))
14754                 tp->pwrmgmt_thresh = tr32(PCIE_PWR_MGMT_THRESH) &
14755                                      PCIE_PWR_MGMT_L1_THRESH_MSK;
14756
14757         return err;
14758 }
14759
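/* On SPARC the MAC address comes from the OpenFirmware
 * "local-mac-address" property, with the system IDPROM as a fallback.
 */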
14760 #ifdef CONFIG_SPARC
14761 static int __devinit tg3_get_macaddr_sparc(struct tg3 *tp)
14762 {
14763         struct net_device *dev = tp->dev;
14764         struct pci_dev *pdev = tp->pdev;
14765         struct device_node *dp = pci_device_to_OF_node(pdev);
14766         const unsigned char *addr;
14767         int len;
14768
14769         addr = of_get_property(dp, "local-mac-address", &len);
14770         if (addr && len == 6) {
14771                 memcpy(dev->dev_addr, addr, 6);
14772                 memcpy(dev->perm_addr, dev->dev_addr, 6);
14773                 return 0;
14774         }
14775         return -ENODEV;
14776 }
14777
14778 static int __devinit tg3_get_default_macaddr_sparc(struct tg3 *tp)
14779 {
14780         struct net_device *dev = tp->dev;
14781
14782         memcpy(dev->dev_addr, idprom->id_ethaddr, 6);
14783         memcpy(dev->perm_addr, idprom->id_ethaddr, 6);
14784         return 0;
14785 }
14786 #endif
14787
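/* Obtain the permanent MAC address, trying in order: platform data
 * (SPARC), the SRAM MAC address mailbox, NVRAM at a chip-specific
 * offset, and finally the MAC_ADDR_0 hardware registers.
 */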
14788 static int __devinit tg3_get_device_address(struct tg3 *tp)
14789 {
14790         struct net_device *dev = tp->dev;
14791         u32 hi, lo, mac_offset;
14792         int addr_ok = 0;
14793
14794 #ifdef CONFIG_SPARC
14795         if (!tg3_get_macaddr_sparc(tp))
14796                 return 0;
14797 #endif
14798
14799         mac_offset = 0x7c;
14800         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704 ||
14801             tg3_flag(tp, 5780_CLASS)) {
14802                 if (tr32(TG3PCI_DUAL_MAC_CTRL) & DUAL_MAC_CTRL_ID)
14803                         mac_offset = 0xcc;
14804                 if (tg3_nvram_lock(tp))
14805                         tw32_f(NVRAM_CMD, NVRAM_CMD_RESET);
14806                 else
14807                         tg3_nvram_unlock(tp);
14808         } else if (tg3_flag(tp, 5717_PLUS)) {
14809                 if (tp->pci_fn & 1)
14810                         mac_offset = 0xcc;
14811                 if (tp->pci_fn > 1)
14812                         mac_offset += 0x18c;
14813         } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906)
14814                 mac_offset = 0x10;
14815
14816         /* First try to get it from MAC address mailbox. */
14817         tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_HIGH_MBOX, &hi);
14818         if ((hi >> 16) == 0x484b) {
14819                 dev->dev_addr[0] = (hi >>  8) & 0xff;
14820                 dev->dev_addr[1] = (hi >>  0) & 0xff;
14821
14822                 tg3_read_mem(tp, NIC_SRAM_MAC_ADDR_LOW_MBOX, &lo);
14823                 dev->dev_addr[2] = (lo >> 24) & 0xff;
14824                 dev->dev_addr[3] = (lo >> 16) & 0xff;
14825                 dev->dev_addr[4] = (lo >>  8) & 0xff;
14826                 dev->dev_addr[5] = (lo >>  0) & 0xff;
14827
14828                 /* Some old bootcode may report a 0 MAC address in SRAM */
14829                 addr_ok = is_valid_ether_addr(&dev->dev_addr[0]);
14830         }
14831         if (!addr_ok) {
14832                 /* Next, try NVRAM. */
14833                 if (!tg3_flag(tp, NO_NVRAM) &&
14834                     !tg3_nvram_read_be32(tp, mac_offset + 0, &hi) &&
14835                     !tg3_nvram_read_be32(tp, mac_offset + 4, &lo)) {
14836                         memcpy(&dev->dev_addr[0], ((char *)&hi) + 2, 2);
14837                         memcpy(&dev->dev_addr[2], (char *)&lo, sizeof(lo));
14838                 }
14839                 /* Finally just fetch it out of the MAC control regs. */
14840                 else {
14841                         hi = tr32(MAC_ADDR_0_HIGH);
14842                         lo = tr32(MAC_ADDR_0_LOW);
14843
14844                         dev->dev_addr[5] = lo & 0xff;
14845                         dev->dev_addr[4] = (lo >> 8) & 0xff;
14846                         dev->dev_addr[3] = (lo >> 16) & 0xff;
14847                         dev->dev_addr[2] = (lo >> 24) & 0xff;
14848                         dev->dev_addr[1] = hi & 0xff;
14849                         dev->dev_addr[0] = (hi >> 8) & 0xff;
14850                 }
14851         }
14852
14853         if (!is_valid_ether_addr(&dev->dev_addr[0])) {
14854 #ifdef CONFIG_SPARC
14855                 if (!tg3_get_default_macaddr_sparc(tp))
14856                         return 0;
14857 #endif
14858                 return -EINVAL;
14859         }
14860         memcpy(dev->perm_addr, dev->dev_addr, dev->addr_len);
14861         return 0;
14862 }
14863
14864 #define BOUNDARY_SINGLE_CACHELINE       1
14865 #define BOUNDARY_MULTI_CACHELINE        2
14866
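/* Compute the DMA read/write boundary bits for DMA_RWCTRL from the
 * host cache line size and a per-architecture burst preference.
 */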
14867 static u32 __devinit tg3_calc_dma_bndry(struct tg3 *tp, u32 val)
14868 {
14869         int cacheline_size;
14870         u8 byte;
14871         int goal;
14872
14873         pci_read_config_byte(tp->pdev, PCI_CACHE_LINE_SIZE, &byte);
14874         if (byte == 0)
14875                 cacheline_size = 1024;
14876         else
14877                 cacheline_size = (int) byte * 4;
14878
14879         /* On 5703 and later chips, the boundary bits have no
14880          * effect.
14881          */
14882         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
14883             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701 &&
14884             !tg3_flag(tp, PCI_EXPRESS))
14885                 goto out;
14886
14887 #if defined(CONFIG_PPC64) || defined(CONFIG_IA64) || defined(CONFIG_PARISC)
14888         goal = BOUNDARY_MULTI_CACHELINE;
14889 #else
14890 #if defined(CONFIG_SPARC64) || defined(CONFIG_ALPHA)
14891         goal = BOUNDARY_SINGLE_CACHELINE;
14892 #else
14893         goal = 0;
14894 #endif
14895 #endif
14896
14897         if (tg3_flag(tp, 57765_PLUS)) {
14898                 val = goal ? 0 : DMA_RWCTRL_DIS_CACHE_ALIGNMENT;
14899                 goto out;
14900         }
14901
14902         if (!goal)
14903                 goto out;
14904
14905         /* PCI controllers on most RISC systems tend to disconnect
14906          * when a device tries to burst across a cache-line boundary.
14907          * Therefore, letting tg3 do so just wastes PCI bandwidth.
14908          *
14909          * Unfortunately, for PCI-E there are only limited
14910          * write-side controls for this, and thus for reads
14911          * we will still get the disconnects.  We'll also waste
14912          * these PCI cycles for both read and write for chips
14913          * other than 5700 and 5701 which do not implement the
14914          * boundary bits.
14915          */
14916         if (tg3_flag(tp, PCIX_MODE) && !tg3_flag(tp, PCI_EXPRESS)) {
14917                 switch (cacheline_size) {
14918                 case 16:
14919                 case 32:
14920                 case 64:
14921                 case 128:
14922                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14923                                 val |= (DMA_RWCTRL_READ_BNDRY_128_PCIX |
14924                                         DMA_RWCTRL_WRITE_BNDRY_128_PCIX);
14925                         } else {
14926                                 val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14927                                         DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14928                         }
14929                         break;
14930
14931                 case 256:
14932                         val |= (DMA_RWCTRL_READ_BNDRY_256_PCIX |
14933                                 DMA_RWCTRL_WRITE_BNDRY_256_PCIX);
14934                         break;
14935
14936                 default:
14937                         val |= (DMA_RWCTRL_READ_BNDRY_384_PCIX |
14938                                 DMA_RWCTRL_WRITE_BNDRY_384_PCIX);
14939                         break;
14940                 }
14941         } else if (tg3_flag(tp, PCI_EXPRESS)) {
14942                 switch (cacheline_size) {
14943                 case 16:
14944                 case 32:
14945                 case 64:
14946                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14947                                 val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14948                                 val |= DMA_RWCTRL_WRITE_BNDRY_64_PCIE;
14949                                 break;
14950                         }
14951                         /* fallthrough */
14952                 case 128:
14953                 default:
14954                         val &= ~DMA_RWCTRL_WRITE_BNDRY_DISAB_PCIE;
14955                         val |= DMA_RWCTRL_WRITE_BNDRY_128_PCIE;
14956                         break;
14957                 }
14958         } else {
14959                 switch (cacheline_size) {
14960                 case 16:
14961                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14962                                 val |= (DMA_RWCTRL_READ_BNDRY_16 |
14963                                         DMA_RWCTRL_WRITE_BNDRY_16);
14964                                 break;
14965                         }
14966                         /* fallthrough */
14967                 case 32:
14968                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14969                                 val |= (DMA_RWCTRL_READ_BNDRY_32 |
14970                                         DMA_RWCTRL_WRITE_BNDRY_32);
14971                                 break;
14972                         }
14973                         /* fallthrough */
14974                 case 64:
14975                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14976                                 val |= (DMA_RWCTRL_READ_BNDRY_64 |
14977                                         DMA_RWCTRL_WRITE_BNDRY_64);
14978                                 break;
14979                         }
14980                         /* fallthrough */
14981                 case 128:
14982                         if (goal == BOUNDARY_SINGLE_CACHELINE) {
14983                                 val |= (DMA_RWCTRL_READ_BNDRY_128 |
14984                                         DMA_RWCTRL_WRITE_BNDRY_128);
14985                                 break;
14986                         }
14987                         /* fallthrough */
14988                 case 256:
14989                         val |= (DMA_RWCTRL_READ_BNDRY_256 |
14990                                 DMA_RWCTRL_WRITE_BNDRY_256);
14991                         break;
14992                 case 512:
14993                         val |= (DMA_RWCTRL_READ_BNDRY_512 |
14994                                 DMA_RWCTRL_WRITE_BNDRY_512);
14995                         break;
14996                 case 1024:
14997                 default:
14998                         val |= (DMA_RWCTRL_READ_BNDRY_1024 |
14999                                 DMA_RWCTRL_WRITE_BNDRY_1024);
15000                         break;
15001                 }
15002         }
15003
15004 out:
15005         return val;
15006 }
15007
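/* Run a single test DMA through the chip's internal descriptor engine,
 * either to or from the host buffer, and poll the completion FIFO for
 * up to 4ms.  Returns 0 on success or -ENODEV on timeout.
 */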
15008 static int __devinit tg3_do_test_dma(struct tg3 *tp, u32 *buf, dma_addr_t buf_dma, int size, int to_device)
15009 {
15010         struct tg3_internal_buffer_desc test_desc;
15011         u32 sram_dma_descs;
15012         int i, ret;
15013
15014         sram_dma_descs = NIC_SRAM_DMA_DESC_POOL_BASE;
15015
15016         tw32(FTQ_RCVBD_COMP_FIFO_ENQDEQ, 0);
15017         tw32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ, 0);
15018         tw32(RDMAC_STATUS, 0);
15019         tw32(WDMAC_STATUS, 0);
15020
15021         tw32(BUFMGR_MODE, 0);
15022         tw32(FTQ_RESET, 0);
15023
15024         test_desc.addr_hi = ((u64) buf_dma) >> 32;
15025         test_desc.addr_lo = buf_dma & 0xffffffff;
15026         test_desc.nic_mbuf = 0x00002100;
15027         test_desc.len = size;
15028
15029         /*
15030          * The HP ZX1 was seeing test failures for 5701 cards running
15031          * at 33MHz the *second* time the tg3 driver was loaded after
15032          * an initial scan.
15033          *
15034          * Broadcom tells me:
15035          *   ...the DMA engine is connected to the GRC block and a DMA
15036          *   reset may affect the GRC block in some unpredictable way...
15037          *   The behavior of resets to individual blocks has not been tested.
15038          *
15039          * Broadcom noted the GRC reset will also reset all sub-components.
15040          */
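        /* cqid_sqid evidently packs the completion queue ID (high byte)
         * and send queue ID (low byte) used for the direction under
         * test: the read DMA engine for host-to-device, the write DMA
         * engine for device-to-host.
         */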
15041         if (to_device) {
15042                 test_desc.cqid_sqid = (13 << 8) | 2;
15043
15044                 tw32_f(RDMAC_MODE, RDMAC_MODE_ENABLE);
15045                 udelay(40);
15046         } else {
15047                 test_desc.cqid_sqid = (16 << 8) | 7;
15048
15049                 tw32_f(WDMAC_MODE, WDMAC_MODE_ENABLE);
15050                 udelay(40);
15051         }
15052         test_desc.flags = 0x00000005;
15053
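        /* Copy the descriptor into NIC SRAM one 32-bit word at a time
         * through the indirect memory window: point
         * TG3PCI_MEM_WIN_BASE_ADDR at the target SRAM offset, then write
         * the word via TG3PCI_MEM_WIN_DATA.
         */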
15054         for (i = 0; i < (sizeof(test_desc) / sizeof(u32)); i++) {
15055                 u32 val;
15056
15057                 val = *(((u32 *)&test_desc) + i);
15058                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR,
15059                                        sram_dma_descs + (i * sizeof(u32)));
15060                 pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_DATA, val);
15061         }
15062         pci_write_config_dword(tp->pdev, TG3PCI_MEM_WIN_BASE_ADDR, 0);
15063
15064         if (to_device)
15065                 tw32(FTQ_DMA_HIGH_READ_FIFO_ENQDEQ, sram_dma_descs);
15066         else
15067                 tw32(FTQ_DMA_HIGH_WRITE_FIFO_ENQDEQ, sram_dma_descs);
15068
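        /* Poll the completion FIFO for the descriptor index to come
         * back; 40 iterations of udelay(100) bounds the wait at roughly
         * 4 ms.
         */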
15069         ret = -ENODEV;
15070         for (i = 0; i < 40; i++) {
15071                 u32 val;
15072
15073                 if (to_device)
15074                         val = tr32(FTQ_RCVBD_COMP_FIFO_ENQDEQ);
15075                 else
15076                         val = tr32(FTQ_RCVDATA_COMP_FIFO_ENQDEQ);
15077                 if ((val & 0xffff) == sram_dma_descs) {
15078                         ret = 0;
15079                         break;
15080                 }
15081
15082                 udelay(100);
15083         }
15084
15085         return ret;
15086 }
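
#if 0
/* Usage sketch (illustrative only, not built): a minimal host-to-device,
 * device-to-host round trip through tg3_do_test_dma(), mirroring what
 * tg3_test_dma() does below with a pattern-filled buffer.
 */
static int __devinit tg3_dma_round_trip(struct tg3 *tp, u32 *buf,
                                        dma_addr_t buf_dma, int size)
{
        int ret = tg3_do_test_dma(tp, buf, buf_dma, size, 1); /* to chip */

        if (ret)
                return ret;
        return tg3_do_test_dma(tp, buf, buf_dma, size, 0); /* from chip */
}
#endif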
15087
15088 #define TEST_BUFFER_SIZE        0x2000
15089
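/* Host bridges known to expose the 5700/5701 write DMA bug even though
 * the DMA test below passes; tg3_test_dma() checks this table with
 * pci_dev_present() and keeps the 16-byte write boundary workaround.
 */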
15090 static DEFINE_PCI_DEVICE_TABLE(tg3_dma_wait_state_chipsets) = {
15091         { PCI_DEVICE(PCI_VENDOR_ID_APPLE, PCI_DEVICE_ID_APPLE_UNI_N_PCI15) },
15092         { },
15093 };
15094
15095 static int __devinit tg3_test_dma(struct tg3 *tp)
15096 {
15097         dma_addr_t buf_dma;
15098         u32 *buf, saved_dma_rwctrl;
15099         int ret = 0;
15100
15101         buf = dma_alloc_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE,
15102                                  &buf_dma, GFP_KERNEL);
15103         if (!buf) {
15104                 ret = -ENOMEM;
15105                 goto out_nofree;
15106         }
15107
15108         tp->dma_rwctrl = ((0x7 << DMA_RWCTRL_PCI_WRITE_CMD_SHIFT) |
15109                           (0x6 << DMA_RWCTRL_PCI_READ_CMD_SHIFT));
15110
15111         tp->dma_rwctrl = tg3_calc_dma_bndry(tp, tp->dma_rwctrl);
15112
15113         if (tg3_flag(tp, 57765_PLUS))
15114                 goto out;
15115
15116         if (tg3_flag(tp, PCI_EXPRESS)) {
15117                 /* DMA read watermark not used on PCIE */
15118                 tp->dma_rwctrl |= 0x00180000;
15119         } else if (!tg3_flag(tp, PCIX_MODE)) {
15120                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5705 ||
15121                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5750)
15122                         tp->dma_rwctrl |= 0x003f0000;
15123                 else
15124                         tp->dma_rwctrl |= 0x003f000f;
15125         } else {
15126                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15127                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704) {
15128                         u32 ccval = (tr32(TG3PCI_CLOCK_CTRL) & 0x1f);
15129                         u32 read_water = 0x7;
15130
15131                         /* If the 5704 is behind the EPB bridge, we can
15132                          * do the less restrictive ONE_DMA workaround for
15133                          * better performance.
15134                          */
15135                         if (tg3_flag(tp, 40BIT_DMA_BUG) &&
15136                             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15137                                 tp->dma_rwctrl |= 0x8000;
15138                         else if (ccval == 0x6 || ccval == 0x7)
15139                                 tp->dma_rwctrl |= DMA_RWCTRL_ONE_DMA;
15140
15141                         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703)
15142                                 read_water = 4;
15143                         /* Set bit 23 to enable PCIX hw bug fix */
15144                         tp->dma_rwctrl |=
15145                                 (read_water << DMA_RWCTRL_READ_WATER_SHIFT) |
15146                                 (0x3 << DMA_RWCTRL_WRITE_WATER_SHIFT) |
15147                                 (1 << 23);
15148                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5780) {
15149                         /* 5780 always in PCIX mode */
15150                         tp->dma_rwctrl |= 0x00144000;
15151                 } else if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5714) {
15152                         /* 5714 always in PCIX mode */
15153                         tp->dma_rwctrl |= 0x00148000;
15154                 } else {
15155                         tp->dma_rwctrl |= 0x001b000f;
15156                 }
15157         }
15158
15159         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5703 ||
15160             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5704)
15161                 tp->dma_rwctrl &= 0xfffffff0;
15162
15163         if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5700 ||
15164             GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5701) {
15165                 /* Remove this if it causes problems for some boards. */
15166                 tp->dma_rwctrl |= DMA_RWCTRL_USE_MEM_READ_MULT;
15167
15168                 /* On 5700/5701 chips, we need to set this bit.
15169                  * Otherwise the chip will issue cacheline transactions
15170                  * to streamable DMA memory with not all the byte
15171                  * enables turned on.  This is an error on several
15172                  * RISC PCI controllers, in particular sparc64.
15173                  *
15174                  * On 5703/5704 chips, this bit has been reassigned
15175                  * a different meaning.  In particular, it is used
15176                  * on those chips to enable a PCI-X workaround.
15177                  */
15178                 tp->dma_rwctrl |= DMA_RWCTRL_ASSERT_ALL_BE;
15179         }
15180
15181         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15182
15183 #if 0
15184         /* Unneeded, already done by tg3_get_invariants.  */
15185         tg3_switch_clocks(tp);
15186 #endif
15187
15188         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5700 &&
15189             GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5701)
15190                 goto out;
15191
15192         /* It is best to perform the DMA test with maximum write burst size
15193          * to expose the 5700/5701 write DMA bug.
15194          */
15195         saved_dma_rwctrl = tp->dma_rwctrl;
15196         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15197         tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15198
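        /* Write a known pattern to the chip and read it back.  On the
         * first corrupted word, tighten the write boundary to 16 bytes
         * and retry; fail only if corruption persists at that setting.
         */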
15199         while (1) {
15200                 u32 *p = buf, i;
15201
15202                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++)
15203                         p[i] = i;
15204
15205                 /* Send the buffer to the chip. */
15206                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 1);
15207                 if (ret) {
15208                         dev_err(&tp->pdev->dev,
15209                                 "%s: Buffer write failed. err = %d\n",
15210                                 __func__, ret);
15211                         break;
15212                 }
15213
15214 #if 0
15215                 /* Validate that the data reached card RAM correctly. */
15216                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15217                         u32 val;
15218                         tg3_read_mem(tp, 0x2100 + (i*4), &val);
15219                         if (le32_to_cpu(val) != p[i]) {
15220                                 dev_err(&tp->pdev->dev,
15221                                         "%s: Buffer corrupted on device! "
15222                                         "(%d != %d)\n", __func__, le32_to_cpu(val), i);
15223                                 /* ret = -ENODEV here? */
15224                         }
15225                         p[i] = 0;
15226                 }
15227 #endif
15228                 /* Now read it back. */
15229                 ret = tg3_do_test_dma(tp, buf, buf_dma, TEST_BUFFER_SIZE, 0);
15230                 if (ret) {
15231                         dev_err(&tp->pdev->dev, "%s: Buffer read failed. "
15232                                 "err = %d\n", __func__, ret);
15233                         break;
15234                 }
15235
15236                 /* Verify it. */
15237                 for (i = 0; i < TEST_BUFFER_SIZE / sizeof(u32); i++) {
15238                         if (p[i] == i)
15239                                 continue;
15240
15241                         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15242                             DMA_RWCTRL_WRITE_BNDRY_16) {
15243                                 tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15244                                 tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15245                                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15246                                 break;
15247                         } else {
15248                                 dev_err(&tp->pdev->dev,
15249                                         "%s: Buffer corrupted on read back! "
15250                                         "(%d != %d)\n", __func__, p[i], i);
15251                                 ret = -ENODEV;
15252                                 goto out;
15253                         }
15254                 }
15255
15256                 if (i == (TEST_BUFFER_SIZE / sizeof(u32))) {
15257                         /* Success. */
15258                         ret = 0;
15259                         break;
15260                 }
15261         }
15262         if ((tp->dma_rwctrl & DMA_RWCTRL_WRITE_BNDRY_MASK) !=
15263             DMA_RWCTRL_WRITE_BNDRY_16) {
15264                 /* The DMA test passed without adjusting the DMA boundary;
15265                  * now look for chipsets that are known to expose the
15266                  * DMA bug without failing the test.
15267                  */
15268                 if (pci_dev_present(tg3_dma_wait_state_chipsets)) {
15269                         tp->dma_rwctrl &= ~DMA_RWCTRL_WRITE_BNDRY_MASK;
15270                         tp->dma_rwctrl |= DMA_RWCTRL_WRITE_BNDRY_16;
15271                 } else {
15272                         /* Safe to use the calculated DMA boundary. */
15273                         tp->dma_rwctrl = saved_dma_rwctrl;
15274                 }
15275
15276                 tw32(TG3PCI_DMA_RW_CTRL, tp->dma_rwctrl);
15277         }
15278
15279 out:
15280         dma_free_coherent(&tp->pdev->dev, TEST_BUFFER_SIZE, buf, buf_dma);
15281 out_nofree:
15282         return ret;
15283 }
15284
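/* Select buffer manager watermarks by chip family; jumbo frames get
 * their own set of thresholds.
 */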
15285 static void __devinit tg3_init_bufmgr_config(struct tg3 *tp)
15286 {
15287         if (tg3_flag(tp, 57765_PLUS)) {
15288                 tp->bufmgr_config.mbuf_read_dma_low_water =
15289                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15290                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15291                         DEFAULT_MB_MACRX_LOW_WATER_57765;
15292                 tp->bufmgr_config.mbuf_high_water =
15293                         DEFAULT_MB_HIGH_WATER_57765;
15294
15295                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15296                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15297                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15298                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_57765;
15299                 tp->bufmgr_config.mbuf_high_water_jumbo =
15300                         DEFAULT_MB_HIGH_WATER_JUMBO_57765;
15301         } else if (tg3_flag(tp, 5705_PLUS)) {
15302                 tp->bufmgr_config.mbuf_read_dma_low_water =
15303                         DEFAULT_MB_RDMA_LOW_WATER_5705;
15304                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15305                         DEFAULT_MB_MACRX_LOW_WATER_5705;
15306                 tp->bufmgr_config.mbuf_high_water =
15307                         DEFAULT_MB_HIGH_WATER_5705;
15308                 if (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5906) {
15309                         tp->bufmgr_config.mbuf_mac_rx_low_water =
15310                                 DEFAULT_MB_MACRX_LOW_WATER_5906;
15311                         tp->bufmgr_config.mbuf_high_water =
15312                                 DEFAULT_MB_HIGH_WATER_5906;
15313                 }
15314
15315                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15316                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO_5780;
15317                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15318                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO_5780;
15319                 tp->bufmgr_config.mbuf_high_water_jumbo =
15320                         DEFAULT_MB_HIGH_WATER_JUMBO_5780;
15321         } else {
15322                 tp->bufmgr_config.mbuf_read_dma_low_water =
15323                         DEFAULT_MB_RDMA_LOW_WATER;
15324                 tp->bufmgr_config.mbuf_mac_rx_low_water =
15325                         DEFAULT_MB_MACRX_LOW_WATER;
15326                 tp->bufmgr_config.mbuf_high_water =
15327                         DEFAULT_MB_HIGH_WATER;
15328
15329                 tp->bufmgr_config.mbuf_read_dma_low_water_jumbo =
15330                         DEFAULT_MB_RDMA_LOW_WATER_JUMBO;
15331                 tp->bufmgr_config.mbuf_mac_rx_low_water_jumbo =
15332                         DEFAULT_MB_MACRX_LOW_WATER_JUMBO;
15333                 tp->bufmgr_config.mbuf_high_water_jumbo =
15334                         DEFAULT_MB_HIGH_WATER_JUMBO;
15335         }
15336
15337         tp->bufmgr_config.dma_low_water = DEFAULT_DMA_LOW_WATER;
15338         tp->bufmgr_config.dma_high_water = DEFAULT_DMA_HIGH_WATER;
15339 }
15340
15341 static char * __devinit tg3_phy_string(struct tg3 *tp)
15342 {
15343         switch (tp->phy_id & TG3_PHY_ID_MASK) {
15344         case TG3_PHY_ID_BCM5400:        return "5400";
15345         case TG3_PHY_ID_BCM5401:        return "5401";
15346         case TG3_PHY_ID_BCM5411:        return "5411";
15347         case TG3_PHY_ID_BCM5701:        return "5701";
15348         case TG3_PHY_ID_BCM5703:        return "5703";
15349         case TG3_PHY_ID_BCM5704:        return "5704";
15350         case TG3_PHY_ID_BCM5705:        return "5705";
15351         case TG3_PHY_ID_BCM5750:        return "5750";
15352         case TG3_PHY_ID_BCM5752:        return "5752";
15353         case TG3_PHY_ID_BCM5714:        return "5714";
15354         case TG3_PHY_ID_BCM5780:        return "5780";
15355         case TG3_PHY_ID_BCM5755:        return "5755";
15356         case TG3_PHY_ID_BCM5787:        return "5787";
15357         case TG3_PHY_ID_BCM5784:        return "5784";
15358         case TG3_PHY_ID_BCM5756:        return "5722/5756";
15359         case TG3_PHY_ID_BCM5906:        return "5906";
15360         case TG3_PHY_ID_BCM5761:        return "5761";
15361         case TG3_PHY_ID_BCM5718C:       return "5718C";
15362         case TG3_PHY_ID_BCM5718S:       return "5718S";
15363         case TG3_PHY_ID_BCM57765:       return "57765";
15364         case TG3_PHY_ID_BCM5719C:       return "5719C";
15365         case TG3_PHY_ID_BCM5720C:       return "5720C";
15366         case TG3_PHY_ID_BCM8002:        return "8002/serdes";
15367         case 0:                 return "serdes";
15368         default:                return "unknown";
15369         }
15370 }
15371
15372 static char * __devinit tg3_bus_string(struct tg3 *tp, char *str)
15373 {
15374         if (tg3_flag(tp, PCI_EXPRESS)) {
15375                 strcpy(str, "PCI Express");
15376                 return str;
15377         } else if (tg3_flag(tp, PCIX_MODE)) {
15378                 u32 clock_ctrl = tr32(TG3PCI_CLOCK_CTRL) & 0x1f;
15379
15380                 strcpy(str, "PCIX:");
15381
15382                 if ((clock_ctrl == 7) ||
15383                     ((tr32(GRC_MISC_CFG) & GRC_MISC_CFG_BOARD_ID_MASK) ==
15384                      GRC_MISC_CFG_BOARD_ID_5704CIOBE))
15385                         strcat(str, "133MHz");
15386                 else if (clock_ctrl == 0)
15387                         strcat(str, "33MHz");
15388                 else if (clock_ctrl == 2)
15389                         strcat(str, "50MHz");
15390                 else if (clock_ctrl == 4)
15391                         strcat(str, "66MHz");
15392                 else if (clock_ctrl == 6)
15393                         strcat(str, "100MHz");
15394         } else {
15395                 strcpy(str, "PCI:");
15396                 if (tg3_flag(tp, PCI_HIGH_SPEED))
15397                         strcat(str, "66MHz");
15398                 else
15399                         strcat(str, "33MHz");
15400         }
15401         if (tg3_flag(tp, PCI_32BIT))
15402                 strcat(str, ":32-bit");
15403         else
15404                 strcat(str, ":64-bit");
15405         return str;
15406 }
15407
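/* Populate the coalescing defaults reported through ETHTOOL_GCOALESCE.
 * The 5705 and later do not use the irq-context and statistics-block
 * coalescing knobs, so those fields are zeroed for them.
 */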
15408 static void __devinit tg3_init_coal(struct tg3 *tp)
15409 {
15410         struct ethtool_coalesce *ec = &tp->coal;
15411
15412         memset(ec, 0, sizeof(*ec));
15413         ec->cmd = ETHTOOL_GCOALESCE;
15414         ec->rx_coalesce_usecs = LOW_RXCOL_TICKS;
15415         ec->tx_coalesce_usecs = LOW_TXCOL_TICKS;
15416         ec->rx_max_coalesced_frames = LOW_RXMAX_FRAMES;
15417         ec->tx_max_coalesced_frames = LOW_TXMAX_FRAMES;
15418         ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT;
15419         ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT;
15420         ec->rx_max_coalesced_frames_irq = DEFAULT_RXCOAL_MAXF_INT;
15421         ec->tx_max_coalesced_frames_irq = DEFAULT_TXCOAL_MAXF_INT;
15422         ec->stats_block_coalesce_usecs = DEFAULT_STAT_COAL_TICKS;
15423
15424         if (tp->coalesce_mode & (HOSTCC_MODE_CLRTICK_RXBD |
15425                                  HOSTCC_MODE_CLRTICK_TXBD)) {
15426                 ec->rx_coalesce_usecs = LOW_RXCOL_TICKS_CLRTCKS;
15427                 ec->rx_coalesce_usecs_irq = DEFAULT_RXCOAL_TICK_INT_CLRTCKS;
15428                 ec->tx_coalesce_usecs = LOW_TXCOL_TICKS_CLRTCKS;
15429                 ec->tx_coalesce_usecs_irq = DEFAULT_TXCOAL_TICK_INT_CLRTCKS;
15430         }
15431
15432         if (tg3_flag(tp, 5705_PLUS)) {
15433                 ec->rx_coalesce_usecs_irq = 0;
15434                 ec->tx_coalesce_usecs_irq = 0;
15435                 ec->stats_block_coalesce_usecs = 0;
15436         }
15437 }
15438
15439 static int __devinit tg3_init_one(struct pci_dev *pdev,
15440                                   const struct pci_device_id *ent)
15441 {
15442         struct net_device *dev;
15443         struct tg3 *tp;
15444         int i, err, pm_cap;
15445         u32 sndmbx, rcvmbx, intmbx;
15446         char str[40];
15447         u64 dma_mask, persist_dma_mask;
15448         netdev_features_t features = 0;
15449
15450         printk_once(KERN_INFO "%s\n", version);
15451
15452         err = pci_enable_device(pdev);
15453         if (err) {
15454                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting\n");
15455                 return err;
15456         }
15457
15458         err = pci_request_regions(pdev, DRV_MODULE_NAME);
15459         if (err) {
15460                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting\n");
15461                 goto err_out_disable_pdev;
15462         }
15463
15464         pci_set_master(pdev);
15465
15466         /* Find power-management capability. */
15467         pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
15468         if (pm_cap == 0) {
15469                 dev_err(&pdev->dev,
15470                         "Cannot find Power Management capability, aborting\n");
15471                 err = -EIO;
15472                 goto err_out_free_res;
15473         }
15474
15475         err = pci_set_power_state(pdev, PCI_D0);
15476         if (err) {
15477                 dev_err(&pdev->dev, "Transition to D0 failed, aborting\n");
15478                 goto err_out_free_res;
15479         }
15480
15481         dev = alloc_etherdev_mq(sizeof(*tp), TG3_IRQ_MAX_VECS);
15482         if (!dev) {
15483                 err = -ENOMEM;
15484                 goto err_out_power_down;
15485         }
15486
15487         SET_NETDEV_DEV(dev, &pdev->dev);
15488
15489         tp = netdev_priv(dev);
15490         tp->pdev = pdev;
15491         tp->dev = dev;
15492         tp->pm_cap = pm_cap;
15493         tp->rx_mode = TG3_DEF_RX_MODE;
15494         tp->tx_mode = TG3_DEF_TX_MODE;
15495
15496         if (tg3_debug > 0)
15497                 tp->msg_enable = tg3_debug;
15498         else
15499                 tp->msg_enable = TG3_DEF_MSG_ENABLE;
15500
15501         /* The word/byte swap controls here govern register access byte
15502          * swapping.  DMA data byte swapping is controlled in the GRC_MODE
15503          * setting below.
15504          */
15505         tp->misc_host_ctrl =
15506                 MISC_HOST_CTRL_MASK_PCI_INT |
15507                 MISC_HOST_CTRL_WORD_SWAP |
15508                 MISC_HOST_CTRL_INDIR_ACCESS |
15509                 MISC_HOST_CTRL_PCISTATE_RW;
15510
15511         /* The NONFRM (non-frame) byte/word swap controls take effect
15512          * on descriptor entries, anything which isn't packet data.
15513          *
15514          * The StrongARM chips on the board (one for tx, one for rx)
15515          * are running in big-endian mode.
15516          */
15517         tp->grc_mode = (GRC_MODE_WSWAP_DATA | GRC_MODE_BSWAP_DATA |
15518                         GRC_MODE_WSWAP_NONFRM_DATA);
15519 #ifdef __BIG_ENDIAN
15520         tp->grc_mode |= GRC_MODE_BSWAP_NONFRM_DATA;
15521 #endif
15522         spin_lock_init(&tp->lock);
15523         spin_lock_init(&tp->indirect_lock);
15524         INIT_WORK(&tp->reset_task, tg3_reset_task);
15525
15526         tp->regs = pci_ioremap_bar(pdev, BAR_0);
15527         if (!tp->regs) {
15528                 dev_err(&pdev->dev, "Cannot map device registers, aborting\n");
15529                 err = -ENOMEM;
15530                 goto err_out_free_dev;
15531         }
15532
15533         if (tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761 ||
15534             tp->pdev->device == PCI_DEVICE_ID_TIGON3_5761E ||
15535             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761S ||
15536             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5761SE ||
15537             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5717 ||
15538             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5718 ||
15539             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5719 ||
15540             tp->pdev->device == TG3PCI_DEVICE_TIGON3_5720) {
15541                 tg3_flag_set(tp, ENABLE_APE);
15542                 tp->aperegs = pci_ioremap_bar(pdev, BAR_2);
15543                 if (!tp->aperegs) {
15544                         dev_err(&pdev->dev,
15545                                 "Cannot map APE registers, aborting\n");
15546                         err = -ENOMEM;
15547                         goto err_out_iounmap;
15548                 }
15549         }
15550
15551         tp->rx_pending = TG3_DEF_RX_RING_PENDING;
15552         tp->rx_jumbo_pending = TG3_DEF_RX_JUMBO_RING_PENDING;
15553
15554         dev->ethtool_ops = &tg3_ethtool_ops;
15555         dev->watchdog_timeo = TG3_TX_TIMEOUT;
15556         dev->netdev_ops = &tg3_netdev_ops;
15557         dev->irq = pdev->irq;
15558
15559         err = tg3_get_invariants(tp);
15560         if (err) {
15561                 dev_err(&pdev->dev,
15562                         "Problem fetching invariants of chip, aborting\n");
15563                 goto err_out_apeunmap;
15564         }
15565
15566         /* The EPB bridge inside 5714, 5715, and 5780 and any
15567          * device behind the EPB cannot support DMA addresses > 40-bit.
15568          * On 64-bit systems with IOMMU, use 40-bit dma_mask.
15569          * On 64-bit systems without IOMMU, use 64-bit dma_mask and
15570          * do DMA address check in tg3_start_xmit().
15571          */
15572         if (tg3_flag(tp, IS_5788))
15573                 persist_dma_mask = dma_mask = DMA_BIT_MASK(32);
15574         else if (tg3_flag(tp, 40BIT_DMA_BUG)) {
15575                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
15576 #ifdef CONFIG_HIGHMEM
15577                 dma_mask = DMA_BIT_MASK(64);
15578 #endif
15579         } else
15580                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
15581
15582         /* Configure DMA attributes. */
15583         if (dma_mask > DMA_BIT_MASK(32)) {
15584                 err = pci_set_dma_mask(pdev, dma_mask);
15585                 if (!err) {
15586                         features |= NETIF_F_HIGHDMA;
15587                         err = pci_set_consistent_dma_mask(pdev,
15588                                                           persist_dma_mask);
15589                         if (err < 0) {
15590                                 dev_err(&pdev->dev, "Unable to obtain 64 bit "
15591                                         "DMA for consistent allocations\n");
15592                                 goto err_out_apeunmap;
15593                         }
15594                 }
15595         }
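        /* Fall back to a 32-bit mask if the wider mask was rejected or
         * was never requested.
         */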
15596         if (err || dma_mask == DMA_BIT_MASK(32)) {
15597                 err = pci_set_dma_mask(pdev, DMA_BIT_MASK(32));
15598                 if (err) {
15599                         dev_err(&pdev->dev,
15600                                 "No usable DMA configuration, aborting\n");
15601                         goto err_out_apeunmap;
15602                 }
15603         }
15604
15605         tg3_init_bufmgr_config(tp);
15606
15607         features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
15608
15609         /* 5700 B0 chips do not support checksumming correctly due
15610          * to hardware bugs.
15611          */
15612         if (tp->pci_chip_rev_id != CHIPREV_ID_5700_B0) {
15613                 features |= NETIF_F_SG | NETIF_F_IP_CSUM | NETIF_F_RXCSUM;
15614
15615                 if (tg3_flag(tp, 5755_PLUS))
15616                         features |= NETIF_F_IPV6_CSUM;
15617         }
15618
15619         /* TSO is on by default on chips that support hardware TSO.
15620          * Firmware TSO on older chips gives lower performance, so it
15621          * is off by default, but can be enabled using ethtool.
15622          */
15623         if ((tg3_flag(tp, HW_TSO_1) ||
15624              tg3_flag(tp, HW_TSO_2) ||
15625              tg3_flag(tp, HW_TSO_3)) &&
15626             (features & NETIF_F_IP_CSUM))
15627                 features |= NETIF_F_TSO;
15628         if (tg3_flag(tp, HW_TSO_2) || tg3_flag(tp, HW_TSO_3)) {
15629                 if (features & NETIF_F_IPV6_CSUM)
15630                         features |= NETIF_F_TSO6;
15631                 if (tg3_flag(tp, HW_TSO_3) ||
15632                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5761 ||
15633                     (GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5784 &&
15634                      GET_CHIP_REV(tp->pci_chip_rev_id) != CHIPREV_5784_AX) ||
15635                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_5785 ||
15636                     GET_ASIC_REV(tp->pci_chip_rev_id) == ASIC_REV_57780)
15637                         features |= NETIF_F_TSO_ECN;
15638         }
15639
15640         dev->features |= features;
15641         dev->vlan_features |= features;
15642
15643         /*
15644          * Add loopback capability only for a subset of devices that support
15645          * MAC-LOOPBACK. Eventually this needs to be enhanced to allow INT-PHY
15646          * loopback for the remaining devices.
15647          */
15648         if (GET_ASIC_REV(tp->pci_chip_rev_id) != ASIC_REV_5780 &&
15649             !tg3_flag(tp, CPMU_PRESENT))
15650                 /* Add the loopback capability */
15651                 features |= NETIF_F_LOOPBACK;
15652
15653         dev->hw_features |= features;
15654
15655         if (tp->pci_chip_rev_id == CHIPREV_ID_5705_A1 &&
15656             !tg3_flag(tp, TSO_CAPABLE) &&
15657             !(tr32(TG3PCI_PCISTATE) & PCISTATE_BUS_SPEED_HIGH)) {
15658                 tg3_flag_set(tp, MAX_RXPEND_64);
15659                 tp->rx_pending = 63;
15660         }
15661
15662         err = tg3_get_device_address(tp);
15663         if (err) {
15664                 dev_err(&pdev->dev,
15665                         "Could not obtain valid ethernet address, aborting\n");
15666                 goto err_out_apeunmap;
15667         }
15668
15669         /*
15670          * Reset the chip in case the UNDI or EFI driver did not shut it
15671          * down; otherwise the DMA self test will enable the WDMAC and
15672          * we'll see (spurious) pending DMA on the PCI bus at that point.
15673          */
15674         if ((tr32(HOSTCC_MODE) & HOSTCC_MODE_ENABLE) ||
15675             (tr32(WDMAC_MODE) & WDMAC_MODE_ENABLE)) {
15676                 tw32(MEMARB_MODE, MEMARB_MODE_ENABLE);
15677                 tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15678         }
15679
15680         err = tg3_test_dma(tp);
15681         if (err) {
15682                 dev_err(&pdev->dev, "DMA engine test failed, aborting\n");
15683                 goto err_out_apeunmap;
15684         }
15685
15686         intmbx = MAILBOX_INTERRUPT_0 + TG3_64BIT_REG_LOW;
15687         rcvmbx = MAILBOX_RCVRET_CON_IDX_0 + TG3_64BIT_REG_LOW;
15688         sndmbx = MAILBOX_SNDHOST_PROD_IDX_0 + TG3_64BIT_REG_LOW;
15689         for (i = 0; i < tp->irq_max; i++) {
15690                 struct tg3_napi *tnapi = &tp->napi[i];
15691
15692                 tnapi->tp = tp;
15693                 tnapi->tx_pending = TG3_DEF_TX_RING_PENDING;
15694
15695                 tnapi->int_mbox = intmbx;
15696                 if (i <= 4)
15697                         intmbx += 0x8;
15698                 else
15699                         intmbx += 0x4;
15700
15701                 tnapi->consmbox = rcvmbx;
15702                 tnapi->prodmbox = sndmbx;
15703
15704                 if (i)
15705                         tnapi->coal_now = HOSTCC_MODE_COAL_VEC1_NOW << (i - 1);
15706                 else
15707                         tnapi->coal_now = HOSTCC_MODE_NOW;
15708
15709                 if (!tg3_flag(tp, SUPPORT_MSIX))
15710                         break;
15711
15712                 /*
15713                  * If we support MSIX, we'll be using RSS.  If we're using
15714                  * RSS, the first vector only handles link interrupts and the
15715                  * remaining vectors handle rx and tx interrupts.  Reuse the
15716                  * mailbox values for the next iteration.  The values we set up
15717                  * above are still useful for the single vectored mode.
15718                  */
15719                 if (!i)
15720                         continue;
15721
15722                 rcvmbx += 0x8;
15723
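                /* The receive-return mailboxes form a linear array of
                 * 64-bit registers (stride 0x8); the send producer
                 * mailboxes are not laid out linearly, hence the
                 * alternating -0x4/+0xc stride below.
                 */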
15724                 if (sndmbx & 0x4)
15725                         sndmbx -= 0x4;
15726                 else
15727                         sndmbx += 0xc;
15728         }
15729
15730         tg3_init_coal(tp);
15731
15732         pci_set_drvdata(pdev, dev);
15733
15734         if (tg3_flag(tp, 5717_PLUS)) {
15735                 /* Return the device to a low-power mode */
15736                 tg3_frob_aux_power(tp, false);
15737         }
15738
15739         tg3_timer_init(tp);
15740
15741         err = register_netdev(dev);
15742         if (err) {
15743                 dev_err(&pdev->dev, "Cannot register net device, aborting\n");
15744                 goto err_out_apeunmap;
15745         }
15746
15747         netdev_info(dev, "Tigon3 [partno(%s) rev %04x] (%s) MAC address %pM\n",
15748                     tp->board_part_number,
15749                     tp->pci_chip_rev_id,
15750                     tg3_bus_string(tp, str),
15751                     dev->dev_addr);
15752
15753         if (tp->phy_flags & TG3_PHYFLG_IS_CONNECTED) {
15754                 struct phy_device *phydev;
15755                 phydev = tp->mdio_bus->phy_map[TG3_PHY_MII_ADDR];
15756                 netdev_info(dev,
15757                             "attached PHY driver [%s] (mii_bus:phy_addr=%s)\n",
15758                             phydev->drv->name, dev_name(&phydev->dev));
15759         } else {
15760                 char *ethtype;
15761
15762                 if (tp->phy_flags & TG3_PHYFLG_10_100_ONLY)
15763                         ethtype = "10/100Base-TX";
15764                 else if (tp->phy_flags & TG3_PHYFLG_ANY_SERDES)
15765                         ethtype = "1000Base-SX";
15766                 else
15767                         ethtype = "10/100/1000Base-T";
15768
15769                 netdev_info(dev, "attached PHY is %s (%s Ethernet) "
15770                             "(WireSpeed[%d], EEE[%d])\n",
15771                             tg3_phy_string(tp), ethtype,
15772                             (tp->phy_flags & TG3_PHYFLG_NO_ETH_WIRE_SPEED) == 0,
15773                             (tp->phy_flags & TG3_PHYFLG_EEE_CAP) != 0);
15774         }
15775
15776         netdev_info(dev, "RXcsums[%d] LinkChgREG[%d] MIirq[%d] ASF[%d] TSOcap[%d]\n",
15777                     (dev->features & NETIF_F_RXCSUM) != 0,
15778                     tg3_flag(tp, USE_LINKCHG_REG) != 0,
15779                     (tp->phy_flags & TG3_PHYFLG_USE_MI_INTERRUPT) != 0,
15780                     tg3_flag(tp, ENABLE_ASF) != 0,
15781                     tg3_flag(tp, TSO_CAPABLE) != 0);
15782         netdev_info(dev, "dma_rwctrl[%08x] dma_mask[%d-bit]\n",
15783                     tp->dma_rwctrl,
15784                     pdev->dma_mask == DMA_BIT_MASK(32) ? 32 :
15785                     ((u64)pdev->dma_mask) == DMA_BIT_MASK(40) ? 40 : 64);
15786
15787         pci_save_state(pdev);
15788
15789         return 0;
15790
15791 err_out_apeunmap:
15792         if (tp->aperegs) {
15793                 iounmap(tp->aperegs);
15794                 tp->aperegs = NULL;
15795         }
15796
15797 err_out_iounmap:
15798         if (tp->regs) {
15799                 iounmap(tp->regs);
15800                 tp->regs = NULL;
15801         }
15802
15803 err_out_free_dev:
15804         free_netdev(dev);
15805
15806 err_out_power_down:
15807         pci_set_power_state(pdev, PCI_D3hot);
15808
15809 err_out_free_res:
15810         pci_release_regions(pdev);
15811
15812 err_out_disable_pdev:
15813         pci_disable_device(pdev);
15814         pci_set_drvdata(pdev, NULL);
15815         return err;
15816 }
15817
15818 static void __devexit tg3_remove_one(struct pci_dev *pdev)
15819 {
15820         struct net_device *dev = pci_get_drvdata(pdev);
15821
15822         if (dev) {
15823                 struct tg3 *tp = netdev_priv(dev);
15824
15825                 if (tp->fw)
15826                         release_firmware(tp->fw);
15827
15828                 tg3_reset_task_cancel(tp);
15829
15830                 if (tg3_flag(tp, USE_PHYLIB)) {
15831                         tg3_phy_fini(tp);
15832                         tg3_mdio_fini(tp);
15833                 }
15834
15835                 unregister_netdev(dev);
15836                 if (tp->aperegs) {
15837                         iounmap(tp->aperegs);
15838                         tp->aperegs = NULL;
15839                 }
15840                 if (tp->regs) {
15841                         iounmap(tp->regs);
15842                         tp->regs = NULL;
15843                 }
15844                 free_netdev(dev);
15845                 pci_release_regions(pdev);
15846                 pci_disable_device(pdev);
15847                 pci_set_drvdata(pdev, NULL);
15848         }
15849 }
15850
15851 #ifdef CONFIG_PM_SLEEP
15852 static int tg3_suspend(struct device *device)
15853 {
15854         struct pci_dev *pdev = to_pci_dev(device);
15855         struct net_device *dev = pci_get_drvdata(pdev);
15856         struct tg3 *tp = netdev_priv(dev);
15857         int err;
15858
15859         if (!netif_running(dev))
15860                 return 0;
15861
15862         tg3_reset_task_cancel(tp);
15863         tg3_phy_stop(tp);
15864         tg3_netif_stop(tp);
15865
15866         tg3_timer_stop(tp);
15867
15868         tg3_full_lock(tp, 1);
15869         tg3_disable_ints(tp);
15870         tg3_full_unlock(tp);
15871
15872         netif_device_detach(dev);
15873
15874         tg3_full_lock(tp, 0);
15875         tg3_halt(tp, RESET_KIND_SHUTDOWN, 1);
15876         tg3_flag_clear(tp, INIT_COMPLETE);
15877         tg3_full_unlock(tp);
15878
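        /* If the chip cannot be prepared for power-down, undo the
         * teardown above and put the interface back into service before
         * returning the error.
         */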
15879         err = tg3_power_down_prepare(tp);
15880         if (err) {
15881                 int err2;
15882
15883                 tg3_full_lock(tp, 0);
15884
15885                 tg3_flag_set(tp, INIT_COMPLETE);
15886                 err2 = tg3_restart_hw(tp, 1);
15887                 if (err2)
15888                         goto out;
15889
15890                 tg3_timer_start(tp);
15891
15892                 netif_device_attach(dev);
15893                 tg3_netif_start(tp);
15894
15895 out:
15896                 tg3_full_unlock(tp);
15897
15898                 if (!err2)
15899                         tg3_phy_start(tp);
15900         }
15901
15902         return err;
15903 }
15904
15905 static int tg3_resume(struct device *device)
15906 {
15907         struct pci_dev *pdev = to_pci_dev(device);
15908         struct net_device *dev = pci_get_drvdata(pdev);
15909         struct tg3 *tp = netdev_priv(dev);
15910         int err;
15911
15912         if (!netif_running(dev))
15913                 return 0;
15914
15915         netif_device_attach(dev);
15916
15917         tg3_full_lock(tp, 0);
15918
15919         tg3_flag_set(tp, INIT_COMPLETE);
15920         err = tg3_restart_hw(tp, 1);
15921         if (err)
15922                 goto out;
15923
15924         tg3_timer_start(tp);
15925
15926         tg3_netif_start(tp);
15927
15928 out:
15929         tg3_full_unlock(tp);
15930
15931         if (!err)
15932                 tg3_phy_start(tp);
15933
15934         return err;
15935 }
15936
15937 static SIMPLE_DEV_PM_OPS(tg3_pm_ops, tg3_suspend, tg3_resume);
15938 #define TG3_PM_OPS (&tg3_pm_ops)
15939
15940 #else
15941
15942 #define TG3_PM_OPS NULL
15943
15944 #endif /* CONFIG_PM_SLEEP */
15945
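/* PCI error recovery: the AER core invokes the callbacks below in
 * order -- error_detected() when an error is reported, slot_reset()
 * after the slot has been reset, and resume() once traffic may flow
 * again.
 */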
15946 /**
15947  * tg3_io_error_detected - called when PCI error is detected
15948  * @pdev: Pointer to PCI device
15949  * @state: The current pci connection state
15950  *
15951  * This function is called after a PCI bus error affecting
15952  * this device has been detected.
15953  */
15954 static pci_ers_result_t tg3_io_error_detected(struct pci_dev *pdev,
15955                                               pci_channel_state_t state)
15956 {
15957         struct net_device *netdev = pci_get_drvdata(pdev);
15958         struct tg3 *tp = netdev_priv(netdev);
15959         pci_ers_result_t err = PCI_ERS_RESULT_NEED_RESET;
15960
15961         netdev_info(netdev, "PCI I/O error detected\n");
15962
15963         rtnl_lock();
15964
15965         if (!netif_running(netdev))
15966                 goto done;
15967
15968         tg3_phy_stop(tp);
15969
15970         tg3_netif_stop(tp);
15971
15972         tg3_timer_stop(tp);
15973
15974         /* Want to make sure that the reset task doesn't run */
15975         tg3_reset_task_cancel(tp);
15976
15977         netif_device_detach(netdev);
15978
15979         /* Clean up software state, even if MMIO is blocked */
15980         tg3_full_lock(tp, 0);
15981         tg3_halt(tp, RESET_KIND_SHUTDOWN, 0);
15982         tg3_full_unlock(tp);
15983
15984 done:
15985         if (state == pci_channel_io_perm_failure)
15986                 err = PCI_ERS_RESULT_DISCONNECT;
15987         else
15988                 pci_disable_device(pdev);
15989
15990         rtnl_unlock();
15991
15992         return err;
15993 }
15994
15995 /**
15996  * tg3_io_slot_reset - called after the pci bus has been reset.
15997  * @pdev: Pointer to PCI device
15998  *
15999  * Restart the card from scratch, as if from a cold boot.
16000  * At this point, the card has experienced a hard reset,
16001  * followed by fixups by BIOS, and has its config space
16002  * set up identically to what it was at cold boot.
16003  */
16004 static pci_ers_result_t tg3_io_slot_reset(struct pci_dev *pdev)
16005 {
16006         struct net_device *netdev = pci_get_drvdata(pdev);
16007         struct tg3 *tp = netdev_priv(netdev);
16008         pci_ers_result_t rc = PCI_ERS_RESULT_DISCONNECT;
16009         int err;
16010
16011         rtnl_lock();
16012
16013         if (pci_enable_device(pdev)) {
16014                 netdev_err(netdev, "Cannot re-enable PCI device after reset.\n");
16015                 goto done;
16016         }
16017
16018         pci_set_master(pdev);
16019         pci_restore_state(pdev);
16020         pci_save_state(pdev);
16021
16022         if (!netif_running(netdev)) {
16023                 rc = PCI_ERS_RESULT_RECOVERED;
16024                 goto done;
16025         }
16026
16027         err = tg3_power_up(tp);
16028         if (err)
16029                 goto done;
16030
16031         rc = PCI_ERS_RESULT_RECOVERED;
16032
16033 done:
16034         rtnl_unlock();
16035
16036         return rc;
16037 }
16038
16039 /**
16040  * tg3_io_resume - called when traffic can start flowing again.
16041  * @pdev: Pointer to PCI device
16042  *
16043  * This callback is called when the error recovery driver tells
16044  * us that it's OK to resume normal operation.
16045  */
16046 static void tg3_io_resume(struct pci_dev *pdev)
16047 {
16048         struct net_device *netdev = pci_get_drvdata(pdev);
16049         struct tg3 *tp = netdev_priv(netdev);
16050         int err;
16051
16052         rtnl_lock();
16053
16054         if (!netif_running(netdev))
16055                 goto done;
16056
16057         tg3_full_lock(tp, 0);
16058         tg3_flag_set(tp, INIT_COMPLETE);
16059         err = tg3_restart_hw(tp, 1);
16060         tg3_full_unlock(tp);
16061         if (err) {
16062                 netdev_err(netdev, "Cannot restart hardware after reset.\n");
16063                 goto done;
16064         }
16065
16066         netif_device_attach(netdev);
16067
16068         tg3_timer_start(tp);
16069
16070         tg3_netif_start(tp);
16071
16072         tg3_phy_start(tp);
16073
16074 done:
16075         rtnl_unlock();
16076 }
16077
16078 static struct pci_error_handlers tg3_err_handler = {
16079         .error_detected = tg3_io_error_detected,
16080         .slot_reset     = tg3_io_slot_reset,
16081         .resume         = tg3_io_resume
16082 };
16083
16084 static struct pci_driver tg3_driver = {
16085         .name           = DRV_MODULE_NAME,
16086         .id_table       = tg3_pci_tbl,
16087         .probe          = tg3_init_one,
16088         .remove         = __devexit_p(tg3_remove_one),
16089         .err_handler    = &tg3_err_handler,
16090         .driver.pm      = TG3_PM_OPS,
16091 };
16092
16093 static int __init tg3_init(void)
16094 {
16095         return pci_register_driver(&tg3_driver);
16096 }
16097
16098 static void __exit tg3_cleanup(void)
16099 {
16100         pci_unregister_driver(&tg3_driver);
16101 }
16102
16103 module_init(tg3_init);
16104 module_exit(tg3_cleanup);