bnx2: Update firmware to 5.0.0.j3.
[linux-flexiantxendom0-3.2.10.git] / drivers / net / bnx2.c
1 /* bnx2.c: Broadcom NX2 network driver.
2  *
3  * Copyright (c) 2004-2009 Broadcom Corporation
4  *
5  * This program is free software; you can redistribute it and/or modify
6  * it under the terms of the GNU General Public License as published by
7  * the Free Software Foundation.
8  *
9  * Written by: Michael Chan  (mchan@broadcom.com)
10  */
11
12
13 #include <linux/module.h>
14 #include <linux/moduleparam.h>
15
16 #include <linux/kernel.h>
17 #include <linux/timer.h>
18 #include <linux/errno.h>
19 #include <linux/ioport.h>
20 #include <linux/slab.h>
21 #include <linux/vmalloc.h>
22 #include <linux/interrupt.h>
23 #include <linux/pci.h>
24 #include <linux/init.h>
25 #include <linux/netdevice.h>
26 #include <linux/etherdevice.h>
27 #include <linux/skbuff.h>
28 #include <linux/dma-mapping.h>
29 #include <linux/bitops.h>
30 #include <asm/io.h>
31 #include <asm/irq.h>
32 #include <linux/delay.h>
33 #include <asm/byteorder.h>
34 #include <asm/page.h>
35 #include <linux/time.h>
36 #include <linux/ethtool.h>
37 #include <linux/mii.h>
38 #include <linux/if_vlan.h>
39 #if defined(CONFIG_VLAN_8021Q) || defined(CONFIG_VLAN_8021Q_MODULE)
40 #define BCM_VLAN 1
41 #endif
42 #include <net/ip.h>
43 #include <net/tcp.h>
44 #include <net/checksum.h>
45 #include <linux/workqueue.h>
46 #include <linux/crc32.h>
47 #include <linux/prefetch.h>
48 #include <linux/cache.h>
49 #include <linux/firmware.h>
50 #include <linux/log2.h>
51 #include <linux/list.h>
52
53 #if defined(CONFIG_CNIC) || defined(CONFIG_CNIC_MODULE)
54 #define BCM_CNIC 1
55 #include "cnic_if.h"
56 #endif
57 #include "bnx2.h"
58 #include "bnx2_fw.h"
59
60 #define DRV_MODULE_NAME         "bnx2"
61 #define PFX DRV_MODULE_NAME     ": "
62 #define DRV_MODULE_VERSION      "2.0.2"
63 #define DRV_MODULE_RELDATE      "Aug 21, 2009"
64 #define FW_MIPS_FILE_06         "bnx2/bnx2-mips-06-5.0.0.j3.fw"
65 #define FW_RV2P_FILE_06         "bnx2/bnx2-rv2p-06-5.0.0.j3.fw"
66 #define FW_MIPS_FILE_09         "bnx2/bnx2-mips-09-5.0.0.j3.fw"
67 #define FW_RV2P_FILE_09_Ax      "bnx2/bnx2-rv2p-09ax-5.0.0.j3.fw"
68 #define FW_RV2P_FILE_09         "bnx2/bnx2-rv2p-09-5.0.0.j3.fw"
69
70 #define RUN_AT(x) (jiffies + (x))
71
72 /* Time in jiffies before concluding the transmitter is hung. */
73 #define TX_TIMEOUT  (5*HZ)
74
75 static char version[] __devinitdata =
76         "Broadcom NetXtreme II Gigabit Ethernet Driver " DRV_MODULE_NAME " v" DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
77
78 MODULE_AUTHOR("Michael Chan <mchan@broadcom.com>");
79 MODULE_DESCRIPTION("Broadcom NetXtreme II BCM5706/5708/5709/5716 Driver");
80 MODULE_LICENSE("GPL");
81 MODULE_VERSION(DRV_MODULE_VERSION);
82 MODULE_FIRMWARE(FW_MIPS_FILE_06);
83 MODULE_FIRMWARE(FW_RV2P_FILE_06);
84 MODULE_FIRMWARE(FW_MIPS_FILE_09);
85 MODULE_FIRMWARE(FW_RV2P_FILE_09);
86 MODULE_FIRMWARE(FW_RV2P_FILE_09_Ax);
87
88 static int disable_msi = 0;
89
90 module_param(disable_msi, int, 0);
91 MODULE_PARM_DESC(disable_msi, "Disable Message Signaled Interrupt (MSI)");
92
/* Supported board types.  Used as the driver_data index into
 * board_info[] below, so the two must stay in the same order.
 */
typedef enum {
	BCM5706 = 0,
	NC370T,
	NC370I,
	BCM5706S,
	NC370F,
	BCM5708,
	BCM5708S,
	BCM5709,
	BCM5709S,
	BCM5716,
	BCM5716S,
} board_t;
106
107 /* indexed by board_t, above */
/* Human-readable adapter names, indexed by board_t, above. */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM5706 1000Base-T" },
	{ "HP NC370T Multifunction Gigabit Server Adapter" },
	{ "HP NC370i Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5706 1000Base-SX" },
	{ "HP NC370F Multifunction Gigabit Server Adapter" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5708 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5709 1000Base-SX" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-T" },
	{ "Broadcom NetXtreme II BCM5716 1000Base-SX" },
	};
123
/* PCI IDs this driver binds to.  The HP OEM entries (specific
 * subsystem vendor/device) are listed before the PCI_ANY_ID catch-all
 * entries for the same chip so the more specific match is taken first.
 */
static DEFINE_PCI_DEVICE_TABLE(bnx2_pci_tbl) = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3101, 0, 0, NC370T },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_VENDOR_ID_HP, 0x3106, 0, 0, NC370I },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_VENDOR_ID_HP, 0x3102, 0, 0, NC370F },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5706S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5706S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5708S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5708S },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_5709S,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5709S },
	/* 0x163b/0x163c: BCM5716 variants; no PCI_DEVICE_ID_* constant */
	{ PCI_VENDOR_ID_BROADCOM, 0x163b,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716 },
	{ PCI_VENDOR_ID_BROADCOM, 0x163c,
	  PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM5716S },
	{ 0, }
};
149
/* Known NVRAM device descriptors, selected at runtime by matching the
 * flash strapping read from the chip.  Each entry carries five raw
 * NVRAM interface/config register values, then the access flags, page
 * geometry, byte address mask, total size, and a human-readable name
 * (NOTE(review): field names assumed from struct flash_spec in bnx2.h
 * — confirm).  "Expansion" entries are placeholders for straps with no
 * dedicated device known at the time.
 */
static const struct flash_spec flash_table[] =
{
#define BUFFERED_FLAGS		(BNX2_NV_BUFFERED | BNX2_NV_TRANSLATE)
#define NONBUFFERED_FLAGS	(BNX2_NV_WREN)
	/* Slow EEPROM */
	{0x00000000, 0x40830380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - slow"},
	/* Expansion entry 0001 */
	{0x08000002, 0x4b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0001"},
	/* Saifun SA25F010 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x04000001, 0x47808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*2,
	 "Non-buffered flash (128kB)"},
	/* Saifun SA25F020 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x0c000003, 0x4f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE*4,
	 "Non-buffered flash (256kB)"},
	/* Expansion entry 0100 */
	{0x11000000, 0x53808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 0100"},
	/* Entry 0101: ST M45PE10 (non-buffered flash, TetonII B0) */
	{0x19000002, 0x5b808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*2,
	 "Entry 0101: ST M45PE10 (128kB non-bufferred)"},
	/* Entry 0110: ST M45PE20 (non-buffered flash)*/
	{0x15000001, 0x57808201, 0x000500db, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, ST_MICRO_FLASH_PAGE_BITS, ST_MICRO_FLASH_PAGE_SIZE,
	 ST_MICRO_FLASH_BYTE_ADDR_MASK, ST_MICRO_FLASH_BASE_TOTAL_SIZE*4,
	 "Entry 0110: ST M45PE20 (256kB non-bufferred)"},
	/* Saifun SA25F005 (non-buffered flash) */
	/* strap, cfg1, & write1 need updates */
	{0x1d000003, 0x5f808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, SAIFUN_FLASH_BASE_TOTAL_SIZE,
	 "Non-buffered flash (64kB)"},
	/* Fast EEPROM */
	{0x22000000, 0x62808380, 0x009f0081, 0xa184a053, 0xaf000400,
	 BUFFERED_FLAGS, SEEPROM_PAGE_BITS, SEEPROM_PAGE_SIZE,
	 SEEPROM_BYTE_ADDR_MASK, SEEPROM_TOTAL_SIZE,
	 "EEPROM - fast"},
	/* Expansion entry 1001 */
	{0x2a000002, 0x6b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1001"},
	/* Expansion entry 1010 */
	{0x26000001, 0x67808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1010"},
	/* ATMEL AT45DB011B (buffered flash) */
	{0x2e000003, 0x6e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE,
	 "Buffered flash (128kB)"},
	/* Expansion entry 1100 */
	{0x33000000, 0x73808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1100"},
	/* Expansion entry 1101 */
	{0x3b000002, 0x7b808201, 0x00050081, 0x03840253, 0xaf020406,
	 NONBUFFERED_FLAGS, SAIFUN_FLASH_PAGE_BITS, SAIFUN_FLASH_PAGE_SIZE,
	 SAIFUN_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1101"},
	/* Atmel Expansion entry 1110 */
	{0x37000001, 0x76808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, 0,
	 "Entry 1110 (Atmel)"},
	/* ATMEL AT45DB021B (buffered flash) */
	{0x3f000003, 0x7e808273, 0x00570081, 0x68848353, 0xaf000400,
	 BUFFERED_FLAGS, BUFFERED_FLASH_PAGE_BITS, BUFFERED_FLASH_PAGE_SIZE,
	 BUFFERED_FLASH_BYTE_ADDR_MASK, BUFFERED_FLASH_TOTAL_SIZE*2,
	 "Buffered flash (256kB)"},
};
238
/* The 5709 has a single fixed NVRAM configuration, so it bypasses the
 * strap-matched flash_table above and uses this descriptor directly.
 */
static const struct flash_spec flash_5709 = {
	.flags		= BNX2_NV_BUFFERED,
	.page_bits	= BCM5709_FLASH_PAGE_BITS,
	.page_size	= BCM5709_FLASH_PAGE_SIZE,
	.addr_mask	= BCM5709_FLASH_BYTE_ADDR_MASK,
	.total_size	= BUFFERED_FLASH_TOTAL_SIZE*2,
	.name		= "5709 Buffered flash (256kB)",
};
247
248 MODULE_DEVICE_TABLE(pci, bnx2_pci_tbl);
249
250 static inline u32 bnx2_tx_avail(struct bnx2 *bp, struct bnx2_tx_ring_info *txr)
251 {
252         u32 diff;
253
254         smp_mb();
255
256         /* The ring uses 256 indices for 255 entries, one of them
257          * needs to be skipped.
258          */
259         diff = txr->tx_prod - txr->tx_cons;
260         if (unlikely(diff >= TX_DESC_CNT)) {
261                 diff &= 0xffff;
262                 if (diff == TX_DESC_CNT)
263                         diff = MAX_TX_DESC_CNT;
264         }
265         return (bp->tx_ring_size - diff);
266 }
267
/* Read a device register indirectly through the PCI config window.
 * The window registers are shared state, so the address write and the
 * data read are serialized with indirect_lock.
 */
static u32
bnx2_reg_rd_ind(struct bnx2 *bp, u32 offset)
{
	u32 val;

	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	val = REG_RD(bp, BNX2_PCICFG_REG_WINDOW);
	spin_unlock_bh(&bp->indirect_lock);
	return val;
}
279
/* Write @val to a device register indirectly through the PCI config
 * window; counterpart of bnx2_reg_rd_ind() and serialized by the same
 * indirect_lock.
 */
static void
bnx2_reg_wr_ind(struct bnx2 *bp, u32 offset, u32 val)
{
	spin_lock_bh(&bp->indirect_lock);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, offset);
	REG_WR(bp, BNX2_PCICFG_REG_WINDOW, val);
	spin_unlock_bh(&bp->indirect_lock);
}
288
/* Write a word into the firmware shared memory region at @offset
 * relative to shmem_base.
 */
static void
bnx2_shmem_wr(struct bnx2 *bp, u32 offset, u32 val)
{
	bnx2_reg_wr_ind(bp, bp->shmem_base + offset, val);
}
294
/* Read a word from the firmware shared memory region at @offset
 * relative to shmem_base.
 */
static u32
bnx2_shmem_rd(struct bnx2 *bp, u32 offset)
{
	return (bnx2_reg_rd_ind(bp, bp->shmem_base + offset));
}
300
/* Write one 32-bit word of context memory at @cid_addr + @offset.
 * On the 5709 the write goes through the CTX_CTRL request interface
 * and completion is polled (up to 5 tries, 5us apart; a timeout is
 * silently ignored).  Older chips use the simple address/data pair.
 * Serialized with indirect_lock since the context interface registers
 * are shared.
 */
static void
bnx2_ctx_wr(struct bnx2 *bp, u32 cid_addr, u32 offset, u32 val)
{
	offset += cid_addr;
	spin_lock_bh(&bp->indirect_lock);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		int i;

		REG_WR(bp, BNX2_CTX_CTX_DATA, val);
		REG_WR(bp, BNX2_CTX_CTX_CTRL,
		       offset | BNX2_CTX_CTX_CTRL_WRITE_REQ);
		/* Wait for the hardware to clear the write-request bit. */
		for (i = 0; i < 5; i++) {
			val = REG_RD(bp, BNX2_CTX_CTX_CTRL);
			if ((val & BNX2_CTX_CTX_CTRL_WRITE_REQ) == 0)
				break;
			udelay(5);
		}
	} else {
		REG_WR(bp, BNX2_CTX_DATA_ADR, offset);
		REG_WR(bp, BNX2_CTX_DATA, val);
	}
	spin_unlock_bh(&bp->indirect_lock);
}
324
325 #ifdef BCM_CNIC
326 static int
327 bnx2_drv_ctl(struct net_device *dev, struct drv_ctl_info *info)
328 {
329         struct bnx2 *bp = netdev_priv(dev);
330         struct drv_ctl_io *io = &info->data.io;
331
332         switch (info->cmd) {
333         case DRV_CTL_IO_WR_CMD:
334                 bnx2_reg_wr_ind(bp, io->offset, io->data);
335                 break;
336         case DRV_CTL_IO_RD_CMD:
337                 io->data = bnx2_reg_rd_ind(bp, io->offset);
338                 break;
339         case DRV_CTL_CTX_WR_CMD:
340                 bnx2_ctx_wr(bp, io->cid_addr, io->offset, io->data);
341                 break;
342         default:
343                 return -EINVAL;
344         }
345         return 0;
346 }
347
/* Fill in the single CNIC irq slot.  With MSI-X the CNIC gets the
 * vector just past the ones bnx2 itself uses (bp->irq_nvecs) and its
 * own MSI-X status block; otherwise it shares vector 0 and the base
 * status block, and cnic_present/cnic_tag let the shared NAPI poll
 * hand events to the CNIC.
 */
static void bnx2_setup_cnic_irq_info(struct bnx2 *bp)
{
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	int sb_id;

	if (bp->flags & BNX2_FLAG_USING_MSIX) {
		cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_present = 0;
		sb_id = bp->irq_nvecs;
		cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
	} else {
		cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
		bnapi->cnic_tag = bnapi->last_status_idx;
		bnapi->cnic_present = 1;
		sb_id = 0;
		cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
	}

	cp->irq_arr[0].vector = bp->irq_tbl[sb_id].vector;
	/* Status blocks are laid out back to back at MSIX alignment. */
	cp->irq_arr[0].status_blk = (void *)
		((unsigned long) bnapi->status_blk.msi +
		(BNX2_SBLK_MSIX_ALIGN_SIZE * sb_id));
	cp->irq_arr[0].status_blk_num = sb_id;
	cp->num_irq = 1;
}
374
/* Register the CNIC driver's ops with this device.  Returns -EINVAL
 * for a NULL ops pointer and -EBUSY if a CNIC is already registered.
 * cnic_data is published before cnic_ops via rcu_assign_pointer() so
 * RCU readers that see the ops also see the data.
 */
static int bnx2_register_cnic(struct net_device *dev, struct cnic_ops *ops,
			      void *data)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	if (ops == NULL)
		return -EINVAL;

	if (cp->drv_state & CNIC_DRV_STATE_REGD)
		return -EBUSY;

	bp->cnic_data = data;
	rcu_assign_pointer(bp->cnic_ops, ops);

	cp->num_irq = 0;
	cp->drv_state = CNIC_DRV_STATE_REGD;

	bnx2_setup_cnic_irq_info(bp);

	return 0;
}
397
/* Unregister the CNIC driver.  synchronize_rcu() waits for any reader
 * currently dereferencing cnic_ops (bnx2_cnic_stop/start) to finish
 * before we return and the caller may free the ops.  Always returns 0.
 */
static int bnx2_unregister_cnic(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_state = 0;
	bnapi->cnic_present = 0;
	rcu_assign_pointer(bp->cnic_ops, NULL);
	synchronize_rcu();
	return 0;
}
410
/* Exported probe entry point for the CNIC driver: describe this device
 * and hand back the callbacks (drv_ctl, register/unregister) the CNIC
 * uses to drive it.
 */
struct cnic_eth_dev *bnx2_cnic_probe(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	struct cnic_eth_dev *cp = &bp->cnic_eth_dev;

	cp->drv_owner = THIS_MODULE;
	cp->chip_id = bp->chip_id;
	cp->pdev = bp->pdev;
	cp->io_base = bp->regview;
	cp->drv_ctl = bnx2_drv_ctl;
	cp->drv_register_cnic = bnx2_register_cnic;
	cp->drv_unregister_cnic = bnx2_unregister_cnic;

	return cp;
}
EXPORT_SYMBOL(bnx2_cnic_probe);
427
/* Tell a registered CNIC driver (if any) to stop.  cnic_ops is read
 * under rcu_read_lock(), pairing with the synchronize_rcu() in
 * bnx2_unregister_cnic().
 */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops) {
		info.cmd = CNIC_CTL_STOP_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	rcu_read_unlock();
}
442
/* Tell a registered CNIC driver (if any) to start.  In non-MSI-X mode
 * the CNIC shares vector 0's status block, so cnic_tag is resynced to
 * the current status index before starting.
 */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
	struct cnic_ops *c_ops;
	struct cnic_ctl_info info;

	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops) {
		if (!(bp->flags & BNX2_FLAG_USING_MSIX)) {
			struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

			bnapi->cnic_tag = bnapi->last_status_idx;
		}
		info.cmd = CNIC_CTL_START_CMD;
		c_ops->cnic_ctl(bp->cnic_data, &info);
	}
	rcu_read_unlock();
}
462
463 #else
464
/* CNIC support not compiled in: no-op stub. */
static void
bnx2_cnic_stop(struct bnx2 *bp)
{
}
469
/* CNIC support not compiled in: no-op stub. */
static void
bnx2_cnic_start(struct bnx2 *bp)
{
}
474
475 #endif
476
/* Read PHY register @reg over the EMAC MDIO interface into *val.
 * If the chip is auto-polling the PHY, auto-poll is turned off around
 * the access (with a 40us settle) and restored afterwards.  The MDIO
 * busy bit is polled up to 50 times at 10us intervals; on timeout
 * *val is zeroed and -EBUSY is returned, otherwise 0.
 */
static int
bnx2_read_phy(struct bnx2 *bp, u32 reg, u32 *val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		/* Read back to flush, then let the interface settle. */
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Issue the read: PHY address, register, and start/busy bit. */
	val1 = (bp->phy_addr << 21) | (reg << 16) |
		BNX2_EMAC_MDIO_COMM_COMMAND_READ | BNX2_EMAC_MDIO_COMM_DISEXT |
		BNX2_EMAC_MDIO_COMM_START_BUSY;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);

			/* Re-read for the data, keep the 16 data bits. */
			val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
			val1 &= BNX2_EMAC_MDIO_COMM_DATA;

			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY) {
		*val = 0x0;
		ret = -EBUSY;
	}
	else {
		*val = val1;
		ret = 0;
	}

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
533
/* Write @val to PHY register @reg over the EMAC MDIO interface.
 * Mirrors bnx2_read_phy(): auto-poll is disabled around the access if
 * active, and the busy bit is polled up to 50 * 10us.  Returns 0 on
 * completion or -EBUSY on timeout.
 */
static int
bnx2_write_phy(struct bnx2 *bp, u32 reg, u32 val)
{
	u32 val1;
	int i, ret;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 &= ~BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	/* Issue the write: PHY address, register, data, start/busy bit. */
	val1 = (bp->phy_addr << 21) | (reg << 16) | val |
		BNX2_EMAC_MDIO_COMM_COMMAND_WRITE |
		BNX2_EMAC_MDIO_COMM_START_BUSY | BNX2_EMAC_MDIO_COMM_DISEXT;
	REG_WR(bp, BNX2_EMAC_MDIO_COMM, val1);

	for (i = 0; i < 50; i++) {
		udelay(10);

		val1 = REG_RD(bp, BNX2_EMAC_MDIO_COMM);
		if (!(val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)) {
			udelay(5);
			break;
		}
	}

	if (val1 & BNX2_EMAC_MDIO_COMM_START_BUSY)
		ret = -EBUSY;
	else
		ret = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_INT_MODE_AUTO_POLLING) {
		val1 = REG_RD(bp, BNX2_EMAC_MDIO_MODE);
		val1 |= BNX2_EMAC_MDIO_MODE_AUTO_POLL;

		REG_WR(bp, BNX2_EMAC_MDIO_MODE, val1);
		REG_RD(bp, BNX2_EMAC_MDIO_MODE);

		udelay(40);
	}

	return ret;
}
582
/* Mask interrupts on every vector, then flush the writes with a
 * posted read of INT_ACK_CMD.
 */
static void
bnx2_disable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];
		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
	}
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);
}
596
/* Re-enable interrupts on every vector.  Each vector gets two
 * INT_ACK_CMD writes: the first presents the current status index
 * with the interrupt still masked, the second repeats it unmasked
 * (presumably to ack outstanding events before unmasking — hardware
 * convention, not visible here).  The final COAL_NOW kick makes the
 * host coalescing block generate an interrupt immediately if work is
 * already pending.
 */
static void
bnx2_enable_int(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi;

	for (i = 0; i < bp->irq_nvecs; i++) {
		bnapi = &bp->bnx2_napi[i];

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
		       bnapi->last_status_idx);

		REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
		       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
		       bnapi->last_status_idx);
	}
	REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
}
617
/* Disable interrupts and wait for in-flight handlers to finish.
 * intr_sem is bumped unconditionally (bnx2_netif_start() decrements
 * it), so disable/start calls stay balanced even when the device is
 * not running.
 */
static void
bnx2_disable_int_sync(struct bnx2 *bp)
{
	int i;

	atomic_inc(&bp->intr_sem);
	if (!netif_running(bp->dev))
		return;

	bnx2_disable_int(bp);
	/* Wait for any handler still running on another CPU. */
	for (i = 0; i < bp->irq_nvecs; i++)
		synchronize_irq(bp->irq_tbl[i].vector);
}
631
632 static void
633 bnx2_napi_disable(struct bnx2 *bp)
634 {
635         int i;
636
637         for (i = 0; i < bp->irq_nvecs; i++)
638                 napi_disable(&bp->bnx2_napi[i].napi);
639 }
640
641 static void
642 bnx2_napi_enable(struct bnx2 *bp)
643 {
644         int i;
645
646         for (i = 0; i < bp->irq_nvecs; i++)
647                 napi_enable(&bp->bnx2_napi[i].napi);
648 }
649
/* Stop all traffic processing: CNIC first, then interrupts (synced),
 * then NAPI and the tx queues.  trans_start is refreshed so the stack
 * does not mistake the deliberate stall for a tx timeout.
 */
static void
bnx2_netif_stop(struct bnx2 *bp)
{
	bnx2_cnic_stop(bp);
	bnx2_disable_int_sync(bp);
	if (netif_running(bp->dev)) {
		bnx2_napi_disable(bp);
		netif_tx_disable(bp->dev);
		bp->dev->trans_start = jiffies; /* prevent tx timeout */
	}
}
661
/* Undo one bnx2_netif_stop()/bnx2_disable_int_sync().  Processing is
 * only restarted when intr_sem drops back to zero, so nested stops
 * must each be matched by a start before the device runs again.
 */
static void
bnx2_netif_start(struct bnx2 *bp)
{
	if (atomic_dec_and_test(&bp->intr_sem)) {
		if (netif_running(bp->dev)) {
			netif_tx_wake_all_queues(bp->dev);
			bnx2_napi_enable(bp);
			bnx2_enable_int(bp);
			bnx2_cnic_start(bp);
		}
	}
}
674
675 static void
676 bnx2_free_tx_mem(struct bnx2 *bp)
677 {
678         int i;
679
680         for (i = 0; i < bp->num_tx_rings; i++) {
681                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
682                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
683
684                 if (txr->tx_desc_ring) {
685                         pci_free_consistent(bp->pdev, TXBD_RING_SIZE,
686                                             txr->tx_desc_ring,
687                                             txr->tx_desc_mapping);
688                         txr->tx_desc_ring = NULL;
689                 }
690                 kfree(txr->tx_buf_ring);
691                 txr->tx_buf_ring = NULL;
692         }
693 }
694
/* Free all rx memory: the per-page DMA descriptor rings, the vmalloc'd
 * software buffer ring, and the same pair for the page (jumbo) rings.
 * Safe on partially allocated state; every freed pointer is NULLed.
 */
static void
bnx2_free_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		for (j = 0; j < bp->rx_max_ring; j++) {
			if (rxr->rx_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_desc_ring[j],
						    rxr->rx_desc_mapping[j]);
			rxr->rx_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_buf_ring);
		rxr->rx_buf_ring = NULL;

		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			if (rxr->rx_pg_desc_ring[j])
				pci_free_consistent(bp->pdev, RXBD_RING_SIZE,
						    rxr->rx_pg_desc_ring[j],
						    rxr->rx_pg_desc_mapping[j]);
			rxr->rx_pg_desc_ring[j] = NULL;
		}
		vfree(rxr->rx_pg_ring);
		rxr->rx_pg_ring = NULL;
	}
}
726
727 static int
728 bnx2_alloc_tx_mem(struct bnx2 *bp)
729 {
730         int i;
731
732         for (i = 0; i < bp->num_tx_rings; i++) {
733                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
734                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
735
736                 txr->tx_buf_ring = kzalloc(SW_TXBD_RING_SIZE, GFP_KERNEL);
737                 if (txr->tx_buf_ring == NULL)
738                         return -ENOMEM;
739
740                 txr->tx_desc_ring =
741                         pci_alloc_consistent(bp->pdev, TXBD_RING_SIZE,
742                                              &txr->tx_desc_mapping);
743                 if (txr->tx_desc_ring == NULL)
744                         return -ENOMEM;
745         }
746         return 0;
747 }
748
/* Allocate rx memory for every rx ring: a vmalloc'd software buffer
 * ring plus one DMA descriptor page per rx_max_ring, and (when a page
 * ring is configured) the same for the page/jumbo ring.  On failure
 * returns -ENOMEM; the caller (bnx2_alloc_mem) frees partial state.
 */
static int
bnx2_alloc_rx_mem(struct bnx2 *bp)
{
	int i;

	for (i = 0; i < bp->num_rx_rings; i++) {
		struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
		struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
		int j;

		rxr->rx_buf_ring =
			vmalloc(SW_RXBD_RING_SIZE * bp->rx_max_ring);
		if (rxr->rx_buf_ring == NULL)
			return -ENOMEM;

		memset(rxr->rx_buf_ring, 0,
		       SW_RXBD_RING_SIZE * bp->rx_max_ring);

		for (j = 0; j < bp->rx_max_ring; j++) {
			rxr->rx_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						     &rxr->rx_desc_mapping[j]);
			if (rxr->rx_desc_ring[j] == NULL)
				return -ENOMEM;

		}

		if (bp->rx_pg_ring_size) {
			rxr->rx_pg_ring = vmalloc(SW_RXPG_RING_SIZE *
						  bp->rx_max_pg_ring);
			if (rxr->rx_pg_ring == NULL)
				return -ENOMEM;

			memset(rxr->rx_pg_ring, 0, SW_RXPG_RING_SIZE *
			       bp->rx_max_pg_ring);
		}

		/* rx_max_pg_ring is 0 when no page ring is configured,
		 * so this loop is then a no-op.
		 */
		for (j = 0; j < bp->rx_max_pg_ring; j++) {
			rxr->rx_pg_desc_ring[j] =
				pci_alloc_consistent(bp->pdev, RXBD_RING_SIZE,
						&rxr->rx_pg_desc_mapping[j]);
			if (rxr->rx_pg_desc_ring[j] == NULL)
				return -ENOMEM;

		}
	}
	return 0;
}
797
/* Free everything bnx2_alloc_mem() allocated: tx/rx rings, the 5709
 * context pages, and the combined status+statistics block.  stats_blk
 * lives inside the status block allocation (see bnx2_alloc_mem), so it
 * is only NULLed here, not freed separately.
 */
static void
bnx2_free_mem(struct bnx2 *bp)
{
	int i;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];

	bnx2_free_tx_mem(bp);
	bnx2_free_rx_mem(bp);

	for (i = 0; i < bp->ctx_pages; i++) {
		if (bp->ctx_blk[i]) {
			pci_free_consistent(bp->pdev, BCM_PAGE_SIZE,
					    bp->ctx_blk[i],
					    bp->ctx_blk_mapping[i]);
			bp->ctx_blk[i] = NULL;
		}
	}
	if (bnapi->status_blk.msi) {
		pci_free_consistent(bp->pdev, bp->status_stats_size,
				    bnapi->status_blk.msi,
				    bp->status_blk_mapping);
		bnapi->status_blk.msi = NULL;
		bp->stats_blk = NULL;
	}
}
823
/* Allocate all device memory: the combined status+statistics block,
 * per-vector MSI-X status blocks (laid out back to back at
 * BNX2_SBLK_MSIX_ALIGN_SIZE), the 5709 context pages, and the rx/tx
 * rings.  On any failure everything allocated so far is released via
 * bnx2_free_mem() and -ENOMEM is returned.
 */
static int
bnx2_alloc_mem(struct bnx2 *bp)
{
	int i, status_blk_size, err;
	struct bnx2_napi *bnapi;
	void *status_blk;

	/* Combine status and statistics blocks into one allocation. */
	status_blk_size = L1_CACHE_ALIGN(sizeof(struct status_block));
	if (bp->flags & BNX2_FLAG_MSIX_CAP)
		status_blk_size = L1_CACHE_ALIGN(BNX2_MAX_MSIX_HW_VEC *
						 BNX2_SBLK_MSIX_ALIGN_SIZE);
	bp->status_stats_size = status_blk_size +
				sizeof(struct statistics_block);

	status_blk = pci_alloc_consistent(bp->pdev, bp->status_stats_size,
					  &bp->status_blk_mapping);
	if (status_blk == NULL)
		goto alloc_mem_err;

	memset(status_blk, 0, bp->status_stats_size);

	/* Vector 0 always uses the base status block. */
	bnapi = &bp->bnx2_napi[0];
	bnapi->status_blk.msi = status_blk;
	bnapi->hw_tx_cons_ptr =
		&bnapi->status_blk.msi->status_tx_quick_consumer_index0;
	bnapi->hw_rx_cons_ptr =
		&bnapi->status_blk.msi->status_rx_quick_consumer_index0;
	if (bp->flags & BNX2_FLAG_MSIX_CAP) {
		for (i = 1; i < BNX2_MAX_MSIX_VEC; i++) {
			struct status_block_msix *sblk;

			bnapi = &bp->bnx2_napi[i];

			sblk = (void *) (status_blk +
					 BNX2_SBLK_MSIX_ALIGN_SIZE * i);
			bnapi->status_blk.msix = sblk;
			bnapi->hw_tx_cons_ptr =
				&sblk->status_tx_quick_consumer_index;
			bnapi->hw_rx_cons_ptr =
				&sblk->status_rx_quick_consumer_index;
			/* Vector number field of INT_ACK_CMD writes. */
			bnapi->int_num = i << 24;
		}
	}

	/* Statistics block follows the status block(s) in the same
	 * DMA allocation.
	 */
	bp->stats_blk = status_blk + status_blk_size;

	bp->stats_blk_mapping = bp->status_blk_mapping + status_blk_size;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 0x2000 bytes of host context, split into pages. */
		bp->ctx_pages = 0x2000 / BCM_PAGE_SIZE;
		if (bp->ctx_pages == 0)
			bp->ctx_pages = 1;
		for (i = 0; i < bp->ctx_pages; i++) {
			bp->ctx_blk[i] = pci_alloc_consistent(bp->pdev,
						BCM_PAGE_SIZE,
						&bp->ctx_blk_mapping[i]);
			if (bp->ctx_blk[i] == NULL)
				goto alloc_mem_err;
		}
	}

	err = bnx2_alloc_rx_mem(bp);
	if (err)
		goto alloc_mem_err;

	err = bnx2_alloc_tx_mem(bp);
	if (err)
		goto alloc_mem_err;

	return 0;

alloc_mem_err:
	bnx2_free_mem(bp);
	return -ENOMEM;
}
900
/* Report the resolved link state (speed/duplex, link up/down, autoneg
 * status) to the bootcode through the BNX2_LINK_STATUS shared-memory
 * word.  Skipped entirely when the PHY is controlled by remote
 * management firmware.
 */
static void
bnx2_report_fw_link(struct bnx2 *bp)
{
	u32 fw_link_status = 0;

	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return;

	if (bp->link_up) {
		u32 bmsr;

		/* Encode the resolved speed/duplex combination. */
		switch (bp->line_speed) {
		case SPEED_10:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_10HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_10FULL;
			break;
		case SPEED_100:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_100HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_100FULL;
			break;
		case SPEED_1000:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_1000HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_1000FULL;
			break;
		case SPEED_2500:
			if (bp->duplex == DUPLEX_HALF)
				fw_link_status = BNX2_LINK_STATUS_2500HALF;
			else
				fw_link_status = BNX2_LINK_STATUS_2500FULL;
			break;
		}

		fw_link_status |= BNX2_LINK_STATUS_LINK_UP;

		if (bp->autoneg) {
			fw_link_status |= BNX2_LINK_STATUS_AN_ENABLED;

			/* BMSR status bits are latched; read twice so
			 * the second read reflects the current state.
			 */
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

			if (!(bmsr & BMSR_ANEGCOMPLETE) ||
			    bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)
				fw_link_status |= BNX2_LINK_STATUS_PARALLEL_DET;
			else
				fw_link_status |= BNX2_LINK_STATUS_AN_COMPLETE;
		}
	}
	else
		fw_link_status = BNX2_LINK_STATUS_LINK_DOWN;

	bnx2_shmem_wr(bp, BNX2_LINK_STATUS, fw_link_status);
}
959
960 static char *
961 bnx2_xceiver_str(struct bnx2 *bp)
962 {
963         return ((bp->phy_port == PORT_FIBRE) ? "SerDes" :
964                 ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) ? "Remote Copper" :
965                  "Copper"));
966 }
967
968 static void
969 bnx2_report_link(struct bnx2 *bp)
970 {
971         if (bp->link_up) {
972                 netif_carrier_on(bp->dev);
973                 printk(KERN_INFO PFX "%s NIC %s Link is Up, ", bp->dev->name,
974                        bnx2_xceiver_str(bp));
975
976                 printk("%d Mbps ", bp->line_speed);
977
978                 if (bp->duplex == DUPLEX_FULL)
979                         printk("full duplex");
980                 else
981                         printk("half duplex");
982
983                 if (bp->flow_ctrl) {
984                         if (bp->flow_ctrl & FLOW_CTRL_RX) {
985                                 printk(", receive ");
986                                 if (bp->flow_ctrl & FLOW_CTRL_TX)
987                                         printk("& transmit ");
988                         }
989                         else {
990                                 printk(", transmit ");
991                         }
992                         printk("flow control ON");
993                 }
994                 printk("\n");
995         }
996         else {
997                 netif_carrier_off(bp->dev);
998                 printk(KERN_ERR PFX "%s NIC %s Link is Down\n", bp->dev->name,
999                        bnx2_xceiver_str(bp));
1000         }
1001
1002         bnx2_report_fw_link(bp);
1003 }
1004
/* Resolve the effective flow-control setting (bp->flow_ctrl) from the
 * forced configuration or from the autoneg advertisement exchange.
 * Pause is only ever enabled in full duplex.
 */
static void
bnx2_resolve_flow_ctrl(struct bnx2 *bp)
{
	u32 local_adv, remote_adv;

	bp->flow_ctrl = 0;
	/* If either speed or flow control is not autonegotiated, use
	 * the administratively requested setting.
	 */
	if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		(AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {

		if (bp->duplex == DUPLEX_FULL) {
			bp->flow_ctrl = bp->req_flow_ctrl;
		}
		return;
	}

	if (bp->duplex != DUPLEX_FULL) {
		return;
	}

	/* 5708 SerDes reports the already-resolved pause result
	 * directly in the 1000X status register.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5708)) {
		u32 val;

		bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
		if (val & BCM5708S_1000X_STAT1_TX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_TX;
		if (val & BCM5708S_1000X_STAT1_RX_PAUSE)
			bp->flow_ctrl |= FLOW_CTRL_RX;
		return;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* For other SerDes PHYs, translate the 1000X pause bits into
	 * the copper-format bits so the resolution below is shared.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		u32 new_local_adv = 0;
		u32 new_remote_adv = 0;

		if (local_adv & ADVERTISE_1000XPAUSE)
			new_local_adv |= ADVERTISE_PAUSE_CAP;
		if (local_adv & ADVERTISE_1000XPSE_ASYM)
			new_local_adv |= ADVERTISE_PAUSE_ASYM;
		if (remote_adv & ADVERTISE_1000XPAUSE)
			new_remote_adv |= ADVERTISE_PAUSE_CAP;
		if (remote_adv & ADVERTISE_1000XPSE_ASYM)
			new_remote_adv |= ADVERTISE_PAUSE_ASYM;

		local_adv = new_local_adv;
		remote_adv = new_remote_adv;
	}

	/* See Table 28B-3 of 802.3ab-1999 spec. */
	if (local_adv & ADVERTISE_PAUSE_CAP) {
		if(local_adv & ADVERTISE_PAUSE_ASYM) {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
			else if (remote_adv & ADVERTISE_PAUSE_ASYM) {
				bp->flow_ctrl = FLOW_CTRL_RX;
			}
		}
		else {
			if (remote_adv & ADVERTISE_PAUSE_CAP) {
				bp->flow_ctrl = FLOW_CTRL_TX | FLOW_CTRL_RX;
			}
		}
	}
	else if (local_adv & ADVERTISE_PAUSE_ASYM) {
		if ((remote_adv & ADVERTISE_PAUSE_CAP) &&
			(remote_adv & ADVERTISE_PAUSE_ASYM)) {

			bp->flow_ctrl = FLOW_CTRL_TX;
		}
	}
}
1080
/* Fill in bp->line_speed and bp->duplex for a link-up event on the
 * 5709 SerDes PHY by reading the resolved state from the GP_STATUS
 * block.  Always returns 0.
 */
static int
bnx2_5709s_linkup(struct bnx2 *bp)
{
	u32 val, speed;

	bp->link_up = 1;

	/* GP_TOP_AN_STATUS1 lives in the GP_STATUS block; switch the
	 * block address around the read and restore it afterwards.
	 */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_GP_STATUS);
	bnx2_read_phy(bp, MII_BNX2_GP_TOP_AN_STATUS1, &val);
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	/* Forced speed: report the requested settings verbatim. */
	if ((bp->autoneg & AUTONEG_SPEED) == 0) {
		bp->line_speed = bp->req_line_speed;
		bp->duplex = bp->req_duplex;
		return 0;
	}
	speed = val & MII_BNX2_GP_TOP_AN_SPEED_MSK;
	switch (speed) {
		case MII_BNX2_GP_TOP_AN_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_1G:
		case MII_BNX2_GP_TOP_AN_SPEED_1GKV:
			bp->line_speed = SPEED_1000;
			break;
		case MII_BNX2_GP_TOP_AN_SPEED_2_5G:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & MII_BNX2_GP_TOP_AN_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;
	return 0;
}
1119
/* Fill in bp->line_speed and bp->duplex for a link-up event on the
 * 5708 SerDes PHY from the 1000X status register.  Always returns 0.
 */
static int
bnx2_5708s_linkup(struct bnx2 *bp)
{
	u32 val;

	bp->link_up = 1;
	bnx2_read_phy(bp, BCM5708S_1000X_STAT1, &val);
	switch (val & BCM5708S_1000X_STAT1_SPEED_MASK) {
		case BCM5708S_1000X_STAT1_SPEED_10:
			bp->line_speed = SPEED_10;
			break;
		case BCM5708S_1000X_STAT1_SPEED_100:
			bp->line_speed = SPEED_100;
			break;
		case BCM5708S_1000X_STAT1_SPEED_1G:
			bp->line_speed = SPEED_1000;
			break;
		case BCM5708S_1000X_STAT1_SPEED_2G5:
			bp->line_speed = SPEED_2500;
			break;
	}
	if (val & BCM5708S_1000X_STAT1_FD)
		bp->duplex = DUPLEX_FULL;
	else
		bp->duplex = DUPLEX_HALF;

	return 0;
}
1148
/* Fill in bp->line_speed and bp->duplex for a link-up event on the
 * 5706 SerDes PHY.  The 5706S only links at 1000 Mbps; duplex comes
 * from BMCR when forced, or from the common 1000X advertisement bits
 * when autonegotiated.  Always returns 0.
 */
static int
bnx2_5706s_linkup(struct bnx2 *bp)
{
	u32 bmcr, local_adv, remote_adv, common;

	bp->link_up = 1;
	bp->line_speed = SPEED_1000;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_FULLDPLX) {
		bp->duplex = DUPLEX_FULL;
	}
	else {
		bp->duplex = DUPLEX_HALF;
	}

	/* Forced mode: BMCR duplex is authoritative. */
	if (!(bmcr & BMCR_ANENABLE)) {
		return 0;
	}

	bnx2_read_phy(bp, bp->mii_adv, &local_adv);
	bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

	/* Resolve duplex from the abilities both ends advertised. */
	common = local_adv & remote_adv;
	if (common & (ADVERTISE_1000XHALF | ADVERTISE_1000XFULL)) {

		if (common & ADVERTISE_1000XFULL) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1185
/* Fill in bp->line_speed and bp->duplex for a link-up event on a
 * copper PHY.  With autoneg enabled, the best common ability wins
 * (1000 before 100 before 10, full before half); otherwise the forced
 * BMCR settings are reported.  Always returns 0.
 */
static int
bnx2_copper_linkup(struct bnx2 *bp)
{
	u32 bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	if (bmcr & BMCR_ANENABLE) {
		u32 local_adv, remote_adv, common;

		/* Gigabit abilities: CTRL1000 holds what we advertise,
		 * STAT1000 what the partner advertises.
		 */
		bnx2_read_phy(bp, MII_CTRL1000, &local_adv);
		bnx2_read_phy(bp, MII_STAT1000, &remote_adv);

		/* Partner bits in STAT1000 sit two positions above the
		 * CTRL1000 advertisement bits; shift to align them.
		 */
		common = local_adv & (remote_adv >> 2);
		if (common & ADVERTISE_1000FULL) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_FULL;
		}
		else if (common & ADVERTISE_1000HALF) {
			bp->line_speed = SPEED_1000;
			bp->duplex = DUPLEX_HALF;
		}
		else {
			/* No gigabit match; fall back to 10/100 bits. */
			bnx2_read_phy(bp, bp->mii_adv, &local_adv);
			bnx2_read_phy(bp, bp->mii_lpa, &remote_adv);

			common = local_adv & remote_adv;
			if (common & ADVERTISE_100FULL) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_100HALF) {
				bp->line_speed = SPEED_100;
				bp->duplex = DUPLEX_HALF;
			}
			else if (common & ADVERTISE_10FULL) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_FULL;
			}
			else if (common & ADVERTISE_10HALF) {
				bp->line_speed = SPEED_10;
				bp->duplex = DUPLEX_HALF;
			}
			else {
				/* No common ability at all: treat the
				 * link as down.
				 */
				bp->line_speed = 0;
				bp->link_up = 0;
			}
		}
	}
	else {
		/* Forced mode: decode speed/duplex from BMCR. */
		if (bmcr & BMCR_SPEED100) {
			bp->line_speed = SPEED_100;
		}
		else {
			bp->line_speed = SPEED_10;
		}
		if (bmcr & BMCR_FULLDPLX) {
			bp->duplex = DUPLEX_FULL;
		}
		else {
			bp->duplex = DUPLEX_HALF;
		}
	}

	return 0;
}
1251
/* Program the L2 context type word for one rx ring context.  On the
 * 5709 this also encodes the pause watermarks, which are derived from
 * the rx ring size and scaled to the 4-bit hardware fields.
 */
static void
bnx2_init_rx_context(struct bnx2 *bp, u32 cid)
{
	u32 val, rx_cid_addr = GET_CID_ADDR(cid);

	val = BNX2_L2CTX_CTX_TYPE_CTX_BD_CHN_TYPE_VALUE;
	val |= BNX2_L2CTX_CTX_TYPE_SIZE_L2;
	val |= 0x02 << 8;

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		u32 lo_water, hi_water;

		/* Low watermark only matters when we send pause frames. */
		if (bp->flow_ctrl & FLOW_CTRL_TX)
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DEFAULT;
		else
			lo_water = BNX2_L2CTX_LO_WATER_MARK_DIS;
		if (lo_water >= bp->rx_ring_size)
			lo_water = 0;

		hi_water = bp->rx_ring_size / 4;

		/* Keep the watermarks sane relative to each other. */
		if (hi_water <= lo_water)
			lo_water = 0;

		hi_water /= BNX2_L2CTX_HI_WATER_MARK_SCALE;
		lo_water /= BNX2_L2CTX_LO_WATER_MARK_SCALE;

		/* Clamp to the 4-bit field; a zero high watermark
		 * disables the low watermark too.
		 */
		if (hi_water > 0xf)
			hi_water = 0xf;
		else if (hi_water == 0)
			lo_water = 0;
		val |= lo_water | (hi_water << BNX2_L2CTX_HI_WATER_MARK_SHIFT);
	}
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_CTX_TYPE, val);
}
1287
1288 static void
1289 bnx2_init_all_rx_contexts(struct bnx2 *bp)
1290 {
1291         int i;
1292         u32 cid;
1293
1294         for (i = 0, cid = RX_CID; i < bp->num_rx_rings; i++, cid++) {
1295                 if (i == 1)
1296                         cid = RX_RSS_CID;
1297                 bnx2_init_rx_context(bp, cid);
1298         }
1299 }
1300
/* Program the EMAC to match the resolved link parameters: inter-frame
 * gap, port mode for the negotiated speed, duplex, and rx/tx pause
 * enables.  Also acknowledges the EMAC link-change interrupt and, on
 * the 5709, reprograms the rx-context pause watermarks.
 */
static void
bnx2_set_mac_link(struct bnx2 *bp)
{
	u32 val;

	REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x2620);
	/* 1000 Mbps half duplex needs longer slot/IPG timing. */
	if (bp->link_up && (bp->line_speed == SPEED_1000) &&
		(bp->duplex == DUPLEX_HALF)) {
		REG_WR(bp, BNX2_EMAC_TX_LENGTHS, 0x26ff);
	}

	/* Configure the EMAC mode register. */
	val = REG_RD(bp, BNX2_EMAC_MODE);

	val &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
		BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
		BNX2_EMAC_MODE_25G_MODE);

	if (bp->link_up) {
		switch (bp->line_speed) {
			case SPEED_10:
				/* 5706 has no dedicated 10M mode; use
				 * the plain MII setting instead.
				 */
				if (CHIP_NUM(bp) != CHIP_NUM_5706) {
					val |= BNX2_EMAC_MODE_PORT_MII_10M;
					break;
				}
				/* fall through */
			case SPEED_100:
				val |= BNX2_EMAC_MODE_PORT_MII;
				break;
			case SPEED_2500:
				val |= BNX2_EMAC_MODE_25G_MODE;
				/* fall through */
			case SPEED_1000:
				val |= BNX2_EMAC_MODE_PORT_GMII;
				break;
		}
	}
	else {
		val |= BNX2_EMAC_MODE_PORT_GMII;
	}

	/* Set the MAC to operate in the appropriate duplex mode. */
	if (bp->duplex == DUPLEX_HALF)
		val |= BNX2_EMAC_MODE_HALF_DUPLEX;
	REG_WR(bp, BNX2_EMAC_MODE, val);

	/* Enable/disable rx PAUSE. */
	bp->rx_mode &= ~BNX2_EMAC_RX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_RX)
		bp->rx_mode |= BNX2_EMAC_RX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_RX_MODE, bp->rx_mode);

	/* Enable/disable tx PAUSE. */
	val = REG_RD(bp, BNX2_EMAC_TX_MODE);
	val &= ~BNX2_EMAC_TX_MODE_FLOW_EN;

	if (bp->flow_ctrl & FLOW_CTRL_TX)
		val |= BNX2_EMAC_TX_MODE_FLOW_EN;
	REG_WR(bp, BNX2_EMAC_TX_MODE, val);

	/* Acknowledge the interrupt. */
	REG_WR(bp, BNX2_EMAC_STATUS, BNX2_EMAC_STATUS_LINK_CHANGE);

	/* 5709 watermarks depend on the new flow-control setting. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_init_all_rx_contexts(bp);
}
1368
1369 static void
1370 bnx2_enable_bmsr1(struct bnx2 *bp)
1371 {
1372         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1373             (CHIP_NUM(bp) == CHIP_NUM_5709))
1374                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1375                                MII_BNX2_BLK_ADDR_GP_STATUS);
1376 }
1377
1378 static void
1379 bnx2_disable_bmsr1(struct bnx2 *bp)
1380 {
1381         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
1382             (CHIP_NUM(bp) == CHIP_NUM_5709))
1383                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1384                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1385 }
1386
/* Enable 2.5G advertisement in the PHY's UP1 register if it is not
 * already enabled.  Returns 1 if 2.5G was already enabled (nothing
 * changed), 0 if this call had to enable it — callers use a 0 return
 * to force the link down so the new advertisement takes effect.
 */
static int
bnx2_test_and_enable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 1;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	if (bp->autoneg & AUTONEG_SPEED)
		bp->advertising |= ADVERTISED_2500baseX_Full;

	/* On 5709 the UP1 register lives in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (!(up1 & BCM5708S_UP1_2G5)) {
		up1 |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 0;
	}

	/* Restore the default block address on 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1415
/* Disable 2.5G advertisement in the PHY's UP1 register if currently
 * enabled.  Returns 1 if this call actually disabled it (so callers
 * can force the link down to renegotiate), 0 if it was already off.
 */
static int
bnx2_test_and_disable_2g5(struct bnx2 *bp)
{
	u32 up1;
	int ret = 0;

	if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
		return 0;

	/* On 5709 the UP1 register lives in the OVER1G block. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);

	bnx2_read_phy(bp, bp->mii_up1, &up1);
	if (up1 & BCM5708S_UP1_2G5) {
		up1 &= ~BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, bp->mii_up1, up1);
		ret = 1;
	}

	/* Restore the default block address on 5709. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
			       MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return ret;
}
1441
1442 static void
1443 bnx2_enable_forced_2g5(struct bnx2 *bp)
1444 {
1445         u32 bmcr;
1446
1447         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1448                 return;
1449
1450         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1451                 u32 val;
1452
1453                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1454                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1455                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1456                 val &= ~MII_BNX2_SD_MISC1_FORCE_MSK;
1457                 val |= MII_BNX2_SD_MISC1_FORCE | MII_BNX2_SD_MISC1_FORCE_2_5G;
1458                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1459
1460                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1461                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1462                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1463
1464         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1465                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1466                 bmcr |= BCM5708S_BMCR_FORCE_2500;
1467         }
1468
1469         if (bp->autoneg & AUTONEG_SPEED) {
1470                 bmcr &= ~BMCR_ANENABLE;
1471                 if (bp->req_duplex == DUPLEX_FULL)
1472                         bmcr |= BMCR_FULLDPLX;
1473         }
1474         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1475 }
1476
1477 static void
1478 bnx2_disable_forced_2g5(struct bnx2 *bp)
1479 {
1480         u32 bmcr;
1481
1482         if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
1483                 return;
1484
1485         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
1486                 u32 val;
1487
1488                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1489                                MII_BNX2_BLK_ADDR_SERDES_DIG);
1490                 bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_MISC1, &val);
1491                 val &= ~MII_BNX2_SD_MISC1_FORCE;
1492                 bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_MISC1, val);
1493
1494                 bnx2_write_phy(bp, MII_BNX2_BLK_ADDR,
1495                                MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
1496                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1497
1498         } else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
1499                 bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
1500                 bmcr &= ~BCM5708S_BMCR_FORCE_2500;
1501         }
1502
1503         if (bp->autoneg & AUTONEG_SPEED)
1504                 bmcr |= BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_ANRESTART;
1505         bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
1506 }
1507
1508 static void
1509 bnx2_5706s_force_link_dn(struct bnx2 *bp, int start)
1510 {
1511         u32 val;
1512
1513         bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_SERDES_CTL);
1514         bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
1515         if (start)
1516                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val & 0xff0f);
1517         else
1518                 bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val | 0xc0);
1519 }
1520
/* Re-evaluate the link state: read link status from the PHY, resolve
 * speed/duplex/flow-control on link up, handle the link-down recovery
 * cases, report any change, and reprogram the MAC.  Always returns 0.
 * Must be called with bp->phy_lock held.
 */
static int
bnx2_set_link(struct bnx2 *bp)
{
	u32 bmsr;
	u8 link_up;

	/* In loopback the link is up by definition. */
	if (bp->loopback == MAC_LOOPBACK || bp->loopback == PHY_LOOPBACK) {
		bp->link_up = 1;
		return 0;
	}

	/* Remote-PHY firmware owns link management. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return 0;

	link_up = bp->link_up;

	/* BMSR is latched; read twice to get the current state. */
	bnx2_enable_bmsr1(bp);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
	bnx2_disable_bmsr1(bp);

	/* 5706 SerDes: the PHY's link bit is unreliable; derive link
	 * state from the EMAC status and the AN debug shadow instead.
	 */
	if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
	    (CHIP_NUM(bp) == CHIP_NUM_5706)) {
		u32 val, an_dbg;

		if (bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN) {
			bnx2_5706s_force_link_dn(bp, 0);
			bp->phy_flags &= ~BNX2_PHY_FLAG_FORCED_DOWN;
		}
		val = REG_RD(bp, BNX2_EMAC_STATUS);

		/* Shadow register is latched as well; read twice. */
		bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
		bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

		if ((val & BNX2_EMAC_STATUS_LINK) &&
		    !(an_dbg & MISC_SHDW_AN_DBG_NOSYNC))
			bmsr |= BMSR_LSTATUS;
		else
			bmsr &= ~BMSR_LSTATUS;
	}

	if (bmsr & BMSR_LSTATUS) {
		bp->link_up = 1;

		/* Resolve speed/duplex with the chip-specific helper. */
		if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
			if (CHIP_NUM(bp) == CHIP_NUM_5706)
				bnx2_5706s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5708)
				bnx2_5708s_linkup(bp);
			else if (CHIP_NUM(bp) == CHIP_NUM_5709)
				bnx2_5709s_linkup(bp);
		}
		else {
			bnx2_copper_linkup(bp);
		}
		bnx2_resolve_flow_ctrl(bp);
	}
	else {
		/* Link down: drop any forced 2.5G so autoneg can try
		 * all speeds again.
		 */
		if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
		    (bp->autoneg & AUTONEG_SPEED))
			bnx2_disable_forced_2g5(bp);

		/* Leave parallel-detect mode and re-enable autoneg. */
		if (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT) {
			u32 bmcr;

			bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
			bmcr |= BMCR_ANENABLE;
			bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

			bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
		}
		bp->link_up = 0;
	}

	if (bp->link_up != link_up) {
		bnx2_report_link(bp);
	}

	bnx2_set_mac_link(bp);

	return 0;
}
1604
1605 static int
1606 bnx2_reset_phy(struct bnx2 *bp)
1607 {
1608         int i;
1609         u32 reg;
1610
1611         bnx2_write_phy(bp, bp->mii_bmcr, BMCR_RESET);
1612
1613 #define PHY_RESET_MAX_WAIT 100
1614         for (i = 0; i < PHY_RESET_MAX_WAIT; i++) {
1615                 udelay(10);
1616
1617                 bnx2_read_phy(bp, bp->mii_bmcr, &reg);
1618                 if (!(reg & BMCR_RESET)) {
1619                         udelay(20);
1620                         break;
1621                 }
1622         }
1623         if (i == PHY_RESET_MAX_WAIT) {
1624                 return -EBUSY;
1625         }
1626         return 0;
1627 }
1628
1629 static u32
1630 bnx2_phy_get_pause_adv(struct bnx2 *bp)
1631 {
1632         u32 adv = 0;
1633
1634         if ((bp->req_flow_ctrl & (FLOW_CTRL_RX | FLOW_CTRL_TX)) ==
1635                 (FLOW_CTRL_RX | FLOW_CTRL_TX)) {
1636
1637                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1638                         adv = ADVERTISE_1000XPAUSE;
1639                 }
1640                 else {
1641                         adv = ADVERTISE_PAUSE_CAP;
1642                 }
1643         }
1644         else if (bp->req_flow_ctrl & FLOW_CTRL_TX) {
1645                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1646                         adv = ADVERTISE_1000XPSE_ASYM;
1647                 }
1648                 else {
1649                         adv = ADVERTISE_PAUSE_ASYM;
1650                 }
1651         }
1652         else if (bp->req_flow_ctrl & FLOW_CTRL_RX) {
1653                 if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1654                         adv = ADVERTISE_1000XPAUSE | ADVERTISE_1000XPSE_ASYM;
1655                 }
1656                 else {
1657                         adv = ADVERTISE_PAUSE_CAP | ADVERTISE_PAUSE_ASYM;
1658                 }
1659         }
1660         return adv;
1661 }
1662
1663 static int bnx2_fw_sync(struct bnx2 *, u32, int, int);
1664
/* Configure link settings when the PHY is managed by remote firmware:
 * build the netlink-style speed/pause argument word, write it to the
 * DRV_MB_ARG0 shared-memory slot, and ask the firmware to apply it
 * via a SET_LINK command.  bp->phy_lock is dropped around the
 * firmware sync (which sleeps) and re-acquired, as the sparse
 * annotations indicate.  Always returns 0.
 */
static int
bnx2_setup_remote_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 speed_arg = 0, pause_adv;

	pause_adv = bnx2_phy_get_pause_adv(bp);

	if (bp->autoneg & AUTONEG_SPEED) {
		/* Autoneg: advertise every configured speed. */
		speed_arg |= BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG;
		if (bp->advertising & ADVERTISED_10baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		if (bp->advertising & ADVERTISED_2500baseX_Full)
			speed_arg |= BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
	} else {
		/* Forced speed: encode the single requested setting. */
		if (bp->req_line_speed == SPEED_2500)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_2G5FULL;
		else if (bp->req_line_speed == SPEED_1000)
			speed_arg = BNX2_NETLINK_SET_LINK_SPEED_1GFULL;
		else if (bp->req_line_speed == SPEED_100) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_100HALF;
		} else if (bp->req_line_speed == SPEED_10) {
			if (bp->req_duplex == DUPLEX_FULL)
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10FULL;
			else
				speed_arg = BNX2_NETLINK_SET_LINK_SPEED_10HALF;
		}
	}

	/* Map the pause advertisement onto the firmware's flags. */
	if (pause_adv & (ADVERTISE_1000XPAUSE | ADVERTISE_PAUSE_CAP))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_SYM_PAUSE;
	if (pause_adv & (ADVERTISE_1000XPSE_ASYM | ADVERTISE_PAUSE_ASYM))
		speed_arg |= BNX2_NETLINK_SET_LINK_FC_ASYM_PAUSE;

	if (port == PORT_TP)
		speed_arg |= BNX2_NETLINK_SET_LINK_PHY_APP_REMOTE |
			     BNX2_NETLINK_SET_LINK_ETH_AT_WIRESPEED;

	bnx2_shmem_wr(bp, BNX2_DRV_MB_ARG0, speed_arg);

	/* bnx2_fw_sync() may sleep; cannot hold the phy_lock across it. */
	spin_unlock_bh(&bp->phy_lock);
	bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_CMD_SET_LINK, 1, 0);
	spin_lock_bh(&bp->phy_lock);

	return 0;
}
1723
/* Configure the SerDes PHY for either forced speed or autonegotiation.
 *
 * Called with bp->phy_lock held; the lock is dropped around the msleep()
 * used to make a forced link-down visible to the link partner.
 *
 * @bp:   device context
 * @port: PORT_TP or PORT_FIBRE (only consumed by the remote-PHY path)
 *
 * Returns 0 (PHY read/write results are not propagated).
 */
static int
bnx2_setup_serdes_phy(struct bnx2 *bp, u8 port)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 adv, bmcr;
	u32 new_adv = 0;

	/* Firmware-managed (remote) PHYs are configured via the shmem
	 * mailbox instead of direct MII access.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
		return (bnx2_setup_remote_phy(bp, port));

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		/* Forced-speed path: program BMCR directly. */
		u32 new_bmcr;
		int force_link_down = 0;

		if (bp->req_line_speed == SPEED_2500) {
			if (!bnx2_test_and_enable_2g5(bp))
				force_link_down = 1;
		} else if (bp->req_line_speed == SPEED_1000) {
			if (bnx2_test_and_disable_2g5(bp))
				force_link_down = 1;
		}
		bnx2_read_phy(bp, bp->mii_adv, &adv);
		adv &= ~(ADVERTISE_1000XFULL | ADVERTISE_1000XHALF);

		bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
		new_bmcr = bmcr & ~BMCR_ANENABLE;
		new_bmcr |= BMCR_SPEED1000;

		/* 2.5G forcing is chip-specific. */
		if (CHIP_NUM(bp) == CHIP_NUM_5709) {
			if (bp->req_line_speed == SPEED_2500)
				bnx2_enable_forced_2g5(bp);
			else if (bp->req_line_speed == SPEED_1000) {
				bnx2_disable_forced_2g5(bp);
				new_bmcr &= ~0x2000;
			}

		} else if (CHIP_NUM(bp) == CHIP_NUM_5708) {
			if (bp->req_line_speed == SPEED_2500)
				new_bmcr |= BCM5708S_BMCR_FORCE_2500;
			else
				new_bmcr = bmcr & ~BCM5708S_BMCR_FORCE_2500;
		}

		if (bp->req_duplex == DUPLEX_FULL) {
			adv |= ADVERTISE_1000XFULL;
			new_bmcr |= BMCR_FULLDPLX;
		}
		else {
			adv |= ADVERTISE_1000XHALF;
			new_bmcr &= ~BMCR_FULLDPLX;
		}
		if ((new_bmcr != bmcr) || (force_link_down)) {
			/* Force a link down visible on the other side */
			if (bp->link_up) {
				bnx2_write_phy(bp, bp->mii_adv, adv &
					       ~(ADVERTISE_1000XFULL |
						 ADVERTISE_1000XHALF));
				bnx2_write_phy(bp, bp->mii_bmcr, bmcr |
					BMCR_ANRESTART | BMCR_ANENABLE);

				bp->link_up = 0;
				netif_carrier_off(bp->dev);
				bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
				bnx2_report_link(bp);
			}
			bnx2_write_phy(bp, bp->mii_adv, adv);
			bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);
		} else {
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Autonegotiation path. */
	bnx2_test_and_enable_2g5(bp);

	if (bp->advertising & ADVERTISED_1000baseT_Full)
		new_adv |= ADVERTISE_1000XFULL;

	new_adv |= bnx2_phy_get_pause_adv(bp);

	bnx2_read_phy(bp, bp->mii_adv, &adv);
	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	bp->serdes_an_pending = 0;
	/* Restart autoneg only if the advertisement changed or autoneg
	 * is not currently enabled.
	 */
	if ((adv != new_adv) || ((bmcr & BMCR_ANENABLE) == 0)) {
		/* Force a link down visible on the other side */
		if (bp->link_up) {
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			/* Drop the lock while waiting for the partner to
			 * notice the link drop.
			 */
			spin_unlock_bh(&bp->phy_lock);
			msleep(20);
			spin_lock_bh(&bp->phy_lock);
		}

		bnx2_write_phy(bp, bp->mii_adv, new_adv);
		bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART |
			BMCR_ANENABLE);
		/* Speed up link-up time when the link partner
		 * does not autonegotiate which is very common
		 * in blade servers. Some blade servers use
		 * IPMI for keyboard input and it's important
		 * to minimize link disruptions. Autoneg. involves
		 * exchanging base pages plus 3 next pages and
		 * normally completes in about 120 msec.
		 */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}

	return 0;
}
1840
/* Fibre speeds to advertise; includes 2.5G only on 2.5G-capable PHYs.
 * NOTE: this macro expands a local variable named "bp".
 */
#define ETHTOOL_ALL_FIBRE_SPEED						\
	(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ?			\
		(ADVERTISED_2500baseX_Full | ADVERTISED_1000baseT_Full) :\
		(ADVERTISED_1000baseT_Full)

/* All copper speeds the driver advertises by default (ethtool bits). */
#define ETHTOOL_ALL_COPPER_SPEED					\
	(ADVERTISED_10baseT_Half | ADVERTISED_10baseT_Full |		\
	ADVERTISED_100baseT_Half | ADVERTISED_100baseT_Full |		\
	ADVERTISED_1000baseT_Full)

/* Masks of speed bits in the MII advertisement registers. */
#define PHY_ALL_10_100_SPEED (ADVERTISE_10HALF | ADVERTISE_10FULL | \
	ADVERTISE_100HALF | ADVERTISE_100FULL | ADVERTISE_CSMA)

#define PHY_ALL_1000_SPEED (ADVERTISE_1000HALF | ADVERTISE_1000FULL)
1855
1856 static void
1857 bnx2_set_default_remote_link(struct bnx2 *bp)
1858 {
1859         u32 link;
1860
1861         if (bp->phy_port == PORT_TP)
1862                 link = bnx2_shmem_rd(bp, BNX2_RPHY_COPPER_LINK);
1863         else
1864                 link = bnx2_shmem_rd(bp, BNX2_RPHY_SERDES_LINK);
1865
1866         if (link & BNX2_NETLINK_SET_LINK_ENABLE_AUTONEG) {
1867                 bp->req_line_speed = 0;
1868                 bp->autoneg |= AUTONEG_SPEED;
1869                 bp->advertising = ADVERTISED_Autoneg;
1870                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1871                         bp->advertising |= ADVERTISED_10baseT_Half;
1872                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10FULL)
1873                         bp->advertising |= ADVERTISED_10baseT_Full;
1874                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1875                         bp->advertising |= ADVERTISED_100baseT_Half;
1876                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100FULL)
1877                         bp->advertising |= ADVERTISED_100baseT_Full;
1878                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1879                         bp->advertising |= ADVERTISED_1000baseT_Full;
1880                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1881                         bp->advertising |= ADVERTISED_2500baseX_Full;
1882         } else {
1883                 bp->autoneg = 0;
1884                 bp->advertising = 0;
1885                 bp->req_duplex = DUPLEX_FULL;
1886                 if (link & BNX2_NETLINK_SET_LINK_SPEED_10) {
1887                         bp->req_line_speed = SPEED_10;
1888                         if (link & BNX2_NETLINK_SET_LINK_SPEED_10HALF)
1889                                 bp->req_duplex = DUPLEX_HALF;
1890                 }
1891                 if (link & BNX2_NETLINK_SET_LINK_SPEED_100) {
1892                         bp->req_line_speed = SPEED_100;
1893                         if (link & BNX2_NETLINK_SET_LINK_SPEED_100HALF)
1894                                 bp->req_duplex = DUPLEX_HALF;
1895                 }
1896                 if (link & BNX2_NETLINK_SET_LINK_SPEED_1GFULL)
1897                         bp->req_line_speed = SPEED_1000;
1898                 if (link & BNX2_NETLINK_SET_LINK_SPEED_2G5FULL)
1899                         bp->req_line_speed = SPEED_2500;
1900         }
1901 }
1902
1903 static void
1904 bnx2_set_default_link(struct bnx2 *bp)
1905 {
1906         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
1907                 bnx2_set_default_remote_link(bp);
1908                 return;
1909         }
1910
1911         bp->autoneg = AUTONEG_SPEED | AUTONEG_FLOW_CTRL;
1912         bp->req_line_speed = 0;
1913         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
1914                 u32 reg;
1915
1916                 bp->advertising = ETHTOOL_ALL_FIBRE_SPEED | ADVERTISED_Autoneg;
1917
1918                 reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG);
1919                 reg &= BNX2_PORT_HW_CFG_CFG_DFLT_LINK_MASK;
1920                 if (reg == BNX2_PORT_HW_CFG_CFG_DFLT_LINK_1G) {
1921                         bp->autoneg = 0;
1922                         bp->req_line_speed = bp->line_speed = SPEED_1000;
1923                         bp->req_duplex = DUPLEX_FULL;
1924                 }
1925         } else
1926                 bp->advertising = ETHTOOL_ALL_COPPER_SPEED | ADVERTISED_Autoneg;
1927 }
1928
1929 static void
1930 bnx2_send_heart_beat(struct bnx2 *bp)
1931 {
1932         u32 msg;
1933         u32 addr;
1934
1935         spin_lock(&bp->indirect_lock);
1936         msg = (u32) (++bp->fw_drv_pulse_wr_seq & BNX2_DRV_PULSE_SEQ_MASK);
1937         addr = bp->shmem_base + BNX2_DRV_PULSE_MB;
1938         REG_WR(bp, BNX2_PCICFG_REG_WINDOW_ADDRESS, addr);
1939         REG_WR(bp, BNX2_PCICFG_REG_WINDOW, msg);
1940         spin_unlock(&bp->indirect_lock);
1941 }
1942
/* Handle a link-status event reported by a firmware-managed (remote) PHY.
 *
 * Decodes the BNX2_LINK_STATUS shmem word into bp->link_up, line_speed,
 * duplex, flow_ctrl and phy_port, reports any link change, and
 * reprograms the MAC to match.
 */
static void
bnx2_remote_phy_event(struct bnx2 *bp)
{
	u32 msg;
	u8 link_up = bp->link_up;	/* old state, to detect a change below */
	u8 old_port;

	msg = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);

	/* Firmware requests a driver pulse via this status bit. */
	if (msg & BNX2_LINK_STATUS_HEART_BEAT_EXPIRED)
		bnx2_send_heart_beat(bp);

	msg &= ~BNX2_LINK_STATUS_HEART_BEAT_EXPIRED;

	if ((msg & BNX2_LINK_STATUS_LINK_UP) == BNX2_LINK_STATUS_LINK_DOWN)
		bp->link_up = 0;
	else {
		u32 speed;

		bp->link_up = 1;
		speed = msg & BNX2_LINK_STATUS_SPEED_MASK;
		bp->duplex = DUPLEX_FULL;
		/* Each *HALF case intentionally falls through to the
		 * matching *FULL case: it only overrides the duplex,
		 * the speed assignment is shared.
		 */
		switch (speed) {
			case BNX2_LINK_STATUS_10HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_10FULL:
				bp->line_speed = SPEED_10;
				break;
			case BNX2_LINK_STATUS_100HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_100BASE_T4:
			case BNX2_LINK_STATUS_100FULL:
				bp->line_speed = SPEED_100;
				break;
			case BNX2_LINK_STATUS_1000HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_1000FULL:
				bp->line_speed = SPEED_1000;
				break;
			case BNX2_LINK_STATUS_2500HALF:
				bp->duplex = DUPLEX_HALF;
				/* fall through */
			case BNX2_LINK_STATUS_2500FULL:
				bp->line_speed = SPEED_2500;
				break;
			default:
				bp->line_speed = 0;
				break;
		}

		/* Flow control is forced unless both speed and flow
		 * control are autonegotiated; otherwise take the
		 * negotiated result from the status word.
		 */
		bp->flow_ctrl = 0;
		if ((bp->autoneg & (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) !=
		    (AUTONEG_SPEED | AUTONEG_FLOW_CTRL)) {
			if (bp->duplex == DUPLEX_FULL)
				bp->flow_ctrl = bp->req_flow_ctrl;
		} else {
			if (msg & BNX2_LINK_STATUS_TX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_TX;
			if (msg & BNX2_LINK_STATUS_RX_FC_ENABLED)
				bp->flow_ctrl |= FLOW_CTRL_RX;
		}

		/* The firmware may switch the port between SerDes and TP;
		 * reload the link defaults when that happens.
		 */
		old_port = bp->phy_port;
		if (msg & BNX2_LINK_STATUS_SERDES_LINK)
			bp->phy_port = PORT_FIBRE;
		else
			bp->phy_port = PORT_TP;

		if (old_port != bp->phy_port)
			bnx2_set_default_link(bp);

	}
	if (bp->link_up != link_up)
		bnx2_report_link(bp);

	bnx2_set_mac_link(bp);
}
2019
2020 static int
2021 bnx2_set_remote_link(struct bnx2 *bp)
2022 {
2023         u32 evt_code;
2024
2025         evt_code = bnx2_shmem_rd(bp, BNX2_FW_EVT_CODE_MB);
2026         switch (evt_code) {
2027                 case BNX2_FW_EVT_CODE_LINK_EVENT:
2028                         bnx2_remote_phy_event(bp);
2029                         break;
2030                 case BNX2_FW_EVT_CODE_SW_TIMER_EXPIRATION_EVENT:
2031                 default:
2032                         bnx2_send_heart_beat(bp);
2033                         break;
2034         }
2035         return 0;
2036 }
2037
/* Configure the copper PHY for autonegotiation or forced speed/duplex.
 *
 * Called with bp->phy_lock held; the lock is dropped around the msleep()
 * used to make a forced link-down visible to the link partner.
 *
 * Returns 0 (PHY access errors are not propagated).
 */
static int
bnx2_setup_copper_phy(struct bnx2 *bp)
__releases(&bp->phy_lock)
__acquires(&bp->phy_lock)
{
	u32 bmcr;
	u32 new_bmcr;

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

	if (bp->autoneg & AUTONEG_SPEED) {
		u32 adv_reg, adv1000_reg;
		u32 new_adv_reg = 0;
		u32 new_adv1000_reg = 0;

		/* Keep only the speed and pause bits of the current
		 * advertisement so it can be compared with the new one.
		 */
		bnx2_read_phy(bp, bp->mii_adv, &adv_reg);
		adv_reg &= (PHY_ALL_10_100_SPEED | ADVERTISE_PAUSE_CAP |
			ADVERTISE_PAUSE_ASYM);

		bnx2_read_phy(bp, MII_CTRL1000, &adv1000_reg);
		adv1000_reg &= PHY_ALL_1000_SPEED;

		/* Translate ethtool advertisement bits to MII bits. */
		if (bp->advertising & ADVERTISED_10baseT_Half)
			new_adv_reg |= ADVERTISE_10HALF;
		if (bp->advertising & ADVERTISED_10baseT_Full)
			new_adv_reg |= ADVERTISE_10FULL;
		if (bp->advertising & ADVERTISED_100baseT_Half)
			new_adv_reg |= ADVERTISE_100HALF;
		if (bp->advertising & ADVERTISED_100baseT_Full)
			new_adv_reg |= ADVERTISE_100FULL;
		if (bp->advertising & ADVERTISED_1000baseT_Full)
			new_adv1000_reg |= ADVERTISE_1000FULL;

		new_adv_reg |= ADVERTISE_CSMA;

		new_adv_reg |= bnx2_phy_get_pause_adv(bp);

		/* Restart autoneg only if the advertisement changed or
		 * autoneg is not currently enabled.
		 */
		if ((adv1000_reg != new_adv1000_reg) ||
			(adv_reg != new_adv_reg) ||
			((bmcr & BMCR_ANENABLE) == 0)) {

			bnx2_write_phy(bp, bp->mii_adv, new_adv_reg);
			bnx2_write_phy(bp, MII_CTRL1000, new_adv1000_reg);
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_ANRESTART |
				BMCR_ANENABLE);
		}
		else if (bp->link_up) {
			/* Flow ctrl may have changed from auto to forced */
			/* or vice-versa. */

			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
		return 0;
	}

	/* Forced speed/duplex path. */
	new_bmcr = 0;
	if (bp->req_line_speed == SPEED_100) {
		new_bmcr |= BMCR_SPEED100;
	}
	if (bp->req_duplex == DUPLEX_FULL) {
		new_bmcr |= BMCR_FULLDPLX;
	}
	if (new_bmcr != bmcr) {
		u32 bmsr;

		/* BMSR link status is latched; read twice to get the
		 * current state.
		 */
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);

		if (bmsr & BMSR_LSTATUS) {
			/* Force link down */
			bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
			spin_unlock_bh(&bp->phy_lock);
			msleep(50);
			spin_lock_bh(&bp->phy_lock);

			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
			bnx2_read_phy(bp, bp->mii_bmsr, &bmsr);
		}

		bnx2_write_phy(bp, bp->mii_bmcr, new_bmcr);

		/* Normally, the new speed is setup after the link has
		 * gone down and up again. In some cases, link will not go
		 * down so we need to set up the new speed here.
		 */
		if (bmsr & BMSR_LSTATUS) {
			bp->line_speed = bp->req_line_speed;
			bp->duplex = bp->req_duplex;
			bnx2_resolve_flow_ctrl(bp);
			bnx2_set_mac_link(bp);
		}
	} else {
		bnx2_resolve_flow_ctrl(bp);
		bnx2_set_mac_link(bp);
	}
	return 0;
}
2136
2137 static int
2138 bnx2_setup_phy(struct bnx2 *bp, u8 port)
2139 __releases(&bp->phy_lock)
2140 __acquires(&bp->phy_lock)
2141 {
2142         if (bp->loopback == MAC_LOOPBACK)
2143                 return 0;
2144
2145         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2146                 return (bnx2_setup_serdes_phy(bp, port));
2147         }
2148         else {
2149                 return (bnx2_setup_copper_phy(bp));
2150         }
2151 }
2152
/* Initialize the 5709 SerDes PHY.
 *
 * This PHY uses a paged (block-addressed) register layout and places the
 * standard MII registers at an offset of 0x10, so the driver's mii_*
 * shadow offsets are remapped first.
 *
 * Returns 0.
 */
static int
bnx2_init_5709s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	/* Standard MII registers are offset by 0x10 on this PHY. */
	bp->mii_bmcr = MII_BMCR + 0x10;
	bp->mii_bmsr = MII_BMSR + 0x10;
	bp->mii_bmsr1 = MII_BNX2_GP_TOP_AN_STATUS1;
	bp->mii_adv = MII_ADVERTISE + 0x10;
	bp->mii_lpa = MII_LPA + 0x10;
	bp->mii_up1 = MII_BNX2_OVER1G_UP1;

	/* Select the autoneg MMD through the AER block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_AER);
	bnx2_write_phy(bp, MII_BNX2_AER_AER, MII_BNX2_AER_AER_AN_MMD);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);
	if (reset_phy)
		bnx2_reset_phy(bp);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_SERDES_DIG);

	/* Force fiber mode and disable media auto-detection. */
	bnx2_read_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, &val);
	val &= ~MII_BNX2_SD_1000XCTL1_AUTODET;
	val |= MII_BNX2_SD_1000XCTL1_FIBER;
	bnx2_write_phy(bp, MII_BNX2_SERDES_DIG_1000XCTL1, val);

	/* Advertise 2.5G only when the board supports it. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_OVER1G);
	bnx2_read_phy(bp, MII_BNX2_OVER1G_UP1, &val);
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
		val |= BCM5708S_UP1_2G5;
	else
		val &= ~BCM5708S_UP1_2G5;
	bnx2_write_phy(bp, MII_BNX2_OVER1G_UP1, val);

	/* Enable BAM next-page exchange. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_BAM_NXTPG);
	bnx2_read_phy(bp, MII_BNX2_BAM_NXTPG_CTL, &val);
	val |= MII_BNX2_NXTPG_CTL_T2 | MII_BNX2_NXTPG_CTL_BAM;
	bnx2_write_phy(bp, MII_BNX2_BAM_NXTPG_CTL, val);

	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_CL73_USERB0);

	val = MII_BNX2_CL73_BAM_EN | MII_BNX2_CL73_BAM_STA_MGR_EN |
	      MII_BNX2_CL73_BAM_NP_AFT_BP_EN;
	bnx2_write_phy(bp, MII_BNX2_CL73_BAM_CTL1, val);

	/* Leave the block address pointing at the default IEEE block. */
	bnx2_write_phy(bp, MII_BNX2_BLK_ADDR, MII_BNX2_BLK_ADDR_COMBO_IEEEB0);

	return 0;
}
2202
/* Initialize the 5708 SerDes PHY, including chip-revision errata
 * workarounds and NVRAM-configured TX control overrides.
 *
 * Returns 0.
 */
static int
bnx2_init_5708s_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->mii_up1 = BCM5708S_UP1;

	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG3);
	bnx2_write_phy(bp, BCM5708S_DIG_3_0, BCM5708S_DIG_3_0_USE_IEEE);
	bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);

	/* Fiber mode with media auto-detection enabled. */
	bnx2_read_phy(bp, BCM5708S_1000X_CTL1, &val);
	val |= BCM5708S_1000X_CTL1_FIBER_MODE | BCM5708S_1000X_CTL1_AUTODET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL1, val);

	bnx2_read_phy(bp, BCM5708S_1000X_CTL2, &val);
	val |= BCM5708S_1000X_CTL2_PLLEL_DET_EN;
	bnx2_write_phy(bp, BCM5708S_1000X_CTL2, val);

	/* Advertise 2.5G when the board supports it. */
	if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) {
		bnx2_read_phy(bp, BCM5708S_UP1, &val);
		val |= BCM5708S_UP1_2G5;
		bnx2_write_phy(bp, BCM5708S_UP1, val);
	}

	/* Errata for early 5708 revisions. */
	if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_B1)) {
		/* increase tx signal amplitude */
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
			       BCM5708S_BLK_ADDR_TX_MISC);
		bnx2_read_phy(bp, BCM5708S_TX_ACTL1, &val);
		val &= ~BCM5708S_TX_ACTL1_DRIVER_VCM;
		bnx2_write_phy(bp, BCM5708S_TX_ACTL1, val);
		bnx2_write_phy(bp, BCM5708S_BLK_ADDR, BCM5708S_BLK_ADDR_DIG);
	}

	/* Apply any NVRAM-configured TX control value, but only on
	 * backplane designs.
	 */
	val = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_CONFIG) &
	      BNX2_PORT_HW_CFG_CFG_TXCTL3_MASK;

	if (val) {
		u32 is_backplane;

		is_backplane = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
		if (is_backplane & BNX2_SHARED_HW_CFG_PHY_BACKPLANE) {
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_TX_MISC);
			bnx2_write_phy(bp, BCM5708S_TX_ACTL3, val);
			bnx2_write_phy(bp, BCM5708S_BLK_ADDR,
				       BCM5708S_BLK_ADDR_DIG);
		}
	}
	return 0;
}
2260
/* Initialize the 5706 SerDes PHY.
 *
 * The accesses to PHY registers 0x18/0x1c appear to use Broadcom
 * shadow/expansion register indexing; the magic constants are
 * vendor-specified and select jumbo vs. standard frame settings
 * (meanings not documented here — confirm against Broadcom datasheet).
 *
 * Returns 0.
 */
static int
bnx2_init_5706s_phy(struct bnx2 *bp, int reset_phy)
{
	if (reset_phy)
		bnx2_reset_phy(bp);

	bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;

	if (CHIP_NUM(bp) == CHIP_NUM_5706)
		REG_WR(bp, BNX2_MISC_GP_HW_CTL0, 0x300);

	if (bp->dev->mtu > 1500) {
		u32 val;

		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, (val & 0xfff8) | 0x4000);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3ff) | 0xec02);
	}
	else {
		u32 val;

		/* Standard MTU: clear the extended packet length bit. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_write_phy(bp, 0x1c, 0x6c00);
		bnx2_read_phy(bp, 0x1c, &val);
		bnx2_write_phy(bp, 0x1c, (val & 0x3fd) | 0xec00);
	}

	return 0;
}
2298
/* Initialize the copper PHY: apply board-specific workarounds,
 * mtu-dependent settings, and enable ethernet@wirespeed.
 *
 * The raw writes to registers 0x10/0x15/0x17/0x18 appear to use
 * Broadcom shadow/expansion register indexing; the constants are
 * vendor-specified (confirm against Broadcom datasheet).
 *
 * Returns 0.
 */
static int
bnx2_init_copper_phy(struct bnx2 *bp, int reset_phy)
{
	u32 val;

	if (reset_phy)
		bnx2_reset_phy(bp);

	/* Vendor-specified workaround sequence for boards flagged with
	 * the CRC fix.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_CRC_FIX) {
		bnx2_write_phy(bp, 0x18, 0x0c00);
		bnx2_write_phy(bp, 0x17, 0x000a);
		bnx2_write_phy(bp, 0x15, 0x310b);
		bnx2_write_phy(bp, 0x17, 0x201f);
		bnx2_write_phy(bp, 0x15, 0x9506);
		bnx2_write_phy(bp, 0x17, 0x401f);
		bnx2_write_phy(bp, 0x15, 0x14e2);
		bnx2_write_phy(bp, 0x18, 0x0400);
	}

	/* Clear bit 8 of DSP expand register 8 where early DAC must be
	 * disabled.
	 */
	if (bp->phy_flags & BNX2_PHY_FLAG_DIS_EARLY_DAC) {
		bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS,
			       MII_BNX2_DSP_EXPAND_REG | 0x8);
		bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &val);
		val &= ~(1 << 8);
		bnx2_write_phy(bp, MII_BNX2_DSP_RW_PORT, val);
	}

	if (bp->dev->mtu > 1500) {
		/* Set extended packet length bit */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val | 0x4000);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val | 0x1);
	}
	else {
		/* Standard MTU: clear the extended packet length bit. */
		bnx2_write_phy(bp, 0x18, 0x7);
		bnx2_read_phy(bp, 0x18, &val);
		bnx2_write_phy(bp, 0x18, val & ~0x4007);

		bnx2_read_phy(bp, 0x10, &val);
		bnx2_write_phy(bp, 0x10, val & ~0x1);
	}

	/* ethernet@wirespeed */
	bnx2_write_phy(bp, 0x18, 0x7007);
	bnx2_read_phy(bp, 0x18, &val);
	bnx2_write_phy(bp, 0x18, val | (1 << 15) | (1 << 4));
	return 0;
}
2350
2351
2352 static int
2353 bnx2_init_phy(struct bnx2 *bp, int reset_phy)
2354 __releases(&bp->phy_lock)
2355 __acquires(&bp->phy_lock)
2356 {
2357         u32 val;
2358         int rc = 0;
2359
2360         bp->phy_flags &= ~BNX2_PHY_FLAG_INT_MODE_MASK;
2361         bp->phy_flags |= BNX2_PHY_FLAG_INT_MODE_LINK_READY;
2362
2363         bp->mii_bmcr = MII_BMCR;
2364         bp->mii_bmsr = MII_BMSR;
2365         bp->mii_bmsr1 = MII_BMSR;
2366         bp->mii_adv = MII_ADVERTISE;
2367         bp->mii_lpa = MII_LPA;
2368
2369         REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);
2370
2371         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
2372                 goto setup_phy;
2373
2374         bnx2_read_phy(bp, MII_PHYSID1, &val);
2375         bp->phy_id = val << 16;
2376         bnx2_read_phy(bp, MII_PHYSID2, &val);
2377         bp->phy_id |= val & 0xffff;
2378
2379         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
2380                 if (CHIP_NUM(bp) == CHIP_NUM_5706)
2381                         rc = bnx2_init_5706s_phy(bp, reset_phy);
2382                 else if (CHIP_NUM(bp) == CHIP_NUM_5708)
2383                         rc = bnx2_init_5708s_phy(bp, reset_phy);
2384                 else if (CHIP_NUM(bp) == CHIP_NUM_5709)
2385                         rc = bnx2_init_5709s_phy(bp, reset_phy);
2386         }
2387         else {
2388                 rc = bnx2_init_copper_phy(bp, reset_phy);
2389         }
2390
2391 setup_phy:
2392         if (!rc)
2393                 rc = bnx2_setup_phy(bp, bp->phy_port);
2394
2395         return rc;
2396 }
2397
2398 static int
2399 bnx2_set_mac_loopback(struct bnx2 *bp)
2400 {
2401         u32 mac_mode;
2402
2403         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2404         mac_mode &= ~BNX2_EMAC_MODE_PORT;
2405         mac_mode |= BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK;
2406         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2407         bp->link_up = 1;
2408         return 0;
2409 }
2410
2411 static int bnx2_test_link(struct bnx2 *);
2412
2413 static int
2414 bnx2_set_phy_loopback(struct bnx2 *bp)
2415 {
2416         u32 mac_mode;
2417         int rc, i;
2418
2419         spin_lock_bh(&bp->phy_lock);
2420         rc = bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK | BMCR_FULLDPLX |
2421                             BMCR_SPEED1000);
2422         spin_unlock_bh(&bp->phy_lock);
2423         if (rc)
2424                 return rc;
2425
2426         for (i = 0; i < 10; i++) {
2427                 if (bnx2_test_link(bp) == 0)
2428                         break;
2429                 msleep(100);
2430         }
2431
2432         mac_mode = REG_RD(bp, BNX2_EMAC_MODE);
2433         mac_mode &= ~(BNX2_EMAC_MODE_PORT | BNX2_EMAC_MODE_HALF_DUPLEX |
2434                       BNX2_EMAC_MODE_MAC_LOOP | BNX2_EMAC_MODE_FORCE_LINK |
2435                       BNX2_EMAC_MODE_25G_MODE);
2436
2437         mac_mode |= BNX2_EMAC_MODE_PORT_GMII;
2438         REG_WR(bp, BNX2_EMAC_MODE, mac_mode);
2439         bp->link_up = 1;
2440         return 0;
2441 }
2442
/* Post a message to the firmware mailbox and optionally wait for its ack.
 *
 * @msg_data: BNX2_DRV_MSG_* code; the next driver sequence number is
 *            OR'ed into it before posting.
 * @ack:      if zero, post without waiting for an acknowledgement.
 * @silent:   if non-zero, do not log an ack timeout.
 *
 * Returns 0 on success (WAIT0 messages return 0 even without an ack),
 * -EBUSY if the firmware did not ack in time, -EIO if it acked with a
 * non-OK status.
 */
static int
bnx2_fw_sync(struct bnx2 *bp, u32 msg_data, int ack, int silent)
{
	int i;
	u32 val;

	bp->fw_wr_seq++;
	msg_data |= bp->fw_wr_seq;

	bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

	if (!ack)
		return 0;

	/* wait for an acknowledgement. */
	for (i = 0; i < (BNX2_FW_ACK_TIME_OUT_MS / 10); i++) {
		msleep(10);

		val = bnx2_shmem_rd(bp, BNX2_FW_MB);

		/* Firmware echoes the sequence number when it is done. */
		if ((val & BNX2_FW_MSG_ACK) == (msg_data & BNX2_DRV_MSG_SEQ))
			break;
	}
	/* WAIT0 messages need not complete within the timeout. */
	if ((msg_data & BNX2_DRV_MSG_DATA) == BNX2_DRV_MSG_DATA_WAIT0)
		return 0;

	/* If we timed out, inform the firmware that this is the case. */
	if ((val & BNX2_FW_MSG_ACK) != (msg_data & BNX2_DRV_MSG_SEQ)) {
		if (!silent)
			printk(KERN_ERR PFX "fw sync timeout, reset code = "
					    "%x\n", msg_data);

		msg_data &= ~BNX2_DRV_MSG_CODE;
		msg_data |= BNX2_DRV_MSG_CODE_FW_TIMEOUT;

		bnx2_shmem_wr(bp, BNX2_DRV_MB, msg_data);

		return -EBUSY;
	}

	if ((val & BNX2_FW_MSG_STATUS_MASK) != BNX2_FW_MSG_STATUS_OK)
		return -EIO;

	return 0;
}
2488
/* Initialize the 5709 context memory.
 *
 * Starts the chip's context memory init, waits for it to complete, then
 * programs the host page table with the DMA address of each
 * pre-allocated context page, polling each table write for completion.
 *
 * Returns 0 on success, -EBUSY on a hardware timeout, -ENOMEM if a
 * context page was not allocated.
 */
static int
bnx2_init_5709_context(struct bnx2 *bp)
{
	int i, ret = 0;
	u32 val;

	val = BNX2_CTX_COMMAND_ENABLED | BNX2_CTX_COMMAND_MEM_INIT | (1 << 12);
	val |= (BCM_PAGE_BITS - 8) << 16;	/* encoded page size */
	REG_WR(bp, BNX2_CTX_COMMAND, val);
	/* Wait for the MEM_INIT bit to self-clear. */
	for (i = 0; i < 10; i++) {
		val = REG_RD(bp, BNX2_CTX_COMMAND);
		if (!(val & BNX2_CTX_COMMAND_MEM_INIT))
			break;
		udelay(2);
	}
	if (val & BNX2_CTX_COMMAND_MEM_INIT)
		return -EBUSY;

	for (i = 0; i < bp->ctx_pages; i++) {
		int j;

		/* Pages are allocated elsewhere; fail if one is missing. */
		if (bp->ctx_blk[i])
			memset(bp->ctx_blk[i], 0, BCM_PAGE_SIZE);
		else
			return -ENOMEM;

		/* Program the 64-bit DMA address of page i into the
		 * host page table and request the write.
		 */
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA0,
		       (bp->ctx_blk_mapping[i] & 0xffffffff) |
		       BNX2_CTX_HOST_PAGE_TBL_DATA0_VALID);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_DATA1,
		       (u64) bp->ctx_blk_mapping[i] >> 32);
		REG_WR(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL, i |
		       BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ);
		/* Wait for the WRITE_REQ bit to self-clear. */
		for (j = 0; j < 10; j++) {

			val = REG_RD(bp, BNX2_CTX_HOST_PAGE_TBL_CTRL);
			if (!(val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ))
				break;
			udelay(5);
		}
		if (val & BNX2_CTX_HOST_PAGE_TBL_CTRL_WRITE_REQ) {
			ret = -EBUSY;
			break;
		}
	}
	return ret;
}
2536
/* Zero out all 96 on-chip connection contexts (non-5709 context init
 * path).  For each virtual CID the corresponding physical CID address
 * is computed and every 32-bit word of the context is cleared through
 * the indirect context window.
 */
static void
bnx2_init_context(struct bnx2 *bp)
{
	u32 vcid;

	vcid = 96;
	while (vcid) {
		u32 vcid_addr, pcid_addr, offset;
		int i;

		vcid--;

		if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
			u32 new_vcid;

			/* 5706 A0 workaround: remap every other group of
			 * 8 CIDs to a different physical CID range.
			 */
			vcid_addr = GET_PCID_ADDR(vcid);
			if (vcid & 0x8) {
				new_vcid = 0x60 + (vcid & 0xf0) + (vcid & 0x7);
			}
			else {
				new_vcid = vcid;
			}
			pcid_addr = GET_PCID_ADDR(new_vcid);
		}
		else {
			vcid_addr = GET_CID_ADDR(vcid);
			pcid_addr = vcid_addr;
		}

		/* A context may span multiple physical context pages;
		 * clear each one.
		 */
		for (i = 0; i < (CTX_SIZE / PHY_CTX_SIZE); i++) {
			vcid_addr += (i << PHY_CTX_SHIFT);
			pcid_addr += (i << PHY_CTX_SHIFT);

			REG_WR(bp, BNX2_CTX_VIRT_ADDR, vcid_addr);
			REG_WR(bp, BNX2_CTX_PAGE_TBL, pcid_addr);

			/* Zero out the context. */
			for (offset = 0; offset < PHY_CTX_SIZE; offset += 4)
				bnx2_ctx_wr(bp, vcid_addr, offset, 0);
		}
	}
}
2579
/* Work around bad RX buffer memory blocks: allocate every free mbuf
 * from the chip's RX buffer pool, remember the good ones, and free only
 * those back.  Buffers whose address has bit 9 set are bad and remain
 * permanently allocated so the hardware never hands them out again.
 *
 * Returns 0 on success, -ENOMEM if the temporary tracking array cannot
 * be allocated.  NOTE(review): the array holds 512 entries — assumes
 * the pool never yields more good buffers than that; confirm against
 * the RBUF pool size for these chips.
 */
static int
bnx2_alloc_bad_rbuf(struct bnx2 *bp)
{
	u16 *good_mbuf;
	u32 good_mbuf_cnt;
	u32 val;

	good_mbuf = kmalloc(512 * sizeof(u16), GFP_KERNEL);
	if (good_mbuf == NULL) {
		printk(KERN_ERR PFX "Failed to allocate memory in "
				    "bnx2_alloc_bad_rbuf\n");
		return -ENOMEM;
	}

	/* The RX mbuf allocator must be running for the alloc requests
	 * below to be serviced.
	 */
	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
		BNX2_MISC_ENABLE_SET_BITS_RX_MBUF_ENABLE);

	good_mbuf_cnt = 0;

	/* Allocate a bunch of mbufs and save the good ones in an array. */
	val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	while (val & BNX2_RBUF_STATUS1_FREE_COUNT) {
		bnx2_reg_wr_ind(bp, BNX2_RBUF_COMMAND,
				BNX2_RBUF_COMMAND_ALLOC_REQ);

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_FW_BUF_ALLOC);

		val &= BNX2_RBUF_FW_BUF_ALLOC_VALUE;

		/* The addresses with Bit 9 set are bad memory blocks. */
		if (!(val & (1 << 9))) {
			good_mbuf[good_mbuf_cnt] = (u16) val;
			good_mbuf_cnt++;
		}

		val = bnx2_reg_rd_ind(bp, BNX2_RBUF_STATUS1);
	}

	/* Free the good ones back to the mbuf pool thus discarding
	 * all the bad ones. */
	while (good_mbuf_cnt) {
		good_mbuf_cnt--;

		val = good_mbuf[good_mbuf_cnt];
		val = (val << 9) | val | 1;

		bnx2_reg_wr_ind(bp, BNX2_RBUF_FW_BUF_FREE, val);
	}
	kfree(good_mbuf);
	return 0;
}
2631
2632 static void
2633 bnx2_set_mac_addr(struct bnx2 *bp, u8 *mac_addr, u32 pos)
2634 {
2635         u32 val;
2636
2637         val = (mac_addr[0] << 8) | mac_addr[1];
2638
2639         REG_WR(bp, BNX2_EMAC_MAC_MATCH0 + (pos * 8), val);
2640
2641         val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
2642                 (mac_addr[4] << 8) | mac_addr[5];
2643
2644         REG_WR(bp, BNX2_EMAC_MAC_MATCH1 + (pos * 8), val);
2645 }
2646
2647 static inline int
2648 bnx2_alloc_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2649 {
2650         dma_addr_t mapping;
2651         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2652         struct rx_bd *rxbd =
2653                 &rxr->rx_pg_desc_ring[RX_RING(index)][RX_IDX(index)];
2654         struct page *page = alloc_page(GFP_ATOMIC);
2655
2656         if (!page)
2657                 return -ENOMEM;
2658         mapping = pci_map_page(bp->pdev, page, 0, PAGE_SIZE,
2659                                PCI_DMA_FROMDEVICE);
2660         if (pci_dma_mapping_error(bp->pdev, mapping)) {
2661                 __free_page(page);
2662                 return -EIO;
2663         }
2664
2665         rx_pg->page = page;
2666         pci_unmap_addr_set(rx_pg, mapping, mapping);
2667         rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
2668         rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;
2669         return 0;
2670 }
2671
2672 static void
2673 bnx2_free_rx_page(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
2674 {
2675         struct sw_pg *rx_pg = &rxr->rx_pg_ring[index];
2676         struct page *page = rx_pg->page;
2677
2678         if (!page)
2679                 return;
2680
2681         pci_unmap_page(bp->pdev, pci_unmap_addr(rx_pg, mapping), PAGE_SIZE,
2682                        PCI_DMA_FROMDEVICE);
2683
2684         __free_page(page);
2685         rx_pg->page = NULL;
2686 }
2687
/* Allocate and DMA-map a new receive skb for RX ring slot @index,
 * publish the mapping in the hardware descriptor, and advance the
 * producer byte-sequence counter.
 *
 * Returns 0 on success, -ENOMEM if skb allocation failed, -EIO if the
 * DMA mapping failed (the skb is freed before returning).
 */
static inline int
bnx2_alloc_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, u16 index)
{
	struct sk_buff *skb;
	struct sw_bd *rx_buf = &rxr->rx_buf_ring[index];
	dma_addr_t mapping;
	struct rx_bd *rxbd = &rxr->rx_desc_ring[RX_RING(index)][RX_IDX(index)];
	unsigned long align;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (skb == NULL) {
		return -ENOMEM;
	}

	/* Round the data pointer up to a BNX2_RX_ALIGN boundary. */
	if (unlikely((align = (unsigned long) skb->data & (BNX2_RX_ALIGN - 1))))
		skb_reserve(skb, BNX2_RX_ALIGN - align);

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_use_size,
		PCI_DMA_FROMDEVICE);
	if (pci_dma_mapping_error(bp->pdev, mapping)) {
		dev_kfree_skb(skb);
		return -EIO;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	/* Hand the 64-bit DMA address to the hardware descriptor. */
	rxbd->rx_bd_haddr_hi = (u64) mapping >> 32;
	rxbd->rx_bd_haddr_lo = (u64) mapping & 0xffffffff;

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	return 0;
}
2722
2723 static int
2724 bnx2_phy_event_is_set(struct bnx2 *bp, struct bnx2_napi *bnapi, u32 event)
2725 {
2726         struct status_block *sblk = bnapi->status_blk.msi;
2727         u32 new_link_state, old_link_state;
2728         int is_set = 1;
2729
2730         new_link_state = sblk->status_attn_bits & event;
2731         old_link_state = sblk->status_attn_bits_ack & event;
2732         if (new_link_state != old_link_state) {
2733                 if (new_link_state)
2734                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_SET_CMD, event);
2735                 else
2736                         REG_WR(bp, BNX2_PCICFG_STATUS_BIT_CLEAR_CMD, event);
2737         } else
2738                 is_set = 0;
2739
2740         return is_set;
2741 }
2742
/* Service PHY-related attention events under the PHY lock: a link
 * state change triggers link renegotiation, a timer-abort event
 * triggers remote-PHY link handling.
 */
static void
bnx2_phy_int(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	spin_lock(&bp->phy_lock);

	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_LINK_STATE))
		bnx2_set_link(bp);
	if (bnx2_phy_event_is_set(bp, bnapi, STATUS_ATTN_BITS_TIMER_ABORT))
		bnx2_set_remote_link(bp);

	spin_unlock(&bp->phy_lock);

}
2756
/* Read the hardware TX consumer index from the status block.  When the
 * index lands on the last entry of a ring page it is bumped by one —
 * presumably because that slot holds the chain BD, not a real
 * descriptor (TODO confirm against the ring macros).
 */
static inline u16
bnx2_get_hw_tx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_tx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_TX_DESC_CNT) == MAX_TX_DESC_CNT))
		cons++;
	return cons;
}
2770
/* Reap completed TX descriptors for this NAPI instance's ring, up to
 * @budget packets: unmap and free each completed skb, update the
 * software consumer indices, and wake the netdev TX queue if it was
 * stopped and enough descriptors are now free.
 *
 * Returns the number of packets reclaimed.
 */
static int
bnx2_tx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons;
	int tx_pkt = 0, index;
	struct netdev_queue *txq;

	/* One TX queue per NAPI instance. */
	index = (bnapi - bp->bnx2_napi);
	txq = netdev_get_tx_queue(bp->dev, index);

	hw_cons = bnx2_get_hw_tx_cons(bnapi);
	sw_cons = txr->tx_cons;

	while (sw_cons != hw_cons) {
		struct sw_tx_bd *tx_buf;
		struct sk_buff *skb;
		int i, last;

		sw_ring_cons = TX_RING_IDX(sw_cons);

		tx_buf = &txr->tx_buf_ring[sw_ring_cons];
		skb = tx_buf->skb;

		/* prefetch skb_end_pointer() to speedup skb_shinfo(skb) */
		prefetch(&skb->end);

		/* partial BD completions possible with TSO packets */
		if (tx_buf->is_gso) {
			u16 last_idx, last_ring_idx;

			/* Index of this packet's last BD; account for
			 * crossing a ring-page boundary.
			 */
			last_idx = sw_cons + tx_buf->nr_frags + 1;
			last_ring_idx = sw_ring_cons + tx_buf->nr_frags + 1;
			if (unlikely(last_ring_idx >= MAX_TX_DESC_CNT)) {
				last_idx++;
			}
			/* Stop if hardware has not consumed the whole
			 * packet yet (signed wrap-safe comparison).
			 */
			if (((s16) ((s16) last_idx - (s16) hw_cons)) > 0) {
				break;
			}
		}

		skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);

		tx_buf->skb = NULL;
		last = tx_buf->nr_frags;

		/* Skip over the fragment BDs plus the start BD. */
		for (i = 0; i < last; i++) {
			sw_cons = NEXT_TX_BD(sw_cons);
		}

		sw_cons = NEXT_TX_BD(sw_cons);

		dev_kfree_skb(skb);
		tx_pkt++;
		if (tx_pkt == budget)
			break;

		/* Re-read the hardware index in case more completions
		 * arrived while we were reaping.
		 */
		if (hw_cons == sw_cons)
			hw_cons = bnx2_get_hw_tx_cons(bnapi);
	}

	txr->hw_tx_cons = hw_cons;
	txr->tx_cons = sw_cons;

	/* Need to make the tx_cons update visible to bnx2_start_xmit()
	 * before checking for netif_tx_queue_stopped().  Without the
	 * memory barrier, there is a small possibility that bnx2_start_xmit()
	 * will miss it and cause the queue to be stopped forever.
	 */
	smp_mb();

	if (unlikely(netif_tx_queue_stopped(txq)) &&
		     (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)) {
		/* Re-check under the TX lock to avoid racing with a
		 * concurrent bnx2_start_xmit() stopping the queue.
		 */
		__netif_tx_lock(txq, smp_processor_id());
		if ((netif_tx_queue_stopped(txq)) &&
		    (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh))
			netif_tx_wake_queue(txq);
		__netif_tx_unlock(txq);
	}

	return tx_pkt;
}
2853
/* Recycle @count consumed RX page-ring entries back to the producer
 * side without allocating new pages — used on the error/abort path when
 * new pages could not be allocated.  If @skb is non-NULL, its last
 * page fragment is first returned to the current consumer slot and the
 * skb is freed.
 */
static void
bnx2_reuse_rx_skb_pages(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
			struct sk_buff *skb, int count)
{
	struct sw_pg *cons_rx_pg, *prod_rx_pg;
	struct rx_bd *cons_bd, *prod_bd;
	int i;
	u16 hw_prod, prod;
	u16 cons = rxr->rx_pg_cons;

	cons_rx_pg = &rxr->rx_pg_ring[cons];

	/* The caller was unable to allocate a new page to replace the
	 * last one in the frags array, so we need to recycle that page
	 * and then free the skb.
	 */
	if (skb) {
		struct page *page;
		struct skb_shared_info *shinfo;

		/* Detach the last frag's page from the skb... */
		shinfo = skb_shinfo(skb);
		shinfo->nr_frags--;
		page = shinfo->frags[shinfo->nr_frags].page;
		shinfo->frags[shinfo->nr_frags].page = NULL;

		/* ...and put it back into the consumer slot. */
		cons_rx_pg->page = page;
		dev_kfree_skb(skb);
	}

	hw_prod = rxr->rx_pg_prod;

	for (i = 0; i < count; i++) {
		prod = RX_PG_RING_IDX(hw_prod);

		prod_rx_pg = &rxr->rx_pg_ring[prod];
		cons_rx_pg = &rxr->rx_pg_ring[cons];
		cons_bd = &rxr->rx_pg_desc_ring[RX_RING(cons)][RX_IDX(cons)];
		prod_bd = &rxr->rx_pg_desc_ring[RX_RING(prod)][RX_IDX(prod)];

		/* Move page, mapping and DMA address from the consumer
		 * entry to the producer entry when they differ.
		 */
		if (prod != cons) {
			prod_rx_pg->page = cons_rx_pg->page;
			cons_rx_pg->page = NULL;
			pci_unmap_addr_set(prod_rx_pg, mapping,
				pci_unmap_addr(cons_rx_pg, mapping));

			prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
			prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;

		}
		cons = RX_PG_RING_IDX(NEXT_RX_BD(cons));
		hw_prod = NEXT_RX_BD(hw_prod);
	}
	rxr->rx_pg_prod = hw_prod;
	rxr->rx_pg_cons = cons;
}
2909
/* Recycle the skb at consumer slot @cons back into producer slot @prod
 * instead of allocating a replacement — used when a new skb could not
 * be allocated or the packet was copied/dropped.  The DMA mapping and
 * hardware descriptor address are moved along with the skb.
 */
static inline void
bnx2_reuse_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr,
		  struct sk_buff *skb, u16 cons, u16 prod)
{
	struct sw_bd *cons_rx_buf, *prod_rx_buf;
	struct rx_bd *cons_bd, *prod_bd;

	cons_rx_buf = &rxr->rx_buf_ring[cons];
	prod_rx_buf = &rxr->rx_buf_ring[prod];

	/* Give the header area back to the device; only this much was
	 * synced to the CPU in bnx2_rx_int().
	 */
	pci_dma_sync_single_for_device(bp->pdev,
		pci_unmap_addr(cons_rx_buf, mapping),
		BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	rxr->rx_prod_bseq += bp->rx_buf_use_size;

	prod_rx_buf->skb = skb;

	/* Same slot: nothing else to move. */
	if (cons == prod)
		return;

	pci_unmap_addr_set(prod_rx_buf, mapping,
			pci_unmap_addr(cons_rx_buf, mapping));

	cons_bd = &rxr->rx_desc_ring[RX_RING(cons)][RX_IDX(cons)];
	prod_bd = &rxr->rx_desc_ring[RX_RING(prod)][RX_IDX(prod)];
	prod_bd->rx_bd_haddr_hi = cons_bd->rx_bd_haddr_hi;
	prod_bd->rx_bd_haddr_lo = cons_bd->rx_bd_haddr_lo;
}
2939
/* Finish building a received skb: replace its ring slot with a freshly
 * allocated skb, unmap the buffer, and — for split/jumbo packets
 * (@hdr_len != 0) — attach the payload pages from the page ring as
 * fragments, replacing each consumed page as we go.
 *
 * @len:      packet length excluding the 4-byte trailing CRC.
 * @hdr_len:  linear header length for split packets, 0 for normal ones.
 * @ring_idx: consumer index in the high 16 bits, producer in the low.
 *
 * On any allocation failure the buffers are recycled via the reuse
 * helpers and a negative errno is returned; otherwise 0.
 */
static int
bnx2_rx_skb(struct bnx2 *bp, struct bnx2_rx_ring_info *rxr, struct sk_buff *skb,
	    unsigned int len, unsigned int hdr_len, dma_addr_t dma_addr,
	    u32 ring_idx)
{
	int err;
	u16 prod = ring_idx & 0xffff;

	err = bnx2_alloc_rx_skb(bp, rxr, prod);
	if (unlikely(err)) {
		/* Could not refill the slot: recycle this skb and, for
		 * split packets, the page-ring entries it would use.
		 */
		bnx2_reuse_rx_skb(bp, rxr, skb, (u16) (ring_idx >> 16), prod);
		if (hdr_len) {
			unsigned int raw_len = len + 4;
			int pages = PAGE_ALIGN(raw_len - hdr_len) >> PAGE_SHIFT;

			bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
		}
		return err;
	}

	skb_reserve(skb, BNX2_RX_OFFSET);
	pci_unmap_single(bp->pdev, dma_addr, bp->rx_buf_use_size,
			 PCI_DMA_FROMDEVICE);

	if (hdr_len == 0) {
		/* Entire packet fits in the linear buffer. */
		skb_put(skb, len);
		return 0;
	} else {
		unsigned int i, frag_len, frag_size, pages;
		struct sw_pg *rx_pg;
		u16 pg_cons = rxr->rx_pg_cons;
		u16 pg_prod = rxr->rx_pg_prod;

		/* Payload beyond the header (incl. trailing CRC) lives
		 * in page-ring pages.
		 */
		frag_size = len + 4 - hdr_len;
		pages = PAGE_ALIGN(frag_size) >> PAGE_SHIFT;
		skb_put(skb, hdr_len);

		for (i = 0; i < pages; i++) {
			dma_addr_t mapping_old;

			frag_len = min(frag_size, (unsigned int) PAGE_SIZE);
			if (unlikely(frag_len <= 4)) {
				/* Only (part of) the CRC remains: drop
				 * the tail bytes, recycle the unused
				 * pages and finish.
				 */
				unsigned int tail = 4 - frag_len;

				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, NULL,
							pages - i);
				skb->len -= tail;
				if (i == 0) {
					skb->tail -= tail;
				} else {
					skb_frag_t *frag =
						&skb_shinfo(skb)->frags[i - 1];
					frag->size -= tail;
					skb->data_len -= tail;
					skb->truesize -= tail;
				}
				return 0;
			}
			rx_pg = &rxr->rx_pg_ring[pg_cons];

			/* Don't unmap yet.  If we're unable to allocate a new
			 * page, we need to recycle the page and the DMA addr.
			 */
			mapping_old = pci_unmap_addr(rx_pg, mapping);
			if (i == pages - 1)
				frag_len -= 4;	/* strip the trailing CRC */

			skb_fill_page_desc(skb, i, rx_pg->page, 0, frag_len);
			rx_pg->page = NULL;

			err = bnx2_alloc_rx_page(bp, rxr,
						 RX_PG_RING_IDX(pg_prod));
			if (unlikely(err)) {
				/* Recycle the remaining pages (and this
				 * skb's last frag) and bail out.
				 */
				rxr->rx_pg_cons = pg_cons;
				rxr->rx_pg_prod = pg_prod;
				bnx2_reuse_rx_skb_pages(bp, rxr, skb,
							pages - i);
				return err;
			}

			pci_unmap_page(bp->pdev, mapping_old,
				       PAGE_SIZE, PCI_DMA_FROMDEVICE);

			frag_size -= frag_len;
			skb->data_len += frag_len;
			skb->truesize += frag_len;
			skb->len += frag_len;

			pg_prod = NEXT_RX_BD(pg_prod);
			pg_cons = RX_PG_RING_IDX(NEXT_RX_BD(pg_cons));
		}
		rxr->rx_pg_prod = pg_prod;
		rxr->rx_pg_cons = pg_cons;
	}
	return 0;
}
3038
/* Read the hardware RX consumer index from the status block.  When the
 * index lands on the last entry of a ring page it is bumped by one —
 * presumably because that slot holds the chain BD, not a real
 * descriptor (TODO confirm against the ring macros).
 */
static inline u16
bnx2_get_hw_rx_cons(struct bnx2_napi *bnapi)
{
	u16 cons;

	/* Tell compiler that status block fields can change. */
	barrier();
	cons = *bnapi->hw_rx_cons_ptr;
	barrier();
	if (unlikely((cons & MAX_RX_DESC_CNT) == MAX_RX_DESC_CNT))
		cons++;
	return cons;
}
3052
/* NAPI RX processing for one ring: consume up to @budget completed RX
 * descriptors, validate each frame header, build or copy the skb,
 * handle VLAN tags, and hand packets to the stack.  Finishes by writing
 * the updated producer indices back to the hardware.
 *
 * Returns the number of packets delivered.
 */
static int
bnx2_rx_int(struct bnx2 *bp, struct bnx2_napi *bnapi, int budget)
{
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
	u16 hw_cons, sw_cons, sw_ring_cons, sw_prod, sw_ring_prod;
	struct l2_fhdr *rx_hdr;
	int rx_pkt = 0, pg_ring_used = 0;

	hw_cons = bnx2_get_hw_rx_cons(bnapi);
	sw_cons = rxr->rx_cons;
	sw_prod = rxr->rx_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();
	while (sw_cons != hw_cons) {
		unsigned int len, hdr_len;
		u32 status;
		struct sw_bd *rx_buf;
		struct sk_buff *skb;
		dma_addr_t dma_addr;
		u16 vtag = 0;
		int hw_vlan __maybe_unused = 0;

		sw_ring_cons = RX_RING_IDX(sw_cons);
		sw_ring_prod = RX_RING_IDX(sw_prod);

		rx_buf = &rxr->rx_buf_ring[sw_ring_cons];
		skb = rx_buf->skb;

		rx_buf->skb = NULL;

		dma_addr = pci_unmap_addr(rx_buf, mapping);

		/* Only sync the frame header area to the CPU; the rest
		 * stays device-owned until we decide what to do.
		 */
		pci_dma_sync_single_for_cpu(bp->pdev, dma_addr,
			BNX2_RX_OFFSET + BNX2_RX_COPY_THRESH,
			PCI_DMA_FROMDEVICE);

		/* The chip prepends an l2_fhdr with length and status. */
		rx_hdr = (struct l2_fhdr *) skb->data;
		len = rx_hdr->l2_fhdr_pkt_len;
		status = rx_hdr->l2_fhdr_status;

		hdr_len = 0;
		if (status & L2_FHDR_STATUS_SPLIT) {
			hdr_len = rx_hdr->l2_fhdr_ip_xsum;
			pg_ring_used = 1;
		} else if (len > bp->rx_jumbo_thresh) {
			hdr_len = bp->rx_jumbo_thresh;
			pg_ring_used = 1;
		}

		/* Recycle frames the chip flagged as bad. */
		if (unlikely(status & (L2_FHDR_ERRORS_BAD_CRC |
				       L2_FHDR_ERRORS_PHY_DECODE |
				       L2_FHDR_ERRORS_ALIGNMENT |
				       L2_FHDR_ERRORS_TOO_SHORT |
				       L2_FHDR_ERRORS_GIANT_FRAME))) {

			bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
					  sw_ring_prod);
			if (pg_ring_used) {
				int pages;

				pages = PAGE_ALIGN(len - hdr_len) >> PAGE_SHIFT;

				bnx2_reuse_rx_skb_pages(bp, rxr, NULL, pages);
			}
			goto next_rx;
		}

		/* Strip the trailing 4-byte CRC. */
		len -= 4;

		if (len <= bp->rx_copy_thresh) {
			/* Small frame: copy into a fresh skb and recycle
			 * the ring buffer in place.
			 */
			struct sk_buff *new_skb;

			new_skb = netdev_alloc_skb(bp->dev, len + 6);
			if (new_skb == NULL) {
				bnx2_reuse_rx_skb(bp, rxr, skb, sw_ring_cons,
						  sw_ring_prod);
				goto next_rx;
			}

			/* aligned copy */
			skb_copy_from_linear_data_offset(skb,
							 BNX2_RX_OFFSET - 6,
				      new_skb->data, len + 6);
			skb_reserve(new_skb, 6);
			skb_put(new_skb, len);

			bnx2_reuse_rx_skb(bp, rxr, skb,
				sw_ring_cons, sw_ring_prod);

			skb = new_skb;
		} else if (unlikely(bnx2_rx_skb(bp, rxr, skb, len, hdr_len,
			   dma_addr, (sw_ring_cons << 16) | sw_ring_prod)))
			goto next_rx;

		if ((status & L2_FHDR_STATUS_L2_VLAN_TAG) &&
		    !(bp->rx_mode & BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG)) {
			vtag = rx_hdr->l2_fhdr_vlan_tag;
#ifdef BCM_VLAN
			if (bp->vlgrp)
				hw_vlan = 1;
			else
#endif
			{
				/* No VLAN accel group: re-insert the tag
				 * into the packet data by hand.
				 */
				struct vlan_ethhdr *ve = (struct vlan_ethhdr *)
					__skb_push(skb, 4);

				memmove(ve, skb->data + 4, ETH_ALEN * 2);
				ve->h_vlan_proto = htons(ETH_P_8021Q);
				ve->h_vlan_TCI = htons(vtag);
				len += 4;
			}
		}

		skb->protocol = eth_type_trans(skb, bp->dev);

		/* Drop oversized frames unless they carry a VLAN tag. */
		if ((len > (bp->dev->mtu + ETH_HLEN)) &&
			(ntohs(skb->protocol) != 0x8100)) {

			dev_kfree_skb(skb);
			goto next_rx;

		}

		/* Trust the hardware checksum only when enabled and the
		 * chip reported no TCP/UDP checksum errors.
		 */
		skb->ip_summed = CHECKSUM_NONE;
		if (bp->rx_csum &&
			(status & (L2_FHDR_STATUS_TCP_SEGMENT |
			L2_FHDR_STATUS_UDP_DATAGRAM))) {

			if (likely((status & (L2_FHDR_ERRORS_TCP_XSUM |
					      L2_FHDR_ERRORS_UDP_XSUM)) == 0))
				skb->ip_summed = CHECKSUM_UNNECESSARY;
		}

		skb_record_rx_queue(skb, bnapi - &bp->bnx2_napi[0]);

#ifdef BCM_VLAN
		if (hw_vlan)
			vlan_hwaccel_receive_skb(skb, bp->vlgrp, vtag);
		else
#endif
			netif_receive_skb(skb);

		rx_pkt++;

next_rx:
		sw_cons = NEXT_RX_BD(sw_cons);
		sw_prod = NEXT_RX_BD(sw_prod);

		if ((rx_pkt == budget))
			break;

		/* Refresh hw_cons to see if there is new work */
		if (sw_cons == hw_cons) {
			hw_cons = bnx2_get_hw_rx_cons(bnapi);
			rmb();
		}
	}
	rxr->rx_cons = sw_cons;
	rxr->rx_prod = sw_prod;

	/* Tell the chip about the new producer positions. */
	if (pg_ring_used)
		REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);

	REG_WR16(bp, rxr->rx_bidx_addr, sw_prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);

	mmiowb();

	return rx_pkt;

}
3228
3229 /* MSI ISR - The only difference between this and the INTx ISR
3230  * is that the MSI interrupt is always serviced.
3231  */
3232 static irqreturn_t
3233 bnx2_msi(int irq, void *dev_instance)
3234 {
3235         struct bnx2_napi *bnapi = dev_instance;
3236         struct bnx2 *bp = bnapi->bp;
3237
3238         prefetch(bnapi->status_blk.msi);
3239         REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
3240                 BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
3241                 BNX2_PCICFG_INT_ACK_CMD_MASK_INT);
3242
3243         /* Return here if interrupt is disabled. */
3244         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3245                 return IRQ_HANDLED;
3246
3247         napi_schedule(&bnapi->napi);
3248
3249         return IRQ_HANDLED;
3250 }
3251
3252 static irqreturn_t
3253 bnx2_msi_1shot(int irq, void *dev_instance)
3254 {
3255         struct bnx2_napi *bnapi = dev_instance;
3256         struct bnx2 *bp = bnapi->bp;
3257
3258         prefetch(bnapi->status_blk.msi);
3259
3260         /* Return here if interrupt is disabled. */
3261         if (unlikely(atomic_read(&bp->intr_sem) != 0))
3262                 return IRQ_HANDLED;
3263
3264         napi_schedule(&bnapi->napi);
3265
3266         return IRQ_HANDLED;
3267 }
3268
/* INTx interrupt handler.  Unlike the MSI path, the line may be shared
 * and the interrupt can race the status block DMA, so first verify the
 * interrupt is really ours, then mask, deassert, and schedule NAPI.
 */
static irqreturn_t
bnx2_interrupt(int irq, void *dev_instance)
{
	struct bnx2_napi *bnapi = dev_instance;
	struct bnx2 *bp = bnapi->bp;
	struct status_block *sblk = bnapi->status_blk.msi;

	/* When using INTx, it is possible for the interrupt to arrive
	 * at the CPU before the status block posted prior to the
	 * interrupt. Reading a register will flush the status block.
	 * When using MSI, the MSI message will always complete after
	 * the status block write.
	 */
	if ((sblk->status_idx == bnapi->last_status_idx) &&
	    (REG_RD(bp, BNX2_PCICFG_MISC_STATUS) &
	     BNX2_PCICFG_MISC_STATUS_INTA_VALUE))
		return IRQ_NONE;

	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
		BNX2_PCICFG_INT_ACK_CMD_USE_INT_HC_PARAM |
		BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	/* Read back to deassert IRQ immediately to avoid too many
	 * spurious interrupts.
	 */
	REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD);

	/* Return here if interrupt is shared and is disabled. */
	if (unlikely(atomic_read(&bp->intr_sem) != 0))
		return IRQ_HANDLED;

	/* Record the status index we are acting on before scheduling,
	 * so a missed update can be detected later.
	 */
	if (napi_schedule_prep(&bnapi->napi)) {
		bnapi->last_status_idx = sblk->status_idx;
		__napi_schedule(&bnapi->napi);
	}

	return IRQ_HANDLED;
}
3307
3308 static inline int
3309 bnx2_has_fast_work(struct bnx2_napi *bnapi)
3310 {
3311         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3312         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3313
3314         if ((bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons) ||
3315             (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons))
3316                 return 1;
3317         return 0;
3318 }
3319
3320 #define STATUS_ATTN_EVENTS      (STATUS_ATTN_BITS_LINK_STATE | \
3321                                  STATUS_ATTN_BITS_TIMER_ABORT)
3322
3323 static inline int
3324 bnx2_has_work(struct bnx2_napi *bnapi)
3325 {
3326         struct status_block *sblk = bnapi->status_blk.msi;
3327
3328         if (bnx2_has_fast_work(bnapi))
3329                 return 1;
3330
3331 #ifdef BCM_CNIC
3332         if (bnapi->cnic_present && (bnapi->cnic_tag != sblk->status_idx))
3333                 return 1;
3334 #endif
3335
3336         if ((sblk->status_attn_bits & STATUS_ATTN_EVENTS) !=
3337             (sblk->status_attn_bits_ack & STATUS_ATTN_EVENTS))
3338                 return 1;
3339
3340         return 0;
3341 }
3342
/* Work around lost MSI interrupts: if work is pending and the status
 * index has not advanced since the previous idle check (presumably
 * called periodically — e.g. from the driver timer; confirm at the
 * caller), assume the MSI was missed.  Pulse the MSI enable bit and
 * invoke the ISR by hand to catch up.
 */
static void
bnx2_chk_missed_msi(struct bnx2 *bp)
{
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0];
	u32 msi_ctrl;

	if (bnx2_has_work(bnapi)) {
		msi_ctrl = REG_RD(bp, BNX2_PCICFG_MSI_CONTROL);
		/* Nothing to do unless MSI is actually enabled. */
		if (!(msi_ctrl & BNX2_PCICFG_MSI_CONTROL_ENABLE))
			return;

		/* Status index unchanged since the last check: the
		 * interrupt never arrived.
		 */
		if (bnapi->last_status_idx == bp->idle_chk_status_idx) {
			/* Toggle the MSI enable bit off and back on... */
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl &
			       ~BNX2_PCICFG_MSI_CONTROL_ENABLE);
			REG_WR(bp, BNX2_PCICFG_MSI_CONTROL, msi_ctrl);
			/* ...and run the handler directly. */
			bnx2_msi(bp->irq_tbl[0].vector, bnapi);
		}
	}

	/* Remember where we were for the next check. */
	bp->idle_chk_status_idx = bnapi->last_status_idx;
}
3364
3365 #ifdef BCM_CNIC
/* Hand the current status block to the registered CNIC (offload)
 * driver, if any.  The tag returned by the handler records the last
 * status index it has seen; bnx2_has_work() compares it against the
 * current index to detect pending CNIC work.
 */
static void bnx2_poll_cnic(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct cnic_ops *c_ops;

	if (!bnapi->cnic_present)
		return;

	/* cnic_ops may be unregistered concurrently; RCU protects the
	 * pointer for the duration of the callback.
	 */
	rcu_read_lock();
	c_ops = rcu_dereference(bp->cnic_ops);
	if (c_ops)
		bnapi->cnic_tag = c_ops->cnic_handler(bp->cnic_data,
						      bnapi->status_blk.msi);
	rcu_read_unlock();
}
3380 #endif
3381
/* Service link/timer attention events from the NAPI poll loop.  An
 * event is pending when a bit differs between the raised attention
 * bits and the acknowledged set.
 */
static void bnx2_poll_link(struct bnx2 *bp, struct bnx2_napi *bnapi)
{
	struct status_block *sblk = bnapi->status_blk.msi;
	u32 status_attn_bits = sblk->status_attn_bits;
	u32 status_attn_bits_ack = sblk->status_attn_bits_ack;

	if ((status_attn_bits & STATUS_ATTN_EVENTS) !=
	    (status_attn_bits_ack & STATUS_ATTN_EVENTS)) {

		bnx2_phy_int(bp, bnapi);

		/* This is needed to take care of transient status
		 * during link changes.
		 */
		REG_WR(bp, BNX2_HC_COMMAND,
		       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);
		/* Read back to flush the posted write. */
		REG_RD(bp, BNX2_HC_COMMAND);
	}
}
3401
3402 static int bnx2_poll_work(struct bnx2 *bp, struct bnx2_napi *bnapi,
3403                           int work_done, int budget)
3404 {
3405         struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
3406         struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
3407
3408         if (bnx2_get_hw_tx_cons(bnapi) != txr->hw_tx_cons)
3409                 bnx2_tx_int(bp, bnapi, 0);
3410
3411         if (bnx2_get_hw_rx_cons(bnapi) != rxr->rx_cons)
3412                 work_done += bnx2_rx_int(bp, bnapi, budget - work_done);
3413
3414         return work_done;
3415 }
3416
/* NAPI poll callback for MSI-X vectors: only fast-path (rx/tx) work,
 * no link or CNIC handling.  Loops until either the budget is consumed
 * or no work remains, then re-enables the vector's interrupt via the
 * INT_ACK register.
 */
static int bnx2_poll_msix(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block_msix *sblk = bnapi->status_blk.msix;

	while (1) {
		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);
		if (unlikely(work_done >= budget))
			break;

		bnapi->last_status_idx = sblk->status_idx;
		/* status idx must be read before checking for more work. */
		rmb();
		if (likely(!bnx2_has_fast_work(bnapi))) {

			/* Done: ack up to last_status_idx and unmask
			 * this vector's interrupt.
			 */
			napi_complete(napi);
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, bnapi->int_num |
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}
	return work_done;
}
3443
/* Main NAPI poll callback (INTx/MSI, and vector 0 of MSI-X): handles
 * link attention, fast-path rx/tx work and CNIC work, looping until
 * the budget is exhausted or no work remains, then re-enables
 * interrupts.
 */
static int bnx2_poll(struct napi_struct *napi, int budget)
{
	struct bnx2_napi *bnapi = container_of(napi, struct bnx2_napi, napi);
	struct bnx2 *bp = bnapi->bp;
	int work_done = 0;
	struct status_block *sblk = bnapi->status_blk.msi;

	while (1) {
		bnx2_poll_link(bp, bnapi);

		work_done = bnx2_poll_work(bp, bnapi, work_done, budget);

#ifdef BCM_CNIC
		bnx2_poll_cnic(bp, bnapi);
#endif

		/* bnapi->last_status_idx is used below to tell the hw how
		 * much work has been processed, so we must read it before
		 * checking for more work.
		 */
		bnapi->last_status_idx = sblk->status_idx;

		if (unlikely(work_done >= budget))
			break;

		rmb();
		if (likely(!bnx2_has_work(bnapi))) {
			napi_complete(napi);
			/* MSI/MSI-X: a single ack write re-enables the
			 * interrupt.
			 */
			if (likely(bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)) {
				REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
				       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
				       bnapi->last_status_idx);
				break;
			}
			/* INTx: ack with the line still masked first,
			 * then unmask with a second write.
			 */
			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       BNX2_PCICFG_INT_ACK_CMD_MASK_INT |
			       bnapi->last_status_idx);

			REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD,
			       BNX2_PCICFG_INT_ACK_CMD_INDEX_VALID |
			       bnapi->last_status_idx);
			break;
		}
	}

	return work_done;
}
3492
3493 /* Called with rtnl_lock from vlan functions and also netif_tx_lock
3494  * from set_multicast.
3495  */
/* Program the receive filters (promiscuous, multicast hash, unicast
 * match entries, VLAN tag keeping) from the net_device flags and
 * address lists.  Takes phy_lock to serialize with other register
 * users; no-op if the interface is down.
 */
static void
bnx2_set_rx_mode(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 rx_mode, sort_mode;
	struct netdev_hw_addr *ha;
	int i;

	if (!netif_running(dev))
		return;

	spin_lock_bh(&bp->phy_lock);

	/* Start from the cached mode with the bits we manage cleared. */
	rx_mode = bp->rx_mode & ~(BNX2_EMAC_RX_MODE_PROMISCUOUS |
				  BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG);
	sort_mode = 1 | BNX2_RPM_SORT_USER0_BC_EN;
#ifdef BCM_VLAN
	/* Keep VLAN tags in hardware only when no VLAN group is
	 * registered (otherwise the stack strips them).
	 */
	if (!bp->vlgrp && (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN))
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#else
	if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
		rx_mode |= BNX2_EMAC_RX_MODE_KEEP_VLAN_TAG;
#endif
	if (dev->flags & IFF_PROMISC) {
		/* Promiscuous mode. */
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	}
	else if (dev->flags & IFF_ALLMULTI) {
		/* Accept all multicast: set every hash bit. */
		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       0xffffffff);
		}
		sort_mode |= BNX2_RPM_SORT_USER0_MC_EN;
	}
	else {
		/* Accept one or more multicast(s). */
		struct dev_mc_list *mclist;
		u32 mc_filter[NUM_MC_HASH_REGISTERS];
		u32 regidx;
		u32 bit;
		u32 crc;

		memset(mc_filter, 0, 4 * NUM_MC_HASH_REGISTERS);

		/* Hash each address: low CRC byte selects one of 256
		 * filter bits (register index = bits 7:5, bit = 4:0).
		 */
		for (i = 0, mclist = dev->mc_list; mclist && i < dev->mc_count;
		     i++, mclist = mclist->next) {

			crc = ether_crc_le(ETH_ALEN, mclist->dmi_addr);
			bit = crc & 0xff;
			regidx = (bit & 0xe0) >> 5;
			bit &= 0x1f;
			mc_filter[regidx] |= (1 << bit);
		}

		for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
			REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
			       mc_filter[i]);
		}

		sort_mode |= BNX2_RPM_SORT_USER0_MC_HSH_EN;
	}

	/* Too many unicast addresses for the match filters: fall back
	 * to promiscuous reception.
	 */
	if (dev->uc.count > BNX2_MAX_UNICAST_ADDRESSES) {
		rx_mode |= BNX2_EMAC_RX_MODE_PROMISCUOUS;
		sort_mode |= BNX2_RPM_SORT_USER0_PROM_EN |
			     BNX2_RPM_SORT_USER0_PROM_VLAN;
	} else if (!(dev->flags & IFF_PROMISC)) {
		/* Add all entries into the match filter list */
		i = 0;
		list_for_each_entry(ha, &dev->uc.list, list) {
			bnx2_set_mac_addr(bp, ha->addr,
					  i + BNX2_START_UNICAST_ADDRESS_INDEX);
			sort_mode |= (1 <<
				      (i + BNX2_START_UNICAST_ADDRESS_INDEX));
			i++;
		}

	}

	/* Only touch the EMAC mode register when it actually changed. */
	if (rx_mode != bp->rx_mode) {
		bp->rx_mode = rx_mode;
		REG_WR(bp, BNX2_EMAC_RX_MODE, rx_mode);
	}

	/* Program the sort register: clear, load, then enable. */
	REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode);
	REG_WR(bp, BNX2_RPM_SORT_USER0, sort_mode | BNX2_RPM_SORT_USER0_ENA);

	spin_unlock_bh(&bp->phy_lock);
}
3588
3589 static int __devinit
3590 check_fw_section(const struct firmware *fw,
3591                  const struct bnx2_fw_file_section *section,
3592                  u32 alignment, bool non_empty)
3593 {
3594         u32 offset = be32_to_cpu(section->offset);
3595         u32 len = be32_to_cpu(section->len);
3596
3597         if ((offset == 0 && len != 0) || offset >= fw->size || offset & 3)
3598                 return -EINVAL;
3599         if ((non_empty && len == 0) || len > fw->size - offset ||
3600             len & (alignment - 1))
3601                 return -EINVAL;
3602         return 0;
3603 }
3604
3605 static int __devinit
3606 check_mips_fw_entry(const struct firmware *fw,
3607                     const struct bnx2_mips_fw_file_entry *entry)
3608 {
3609         if (check_fw_section(fw, &entry->text, 4, true) ||
3610             check_fw_section(fw, &entry->data, 4, false) ||
3611             check_fw_section(fw, &entry->rodata, 4, false))
3612                 return -EINVAL;
3613         return 0;
3614 }
3615
/* Load and sanity-check the MIPS and RV2P firmware images for this
 * chip revision.  On success the images are held in bp->mips_firmware
 * and bp->rv2p_firmware.
 *
 * NOTE(review): on the error paths below, firmware already requested
 * into bp->mips_firmware / bp->rv2p_firmware is not released here —
 * presumably the caller releases it on failure; verify at the call
 * site.
 */
static int __devinit
bnx2_request_firmware(struct bnx2 *bp)
{
	const char *mips_fw_file, *rv2p_fw_file;
	const struct bnx2_mips_fw_file *mips_fw;
	const struct bnx2_rv2p_fw_file *rv2p_fw;
	int rc;

	/* Pick the image names for this chip; 5709 A0/A1 need a
	 * different RV2P image than later 5709 revisions.
	 */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		mips_fw_file = FW_MIPS_FILE_09;
		if ((CHIP_ID(bp) == CHIP_ID_5709_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5709_A1))
			rv2p_fw_file = FW_RV2P_FILE_09_Ax;
		else
			rv2p_fw_file = FW_RV2P_FILE_09;
	} else {
		mips_fw_file = FW_MIPS_FILE_06;
		rv2p_fw_file = FW_RV2P_FILE_06;
	}

	rc = request_firmware(&bp->mips_firmware, mips_fw_file, &bp->pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
		       mips_fw_file);
		return rc;
	}

	rc = request_firmware(&bp->rv2p_firmware, rv2p_fw_file, &bp->pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file \"%s\"\n",
		       rv2p_fw_file);
		return rc;
	}
	/* Validate every section header before anything is loaded into
	 * the chip.
	 */
	mips_fw = (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
	rv2p_fw = (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
	if (bp->mips_firmware->size < sizeof(*mips_fw) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->com) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->cp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->rxp) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->tpat) ||
	    check_mips_fw_entry(bp->mips_firmware, &mips_fw->txp)) {
		printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
		       mips_fw_file);
		return -EINVAL;
	}
	if (bp->rv2p_firmware->size < sizeof(*rv2p_fw) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc1.rv2p, 8, true) ||
	    check_fw_section(bp->rv2p_firmware, &rv2p_fw->proc2.rv2p, 8, true)) {
		printk(KERN_ERR PFX "Firmware file \"%s\" is invalid\n",
		       rv2p_fw_file);
		return -EINVAL;
	}

	return 0;
}
3671
3672 static u32
3673 rv2p_fw_fixup(u32 rv2p_proc, int idx, u32 loc, u32 rv2p_code)
3674 {
3675         switch (idx) {
3676         case RV2P_P1_FIXUP_PAGE_SIZE_IDX:
3677                 rv2p_code &= ~RV2P_BD_PAGE_SIZE_MSK;
3678                 rv2p_code |= RV2P_BD_PAGE_SIZE;
3679                 break;
3680         }
3681         return rv2p_code;
3682 }
3683
/* Load one RV2P processor program from the firmware image.  Each
 * instruction is 8 bytes, written as a HIGH/LOW register pair and then
 * committed at its instruction index through the processor's address/
 * command register.  After the straight copy, up to 8 fixup slots
 * patch individual instructions in place.  Always returns 0.
 */
static int
load_rv2p_fw(struct bnx2 *bp, u32 rv2p_proc,
	     const struct bnx2_rv2p_fw_file_entry *fw_entry)
{
	u32 rv2p_code_len, file_offset;
	__be32 *rv2p_code;
	int i;
	u32 val, cmd, addr;

	rv2p_code_len = be32_to_cpu(fw_entry->rv2p.len);
	file_offset = be32_to_cpu(fw_entry->rv2p.offset);

	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);

	/* Each processor has its own address/command register. */
	if (rv2p_proc == RV2P_PROC1) {
		cmd = BNX2_RV2P_PROC1_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC1_ADDR_CMD;
	} else {
		cmd = BNX2_RV2P_PROC2_ADDR_CMD_RDWR;
		addr = BNX2_RV2P_PROC2_ADDR_CMD;
	}

	/* i counts bytes; one 8-byte instruction per iteration. */
	for (i = 0; i < rv2p_code_len; i += 8) {
		REG_WR(bp, BNX2_RV2P_INSTR_HIGH, be32_to_cpu(*rv2p_code));
		rv2p_code++;
		REG_WR(bp, BNX2_RV2P_INSTR_LOW, be32_to_cpu(*rv2p_code));
		rv2p_code++;

		/* Commit at instruction index i/8. */
		val = (i / 8) | cmd;
		REG_WR(bp, addr, val);
	}

	/* Second pass: apply the fixup table.  Each entry's loc is a
	 * 32-bit-word index into the program (0 = no fixup).
	 */
	rv2p_code = (__be32 *)(bp->rv2p_firmware->data + file_offset);
	for (i = 0; i < 8; i++) {
		u32 loc, code;

		loc = be32_to_cpu(fw_entry->fixup[i]);
		if (loc && ((loc * 4) < rv2p_code_len)) {
			/* Rewrite the HIGH word unchanged and patch the
			 * LOW word via rv2p_fw_fixup().
			 */
			code = be32_to_cpu(*(rv2p_code + loc - 1));
			REG_WR(bp, BNX2_RV2P_INSTR_HIGH, code);
			code = be32_to_cpu(*(rv2p_code + loc));
			code = rv2p_fw_fixup(rv2p_proc, i, loc, code);
			REG_WR(bp, BNX2_RV2P_INSTR_LOW, code);

			/* loc/2 converts the word index back to an
			 * instruction index.
			 */
			val = (loc / 2) | cmd;
			REG_WR(bp, addr, val);
		}
	}

	/* Reset the processor, un-stall is done later. */
	if (rv2p_proc == RV2P_PROC1) {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC1_RESET);
	}
	else {
		REG_WR(bp, BNX2_RV2P_COMMAND, BNX2_RV2P_COMMAND_PROC2_RESET);
	}

	return 0;
}
3743
3744 static int
3745 load_cpu_fw(struct bnx2 *bp, const struct cpu_reg *cpu_reg,
3746             const struct bnx2_mips_fw_file_entry *fw_entry)
3747 {
3748         u32 addr, len, file_offset;
3749         __be32 *data;
3750         u32 offset;
3751         u32 val;
3752
3753         /* Halt the CPU. */
3754         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3755         val |= cpu_reg->mode_value_halt;
3756         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3757         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3758
3759         /* Load the Text area. */
3760         addr = be32_to_cpu(fw_entry->text.addr);
3761         len = be32_to_cpu(fw_entry->text.len);
3762         file_offset = be32_to_cpu(fw_entry->text.offset);
3763         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3764
3765         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3766         if (len) {
3767                 int j;
3768
3769                 for (j = 0; j < (len / 4); j++, offset += 4)
3770                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3771         }
3772
3773         /* Load the Data area. */
3774         addr = be32_to_cpu(fw_entry->data.addr);
3775         len = be32_to_cpu(fw_entry->data.len);
3776         file_offset = be32_to_cpu(fw_entry->data.offset);
3777         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3778
3779         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3780         if (len) {
3781                 int j;
3782
3783                 for (j = 0; j < (len / 4); j++, offset += 4)
3784                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3785         }
3786
3787         /* Load the Read-Only area. */
3788         addr = be32_to_cpu(fw_entry->rodata.addr);
3789         len = be32_to_cpu(fw_entry->rodata.len);
3790         file_offset = be32_to_cpu(fw_entry->rodata.offset);
3791         data = (__be32 *)(bp->mips_firmware->data + file_offset);
3792
3793         offset = cpu_reg->spad_base + (addr - cpu_reg->mips_view_base);
3794         if (len) {
3795                 int j;
3796
3797                 for (j = 0; j < (len / 4); j++, offset += 4)
3798                         bnx2_reg_wr_ind(bp, offset, be32_to_cpu(data[j]));
3799         }
3800
3801         /* Clear the pre-fetch instruction. */
3802         bnx2_reg_wr_ind(bp, cpu_reg->inst, 0);
3803
3804         val = be32_to_cpu(fw_entry->start_addr);
3805         bnx2_reg_wr_ind(bp, cpu_reg->pc, val);
3806
3807         /* Start the CPU. */
3808         val = bnx2_reg_rd_ind(bp, cpu_reg->mode);
3809         val &= ~cpu_reg->mode_value_halt;
3810         bnx2_reg_wr_ind(bp, cpu_reg->state, cpu_reg->state_value_clear);
3811         bnx2_reg_wr_ind(bp, cpu_reg->mode, val);
3812
3813         return 0;
3814 }
3815
3816 static int
3817 bnx2_init_cpus(struct bnx2 *bp)
3818 {
3819         const struct bnx2_mips_fw_file *mips_fw =
3820                 (const struct bnx2_mips_fw_file *) bp->mips_firmware->data;
3821         const struct bnx2_rv2p_fw_file *rv2p_fw =
3822                 (const struct bnx2_rv2p_fw_file *) bp->rv2p_firmware->data;
3823         int rc;
3824
3825         /* Initialize the RV2P processor. */
3826         load_rv2p_fw(bp, RV2P_PROC1, &rv2p_fw->proc1);
3827         load_rv2p_fw(bp, RV2P_PROC2, &rv2p_fw->proc2);
3828
3829         /* Initialize the RX Processor. */
3830         rc = load_cpu_fw(bp, &cpu_reg_rxp, &mips_fw->rxp);
3831         if (rc)
3832                 goto init_cpu_err;
3833
3834         /* Initialize the TX Processor. */
3835         rc = load_cpu_fw(bp, &cpu_reg_txp, &mips_fw->txp);
3836         if (rc)
3837                 goto init_cpu_err;
3838
3839         /* Initialize the TX Patch-up Processor. */
3840         rc = load_cpu_fw(bp, &cpu_reg_tpat, &mips_fw->tpat);
3841         if (rc)
3842                 goto init_cpu_err;
3843
3844         /* Initialize the Completion Processor. */
3845         rc = load_cpu_fw(bp, &cpu_reg_com, &mips_fw->com);
3846         if (rc)
3847                 goto init_cpu_err;
3848
3849         /* Initialize the Command Processor. */
3850         rc = load_cpu_fw(bp, &cpu_reg_cp, &mips_fw->cp);
3851
3852 init_cpu_err:
3853         return rc;
3854 }
3855
/* Transition the device between PCI power states.  Only D0 and D3hot
 * are supported; D3hot optionally arms Wake-on-LAN (magic packet /
 * broadcast + multicast reception).  Returns 0 on success or -EINVAL
 * for an unsupported state.
 */
static int
bnx2_set_power_state(struct bnx2 *bp, pci_power_t state)
{
	u16 pmcsr;

	pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);

	switch (state) {
	case PCI_D0: {
		u32 val;

		/* Force state to D0 and clear any pending PME status. */
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
			(pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
			PCI_PM_CTRL_PME_STATUS);

		if (pmcsr & PCI_PM_CTRL_STATE_MASK)
			/* delay required during transition out of D3hot */
			msleep(20);

		/* Clear wake-up event latches and disable magic-packet
		 * detection now that we are awake.
		 */
		val = REG_RD(bp, BNX2_EMAC_MODE);
		val |= BNX2_EMAC_MODE_MPKT_RCVD | BNX2_EMAC_MODE_ACPI_RCVD;
		val &= ~BNX2_EMAC_MODE_MPKT;
		REG_WR(bp, BNX2_EMAC_MODE, val);

		val = REG_RD(bp, BNX2_RPM_CONFIG);
		val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
		REG_WR(bp, BNX2_RPM_CONFIG, val);
		break;
	}
	case PCI_D3hot: {
		int i;
		u32 val, wol_msg;

		if (bp->wol) {
			u32 advertising;
			u8 autoneg;

			/* Temporarily renegotiate the link down to
			 * 10/100 for WoL, preserving the configured
			 * autoneg/advertising settings.
			 */
			autoneg = bp->autoneg;
			advertising = bp->advertising;

			if (bp->phy_port == PORT_TP) {
				bp->autoneg = AUTONEG_SPEED;
				bp->advertising = ADVERTISED_10baseT_Half |
					ADVERTISED_10baseT_Full |
					ADVERTISED_100baseT_Half |
					ADVERTISED_100baseT_Full |
					ADVERTISED_Autoneg;
			}

			spin_lock_bh(&bp->phy_lock);
			bnx2_setup_phy(bp, bp->phy_port);
			spin_unlock_bh(&bp->phy_lock);

			bp->autoneg = autoneg;
			bp->advertising = advertising;

			bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

			val = REG_RD(bp, BNX2_EMAC_MODE);

			/* Enable port mode. */
			val &= ~BNX2_EMAC_MODE_PORT;
			val |= BNX2_EMAC_MODE_MPKT_RCVD |
			       BNX2_EMAC_MODE_ACPI_RCVD |
			       BNX2_EMAC_MODE_MPKT;
			if (bp->phy_port == PORT_TP)
				val |= BNX2_EMAC_MODE_PORT_MII;
			else {
				val |= BNX2_EMAC_MODE_PORT_GMII;
				if (bp->line_speed == SPEED_2500)
					val |= BNX2_EMAC_MODE_25G_MODE;
			}

			REG_WR(bp, BNX2_EMAC_MODE, val);

			/* receive all multicast */
			for (i = 0; i < NUM_MC_HASH_REGISTERS; i++) {
				REG_WR(bp, BNX2_EMAC_MULTICAST_HASH0 + (i * 4),
				       0xffffffff);
			}
			REG_WR(bp, BNX2_EMAC_RX_MODE,
			       BNX2_EMAC_RX_MODE_SORT_MODE);

			/* Accept broadcast and multicast while asleep:
			 * clear, load, then enable the sort register.
			 */
			val = 1 | BNX2_RPM_SORT_USER0_BC_EN |
			      BNX2_RPM_SORT_USER0_MC_EN;
			REG_WR(bp, BNX2_RPM_SORT_USER0, 0x0);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val);
			REG_WR(bp, BNX2_RPM_SORT_USER0, val |
			       BNX2_RPM_SORT_USER0_ENA);

			/* Need to enable EMAC and RPM for WOL. */
			REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
			       BNX2_MISC_ENABLE_SET_BITS_RX_PARSER_MAC_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_TX_HEADER_Q_ENABLE |
			       BNX2_MISC_ENABLE_SET_BITS_EMAC_ENABLE);

			val = REG_RD(bp, BNX2_RPM_CONFIG);
			val &= ~BNX2_RPM_CONFIG_ACPI_ENA;
			REG_WR(bp, BNX2_RPM_CONFIG, val);

			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
		}
		else {
			wol_msg = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
		}

		/* Tell the bootcode we are suspending. */
		if (!(bp->flags & BNX2_FLAG_NO_WOL))
			bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT3 | wol_msg,
				     1, 0);

		/* Enter D3hot (5706 A0/A1 only when WoL is armed),
		 * arming PME if WoL is enabled.
		 */
		pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1)) {

			if (bp->wol)
				pmcsr |= 3;
		}
		else {
			pmcsr |= 3;
		}
		if (bp->wol) {
			pmcsr |= PCI_PM_CTRL_PME_ENABLE;
		}
		pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
				      pmcsr);

		/* No more memory access after this point until
		 * device is brought back to D0.
		 */
		udelay(50);
		break;
	}
	default:
		return -EINVAL;
	}
	return 0;
}
3993
3994 static int
3995 bnx2_acquire_nvram_lock(struct bnx2 *bp)
3996 {
3997         u32 val;
3998         int j;
3999
4000         /* Request access to the flash interface. */
4001         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_SET2);
4002         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4003                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4004                 if (val & BNX2_NVM_SW_ARB_ARB_ARB2)
4005                         break;
4006
4007                 udelay(5);
4008         }
4009
4010         if (j >= NVRAM_TIMEOUT_COUNT)
4011                 return -EBUSY;
4012
4013         return 0;
4014 }
4015
4016 static int
4017 bnx2_release_nvram_lock(struct bnx2 *bp)
4018 {
4019         int j;
4020         u32 val;
4021
4022         /* Relinquish nvram interface. */
4023         REG_WR(bp, BNX2_NVM_SW_ARB, BNX2_NVM_SW_ARB_ARB_REQ_CLR2);
4024
4025         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4026                 val = REG_RD(bp, BNX2_NVM_SW_ARB);
4027                 if (!(val & BNX2_NVM_SW_ARB_ARB_ARB2))
4028                         break;
4029
4030                 udelay(5);
4031         }
4032
4033         if (j >= NVRAM_TIMEOUT_COUNT)
4034                 return -EBUSY;
4035
4036         return 0;
4037 }
4038
4039
4040 static int
4041 bnx2_enable_nvram_write(struct bnx2 *bp)
4042 {
4043         u32 val;
4044
4045         val = REG_RD(bp, BNX2_MISC_CFG);
4046         REG_WR(bp, BNX2_MISC_CFG, val | BNX2_MISC_CFG_NVM_WR_EN_PCI);
4047
4048         if (bp->flash_info->flags & BNX2_NV_WREN) {
4049                 int j;
4050
4051                 REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4052                 REG_WR(bp, BNX2_NVM_COMMAND,
4053                        BNX2_NVM_COMMAND_WREN | BNX2_NVM_COMMAND_DOIT);
4054
4055                 for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4056                         udelay(5);
4057
4058                         val = REG_RD(bp, BNX2_NVM_COMMAND);
4059                         if (val & BNX2_NVM_COMMAND_DONE)
4060                                 break;
4061                 }
4062
4063                 if (j >= NVRAM_TIMEOUT_COUNT)
4064                         return -EBUSY;
4065         }
4066         return 0;
4067 }
4068
4069 static void
4070 bnx2_disable_nvram_write(struct bnx2 *bp)
4071 {
4072         u32 val;
4073
4074         val = REG_RD(bp, BNX2_MISC_CFG);
4075         REG_WR(bp, BNX2_MISC_CFG, val & ~BNX2_MISC_CFG_NVM_WR_EN);
4076 }
4077
4078
4079 static void
4080 bnx2_enable_nvram_access(struct bnx2 *bp)
4081 {
4082         u32 val;
4083
4084         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4085         /* Enable both bits, even on read. */
4086         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4087                val | BNX2_NVM_ACCESS_ENABLE_EN | BNX2_NVM_ACCESS_ENABLE_WR_EN);
4088 }
4089
4090 static void
4091 bnx2_disable_nvram_access(struct bnx2 *bp)
4092 {
4093         u32 val;
4094
4095         val = REG_RD(bp, BNX2_NVM_ACCESS_ENABLE);
4096         /* Disable both bits, even after read. */
4097         REG_WR(bp, BNX2_NVM_ACCESS_ENABLE,
4098                 val & ~(BNX2_NVM_ACCESS_ENABLE_EN |
4099                         BNX2_NVM_ACCESS_ENABLE_WR_EN));
4100 }
4101
/* Erase the flash page containing @offset.  Buffered flash parts need
 * no erase and return immediately.  Returns 0 on success (or no-op),
 * -EBUSY if the erase command times out.
 */
static int
bnx2_nvram_erase_page(struct bnx2 *bp, u32 offset)
{
	u32 cmd;
	int j;

	if (bp->flash_info->flags & BNX2_NV_BUFFERED)
		/* Buffered flash, no erase needed */
		return 0;

	/* Build an erase command */
	cmd = BNX2_NVM_COMMAND_ERASE | BNX2_NVM_COMMAND_WR |
	      BNX2_NVM_COMMAND_DOIT;

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue an erase command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE)
			break;
	}

	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4141
/* Read one 32-bit word from NVRAM at @offset into @ret_val (stored
 * big-endian, as it appears in flash).  @cmd_flags carries extra
 * command bits such as first/last markers supplied by the caller.
 * Returns 0 on success, -EBUSY if the read command times out.
 */
static int
bnx2_nvram_read_dword(struct bnx2 *bp, u32 offset, u8 *ret_val, u32 cmd_flags)
{
	u32 cmd;
	int j;

	/* Build the command word. */
	cmd = BNX2_NVM_COMMAND_DOIT | cmd_flags;

	/* Calculate an offset of a buffered flash, not needed for 5709. */
	if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
		offset = ((offset / bp->flash_info->page_size) <<
			   bp->flash_info->page_bits) +
			  (offset % bp->flash_info->page_size);
	}

	/* Need to clear DONE bit separately. */
	REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);

	/* Address of the NVRAM to read from. */
	REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);

	/* Issue a read command. */
	REG_WR(bp, BNX2_NVM_COMMAND, cmd);

	/* Wait for completion. */
	for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
		u32 val;

		udelay(5);

		val = REG_RD(bp, BNX2_NVM_COMMAND);
		if (val & BNX2_NVM_COMMAND_DONE) {
			/* Store the word big-endian, matching the
			 * on-flash byte order.
			 */
			__be32 v = cpu_to_be32(REG_RD(bp, BNX2_NVM_READ));
			memcpy(ret_val, &v, 4);
			break;
		}
	}
	if (j >= NVRAM_TIMEOUT_COUNT)
		return -EBUSY;

	return 0;
}
4185
4186
4187 static int
4188 bnx2_nvram_write_dword(struct bnx2 *bp, u32 offset, u8 *val, u32 cmd_flags)
4189 {
4190         u32 cmd;
4191         __be32 val32;
4192         int j;
4193
4194         /* Build the command word. */
4195         cmd = BNX2_NVM_COMMAND_DOIT | BNX2_NVM_COMMAND_WR | cmd_flags;
4196
4197         /* Calculate an offset of a buffered flash, not needed for 5709. */
4198         if (bp->flash_info->flags & BNX2_NV_TRANSLATE) {
4199                 offset = ((offset / bp->flash_info->page_size) <<
4200                           bp->flash_info->page_bits) +
4201                          (offset % bp->flash_info->page_size);
4202         }
4203
4204         /* Need to clear DONE bit separately. */
4205         REG_WR(bp, BNX2_NVM_COMMAND, BNX2_NVM_COMMAND_DONE);
4206
4207         memcpy(&val32, val, 4);
4208
4209         /* Write the data. */
4210         REG_WR(bp, BNX2_NVM_WRITE, be32_to_cpu(val32));
4211
4212         /* Address of the NVRAM to write to. */
4213         REG_WR(bp, BNX2_NVM_ADDR, offset & BNX2_NVM_ADDR_NVM_ADDR_VALUE);
4214
4215         /* Issue the write command. */
4216         REG_WR(bp, BNX2_NVM_COMMAND, cmd);
4217
4218         /* Wait for completion. */
4219         for (j = 0; j < NVRAM_TIMEOUT_COUNT; j++) {
4220                 udelay(5);
4221
4222                 if (REG_RD(bp, BNX2_NVM_COMMAND) & BNX2_NVM_COMMAND_DONE)
4223                         break;
4224         }
4225         if (j >= NVRAM_TIMEOUT_COUNT)
4226                 return -EBUSY;
4227
4228         return 0;
4229 }
4230
/* Identify the attached NVRAM (flash/EEPROM) device from the strapping
 * bits in NVM_CFG1, cache its parameters in bp->flash_info, and record
 * the device size in bp->flash_size.  Returns 0 on success or a
 * negative errno (-ENODEV if no flash_table entry matches).
 */
static int
bnx2_init_nvram(struct bnx2 *bp)
{
	u32 val;
	int j, entry_count, rc = 0;
	const struct flash_spec *flash;

	/* The 5709 uses a single fixed flash spec; skip the strap probe. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		bp->flash_info = &flash_5709;
		goto get_flash_size;
	}

	/* Determine the selected interface. */
	val = REG_RD(bp, BNX2_NVM_CFG1);

	entry_count = ARRAY_SIZE(flash_table);

	if (val & 0x40000000) {

		/* Flash interface has been reconfigured */
		for (j = 0, flash = &flash_table[0]; j < entry_count;
		     j++, flash++) {
			/* Match on the backup strap field of config1. */
			if ((val & FLASH_BACKUP_STRAP_MASK) ==
			    (flash->config1 & FLASH_BACKUP_STRAP_MASK)) {
				bp->flash_info = flash;
				break;
			}
		}
	}
	else {
		u32 mask;
		/* Not yet been reconfigured */

		/* NOTE(review): bit 23 selects which strap field is
		 * compared — inferred from the mask choice below;
		 * confirm against the NVM_CFG1 register definition. */
		if (val & (1 << 23))
			mask = FLASH_BACKUP_STRAP_MASK;
		else
			mask = FLASH_STRAP_MASK;

		for (j = 0, flash = &flash_table[0]; j < entry_count;
			j++, flash++) {

			if ((val & mask) == (flash->strapping & mask)) {
				bp->flash_info = flash;

				/* Request access to the flash interface. */
				if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
					return rc;

				/* Enable access to flash interface */
				bnx2_enable_nvram_access(bp);

				/* Reconfigure the flash interface */
				REG_WR(bp, BNX2_NVM_CFG1, flash->config1);
				REG_WR(bp, BNX2_NVM_CFG2, flash->config2);
				REG_WR(bp, BNX2_NVM_CFG3, flash->config3);
				REG_WR(bp, BNX2_NVM_WRITE1, flash->write1);

				/* Disable access to flash interface */
				bnx2_disable_nvram_access(bp);
				bnx2_release_nvram_lock(bp);

				break;
			}
		}
	} /* if (val & 0x40000000) */

	/* Neither probe loop matched a known device. */
	if (j == entry_count) {
		bp->flash_info = NULL;
		printk(KERN_ALERT PFX "Unknown flash/EEPROM type.\n");
		return -ENODEV;
	}

get_flash_size:
	/* Prefer the NVRAM size reported by firmware via shared memory;
	 * fall back to the flash table's total_size when it is zero. */
	val = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG2);
	val &= BNX2_SHARED_HW_CFG2_NVM_SIZE_MASK;
	if (val)
		bp->flash_size = val;
	else
		bp->flash_size = bp->flash_info->total_size;

	return rc;
}
4313
4314 static int
4315 bnx2_nvram_read(struct bnx2 *bp, u32 offset, u8 *ret_buf,
4316                 int buf_size)
4317 {
4318         int rc = 0;
4319         u32 cmd_flags, offset32, len32, extra;
4320
4321         if (buf_size == 0)
4322                 return 0;
4323
4324         /* Request access to the flash interface. */
4325         if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
4326                 return rc;
4327
4328         /* Enable access to flash interface */
4329         bnx2_enable_nvram_access(bp);
4330
4331         len32 = buf_size;
4332         offset32 = offset;
4333         extra = 0;
4334
4335         cmd_flags = 0;
4336
4337         if (offset32 & 3) {
4338                 u8 buf[4];
4339                 u32 pre_len;
4340
4341                 offset32 &= ~3;
4342                 pre_len = 4 - (offset & 3);
4343
4344                 if (pre_len >= len32) {
4345                         pre_len = len32;
4346                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4347                                     BNX2_NVM_COMMAND_LAST;
4348                 }
4349                 else {
4350                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4351                 }
4352
4353                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4354
4355                 if (rc)
4356                         return rc;
4357
4358                 memcpy(ret_buf, buf + (offset & 3), pre_len);
4359
4360                 offset32 += 4;
4361                 ret_buf += pre_len;
4362                 len32 -= pre_len;
4363         }
4364         if (len32 & 3) {
4365                 extra = 4 - (len32 & 3);
4366                 len32 = (len32 + 4) & ~3;
4367         }
4368
4369         if (len32 == 4) {
4370                 u8 buf[4];
4371
4372                 if (cmd_flags)
4373                         cmd_flags = BNX2_NVM_COMMAND_LAST;
4374                 else
4375                         cmd_flags = BNX2_NVM_COMMAND_FIRST |
4376                                     BNX2_NVM_COMMAND_LAST;
4377
4378                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4379
4380                 memcpy(ret_buf, buf, 4 - extra);
4381         }
4382         else if (len32 > 0) {
4383                 u8 buf[4];
4384
4385                 /* Read the first word. */
4386                 if (cmd_flags)
4387                         cmd_flags = 0;
4388                 else
4389                         cmd_flags = BNX2_NVM_COMMAND_FIRST;
4390
4391                 rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, cmd_flags);
4392
4393                 /* Advance to the next dword. */
4394                 offset32 += 4;
4395                 ret_buf += 4;
4396                 len32 -= 4;
4397
4398                 while (len32 > 4 && rc == 0) {
4399                         rc = bnx2_nvram_read_dword(bp, offset32, ret_buf, 0);
4400
4401                         /* Advance to the next dword. */
4402                         offset32 += 4;
4403                         ret_buf += 4;
4404                         len32 -= 4;
4405                 }
4406
4407                 if (rc)
4408                         return rc;
4409
4410                 cmd_flags = BNX2_NVM_COMMAND_LAST;
4411                 rc = bnx2_nvram_read_dword(bp, offset32, buf, cmd_flags);
4412
4413                 memcpy(ret_buf, buf, 4 - extra);
4414         }
4415
4416         /* Disable access to flash interface */
4417         bnx2_disable_nvram_access(bp);
4418
4419         bnx2_release_nvram_lock(bp);
4420
4421         return rc;
4422 }
4423
/* Write buf_size bytes from data_buf to NVRAM at byte offset 'offset'.
 * Unaligned head/tail bytes are preserved with a read-modify-write
 * through a temporary aligned buffer.  Non-buffered flash is rewritten
 * one full page at a time: read back, erase, then rewrite the page with
 * the new bytes merged in.  Returns 0 on success or a negative errno.
 */
static int
bnx2_nvram_write(struct bnx2 *bp, u32 offset, u8 *data_buf,
		int buf_size)
{
	u32 written, offset32, len32;
	u8 *buf, start[4], end[4], *align_buf = NULL, *flash_buffer = NULL;
	int rc = 0;
	int align_start, align_end;

	buf = data_buf;
	offset32 = offset;
	len32 = buf_size;
	align_start = align_end = 0;

	/* Unaligned start: read the dword containing the first bytes so
	 * the bytes before 'offset' can be preserved. */
	if ((align_start = (offset32 & 3))) {
		offset32 &= ~3;
		len32 += align_start;
		if (len32 < 4)
			len32 = 4;
		if ((rc = bnx2_nvram_read(bp, offset32, start, 4)))
			return rc;
	}

	/* Unaligned end: read the trailing dword so the bytes after the
	 * written region can be preserved. */
	if (len32 & 3) {
		align_end = 4 - (len32 & 3);
		len32 += align_end;
		if ((rc = bnx2_nvram_read(bp, offset32 + len32 - 4, end, 4)))
			return rc;
	}

	/* Merge preserved head/tail bytes with the caller's data into a
	 * dword-aligned scratch buffer. */
	if (align_start || align_end) {
		align_buf = kmalloc(len32, GFP_KERNEL);
		if (align_buf == NULL)
			return -ENOMEM;
		if (align_start) {
			memcpy(align_buf, start, 4);
		}
		if (align_end) {
			memcpy(align_buf + len32 - 4, end, 4);
		}
		memcpy(align_buf + align_start, data_buf, buf_size);
		buf = align_buf;
	}

	/* Non-buffered flash needs a bounce buffer for the page
	 * read-back.  NOTE(review): 264 bytes presumably covers the
	 * largest non-buffered page_size in flash_table — confirm. */
	if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
		flash_buffer = kmalloc(264, GFP_KERNEL);
		if (flash_buffer == NULL) {
			rc = -ENOMEM;
			goto nvram_write_end;
		}
	}

	/* Process one flash page per iteration. */
	written = 0;
	while ((written < len32) && (rc == 0)) {
		u32 page_start, page_end, data_start, data_end;
		u32 addr, cmd_flags;
		int i;

		/* Find the page_start addr */
		page_start = offset32 + written;
		page_start -= (page_start % bp->flash_info->page_size);
		/* Find the page_end addr */
		page_end = page_start + bp->flash_info->page_size;
		/* Find the data_start addr */
		data_start = (written == 0) ? offset32 : page_start;
		/* Find the data_end addr */
		data_end = (page_end > offset32 + len32) ?
			(offset32 + len32) : page_end;

		/* Request access to the flash interface. */
		if ((rc = bnx2_acquire_nvram_lock(bp)) != 0)
			goto nvram_write_end;

		/* Enable access to flash interface */
		bnx2_enable_nvram_access(bp);

		cmd_flags = BNX2_NVM_COMMAND_FIRST;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			int j;

			/* Read the whole page into the buffer
			 * (non-buffer flash only) */
			for (j = 0; j < bp->flash_info->page_size; j += 4) {
				if (j == (bp->flash_info->page_size - 4)) {
					cmd_flags |= BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_read_dword(bp,
					page_start + j,
					&flash_buffer[j],
					cmd_flags);

				if (rc)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Enable writes to flash interface (unlock write-protect) */
		if ((rc = bnx2_enable_nvram_write(bp)) != 0)
			goto nvram_write_end;

		/* Loop to write back the buffer data from page_start to
		 * data_start */
		i = 0;
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			/* Erase the page */
			if ((rc = bnx2_nvram_erase_page(bp, page_start)) != 0)
				goto nvram_write_end;

			/* Re-enable the write again for the actual write */
			bnx2_enable_nvram_write(bp);

			for (addr = page_start; addr < data_start;
				addr += 4, i += 4) {

				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Loop to write the new data from data_start to data_end */
		for (addr = data_start; addr < data_end; addr += 4, i += 4) {
			if ((addr == page_end - 4) ||
				((bp->flash_info->flags & BNX2_NV_BUFFERED) &&
				 (addr == data_end - 4))) {

				cmd_flags |= BNX2_NVM_COMMAND_LAST;
			}
			rc = bnx2_nvram_write_dword(bp, addr, buf,
				cmd_flags);

			if (rc != 0)
				goto nvram_write_end;

			cmd_flags = 0;
			buf += 4;
		}

		/* Loop to write back the buffer data from data_end
		 * to page_end */
		if (!(bp->flash_info->flags & BNX2_NV_BUFFERED)) {
			for (addr = data_end; addr < page_end;
				addr += 4, i += 4) {

				if (addr == page_end-4) {
					cmd_flags = BNX2_NVM_COMMAND_LAST;
				}
				rc = bnx2_nvram_write_dword(bp, addr,
					&flash_buffer[i], cmd_flags);

				if (rc != 0)
					goto nvram_write_end;

				cmd_flags = 0;
			}
		}

		/* Disable writes to flash interface (lock write-protect) */
		bnx2_disable_nvram_write(bp);

		/* Disable access to flash interface */
		bnx2_disable_nvram_access(bp);
		bnx2_release_nvram_lock(bp);

		/* Increment written */
		written += data_end - data_start;
	}

nvram_write_end:
	/* kfree(NULL) is a no-op, so both buffers can be freed
	 * unconditionally. */
	kfree(flash_buffer);
	kfree(align_buf);
	return rc;
}
4603
4604 static void
4605 bnx2_init_fw_cap(struct bnx2 *bp)
4606 {
4607         u32 val, sig = 0;
4608
4609         bp->phy_flags &= ~BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4610         bp->flags &= ~BNX2_FLAG_CAN_KEEP_VLAN;
4611
4612         if (!(bp->flags & BNX2_FLAG_ASF_ENABLE))
4613                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4614
4615         val = bnx2_shmem_rd(bp, BNX2_FW_CAP_MB);
4616         if ((val & BNX2_FW_CAP_SIGNATURE_MASK) != BNX2_FW_CAP_SIGNATURE)
4617                 return;
4618
4619         if ((val & BNX2_FW_CAP_CAN_KEEP_VLAN) == BNX2_FW_CAP_CAN_KEEP_VLAN) {
4620                 bp->flags |= BNX2_FLAG_CAN_KEEP_VLAN;
4621                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE | BNX2_FW_CAP_CAN_KEEP_VLAN;
4622         }
4623
4624         if ((bp->phy_flags & BNX2_PHY_FLAG_SERDES) &&
4625             (val & BNX2_FW_CAP_REMOTE_PHY_CAPABLE)) {
4626                 u32 link;
4627
4628                 bp->phy_flags |= BNX2_PHY_FLAG_REMOTE_PHY_CAP;
4629
4630                 link = bnx2_shmem_rd(bp, BNX2_LINK_STATUS);
4631                 if (link & BNX2_LINK_STATUS_SERDES_LINK)
4632                         bp->phy_port = PORT_FIBRE;
4633                 else
4634                         bp->phy_port = PORT_TP;
4635
4636                 sig |= BNX2_DRV_ACK_CAP_SIGNATURE |
4637                        BNX2_FW_CAP_REMOTE_PHY_CAPABLE;
4638         }
4639
4640         if (netif_running(bp->dev) && sig)
4641                 bnx2_shmem_wr(bp, BNX2_DRV_ACK_CAP_MB, sig);
4642 }
4643
/* Map the MSI-X table and PBA through GRC windows 2 and 3, with the
 * window base register set to separate-window mode. */
static void
bnx2_setup_msix_tbl(struct bnx2 *bp)
{
	REG_WR(bp, BNX2_PCI_GRC_WINDOW_ADDR, BNX2_PCI_GRC_WINDOW_ADDR_SEP_WIN);

	REG_WR(bp, BNX2_PCI_GRC_WINDOW2_ADDR, BNX2_MSIX_TABLE_ADDR);
	REG_WR(bp, BNX2_PCI_GRC_WINDOW3_ADDR, BNX2_MSIX_PBA_ADDR);
}
4652
/* Perform a coordinated soft reset of the chip.  reset_code is the
 * BNX2_DRV_MSG_CODE_* reason handed to the bootcode firmware before
 * and after the reset.  Returns 0 on success or a negative errno.
 */
static int
bnx2_reset_chip(struct bnx2 *bp, u32 reset_code)
{
	u32 val;
	int i, rc = 0;
	u8 old_port;

	/* Wait for the current PCI transaction to complete before
	 * issuing a reset. */
	REG_WR(bp, BNX2_MISC_ENABLE_CLR_BITS,
	       BNX2_MISC_ENABLE_CLR_BITS_TX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_DMA_ENGINE_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_RX_DMA_ENABLE |
	       BNX2_MISC_ENABLE_CLR_BITS_HOST_COALESCE_ENABLE);
	val = REG_RD(bp, BNX2_MISC_ENABLE_CLR_BITS);
	udelay(5);

	/* Wait for the firmware to tell us it is ok to issue a reset. */
	bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT0 | reset_code, 1, 1);

	/* Deposit a driver reset signature so the firmware knows that
	 * this is a soft reset. */
	bnx2_shmem_wr(bp, BNX2_DRV_RESET_SIGNATURE,
		      BNX2_DRV_RESET_SIGNATURE_MAGIC);

	/* Do a dummy read to force the chip to complete all current transaction
	 * before we issue a reset. */
	val = REG_RD(bp, BNX2_MISC_ID);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		/* 5709: reset via the MISC command register; the read
		 * back flushes the posted write before the delay. */
		REG_WR(bp, BNX2_MISC_COMMAND, BNX2_MISC_COMMAND_SW_RESET);
		REG_RD(bp, BNX2_MISC_COMMAND);
		udelay(5);

		val = BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Restore MISC_CONFIG via PCI config space rather than
		 * the memory-mapped window. */
		pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG, val);

	} else {
		/* Older chips: reset by setting CORE_RST_REQ in
		 * MISC_CONFIG, then poll for the request/busy bits to
		 * clear. */
		val = BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
		      BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
		      BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP;

		/* Chip reset. */
		REG_WR(bp, BNX2_PCICFG_MISC_CONFIG, val);

		/* Reading back any register after chip reset will hang the
		 * bus on 5706 A0 and A1.  The msleep below provides plenty
		 * of margin for write posting.
		 */
		if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
		    (CHIP_ID(bp) == CHIP_ID_5706_A1))
			msleep(20);

		/* Reset takes approximate 30 usec */
		for (i = 0; i < 10; i++) {
			val = REG_RD(bp, BNX2_PCICFG_MISC_CONFIG);
			if ((val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
				    BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) == 0)
				break;
			udelay(10);
		}

		if (val & (BNX2_PCICFG_MISC_CONFIG_CORE_RST_REQ |
			   BNX2_PCICFG_MISC_CONFIG_CORE_RST_BSY)) {
			printk(KERN_ERR PFX "Chip reset did not complete\n");
			return -EBUSY;
		}
	}

	/* Make sure byte swapping is properly configured.  The swap
	 * diagnostic register must read back 0x01020304. */
	val = REG_RD(bp, BNX2_PCI_SWAP_DIAG0);
	if (val != 0x01020304) {
		printk(KERN_ERR PFX "Chip not in correct endian mode\n");
		return -ENODEV;
	}

	/* Wait for the firmware to finish its initialization. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT1 | reset_code, 1, 0);
	if (rc)
		return rc;

	/* Re-read firmware capabilities; if the remote-PHY port type
	 * changed across the reset, reprogram the default link. */
	spin_lock_bh(&bp->phy_lock);
	old_port = bp->phy_port;
	bnx2_init_fw_cap(bp);
	if ((bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) &&
	    old_port != bp->phy_port)
		bnx2_set_default_remote_link(bp);
	spin_unlock_bh(&bp->phy_lock);

	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		/* Adjust the voltage regular to two steps lower.  The default
		 * of this register is 0x0000000e. */
		REG_WR(bp, BNX2_MISC_VREG_CONTROL, 0x000000fa);

		/* Remove bad rbuf memory from the free pool. */
		rc = bnx2_alloc_bad_rbuf(bp);
	}

	/* MSI-X windows are cleared by the reset; reprogram them. */
	if (bp->flags & BNX2_FLAG_USING_MSIX)
		bnx2_setup_msix_tbl(bp);

	return rc;
}
4758
/* Bring the chip up after a reset: program DMA and context memory,
 * load the firmware CPUs, set the MAC address, MTU, host-coalescing
 * parameters and per-vector status blocks, then enable the chip and
 * tell the firmware initialization is complete.  Returns 0 on success
 * or a negative errno.
 */
static int
bnx2_init_chip(struct bnx2 *bp)
{
	u32 val, mtu;
	int rc, i;

	/* Make sure the interrupt is not active. */
	REG_WR(bp, BNX2_PCICFG_INT_ACK_CMD, BNX2_PCICFG_INT_ACK_CMD_MASK_INT);

	val = BNX2_DMA_CONFIG_DATA_BYTE_SWAP |
	      BNX2_DMA_CONFIG_DATA_WORD_SWAP |
#ifdef __BIG_ENDIAN
	      BNX2_DMA_CONFIG_CNTL_BYTE_SWAP |
#endif
	      BNX2_DMA_CONFIG_CNTL_WORD_SWAP |
	      DMA_READ_CHANS << 12 |
	      DMA_WRITE_CHANS << 16;

	/* NOTE(review): undocumented DMA_CONFIG bits — meaning not
	 * visible from this file; confirm against the register spec. */
	val |= (0x2 << 20) | (1 << 11);

	if ((bp->flags & BNX2_FLAG_PCIX) && (bp->bus_speed_mhz == 133))
		val |= (1 << 23);

	/* 5706 (non-A0) on plain PCI uses ping-pong DMA mode. */
	if ((CHIP_NUM(bp) == CHIP_NUM_5706) &&
	    (CHIP_ID(bp) != CHIP_ID_5706_A0) && !(bp->flags & BNX2_FLAG_PCIX))
		val |= BNX2_DMA_CONFIG_CNTL_PING_PONG_DMA;

	REG_WR(bp, BNX2_DMA_CONFIG, val);

	/* 5706 A0 workaround: restrict TDMA to a single DMA. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
		val = REG_RD(bp, BNX2_TDMA_CONFIG);
		val |= BNX2_TDMA_CONFIG_ONE_DMA;
		REG_WR(bp, BNX2_TDMA_CONFIG, val);
	}

	/* On PCI-X, clear the relaxed-ordering enable bit in the
	 * PCI-X command register. */
	if (bp->flags & BNX2_FLAG_PCIX) {
		u16 val16;

		pci_read_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				     &val16);
		pci_write_config_word(bp->pdev, bp->pcix_cap + PCI_X_CMD,
				      val16 & ~PCI_X_CMD_ERO);
	}

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS,
	       BNX2_MISC_ENABLE_SET_BITS_HOST_COALESCE_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_RX_V2P_ENABLE |
	       BNX2_MISC_ENABLE_STATUS_BITS_CONTEXT_ENABLE);

	/* Initialize context mapping and zero out the quick contexts.  The
	 * context block must have already been enabled. */
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		rc = bnx2_init_5709_context(bp);
		if (rc)
			return rc;
	} else
		bnx2_init_context(bp);

	/* Download and start the on-chip firmware CPUs. */
	if ((rc = bnx2_init_cpus(bp)) != 0)
		return rc;

	bnx2_init_nvram(bp);

	bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);

	/* Configure the management queue: 256-byte kernel bypass block
	 * size, plus 5709-specific MQ mode bits. */
	val = REG_RD(bp, BNX2_MQ_CONFIG);
	val &= ~BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE;
	val |= BNX2_MQ_CONFIG_KNL_BYP_BLK_SIZE_256;
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val |= BNX2_MQ_CONFIG_BIN_MQ_MODE;
		if (CHIP_REV(bp) == CHIP_REV_Ax)
			val |= BNX2_MQ_CONFIG_HALT_DIS;
	}

	REG_WR(bp, BNX2_MQ_CONFIG, val);

	val = 0x10000 + (MAX_CID_CNT * MB_KERNEL_CTX_SIZE);
	REG_WR(bp, BNX2_MQ_KNL_BYP_WIND_START, val);
	REG_WR(bp, BNX2_MQ_KNL_WIND_END, val);

	/* Program the RV2P and TBDR page sizes from BCM_PAGE_BITS. */
	val = (BCM_PAGE_BITS - 8) << 24;
	REG_WR(bp, BNX2_RV2P_CONFIG, val);

	/* Configure page size. */
	val = REG_RD(bp, BNX2_TBDR_CONFIG);
	val &= ~BNX2_TBDR_CONFIG_PAGE_SIZE;
	val |= (BCM_PAGE_BITS - 8) << 24 | 0x40;
	REG_WR(bp, BNX2_TBDR_CONFIG, val);

	/* Seed the EMAC backoff generator from the MAC address. */
	val = bp->mac_addr[0] +
	      (bp->mac_addr[1] << 8) +
	      (bp->mac_addr[2] << 16) +
	      bp->mac_addr[3] +
	      (bp->mac_addr[4] << 8) +
	      (bp->mac_addr[5] << 16);
	REG_WR(bp, BNX2_EMAC_BACKOFF_SEED, val);

	/* Program the MTU.  Also include 4 bytes for CRC32. */
	mtu = bp->dev->mtu;
	val = mtu + ETH_HLEN + ETH_FCS_LEN;
	if (val > (MAX_ETHERNET_PACKET_SIZE + 4))
		val |= BNX2_EMAC_RX_MTU_SIZE_JUMBO_ENA;
	REG_WR(bp, BNX2_EMAC_RX_MTU_SIZE, val);

	/* RBUF thresholds are computed from at least the standard MTU. */
	if (mtu < 1500)
		mtu = 1500;

	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG, BNX2_RBUF_CONFIG_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG2, BNX2_RBUF_CONFIG2_VAL(mtu));
	bnx2_reg_wr_ind(bp, BNX2_RBUF_CONFIG3, BNX2_RBUF_CONFIG3_VAL(mtu));

	/* Clear the status/statistics block and reset the per-vector
	 * last-seen status index. */
	memset(bp->bnx2_napi[0].status_blk.msi, 0, bp->status_stats_size);
	for (i = 0; i < BNX2_MAX_MSIX_VEC; i++)
		bp->bnx2_napi[i].last_status_idx = 0;

	bp->idle_chk_status_idx = 0xffff;

	bp->rx_mode = BNX2_EMAC_RX_MODE_SORT_MODE;

	/* Set up how to generate a link change interrupt. */
	REG_WR(bp, BNX2_EMAC_ATTENTION_ENA, BNX2_EMAC_ATTENTION_ENA_LINK);

	/* Program the DMA addresses of the status and statistics blocks. */
	REG_WR(bp, BNX2_HC_STATUS_ADDR_L,
	       (u64) bp->status_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATUS_ADDR_H, (u64) bp->status_blk_mapping >> 32);

	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_L,
	       (u64) bp->stats_blk_mapping & 0xffffffff);
	REG_WR(bp, BNX2_HC_STATISTICS_ADDR_H,
	       (u64) bp->stats_blk_mapping >> 32);

	/* Host-coalescing trip points and tick intervals; each register
	 * packs the interrupt-time value (high 16 bits) with the base
	 * value (low 16 bits). */
	REG_WR(bp, BNX2_HC_TX_QUICK_CONS_TRIP,
	       (bp->tx_quick_cons_trip_int << 16) | bp->tx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_RX_QUICK_CONS_TRIP,
	       (bp->rx_quick_cons_trip_int << 16) | bp->rx_quick_cons_trip);

	REG_WR(bp, BNX2_HC_COMP_PROD_TRIP,
	       (bp->comp_prod_trip_int << 16) | bp->comp_prod_trip);

	REG_WR(bp, BNX2_HC_TX_TICKS, (bp->tx_ticks_int << 16) | bp->tx_ticks);

	REG_WR(bp, BNX2_HC_RX_TICKS, (bp->rx_ticks_int << 16) | bp->rx_ticks);

	REG_WR(bp, BNX2_HC_COM_TICKS,
	       (bp->com_ticks_int << 16) | bp->com_ticks);

	REG_WR(bp, BNX2_HC_CMD_TICKS,
	       (bp->cmd_ticks_int << 16) | bp->cmd_ticks);

	if (bp->flags & BNX2_FLAG_BROKEN_STATS)
		REG_WR(bp, BNX2_HC_STATS_TICKS, 0);
	else
		REG_WR(bp, BNX2_HC_STATS_TICKS, bp->stats_ticks);
	REG_WR(bp, BNX2_HC_STAT_COLLECT_TICKS, 0xbb8);  /* 3ms */

	/* 5706 A1 does not use the timer-mode bits. */
	if (CHIP_ID(bp) == CHIP_ID_5706_A1)
		val = BNX2_HC_CONFIG_COLLECT_STATS;
	else {
		val = BNX2_HC_CONFIG_RX_TMR_MODE | BNX2_HC_CONFIG_TX_TMR_MODE |
		      BNX2_HC_CONFIG_COLLECT_STATS;
	}

	if (bp->irq_nvecs > 1) {
		REG_WR(bp, BNX2_HC_MSIX_BIT_VECTOR,
		       BNX2_HC_MSIX_BIT_VECTOR_VAL);

		val |= BNX2_HC_CONFIG_SB_ADDR_INC_128B;
	}

	if (bp->flags & BNX2_FLAG_ONE_SHOT_MSI)
		val |= BNX2_HC_CONFIG_ONE_SHOT | BNX2_HC_CONFIG_USE_INT_PARAM;

	REG_WR(bp, BNX2_HC_CONFIG, val);

	/* Configure the additional status blocks for MSI-X vectors 1..n;
	 * vector 0 uses the registers programmed above. */
	for (i = 1; i < bp->irq_nvecs; i++) {
		u32 base = ((i - 1) * BNX2_HC_SB_CONFIG_SIZE) +
			   BNX2_HC_SB_CONFIG_1;

		REG_WR(bp, base,
			BNX2_HC_SB_CONFIG_1_TX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_RX_TMR_MODE |
			BNX2_HC_SB_CONFIG_1_ONE_SHOT);

		REG_WR(bp, base + BNX2_HC_TX_QUICK_CONS_TRIP_OFF,
			(bp->tx_quick_cons_trip_int << 16) |
			 bp->tx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_TX_TICKS_OFF,
			(bp->tx_ticks_int << 16) | bp->tx_ticks);

		REG_WR(bp, base + BNX2_HC_RX_QUICK_CONS_TRIP_OFF,
		       (bp->rx_quick_cons_trip_int << 16) |
			bp->rx_quick_cons_trip);

		REG_WR(bp, base + BNX2_HC_RX_TICKS_OFF,
			(bp->rx_ticks_int << 16) | bp->rx_ticks);
	}

	/* Clear internal stats counters. */
	REG_WR(bp, BNX2_HC_COMMAND, BNX2_HC_COMMAND_CLR_STAT_NOW);

	REG_WR(bp, BNX2_HC_ATTN_BITS_ENABLE, STATUS_ATTN_EVENTS);

	/* Initialize the receive filter. */
	bnx2_set_rx_mode(bp->dev);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MISC_NEW_CORE_CTL);
		val |= BNX2_MISC_NEW_CORE_CTL_DMA_ENABLE;
		REG_WR(bp, BNX2_MISC_NEW_CORE_CTL, val);
	}
	/* Tell the firmware the reset sequence is complete. */
	rc = bnx2_fw_sync(bp, BNX2_DRV_MSG_DATA_WAIT2 | BNX2_DRV_MSG_CODE_RESET,
			  1, 0);

	REG_WR(bp, BNX2_MISC_ENABLE_SET_BITS, BNX2_MISC_ENABLE_DEFAULT);
	REG_RD(bp, BNX2_MISC_ENABLE_SET_BITS);

	udelay(20);

	bp->hc_cmd = REG_RD(bp, BNX2_HC_COMMAND);

	return rc;
}
4983
4984 static void
4985 bnx2_clear_ring_states(struct bnx2 *bp)
4986 {
4987         struct bnx2_napi *bnapi;
4988         struct bnx2_tx_ring_info *txr;
4989         struct bnx2_rx_ring_info *rxr;
4990         int i;
4991
4992         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
4993                 bnapi = &bp->bnx2_napi[i];
4994                 txr = &bnapi->tx_ring;
4995                 rxr = &bnapi->rx_ring;
4996
4997                 txr->tx_cons = 0;
4998                 txr->hw_tx_cons = 0;
4999                 rxr->rx_prod_bseq = 0;
5000                 rxr->rx_prod = 0;
5001                 rxr->rx_cons = 0;
5002                 rxr->rx_pg_prod = 0;
5003                 rxr->rx_pg_cons = 0;
5004         }
5005 }
5006
5007 static void
5008 bnx2_init_tx_context(struct bnx2 *bp, u32 cid, struct bnx2_tx_ring_info *txr)
5009 {
5010         u32 val, offset0, offset1, offset2, offset3;
5011         u32 cid_addr = GET_CID_ADDR(cid);
5012
5013         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
5014                 offset0 = BNX2_L2CTX_TYPE_XI;
5015                 offset1 = BNX2_L2CTX_CMD_TYPE_XI;
5016                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI_XI;
5017                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO_XI;
5018         } else {
5019                 offset0 = BNX2_L2CTX_TYPE;
5020                 offset1 = BNX2_L2CTX_CMD_TYPE;
5021                 offset2 = BNX2_L2CTX_TBDR_BHADDR_HI;
5022                 offset3 = BNX2_L2CTX_TBDR_BHADDR_LO;
5023         }
5024         val = BNX2_L2CTX_TYPE_TYPE_L2 | BNX2_L2CTX_TYPE_SIZE_L2;
5025         bnx2_ctx_wr(bp, cid_addr, offset0, val);
5026
5027         val = BNX2_L2CTX_CMD_TYPE_TYPE_L2 | (8 << 16);
5028         bnx2_ctx_wr(bp, cid_addr, offset1, val);
5029
5030         val = (u64) txr->tx_desc_mapping >> 32;
5031         bnx2_ctx_wr(bp, cid_addr, offset2, val);
5032
5033         val = (u64) txr->tx_desc_mapping & 0xffffffff;
5034         bnx2_ctx_wr(bp, cid_addr, offset3, val);
5035 }
5036
5037 static void
5038 bnx2_init_tx_ring(struct bnx2 *bp, int ring_num)
5039 {
5040         struct tx_bd *txbd;
5041         u32 cid = TX_CID;
5042         struct bnx2_napi *bnapi;
5043         struct bnx2_tx_ring_info *txr;
5044
5045         bnapi = &bp->bnx2_napi[ring_num];
5046         txr = &bnapi->tx_ring;
5047
5048         if (ring_num == 0)
5049                 cid = TX_CID;
5050         else
5051                 cid = TX_TSS_CID + ring_num - 1;
5052
5053         bp->tx_wake_thresh = bp->tx_ring_size / 2;
5054
5055         txbd = &txr->tx_desc_ring[MAX_TX_DESC_CNT];
5056
5057         txbd->tx_bd_haddr_hi = (u64) txr->tx_desc_mapping >> 32;
5058         txbd->tx_bd_haddr_lo = (u64) txr->tx_desc_mapping & 0xffffffff;
5059
5060         txr->tx_prod = 0;
5061         txr->tx_prod_bseq = 0;
5062
5063         txr->tx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BIDX;
5064         txr->tx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_TX_HOST_BSEQ;
5065
5066         bnx2_init_tx_context(bp, cid, txr);
5067 }
5068
5069 static void
5070 bnx2_init_rxbd_rings(struct rx_bd *rx_ring[], dma_addr_t dma[], u32 buf_size,
5071                      int num_rings)
5072 {
5073         int i;
5074         struct rx_bd *rxbd;
5075
5076         for (i = 0; i < num_rings; i++) {
5077                 int j;
5078
5079                 rxbd = &rx_ring[i][0];
5080                 for (j = 0; j < MAX_RX_DESC_CNT; j++, rxbd++) {
5081                         rxbd->rx_bd_len = buf_size;
5082                         rxbd->rx_bd_flags = RX_BD_FLAGS_START | RX_BD_FLAGS_END;
5083                 }
5084                 if (i == (num_rings - 1))
5085                         j = 0;
5086                 else
5087                         j = i + 1;
5088                 rxbd->rx_bd_haddr_hi = (u64) dma[j] >> 32;
5089                 rxbd->rx_bd_haddr_lo = (u64) dma[j] & 0xffffffff;
5090         }
5091 }
5092
/* Program the RX context for one ring and pre-fill it with buffers. */
static void
bnx2_init_rx_ring(struct bnx2 *bp, int ring_num)
{
	int i;
	u16 prod, ring_prod;
	u32 cid, rx_cid_addr, val;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[ring_num];
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	/* Ring 0 uses the default RX CID; additional RSS rings use
	 * consecutive CIDs starting at RX_RSS_CID.
	 */
	if (ring_num == 0)
		cid = RX_CID;
	else
		cid = RX_RSS_CID + ring_num - 1;

	rx_cid_addr = GET_CID_ADDR(cid);

	bnx2_init_rxbd_rings(rxr->rx_desc_ring, rxr->rx_desc_mapping,
			     bp->rx_buf_use_size, bp->rx_max_ring);

	bnx2_init_rx_context(bp, cid);

	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		val = REG_RD(bp, BNX2_MQ_MAP_L2_5);
		REG_WR(bp, BNX2_MQ_MAP_L2_5, val | BNX2_MQ_MAP_L2_5_ARM);
	}

	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, 0);
	if (bp->rx_pg_ring_size) {
		/* Set up the page ring used to hold the bulk of
		 * jumbo frames.
		 */
		bnx2_init_rxbd_rings(rxr->rx_pg_desc_ring,
				     rxr->rx_pg_desc_mapping,
				     PAGE_SIZE, bp->rx_max_pg_ring);
		val = (bp->rx_buf_use_size << 16) | PAGE_SIZE;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_PG_BUF_SIZE, val);
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_RBDC_KEY,
		       BNX2_L2CTX_RBDC_JUMBO_KEY - ring_num);

		/* Base address of the first page-ring BD page. */
		val = (u64) rxr->rx_pg_desc_mapping[0] >> 32;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_HI, val);

		val = (u64) rxr->rx_pg_desc_mapping[0] & 0xffffffff;
		bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_PG_BDHADDR_LO, val);

		if (CHIP_NUM(bp) == CHIP_NUM_5709)
			REG_WR(bp, BNX2_MQ_MAP_L2_3, BNX2_MQ_MAP_L2_3_DEFAULT);
	}

	/* Base address of the first RX BD page. */
	val = (u64) rxr->rx_desc_mapping[0] >> 32;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_HI, val);

	val = (u64) rxr->rx_desc_mapping[0] & 0xffffffff;
	bnx2_ctx_wr(bp, rx_cid_addr, BNX2_L2CTX_NX_BDHADDR_LO, val);

	/* Populate the page ring; stop early if allocation fails. */
	ring_prod = prod = rxr->rx_pg_prod;
	for (i = 0; i < bp->rx_pg_ring_size; i++) {
		if (bnx2_alloc_rx_page(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_PG_RING_IDX(prod);
	}
	rxr->rx_pg_prod = prod;

	/* Populate the normal RX ring with skbs. */
	ring_prod = prod = rxr->rx_prod;
	for (i = 0; i < bp->rx_ring_size; i++) {
		if (bnx2_alloc_rx_skb(bp, rxr, ring_prod) < 0)
			break;
		prod = NEXT_RX_BD(prod);
		ring_prod = RX_RING_IDX(prod);
	}
	rxr->rx_prod = prod;

	/* Mailbox addresses used to publish producer indices. */
	rxr->rx_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BDIDX;
	rxr->rx_bseq_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_BSEQ;
	rxr->rx_pg_bidx_addr = MB_GET_CID_ADDR(cid) + BNX2_L2CTX_HOST_PG_BDIDX;

	REG_WR16(bp, rxr->rx_pg_bidx_addr, rxr->rx_pg_prod);
	REG_WR16(bp, rxr->rx_bidx_addr, prod);

	REG_WR(bp, rxr->rx_bseq_addr, rxr->rx_prod_bseq);
}
5172
/* Initialize all TX and RX rings and, when multiple RX rings are in
 * use, program the RSS indirection table and hash configuration.
 */
static void
bnx2_init_all_rings(struct bnx2 *bp)
{
	int i;
	u32 val;

	bnx2_clear_ring_states(bp);

	REG_WR(bp, BNX2_TSCH_TSS_CFG, 0);
	for (i = 0; i < bp->num_tx_rings; i++)
		bnx2_init_tx_ring(bp, i);

	/* Enable TSS scheduling over the extra TX rings. */
	if (bp->num_tx_rings > 1)
		REG_WR(bp, BNX2_TSCH_TSS_CFG, ((bp->num_tx_rings - 1) << 24) |
		       (TX_TSS_CID << 7));

	REG_WR(bp, BNX2_RLUP_RSS_CONFIG, 0);
	bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ, 0);

	for (i = 0; i < bp->num_rx_rings; i++)
		bnx2_init_rx_ring(bp, i);

	if (bp->num_rx_rings > 1) {
		u32 tbl_32;
		u8 *tbl = (u8 *) &tbl_32;	/* byte view used to stage each table word */

		bnx2_reg_wr_ind(bp, BNX2_RXP_SCRATCH_RSS_TBL_SZ,
				BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES);

		/* Build the indirection table one byte at a time,
		 * spreading entries round-robin over the non-default
		 * rings, and flush each completed 32-bit word to the
		 * RXP scratch area as big-endian.
		 */
		for (i = 0; i < BNX2_RXP_SCRATCH_RSS_TBL_MAX_ENTRIES; i++) {
			tbl[i % 4] = i % (bp->num_rx_rings - 1);
			if ((i % 4) == 3)
				bnx2_reg_wr_ind(bp,
						BNX2_RXP_SCRATCH_RSS_TBL + i,
						cpu_to_be32(tbl_32));
		}

		val = BNX2_RLUP_RSS_CONFIG_IPV4_RSS_TYPE_ALL_XI |
		      BNX2_RLUP_RSS_CONFIG_IPV6_RSS_TYPE_ALL_XI;

		REG_WR(bp, BNX2_RLUP_RSS_CONFIG, val);

	}
}
5217
5218 static u32 bnx2_find_max_ring(u32 ring_size, u32 max_size)
5219 {
5220         u32 max, num_rings = 1;
5221
5222         while (ring_size > MAX_RX_DESC_CNT) {
5223                 ring_size -= MAX_RX_DESC_CNT;
5224                 num_rings++;
5225         }
5226         /* round to next power of 2 */
5227         max = max_size;
5228         while ((max & num_rings) == 0)
5229                 max >>= 1;
5230
5231         if (num_rings != max)
5232                 max <<= 1;
5233
5234         return max;
5235 }
5236
5237 static void
5238 bnx2_set_rx_ring_size(struct bnx2 *bp, u32 size)
5239 {
5240         u32 rx_size, rx_space, jumbo_size;
5241
5242         /* 8 for CRC and VLAN */
5243         rx_size = bp->dev->mtu + ETH_HLEN + BNX2_RX_OFFSET + 8;
5244
5245         rx_space = SKB_DATA_ALIGN(rx_size + BNX2_RX_ALIGN) + NET_SKB_PAD +
5246                 sizeof(struct skb_shared_info);
5247
5248         bp->rx_copy_thresh = BNX2_RX_COPY_THRESH;
5249         bp->rx_pg_ring_size = 0;
5250         bp->rx_max_pg_ring = 0;
5251         bp->rx_max_pg_ring_idx = 0;
5252         if ((rx_space > PAGE_SIZE) && !(bp->flags & BNX2_FLAG_JUMBO_BROKEN)) {
5253                 int pages = PAGE_ALIGN(bp->dev->mtu - 40) >> PAGE_SHIFT;
5254
5255                 jumbo_size = size * pages;
5256                 if (jumbo_size > MAX_TOTAL_RX_PG_DESC_CNT)
5257                         jumbo_size = MAX_TOTAL_RX_PG_DESC_CNT;
5258
5259                 bp->rx_pg_ring_size = jumbo_size;
5260                 bp->rx_max_pg_ring = bnx2_find_max_ring(jumbo_size,
5261                                                         MAX_RX_PG_RINGS);
5262                 bp->rx_max_pg_ring_idx = (bp->rx_max_pg_ring * RX_DESC_CNT) - 1;
5263                 rx_size = BNX2_RX_COPY_THRESH + BNX2_RX_OFFSET;
5264                 bp->rx_copy_thresh = 0;
5265         }
5266
5267         bp->rx_buf_use_size = rx_size;
5268         /* hw alignment */
5269         bp->rx_buf_size = bp->rx_buf_use_size + BNX2_RX_ALIGN;
5270         bp->rx_jumbo_thresh = rx_size - BNX2_RX_OFFSET;
5271         bp->rx_ring_size = size;
5272         bp->rx_max_ring = bnx2_find_max_ring(size, MAX_RX_RINGS);
5273         bp->rx_max_ring_idx = (bp->rx_max_ring * RX_DESC_CNT) - 1;
5274 }
5275
5276 static void
5277 bnx2_free_tx_skbs(struct bnx2 *bp)
5278 {
5279         int i;
5280
5281         for (i = 0; i < bp->num_tx_rings; i++) {
5282                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5283                 struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
5284                 int j;
5285
5286                 if (txr->tx_buf_ring == NULL)
5287                         continue;
5288
5289                 for (j = 0; j < TX_DESC_CNT; ) {
5290                         struct sw_tx_bd *tx_buf = &txr->tx_buf_ring[j];
5291                         struct sk_buff *skb = tx_buf->skb;
5292
5293                         if (skb == NULL) {
5294                                 j++;
5295                                 continue;
5296                         }
5297
5298                         skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
5299
5300                         tx_buf->skb = NULL;
5301
5302                         j += skb_shinfo(skb)->nr_frags + 1;
5303                         dev_kfree_skb(skb);
5304                 }
5305         }
5306 }
5307
5308 static void
5309 bnx2_free_rx_skbs(struct bnx2 *bp)
5310 {
5311         int i;
5312
5313         for (i = 0; i < bp->num_rx_rings; i++) {
5314                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
5315                 struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;
5316                 int j;
5317
5318                 if (rxr->rx_buf_ring == NULL)
5319                         return;
5320
5321                 for (j = 0; j < bp->rx_max_ring_idx; j++) {
5322                         struct sw_bd *rx_buf = &rxr->rx_buf_ring[j];
5323                         struct sk_buff *skb = rx_buf->skb;
5324
5325                         if (skb == NULL)
5326                                 continue;
5327
5328                         pci_unmap_single(bp->pdev,
5329                                          pci_unmap_addr(rx_buf, mapping),
5330                                          bp->rx_buf_use_size,
5331                                          PCI_DMA_FROMDEVICE);
5332
5333                         rx_buf->skb = NULL;
5334
5335                         dev_kfree_skb(skb);
5336                 }
5337                 for (j = 0; j < bp->rx_max_pg_ring_idx; j++)
5338                         bnx2_free_rx_page(bp, rxr, j);
5339         }
5340 }
5341
/* Release all TX and RX buffers held by the driver. */
static void
bnx2_free_skbs(struct bnx2 *bp)
{
	bnx2_free_tx_skbs(bp);
	bnx2_free_rx_skbs(bp);
}
5348
5349 static int
5350 bnx2_reset_nic(struct bnx2 *bp, u32 reset_code)
5351 {
5352         int rc;
5353
5354         rc = bnx2_reset_chip(bp, reset_code);
5355         bnx2_free_skbs(bp);
5356         if (rc)
5357                 return rc;
5358
5359         if ((rc = bnx2_init_chip(bp)) != 0)
5360                 return rc;
5361
5362         bnx2_init_all_rings(bp);
5363         return 0;
5364 }
5365
5366 static int
5367 bnx2_init_nic(struct bnx2 *bp, int reset_phy)
5368 {
5369         int rc;
5370
5371         if ((rc = bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET)) != 0)
5372                 return rc;
5373
5374         spin_lock_bh(&bp->phy_lock);
5375         bnx2_init_phy(bp, reset_phy);
5376         bnx2_set_link(bp);
5377         if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
5378                 bnx2_remote_phy_event(bp);
5379         spin_unlock_bh(&bp->phy_lock);
5380         return 0;
5381 }
5382
5383 static int
5384 bnx2_shutdown_chip(struct bnx2 *bp)
5385 {
5386         u32 reset_code;
5387
5388         if (bp->flags & BNX2_FLAG_NO_WOL)
5389                 reset_code = BNX2_DRV_MSG_CODE_UNLOAD_LNK_DN;
5390         else if (bp->wol)
5391                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_WOL;
5392         else
5393                 reset_code = BNX2_DRV_MSG_CODE_SUSPEND_NO_WOL;
5394
5395         return bnx2_reset_chip(bp, reset_code);
5396 }
5397
/* Self-test: for each register in the table, verify that writable bits
 * (rw_mask) accept 0 and all-ones, and that read-only bits (ro_mask)
 * are unaffected by writes.  Original values are restored.  Returns 0
 * on success or -ENODEV on the first failing register.
 */
static int
bnx2_test_registers(struct bnx2 *bp)
{
	int ret;
	int i, is_5709;
	static const struct {
		u16   offset;
		u16   flags;	/* BNX2_FL_NOT_5709: skip entry on 5709 */
#define BNX2_FL_NOT_5709        1
		u32   rw_mask;	/* bits that must be writable */
		u32   ro_mask;	/* bits that must be read-only */
	} reg_tbl[] = {
		{ 0x006c, 0, 0x00000000, 0x0000003f },
		{ 0x0090, 0, 0xffffffff, 0x00000000 },
		{ 0x0094, 0, 0x00000000, 0x00000000 },

		{ 0x0404, BNX2_FL_NOT_5709, 0x00003f00, 0x00000000 },
		{ 0x0418, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x041c, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0420, BNX2_FL_NOT_5709, 0x00000000, 0x80ffffff },
		{ 0x0424, BNX2_FL_NOT_5709, 0x00000000, 0x00000000 },
		{ 0x0428, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0450, BNX2_FL_NOT_5709, 0x00000000, 0x0000ffff },
		{ 0x0454, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0458, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },

		{ 0x0808, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0854, BNX2_FL_NOT_5709, 0x00000000, 0xffffffff },
		{ 0x0868, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x086c, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0870, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },
		{ 0x0874, BNX2_FL_NOT_5709, 0x00000000, 0x77777777 },

		{ 0x0c00, BNX2_FL_NOT_5709, 0x00000000, 0x00000001 },
		{ 0x0c04, BNX2_FL_NOT_5709, 0x00000000, 0x03ff0001 },
		{ 0x0c08, BNX2_FL_NOT_5709,  0x0f0ff073, 0x00000000 },

		{ 0x1000, 0, 0x00000000, 0x00000001 },
		{ 0x1004, BNX2_FL_NOT_5709, 0x00000000, 0x000f0001 },

		{ 0x1408, 0, 0x01c00800, 0x00000000 },
		{ 0x149c, 0, 0x8000ffff, 0x00000000 },
		{ 0x14a8, 0, 0x00000000, 0x000001ff },
		{ 0x14ac, 0, 0x0fffffff, 0x10000000 },
		{ 0x14b0, 0, 0x00000002, 0x00000001 },
		{ 0x14b8, 0, 0x00000000, 0x00000000 },
		{ 0x14c0, 0, 0x00000000, 0x00000009 },
		{ 0x14c4, 0, 0x00003fff, 0x00000000 },
		{ 0x14cc, 0, 0x00000000, 0x00000001 },
		{ 0x14d0, 0, 0xffffffff, 0x00000000 },

		{ 0x1800, 0, 0x00000000, 0x00000001 },
		{ 0x1804, 0, 0x00000000, 0x00000003 },

		{ 0x2800, 0, 0x00000000, 0x00000001 },
		{ 0x2804, 0, 0x00000000, 0x00003f01 },
		{ 0x2808, 0, 0x0f3f3f03, 0x00000000 },
		{ 0x2810, 0, 0xffff0000, 0x00000000 },
		{ 0x2814, 0, 0xffff0000, 0x00000000 },
		{ 0x2818, 0, 0xffff0000, 0x00000000 },
		{ 0x281c, 0, 0xffff0000, 0x00000000 },
		{ 0x2834, 0, 0xffffffff, 0x00000000 },
		{ 0x2840, 0, 0x00000000, 0xffffffff },
		{ 0x2844, 0, 0x00000000, 0xffffffff },
		{ 0x2848, 0, 0xffffffff, 0x00000000 },
		{ 0x284c, 0, 0xf800f800, 0x07ff07ff },

		{ 0x2c00, 0, 0x00000000, 0x00000011 },
		{ 0x2c04, 0, 0x00000000, 0x00030007 },

		{ 0x3c00, 0, 0x00000000, 0x00000001 },
		{ 0x3c04, 0, 0x00000000, 0x00070000 },
		{ 0x3c08, 0, 0x00007f71, 0x07f00000 },
		{ 0x3c0c, 0, 0x1f3ffffc, 0x00000000 },
		{ 0x3c10, 0, 0xffffffff, 0x00000000 },
		{ 0x3c14, 0, 0x00000000, 0xffffffff },
		{ 0x3c18, 0, 0x00000000, 0xffffffff },
		{ 0x3c1c, 0, 0xfffff000, 0x00000000 },
		{ 0x3c20, 0, 0xffffff00, 0x00000000 },

		{ 0x5004, 0, 0x00000000, 0x0000007f },
		{ 0x5008, 0, 0x0f0007ff, 0x00000000 },

		{ 0x5c00, 0, 0x00000000, 0x00000001 },
		{ 0x5c04, 0, 0x00000000, 0x0003000f },
		{ 0x5c08, 0, 0x00000003, 0x00000000 },
		{ 0x5c0c, 0, 0x0000fff8, 0x00000000 },
		{ 0x5c10, 0, 0x00000000, 0xffffffff },
		{ 0x5c80, 0, 0x00000000, 0x0f7113f1 },
		{ 0x5c84, 0, 0x00000000, 0x0000f333 },
		{ 0x5c88, 0, 0x00000000, 0x00077373 },
		{ 0x5c8c, 0, 0x00000000, 0x0007f737 },

		{ 0x6808, 0, 0x0000ff7f, 0x00000000 },
		{ 0x680c, 0, 0xffffffff, 0x00000000 },
		{ 0x6810, 0, 0xffffffff, 0x00000000 },
		{ 0x6814, 0, 0xffffffff, 0x00000000 },
		{ 0x6818, 0, 0xffffffff, 0x00000000 },
		{ 0x681c, 0, 0xffffffff, 0x00000000 },
		{ 0x6820, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6824, 0, 0x00ff00ff, 0x00000000 },
		{ 0x6828, 0, 0x00ff00ff, 0x00000000 },
		{ 0x682c, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6830, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6834, 0, 0x03ff03ff, 0x00000000 },
		{ 0x6838, 0, 0x03ff03ff, 0x00000000 },
		{ 0x683c, 0, 0x0000ffff, 0x00000000 },
		{ 0x6840, 0, 0x00000ff0, 0x00000000 },
		{ 0x6844, 0, 0x00ffff00, 0x00000000 },
		{ 0x684c, 0, 0xffffffff, 0x00000000 },
		{ 0x6850, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6854, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6858, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x685c, 0, 0x7f7f7f7f, 0x00000000 },
		{ 0x6908, 0, 0x00000000, 0x0001ff0f },
		{ 0x690c, 0, 0x00000000, 0x0ffe00f0 },

		{ 0xffff, 0, 0x00000000, 0x00000000 },	/* sentinel */
	};

	ret = 0;
	is_5709 = 0;
	if (CHIP_NUM(bp) == CHIP_NUM_5709)
		is_5709 = 1;

	for (i = 0; reg_tbl[i].offset != 0xffff; i++) {
		u32 offset, rw_mask, ro_mask, save_val, val;
		u16 flags = reg_tbl[i].flags;

		if (is_5709 && (flags & BNX2_FL_NOT_5709))
			continue;

		offset = (u32) reg_tbl[i].offset;
		rw_mask = reg_tbl[i].rw_mask;
		ro_mask = reg_tbl[i].ro_mask;

		save_val = readl(bp->regview + offset);

		/* Write 0: rw bits must read back 0, ro bits unchanged. */
		writel(0, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != 0) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		/* Write all-ones: rw bits must read back set. */
		writel(0xffffffff, bp->regview + offset);

		val = readl(bp->regview + offset);
		if ((val & rw_mask) != rw_mask) {
			goto reg_test_err;
		}

		if ((val & ro_mask) != (save_val & ro_mask)) {
			goto reg_test_err;
		}

		writel(save_val, bp->regview + offset);
		continue;

reg_test_err:
		/* Restore the original value even on failure. */
		writel(save_val, bp->regview + offset);
		ret = -ENODEV;
		break;
	}
	return ret;
}
5568
5569 static int
5570 bnx2_do_mem_test(struct bnx2 *bp, u32 start, u32 size)
5571 {
5572         static const u32 test_pattern[] = { 0x00000000, 0xffffffff, 0x55555555,
5573                 0xaaaaaaaa , 0xaa55aa55, 0x55aa55aa };
5574         int i;
5575
5576         for (i = 0; i < sizeof(test_pattern) / 4; i++) {
5577                 u32 offset;
5578
5579                 for (offset = 0; offset < size; offset += 4) {
5580
5581                         bnx2_reg_wr_ind(bp, start + offset, test_pattern[i]);
5582
5583                         if (bnx2_reg_rd_ind(bp, start + offset) !=
5584                                 test_pattern[i]) {
5585                                 return -ENODEV;
5586                         }
5587                 }
5588         }
5589         return 0;
5590 }
5591
5592 static int
5593 bnx2_test_memory(struct bnx2 *bp)
5594 {
5595         int ret = 0;
5596         int i;
5597         static struct mem_entry {
5598                 u32   offset;
5599                 u32   len;
5600         } mem_tbl_5706[] = {
5601                 { 0x60000,  0x4000 },
5602                 { 0xa0000,  0x3000 },
5603                 { 0xe0000,  0x4000 },
5604                 { 0x120000, 0x4000 },
5605                 { 0x1a0000, 0x4000 },
5606                 { 0x160000, 0x4000 },
5607                 { 0xffffffff, 0    },
5608         },
5609         mem_tbl_5709[] = {
5610                 { 0x60000,  0x4000 },
5611                 { 0xa0000,  0x3000 },
5612                 { 0xe0000,  0x4000 },
5613                 { 0x120000, 0x4000 },
5614                 { 0x1a0000, 0x4000 },
5615                 { 0xffffffff, 0    },
5616         };
5617         struct mem_entry *mem_tbl;
5618
5619         if (CHIP_NUM(bp) == CHIP_NUM_5709)
5620                 mem_tbl = mem_tbl_5709;
5621         else
5622                 mem_tbl = mem_tbl_5706;
5623
5624         for (i = 0; mem_tbl[i].offset != 0xffffffff; i++) {
5625                 if ((ret = bnx2_do_mem_test(bp, mem_tbl[i].offset,
5626                         mem_tbl[i].len)) != 0) {
5627                         return ret;
5628                 }
5629         }
5630
5631         return ret;
5632 }
5633
5634 #define BNX2_MAC_LOOPBACK       0
5635 #define BNX2_PHY_LOOPBACK       1
5636
/* Send one frame through MAC or PHY loopback and verify it comes back
 * intact.  Returns 0 on success, -EINVAL for an unknown mode,
 * -ENOMEM/-EIO on allocation or DMA-mapping failure, and -ENODEV if
 * the looped-back frame is missing or corrupted.
 */
static int
bnx2_run_loopback(struct bnx2 *bp, int loopback_mode)
{
	unsigned int pkt_size, num_pkts, i;
	struct sk_buff *skb, *rx_skb;
	unsigned char *packet;
	u16 rx_start_idx, rx_idx;
	dma_addr_t map;
	struct tx_bd *txbd;
	struct sw_bd *rx_buf;
	struct l2_fhdr *rx_hdr;
	int ret = -ENODEV;
	struct bnx2_napi *bnapi = &bp->bnx2_napi[0], *tx_napi;
	struct bnx2_tx_ring_info *txr = &bnapi->tx_ring;
	struct bnx2_rx_ring_info *rxr = &bnapi->rx_ring;

	tx_napi = bnapi;

	txr = &tx_napi->tx_ring;
	rxr = &bnapi->rx_ring;
	if (loopback_mode == BNX2_MAC_LOOPBACK) {
		bp->loopback = MAC_LOOPBACK;
		bnx2_set_mac_loopback(bp);
	}
	else if (loopback_mode == BNX2_PHY_LOOPBACK) {
		/* PHY loopback is not possible with a remote-managed PHY;
		 * report success so the test is effectively skipped.
		 */
		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return 0;

		bp->loopback = PHY_LOOPBACK;
		bnx2_set_phy_loopback(bp);
	}
	else
		return -EINVAL;

	/* Build the test frame: our own MAC as destination, zeroed
	 * source/type bytes, then a counting byte pattern as payload.
	 */
	pkt_size = min(bp->dev->mtu + ETH_HLEN, bp->rx_jumbo_thresh - 4);
	skb = netdev_alloc_skb(bp->dev, pkt_size);
	if (!skb)
		return -ENOMEM;
	packet = skb_put(skb, pkt_size);
	memcpy(packet, bp->dev->dev_addr, 6);
	memset(packet + 6, 0x0, 8);
	for (i = 14; i < pkt_size; i++)
		packet[i] = (unsigned char) (i & 0xff);

	if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
		dev_kfree_skb(skb);
		return -EIO;
	}
	map = skb_shinfo(skb)->dma_head;

	/* Force a status-block update to capture the starting RX
	 * consumer index.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);
	rx_start_idx = bnx2_get_hw_rx_cons(bnapi);

	num_pkts = 0;

	/* Post a single TX BD describing the whole frame. */
	txbd = &txr->tx_desc_ring[TX_RING_IDX(txr->tx_prod)];

	txbd->tx_bd_haddr_hi = (u64) map >> 32;
	txbd->tx_bd_haddr_lo = (u64) map & 0xffffffff;
	txbd->tx_bd_mss_nbytes = pkt_size;
	txbd->tx_bd_vlan_tag_flags = TX_BD_FLAGS_START | TX_BD_FLAGS_END;

	num_pkts++;
	txr->tx_prod = NEXT_TX_BD(txr->tx_prod);
	txr->tx_prod_bseq += pkt_size;

	/* Ring the TX doorbell. */
	REG_WR16(bp, txr->tx_bidx_addr, txr->tx_prod);
	REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

	udelay(100);

	/* Force another status-block update so the loopback result is
	 * visible in the status block.
	 */
	REG_WR(bp, BNX2_HC_COMMAND,
	       bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW_WO_INT);

	REG_RD(bp, BNX2_HC_COMMAND);

	udelay(5);

	skb_dma_unmap(&bp->pdev->dev, skb, DMA_TO_DEVICE);
	dev_kfree_skb(skb);

	/* The frame must have been fully consumed by the TX engine... */
	if (bnx2_get_hw_tx_cons(tx_napi) != txr->tx_prod)
		goto loopback_test_done;

	/* ...and exactly num_pkts frames must have been received. */
	rx_idx = bnx2_get_hw_rx_cons(bnapi);
	if (rx_idx != rx_start_idx + num_pkts) {
		goto loopback_test_done;
	}

	rx_buf = &rxr->rx_buf_ring[rx_start_idx];
	rx_skb = rx_buf->skb;

	rx_hdr = (struct l2_fhdr *) rx_skb->data;
	skb_reserve(rx_skb, BNX2_RX_OFFSET);

	pci_dma_sync_single_for_cpu(bp->pdev,
		pci_unmap_addr(rx_buf, mapping),
		bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	/* Reject frames the chip flagged with any receive error. */
	if (rx_hdr->l2_fhdr_status &
		(L2_FHDR_ERRORS_BAD_CRC |
		L2_FHDR_ERRORS_PHY_DECODE |
		L2_FHDR_ERRORS_ALIGNMENT |
		L2_FHDR_ERRORS_TOO_SHORT |
		L2_FHDR_ERRORS_GIANT_FRAME)) {

		goto loopback_test_done;
	}

	/* Length (minus 4-byte CRC) and payload must match the frame
	 * that was sent.
	 */
	if ((rx_hdr->l2_fhdr_pkt_len - 4) != pkt_size) {
		goto loopback_test_done;
	}

	for (i = 14; i < pkt_size; i++) {
		if (*(rx_skb->data + i) != (unsigned char) (i & 0xff)) {
			goto loopback_test_done;
		}
	}

	ret = 0;

loopback_test_done:
	bp->loopback = 0;
	return ret;
}
5767
5768 #define BNX2_MAC_LOOPBACK_FAILED        1
5769 #define BNX2_PHY_LOOPBACK_FAILED        2
5770 #define BNX2_LOOPBACK_FAILED            (BNX2_MAC_LOOPBACK_FAILED |     \
5771                                          BNX2_PHY_LOOPBACK_FAILED)
5772
5773 static int
5774 bnx2_test_loopback(struct bnx2 *bp)
5775 {
5776         int rc = 0;
5777
5778         if (!netif_running(bp->dev))
5779                 return BNX2_LOOPBACK_FAILED;
5780
5781         bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
5782         spin_lock_bh(&bp->phy_lock);
5783         bnx2_init_phy(bp, 1);
5784         spin_unlock_bh(&bp->phy_lock);
5785         if (bnx2_run_loopback(bp, BNX2_MAC_LOOPBACK))
5786                 rc |= BNX2_MAC_LOOPBACK_FAILED;
5787         if (bnx2_run_loopback(bp, BNX2_PHY_LOOPBACK))
5788                 rc |= BNX2_PHY_LOOPBACK_FAILED;
5789         return rc;
5790 }
5791
5792 #define NVRAM_SIZE 0x200
5793 #define CRC32_RESIDUAL 0xdebb20e3
5794
5795 static int
5796 bnx2_test_nvram(struct bnx2 *bp)
5797 {
5798         __be32 buf[NVRAM_SIZE / 4];
5799         u8 *data = (u8 *) buf;
5800         int rc = 0;
5801         u32 magic, csum;
5802
5803         if ((rc = bnx2_nvram_read(bp, 0, data, 4)) != 0)
5804                 goto test_nvram_done;
5805
5806         magic = be32_to_cpu(buf[0]);
5807         if (magic != 0x669955aa) {
5808                 rc = -ENODEV;
5809                 goto test_nvram_done;
5810         }
5811
5812         if ((rc = bnx2_nvram_read(bp, 0x100, data, NVRAM_SIZE)) != 0)
5813                 goto test_nvram_done;
5814
5815         csum = ether_crc_le(0x100, data);
5816         if (csum != CRC32_RESIDUAL) {
5817                 rc = -ENODEV;
5818                 goto test_nvram_done;
5819         }
5820
5821         csum = ether_crc_le(0x100, data + 0x100);
5822         if (csum != CRC32_RESIDUAL) {
5823                 rc = -ENODEV;
5824         }
5825
5826 test_nvram_done:
5827         return rc;
5828 }
5829
/* ethtool self-test: report current link state.
 * Returns 0 when the link is up, -ENODEV otherwise (including when the
 * device is not running).
 */
static int
bnx2_test_link(struct bnx2 *bp)
{
        u32 bmsr;

        if (!netif_running(bp->dev))
                return -ENODEV;

        /* With a remote PHY the firmware tracks link state for us. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                if (bp->link_up)
                        return 0;
                return -ENODEV;
        }
        spin_lock_bh(&bp->phy_lock);
        bnx2_enable_bmsr1(bp);
        /* Read twice: MII status bits latch link-down, so the first read
         * may return stale state and the second the current state
         * (standard MII latching behavior -- confirm for this PHY).
         */
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_read_phy(bp, bp->mii_bmsr1, &bmsr);
        bnx2_disable_bmsr1(bp);
        spin_unlock_bh(&bp->phy_lock);

        if (bmsr & BMSR_LSTATUS) {
                return 0;
        }
        return -ENODEV;
}
5855
/* ethtool self-test: verify the device can generate an interrupt.
 * Forces an immediate coalesce event, then polls the status index for
 * up to ~100ms.  Returns 0 if the index changed (an interrupt was
 * serviced), -ENODEV otherwise.
 */
static int
bnx2_test_intr(struct bnx2 *bp)
{
        int i;
        u16 status_idx;

        if (!netif_running(bp->dev))
                return -ENODEV;

        status_idx = REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff;

        /* This register is not touched during run-time. */
        REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd | BNX2_HC_COMMAND_COAL_NOW);
        /* Read back, presumably to flush the posted write. */
        REG_RD(bp, BNX2_HC_COMMAND);

        /* Poll up to 10 times, 10ms apart, for the status index to move. */
        for (i = 0; i < 10; i++) {
                if ((REG_RD(bp, BNX2_PCICFG_INT_ACK_CMD) & 0xffff) !=
                        status_idx) {

                        break;
                }

                msleep_interruptible(10);
        }
        if (i < 10)
                return 0;

        return -ENODEV;
}
5885
/* Determining link for parallel detection. */
/* Returns 1 when the 5706 SerDes sees a non-autonegotiating link
 * partner suitable for parallel detection, 0 otherwise.
 */
static int
bnx2_5706_serdes_has_link(struct bnx2 *bp)
{
        u32 mode_ctl, an_dbg, exp;

        /* Parallel detection disabled for this configuration. */
        if (bp->phy_flags & BNX2_PHY_FLAG_NO_PARALLEL)
                return 0;

        /* No signal detected -> no link. */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_MODE_CTL);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &mode_ctl);

        if (!(mode_ctl & MISC_SHDW_MODE_CTL_SIG_DET))
                return 0;

        /* Double read -- status bits appear to be latched, so the second
         * read reflects current state (NOTE(review): confirm).
         */
        bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);
        bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &an_dbg);

        if (an_dbg & (MISC_SHDW_AN_DBG_NOSYNC | MISC_SHDW_AN_DBG_RUDI_INVALID))
                return 0;

        bnx2_write_phy(bp, MII_BNX2_DSP_ADDRESS, MII_EXPAND_REG1);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);
        bnx2_read_phy(bp, MII_BNX2_DSP_RW_PORT, &exp);

        if (exp & MII_EXPAND_REG1_RUDI_C)       /* receiving CONFIG */
                return 0;

        return 1;
}
5917
/* Per-tick SerDes state machine for the 5706, called from bnx2_timer().
 * Handles parallel detection (forcing 1G full duplex when the partner
 * does not autonegotiate) and returning to autoneg once the partner is
 * seen autonegotiating again, plus link-state resync.
 */
static void
bnx2_5706_serdes_timer(struct bnx2 *bp)
{
        int check_link = 1;

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending) {
                /* Still waiting out a previous autoneg restart. */
                bp->serdes_an_pending--;
                check_link = 0;
        } else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                bp->current_interval = BNX2_TIMER_INTERVAL;

                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);

                if (bmcr & BMCR_ANENABLE) {
                        /* Partner may not autoneg: force 1G full duplex
                         * via parallel detection.
                         */
                        if (bnx2_5706_serdes_has_link(bp)) {
                                bmcr &= ~BMCR_ANENABLE;
                                bmcr |= BMCR_SPEED1000 | BMCR_FULLDPLX;
                                bnx2_write_phy(bp, bp->mii_bmcr, bmcr);
                                bp->phy_flags |= BNX2_PHY_FLAG_PARALLEL_DETECT;
                        }
                }
        }
        else if ((bp->link_up) && (bp->autoneg & AUTONEG_SPEED) &&
                 (bp->phy_flags & BNX2_PHY_FLAG_PARALLEL_DETECT)) {
                u32 phy2;

                /* If the partner is now autonegotiating, re-enable
                 * autoneg on our side too.
                 */
                bnx2_write_phy(bp, 0x17, 0x0f01);
                bnx2_read_phy(bp, 0x15, &phy2);
                if (phy2 & 0x20) {
                        u32 bmcr;

                        bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                        bmcr |= BMCR_ANENABLE;
                        bnx2_write_phy(bp, bp->mii_bmcr, bmcr);

                        bp->phy_flags &= ~BNX2_PHY_FLAG_PARALLEL_DETECT;
                }
        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        if (check_link) {
                u32 val;

                /* Double read: second read returns current AN state
                 * (latched bits -- see bnx2_5706_serdes_has_link()).
                 */
                bnx2_write_phy(bp, MII_BNX2_MISC_SHADOW, MISC_SHDW_AN_DBG);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);
                bnx2_read_phy(bp, MII_BNX2_MISC_SHADOW, &val);

                if (bp->link_up && (val & MISC_SHDW_AN_DBG_NOSYNC)) {
                        /* Lost sync while link believed up: force the
                         * link down once, then let bnx2_set_link()
                         * re-evaluate on the next tick.
                         */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_FORCED_DOWN)) {
                                bnx2_5706s_force_link_dn(bp, 1);
                                bp->phy_flags |= BNX2_PHY_FLAG_FORCED_DOWN;
                        } else
                                bnx2_set_link(bp);
                } else if (!bp->link_up && !(val & MISC_SHDW_AN_DBG_NOSYNC))
                        bnx2_set_link(bp);
        }
        spin_unlock(&bp->phy_lock);
}
5979
/* Per-tick SerDes handling for the 5708, called from bnx2_timer().
 * While the link is down on 2.5G-capable parts, alternates between
 * forced 2.5G mode and autoneg to give each a chance to bring the
 * link up.
 */
static void
bnx2_5708_serdes_timer(struct bnx2 *bp)
{
        /* Remote PHY: firmware manages the link. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
                return;

        if ((bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) == 0) {
                bp->serdes_an_pending = 0;
                return;
        }

        spin_lock(&bp->phy_lock);
        if (bp->serdes_an_pending)
                bp->serdes_an_pending--;
        else if ((bp->link_up == 0) && (bp->autoneg & AUTONEG_SPEED)) {
                u32 bmcr;

                /* Toggle: if currently autonegotiating, try forced
                 * 2.5G; otherwise go back to autoneg and wait two
                 * ticks before re-evaluating.
                 */
                bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
                if (bmcr & BMCR_ANENABLE) {
                        bnx2_enable_forced_2g5(bp);
                        bp->current_interval = BNX2_SERDES_FORCED_TIMEOUT;
                } else {
                        bnx2_disable_forced_2g5(bp);
                        bp->serdes_an_pending = 2;
                        bp->current_interval = BNX2_TIMER_INTERVAL;
                }

        } else
                bp->current_interval = BNX2_TIMER_INTERVAL;

        spin_unlock(&bp->phy_lock);
}
6012
/* Periodic driver timer (bp->timer).  Sends the firmware heartbeat,
 * refreshes the firmware rx-drop counter, applies stats/MSI
 * workarounds, runs the SerDes state machines, then re-arms itself.
 */
static void
bnx2_timer(unsigned long data)
{
        struct bnx2 *bp = (struct bnx2 *) data;

        if (!netif_running(bp->dev))
                return;

        /* Interrupt processing held off (e.g. reset in progress):
         * skip the work but keep the timer alive.
         */
        if (atomic_read(&bp->intr_sem) != 0)
                goto bnx2_restart_timer;

        /* Missed-MSI check only applies to plain (non-one-shot) MSI. */
        if ((bp->flags & (BNX2_FLAG_USING_MSI | BNX2_FLAG_ONE_SHOT_MSI)) ==
             BNX2_FLAG_USING_MSI)
                bnx2_chk_missed_msi(bp);

        bnx2_send_heart_beat(bp);

        /* Firmware-maintained drop counter lives in indirect space. */
        bp->stats_blk->stat_FwRxDrop =
                bnx2_reg_rd_ind(bp, BNX2_FW_RX_DROP_COUNT);

        /* workaround occasional corrupted counters */
        if ((bp->flags & BNX2_FLAG_BROKEN_STATS) && bp->stats_ticks)
                REG_WR(bp, BNX2_HC_COMMAND, bp->hc_cmd |
                                            BNX2_HC_COMMAND_STATS_NOW);

        if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
                if (CHIP_NUM(bp) == CHIP_NUM_5706)
                        bnx2_5706_serdes_timer(bp);
                else
                        bnx2_5708_serdes_timer(bp);
        }

bnx2_restart_timer:
        mod_timer(&bp->timer, jiffies + bp->current_interval);
}
6048
6049 static int
6050 bnx2_request_irq(struct bnx2 *bp)
6051 {
6052         unsigned long flags;
6053         struct bnx2_irq *irq;
6054         int rc = 0, i;
6055
6056         if (bp->flags & BNX2_FLAG_USING_MSI_OR_MSIX)
6057                 flags = 0;
6058         else
6059                 flags = IRQF_SHARED;
6060
6061         for (i = 0; i < bp->irq_nvecs; i++) {
6062                 irq = &bp->irq_tbl[i];
6063                 rc = request_irq(irq->vector, irq->handler, flags, irq->name,
6064                                  &bp->bnx2_napi[i]);
6065                 if (rc)
6066                         break;
6067                 irq->requested = 1;
6068         }
6069         return rc;
6070 }
6071
6072 static void
6073 bnx2_free_irq(struct bnx2 *bp)
6074 {
6075         struct bnx2_irq *irq;
6076         int i;
6077
6078         for (i = 0; i < bp->irq_nvecs; i++) {
6079                 irq = &bp->irq_tbl[i];
6080                 if (irq->requested)
6081                         free_irq(irq->vector, &bp->bnx2_napi[i]);
6082                 irq->requested = 0;
6083         }
6084         if (bp->flags & BNX2_FLAG_USING_MSI)
6085                 pci_disable_msi(bp->pdev);
6086         else if (bp->flags & BNX2_FLAG_USING_MSIX)
6087                 pci_disable_msix(bp->pdev);
6088
6089         bp->flags &= ~(BNX2_FLAG_USING_MSI_OR_MSIX | BNX2_FLAG_ONE_SHOT_MSI);
6090 }
6091
/* Try to switch the device to MSI-X with msix_vecs usable vectors.
 * On success sets BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI and
 * fills bp->irq_tbl; on failure returns silently, leaving the INTx
 * defaults from bnx2_setup_int_mode() in place.
 */
static void
bnx2_enable_msix(struct bnx2 *bp, int msix_vecs)
{
        int i, rc;
        struct msix_entry msix_ent[BNX2_MAX_MSIX_VEC];
        struct net_device *dev = bp->dev;
        const int len = sizeof(bp->irq_tbl[0].name);

        /* Program the chip's MSI-X table/PBA windows before enabling. */
        bnx2_setup_msix_tbl(bp);
        REG_WR(bp, BNX2_PCI_MSIX_CONTROL, BNX2_MAX_MSIX_HW_VEC - 1);
        REG_WR(bp, BNX2_PCI_MSIX_TBL_OFF_BIR, BNX2_PCI_GRC_WINDOW2_BASE);
        REG_WR(bp, BNX2_PCI_MSIX_PBA_OFF_BIT, BNX2_PCI_GRC_WINDOW3_BASE);

        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                msix_ent[i].entry = i;
                msix_ent[i].vector = 0;
        }

        /* All-or-nothing: request the full hardware vector count even
         * though only msix_vecs of them will be used.
         */
        rc = pci_enable_msix(bp->pdev, msix_ent, BNX2_MAX_MSIX_VEC);
        if (rc != 0)
                return;

        bp->irq_nvecs = msix_vecs;
        bp->flags |= BNX2_FLAG_USING_MSIX | BNX2_FLAG_ONE_SHOT_MSI;
        for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
                bp->irq_tbl[i].vector = msix_ent[i].vector;
                snprintf(bp->irq_tbl[i].name, len, "%s-%d", dev->name, i);
                bp->irq_tbl[i].handler = bnx2_msi_1shot;
        }
}
6122
/* Choose the interrupt mode (INTx, MSI or MSI-X) and size the tx/rx
 * ring counts accordingly.  Starts from INTx defaults, then tries
 * MSI-X (multi-queue) and finally plain MSI unless dis_msi is set.
 */
static void
bnx2_setup_int_mode(struct bnx2 *bp, int dis_msi)
{
        int cpus = num_online_cpus();
        /* One vector per CPU plus one, capped by the rx ring maximum. */
        int msix_vecs = min(cpus + 1, RX_MAX_RINGS);

        /* INTx defaults; overridden below when MSI/MSI-X succeeds. */
        bp->irq_tbl[0].handler = bnx2_interrupt;
        strcpy(bp->irq_tbl[0].name, bp->dev->name);
        bp->irq_nvecs = 1;
        bp->irq_tbl[0].vector = bp->pdev->irq;

        if ((bp->flags & BNX2_FLAG_MSIX_CAP) && !dis_msi && cpus > 1)
                bnx2_enable_msix(bp, msix_vecs);

        /* Fall back to single-vector MSI only if MSI-X was not enabled. */
        if ((bp->flags & BNX2_FLAG_MSI_CAP) && !dis_msi &&
            !(bp->flags & BNX2_FLAG_USING_MSIX)) {
                if (pci_enable_msi(bp->pdev) == 0) {
                        bp->flags |= BNX2_FLAG_USING_MSI;
                        if (CHIP_NUM(bp) == CHIP_NUM_5709) {
                                /* 5709 gets the one-shot MSI handler. */
                                bp->flags |= BNX2_FLAG_ONE_SHOT_MSI;
                                bp->irq_tbl[0].handler = bnx2_msi_1shot;
                        } else
                                bp->irq_tbl[0].handler = bnx2_msi;

                        bp->irq_tbl[0].vector = bp->pdev->irq;
                }
        }

        /* Tx ring count is rounded down to a power of two (presumably
         * for queue-selection arithmetic -- confirm).
         */
        bp->num_tx_rings = rounddown_pow_of_two(bp->irq_nvecs);
        bp->dev->real_num_tx_queues = bp->num_tx_rings;

        bp->num_rx_rings = bp->irq_nvecs;
}
6156
/* Called with rtnl_lock */
/* net_device_ops .ndo_open: bring the device fully up -- power up,
 * pick the interrupt mode, allocate rings, request IRQs, init the
 * chip, start the timer and enable interrupts.  If MSI is in use,
 * verify it actually delivers interrupts and fall back to INTx when
 * the test fails.  Returns 0 or a negative errno.
 */
static int
bnx2_open(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        int rc;

        netif_carrier_off(dev);

        bnx2_set_power_state(bp, PCI_D0);
        bnx2_disable_int(bp);

        bnx2_setup_int_mode(bp, disable_msi);
        bnx2_napi_enable(bp);
        rc = bnx2_alloc_mem(bp);
        if (rc)
                goto open_err;

        rc = bnx2_request_irq(bp);
        if (rc)
                goto open_err;

        rc = bnx2_init_nic(bp, 1);
        if (rc)
                goto open_err;

        mod_timer(&bp->timer, jiffies + bp->current_interval);

        atomic_set(&bp->intr_sem, 0);

        bnx2_enable_int(bp);

        if (bp->flags & BNX2_FLAG_USING_MSI) {
                /* Test MSI to make sure it is working
                 * If MSI test fails, go back to INTx mode
                 */
                if (bnx2_test_intr(bp) != 0) {
                        printk(KERN_WARNING PFX "%s: No interrupt was generated"
                               " using MSI, switching to INTx mode. Please"
                               " report this failure to the PCI maintainer"
                               " and include system chipset information.\n",
                               bp->dev->name);

                        bnx2_disable_int(bp);
                        bnx2_free_irq(bp);

                        /* Re-run interrupt setup with MSI disabled. */
                        bnx2_setup_int_mode(bp, 1);

                        rc = bnx2_init_nic(bp, 0);

                        if (!rc)
                                rc = bnx2_request_irq(bp);

                        if (rc) {
                                del_timer_sync(&bp->timer);
                                goto open_err;
                        }
                        bnx2_enable_int(bp);
                }
        }
        if (bp->flags & BNX2_FLAG_USING_MSI)
                printk(KERN_INFO PFX "%s: using MSI\n", dev->name);
        else if (bp->flags & BNX2_FLAG_USING_MSIX)
                printk(KERN_INFO PFX "%s: using MSIX\n", dev->name);

        netif_tx_start_all_queues(dev);

        return 0;

open_err:
        /* Unwind the partial setup done above. */
        bnx2_napi_disable(bp);
        bnx2_free_skbs(bp);
        bnx2_free_irq(bp);
        bnx2_free_mem(bp);
        return rc;
}
6233
/* Workqueue handler (bp->reset_task): stop traffic, reset and
 * re-initialize the chip, then restart.  Scheduled from
 * bnx2_tx_timeout().
 */
static void
bnx2_reset_task(struct work_struct *work)
{
        struct bnx2 *bp = container_of(work, struct bnx2, reset_task);

        if (!netif_running(bp->dev))
                return;

        bnx2_netif_stop(bp);

        /* NOTE(review): bnx2_init_nic()'s return value is ignored here,
         * so the interface is restarted even if re-init failed --
         * consider handling the error.
         */
        bnx2_init_nic(bp, 1);

        /* Hold interrupt processing until the restart path releases it
         * (intr_sem semantics -- confirm against bnx2_netif_start()).
         */
        atomic_set(&bp->intr_sem, 1);
        bnx2_netif_start(bp);
}
6249
/* net_device_ops .ndo_tx_timeout: defer the chip reset to the
 * reset_task workqueue so it runs in process context.
 */
static void
bnx2_tx_timeout(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        /* This allows the netif to be shutdown gracefully before resetting */
        schedule_work(&bp->reset_task);
}
6258
#ifdef BCM_VLAN
/* Called with rtnl_lock */
/* VLAN acceleration hook: record the new vlan_group and, when the
 * device is running, re-program the rx mode (and tell the firmware to
 * keep VLAN tags when supported) around a netif stop/start.
 */
static void
bnx2_vlan_rx_register(struct net_device *dev, struct vlan_group *vlgrp)
{
        struct bnx2 *bp = netdev_priv(dev);

        if (netif_running(dev))
                bnx2_netif_stop(bp);

        bp->vlgrp = vlgrp;

        /* Device down: nothing more to program until it is opened. */
        if (!netif_running(dev))
                return;

        bnx2_set_rx_mode(dev);
        if (bp->flags & BNX2_FLAG_CAN_KEEP_VLAN)
                bnx2_fw_sync(bp, BNX2_DRV_MSG_CODE_KEEP_VLAN_UPDATE, 0, 1);

        bnx2_netif_start(bp);
}
#endif
6281
/* Called with netif_tx_lock.
 * bnx2_tx_int() runs without netif_tx_lock unless it needs to call
 * netif_wake_queue().
 */
/* net_device_ops .ndo_start_xmit: map the skb for DMA, build one tx
 * descriptor for the linear data plus one per page fragment, then
 * ring the doorbell.  Returns NETDEV_TX_OK (dropping the skb on DMA
 * mapping failure) or NETDEV_TX_BUSY if the ring is unexpectedly full.
 */
static int
bnx2_start_xmit(struct sk_buff *skb, struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);
        dma_addr_t mapping;
        struct tx_bd *txbd;
        struct sw_tx_bd *tx_buf;
        u32 len, vlan_tag_flags, last_frag, mss;
        u16 prod, ring_prod;
        int i;
        struct bnx2_napi *bnapi;
        struct bnx2_tx_ring_info *txr;
        struct netdev_queue *txq;
        struct skb_shared_info *sp;

        /*  Determine which tx ring we will be placed on */
        i = skb_get_queue_mapping(skb);
        bnapi = &bp->bnx2_napi[i];
        txr = &bnapi->tx_ring;
        txq = netdev_get_tx_queue(dev, i);

        /* The queue should have been stopped before the ring got this
         * full; reaching here indicates a ring-accounting bug.
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <
            (skb_shinfo(skb)->nr_frags + 1))) {
                netif_tx_stop_queue(txq);
                printk(KERN_ERR PFX "%s: BUG! Tx ring full when queue awake!\n",
                        dev->name);

                return NETDEV_TX_BUSY;
        }
        len = skb_headlen(skb);
        prod = txr->tx_prod;
        ring_prod = TX_RING_IDX(prod);

        vlan_tag_flags = 0;
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                vlan_tag_flags |= TX_BD_FLAGS_TCP_UDP_CKSUM;
        }

#ifdef BCM_VLAN
        /* VLAN tag rides in the upper 16 bits of the flags word. */
        if (bp->vlgrp && vlan_tx_tag_present(skb)) {
                vlan_tag_flags |=
                        (TX_BD_FLAGS_VLAN_TAG | (vlan_tx_tag_get(skb) << 16));
        }
#endif
        if ((mss = skb_shinfo(skb)->gso_size)) {
                /* LSO: encode MSS, TCP option length and (for IPv6) the
                 * transport-header offset into the descriptor fields.
                 */
                u32 tcp_opt_len;
                struct iphdr *iph;

                vlan_tag_flags |= TX_BD_FLAGS_SW_LSO;

                tcp_opt_len = tcp_optlen(skb);

                if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6) {
                        u32 tcp_off = skb_transport_offset(skb) -
                                      sizeof(struct ipv6hdr) - ETH_HLEN;

                        vlan_tag_flags |= ((tcp_opt_len >> 2) << 8) |
                                          TX_BD_FLAGS_SW_FLAGS;
                        if (likely(tcp_off == 0))
                                vlan_tag_flags &= ~TX_BD_FLAGS_TCP6_OFF0_MSK;
                        else {
                                /* Non-zero offset is split across
                                 * several descriptor bit-fields in
                                 * 8-byte units.
                                 */
                                tcp_off >>= 3;
                                vlan_tag_flags |= ((tcp_off & 0x3) <<
                                                   TX_BD_FLAGS_TCP6_OFF0_SHL) |
                                                  ((tcp_off & 0x10) <<
                                                   TX_BD_FLAGS_TCP6_OFF4_SHL);
                                mss |= (tcp_off & 0xc) << TX_BD_TCP6_OFF2_SHL;
                        }
                } else {
                        iph = ip_hdr(skb);
                        if (tcp_opt_len || (iph->ihl > 5)) {
                                vlan_tag_flags |= ((iph->ihl - 5) +
                                                   (tcp_opt_len >> 2)) << 8;
                        }
                }
        } else
                mss = 0;

        if (skb_dma_map(&bp->pdev->dev, skb, DMA_TO_DEVICE)) {
                /* DMA mapping failed: drop the packet. */
                dev_kfree_skb(skb);
                return NETDEV_TX_OK;
        }

        sp = skb_shinfo(skb);
        mapping = sp->dma_head;

        tx_buf = &txr->tx_buf_ring[ring_prod];
        tx_buf->skb = skb;

        /* First descriptor covers the linear part of the skb. */
        txbd = &txr->tx_desc_ring[ring_prod];

        txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
        txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
        txbd->tx_bd_mss_nbytes = len | (mss << 16);
        txbd->tx_bd_vlan_tag_flags = vlan_tag_flags | TX_BD_FLAGS_START;

        last_frag = skb_shinfo(skb)->nr_frags;
        tx_buf->nr_frags = last_frag;
        tx_buf->is_gso = skb_is_gso(skb);

        /* One descriptor per page fragment. */
        for (i = 0; i < last_frag; i++) {
                skb_frag_t *frag = &skb_shinfo(skb)->frags[i];

                prod = NEXT_TX_BD(prod);
                ring_prod = TX_RING_IDX(prod);
                txbd = &txr->tx_desc_ring[ring_prod];

                len = frag->size;
                mapping = sp->dma_maps[i];

                txbd->tx_bd_haddr_hi = (u64) mapping >> 32;
                txbd->tx_bd_haddr_lo = (u64) mapping & 0xffffffff;
                txbd->tx_bd_mss_nbytes = len | (mss << 16);
                txbd->tx_bd_vlan_tag_flags = vlan_tag_flags;

        }
        txbd->tx_bd_vlan_tag_flags |= TX_BD_FLAGS_END;

        prod = NEXT_TX_BD(prod);
        txr->tx_prod_bseq += skb->len;

        /* Ring the doorbell: new producer index and byte sequence. */
        REG_WR16(bp, txr->tx_bidx_addr, prod);
        REG_WR(bp, txr->tx_bseq_addr, txr->tx_prod_bseq);

        mmiowb();

        txr->tx_prod = prod;

        /* Stop the queue when nearly full; re-wake immediately if
         * completions already freed enough room.
         */
        if (unlikely(bnx2_tx_avail(bp, txr) <= MAX_SKB_FRAGS)) {
                netif_tx_stop_queue(txq);
                if (bnx2_tx_avail(bp, txr) > bp->tx_wake_thresh)
                        netif_tx_wake_queue(txq);
        }

        return NETDEV_TX_OK;
}
6422
/* Called with rtnl_lock */
/* net_device_ops .ndo_stop: tear down in the reverse order of
 * bnx2_open() -- cancel the reset task, quiesce interrupts/NAPI/timer,
 * shut the chip down, release IRQs and memory, then drop to D3hot.
 */
static int
bnx2_close(struct net_device *dev)
{
        struct bnx2 *bp = netdev_priv(dev);

        cancel_work_sync(&bp->reset_task);

        bnx2_disable_int_sync(bp);
        bnx2_napi_disable(bp);
        del_timer_sync(&bp->timer);
        bnx2_shutdown_chip(bp);
        bnx2_free_irq(bp);
        bnx2_free_skbs(bp);
        bnx2_free_mem(bp);
        bp->link_up = 0;
        netif_carrier_off(bp->dev);
        bnx2_set_power_state(bp, PCI_D3hot);
        return 0;
}
6443
/* Read a 64-bit hardware counter split into _hi/_lo halves, truncated
 * to unsigned long.  Fully parenthesized so the expansion behaves as a
 * single term inside any surrounding expression (the previous form was
 * unparenthesized and only safe by accident of current usage).
 */
#define GET_NET_STATS64(ctr)                                    \
        (((unsigned long) ((unsigned long) (ctr##_hi) << 32)) + \
         ((unsigned long) (ctr##_lo)))

/* On 32-bit hosts only the low half fits in unsigned long. */
#define GET_NET_STATS32(ctr)            \
        (ctr##_lo)

#if (BITS_PER_LONG == 64)
#define GET_NET_STATS   GET_NET_STATS64
#else
#define GET_NET_STATS   GET_NET_STATS32
#endif
6456
6457 static struct net_device_stats *
6458 bnx2_get_stats(struct net_device *dev)
6459 {
6460         struct bnx2 *bp = netdev_priv(dev);
6461         struct statistics_block *stats_blk = bp->stats_blk;
6462         struct net_device_stats *net_stats = &dev->stats;
6463
6464         if (bp->stats_blk == NULL) {
6465                 return net_stats;
6466         }
6467         net_stats->rx_packets =
6468                 GET_NET_STATS(stats_blk->stat_IfHCInUcastPkts) +
6469                 GET_NET_STATS(stats_blk->stat_IfHCInMulticastPkts) +
6470                 GET_NET_STATS(stats_blk->stat_IfHCInBroadcastPkts);
6471
6472         net_stats->tx_packets =
6473                 GET_NET_STATS(stats_blk->stat_IfHCOutUcastPkts) +
6474                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts) +
6475                 GET_NET_STATS(stats_blk->stat_IfHCOutBroadcastPkts);
6476
6477         net_stats->rx_bytes =
6478                 GET_NET_STATS(stats_blk->stat_IfHCInOctets);
6479
6480         net_stats->tx_bytes =
6481                 GET_NET_STATS(stats_blk->stat_IfHCOutOctets);
6482
6483         net_stats->multicast =
6484                 GET_NET_STATS(stats_blk->stat_IfHCOutMulticastPkts);
6485
6486         net_stats->collisions =
6487                 (unsigned long) stats_blk->stat_EtherStatsCollisions;
6488
6489         net_stats->rx_length_errors =
6490                 (unsigned long) (stats_blk->stat_EtherStatsUndersizePkts +
6491                 stats_blk->stat_EtherStatsOverrsizePkts);
6492
6493         net_stats->rx_over_errors =
6494                 (unsigned long) (stats_blk->stat_IfInFTQDiscards +
6495                 stats_blk->stat_IfInMBUFDiscards);
6496
6497         net_stats->rx_frame_errors =
6498                 (unsigned long) stats_blk->stat_Dot3StatsAlignmentErrors;
6499
6500         net_stats->rx_crc_errors =
6501                 (unsigned long) stats_blk->stat_Dot3StatsFCSErrors;
6502
6503         net_stats->rx_errors = net_stats->rx_length_errors +
6504                 net_stats->rx_over_errors + net_stats->rx_frame_errors +
6505                 net_stats->rx_crc_errors;
6506
6507         net_stats->tx_aborted_errors =
6508                 (unsigned long) (stats_blk->stat_Dot3StatsExcessiveCollisions +
6509                 stats_blk->stat_Dot3StatsLateCollisions);
6510
6511         if ((CHIP_NUM(bp) == CHIP_NUM_5706) ||
6512             (CHIP_ID(bp) == CHIP_ID_5708_A0))
6513                 net_stats->tx_carrier_errors = 0;
6514         else {
6515                 net_stats->tx_carrier_errors =
6516                         (unsigned long)
6517                         stats_blk->stat_Dot3StatsCarrierSenseErrors;
6518         }
6519
6520         net_stats->tx_errors =
6521                 (unsigned long)
6522                 stats_blk->stat_emac_tx_stat_dot3statsinternalmactransmiterrors
6523                 +
6524                 net_stats->tx_aborted_errors +
6525                 net_stats->tx_carrier_errors;
6526
6527         net_stats->rx_missed_errors =
6528                 (unsigned long) (stats_blk->stat_IfInFTQDiscards +
6529                 stats_blk->stat_IfInMBUFDiscards + stats_blk->stat_FwRxDrop);
6530
6531         return net_stats;
6532 }
6533
6534 /* All ethtool functions called with rtnl_lock */
6535
/* ethtool .get_settings: report supported/advertised modes, port type
 * and current speed/duplex.  Speed/duplex read as -1 when there is no
 * carrier.  Always returns 0.
 */
static int
bnx2_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        int support_serdes = 0, support_copper = 0;

        cmd->supported = SUPPORTED_Autoneg;
        /* A remote PHY may drive either media type. */
        if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
                support_serdes = 1;
                support_copper = 1;
        } else if (bp->phy_port == PORT_FIBRE)
                support_serdes = 1;
        else
                support_copper = 1;

        if (support_serdes) {
                cmd->supported |= SUPPORTED_1000baseT_Full |
                        SUPPORTED_FIBRE;
                if (bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE)
                        cmd->supported |= SUPPORTED_2500baseX_Full;

        }
        if (support_copper) {
                cmd->supported |= SUPPORTED_10baseT_Half |
                        SUPPORTED_10baseT_Full |
                        SUPPORTED_100baseT_Half |
                        SUPPORTED_100baseT_Full |
                        SUPPORTED_1000baseT_Full |
                        SUPPORTED_TP;

        }

        /* phy_lock guards the link fields also updated from the
         * SerDes timer paths.
         */
        spin_lock_bh(&bp->phy_lock);
        cmd->port = bp->phy_port;
        cmd->advertising = bp->advertising;

        if (bp->autoneg & AUTONEG_SPEED) {
                cmd->autoneg = AUTONEG_ENABLE;
        }
        else {
                cmd->autoneg = AUTONEG_DISABLE;
        }

        if (netif_carrier_ok(dev)) {
                cmd->speed = bp->line_speed;
                cmd->duplex = bp->duplex;
        }
        else {
                /* No link: speed and duplex are unknown. */
                cmd->speed = -1;
                cmd->duplex = -1;
        }
        spin_unlock_bh(&bp->phy_lock);

        cmd->transceiver = XCVR_INTERNAL;
        cmd->phy_address = bp->phy_addr;

        return 0;
}
6594
/* ethtool .set_settings: validate and apply autoneg/speed/duplex/port
 * changes.  All validation happens before any bp field is written, so
 * a failed call leaves the stored settings untouched.  Returns 0 on
 * success or -EINVAL for unsupported combinations.
 */
static int
bnx2_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
        struct bnx2 *bp = netdev_priv(dev);
        u8 autoneg = bp->autoneg;
        u8 req_duplex = bp->req_duplex;
        u16 req_line_speed = bp->req_line_speed;
        u32 advertising = bp->advertising;
        int err = -EINVAL;

        spin_lock_bh(&bp->phy_lock);

        if (cmd->port != PORT_TP && cmd->port != PORT_FIBRE)
                goto err_out_unlock;

        /* Switching port types is only possible with a remote PHY. */
        if (cmd->port != bp->phy_port &&
            !(bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP))
                goto err_out_unlock;

        /* If device is down, we can store the settings only if the user
         * is setting the currently active port.
         */
        if (!netif_running(dev) && cmd->port != bp->phy_port)
                goto err_out_unlock;

        if (cmd->autoneg == AUTONEG_ENABLE) {
                autoneg |= AUTONEG_SPEED;

                cmd->advertising &= ETHTOOL_ALL_COPPER_SPEED;

                /* allow advertising 1 speed */
                if ((cmd->advertising == ADVERTISED_10baseT_Half) ||
                        (cmd->advertising == ADVERTISED_10baseT_Full) ||
                        (cmd->advertising == ADVERTISED_100baseT_Half) ||
                        (cmd->advertising == ADVERTISED_100baseT_Full)) {

                        /* 10/100 modes are copper-only. */
                        if (cmd->port == PORT_FIBRE)
                                goto err_out_unlock;

                        advertising = cmd->advertising;

                } else if (cmd->advertising == ADVERTISED_2500baseX_Full) {
                        /* 2.5G requires a capable fibre PHY. */
                        if (!(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE) ||
                            (cmd->port == PORT_TP))
                                goto err_out_unlock;
                } else if (cmd->advertising == ADVERTISED_1000baseT_Full)
                        advertising = cmd->advertising;
                else if (cmd->advertising == ADVERTISED_1000baseT_Half)
                        goto err_out_unlock;
                else {
                        /* Anything else: advertise everything valid for
                         * the chosen port type.
                         */
                        if (cmd->port == PORT_FIBRE)
                                advertising = ETHTOOL_ALL_FIBRE_SPEED;
                        else
                                advertising = ETHTOOL_ALL_COPPER_SPEED;
                }
                advertising |= ADVERTISED_Autoneg;
        }
        else {
                /* Forced speed/duplex. */
                if (cmd->port == PORT_FIBRE) {
                        if ((cmd->speed != SPEED_1000 &&
                             cmd->speed != SPEED_2500) ||
                            (cmd->duplex != DUPLEX_FULL))
                                goto err_out_unlock;

                        if (cmd->speed == SPEED_2500 &&
                            !(bp->phy_flags & BNX2_PHY_FLAG_2_5G_CAPABLE))
                                goto err_out_unlock;
                }
                else if (cmd->speed == SPEED_1000 || cmd->speed == SPEED_2500)
                        goto err_out_unlock;

                autoneg &= ~AUTONEG_SPEED;
                req_line_speed = cmd->speed;
                req_duplex = cmd->duplex;
                advertising = 0;
        }

        bp->autoneg = autoneg;
        bp->advertising = advertising;
        bp->req_line_speed = req_line_speed;
        bp->req_duplex = req_duplex;

        err = 0;
        /* If device is down, the new settings will be picked up when it is
         * brought up.
         */
        if (netif_running(dev))
                err = bnx2_setup_phy(bp, cmd->port);

err_out_unlock:
        spin_unlock_bh(&bp->phy_lock);

        return err;
}
6689
6690 static void
6691 bnx2_get_drvinfo(struct net_device *dev, struct ethtool_drvinfo *info)
6692 {
6693         struct bnx2 *bp = netdev_priv(dev);
6694
6695         strcpy(info->driver, DRV_MODULE_NAME);
6696         strcpy(info->version, DRV_MODULE_VERSION);
6697         strcpy(info->bus_info, pci_name(bp->pdev));
6698         strcpy(info->fw_version, bp->fw_version);
6699 }
6700
6701 #define BNX2_REGDUMP_LEN                (32 * 1024)
6702
/* ethtool get_regs_len: the register dump produced by bnx2_get_regs()
 * always has a fixed size, independent of device state.
 */
static int
bnx2_get_regs_len(struct net_device *dev)
{
	return BNX2_REGDUMP_LEN;
}
6708
/* ethtool get_regs: dump readable chip registers into the caller's
 * 32 KB buffer.  Only the ranges listed in reg_boundaries are read;
 * everything in between is left zeroed.
 */
static void
bnx2_get_regs(struct net_device *dev, struct ethtool_regs *regs, void *_p)
{
	u32 *p = _p, i, offset;
	u8 *orig_p = _p;
	struct bnx2 *bp = netdev_priv(dev);
	/* Consecutive pairs of offsets bound the register ranges that are
	 * safe to read on this chip.  The trailing 0x8000 entry (equal to
	 * BNX2_REGDUMP_LEN) terminates the walk.
	 */
	u32 reg_boundaries[] = { 0x0000, 0x0098, 0x0400, 0x045c,
				 0x0800, 0x0880, 0x0c00, 0x0c10,
				 0x0c30, 0x0d08, 0x1000, 0x101c,
				 0x1040, 0x1048, 0x1080, 0x10a4,
				 0x1400, 0x1490, 0x1498, 0x14f0,
				 0x1500, 0x155c, 0x1580, 0x15dc,
				 0x1600, 0x1658, 0x1680, 0x16d8,
				 0x1800, 0x1820, 0x1840, 0x1854,
				 0x1880, 0x1894, 0x1900, 0x1984,
				 0x1c00, 0x1c0c, 0x1c40, 0x1c54,
				 0x1c80, 0x1c94, 0x1d00, 0x1d84,
				 0x2000, 0x2030, 0x23c0, 0x2400,
				 0x2800, 0x2820, 0x2830, 0x2850,
				 0x2b40, 0x2c10, 0x2fc0, 0x3058,
				 0x3c00, 0x3c94, 0x4000, 0x4010,
				 0x4080, 0x4090, 0x43c0, 0x4458,
				 0x4c00, 0x4c18, 0x4c40, 0x4c54,
				 0x4fc0, 0x5010, 0x53c0, 0x5444,
				 0x5c00, 0x5c18, 0x5c80, 0x5c90,
				 0x5fc0, 0x6000, 0x6400, 0x6428,
				 0x6800, 0x6848, 0x684c, 0x6860,
				 0x6888, 0x6910, 0x8000 };

	regs->version = 0;

	/* Pre-zero the buffer so the skipped ranges read as zeros. */
	memset(p, 0, BNX2_REGDUMP_LEN);

	/* Do not touch the chip while it is down. */
	if (!netif_running(bp->dev))
		return;

	i = 0;
	offset = reg_boundaries[0];
	p += offset;
	while (offset < BNX2_REGDUMP_LEN) {
		*p++ = REG_RD(bp, offset);
		offset += 4;
		/* End of the current safe range: skip to the start of the
		 * next one and reposition the output pointer to match.
		 */
		if (offset == reg_boundaries[i + 1]) {
			offset = reg_boundaries[i + 2];
			p = (u32 *) (orig_p + offset);
			i += 2;
		}
	}
}
6758
6759 static void
6760 bnx2_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6761 {
6762         struct bnx2 *bp = netdev_priv(dev);
6763
6764         if (bp->flags & BNX2_FLAG_NO_WOL) {
6765                 wol->supported = 0;
6766                 wol->wolopts = 0;
6767         }
6768         else {
6769                 wol->supported = WAKE_MAGIC;
6770                 if (bp->wol)
6771                         wol->wolopts = WAKE_MAGIC;
6772                 else
6773                         wol->wolopts = 0;
6774         }
6775         memset(&wol->sopass, 0, sizeof(wol->sopass));
6776 }
6777
6778 static int
6779 bnx2_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
6780 {
6781         struct bnx2 *bp = netdev_priv(dev);
6782
6783         if (wol->wolopts & ~WAKE_MAGIC)
6784                 return -EINVAL;
6785
6786         if (wol->wolopts & WAKE_MAGIC) {
6787                 if (bp->flags & BNX2_FLAG_NO_WOL)
6788                         return -EINVAL;
6789
6790                 bp->wol = 1;
6791         }
6792         else {
6793                 bp->wol = 0;
6794         }
6795         return 0;
6796 }
6797
/* ethtool nway_reset: restart link autonegotiation.  Returns -EAGAIN
 * if the interface is down and -EINVAL if autoneg is not enabled.
 */
static int
bnx2_nway_reset(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);
	u32 bmcr;

	if (!netif_running(dev))
		return -EAGAIN;

	if (!(bp->autoneg & AUTONEG_SPEED)) {
		return -EINVAL;
	}

	spin_lock_bh(&bp->phy_lock);

	/* Remote-PHY capable devices renegotiate through firmware. */
	if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP) {
		int rc;

		rc = bnx2_setup_remote_phy(bp, bp->phy_port);
		spin_unlock_bh(&bp->phy_lock);
		return rc;
	}

	/* Force a link down visible on the other side */
	if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
		bnx2_write_phy(bp, bp->mii_bmcr, BMCR_LOOPBACK);
		/* phy_lock is dropped across the sleep and retaken. */
		spin_unlock_bh(&bp->phy_lock);

		msleep(20);

		spin_lock_bh(&bp->phy_lock);

		/* Arm the serdes autoneg timeout serviced by bp->timer. */
		bp->current_interval = BNX2_SERDES_AN_TIMEOUT;
		bp->serdes_an_pending = 1;
		mod_timer(&bp->timer, jiffies + bp->current_interval);
	}

	bnx2_read_phy(bp, bp->mii_bmcr, &bmcr);
	bmcr &= ~BMCR_LOOPBACK;
	/* Kick off a fresh autonegotiation cycle. */
	bnx2_write_phy(bp, bp->mii_bmcr, bmcr | BMCR_ANRESTART | BMCR_ANENABLE);

	spin_unlock_bh(&bp->phy_lock);

	return 0;
}
6843
/* ethtool get_link: report the driver's cached link state. */
static u32
bnx2_get_link(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->link_up;
}
6851
6852 static int
6853 bnx2_get_eeprom_len(struct net_device *dev)
6854 {
6855         struct bnx2 *bp = netdev_priv(dev);
6856
6857         if (bp->flash_info == NULL)
6858                 return 0;
6859
6860         return (int) bp->flash_size;
6861 }
6862
6863 static int
6864 bnx2_get_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6865                 u8 *eebuf)
6866 {
6867         struct bnx2 *bp = netdev_priv(dev);
6868         int rc;
6869
6870         if (!netif_running(dev))
6871                 return -EAGAIN;
6872
6873         /* parameters already validated in ethtool_get_eeprom */
6874
6875         rc = bnx2_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
6876
6877         return rc;
6878 }
6879
6880 static int
6881 bnx2_set_eeprom(struct net_device *dev, struct ethtool_eeprom *eeprom,
6882                 u8 *eebuf)
6883 {
6884         struct bnx2 *bp = netdev_priv(dev);
6885         int rc;
6886
6887         if (!netif_running(dev))
6888                 return -EAGAIN;
6889
6890         /* parameters already validated in ethtool_set_eeprom */
6891
6892         rc = bnx2_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
6893
6894         return rc;
6895 }
6896
6897 static int
6898 bnx2_get_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6899 {
6900         struct bnx2 *bp = netdev_priv(dev);
6901
6902         memset(coal, 0, sizeof(struct ethtool_coalesce));
6903
6904         coal->rx_coalesce_usecs = bp->rx_ticks;
6905         coal->rx_max_coalesced_frames = bp->rx_quick_cons_trip;
6906         coal->rx_coalesce_usecs_irq = bp->rx_ticks_int;
6907         coal->rx_max_coalesced_frames_irq = bp->rx_quick_cons_trip_int;
6908
6909         coal->tx_coalesce_usecs = bp->tx_ticks;
6910         coal->tx_max_coalesced_frames = bp->tx_quick_cons_trip;
6911         coal->tx_coalesce_usecs_irq = bp->tx_ticks_int;
6912         coal->tx_max_coalesced_frames_irq = bp->tx_quick_cons_trip_int;
6913
6914         coal->stats_block_coalesce_usecs = bp->stats_ticks;
6915
6916         return 0;
6917 }
6918
6919 static int
6920 bnx2_set_coalesce(struct net_device *dev, struct ethtool_coalesce *coal)
6921 {
6922         struct bnx2 *bp = netdev_priv(dev);
6923
6924         bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
6925         if (bp->rx_ticks > 0x3ff) bp->rx_ticks = 0x3ff;
6926
6927         bp->rx_quick_cons_trip = (u16) coal->rx_max_coalesced_frames;
6928         if (bp->rx_quick_cons_trip > 0xff) bp->rx_quick_cons_trip = 0xff;
6929
6930         bp->rx_ticks_int = (u16) coal->rx_coalesce_usecs_irq;
6931         if (bp->rx_ticks_int > 0x3ff) bp->rx_ticks_int = 0x3ff;
6932
6933         bp->rx_quick_cons_trip_int = (u16) coal->rx_max_coalesced_frames_irq;
6934         if (bp->rx_quick_cons_trip_int > 0xff)
6935                 bp->rx_quick_cons_trip_int = 0xff;
6936
6937         bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
6938         if (bp->tx_ticks > 0x3ff) bp->tx_ticks = 0x3ff;
6939
6940         bp->tx_quick_cons_trip = (u16) coal->tx_max_coalesced_frames;
6941         if (bp->tx_quick_cons_trip > 0xff) bp->tx_quick_cons_trip = 0xff;
6942
6943         bp->tx_ticks_int = (u16) coal->tx_coalesce_usecs_irq;
6944         if (bp->tx_ticks_int > 0x3ff) bp->tx_ticks_int = 0x3ff;
6945
6946         bp->tx_quick_cons_trip_int = (u16) coal->tx_max_coalesced_frames_irq;
6947         if (bp->tx_quick_cons_trip_int > 0xff) bp->tx_quick_cons_trip_int =
6948                 0xff;
6949
6950         bp->stats_ticks = coal->stats_block_coalesce_usecs;
6951         if (bp->flags & BNX2_FLAG_BROKEN_STATS) {
6952                 if (bp->stats_ticks != 0 && bp->stats_ticks != USEC_PER_SEC)
6953                         bp->stats_ticks = USEC_PER_SEC;
6954         }
6955         if (bp->stats_ticks > BNX2_HC_STATS_TICKS_HC_STAT_TICKS)
6956                 bp->stats_ticks = BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6957         bp->stats_ticks &= BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
6958
6959         if (netif_running(bp->dev)) {
6960                 bnx2_netif_stop(bp);
6961                 bnx2_init_nic(bp, 0);
6962                 bnx2_netif_start(bp);
6963         }
6964
6965         return 0;
6966 }
6967
6968 static void
6969 bnx2_get_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
6970 {
6971         struct bnx2 *bp = netdev_priv(dev);
6972
6973         ering->rx_max_pending = MAX_TOTAL_RX_DESC_CNT;
6974         ering->rx_mini_max_pending = 0;
6975         ering->rx_jumbo_max_pending = MAX_TOTAL_RX_PG_DESC_CNT;
6976
6977         ering->rx_pending = bp->rx_ring_size;
6978         ering->rx_mini_pending = 0;
6979         ering->rx_jumbo_pending = bp->rx_pg_ring_size;
6980
6981         ering->tx_max_pending = MAX_TX_DESC_CNT;
6982         ering->tx_pending = bp->tx_ring_size;
6983 }
6984
/* Resize the RX/TX rings.  If the device is up it is quiesced and its
 * buffers and DMA memory freed first, then everything is reallocated
 * and restarted with the new sizes.  On reallocation failure the
 * device is closed and the error returned.
 */
static int
bnx2_change_ring_size(struct bnx2 *bp, u32 rx, u32 tx)
{
	if (netif_running(bp->dev)) {
		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_RESET);
		bnx2_free_skbs(bp);
		bnx2_free_mem(bp);
	}

	bnx2_set_rx_ring_size(bp, rx);
	bp->tx_ring_size = tx;

	if (netif_running(bp->dev)) {
		int rc;

		rc = bnx2_alloc_mem(bp);
		if (!rc)
			rc = bnx2_init_nic(bp, 0);

		if (rc) {
			/* NAPI was disabled by bnx2_netif_stop(); re-enable
			 * it so dev_close() can tear down cleanly.
			 */
			bnx2_napi_enable(bp);
			dev_close(bp->dev);
			return rc;
		}
		bnx2_netif_start(bp);
	}
	return 0;
}
7014
7015 static int
7016 bnx2_set_ringparam(struct net_device *dev, struct ethtool_ringparam *ering)
7017 {
7018         struct bnx2 *bp = netdev_priv(dev);
7019         int rc;
7020
7021         if ((ering->rx_pending > MAX_TOTAL_RX_DESC_CNT) ||
7022                 (ering->tx_pending > MAX_TX_DESC_CNT) ||
7023                 (ering->tx_pending <= MAX_SKB_FRAGS)) {
7024
7025                 return -EINVAL;
7026         }
7027         rc = bnx2_change_ring_size(bp, ering->rx_pending, ering->tx_pending);
7028         return rc;
7029 }
7030
7031 static void
7032 bnx2_get_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7033 {
7034         struct bnx2 *bp = netdev_priv(dev);
7035
7036         epause->autoneg = ((bp->autoneg & AUTONEG_FLOW_CTRL) != 0);
7037         epause->rx_pause = ((bp->flow_ctrl & FLOW_CTRL_RX) != 0);
7038         epause->tx_pause = ((bp->flow_ctrl & FLOW_CTRL_TX) != 0);
7039 }
7040
7041 static int
7042 bnx2_set_pauseparam(struct net_device *dev, struct ethtool_pauseparam *epause)
7043 {
7044         struct bnx2 *bp = netdev_priv(dev);
7045
7046         bp->req_flow_ctrl = 0;
7047         if (epause->rx_pause)
7048                 bp->req_flow_ctrl |= FLOW_CTRL_RX;
7049         if (epause->tx_pause)
7050                 bp->req_flow_ctrl |= FLOW_CTRL_TX;
7051
7052         if (epause->autoneg) {
7053                 bp->autoneg |= AUTONEG_FLOW_CTRL;
7054         }
7055         else {
7056                 bp->autoneg &= ~AUTONEG_FLOW_CTRL;
7057         }
7058
7059         if (netif_running(dev)) {
7060                 spin_lock_bh(&bp->phy_lock);
7061                 bnx2_setup_phy(bp, bp->phy_port);
7062                 spin_unlock_bh(&bp->phy_lock);
7063         }
7064
7065         return 0;
7066 }
7067
/* ethtool get_rx_csum: report whether RX checksum offload is enabled. */
static u32
bnx2_get_rx_csum(struct net_device *dev)
{
	struct bnx2 *bp = netdev_priv(dev);

	return bp->rx_csum;
}
7075
/* ethtool set_rx_csum: record the new RX checksum offload setting. */
static int
bnx2_set_rx_csum(struct net_device *dev, u32 data)
{
	struct bnx2 *bp = netdev_priv(dev);

	bp->rx_csum = data;
	return 0;
}
7084
7085 static int
7086 bnx2_set_tso(struct net_device *dev, u32 data)
7087 {
7088         struct bnx2 *bp = netdev_priv(dev);
7089
7090         if (data) {
7091                 dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
7092                 if (CHIP_NUM(bp) == CHIP_NUM_5709)
7093                         dev->features |= NETIF_F_TSO6;
7094         } else
7095                 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
7096                                    NETIF_F_TSO_ECN);
7097         return 0;
7098 }
7099
/* ethtool statistics names.  The entry order must match
 * bnx2_stats_offset_arr and the per-chip length arrays below, which
 * bnx2_get_ethtool_stats() indexes in parallel.
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_stats_str_arr[] = {
	{ "rx_bytes" },
	{ "rx_error_bytes" },
	{ "tx_bytes" },
	{ "tx_error_bytes" },
	{ "rx_ucast_packets" },
	{ "rx_mcast_packets" },
	{ "rx_bcast_packets" },
	{ "tx_ucast_packets" },
	{ "tx_mcast_packets" },
	{ "tx_bcast_packets" },
	{ "tx_mac_errors" },
	{ "tx_carrier_errors" },
	{ "rx_crc_errors" },
	{ "rx_align_errors" },
	{ "tx_single_collisions" },
	{ "tx_multi_collisions" },
	{ "tx_deferred" },
	{ "tx_excess_collisions" },
	{ "tx_late_collisions" },
	{ "tx_total_collisions" },
	{ "rx_fragments" },
	{ "rx_jabbers" },
	{ "rx_undersize_packets" },
	{ "rx_oversize_packets" },
	{ "rx_64_byte_packets" },
	{ "rx_65_to_127_byte_packets" },
	{ "rx_128_to_255_byte_packets" },
	{ "rx_256_to_511_byte_packets" },
	{ "rx_512_to_1023_byte_packets" },
	{ "rx_1024_to_1522_byte_packets" },
	{ "rx_1523_to_9022_byte_packets" },
	{ "tx_64_byte_packets" },
	{ "tx_65_to_127_byte_packets" },
	{ "tx_128_to_255_byte_packets" },
	{ "tx_256_to_511_byte_packets" },
	{ "tx_512_to_1023_byte_packets" },
	{ "tx_1024_to_1522_byte_packets" },
	{ "tx_1523_to_9022_byte_packets" },
	{ "rx_xon_frames" },
	{ "rx_xoff_frames" },
	{ "tx_xon_frames" },
	{ "tx_xoff_frames" },
	{ "rx_mac_ctrl_frames" },
	{ "rx_filtered_packets" },
	{ "rx_ftq_discards" },
	{ "rx_discards" },
	{ "rx_fw_discards" },
};
7151
7152 #define BNX2_NUM_STATS (sizeof(bnx2_stats_str_arr)/\
7153                         sizeof(bnx2_stats_str_arr[0]))
7154
7155 #define STATS_OFFSET32(offset_name) (offsetof(struct statistics_block, offset_name) / 4)
7156
/* Offsets (in 32-bit words) into struct statistics_block for each
 * counter, in the same order as bnx2_stats_str_arr.
 */
static const unsigned long bnx2_stats_offset_arr[BNX2_NUM_STATS] = {
    STATS_OFFSET32(stat_IfHCInOctets_hi),
    STATS_OFFSET32(stat_IfHCInBadOctets_hi),
    STATS_OFFSET32(stat_IfHCOutOctets_hi),
    STATS_OFFSET32(stat_IfHCOutBadOctets_hi),
    STATS_OFFSET32(stat_IfHCInUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCInMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCInBroadcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutUcastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutMulticastPkts_hi),
    STATS_OFFSET32(stat_IfHCOutBroadcastPkts_hi),
    STATS_OFFSET32(stat_emac_tx_stat_dot3statsinternalmactransmiterrors),
    STATS_OFFSET32(stat_Dot3StatsCarrierSenseErrors),
    STATS_OFFSET32(stat_Dot3StatsFCSErrors),
    STATS_OFFSET32(stat_Dot3StatsAlignmentErrors),
    STATS_OFFSET32(stat_Dot3StatsSingleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsMultipleCollisionFrames),
    STATS_OFFSET32(stat_Dot3StatsDeferredTransmissions),
    STATS_OFFSET32(stat_Dot3StatsExcessiveCollisions),
    STATS_OFFSET32(stat_Dot3StatsLateCollisions),
    STATS_OFFSET32(stat_EtherStatsCollisions),
    STATS_OFFSET32(stat_EtherStatsFragments),
    STATS_OFFSET32(stat_EtherStatsJabbers),
    STATS_OFFSET32(stat_EtherStatsUndersizePkts),
    STATS_OFFSET32(stat_EtherStatsOverrsizePkts),
    STATS_OFFSET32(stat_EtherStatsPktsRx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsRx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx64Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx65Octetsto127Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx128Octetsto255Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx256Octetsto511Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx512Octetsto1023Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1024Octetsto1522Octets),
    STATS_OFFSET32(stat_EtherStatsPktsTx1523Octetsto9022Octets),
    STATS_OFFSET32(stat_XonPauseFramesReceived),
    STATS_OFFSET32(stat_XoffPauseFramesReceived),
    STATS_OFFSET32(stat_OutXonSent),
    STATS_OFFSET32(stat_OutXoffSent),
    STATS_OFFSET32(stat_MacControlFramesReceived),
    STATS_OFFSET32(stat_IfInFramesL2FilterDiscards),
    STATS_OFFSET32(stat_IfInFTQDiscards),
    STATS_OFFSET32(stat_IfInMBUFDiscards),
    STATS_OFFSET32(stat_FwRxDrop),
};
7206
/* stat_IfHCInBadOctets and stat_Dot3StatsCarrierSenseErrors are
 * skipped because of errata.
 */
/* Per-counter byte widths for 5706 (and 5708 A0): 8 = 64-bit counter,
 * 4 = 32-bit counter, 0 = skip and report zero.
 */
static u8 bnx2_5706_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,0,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7217
/* Per-counter byte widths for the remaining chips; only the second
 * counter (stat_IfHCInBadOctets) is skipped here.
 */
static u8 bnx2_5708_stats_len_arr[BNX2_NUM_STATS] = {
	8,0,8,8,8,8,8,8,8,8,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,4,4,4,
	4,4,4,4,4,4,4,
};
7225
#define BNX2_NUM_TESTS 6

/* ethtool self-test names; indexes match the buf[] result slots
 * filled in by bnx2_self_test().
 */
static struct {
	char string[ETH_GSTRING_LEN];
} bnx2_tests_str_arr[BNX2_NUM_TESTS] = {
	{ "register_test (offline)" },
	{ "memory_test (offline)" },
	{ "loopback_test (offline)" },
	{ "nvram_test (online)" },
	{ "interrupt_test (online)" },
	{ "link_test (online)" },
};
7238
7239 static int
7240 bnx2_get_sset_count(struct net_device *dev, int sset)
7241 {
7242         switch (sset) {
7243         case ETH_SS_TEST:
7244                 return BNX2_NUM_TESTS;
7245         case ETH_SS_STATS:
7246                 return BNX2_NUM_STATS;
7247         default:
7248                 return -EOPNOTSUPP;
7249         }
7250 }
7251
/* ethtool self-test.  Offline tests (registers, memory, loopback)
 * require stopping the device and resetting the chip; online tests
 * (NVRAM, interrupt, link) run without disturbing traffic.  buf[]
 * gets one result slot per bnx2_tests_str_arr entry (0 = pass).
 */
static void
bnx2_self_test(struct net_device *dev, struct ethtool_test *etest, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);

	bnx2_set_power_state(bp, PCI_D0);

	memset(buf, 0, sizeof(u64) * BNX2_NUM_TESTS);
	if (etest->flags & ETH_TEST_FL_OFFLINE) {
		int i;

		bnx2_netif_stop(bp);
		bnx2_reset_chip(bp, BNX2_DRV_MSG_CODE_DIAG);
		bnx2_free_skbs(bp);

		if (bnx2_test_registers(bp) != 0) {
			buf[0] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if (bnx2_test_memory(bp) != 0) {
			buf[1] = 1;
			etest->flags |= ETH_TEST_FL_FAILED;
		}
		if ((buf[2] = bnx2_test_loopback(bp)) != 0)
			etest->flags |= ETH_TEST_FL_FAILED;

		/* Restore normal operation after the diagnostic reset. */
		if (!netif_running(bp->dev))
			bnx2_shutdown_chip(bp);
		else {
			bnx2_init_nic(bp, 1);
			bnx2_netif_start(bp);
		}

		/* wait for link up */
		for (i = 0; i < 7; i++) {
			if (bp->link_up)
				break;
			msleep_interruptible(1000);
		}
	}

	if (bnx2_test_nvram(bp) != 0) {
		buf[3] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}
	if (bnx2_test_intr(bp) != 0) {
		buf[4] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;
	}

	if (bnx2_test_link(bp) != 0) {
		buf[5] = 1;
		etest->flags |= ETH_TEST_FL_FAILED;

	}
	/* Drop back to low power if the interface is not in use. */
	if (!netif_running(bp->dev))
		bnx2_set_power_state(bp, PCI_D3hot);
}
7310
7311 static void
7312 bnx2_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
7313 {
7314         switch (stringset) {
7315         case ETH_SS_STATS:
7316                 memcpy(buf, bnx2_stats_str_arr,
7317                         sizeof(bnx2_stats_str_arr));
7318                 break;
7319         case ETH_SS_TEST:
7320                 memcpy(buf, bnx2_tests_str_arr,
7321                         sizeof(bnx2_tests_str_arr));
7322                 break;
7323         }
7324 }
7325
/* ethtool get_ethtool_stats: copy counters from the chip's statistics
 * block into buf[] in bnx2_stats_str_arr order.  The per-chip length
 * array marks each counter as 8-byte, 4-byte, or skipped (0) because
 * of hardware errata.
 */
static void
bnx2_get_ethtool_stats(struct net_device *dev,
		struct ethtool_stats *stats, u64 *buf)
{
	struct bnx2 *bp = netdev_priv(dev);
	int i;
	u32 *hw_stats = (u32 *) bp->stats_blk;
	u8 *stats_len_arr = NULL;

	/* No statistics block allocated yet: report all zeros. */
	if (hw_stats == NULL) {
		memset(buf, 0, sizeof(u64) * BNX2_NUM_STATS);
		return;
	}

	if ((CHIP_ID(bp) == CHIP_ID_5706_A0) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A1) ||
	    (CHIP_ID(bp) == CHIP_ID_5706_A2) ||
	    (CHIP_ID(bp) == CHIP_ID_5708_A0))
		stats_len_arr = bnx2_5706_stats_len_arr;
	else
		stats_len_arr = bnx2_5708_stats_len_arr;

	for (i = 0; i < BNX2_NUM_STATS; i++) {
		if (stats_len_arr[i] == 0) {
			/* skip this counter */
			buf[i] = 0;
			continue;
		}
		if (stats_len_arr[i] == 4) {
			/* 4-byte counter */
			buf[i] = (u64)
				*(hw_stats + bnx2_stats_offset_arr[i]);
			continue;
		}
		/* 8-byte counter: high word at the offset, low word next. */
		buf[i] = (((u64) *(hw_stats +
					bnx2_stats_offset_arr[i])) << 32) +
				*(hw_stats + bnx2_stats_offset_arr[i] + 1);
	}
}
7366
7367 static int
7368 bnx2_phys_id(struct net_device *dev, u32 data)
7369 {
7370         struct bnx2 *bp = netdev_priv(dev);
7371         int i;
7372         u32 save;
7373
7374         bnx2_set_power_state(bp, PCI_D0);
7375
7376         if (data == 0)
7377                 data = 2;
7378
7379         save = REG_RD(bp, BNX2_MISC_CFG);
7380         REG_WR(bp, BNX2_MISC_CFG, BNX2_MISC_CFG_LEDMODE_MAC);
7381
7382         for (i = 0; i < (data * 2); i++) {
7383                 if ((i % 2) == 0) {
7384                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE);
7385                 }
7386                 else {
7387                         REG_WR(bp, BNX2_EMAC_LED, BNX2_EMAC_LED_OVERRIDE |
7388                                 BNX2_EMAC_LED_1000MB_OVERRIDE |
7389                                 BNX2_EMAC_LED_100MB_OVERRIDE |
7390                                 BNX2_EMAC_LED_10MB_OVERRIDE |
7391                                 BNX2_EMAC_LED_TRAFFIC_OVERRIDE |
7392                                 BNX2_EMAC_LED_TRAFFIC);
7393                 }
7394                 msleep_interruptible(500);
7395                 if (signal_pending(current))
7396                         break;
7397         }
7398         REG_WR(bp, BNX2_EMAC_LED, 0);
7399         REG_WR(bp, BNX2_MISC_CFG, save);
7400
7401         if (!netif_running(dev))
7402                 bnx2_set_power_state(bp, PCI_D3hot);
7403
7404         return 0;
7405 }
7406
7407 static int
7408 bnx2_set_tx_csum(struct net_device *dev, u32 data)
7409 {
7410         struct bnx2 *bp = netdev_priv(dev);
7411
7412         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7413                 return (ethtool_op_set_tx_ipv6_csum(dev, data));
7414         else
7415                 return (ethtool_op_set_tx_csum(dev, data));
7416 }
7417
/* ethtool entry points for this driver. */
static const struct ethtool_ops bnx2_ethtool_ops = {
	.get_settings           = bnx2_get_settings,
	.set_settings           = bnx2_set_settings,
	.get_drvinfo            = bnx2_get_drvinfo,
	.get_regs_len           = bnx2_get_regs_len,
	.get_regs               = bnx2_get_regs,
	.get_wol                = bnx2_get_wol,
	.set_wol                = bnx2_set_wol,
	.nway_reset             = bnx2_nway_reset,
	.get_link               = bnx2_get_link,
	.get_eeprom_len         = bnx2_get_eeprom_len,
	.get_eeprom             = bnx2_get_eeprom,
	.set_eeprom             = bnx2_set_eeprom,
	.get_coalesce           = bnx2_get_coalesce,
	.set_coalesce           = bnx2_set_coalesce,
	.get_ringparam          = bnx2_get_ringparam,
	.set_ringparam          = bnx2_set_ringparam,
	.get_pauseparam         = bnx2_get_pauseparam,
	.set_pauseparam         = bnx2_set_pauseparam,
	.get_rx_csum            = bnx2_get_rx_csum,
	.set_rx_csum            = bnx2_set_rx_csum,
	.set_tx_csum            = bnx2_set_tx_csum,
	.set_sg                 = ethtool_op_set_sg,
	.set_tso                = bnx2_set_tso,
	.self_test              = bnx2_self_test,
	.get_strings            = bnx2_get_strings,
	.phys_id                = bnx2_phys_id,
	.get_ethtool_stats      = bnx2_get_ethtool_stats,
	.get_sset_count         = bnx2_get_sset_count,
};
7448
7449 /* Called with rtnl_lock */
/* MII ioctl handler (SIOCGMIIPHY / SIOCGMIIREG / SIOCSMIIREG).  PHY
 * register access takes phy_lock and is refused when the PHY is
 * managed by remote firmware or the interface is down.
 */
static int
bnx2_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2 *bp = netdev_priv(dev);
	int err;

	switch(cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->phy_addr;

		/* fallthru */
	case SIOCGMIIREG: {
		u32 mii_regval;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_read_phy(bp, data->reg_num & 0x1f, &mii_regval);
		spin_unlock_bh(&bp->phy_lock);

		data->val_out = mii_regval;

		return err;
	}

	case SIOCSMIIREG:
		/* Writing PHY registers requires admin privileges. */
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (bp->phy_flags & BNX2_PHY_FLAG_REMOTE_PHY_CAP)
			return -EOPNOTSUPP;

		if (!netif_running(dev))
			return -EAGAIN;

		spin_lock_bh(&bp->phy_lock);
		err = bnx2_write_phy(bp, data->reg_num & 0x1f, data->val_in);
		spin_unlock_bh(&bp->phy_lock);

		return err;

	default:
		/* do nothing */
		break;
	}
	return -EOPNOTSUPP;
}
7502
7503 /* Called with rtnl_lock */
7504 static int
7505 bnx2_change_mac_addr(struct net_device *dev, void *p)
7506 {
7507         struct sockaddr *addr = p;
7508         struct bnx2 *bp = netdev_priv(dev);
7509
7510         if (!is_valid_ether_addr(addr->sa_data))
7511                 return -EINVAL;
7512
7513         memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
7514         if (netif_running(dev))
7515                 bnx2_set_mac_addr(bp, bp->dev->dev_addr, 0);
7516
7517         return 0;
7518 }
7519
7520 /* Called with rtnl_lock */
7521 static int
7522 bnx2_change_mtu(struct net_device *dev, int new_mtu)
7523 {
7524         struct bnx2 *bp = netdev_priv(dev);
7525
7526         if (((new_mtu + ETH_HLEN) > MAX_ETHERNET_JUMBO_PACKET_SIZE) ||
7527                 ((new_mtu + ETH_HLEN) < MIN_ETHERNET_PACKET_SIZE))
7528                 return -EINVAL;
7529
7530         dev->mtu = new_mtu;
7531         return (bnx2_change_ring_size(bp, bp->rx_ring_size, bp->tx_ring_size));
7532 }
7533
7534 #if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
7535 static void
7536 poll_bnx2(struct net_device *dev)
7537 {
7538         struct bnx2 *bp = netdev_priv(dev);
7539         int i;
7540
7541         for (i = 0; i < bp->irq_nvecs; i++) {
7542                 disable_irq(bp->irq_tbl[i].vector);
7543                 bnx2_interrupt(bp->irq_tbl[i].vector, &bp->bnx2_napi[i]);
7544                 enable_irq(bp->irq_tbl[i].vector);
7545         }
7546 }
7547 #endif
7548
/* Determine whether a 5709 port uses a SerDes PHY, based on the bond
 * id and media strap fields of the dual-media control register.
 */
static void __devinit
bnx2_get_5709_media(struct bnx2 *bp)
{
	u32 val = REG_RD(bp, BNX2_MISC_DUAL_MEDIA_CTRL);
	u32 bond_id = val & BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID;
	u32 strap;

	/* Bond id C leaves the default (copper) flags; S is SerDes. */
	if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_C)
		return;
	else if (bond_id == BNX2_MISC_DUAL_MEDIA_CTRL_BOND_ID_S) {
		bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
		return;
	}

	/* Otherwise decode the media strap, preferring the software
	 * override value when one is set.
	 */
	if (val & BNX2_MISC_DUAL_MEDIA_CTRL_STRAP_OVERRIDE)
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL) >> 21;
	else
		strap = (val & BNX2_MISC_DUAL_MEDIA_CTRL_PHY_CTRL_STRAP) >> 8;

	/* The strap-value-to-media mapping differs per PCI function. */
	if (PCI_FUNC(bp->pdev->devfn) == 0) {
		switch (strap) {
		case 0x4:
		case 0x5:
		case 0x6:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	} else {
		switch (strap) {
		case 0x1:
		case 0x2:
		case 0x4:
			bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
			return;
		}
	}
}
7586
7587 static void __devinit
7588 bnx2_get_pci_speed(struct bnx2 *bp)
7589 {
7590         u32 reg;
7591
7592         reg = REG_RD(bp, BNX2_PCICFG_MISC_STATUS);
7593         if (reg & BNX2_PCICFG_MISC_STATUS_PCIX_DET) {
7594                 u32 clkreg;
7595
7596                 bp->flags |= BNX2_FLAG_PCIX;
7597
7598                 clkreg = REG_RD(bp, BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS);
7599
7600                 clkreg &= BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET;
7601                 switch (clkreg) {
7602                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_133MHZ:
7603                         bp->bus_speed_mhz = 133;
7604                         break;
7605
7606                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_95MHZ:
7607                         bp->bus_speed_mhz = 100;
7608                         break;
7609
7610                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_66MHZ:
7611                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_80MHZ:
7612                         bp->bus_speed_mhz = 66;
7613                         break;
7614
7615                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_48MHZ:
7616                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_55MHZ:
7617                         bp->bus_speed_mhz = 50;
7618                         break;
7619
7620                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_LOW:
7621                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_32MHZ:
7622                 case BNX2_PCICFG_PCI_CLOCK_CONTROL_BITS_PCI_CLK_SPD_DET_38MHZ:
7623                         bp->bus_speed_mhz = 33;
7624                         break;
7625                 }
7626         }
7627         else {
7628                 if (reg & BNX2_PCICFG_MISC_STATUS_M66EN)
7629                         bp->bus_speed_mhz = 66;
7630                 else
7631                         bp->bus_speed_mhz = 33;
7632         }
7633
7634         if (reg & BNX2_PCICFG_MISC_STATUS_32BIT_DET)
7635                 bp->flags |= BNX2_FLAG_PCI_32BIT;
7636
7637 }
7638
7639 static int __devinit
7640 bnx2_init_board(struct pci_dev *pdev, struct net_device *dev)
7641 {
7642         struct bnx2 *bp;
7643         unsigned long mem_len;
7644         int rc, i, j;
7645         u32 reg;
7646         u64 dma_mask, persist_dma_mask;
7647
7648         SET_NETDEV_DEV(dev, &pdev->dev);
7649         bp = netdev_priv(dev);
7650
7651         bp->flags = 0;
7652         bp->phy_flags = 0;
7653
7654         /* enable device (incl. PCI PM wakeup), and bus-mastering */
7655         rc = pci_enable_device(pdev);
7656         if (rc) {
7657                 dev_err(&pdev->dev, "Cannot enable PCI device, aborting.\n");
7658                 goto err_out;
7659         }
7660
7661         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
7662                 dev_err(&pdev->dev,
7663                         "Cannot find PCI device base address, aborting.\n");
7664                 rc = -ENODEV;
7665                 goto err_out_disable;
7666         }
7667
7668         rc = pci_request_regions(pdev, DRV_MODULE_NAME);
7669         if (rc) {
7670                 dev_err(&pdev->dev, "Cannot obtain PCI resources, aborting.\n");
7671                 goto err_out_disable;
7672         }
7673
7674         pci_set_master(pdev);
7675         pci_save_state(pdev);
7676
7677         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
7678         if (bp->pm_cap == 0) {
7679                 dev_err(&pdev->dev,
7680                         "Cannot find power management capability, aborting.\n");
7681                 rc = -EIO;
7682                 goto err_out_release;
7683         }
7684
7685         bp->dev = dev;
7686         bp->pdev = pdev;
7687
7688         spin_lock_init(&bp->phy_lock);
7689         spin_lock_init(&bp->indirect_lock);
7690         INIT_WORK(&bp->reset_task, bnx2_reset_task);
7691
7692         dev->base_addr = dev->mem_start = pci_resource_start(pdev, 0);
7693         mem_len = MB_GET_CID_ADDR(TX_TSS_CID + TX_MAX_TSS_RINGS + 1);
7694         dev->mem_end = dev->mem_start + mem_len;
7695         dev->irq = pdev->irq;
7696
7697         bp->regview = ioremap_nocache(dev->base_addr, mem_len);
7698
7699         if (!bp->regview) {
7700                 dev_err(&pdev->dev, "Cannot map register space, aborting.\n");
7701                 rc = -ENOMEM;
7702                 goto err_out_release;
7703         }
7704
7705         /* Configure byte swap and enable write to the reg_window registers.
7706          * Rely on CPU to do target byte swapping on big endian systems
7707          * The chip's target access swapping will not swap all accesses
7708          */
7709         pci_write_config_dword(bp->pdev, BNX2_PCICFG_MISC_CONFIG,
7710                                BNX2_PCICFG_MISC_CONFIG_REG_WINDOW_ENA |
7711                                BNX2_PCICFG_MISC_CONFIG_TARGET_MB_WORD_SWAP);
7712
7713         bnx2_set_power_state(bp, PCI_D0);
7714
7715         bp->chip_id = REG_RD(bp, BNX2_MISC_ID);
7716
7717         if (CHIP_NUM(bp) == CHIP_NUM_5709) {
7718                 if (pci_find_capability(pdev, PCI_CAP_ID_EXP) == 0) {
7719                         dev_err(&pdev->dev,
7720                                 "Cannot find PCIE capability, aborting.\n");
7721                         rc = -EIO;
7722                         goto err_out_unmap;
7723                 }
7724                 bp->flags |= BNX2_FLAG_PCIE;
7725                 if (CHIP_REV(bp) == CHIP_REV_Ax)
7726                         bp->flags |= BNX2_FLAG_JUMBO_BROKEN;
7727         } else {
7728                 bp->pcix_cap = pci_find_capability(pdev, PCI_CAP_ID_PCIX);
7729                 if (bp->pcix_cap == 0) {
7730                         dev_err(&pdev->dev,
7731                                 "Cannot find PCIX capability, aborting.\n");
7732                         rc = -EIO;
7733                         goto err_out_unmap;
7734                 }
7735                 bp->flags |= BNX2_FLAG_BROKEN_STATS;
7736         }
7737
7738         if (CHIP_NUM(bp) == CHIP_NUM_5709 && CHIP_REV(bp) != CHIP_REV_Ax) {
7739                 if (pci_find_capability(pdev, PCI_CAP_ID_MSIX))
7740                         bp->flags |= BNX2_FLAG_MSIX_CAP;
7741         }
7742
7743         if (CHIP_ID(bp) != CHIP_ID_5706_A0 && CHIP_ID(bp) != CHIP_ID_5706_A1) {
7744                 if (pci_find_capability(pdev, PCI_CAP_ID_MSI))
7745                         bp->flags |= BNX2_FLAG_MSI_CAP;
7746         }
7747
7748         /* 5708 cannot support DMA addresses > 40-bit.  */
7749         if (CHIP_NUM(bp) == CHIP_NUM_5708)
7750                 persist_dma_mask = dma_mask = DMA_BIT_MASK(40);
7751         else
7752                 persist_dma_mask = dma_mask = DMA_BIT_MASK(64);
7753
7754         /* Configure DMA attributes. */
7755         if (pci_set_dma_mask(pdev, dma_mask) == 0) {
7756                 dev->features |= NETIF_F_HIGHDMA;
7757                 rc = pci_set_consistent_dma_mask(pdev, persist_dma_mask);
7758                 if (rc) {
7759                         dev_err(&pdev->dev,
7760                                 "pci_set_consistent_dma_mask failed, aborting.\n");
7761                         goto err_out_unmap;
7762                 }
7763         } else if ((rc = pci_set_dma_mask(pdev, DMA_BIT_MASK(32))) != 0) {
7764                 dev_err(&pdev->dev, "System does not support DMA, aborting.\n");
7765                 goto err_out_unmap;
7766         }
7767
7768         if (!(bp->flags & BNX2_FLAG_PCIE))
7769                 bnx2_get_pci_speed(bp);
7770
7771         /* 5706A0 may falsely detect SERR and PERR. */
7772         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7773                 reg = REG_RD(bp, PCI_COMMAND);
7774                 reg &= ~(PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
7775                 REG_WR(bp, PCI_COMMAND, reg);
7776         }
7777         else if ((CHIP_ID(bp) == CHIP_ID_5706_A1) &&
7778                 !(bp->flags & BNX2_FLAG_PCIX)) {
7779
7780                 dev_err(&pdev->dev,
7781                         "5706 A1 can only be used in a PCIX bus, aborting.\n");
7782                 goto err_out_unmap;
7783         }
7784
7785         bnx2_init_nvram(bp);
7786
7787         reg = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_SIGNATURE);
7788
7789         if ((reg & BNX2_SHM_HDR_SIGNATURE_SIG_MASK) ==
7790             BNX2_SHM_HDR_SIGNATURE_SIG) {
7791                 u32 off = PCI_FUNC(pdev->devfn) << 2;
7792
7793                 bp->shmem_base = bnx2_reg_rd_ind(bp, BNX2_SHM_HDR_ADDR_0 + off);
7794         } else
7795                 bp->shmem_base = HOST_VIEW_SHMEM_BASE;
7796
7797         /* Get the permanent MAC address.  First we need to make sure the
7798          * firmware is actually running.
7799          */
7800         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_SIGNATURE);
7801
7802         if ((reg & BNX2_DEV_INFO_SIGNATURE_MAGIC_MASK) !=
7803             BNX2_DEV_INFO_SIGNATURE_MAGIC) {
7804                 dev_err(&pdev->dev, "Firmware not running, aborting.\n");
7805                 rc = -ENODEV;
7806                 goto err_out_unmap;
7807         }
7808
7809         reg = bnx2_shmem_rd(bp, BNX2_DEV_INFO_BC_REV);
7810         for (i = 0, j = 0; i < 3; i++) {
7811                 u8 num, k, skip0;
7812
7813                 num = (u8) (reg >> (24 - (i * 8)));
7814                 for (k = 100, skip0 = 1; k >= 1; num %= k, k /= 10) {
7815                         if (num >= k || !skip0 || k == 1) {
7816                                 bp->fw_version[j++] = (num / k) + '0';
7817                                 skip0 = 0;
7818                         }
7819                 }
7820                 if (i != 2)
7821                         bp->fw_version[j++] = '.';
7822         }
7823         reg = bnx2_shmem_rd(bp, BNX2_PORT_FEATURE);
7824         if (reg & BNX2_PORT_FEATURE_WOL_ENABLED)
7825                 bp->wol = 1;
7826
7827         if (reg & BNX2_PORT_FEATURE_ASF_ENABLED) {
7828                 bp->flags |= BNX2_FLAG_ASF_ENABLE;
7829
7830                 for (i = 0; i < 30; i++) {
7831                         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7832                         if (reg & BNX2_CONDITION_MFW_RUN_MASK)
7833                                 break;
7834                         msleep(10);
7835                 }
7836         }
7837         reg = bnx2_shmem_rd(bp, BNX2_BC_STATE_CONDITION);
7838         reg &= BNX2_CONDITION_MFW_RUN_MASK;
7839         if (reg != BNX2_CONDITION_MFW_RUN_UNKNOWN &&
7840             reg != BNX2_CONDITION_MFW_RUN_NONE) {
7841                 u32 addr = bnx2_shmem_rd(bp, BNX2_MFW_VER_PTR);
7842
7843                 bp->fw_version[j++] = ' ';
7844                 for (i = 0; i < 3; i++) {
7845                         reg = bnx2_reg_rd_ind(bp, addr + i * 4);
7846                         reg = swab32(reg);
7847                         memcpy(&bp->fw_version[j], &reg, 4);
7848                         j += 4;
7849                 }
7850         }
7851
7852         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_UPPER);
7853         bp->mac_addr[0] = (u8) (reg >> 8);
7854         bp->mac_addr[1] = (u8) reg;
7855
7856         reg = bnx2_shmem_rd(bp, BNX2_PORT_HW_CFG_MAC_LOWER);
7857         bp->mac_addr[2] = (u8) (reg >> 24);
7858         bp->mac_addr[3] = (u8) (reg >> 16);
7859         bp->mac_addr[4] = (u8) (reg >> 8);
7860         bp->mac_addr[5] = (u8) reg;
7861
7862         bp->tx_ring_size = MAX_TX_DESC_CNT;
7863         bnx2_set_rx_ring_size(bp, 255);
7864
7865         bp->rx_csum = 1;
7866
7867         bp->tx_quick_cons_trip_int = 2;
7868         bp->tx_quick_cons_trip = 20;
7869         bp->tx_ticks_int = 18;
7870         bp->tx_ticks = 80;
7871
7872         bp->rx_quick_cons_trip_int = 2;
7873         bp->rx_quick_cons_trip = 12;
7874         bp->rx_ticks_int = 18;
7875         bp->rx_ticks = 18;
7876
7877         bp->stats_ticks = USEC_PER_SEC & BNX2_HC_STATS_TICKS_HC_STAT_TICKS;
7878
7879         bp->current_interval = BNX2_TIMER_INTERVAL;
7880
7881         bp->phy_addr = 1;
7882
7883         /* Disable WOL support if we are running on a SERDES chip. */
7884         if (CHIP_NUM(bp) == CHIP_NUM_5709)
7885                 bnx2_get_5709_media(bp);
7886         else if (CHIP_BOND_ID(bp) & CHIP_BOND_ID_SERDES_BIT)
7887                 bp->phy_flags |= BNX2_PHY_FLAG_SERDES;
7888
7889         bp->phy_port = PORT_TP;
7890         if (bp->phy_flags & BNX2_PHY_FLAG_SERDES) {
7891                 bp->phy_port = PORT_FIBRE;
7892                 reg = bnx2_shmem_rd(bp, BNX2_SHARED_HW_CFG_CONFIG);
7893                 if (!(reg & BNX2_SHARED_HW_CFG_GIG_LINK_ON_VAUX)) {
7894                         bp->flags |= BNX2_FLAG_NO_WOL;
7895                         bp->wol = 0;
7896                 }
7897                 if (CHIP_NUM(bp) == CHIP_NUM_5706) {
7898                         /* Don't do parallel detect on this board because of
7899                          * some board problems.  The link will not go down
7900                          * if we do parallel detect.
7901                          */
7902                         if (pdev->subsystem_vendor == PCI_VENDOR_ID_HP &&
7903                             pdev->subsystem_device == 0x310c)
7904                                 bp->phy_flags |= BNX2_PHY_FLAG_NO_PARALLEL;
7905                 } else {
7906                         bp->phy_addr = 2;
7907                         if (reg & BNX2_SHARED_HW_CFG_PHY_2_5G)
7908                                 bp->phy_flags |= BNX2_PHY_FLAG_2_5G_CAPABLE;
7909                 }
7910         } else if (CHIP_NUM(bp) == CHIP_NUM_5706 ||
7911                    CHIP_NUM(bp) == CHIP_NUM_5708)
7912                 bp->phy_flags |= BNX2_PHY_FLAG_CRC_FIX;
7913         else if (CHIP_NUM(bp) == CHIP_NUM_5709 &&
7914                  (CHIP_REV(bp) == CHIP_REV_Ax ||
7915                   CHIP_REV(bp) == CHIP_REV_Bx))
7916                 bp->phy_flags |= BNX2_PHY_FLAG_DIS_EARLY_DAC;
7917
7918         bnx2_init_fw_cap(bp);
7919
7920         if ((CHIP_ID(bp) == CHIP_ID_5708_A0) ||
7921             (CHIP_ID(bp) == CHIP_ID_5708_B0) ||
7922             (CHIP_ID(bp) == CHIP_ID_5708_B1) ||
7923             !(REG_RD(bp, BNX2_PCI_CONFIG_3) & BNX2_PCI_CONFIG_3_VAUX_PRESET)) {
7924                 bp->flags |= BNX2_FLAG_NO_WOL;
7925                 bp->wol = 0;
7926         }
7927
7928         if (CHIP_ID(bp) == CHIP_ID_5706_A0) {
7929                 bp->tx_quick_cons_trip_int =
7930                         bp->tx_quick_cons_trip;
7931                 bp->tx_ticks_int = bp->tx_ticks;
7932                 bp->rx_quick_cons_trip_int =
7933                         bp->rx_quick_cons_trip;
7934                 bp->rx_ticks_int = bp->rx_ticks;
7935                 bp->comp_prod_trip_int = bp->comp_prod_trip;
7936                 bp->com_ticks_int = bp->com_ticks;
7937                 bp->cmd_ticks_int = bp->cmd_ticks;
7938         }
7939
7940         /* Disable MSI on 5706 if AMD 8132 bridge is found.
7941          *
7942          * MSI is defined to be 32-bit write.  The 5706 does 64-bit MSI writes
7943          * with byte enables disabled on the unused 32-bit word.  This is legal
7944          * but causes problems on the AMD 8132 which will eventually stop
7945          * responding after a while.
7946          *
7947          * AMD believes this incompatibility is unique to the 5706, and
7948          * prefers to locally disable MSI rather than globally disabling it.
7949          */
7950         if (CHIP_NUM(bp) == CHIP_NUM_5706 && disable_msi == 0) {
7951                 struct pci_dev *amd_8132 = NULL;
7952
7953                 while ((amd_8132 = pci_get_device(PCI_VENDOR_ID_AMD,
7954                                                   PCI_DEVICE_ID_AMD_8132_BRIDGE,
7955                                                   amd_8132))) {
7956
7957                         if (amd_8132->revision >= 0x10 &&
7958                             amd_8132->revision <= 0x13) {
7959                                 disable_msi = 1;
7960                                 pci_dev_put(amd_8132);
7961                                 break;
7962                         }
7963                 }
7964         }
7965
7966         bnx2_set_default_link(bp);
7967         bp->req_flow_ctrl = FLOW_CTRL_RX | FLOW_CTRL_TX;
7968
7969         init_timer(&bp->timer);
7970         bp->timer.expires = RUN_AT(BNX2_TIMER_INTERVAL);
7971         bp->timer.data = (unsigned long) bp;
7972         bp->timer.function = bnx2_timer;
7973
7974         return 0;
7975
7976 err_out_unmap:
7977         if (bp->regview) {
7978                 iounmap(bp->regview);
7979                 bp->regview = NULL;
7980         }
7981
7982 err_out_release:
7983         pci_release_regions(pdev);
7984
7985 err_out_disable:
7986         pci_disable_device(pdev);
7987         pci_set_drvdata(pdev, NULL);
7988
7989 err_out:
7990         return rc;
7991 }
7992
7993 static char * __devinit
7994 bnx2_bus_string(struct bnx2 *bp, char *str)
7995 {
7996         char *s = str;
7997
7998         if (bp->flags & BNX2_FLAG_PCIE) {
7999                 s += sprintf(s, "PCI Express");
8000         } else {
8001                 s += sprintf(s, "PCI");
8002                 if (bp->flags & BNX2_FLAG_PCIX)
8003                         s += sprintf(s, "-X");
8004                 if (bp->flags & BNX2_FLAG_PCI_32BIT)
8005                         s += sprintf(s, " 32-bit");
8006                 else
8007                         s += sprintf(s, " 64-bit");
8008                 s += sprintf(s, " %dMHz", bp->bus_speed_mhz);
8009         }
8010         return str;
8011 }
8012
8013 static void __devinit
8014 bnx2_init_napi(struct bnx2 *bp)
8015 {
8016         int i;
8017
8018         for (i = 0; i < BNX2_MAX_MSIX_VEC; i++) {
8019                 struct bnx2_napi *bnapi = &bp->bnx2_napi[i];
8020                 int (*poll)(struct napi_struct *, int);
8021
8022                 if (i == 0)
8023                         poll = bnx2_poll;
8024                 else
8025                         poll = bnx2_poll_msix;
8026
8027                 netif_napi_add(bp->dev, &bp->bnx2_napi[i].napi, poll, 64);
8028                 bnapi->bp = bp;
8029         }
8030 }
8031
/* Net device callbacks wired into the stack at probe time. */
static const struct net_device_ops bnx2_netdev_ops = {
	.ndo_open		= bnx2_open,
	.ndo_start_xmit		= bnx2_start_xmit,
	.ndo_stop		= bnx2_close,
	.ndo_get_stats		= bnx2_get_stats,
	.ndo_set_rx_mode	= bnx2_set_rx_mode,
	.ndo_do_ioctl		= bnx2_ioctl,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_set_mac_address	= bnx2_change_mac_addr,
	.ndo_change_mtu		= bnx2_change_mtu,
	.ndo_tx_timeout		= bnx2_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2,
#endif
};
8050
/* Mirror feature flags into dev->vlan_features when VLAN support is
 * compiled in; no-op otherwise.  Note: specifier order fixed to the
 * conventional "static inline void".
 */
static inline void vlan_features_add(struct net_device *dev, unsigned long flags)
{
#ifdef BCM_VLAN
	dev->vlan_features |= flags;
#endif
}
8057
/* PCI probe entry point: allocate the netdev, initialize the board,
 * load firmware, set feature flags and register with the stack.
 * Returns 0 on success or a negative errno, releasing everything
 * acquired on failure.
 */
static int __devinit
bnx2_init_one(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	static int version_printed = 0;
	struct net_device *dev = NULL;
	struct bnx2 *bp;
	int rc;
	char str[40];

	/* Print the driver banner only on the first probed device. */
	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), TX_MAX_RINGS);

	if (!dev)
		return -ENOMEM;

	rc = bnx2_init_board(pdev, dev);
	if (rc < 0) {
		/* bnx2_init_board already released its own resources. */
		free_netdev(dev);
		return rc;
	}

	dev->netdev_ops = &bnx2_netdev_ops;
	dev->watchdog_timeo = TX_TIMEOUT;
	dev->ethtool_ops = &bnx2_ethtool_ops;

	bp = netdev_priv(dev);
	bnx2_init_napi(bp);

	pci_set_drvdata(pdev, dev);

	rc = bnx2_request_firmware(bp);
	if (rc)
		goto error;

	/* MAC address was read from shared memory by bnx2_init_board(). */
	memcpy(dev->dev_addr, bp->mac_addr, 6);
	memcpy(dev->perm_addr, bp->mac_addr, 6);

	/* Checksum/TSO offload capabilities; IPv6 variants are 5709-only. */
	dev->features |= NETIF_F_IP_CSUM | NETIF_F_SG;
	vlan_features_add(dev, NETIF_F_IP_CSUM | NETIF_F_SG);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_IPV6_CSUM;
		vlan_features_add(dev, NETIF_F_IPV6_CSUM);
	}
#ifdef BCM_VLAN
	dev->features |= NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX;
#endif
	dev->features |= NETIF_F_TSO | NETIF_F_TSO_ECN;
	vlan_features_add(dev, NETIF_F_TSO | NETIF_F_TSO_ECN);
	if (CHIP_NUM(bp) == CHIP_NUM_5709) {
		dev->features |= NETIF_F_TSO6;
		vlan_features_add(dev, NETIF_F_TSO6);
	}
	if ((rc = register_netdev(dev))) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto error;
	}

	printk(KERN_INFO "%s: %s (%c%d) %s found at mem %lx, "
		"IRQ %d, node addr %pM\n",
		dev->name,
		board_info[ent->driver_data].name,
		((CHIP_ID(bp) & 0xf000) >> 12) + 'A',
		((CHIP_ID(bp) & 0x0ff0) >> 4),
		bnx2_bus_string(bp, str),
		dev->base_addr,
		bp->pdev->irq, dev->dev_addr);

	return 0;

error:
	/* Undo everything in reverse order of acquisition. */
	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
	free_netdev(dev);
	return rc;
}
8144
/* PCI remove entry point: tear down in strict reverse order of probe —
 * cancel deferred work, unregister from the stack, drop firmware blobs,
 * unmap registers and release/disable the PCI device.
 */
static void __devexit
bnx2_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* Make sure the reset task is not running before unregistering. */
	flush_scheduled_work();

	unregister_netdev(dev);

	if (bp->mips_firmware)
		release_firmware(bp->mips_firmware);
	if (bp->rv2p_firmware)
		release_firmware(bp->rv2p_firmware);

	if (bp->regview)
		iounmap(bp->regview);

	free_netdev(dev);
	pci_release_regions(pdev);
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}
8168
/* Legacy PM suspend hook: quiesce the interface, shut down the chip
 * and drop into the PCI power state chosen for @state.
 */
static int
bnx2_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	/* PCI register 4 needs to be saved whether netif_running() or not.
	 * MSI address and data need to be saved if using MSI and
	 * netif_running().
	 */
	pci_save_state(pdev);
	if (!netif_running(dev))
		return 0;

	/* Stop deferred work and traffic before touching the hardware. */
	flush_scheduled_work();
	bnx2_netif_stop(bp);
	netif_device_detach(dev);
	del_timer_sync(&bp->timer);
	bnx2_shutdown_chip(bp);
	bnx2_free_skbs(bp);
	bnx2_set_power_state(bp, pci_choose_state(pdev, state));
	return 0;
}
8192
/* Legacy PM resume hook: restore PCI state and, if the interface was
 * running, power up and fully re-initialize the NIC.
 */
static int
bnx2_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2 *bp = netdev_priv(dev);

	pci_restore_state(pdev);
	if (!netif_running(dev))
		return 0;

	bnx2_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);
	bnx2_init_nic(bp, 1);
	bnx2_netif_start(bp);
	return 0;
}
8209
8210 /**
8211  * bnx2_io_error_detected - called when PCI error is detected
8212  * @pdev: Pointer to PCI device
8213  * @state: The current pci connection state
8214  *
8215  * This function is called after a PCI bus error affecting
8216  * this device has been detected.
8217  */
8218 static pci_ers_result_t bnx2_io_error_detected(struct pci_dev *pdev,
8219                                                pci_channel_state_t state)
8220 {
8221         struct net_device *dev = pci_get_drvdata(pdev);
8222         struct bnx2 *bp = netdev_priv(dev);
8223
8224         rtnl_lock();
8225         netif_device_detach(dev);
8226
8227         if (state == pci_channel_io_perm_failure) {
8228                 rtnl_unlock();
8229                 return PCI_ERS_RESULT_DISCONNECT;
8230         }
8231
8232         if (netif_running(dev)) {
8233                 bnx2_netif_stop(bp);
8234                 del_timer_sync(&bp->timer);
8235                 bnx2_reset_nic(bp, BNX2_DRV_MSG_CODE_RESET);
8236         }
8237
8238         pci_disable_device(pdev);
8239         rtnl_unlock();
8240
8241         /* Request a slot slot reset. */
8242         return PCI_ERS_RESULT_NEED_RESET;
8243 }
8244
8245 /**
8246  * bnx2_io_slot_reset - called after the pci bus has been reset.
8247  * @pdev: Pointer to PCI device
8248  *
8249  * Restart the card from scratch, as if from a cold-boot.
8250  */
8251 static pci_ers_result_t bnx2_io_slot_reset(struct pci_dev *pdev)
8252 {
8253         struct net_device *dev = pci_get_drvdata(pdev);
8254         struct bnx2 *bp = netdev_priv(dev);
8255
8256         rtnl_lock();
8257         if (pci_enable_device(pdev)) {
8258                 dev_err(&pdev->dev,
8259                         "Cannot re-enable PCI device after reset.\n");
8260                 rtnl_unlock();
8261                 return PCI_ERS_RESULT_DISCONNECT;
8262         }
8263         pci_set_master(pdev);
8264         pci_restore_state(pdev);
8265
8266         if (netif_running(dev)) {
8267                 bnx2_set_power_state(bp, PCI_D0);
8268                 bnx2_init_nic(bp, 1);
8269         }
8270
8271         rtnl_unlock();
8272         return PCI_ERS_RESULT_RECOVERED;
8273 }
8274
8275 /**
8276  * bnx2_io_resume - called when traffic can start flowing again.
8277  * @pdev: Pointer to PCI device
8278  *
8279  * This callback is called when the error recovery driver tells us that
8280  * its OK to resume normal operation.
8281  */
8282 static void bnx2_io_resume(struct pci_dev *pdev)
8283 {
8284         struct net_device *dev = pci_get_drvdata(pdev);
8285         struct bnx2 *bp = netdev_priv(dev);
8286
8287         rtnl_lock();
8288         if (netif_running(dev))
8289                 bnx2_netif_start(bp);
8290
8291         netif_device_attach(dev);
8292         rtnl_unlock();
8293 }
8294
/* AER (PCI error recovery) callbacks. */
static struct pci_error_handlers bnx2_err_handler = {
	.error_detected	= bnx2_io_error_detected,
	.slot_reset	= bnx2_io_slot_reset,
	.resume		= bnx2_io_resume,
};
8300
/* PCI driver registration for all supported NetXtreme II devices. */
static struct pci_driver bnx2_pci_driver = {
	.name		= DRV_MODULE_NAME,
	.id_table	= bnx2_pci_tbl,
	.probe		= bnx2_init_one,
	.remove		= __devexit_p(bnx2_remove_one),
	.suspend	= bnx2_suspend,
	.resume		= bnx2_resume,
	.err_handler	= &bnx2_err_handler,
};
8310
/* Module init: register the PCI driver; probe runs per matching device. */
static int __init bnx2_init(void)
{
	return pci_register_driver(&bnx2_pci_driver);
}
8315
/* Module exit: unregister the driver, removing all bound devices. */
static void __exit bnx2_cleanup(void)
{
	pci_unregister_driver(&bnx2_pci_driver);
}
8320
/* Wire the init/exit functions into the module loader. */
module_init(bnx2_init);
module_exit(bnx2_cleanup);


