/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2010 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>
#include <linux/stringify.h>

#define BNX2X_MAIN
#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_cmn.h"


#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_VERSION                                 \
        __stringify(BCM_5710_FW_MAJOR_VERSION) "."      \
        __stringify(BCM_5710_FW_MINOR_VERSION) "."      \
        __stringify(BCM_5710_FW_REVISION_VERSION) "."   \
        __stringify(BCM_5710_FW_ENGINEERING_VERSION)
#define FW_FILE_NAME_E1         "bnx2x/bnx2x-e1-" FW_FILE_VERSION ".fw"
#define FW_FILE_NAME_E1H        "bnx2x/bnx2x-e1h-" FW_FILE_VERSION ".fw"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT              (5*HZ)

static char version[] __devinitdata =
        "Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
        DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
MODULE_FIRMWARE(FW_FILE_NAME_E1);
MODULE_FIRMWARE(FW_FILE_NAME_E1H);

static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
                             "(0 Disable; 1 Enable (default))");

static int num_queues;
module_param(num_queues, int, 0);
MODULE_PARM_DESC(num_queues, " Number of queues for multi_mode=1"
                                " (default is the number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode other than MSI-X "
                                "(1 INT#x; 2 MSI)");

static int dropless_fc;
module_param(dropless_fc, int, 0);
MODULE_PARM_DESC(dropless_fc, " Pause on exhausted host ring");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static struct workqueue_struct *bnx2x_wq;

enum bnx2x_board_type {
        BCM57710 = 0,
        BCM57711 = 1,
        BCM57711E = 2,
};

/* indexed by board_type, above */
static struct {
        char *name;
} board_info[] __devinitdata = {
        { "Broadcom NetXtreme II BCM57710 XGb" },
        { "Broadcom NetXtreme II BCM57711 XGb" },
        { "Broadcom NetXtreme II BCM57711E XGb" }
};


static DEFINE_PCI_DEVICE_TABLE(bnx2x_pci_tbl) = {
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57710), BCM57710 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711), BCM57711 },
        { PCI_VDEVICE(BROADCOM, PCI_DEVICE_ID_NX2_57711E), BCM57711E },
        { 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);

/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);
}

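/* indirect register read via the PCI config-space GRC window;
 * counterpart of bnx2x_reg_wr_ind() above
 */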
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
        u32 val;

        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
        pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
        pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
                               PCICFG_VENDOR_ID_OFFSET);

        return val;
}

const u32 dmae_reg_go_c[] = {
        DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
        DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
        DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
        DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae, int idx)
{
        u32 cmd_offset;
        int i;

        cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
        for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
                REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

                DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
                   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
        }
        REG_WR(bp, dmae_reg_go_c[idx], 1);
}

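/* DMA len32 dwords from host memory at dma_addr to GRC address dst_addr,
 * polling a write-back completion word; falls back to indirect register
 * writes while DMAE is not yet ready
 */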
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
                      u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);

                DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x  len32 %d)"
                   "  using indirect\n", dst_addr, len32);
                bnx2x_init_ind_wr(bp, dst_addr, data, len32);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = U64_LO(dma_addr);
        dmae.src_addr_hi = U64_HI(dma_addr);
        dmae.dst_addr_lo = dst_addr >> 2;
        dmae.dst_addr_hi = 0;
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, dst_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_lock(&bp->dmae_mutex);

        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {
                DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }

        mutex_unlock(&bp->dmae_mutex);
}

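/* DMA len32 dwords from GRC address src_addr into the slowpath wb_data
 * buffer; falls back to indirect register reads while DMAE is not yet
 * ready
 */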
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
        struct dmae_command dmae;
        u32 *wb_comp = bnx2x_sp(bp, wb_comp);
        int cnt = 200;

        if (!bp->dmae_ready) {
                u32 *data = bnx2x_sp(bp, wb_data[0]);
                int i;

                DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x  len32 %d)"
                   "  using indirect\n", src_addr, len32);
                for (i = 0; i < len32; i++)
                        data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
                return;
        }

        memset(&dmae, 0, sizeof(struct dmae_command));

        dmae.opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
                       DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
                       DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
                       DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
                       DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
                       (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
                       (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
        dmae.src_addr_lo = src_addr >> 2;
        dmae.src_addr_hi = 0;
        dmae.dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
        dmae.dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
        dmae.len = len32;
        dmae.comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
        dmae.comp_val = DMAE_COMP_VAL;

        DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
           DP_LEVEL "src_addr  [%x:%08x]  len [%d *4]  "
                    "dst_addr [%x:%08x (%08x)]\n"
           DP_LEVEL "comp_addr [%x:%08x]  comp_val 0x%08x\n",
           dmae.opcode, dmae.src_addr_hi, dmae.src_addr_lo,
           dmae.len, dmae.dst_addr_hi, dmae.dst_addr_lo, src_addr,
           dmae.comp_addr_hi, dmae.comp_addr_lo, dmae.comp_val);

        mutex_lock(&bp->dmae_mutex);

        memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
        *wb_comp = 0;

        bnx2x_post_dmae(bp, &dmae, INIT_DMAE_C(bp));

        udelay(5);

        while (*wb_comp != DMAE_COMP_VAL) {

                if (!cnt) {
                        BNX2X_ERR("DMAE timeout!\n");
                        break;
                }
                cnt--;
                /* adjust delay for emulation/FPGA */
                if (CHIP_REV_IS_SLOW(bp))
                        msleep(100);
                else
                        udelay(5);
        }
        DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
           bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
           bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

        mutex_unlock(&bp->dmae_mutex);
}

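/* write a buffer of arbitrary length in chunks of at most
 * DMAE_LEN32_WR_MAX dwords per DMAE command
 */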
void bnx2x_write_dmae_phys_len(struct bnx2x *bp, dma_addr_t phys_addr,
                               u32 addr, u32 len)
{
        int dmae_wr_max = DMAE_LEN32_WR_MAX(bp);
        int offset = 0;

        while (len > dmae_wr_max) {
                bnx2x_write_dmae(bp, phys_addr + offset,
                                 addr + offset, dmae_wr_max);
                offset += dmae_wr_max * 4;
                len -= dmae_wr_max;
        }

        bnx2x_write_dmae(bp, phys_addr + offset, addr + offset, len);
}

/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
        u32 wb_write[2];

        wb_write[0] = val_hi;
        wb_write[1] = val_lo;
        REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
        u32 wb_data[2];

        REG_RD_DMAE(bp, reg, wb_data, 2);

        return HILO_U64(wb_data[0], wb_data[1]);
}
#endif

static int bnx2x_mc_assert(struct bnx2x *bp)
{
        char last_idx;
        int i, rc = 0;
        u32 row0, row1, row2, row3;

        /* XSTORM */
        last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
                           XSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
                              XSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* TSTORM */
        last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
                           TSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
                              TSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* CSTORM */
        last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
                           CSTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
                              CSTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        /* USTORM */
        last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
                           USTORM_ASSERT_LIST_INDEX_OFFSET);
        if (last_idx)
                BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

        /* print the asserts */
        for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

                row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i));
                row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 4);
                row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 8);
                row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
                              USTORM_ASSERT_LIST_OFFSET(i) + 12);

                if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
                        BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
                                  " 0x%08x 0x%08x 0x%08x\n",
                                  i, row3, row2, row1, row0);
                        rc++;
                } else {
                        break;
                }
        }

        return rc;
}

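/* print the firmware trace held in the MCP scratchpad (requires MCP) */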
static void bnx2x_fw_dump(struct bnx2x *bp)
{
        u32 addr;
        u32 mark, offset;
        __be32 data[9];
        int word;

        if (BP_NOMCP(bp)) {
                BNX2X_ERR("NO MCP - can not dump\n");
                return;
        }

        addr = bp->common.shmem_base - 0x0800 + 4;
        mark = REG_RD(bp, addr);
        mark = MCP_REG_MCPR_SCRATCH + ((mark + 0x3) & ~0x3) - 0x08000000;
        pr_err("begin fw dump (mark 0x%x)\n", mark);

        pr_err("");
        for (offset = mark; offset <= bp->common.shmem_base; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        for (offset = addr + 4; offset <= mark; offset += 0x8*4) {
                for (word = 0; word < 8; word++)
                        data[word] = htonl(REG_RD(bp, offset + 4*word));
                data[8] = 0x0;
                pr_cont("%s", (char *)data);
        }
        pr_err("end of fw dump\n");
}

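/* dump the driver state on a fatal error: status block indices, the
 * Rx/Tx rings around the current consumers, the firmware trace and the
 * storm assert lists
 */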
void bnx2x_panic_dump(struct bnx2x *bp)
{
        int i;
        u16 j, start, end;

        bp->stats_state = STATS_STATE_DISABLED;
        DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

        BNX2X_ERR("begin crash dump -----------------\n");

        /* Indices */
        /* Common */
        BNX2X_ERR("def_c_idx(0x%x)  def_u_idx(0x%x)  def_x_idx(0x%x)"
                  "  def_t_idx(0x%x)  def_att_idx(0x%x)  attn_state(0x%x)"
                  "  spq_prod_idx(0x%x)\n",
                  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
                  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: rx_bd_prod(0x%x)  rx_bd_cons(0x%x)"
                          "  *rx_bd_cons_sb(0x%x)  rx_comp_prod(0x%x)"
                          "  rx_comp_cons(0x%x)  *rx_cons_sb(0x%x)\n",
                          i, fp->rx_bd_prod, fp->rx_bd_cons,
                          le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
                          fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
                BNX2X_ERR("     rx_sge_prod(0x%x)  last_max_sge(0x%x)"
                          "  fp_u_idx(0x%x) *sb_u_idx(0x%x)\n",
                          fp->rx_sge_prod, fp->last_max_sge,
                          le16_to_cpu(fp->fp_u_idx),
                          fp->status_blk->u_status_block.status_block_index);
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                BNX2X_ERR("fp%d: tx_pkt_prod(0x%x)  tx_pkt_cons(0x%x)"
                          "  tx_bd_prod(0x%x)  tx_bd_cons(0x%x)"
                          "  *tx_cons_sb(0x%x)\n",
                          i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
                          fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
                BNX2X_ERR("     fp_c_idx(0x%x)  *sb_c_idx(0x%x)"
                          "  tx_db_prod(0x%x)\n", le16_to_cpu(fp->fp_c_idx),
                          fp->status_blk->c_status_block.status_block_index,
                          fp->tx_db.data.prod);
        }

        /* Rings */
        /* Rx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
                end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
                for (j = start; j != end; j = RX_BD(j + 1)) {
                        u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
                        struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

                        BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x]  sw_bd=[%p]\n",
                                  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
                }

                start = RX_SGE(fp->rx_sge_prod);
                end = RX_SGE(fp->last_max_sge);
                for (j = start; j != end; j = RX_SGE(j + 1)) {
                        u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
                        struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

                        BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x]  sw_page=[%p]\n",
                                  i, j, rx_sge[1], rx_sge[0], sw_page->page);
                }

                start = RCQ_BD(fp->rx_comp_cons - 10);
                end = RCQ_BD(fp->rx_comp_cons + 503);
                for (j = start; j != end; j = RCQ_BD(j + 1)) {
                        u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

                        BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
                                  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
                }
        }

        /* Tx */
        for_each_queue(bp, i) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
                end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

                        BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
                                  i, j, sw_bd->skb, sw_bd->first_bd);
                }

                start = TX_BD(fp->tx_bd_cons - 10);
                end = TX_BD(fp->tx_bd_cons + 254);
                for (j = start; j != end; j = TX_BD(j + 1)) {
                        u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

                        BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
                                  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
                }
        }

        bnx2x_fw_dump(bp);
        bnx2x_mc_assert(bp);
        BNX2X_ERR("end crash dump -----------------\n");
}

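/* enable host interrupts in the HC according to the active mode
 * (MSI-X, MSI or INTx) and, on E1H, program the leading/trailing edge
 * registers
 */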
void bnx2x_int_enable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

        if (msix) {
                val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                         HC_CONFIG_0_REG_INT_LINE_EN_0);
                val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else if (msi) {
                val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);
        } else {
                val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                        HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                        HC_CONFIG_0_REG_INT_LINE_EN_0 |
                        HC_CONFIG_0_REG_ATTN_BIT_EN_0);

                DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
                   val, port, addr);

                REG_WR(bp, addr, val);

                val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
        }

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)  mode %s\n",
           val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

        REG_WR(bp, addr, val);
        /*
         * Ensure that HC_CONFIG is written before leading/trailing edge config
         */
        mmiowb();
        barrier();

        if (CHIP_IS_E1H(bp)) {
                /* init leading/trailing edge */
                if (IS_E1HMF(bp)) {
                        val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
                        if (bp->port.pmf)
                                /* enable nig and gpio3 attention */
                                val |= 0x1100;
                } else
                        val = 0xffff;

                REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
                REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
        }

        /* Make sure that interrupts are indeed enabled from here on */
        mmiowb();
}

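/* mask all interrupt sources in the HC config register and verify that
 * the write reached the chip
 */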
static void bnx2x_int_disable(struct bnx2x *bp)
{
        int port = BP_PORT(bp);
        u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
        u32 val = REG_RD(bp, addr);

        val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
                 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
                 HC_CONFIG_0_REG_INT_LINE_EN_0 |
                 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

        DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
           val, port, addr);

        /* flush all outstanding writes */
        mmiowb();

        REG_WR(bp, addr, val);
        if (REG_RD(bp, addr) != val)
                BNX2X_ERR("BUG! proper val not read from IGU!\n");
}

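/* stop interrupt handling: bump intr_sem, optionally mask interrupts in
 * HW, then wait for all ISRs and the slowpath task to finish
 */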
void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
        int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
        int i, offset;

        /* disable interrupt handling */
        atomic_inc(&bp->intr_sem);
        smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

        if (disable_hw)
                /* prevent the HW from sending interrupts */
                bnx2x_int_disable(bp);

        /* make sure all ISRs are done */
        if (msix) {
                synchronize_irq(bp->msix_table[0].vector);
                offset = 1;
#ifdef BCM_CNIC
                offset++;
#endif
                for_each_queue(bp, i)
                        synchronize_irq(bp->msix_table[i + offset].vector);
        } else
                synchronize_irq(bp->pdev->irq);

        /* make sure sp_task is not running */
        cancel_delayed_work(&bp->sp_task);
        flush_workqueue(bnx2x_wq);
}

/* fast path */

/*
 * General service functions
 */

/* Return true if the lock was acquired successfully */
static bool bnx2x_trylock_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Trying to take a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return false;
        }

        if (func <= 5)
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        else
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

        /* Try to acquire the lock */
        REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit)
                return true;

        DP(NETIF_MSG_HW, "Failed to get a lock on resource %d\n", resource);
        return false;
}


#ifdef BCM_CNIC
static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid);
#endif

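/* slowpath event handler: process a ramrod completion CQE and advance
 * the per-queue or global driver state machine accordingly
 */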
void bnx2x_sp_event(struct bnx2x_fastpath *fp,
                           union eth_rx_cqe *rr_cqe)
{
        struct bnx2x *bp = fp->bp;
        int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
        int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

        DP(BNX2X_MSG_SP,
           "fp %d  cid %d  got ramrod #%d  state is %x  type is %d\n",
           fp->index, cid, command, bp->state,
           rr_cqe->ramrod_cqe.ramrod_type);

        bp->spq_left++;

        if (fp->index) {
                switch (command | fp->state) {
                case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
                                                BNX2X_FP_STATE_OPENING):
                        DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_OPEN;
                        break;

                case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
                        DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
                           cid);
                        fp->state = BNX2X_FP_STATE_HALTED;
                        break;

                default:
                        BNX2X_ERR("unexpected MC reply (%d)  "
                                  "fp[%d] state is %x\n",
                                  command, fp->index, fp->state);
                        break;
                }
                mb(); /* force bnx2x_wait_ramrod() to see the change */
                return;
        }

        switch (command | bp->state) {
        case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
                DP(NETIF_MSG_IFUP, "got setup ramrod\n");
                bp->state = BNX2X_STATE_OPEN;
                break;

        case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
                bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
                fp->state = BNX2X_FP_STATE_HALTED;
                break;

        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
                bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
                break;

#ifdef BCM_CNIC
        case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_OPEN):
                DP(NETIF_MSG_IFDOWN, "got delete ramrod for CID %d\n", cid);
                bnx2x_cnic_cfc_comp(bp, cid);
                break;
#endif

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
                DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
                DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
                bp->set_mac_pending--;
                smp_wmb();
                break;

        default:
                BNX2X_ERR("unexpected MC reply (%d)  bp->state is %x\n",
                          command, bp->state);
                break;
        }
        mb(); /* force bnx2x_wait_ramrod() to see the change */
}

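/* INTx/MSI interrupt handler: ack the interrupt, schedule NAPI for each
 * fastpath queue whose status block bit is set, and kick the slowpath
 * task for the default status block
 */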
irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
        struct bnx2x *bp = netdev_priv(dev_instance);
        u16 status = bnx2x_ack_int(bp);
        u16 mask;
        int i;

        /* Return here if interrupt is shared and it's not for us */
        if (unlikely(status == 0)) {
                DP(NETIF_MSG_INTR, "not our interrupt!\n");
                return IRQ_NONE;
        }
        DP(NETIF_MSG_INTR, "got an interrupt  status 0x%x\n", status);

        /* Return here if interrupt is disabled */
        if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
                DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
                return IRQ_HANDLED;
        }

#ifdef BNX2X_STOP_ON_ERROR
        if (unlikely(bp->panic))
                return IRQ_HANDLED;
#endif

        for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
                struct bnx2x_fastpath *fp = &bp->fp[i];

                mask = 0x2 << fp->sb_id;
                if (status & mask) {
                        /* Handle Rx and Tx according to SB id */
                        prefetch(fp->rx_cons_sb);
                        prefetch(&fp->status_blk->u_status_block.
                                                status_block_index);
                        prefetch(fp->tx_cons_sb);
                        prefetch(&fp->status_blk->c_status_block.
                                                status_block_index);
                        napi_schedule(&bnx2x_fp(bp, fp->index, napi));
                        status &= ~mask;
                }
        }

#ifdef BCM_CNIC
        mask = 0x2 << CNIC_SB_ID(bp);
        if (status & (mask | 0x1)) {
                struct cnic_ops *c_ops = NULL;

                rcu_read_lock();
                c_ops = rcu_dereference(bp->cnic_ops);
                if (c_ops)
                        c_ops->cnic_handler(bp->cnic_data, NULL);
                rcu_read_unlock();

                status &= ~mask;
        }
#endif

        if (unlikely(status & 0x1)) {
                queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

                status &= ~0x1;
                if (!status)
                        return IRQ_HANDLED;
        }

        if (unlikely(status))
                DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
                   status);

        return IRQ_HANDLED;
}

/* end of fast path */


/* Link */

/*
 * General service functions
 */

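/* acquire a HW resource lock, retrying for up to 5 seconds */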
int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;
        int cnt;

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        if (func <= 5) {
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        } else {
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
        }

        /* Validating that the resource is not already taken */
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (lock_status & resource_bit) {
                DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EEXIST;
        }

        /* Try for 5 seconds, polling every 5 ms */
        for (cnt = 0; cnt < 1000; cnt++) {
                /* Try to acquire the lock */
                REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
                lock_status = REG_RD(bp, hw_lock_control_reg);
                if (lock_status & resource_bit)
                        return 0;

                msleep(5);
        }
        DP(NETIF_MSG_HW, "Timeout\n");
        return -EAGAIN;
}

int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
        u32 lock_status;
        u32 resource_bit = (1 << resource);
        int func = BP_FUNC(bp);
        u32 hw_lock_control_reg;

        DP(NETIF_MSG_HW, "Releasing a lock on resource %d\n", resource);

        /* Validating that the resource is within range */
        if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
                DP(NETIF_MSG_HW,
                   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
                   resource, HW_LOCK_MAX_RESOURCE_VALUE);
                return -EINVAL;
        }

        if (func <= 5) {
                hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
        } else {
                hw_lock_control_reg =
                                (MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
        }

        /* Validating that the resource is currently taken */
        lock_status = REG_RD(bp, hw_lock_control_reg);
        if (!(lock_status & resource_bit)) {
                DP(NETIF_MSG_HW, "lock_status 0x%x  resource_bit 0x%x\n",
                   lock_status, resource_bit);
                return -EFAULT;
        }

        REG_WR(bp, hw_lock_control_reg, resource_bit);
        return 0;
}


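/* read the current value of a GPIO pin, accounting for port swapping */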
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;
        int value;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        /* read GPIO value */
        gpio_reg = REG_RD(bp, MISC_REG_GPIO);

        /* get the requested pin value */
        if ((gpio_reg & gpio_mask) == gpio_mask)
                value = 1;
        else
                value = 0;

        DP(NETIF_MSG_LINK, "pin %d  value 0x%x\n", gpio_num, value);

        return value;
}

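/* drive a GPIO pin low or high, or float it, under the GPIO HW lock */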
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        /* read GPIO and mask except the float bits */
        gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_GPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set CLR */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
                break;

        case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
                   gpio_num, gpio_shift);
                /* clear FLOAT and set SET */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
                break;

        case MISC_REGISTERS_GPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
                   gpio_num, gpio_shift);
                /* set FLOAT */
                gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_GPIO, gpio_reg);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

        return 0;
}

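/* set or clear the interrupt output of a GPIO pin, under the GPIO HW lock */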
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
        /* The GPIO should be swapped if swap register is set and active */
        int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
                         REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
        int gpio_shift = gpio_num +
                        (gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
        u32 gpio_mask = (1 << gpio_shift);
        u32 gpio_reg;

        if (gpio_num > MISC_REGISTERS_GPIO_3) {
                BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
                return -EINVAL;
        }

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
        /* read GPIO int */
        gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

        switch (mode) {
        case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
                DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
                                   "output low\n", gpio_num, gpio_shift);
                /* clear SET and set CLR */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
                break;

        case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
                DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
                                   "output high\n", gpio_num, gpio_shift);
                /* clear CLR and set SET */
                gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
                gpio_reg |=  (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

        return 0;
}

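/* drive an SPIO pin low or high, or float it, under the SPIO HW lock */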
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
        u32 spio_mask = (1 << spio_num);
        u32 spio_reg;

        if ((spio_num < MISC_REGISTERS_SPIO_4) ||
            (spio_num > MISC_REGISTERS_SPIO_7)) {
                BNX2X_ERR("Invalid SPIO %d\n", spio_num);
                return -EINVAL;
        }

        bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
        /* read SPIO and mask except the float bits */
        spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

        switch (mode) {
        case MISC_REGISTERS_SPIO_OUTPUT_LOW:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
                /* clear FLOAT and set CLR */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
                break;

        case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
                /* clear FLOAT and set SET */
                spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                spio_reg |=  (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
                break;

        case MISC_REGISTERS_SPIO_INPUT_HI_Z:
                DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
                /* set FLOAT */
                spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
                break;

        default:
                break;
        }

        REG_WR(bp, MISC_REG_SPIO, spio_reg);
        bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

        return 0;
}

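/* return the link configuration index of the currently active external
 * PHY, taking PHY swapping into account
 */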
int bnx2x_get_link_cfg_idx(struct bnx2x *bp)
{
        u32 sel_phy_idx = 0;
        if (bp->link_vars.link_up) {
                sel_phy_idx = EXT_PHY1;
                /* In case link is SERDES, check if the EXT_PHY2 is the one */
                if ((bp->link_vars.link_status & LINK_STATUS_SERDES_LINK) &&
                    (bp->link_params.phy[EXT_PHY2].supported & SUPPORTED_FIBRE))
                        sel_phy_idx = EXT_PHY2;
        } else {

                switch (bnx2x_phy_selection(&bp->link_params)) {
                case PORT_HW_CFG_PHY_SELECTION_HARDWARE_DEFAULT:
                case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY:
                case PORT_HW_CFG_PHY_SELECTION_FIRST_PHY_PRIORITY:
                        sel_phy_idx = EXT_PHY1;
                        break;
                case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY:
                case PORT_HW_CFG_PHY_SELECTION_SECOND_PHY_PRIORITY:
                        sel_phy_idx = EXT_PHY2;
                        break;
                }
        }
        /*
         * The selected active PHY is always the one after swapping (in case
         * PHY swapping is enabled), so when swapping is enabled we need to
         * reverse the configuration.
         */

        if (bp->link_params.multi_phy_config &
            PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
                if (sel_phy_idx == EXT_PHY1)
                        sel_phy_idx = EXT_PHY2;
                else if (sel_phy_idx == EXT_PHY2)
                        sel_phy_idx = EXT_PHY1;
        }
        return LINK_CONFIG_IDX(sel_phy_idx);
}

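/* translate the negotiated IEEE flow control setting into the advertised
 * Pause/Asym_Pause bits for the active link configuration
 */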
void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
        u8 cfg_idx = bnx2x_get_link_cfg_idx(bp);
        switch (bp->link_vars.ieee_fc &
                MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
                bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
                                          ADVERTISED_Pause);
                break;

        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
                bp->port.advertising[cfg_idx] |= (ADVERTISED_Asym_Pause |
                                         ADVERTISED_Pause);
                break;

        case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
                bp->port.advertising[cfg_idx] |= ADVERTISED_Asym_Pause;
                break;

        default:
                bp->port.advertising[cfg_idx] &= ~(ADVERTISED_Asym_Pause |
                                          ADVERTISED_Pause);
                break;
        }
}


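/* first-time link bring-up: choose flow control defaults (TX-only FC
 * for jumbo MTU), run the PHY init and report the link state
 */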
u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
        if (!BP_NOMCP(bp)) {
                u8 rc;
                int cfx_idx = bnx2x_get_link_cfg_idx(bp);
                u16 req_line_speed = bp->link_params.req_line_speed[cfx_idx];
                /* Initialize link parameters structure variables */
                /* It is recommended to turn off RX FC for jumbo frames
                   for better performance */
                if (bp->dev->mtu > 5000)
                        bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
                else
                        bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

                bnx2x_acquire_phy_lock(bp);

                if (load_mode == LOAD_DIAG) {
                        bp->link_params.loopback_mode = LOOPBACK_XGXS;
                        bp->link_params.req_line_speed[cfx_idx] = SPEED_10000;
                }

                rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

                bnx2x_release_phy_lock(bp);

                bnx2x_calc_fc_adv(bp);

                if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
                        bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
                        bnx2x_link_report(bp);
                }
                bp->link_params.req_line_speed[cfx_idx] = req_line_speed;
                return rc;
        }
        BNX2X_ERR("Bootcode is missing - can not initialize link\n");
        return -EINVAL;
}

void bnx2x_link_set(struct bnx2x *bp)
{
        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
                bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
                bnx2x_phy_init(&bp->link_params, &bp->link_vars);
                bnx2x_release_phy_lock(bp);

                bnx2x_calc_fc_adv(bp);
        } else
                BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
                bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
                bnx2x_release_phy_lock(bp);
        } else
                BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

u8 bnx2x_link_test(struct bnx2x *bp, u8 is_serdes)
{
        u8 rc = 0;

        if (!BP_NOMCP(bp)) {
                bnx2x_acquire_phy_lock(bp);
                rc = bnx2x_test_link(&bp->link_params, &bp->link_vars,
                                     is_serdes);
                bnx2x_release_phy_lock(bp);
        } else
                BNX2X_ERR("Bootcode is missing - can not test link\n");

        return rc;
}

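/* derive the per-port rate shaping and fairness timer parameters from
 * the current line speed
 */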
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
        u32 r_param = bp->link_vars.line_speed / 8;
        u32 fair_periodic_timeout_usec;
        u32 t_fair;

        memset(&(bp->cmng.rs_vars), 0,
               sizeof(struct rate_shaping_vars_per_port));
        memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

        /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
        bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

        /* this is the threshold below which no timer arming will occur;
           the 1.25 coefficient makes the threshold a little bigger than
           the real time, to compensate for timer inaccuracy */
        bp->cmng.rs_vars.rs_threshold =
                                (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

        /* resolution of fairness timer */
        fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
        /* for 10G it is 1000usec. for 1G it is 10000usec. */
        t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

        /* this is the threshold below which we won't arm the timer anymore */
        bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

        /* we multiply by 1e3/8 to get bytes/msec.
           We don't want the credit to exceed
           t_fair * FAIR_MEM (the algorithm resolution) */
        bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
        /* since each tick is 4 usec */
        bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}

/* Calculates the sum of vn_min_rates.
   It's needed for further normalizing of the min_rates.
   Returns:
     sum of vn_min_rates.
       or
     0 - if all the min_rates are 0.
   In the latter case the fairness algorithm should be deactivated.
   If not all min_rates are zero, those that are zero will be set to 1.
 */
1416 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
1417 {
1418         int all_zero = 1;
1419         int port = BP_PORT(bp);
1420         int vn;
1421
1422         bp->vn_weight_sum = 0;
1423         for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1424                 int func = 2*vn + port;
1425                 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1426                 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1427                                    FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1428
1429                 /* Skip hidden vns */
1430                 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
1431                         continue;
1432
1433                 /* If min rate is zero - set it to DEF_MIN_RATE */
1434                 if (!vn_min_rate)
1435                         vn_min_rate = DEF_MIN_RATE;
1436                 else
1437                         all_zero = 0;
1438
1439                 bp->vn_weight_sum += vn_min_rate;
1440         }
1441
1442         /* ... only if all min rates are zero - disable fairness */
1443         if (all_zero) {
1444                 bp->cmng.flags.cmng_enables &=
1445                                         ~CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1446                 DP(NETIF_MSG_IFUP, "All MIN values are zeroes -"
1447                    " fairness will be disabled\n");
1448         } else
1449                 bp->cmng.flags.cmng_enables |=
1450                                         CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
1451 }
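/*
 * Illustrative example (the MIN_BW fields are read from shared memory;
 * the values here are assumptions): with two visible VNs configured to
 * 25% and 75% minimum bandwidth, vn_min_rate becomes 2500 and 7500 (the
 * field is scaled by 100), so vn_weight_sum = 10000 and fairness stays
 * enabled.  If every field were 0, each rate would be bumped to
 * DEF_MIN_RATE but all_zero would remain set and fairness would be
 * disabled.
 */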
1452
1453 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
1454 {
1455         struct rate_shaping_vars_per_vn m_rs_vn;
1456         struct fairness_vars_per_vn m_fair_vn;
1457         u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
1458         u16 vn_min_rate, vn_max_rate;
1459         int i;
1460
1461         /* If function is hidden - set min and max to zeroes */
1462         if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
1463                 vn_min_rate = 0;
1464                 vn_max_rate = 0;
1465
1466         } else {
1467                 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
1468                                 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
1469                 /* If min rate is zero - set it to DEF_MIN_RATE */
1470                 if (!vn_min_rate)
1471                         vn_min_rate = DEF_MIN_RATE;
1472                 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
1473                                 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
1474         }
1475         DP(NETIF_MSG_IFUP,
1476            "func %d: vn_min_rate %d  vn_max_rate %d  vn_weight_sum %d\n",
1477            func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
1478
1479         memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
1480         memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
1481
1482         /* global vn counter - maximal Mbps for this vn */
1483         m_rs_vn.vn_counter.rate = vn_max_rate;
1484
1485         /* quota - number of bytes transmitted in this period */
1486         m_rs_vn.vn_counter.quota =
1487                                 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
1488
1489         if (bp->vn_weight_sum) {
1490                 /* credit for each period of the fairness algorithm:
1491                    number of bytes in T_FAIR (the VNs share the port rate).
1492                    vn_weight_sum should not be larger than 10000, thus
1493                    T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
1494                    than zero */
1495                 m_fair_vn.vn_credit_delta =
1496                         max_t(u32, (vn_min_rate * (T_FAIR_COEF /
1497                                                    (8 * bp->vn_weight_sum))),
1498                               (bp->cmng.fair_vars.fair_threshold * 2));
1499                 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta %d\n",
1500                    m_fair_vn.vn_credit_delta);
1501         }
1502
1503         /* Store it to internal memory */
1504         for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
1505                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1506                        XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
1507                        ((u32 *)(&m_rs_vn))[i]);
1508
1509         for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
1510                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1511                        XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
1512                        ((u32 *)(&m_fair_vn))[i]);
1513 }
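/*
 * Worked example for the quota/credit math above (a sketch, assuming a
 * 10G port with vn_max_rate = 10000, vn_min_rate = 2500 and
 * vn_weight_sum = 10000):
 *
 *   vn_counter.quota = (10000 * 100) / 8 = 125000 bytes per 100 usec
 *   vn_credit_delta  = max(2500 * (T_FAIR_COEF / (8 * 10000)),
 *                          2 * fair_threshold)
 *
 * i.e. the quota caps the VN at its max rate per rate-shaping period,
 * while the credit delta is the VN's fair share of bytes per T_FAIR.
 */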
1514
1515
1516 /* This function is called upon link interrupt */
1517 static void bnx2x_link_attn(struct bnx2x *bp)
1518 {
1519         u32 prev_link_status = bp->link_vars.link_status;
1520         /* Make sure that we are synced with the current statistics */
1521         bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1522
1523         bnx2x_link_update(&bp->link_params, &bp->link_vars);
1524
1525         if (bp->link_vars.link_up) {
1526
1527                 /* dropless flow control */
1528                 if (CHIP_IS_E1H(bp) && bp->dropless_fc) {
1529                         int port = BP_PORT(bp);
1530                         u32 pause_enabled = 0;
1531
1532                         if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
1533                                 pause_enabled = 1;
1534
1535                         REG_WR(bp, BAR_USTRORM_INTMEM +
1536                                USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
1537                                pause_enabled);
1538                 }
1539
1540                 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
1541                         struct host_port_stats *pstats;
1542
1543                         pstats = bnx2x_sp(bp, port_stats);
1544                         /* reset old bmac stats */
1545                         memset(&(pstats->mac_stx[0]), 0,
1546                                sizeof(struct mac_stx));
1547                 }
1548                 if (bp->state == BNX2X_STATE_OPEN)
1549                         bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1550         }
1551
1552         /* indicate link status only if it has actually changed */
1553         if (prev_link_status != bp->link_vars.link_status)
1554                 bnx2x_link_report(bp);
1555
1556         if (IS_E1HMF(bp)) {
1557                 int port = BP_PORT(bp);
1558                 int func;
1559                 int vn;
1560
1561                 /* Set the attention towards other drivers on the same port */
1562                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1563                         if (vn == BP_E1HVN(bp))
1564                                 continue;
1565
1566                         func = ((vn << 1) | port);
1567                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1568                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1569                 }
1570
1571                 if (bp->link_vars.link_up) {
1572                         int i;
1573
1574                         /* Init rate shaping and fairness contexts */
1575                         bnx2x_init_port_minmax(bp);
1576
1577                         for (vn = VN_0; vn < E1HVN_MAX; vn++)
1578                                 bnx2x_init_vn_minmax(bp, 2*vn + port);
1579
1580                         /* Store it to internal memory */
1581                         for (i = 0;
1582                              i < sizeof(struct cmng_struct_per_port) / 4; i++)
1583                                 REG_WR(bp, BAR_XSTRORM_INTMEM +
1584                                   XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1585                                        ((u32 *)(&bp->cmng))[i]);
1586                 }
1587         }
1588 }
1589
1590 void bnx2x__link_status_update(struct bnx2x *bp)
1591 {
1592         if ((bp->state != BNX2X_STATE_OPEN) || (bp->flags & MF_FUNC_DIS))
1593                 return;
1594
1595         bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
1596
1597         if (bp->link_vars.link_up)
1598                 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
1599         else
1600                 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
1601
1602         bnx2x_calc_vn_weight_sum(bp);
1603
1604         /* indicate link status */
1605         bnx2x_link_report(bp);
1606 }
1607
1608 static void bnx2x_pmf_update(struct bnx2x *bp)
1609 {
1610         int port = BP_PORT(bp);
1611         u32 val;
1612
1613         bp->port.pmf = 1;
1614         DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
1615
1616         /* enable nig attention */
1617         val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
1618         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
1619         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
1620
1621         bnx2x_stats_handle(bp, STATS_EVENT_PMF);
1622 }
1623
1624 /* end of Link */
1625
1626 /* slow path */
1627
1628 /*
1629  * General service functions
1630  */
1631
1632 /* send the MCP a request, block until there is a reply */
1633 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command, u32 param)
1634 {
1635         int func = BP_FUNC(bp);
1636         u32 seq = ++bp->fw_seq;
1637         u32 rc = 0;
1638         u32 cnt = 1;
1639         u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
1640
1641         mutex_lock(&bp->fw_mb_mutex);
1642         SHMEM_WR(bp, func_mb[func].drv_mb_param, param);
1643         SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
1644         DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
1645
1646         do {
1647                 /* let the FW do its magic ... */
1648                 msleep(delay);
1649
1650                 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
1651
1652                 /* Give the FW up to 5 seconds (500*10ms) */
1653         } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 500));
1654
1655         DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
1656            cnt*delay, rc, seq);
1657
1658         /* is this a reply to our command? */
1659         if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK))
1660                 rc &= FW_MSG_CODE_MASK;
1661         else {
1662                 /* FW BUG! */
1663                 BNX2X_ERR("FW failed to respond!\n");
1664                 bnx2x_fw_dump(bp);
1665                 rc = 0;
1666         }
1667         mutex_unlock(&bp->fw_mb_mutex);
1668
1669         return rc;
1670 }
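/*
 * Usage sketch (illustrative): a caller passes one of the DRV_MSG_CODE_*
 * commands and checks the masked FW_MSG_CODE_* reply, e.g.
 *
 *   u32 rc = bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
 *   if (rc == 0)
 *           the FW did not respond and bnx2x_fw_dump() was already called
 *
 * The sequence number OR-ed into the command is what lets the polling
 * loop above tell a fresh reply from a stale mailbox value.
 */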
1671
1672 static void bnx2x_e1h_disable(struct bnx2x *bp)
1673 {
1674         int port = BP_PORT(bp);
1675
1676         netif_tx_disable(bp->dev);
1677
1678         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
1679
1680         netif_carrier_off(bp->dev);
1681 }
1682
1683 static void bnx2x_e1h_enable(struct bnx2x *bp)
1684 {
1685         int port = BP_PORT(bp);
1686
1687         REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
1688
1689         /* Tx queues should only be re-enabled */
1690         netif_tx_wake_all_queues(bp->dev);
1691
1692         /*
1693          * Do not call netif_carrier_on() here; the link state check will
1694          * call it if the link is up
1695          */
1696 }
1697
1698 static void bnx2x_update_min_max(struct bnx2x *bp)
1699 {
1700         int port = BP_PORT(bp);
1701         int vn, i;
1702
1703         /* Init rate shaping and fairness contexts */
1704         bnx2x_init_port_minmax(bp);
1705
1706         bnx2x_calc_vn_weight_sum(bp);
1707
1708         for (vn = VN_0; vn < E1HVN_MAX; vn++)
1709                 bnx2x_init_vn_minmax(bp, 2*vn + port);
1710
1711         if (bp->port.pmf) {
1712                 int func;
1713
1714                 /* Set the attention towards other drivers on the same port */
1715                 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
1716                         if (vn == BP_E1HVN(bp))
1717                                 continue;
1718
1719                         func = ((vn << 1) | port);
1720                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
1721                                (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
1722                 }
1723
1724                 /* Store it to internal memory */
1725                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
1726                         REG_WR(bp, BAR_XSTRORM_INTMEM +
1727                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
1728                                ((u32 *)(&bp->cmng))[i]);
1729         }
1730 }
1731
1732 static void bnx2x_dcc_event(struct bnx2x *bp, u32 dcc_event)
1733 {
1734         DP(BNX2X_MSG_MCP, "dcc_event 0x%x\n", dcc_event);
1735
1736         if (dcc_event & DRV_STATUS_DCC_DISABLE_ENABLE_PF) {
1737
1738                 /*
1739                  * This is the only place besides the function initialization
1740                  * where bp->flags can change, so it is done without any
1741                  * locks
1742                  */
1743                 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
1744                         DP(NETIF_MSG_IFDOWN, "mf_cfg function disabled\n");
1745                         bp->flags |= MF_FUNC_DIS;
1746
1747                         bnx2x_e1h_disable(bp);
1748                 } else {
1749                         DP(NETIF_MSG_IFUP, "mf_cfg function enabled\n");
1750                         bp->flags &= ~MF_FUNC_DIS;
1751
1752                         bnx2x_e1h_enable(bp);
1753                 }
1754                 dcc_event &= ~DRV_STATUS_DCC_DISABLE_ENABLE_PF;
1755         }
1756         if (dcc_event & DRV_STATUS_DCC_BANDWIDTH_ALLOCATION) {
1757
1758                 bnx2x_update_min_max(bp);
1759                 dcc_event &= ~DRV_STATUS_DCC_BANDWIDTH_ALLOCATION;
1760         }
1761
1762         /* Report results to MCP */
1763         if (dcc_event)
1764                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_FAILURE, 0);
1765         else
1766                 bnx2x_fw_command(bp, DRV_MSG_CODE_DCC_OK, 0);
1767 }
1768
1769 /* must be called under the spq lock */
1770 static inline struct eth_spe *bnx2x_sp_get_next(struct bnx2x *bp)
1771 {
1772         struct eth_spe *next_spe = bp->spq_prod_bd;
1773
1774         if (bp->spq_prod_bd == bp->spq_last_bd) {
1775                 bp->spq_prod_bd = bp->spq;
1776                 bp->spq_prod_idx = 0;
1777                 DP(NETIF_MSG_TIMER, "end of spq\n");
1778         } else {
1779                 bp->spq_prod_bd++;
1780                 bp->spq_prod_idx++;
1781         }
1782         return next_spe;
1783 }
1784
1785 /* must be called under the spq lock */
1786 static inline void bnx2x_sp_prod_update(struct bnx2x *bp)
1787 {
1788         int func = BP_FUNC(bp);
1789
1790         /* Make sure that BD data is updated before writing the producer */
1791         wmb();
1792
1793         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
1794                bp->spq_prod_idx);
1795         mmiowb();
1796 }
1797
1798 /* the slow path queue is odd since completions arrive on the fastpath ring */
1799 int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
1800                          u32 data_hi, u32 data_lo, int common)
1801 {
1802         struct eth_spe *spe;
1803
1804 #ifdef BNX2X_STOP_ON_ERROR
1805         if (unlikely(bp->panic))
1806                 return -EIO;
1807 #endif
1808
1809         spin_lock_bh(&bp->spq_lock);
1810
1811         if (!bp->spq_left) {
1812                 BNX2X_ERR("BUG! SPQ ring full!\n");
1813                 spin_unlock_bh(&bp->spq_lock);
1814                 bnx2x_panic();
1815                 return -EBUSY;
1816         }
1817
1818         spe = bnx2x_sp_get_next(bp);
1819
1820         /* CID needs the port number to be encoded in it */
1821         spe->hdr.conn_and_cmd_data =
1822                         cpu_to_le32((command << SPE_HDR_CMD_ID_SHIFT) |
1823                                     HW_CID(bp, cid));
1824         spe->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
1825         if (common)
1826                 spe->hdr.type |=
1827                         cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
1828
1829         spe->data.mac_config_addr.hi = cpu_to_le32(data_hi);
1830         spe->data.mac_config_addr.lo = cpu_to_le32(data_lo);
1831
1832         bp->spq_left--;
1833
1834         DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
1835            "SPQE[%x] (%x:%x)  command %d  hw_cid %x  data (%x:%x)  left %x\n",
1836            bp->spq_prod_idx, (u32)U64_HI(bp->spq_mapping),
1837            (u32)(U64_LO(bp->spq_mapping) +
1838            (void *)bp->spq_prod_bd - (void *)bp->spq), command,
1839            HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
1840
1841         bnx2x_sp_prod_update(bp);
1842         spin_unlock_bh(&bp->spq_lock);
1843         return 0;
1844 }
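/*
 * Usage sketch (illustrative; the opcode and the config_mapping DMA
 * address are assumptions for the example): posting a common ramrod on
 * connection 0 -
 *
 *   bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
 *                 U64_HI(config_mapping), U64_LO(config_mapping), 1);
 *
 * The completion arrives on the fastpath ring, which is why this queue
 * only tracks spq_left and never polls for its own completions.
 */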
1845
1846 /* acquire split MCP access lock register */
1847 static int bnx2x_acquire_alr(struct bnx2x *bp)
1848 {
1849         u32 j, val;
1850         int rc = 0;
1851
1852         might_sleep();
1853         for (j = 0; j < 1000; j++) {
1854                 val = (1UL << 31);
1855                 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
1856                 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
1857                 if (val & (1UL << 31))
1858                         break;
1859
1860                 msleep(5);
1861         }
1862         if (!(val & (1UL << 31))) {
1863                 BNX2X_ERR("Cannot acquire MCP access lock register\n");
1864                 rc = -EBUSY;
1865         }
1866
1867         return rc;
1868 }
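/*
 * Lock protocol (as implemented above): writing bit 31 of the MCP
 * split-lock register is the request; the bit reads back as set only
 * for the winner.  With up to 1000 attempts at 5 ms apiece the acquire
 * can block for roughly 5 seconds before giving up with -EBUSY.
 */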
1869
1870 /* release split MCP access lock register */
1871 static void bnx2x_release_alr(struct bnx2x *bp)
1872 {
1873         REG_WR(bp, GRCBASE_MCP + 0x9c, 0);
1874 }
1875
1876 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
1877 {
1878         struct host_def_status_block *def_sb = bp->def_status_blk;
1879         u16 rc = 0;
1880
1881         barrier(); /* status block is written to by the chip */
1882         if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
1883                 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
1884                 rc |= 1;
1885         }
1886         if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
1887                 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
1888                 rc |= 2;
1889         }
1890         if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
1891                 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
1892                 rc |= 4;
1893         }
1894         if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
1895                 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
1896                 rc |= 8;
1897         }
1898         if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
1899                 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
1900                 rc |= 16;
1901         }
1902         return rc;
1903 }
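/*
 * The return value is a bitmask of which indices moved: bit 0 -
 * attention bits, bit 1 - CSTORM, bit 2 - USTORM, bit 3 - XSTORM,
 * bit 4 - TSTORM.  bnx2x_sp_task() below explicitly consumes bit 0
 * (HW attentions) and bit 1 (CStorm STAT_QUERY events).
 */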
1904
1905 /*
1906  * slow path service functions
1907  */
1908
1909 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
1910 {
1911         int port = BP_PORT(bp);
1912         u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
1913                        COMMAND_REG_ATTN_BITS_SET);
1914         u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
1915                               MISC_REG_AEU_MASK_ATTN_FUNC_0;
1916         u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
1917                                        NIG_REG_MASK_INTERRUPT_PORT0;
1918         u32 aeu_mask;
1919         u32 nig_mask = 0;
1920
1921         if (bp->attn_state & asserted)
1922                 BNX2X_ERR("IGU ERROR\n");
1923
1924         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1925         aeu_mask = REG_RD(bp, aeu_addr);
1926
1927         DP(NETIF_MSG_HW, "aeu_mask %x  newly asserted %x\n",
1928            aeu_mask, asserted);
1929         aeu_mask &= ~(asserted & 0x3ff);
1930         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
1931
1932         REG_WR(bp, aeu_addr, aeu_mask);
1933         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
1934
1935         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
1936         bp->attn_state |= asserted;
1937         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
1938
1939         if (asserted & ATTN_HARD_WIRED_MASK) {
1940                 if (asserted & ATTN_NIG_FOR_FUNC) {
1941
1942                         bnx2x_acquire_phy_lock(bp);
1943
1944                         /* save nig interrupt mask */
1945                         nig_mask = REG_RD(bp, nig_int_mask_addr);
1946                         REG_WR(bp, nig_int_mask_addr, 0);
1947
1948                         bnx2x_link_attn(bp);
1949
1950                         /* handle unicore attn? */
1951                 }
1952                 if (asserted & ATTN_SW_TIMER_4_FUNC)
1953                         DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
1954
1955                 if (asserted & GPIO_2_FUNC)
1956                         DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
1957
1958                 if (asserted & GPIO_3_FUNC)
1959                         DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
1960
1961                 if (asserted & GPIO_4_FUNC)
1962                         DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
1963
1964                 if (port == 0) {
1965                         if (asserted & ATTN_GENERAL_ATTN_1) {
1966                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
1967                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
1968                         }
1969                         if (asserted & ATTN_GENERAL_ATTN_2) {
1970                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
1971                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
1972                         }
1973                         if (asserted & ATTN_GENERAL_ATTN_3) {
1974                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
1975                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
1976                         }
1977                 } else {
1978                         if (asserted & ATTN_GENERAL_ATTN_4) {
1979                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
1980                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
1981                         }
1982                         if (asserted & ATTN_GENERAL_ATTN_5) {
1983                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
1984                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
1985                         }
1986                         if (asserted & ATTN_GENERAL_ATTN_6) {
1987                                 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
1988                                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
1989                         }
1990                 }
1991
1992         } /* if hardwired */
1993
1994         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
1995            asserted, hc_addr);
1996         REG_WR(bp, hc_addr, asserted);
1997
1998         /* now set back the mask */
1999         if (asserted & ATTN_NIG_FOR_FUNC) {
2000                 REG_WR(bp, nig_int_mask_addr, nig_mask);
2001                 bnx2x_release_phy_lock(bp);
2002         }
2003 }
2004
2005 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2006 {
2007         int port = BP_PORT(bp);
2008         u32 ext_phy_config;
2009         /* mark the failure */
2010         ext_phy_config =
2011                 SHMEM_RD(bp,
2012                          dev_info.port_hw_config[port].external_phy_config);
2013
2014         ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2015         ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2016         SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2017                  ext_phy_config);
2018
2019         /* log the failure */
2020         netdev_err(bp->dev, "Fan Failure on Network Controller has caused"
2021                " the driver to shut down the card to prevent permanent"
2022                " damage.  Please contact OEM Support for assistance\n");
2023 }
2024
2025 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2026 {
2027         int port = BP_PORT(bp);
2028         int reg_offset;
2029         u32 val;
2030
2031         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2032                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2033
2034         if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2035
2036                 val = REG_RD(bp, reg_offset);
2037                 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2038                 REG_WR(bp, reg_offset, val);
2039
2040                 BNX2X_ERR("SPIO5 hw attention\n");
2041
2042                 /* Fan failure attention */
2043                 bnx2x_hw_reset_phy(&bp->link_params);
2044                 bnx2x_fan_failure(bp);
2045         }
2046
2047         if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2048                     AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2049                 bnx2x_acquire_phy_lock(bp);
2050                 bnx2x_handle_module_detect_int(&bp->link_params);
2051                 bnx2x_release_phy_lock(bp);
2052         }
2053
2054         if (attn & HW_INTERRUT_ASSERT_SET_0) {
2055
2056                 val = REG_RD(bp, reg_offset);
2057                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2058                 REG_WR(bp, reg_offset, val);
2059
2060                 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2061                           (u32)(attn & HW_INTERRUT_ASSERT_SET_0));
2062                 bnx2x_panic();
2063         }
2064 }
2065
2066 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2067 {
2068         u32 val;
2069
2070         if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2071
2072                 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2073                 BNX2X_ERR("DB hw attention 0x%x\n", val);
2074                 /* DORQ discard attention */
2075                 if (val & 0x2)
2076                         BNX2X_ERR("FATAL error from DORQ\n");
2077         }
2078
2079         if (attn & HW_INTERRUT_ASSERT_SET_1) {
2080
2081                 int port = BP_PORT(bp);
2082                 int reg_offset;
2083
2084                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2085                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2086
2087                 val = REG_RD(bp, reg_offset);
2088                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2089                 REG_WR(bp, reg_offset, val);
2090
2091                 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2092                           (u32)(attn & HW_INTERRUT_ASSERT_SET_1));
2093                 bnx2x_panic();
2094         }
2095 }
2096
2097 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2098 {
2099         u32 val;
2100
2101         if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2102
2103                 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2104                 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2105                 /* CFC error attention */
2106                 if (val & 0x2)
2107                         BNX2X_ERR("FATAL error from CFC\n");
2108         }
2109
2110         if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2111
2112                 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2113                 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2114                 /* RQ_USDMDP_FIFO_OVERFLOW */
2115                 if (val & 0x18000)
2116                         BNX2X_ERR("FATAL error from PXP\n");
2117         }
2118
2119         if (attn & HW_INTERRUT_ASSERT_SET_2) {
2120
2121                 int port = BP_PORT(bp);
2122                 int reg_offset;
2123
2124                 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2125                                      MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2126
2127                 val = REG_RD(bp, reg_offset);
2128                 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2129                 REG_WR(bp, reg_offset, val);
2130
2131                 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2132                           (u32)(attn & HW_INTERRUT_ASSERT_SET_2));
2133                 bnx2x_panic();
2134         }
2135 }
2136
2137 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2138 {
2139         u32 val;
2140
2141         if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2142
2143                 if (attn & BNX2X_PMF_LINK_ASSERT) {
2144                         int func = BP_FUNC(bp);
2145
2146                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2147                         bp->mf_config = SHMEM_RD(bp,
2148                                            mf_cfg.func_mf_config[func].config);
2149                         val = SHMEM_RD(bp, func_mb[func].drv_status);
2150                         if (val & DRV_STATUS_DCC_EVENT_MASK)
2151                                 bnx2x_dcc_event(bp,
2152                                             (val & DRV_STATUS_DCC_EVENT_MASK));
2153                         bnx2x__link_status_update(bp);
2154                         if ((bp->port.pmf == 0) && (val & DRV_STATUS_PMF))
2155                                 bnx2x_pmf_update(bp);
2156
2157                 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2158
2159                         BNX2X_ERR("MC assert!\n");
2160                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2161                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2162                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2163                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2164                         bnx2x_panic();
2165
2166                 } else if (attn & BNX2X_MCP_ASSERT) {
2167
2168                         BNX2X_ERR("MCP assert!\n");
2169                         REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2170                         bnx2x_fw_dump(bp);
2171
2172                 } else
2173                         BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2174         }
2175
2176         if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2177                 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2178                 if (attn & BNX2X_GRC_TIMEOUT) {
2179                         val = CHIP_IS_E1H(bp) ?
2180                                 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2181                         BNX2X_ERR("GRC time-out 0x%08x\n", val);
2182                 }
2183                 if (attn & BNX2X_GRC_RSV) {
2184                         val = CHIP_IS_E1H(bp) ?
2185                                 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2186                         BNX2X_ERR("GRC reserved 0x%08x\n", val);
2187                 }
2188                 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2189         }
2190 }
2191
2192 #define BNX2X_MISC_GEN_REG      MISC_REG_GENERIC_POR_1
2193 #define LOAD_COUNTER_BITS       16 /* Number of bits for load counter */
2194 #define LOAD_COUNTER_MASK       (((u32)0x1 << LOAD_COUNTER_BITS) - 1)
2195 #define RESET_DONE_FLAG_MASK    (~LOAD_COUNTER_MASK)
2196 #define RESET_DONE_FLAG_SHIFT   LOAD_COUNTER_BITS
2197 #define CHIP_PARITY_SUPPORTED(bp)   (CHIP_IS_E1(bp) || CHIP_IS_E1H(bp))
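/*
 * Layout of BNX2X_MISC_GEN_REG as used below (derived from the masks
 * above): bits [15:0] hold the load counter (how many functions have
 * the device loaded), bit 16 is the reset-in-progress flag.  E.g. a raw
 * value of 0x00010002 means a reset is in progress while two functions
 * are loaded.
 */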
2198 /*
2199  * should be run under rtnl lock
2200  */
2201 static inline void bnx2x_set_reset_done(struct bnx2x *bp)
2202 {
2203         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2204         val &= ~(1 << RESET_DONE_FLAG_SHIFT);
2205         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2206         barrier();
2207         mmiowb();
2208 }
2209
2210 /*
2211  * should be run under rtnl lock
2212  */
2213 static inline void bnx2x_set_reset_in_progress(struct bnx2x *bp)
2214 {
2215         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2216         val |= (1 << RESET_DONE_FLAG_SHIFT);
2217         REG_WR(bp, BNX2X_MISC_GEN_REG, val);
2218         barrier();
2219         mmiowb();
2220 }
2221
2222 /*
2223  * should be run under rtnl lock
2224  */
2225 bool bnx2x_reset_is_done(struct bnx2x *bp)
2226 {
2227         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2228         DP(NETIF_MSG_HW, "GEN_REG_VAL=0x%08x\n", val);
2229         return (val & RESET_DONE_FLAG_MASK) ? false : true;
2230 }
2231
2232 /*
2233  * should be run under rtnl lock
2234  */
2235 inline void bnx2x_inc_load_cnt(struct bnx2x *bp)
2236 {
2237         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2238
2239         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2240
2241         val1 = ((val & LOAD_COUNTER_MASK) + 1) & LOAD_COUNTER_MASK;
2242         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2243         barrier();
2244         mmiowb();
2245 }
2246
2247 /*
2248  * should be run under rtnl lock
2249  */
2250 u32 bnx2x_dec_load_cnt(struct bnx2x *bp)
2251 {
2252         u32 val1, val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2253
2254         DP(NETIF_MSG_HW, "Old GEN_REG_VAL=0x%08x\n", val);
2255
2256         val1 = ((val & LOAD_COUNTER_MASK) - 1) & LOAD_COUNTER_MASK;
2257         REG_WR(bp, BNX2X_MISC_GEN_REG, (val & RESET_DONE_FLAG_MASK) | val1);
2258         barrier();
2259         mmiowb();
2260
2261         return val1;
2262 }
2263
2264 /*
2265  * should be run under rtnl lock
2266  */
2267 static inline u32 bnx2x_get_load_cnt(struct bnx2x *bp)
2268 {
2269         return REG_RD(bp, BNX2X_MISC_GEN_REG) & LOAD_COUNTER_MASK;
2270 }
2271
2272 static inline void bnx2x_clear_load_cnt(struct bnx2x *bp)
2273 {
2274         u32 val = REG_RD(bp, BNX2X_MISC_GEN_REG);
2275         REG_WR(bp, BNX2X_MISC_GEN_REG, val & (~LOAD_COUNTER_MASK));
2276 }
2277
2278 static inline void _print_next_block(int idx, const char *blk)
2279 {
2280         if (idx)
2281                 pr_cont(", ");
2282         pr_cont("%s", blk);
2283 }
2284
2285 static inline int bnx2x_print_blocks_with_parity0(u32 sig, int par_num)
2286 {
2287         int i = 0;
2288         u32 cur_bit = 0;
2289         for (i = 0; sig; i++) {
2290                 cur_bit = ((u32)0x1 << i);
2291                 if (sig & cur_bit) {
2292                         switch (cur_bit) {
2293                         case AEU_INPUTS_ATTN_BITS_BRB_PARITY_ERROR:
2294                                 _print_next_block(par_num++, "BRB");
2295                                 break;
2296                         case AEU_INPUTS_ATTN_BITS_PARSER_PARITY_ERROR:
2297                                 _print_next_block(par_num++, "PARSER");
2298                                 break;
2299                         case AEU_INPUTS_ATTN_BITS_TSDM_PARITY_ERROR:
2300                                 _print_next_block(par_num++, "TSDM");
2301                                 break;
2302                         case AEU_INPUTS_ATTN_BITS_SEARCHER_PARITY_ERROR:
2303                                 _print_next_block(par_num++, "SEARCHER");
2304                                 break;
2305                         case AEU_INPUTS_ATTN_BITS_TSEMI_PARITY_ERROR:
2306                                 _print_next_block(par_num++, "TSEMI");
2307                                 break;
2308                         }
2309
2310                         /* Clear the bit */
2311                         sig &= ~cur_bit;
2312                 }
2313         }
2314
2315         return par_num;
2316 }
2317
2318 static inline int bnx2x_print_blocks_with_parity1(u32 sig, int par_num)
2319 {
2320         int i = 0;
2321         u32 cur_bit = 0;
2322         for (i = 0; sig; i++) {
2323                 cur_bit = ((u32)0x1 << i);
2324                 if (sig & cur_bit) {
2325                         switch (cur_bit) {
2326                         case AEU_INPUTS_ATTN_BITS_PBCLIENT_PARITY_ERROR:
2327                                 _print_next_block(par_num++, "PBCLIENT");
2328                                 break;
2329                         case AEU_INPUTS_ATTN_BITS_QM_PARITY_ERROR:
2330                                 _print_next_block(par_num++, "QM");
2331                                 break;
2332                         case AEU_INPUTS_ATTN_BITS_XSDM_PARITY_ERROR:
2333                                 _print_next_block(par_num++, "XSDM");
2334                                 break;
2335                         case AEU_INPUTS_ATTN_BITS_XSEMI_PARITY_ERROR:
2336                                 _print_next_block(par_num++, "XSEMI");
2337                                 break;
2338                         case AEU_INPUTS_ATTN_BITS_DOORBELLQ_PARITY_ERROR:
2339                                 _print_next_block(par_num++, "DOORBELLQ");
2340                                 break;
2341                         case AEU_INPUTS_ATTN_BITS_VAUX_PCI_CORE_PARITY_ERROR:
2342                                 _print_next_block(par_num++, "VAUX PCI CORE");
2343                                 break;
2344                         case AEU_INPUTS_ATTN_BITS_DEBUG_PARITY_ERROR:
2345                                 _print_next_block(par_num++, "DEBUG");
2346                                 break;
2347                         case AEU_INPUTS_ATTN_BITS_USDM_PARITY_ERROR:
2348                                 _print_next_block(par_num++, "USDM");
2349                                 break;
2350                         case AEU_INPUTS_ATTN_BITS_USEMI_PARITY_ERROR:
2351                                 _print_next_block(par_num++, "USEMI");
2352                                 break;
2353                         case AEU_INPUTS_ATTN_BITS_UPB_PARITY_ERROR:
2354                                 _print_next_block(par_num++, "UPB");
2355                                 break;
2356                         case AEU_INPUTS_ATTN_BITS_CSDM_PARITY_ERROR:
2357                                 _print_next_block(par_num++, "CSDM");
2358                                 break;
2359                         }
2360
2361                         /* Clear the bit */
2362                         sig &= ~cur_bit;
2363                 }
2364         }
2365
2366         return par_num;
2367 }
2368
2369 static inline int bnx2x_print_blocks_with_parity2(u32 sig, int par_num)
2370 {
2371         int i = 0;
2372         u32 cur_bit = 0;
2373         for (i = 0; sig; i++) {
2374                 cur_bit = ((u32)0x1 << i);
2375                 if (sig & cur_bit) {
2376                         switch (cur_bit) {
2377                         case AEU_INPUTS_ATTN_BITS_CSEMI_PARITY_ERROR:
2378                                 _print_next_block(par_num++, "CSEMI");
2379                                 break;
2380                         case AEU_INPUTS_ATTN_BITS_PXP_PARITY_ERROR:
2381                                 _print_next_block(par_num++, "PXP");
2382                                 break;
2383                         case AEU_IN_ATTN_BITS_PXPPCICLOCKCLIENT_PARITY_ERROR:
2384                                 _print_next_block(par_num++,
2385                                         "PXPPCICLOCKCLIENT");
2386                                 break;
2387                         case AEU_INPUTS_ATTN_BITS_CFC_PARITY_ERROR:
2388                                 _print_next_block(par_num++, "CFC");
2389                                 break;
2390                         case AEU_INPUTS_ATTN_BITS_CDU_PARITY_ERROR:
2391                                 _print_next_block(par_num++, "CDU");
2392                                 break;
2393                         case AEU_INPUTS_ATTN_BITS_IGU_PARITY_ERROR:
2394                                 _print_next_block(par_num++, "IGU");
2395                                 break;
2396                         case AEU_INPUTS_ATTN_BITS_MISC_PARITY_ERROR:
2397                                 _print_next_block(par_num++, "MISC");
2398                                 break;
2399                         }
2400
2401                         /* Clear the bit */
2402                         sig &= ~cur_bit;
2403                 }
2404         }
2405
2406         return par_num;
2407 }
2408
2409 static inline int bnx2x_print_blocks_with_parity3(u32 sig, int par_num)
2410 {
2411         int i = 0;
2412         u32 cur_bit = 0;
2413         for (i = 0; sig; i++) {
2414                 cur_bit = ((u32)0x1 << i);
2415                 if (sig & cur_bit) {
2416                         switch (cur_bit) {
2417                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_ROM_PARITY:
2418                                 _print_next_block(par_num++, "MCP ROM");
2419                                 break;
2420                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_RX_PARITY:
2421                                 _print_next_block(par_num++, "MCP UMP RX");
2422                                 break;
2423                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_UMP_TX_PARITY:
2424                                 _print_next_block(par_num++, "MCP UMP TX");
2425                                 break;
2426                         case AEU_INPUTS_ATTN_BITS_MCP_LATCHED_SCPAD_PARITY:
2427                                 _print_next_block(par_num++, "MCP SCPAD");
2428                                 break;
2429                         }
2430
2431                         /* Clear the bit */
2432                         sig &= ~cur_bit;
2433                 }
2434         }
2435
2436         return par_num;
2437 }
2438
2439 static inline bool bnx2x_parity_attn(struct bnx2x *bp, u32 sig0, u32 sig1,
2440                                      u32 sig2, u32 sig3)
2441 {
2442         if ((sig0 & HW_PRTY_ASSERT_SET_0) || (sig1 & HW_PRTY_ASSERT_SET_1) ||
2443             (sig2 & HW_PRTY_ASSERT_SET_2) || (sig3 & HW_PRTY_ASSERT_SET_3)) {
2444                 int par_num = 0;
2445                 DP(NETIF_MSG_HW, "Parity error: HW block parity attention: "
2446                         "[0]:0x%08x [1]:0x%08x "
2447                         "[2]:0x%08x [3]:0x%08x\n",
2448                           sig0 & HW_PRTY_ASSERT_SET_0,
2449                           sig1 & HW_PRTY_ASSERT_SET_1,
2450                           sig2 & HW_PRTY_ASSERT_SET_2,
2451                           sig3 & HW_PRTY_ASSERT_SET_3);
2452                 printk(KERN_ERR "%s: Parity errors detected in blocks: ",
2453                        bp->dev->name);
2454                 par_num = bnx2x_print_blocks_with_parity0(
2455                         sig0 & HW_PRTY_ASSERT_SET_0, par_num);
2456                 par_num = bnx2x_print_blocks_with_parity1(
2457                         sig1 & HW_PRTY_ASSERT_SET_1, par_num);
2458                 par_num = bnx2x_print_blocks_with_parity2(
2459                         sig2 & HW_PRTY_ASSERT_SET_2, par_num);
2460                 par_num = bnx2x_print_blocks_with_parity3(
2461                         sig3 & HW_PRTY_ASSERT_SET_3, par_num);
2462                 printk("\n");
2463                 return true;
2464         } else
2465                 return false;
2466 }
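/*
 * Example of the resulting console output (illustrative), for parity
 * hits in BRB and CSEMI on eth0:
 *
 *   eth0: Parity errors detected in blocks: BRB, CSEMI
 *
 * _print_next_block() supplies the ", " separators; par_num is threaded
 * through the four per-signal printers to keep them consistent.
 */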
2467
2468 bool bnx2x_chk_parity_attn(struct bnx2x *bp)
2469 {
2470         struct attn_route attn;
2471         int port = BP_PORT(bp);
2472
2473         attn.sig[0] = REG_RD(bp,
2474                 MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 +
2475                              port*4);
2476         attn.sig[1] = REG_RD(bp,
2477                 MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 +
2478                              port*4);
2479         attn.sig[2] = REG_RD(bp,
2480                 MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 +
2481                              port*4);
2482         attn.sig[3] = REG_RD(bp,
2483                 MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 +
2484                              port*4);
2485
2486         return bnx2x_parity_attn(bp, attn.sig[0], attn.sig[1], attn.sig[2],
2487                                         attn.sig[3]);
2488 }
2489
2490 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2491 {
2492         struct attn_route attn, *group_mask;
2493         int port = BP_PORT(bp);
2494         int index;
2495         u32 reg_addr;
2496         u32 val;
2497         u32 aeu_mask;
2498
2499         /* need to take HW lock because MCP or other port might also
2500            try to handle this event */
2501         bnx2x_acquire_alr(bp);
2502
2503         if (bnx2x_chk_parity_attn(bp)) {
2504                 bp->recovery_state = BNX2X_RECOVERY_INIT;
2505                 bnx2x_set_reset_in_progress(bp);
2506                 schedule_delayed_work(&bp->reset_task, 0);
2507                 /* Disable HW interrupts */
2508                 bnx2x_int_disable(bp);
2509                 bnx2x_release_alr(bp);
2510                 /* In case of parity errors don't handle attentions so that
2511                  * other functions can also "see" the parity errors.
2512                  */
2513                 return;
2514         }
2515
2516         attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2517         attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2518         attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2519         attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2520         DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2521            attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2522
2523         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2524                 if (deasserted & (1 << index)) {
2525                         group_mask = &bp->attn_group[index];
2526
2527                         DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2528                            index, group_mask->sig[0], group_mask->sig[1],
2529                            group_mask->sig[2], group_mask->sig[3]);
2530
2531                         bnx2x_attn_int_deasserted3(bp,
2532                                         attn.sig[3] & group_mask->sig[3]);
2533                         bnx2x_attn_int_deasserted1(bp,
2534                                         attn.sig[1] & group_mask->sig[1]);
2535                         bnx2x_attn_int_deasserted2(bp,
2536                                         attn.sig[2] & group_mask->sig[2]);
2537                         bnx2x_attn_int_deasserted0(bp,
2538                                         attn.sig[0] & group_mask->sig[0]);
2539                 }
2540         }
2541
2542         bnx2x_release_alr(bp);
2543
2544         reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2545
2546         val = ~deasserted;
2547         DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2548            val, reg_addr);
2549         REG_WR(bp, reg_addr, val);
2550
2551         if (~bp->attn_state & deasserted)
2552                 BNX2X_ERR("IGU ERROR\n");
2553
2554         reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2555                           MISC_REG_AEU_MASK_ATTN_FUNC_0;
2556
2557         bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2558         aeu_mask = REG_RD(bp, reg_addr);
2559
2560         DP(NETIF_MSG_HW, "aeu_mask %x  newly deasserted %x\n",
2561            aeu_mask, deasserted);
2562         aeu_mask |= (deasserted & 0x3ff);
2563         DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2564
2565         REG_WR(bp, reg_addr, aeu_mask);
2566         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2567
2568         DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2569         bp->attn_state &= ~deasserted;
2570         DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2571 }
2572
2573 static void bnx2x_attn_int(struct bnx2x *bp)
2574 {
2575         /* read local copy of bits */
2576         u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2577                                                                 attn_bits);
2578         u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2579                                                                 attn_bits_ack);
2580         u32 attn_state = bp->attn_state;
2581
2582         /* look for changed bits */
2583         u32 asserted   =  attn_bits & ~attn_ack & ~attn_state;
2584         u32 deasserted = ~attn_bits &  attn_ack &  attn_state;
2585
2586         DP(NETIF_MSG_HW,
2587            "attn_bits %x  attn_ack %x  asserted %x  deasserted %x\n",
2588            attn_bits, attn_ack, asserted, deasserted);
2589
2590         if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2591                 BNX2X_ERR("BAD attention state\n");
2592
2593         /* handle bits that were raised */
2594         if (asserted)
2595                 bnx2x_attn_int_asserted(bp, asserted);
2596
2597         if (deasserted)
2598                 bnx2x_attn_int_deasserted(bp, deasserted);
2599 }
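/*
 * Worked example of the edge detection above (illustrative): if
 * attn_bits = 0x5, attn_ack = 0x1 and attn_state = 0x1, then
 *
 *   asserted   = 0x5 & ~0x1 & ~0x1 = 0x4   (bit 2 newly raised)
 *   deasserted = ~0x5 & 0x1 & 0x1  = 0x0   (nothing dropped)
 *
 * so only bnx2x_attn_int_asserted() runs for this status block update.
 */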
2600
2601 static void bnx2x_sp_task(struct work_struct *work)
2602 {
2603         struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2604         u16 status;
2605
2606         /* Return here if interrupt is disabled */
2607         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2608                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2609                 return;
2610         }
2611
2612         status = bnx2x_update_dsb_idx(bp);
2613 /*      if (status == 0)                                     */
2614 /*              BNX2X_ERR("spurious slowpath interrupt!\n"); */
2615
2616         DP(NETIF_MSG_INTR, "got a slowpath interrupt (status 0x%x)\n", status);
2617
2618         /* HW attentions */
2619         if (status & 0x1) {
2620                 bnx2x_attn_int(bp);
2621                 status &= ~0x1;
2622         }
2623
2624         /* CStorm events: STAT_QUERY */
2625         if (status & 0x2) {
2626                 DP(BNX2X_MSG_SP, "CStorm events: STAT_QUERY\n");
2627                 status &= ~0x2;
2628         }
2629
2630         if (unlikely(status))
2631                 DP(NETIF_MSG_INTR, "got an unknown interrupt! (status 0x%x)\n",
2632                    status);
2633
2634         bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2635                      IGU_INT_NOP, 1);
2636         bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2637                      IGU_INT_NOP, 1);
2638         bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2639                      IGU_INT_NOP, 1);
2640         bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2641                      IGU_INT_NOP, 1);
2642         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2643                      IGU_INT_ENABLE, 1);
2644 }
2645
2646 irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2647 {
2648         struct net_device *dev = dev_instance;
2649         struct bnx2x *bp = netdev_priv(dev);
2650
2651         /* Return here if interrupt is disabled */
2652         if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2653                 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2654                 return IRQ_HANDLED;
2655         }
2656
2657         bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2658
2659 #ifdef BNX2X_STOP_ON_ERROR
2660         if (unlikely(bp->panic))
2661                 return IRQ_HANDLED;
2662 #endif
2663
2664 #ifdef BCM_CNIC
2665         {
2666                 struct cnic_ops *c_ops;
2667
2668                 rcu_read_lock();
2669                 c_ops = rcu_dereference(bp->cnic_ops);
2670                 if (c_ops)
2671                         c_ops->cnic_handler(bp->cnic_data, NULL);
2672                 rcu_read_unlock();
2673         }
2674 #endif
2675         queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2676
2677         return IRQ_HANDLED;
2678 }
2679
2680 /* end of slow path */
2681
2682 static void bnx2x_timer(unsigned long data)
2683 {
2684         struct bnx2x *bp = (struct bnx2x *) data;
2685
2686         if (!netif_running(bp->dev))
2687                 return;
2688
2689         if (atomic_read(&bp->intr_sem) != 0)
2690                 goto timer_restart;
2691
2692         if (poll) {
2693                 struct bnx2x_fastpath *fp = &bp->fp[0];
2694                 int rc;
2695
2696                 bnx2x_tx_int(fp);
2697                 rc = bnx2x_rx_int(fp, 1000);
2698         }
2699
2700         if (!BP_NOMCP(bp)) {
2701                 int func = BP_FUNC(bp);
2702                 u32 drv_pulse;
2703                 u32 mcp_pulse;
2704
2705                 ++bp->fw_drv_pulse_wr_seq;
2706                 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
2707                 /* TBD - add SYSTEM_TIME */
2708                 drv_pulse = bp->fw_drv_pulse_wr_seq;
2709                 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
2710
2711                 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
2712                              MCP_PULSE_SEQ_MASK);
2713                 /* The delta between driver pulse and mcp response
2714                  * should be 1 (before mcp response) or 0 (after mcp response)
2715                  */
2716                 if ((drv_pulse != mcp_pulse) &&
2717                     (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
2718                         /* someone lost a heartbeat... */
2719                         BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
2720                                   drv_pulse, mcp_pulse);
2721                 }
2722         }
2723
2724         if (bp->state == BNX2X_STATE_OPEN)
2725                 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
2726
2727 timer_restart:
2728         mod_timer(&bp->timer, jiffies + bp->current_interval);
2729 }
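/*
 * Heartbeat example (illustrative): if the driver writes drv_pulse =
 * 0x0042, then mcp_pulse = 0x0042 (delta 0, MCP already responded) or
 * mcp_pulse = 0x0041 (delta 1, response pending) are both healthy; any
 * other delta triggers the "lost a heartbeat" error above.
 */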
2730
2731 /* end of Statistics */
2732
2733 /* nic init */
2734
2735 /*
2736  * nic init service functions
2737  */
2738
2739 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
2740 {
2741         int port = BP_PORT(bp);
2742
2743         /* "CSTORM" */
2744         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2745                         CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
2746                         CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
2747         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2748                         CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
2749                         CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
2750 }
2751
2752 void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
2753                           dma_addr_t mapping, int sb_id)
2754 {
2755         int port = BP_PORT(bp);
2756         int func = BP_FUNC(bp);
2757         int index;
2758         u64 section;
2759
2760         /* USTORM */
2761         section = ((u64)mapping) + offsetof(struct host_status_block,
2762                                             u_status_block);
2763         sb->u_status_block.status_block_id = sb_id;
2764
2765         REG_WR(bp, BAR_CSTRORM_INTMEM +
2766                CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
2767         REG_WR(bp, BAR_CSTRORM_INTMEM +
2768                ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
2769                U64_HI(section));
2770         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
2771                 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
2772
2773         for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
2774                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2775                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
2776
2777         /* CSTORM */
2778         section = ((u64)mapping) + offsetof(struct host_status_block,
2779                                             c_status_block);
2780         sb->c_status_block.status_block_id = sb_id;
2781
2782         REG_WR(bp, BAR_CSTRORM_INTMEM +
2783                CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
2784         REG_WR(bp, BAR_CSTRORM_INTMEM +
2785                ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
2786                U64_HI(section));
2787         REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
2788                 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
2789
2790         for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
2791                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2792                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
2793
2794         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2795 }
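/*
 * Note on the layout (derived from the writes above): each non-default
 * status block has two host sections, USTORM and CSTORM.  For each one
 * the driver programs the 64-bit host address of the section, the
 * owning function, and an initial HC-disable value of 1 per index, then
 * ACKs the block with IGU_INT_ENABLE to start receiving interrupts.
 */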
2796
2797 static void bnx2x_zero_def_sb(struct bnx2x *bp)
2798 {
2799         int func = BP_FUNC(bp);
2800
2801         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
2802                         TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2803                         sizeof(struct tstorm_def_status_block)/4);
2804         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2805                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
2806                         sizeof(struct cstorm_def_status_block_u)/4);
2807         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
2808                         CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
2809                         sizeof(struct cstorm_def_status_block_c)/4);
2810         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
2811                         XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
2812                         sizeof(struct xstorm_def_status_block)/4);
2813 }
2814
2815 static void bnx2x_init_def_sb(struct bnx2x *bp,
2816                               struct host_def_status_block *def_sb,
2817                               dma_addr_t mapping, int sb_id)
2818 {
2819         int port = BP_PORT(bp);
2820         int func = BP_FUNC(bp);
2821         int index, val, reg_offset;
2822         u64 section;
2823
2824         /* ATTN */
2825         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2826                                             atten_status_block);
2827         def_sb->atten_status_block.status_block_id = sb_id;
2828
2829         bp->attn_state = 0;
2830
2831         reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2832                              MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2833
2834         for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2835                 bp->attn_group[index].sig[0] = REG_RD(bp,
2836                                                      reg_offset + 0x10*index);
2837                 bp->attn_group[index].sig[1] = REG_RD(bp,
2838                                                reg_offset + 0x4 + 0x10*index);
2839                 bp->attn_group[index].sig[2] = REG_RD(bp,
2840                                                reg_offset + 0x8 + 0x10*index);
2841                 bp->attn_group[index].sig[3] = REG_RD(bp,
2842                                                reg_offset + 0xc + 0x10*index);
2843         }
2844
2845         reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
2846                              HC_REG_ATTN_MSG0_ADDR_L);
2847
2848         REG_WR(bp, reg_offset, U64_LO(section));
2849         REG_WR(bp, reg_offset + 4, U64_HI(section));
2850
2851         reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
2852
2853         val = REG_RD(bp, reg_offset);
2854         val |= sb_id;
2855         REG_WR(bp, reg_offset, val);
2856
2857         /* USTORM */
2858         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2859                                             u_def_status_block);
2860         def_sb->u_def_status_block.status_block_id = sb_id;
2861
2862         REG_WR(bp, BAR_CSTRORM_INTMEM +
2863                CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
2864         REG_WR(bp, BAR_CSTRORM_INTMEM +
2865                ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
2866                U64_HI(section));
2867         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
2868                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
2869
2870         for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
2871                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2872                          CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
2873
2874         /* CSTORM */
2875         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2876                                             c_def_status_block);
2877         def_sb->c_def_status_block.status_block_id = sb_id;
2878
2879         REG_WR(bp, BAR_CSTRORM_INTMEM +
2880                CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
2881         REG_WR(bp, BAR_CSTRORM_INTMEM +
2882                ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
2883                U64_HI(section));
2884         REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
2885                 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
2886
2887         for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
2888                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2889                          CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
2890
2891         /* TSTORM */
2892         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2893                                             t_def_status_block);
2894         def_sb->t_def_status_block.status_block_id = sb_id;
2895
2896         REG_WR(bp, BAR_TSTRORM_INTMEM +
2897                TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2898         REG_WR(bp, BAR_TSTRORM_INTMEM +
2899                ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2900                U64_HI(section));
2901         REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
2902                 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2903
2904         for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
2905                 REG_WR16(bp, BAR_TSTRORM_INTMEM +
2906                          TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2907
2908         /* XSTORM */
2909         section = ((u64)mapping) + offsetof(struct host_def_status_block,
2910                                             x_def_status_block);
2911         def_sb->x_def_status_block.status_block_id = sb_id;
2912
2913         REG_WR(bp, BAR_XSTRORM_INTMEM +
2914                XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
2915         REG_WR(bp, BAR_XSTRORM_INTMEM +
2916                ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
2917                U64_HI(section));
2918         REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
2919                 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
2920
2921         for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
2922                 REG_WR16(bp, BAR_XSTRORM_INTMEM +
2923                          XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
2924
2925         bp->stats_pending = 0;
2926         bp->set_mac_pending = 0;
2927
2928         bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
2929 }
2930
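     /* HC timeouts for the Rx/Tx completion indices are programmed in units
      * of 4 * BNX2X_BTR ticks; a timeout of zero disables host coalescing
      * for that index (the matching HC_DISABLE flag is set instead).
      */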
2931 void bnx2x_update_coalesce(struct bnx2x *bp)
2932 {
2933         int port = BP_PORT(bp);
2934         int i;
2935
2936         for_each_queue(bp, i) {
2937                 int sb_id = bp->fp[i].sb_id;
2938
2939                 /* HC_INDEX_U_ETH_RX_CQ_CONS */
2940                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2941                         CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
2942                                                       U_SB_ETH_RX_CQ_INDEX),
2943                         bp->rx_ticks/(4 * BNX2X_BTR));
2944                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2945                          CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
2946                                                        U_SB_ETH_RX_CQ_INDEX),
2947                          (bp->rx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2948
2949                 /* HC_INDEX_C_ETH_TX_CQ_CONS */
2950                 REG_WR8(bp, BAR_CSTRORM_INTMEM +
2951                         CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
2952                                                       C_SB_ETH_TX_CQ_INDEX),
2953                         bp->tx_ticks/(4 * BNX2X_BTR));
2954                 REG_WR16(bp, BAR_CSTRORM_INTMEM +
2955                          CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
2956                                                        C_SB_ETH_TX_CQ_INDEX),
2957                          (bp->tx_ticks/(4 * BNX2X_BTR)) ? 0 : 1);
2958         }
2959 }
2960
2961 static void bnx2x_init_sp_ring(struct bnx2x *bp)
2962 {
2963         int func = BP_FUNC(bp);
2964
2965         spin_lock_init(&bp->spq_lock);
2966
2967         bp->spq_left = MAX_SPQ_PENDING;
2968         bp->spq_prod_idx = 0;
2969         bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
2970         bp->spq_prod_bd = bp->spq;
2971         bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
2972
2973         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
2974                U64_LO(bp->spq_mapping));
2975         REG_WR(bp,
2976                XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
2977                U64_HI(bp->spq_mapping));
2978
2979         REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
2980                bp->spq_prod_idx);
2981 }
2982
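     /* Fill the per-connection ETH context: the USTORM section describes
      * the Rx BD/SGE rings and TPA settings, the CSTORM/XSTORM sections
      * describe the Tx completion index and BD ring, and the CDU fields
      * get their per-connection reserved (validation) values.
      */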
2983 static void bnx2x_init_context(struct bnx2x *bp)
2984 {
2985         int i;
2986
2987         /* Rx */
2988         for_each_queue(bp, i) {
2989                 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
2990                 struct bnx2x_fastpath *fp = &bp->fp[i];
2991                 u8 cl_id = fp->cl_id;
2992
2993                 context->ustorm_st_context.common.sb_index_numbers =
2994                                                 BNX2X_RX_SB_INDEX_NUM;
2995                 context->ustorm_st_context.common.clientId = cl_id;
2996                 context->ustorm_st_context.common.status_block_id = fp->sb_id;
2997                 context->ustorm_st_context.common.flags =
2998                         (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
2999                          USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
3000                 context->ustorm_st_context.common.statistics_counter_id =
3001                                                 cl_id;
3002                 context->ustorm_st_context.common.mc_alignment_log_size =
3003                                                 BNX2X_RX_ALIGN_SHIFT;
3004                 context->ustorm_st_context.common.bd_buff_size =
3005                                                 bp->rx_buf_size;
3006                 context->ustorm_st_context.common.bd_page_base_hi =
3007                                                 U64_HI(fp->rx_desc_mapping);
3008                 context->ustorm_st_context.common.bd_page_base_lo =
3009                                                 U64_LO(fp->rx_desc_mapping);
3010                 if (!fp->disable_tpa) {
3011                         context->ustorm_st_context.common.flags |=
3012                                 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
3013                         context->ustorm_st_context.common.sge_buff_size =
3014                                 (u16)min_t(u32, SGE_PAGE_SIZE*PAGES_PER_SGE,
3015                                            0xffff);
3016                         context->ustorm_st_context.common.sge_page_base_hi =
3017                                                 U64_HI(fp->rx_sge_mapping);
3018                         context->ustorm_st_context.common.sge_page_base_lo =
3019                                                 U64_LO(fp->rx_sge_mapping);
3020
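                             /* SGEs needed for an MTU-sized packet: the page
                              * count is rounded up to a whole SGE, each SGE
                              * covering PAGES_PER_SGE pages
                              */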
3021                         context->ustorm_st_context.common.max_sges_for_packet =
3022                                 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
3023                         context->ustorm_st_context.common.max_sges_for_packet =
3024                                 ((context->ustorm_st_context.common.
3025                                   max_sges_for_packet + PAGES_PER_SGE - 1) &
3026                                  (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
3027                 }
3028
3029                 context->ustorm_ag_context.cdu_usage =
3030                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3031                                                CDU_REGION_NUMBER_UCM_AG,
3032                                                ETH_CONNECTION_TYPE);
3033
3034                 context->xstorm_ag_context.cdu_reserved =
3035                         CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
3036                                                CDU_REGION_NUMBER_XCM_AG,
3037                                                ETH_CONNECTION_TYPE);
3038         }
3039
3040         /* Tx */
3041         for_each_queue(bp, i) {
3042                 struct bnx2x_fastpath *fp = &bp->fp[i];
3043                 struct eth_context *context =
3044                         bnx2x_sp(bp, context[i].eth);
3045
3046                 context->cstorm_st_context.sb_index_number =
3047                                                 C_SB_ETH_TX_CQ_INDEX;
3048                 context->cstorm_st_context.status_block_id = fp->sb_id;
3049
3050                 context->xstorm_st_context.tx_bd_page_base_hi =
3051                                                 U64_HI(fp->tx_desc_mapping);
3052                 context->xstorm_st_context.tx_bd_page_base_lo =
3053                                                 U64_LO(fp->tx_desc_mapping);
3054                 context->xstorm_st_context.statistics_data = (fp->cl_id |
3055                                 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
3056         }
3057 }
3058
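     /* Spread the TSTORM RSS indirection table entries round-robin across
      * the client IDs of the enabled queues; a no-op when RSS is disabled.
      */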
3059 static void bnx2x_init_ind_table(struct bnx2x *bp)
3060 {
3061         int func = BP_FUNC(bp);
3062         int i;
3063
3064         if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
3065                 return;
3066
3067         DP(NETIF_MSG_IFUP,
3068            "Initializing indirection table  multi_mode %d\n", bp->multi_mode);
3069         for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
3070                 REG_WR8(bp, BAR_TSTRORM_INTMEM +
3071                         TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
3072                         bp->fp->cl_id + (i % bp->num_queues));
3073 }
3074
3075 void bnx2x_set_client_config(struct bnx2x *bp)
3076 {
3077         struct tstorm_eth_client_config tstorm_client = {0};
3078         int port = BP_PORT(bp);
3079         int i;
3080
3081         tstorm_client.mtu = bp->dev->mtu;
3082         tstorm_client.config_flags =
3083                                 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
3084                                  TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
3085 #ifdef BCM_VLAN
3086         if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
3087                 tstorm_client.config_flags |=
3088                                 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
3089                 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
3090         }
3091 #endif
3092
3093         for_each_queue(bp, i) {
3094                 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
3095
3096                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3097                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
3098                        ((u32 *)&tstorm_client)[0]);
3099                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3100                        TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
3101                        ((u32 *)&tstorm_client)[1]);
3102         }
3103
3104         DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
3105            ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
3106 }
3107
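     /* Translate bp->rx_mode into the TSTORM MAC filter drop/accept masks
      * and program the NIG LLH mask so the matching traffic classes reach
      * the host; the client config is rewritten unless Rx is disabled.
      */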
3108 void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
3109 {
3110         struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
3111         int mode = bp->rx_mode;
3112         int mask = bp->rx_mode_cl_mask;
3113         int func = BP_FUNC(bp);
3114         int port = BP_PORT(bp);
3115         int i;
3116         /* All but management unicast packets should pass to the host as well */
3117         u32 llh_mask =
3118                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
3119                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
3120                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
3121                 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
3122
3123         DP(NETIF_MSG_IFUP, "rx mode %d  mask 0x%x\n", mode, mask);
3124
3125         switch (mode) {
3126         case BNX2X_RX_MODE_NONE: /* no Rx */
3127                 tstorm_mac_filter.ucast_drop_all = mask;
3128                 tstorm_mac_filter.mcast_drop_all = mask;
3129                 tstorm_mac_filter.bcast_drop_all = mask;
3130                 break;
3131
3132         case BNX2X_RX_MODE_NORMAL:
3133                 tstorm_mac_filter.bcast_accept_all = mask;
3134                 break;
3135
3136         case BNX2X_RX_MODE_ALLMULTI:
3137                 tstorm_mac_filter.mcast_accept_all = mask;
3138                 tstorm_mac_filter.bcast_accept_all = mask;
3139                 break;
3140
3141         case BNX2X_RX_MODE_PROMISC:
3142                 tstorm_mac_filter.ucast_accept_all = mask;
3143                 tstorm_mac_filter.mcast_accept_all = mask;
3144                 tstorm_mac_filter.bcast_accept_all = mask;
3145                 /* pass management unicast packets as well */
3146                 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
3147                 break;
3148
3149         default:
3150                 BNX2X_ERR("BAD rx mode (%d)\n", mode);
3151                 break;
3152         }
3153
3154         REG_WR(bp,
3155                (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
3156                llh_mask);
3157
3158         for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
3159                 REG_WR(bp, BAR_TSTRORM_INTMEM +
3160                        TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
3161                        ((u32 *)&tstorm_mac_filter)[i]);
3162
3163 /*              DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
3164                    ((u32 *)&tstorm_mac_filter)[i]); */
3165         }
3166
3167         if (mode != BNX2X_RX_MODE_NONE)
3168                 bnx2x_set_client_config(bp);
3169 }
3170
3171 static void bnx2x_init_internal_common(struct bnx2x *bp)
3172 {
3173         int i;
3174
3175         /* Zero this manually as its initialization is
3176            currently missing in the initTool */
3177         for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
3178                 REG_WR(bp, BAR_USTRORM_INTMEM +
3179                        USTORM_AGG_DATA_OFFSET + i * 4, 0);
3180 }
3181
3182 static void bnx2x_init_internal_port(struct bnx2x *bp)
3183 {
3184         int port = BP_PORT(bp);
3185
3186         REG_WR(bp,
3187                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
3188         REG_WR(bp,
3189                BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
3190         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3191         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
3192 }
3193
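     /* Per-function internal memory init: RSS/TPA configuration, per-client
      * statistics reset, statistics query addresses, CQE page bases and max
      * aggregation size per queue, dropless flow control thresholds (E1H)
      * and the rate shaping/fairness (cmng) context in E1HMF mode.
      */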
3194 static void bnx2x_init_internal_func(struct bnx2x *bp)
3195 {
3196         struct tstorm_eth_function_common_config tstorm_config = {0};
3197         struct stats_indication_flags stats_flags = {0};
3198         int port = BP_PORT(bp);
3199         int func = BP_FUNC(bp);
3200         int i, j;
3201         u32 offset;
3202         u16 max_agg_size;
3203
3204         tstorm_config.config_flags = RSS_FLAGS(bp);
3205
3206         if (is_multi(bp))
3207                 tstorm_config.rss_result_mask = MULTI_MASK;
3208
3209         /* Enable TPA if needed */
3210         if (bp->flags & TPA_ENABLE_FLAG)
3211                 tstorm_config.config_flags |=
3212                         TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
3213
3214         if (IS_E1HMF(bp))
3215                 tstorm_config.config_flags |=
3216                                 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
3217
3218         tstorm_config.leading_client_id = BP_L_ID(bp);
3219
3220         REG_WR(bp, BAR_TSTRORM_INTMEM +
3221                TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
3222                (*(u32 *)&tstorm_config));
3223
3224         bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
3225         bp->rx_mode_cl_mask = (1 << BP_L_ID(bp));
3226         bnx2x_set_storm_rx_mode(bp);
3227
3228         for_each_queue(bp, i) {
3229                 u8 cl_id = bp->fp[i].cl_id;
3230
3231                 /* reset xstorm per client statistics */
3232                 offset = BAR_XSTRORM_INTMEM +
3233                          XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3234                 for (j = 0;
3235                      j < sizeof(struct xstorm_per_client_stats) / 4; j++)
3236                         REG_WR(bp, offset + j*4, 0);
3237
3238                 /* reset tstorm per client statistics */
3239                 offset = BAR_TSTRORM_INTMEM +
3240                          TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3241                 for (j = 0;
3242                      j < sizeof(struct tstorm_per_client_stats) / 4; j++)
3243                         REG_WR(bp, offset + j*4, 0);
3244
3245                 /* reset ustorm per client statistics */
3246                 offset = BAR_USTRORM_INTMEM +
3247                          USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
3248                 for (j = 0;
3249                      j < sizeof(struct ustorm_per_client_stats) / 4; j++)
3250                         REG_WR(bp, offset + j*4, 0);
3251         }
3252
3253         /* Init statistics related context */
3254         stats_flags.collect_eth = 1;
3255
3256         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
3257                ((u32 *)&stats_flags)[0]);
3258         REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
3259                ((u32 *)&stats_flags)[1]);
3260
3261         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
3262                ((u32 *)&stats_flags)[0]);
3263         REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
3264                ((u32 *)&stats_flags)[1]);
3265
3266         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
3267                ((u32 *)&stats_flags)[0]);
3268         REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
3269                ((u32 *)&stats_flags)[1]);
3270
3271         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
3272                ((u32 *)&stats_flags)[0]);
3273         REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
3274                ((u32 *)&stats_flags)[1]);
3275
3276         REG_WR(bp, BAR_XSTRORM_INTMEM +
3277                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3278                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3279         REG_WR(bp, BAR_XSTRORM_INTMEM +
3280                XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3281                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3282
3283         REG_WR(bp, BAR_TSTRORM_INTMEM +
3284                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3285                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3286         REG_WR(bp, BAR_TSTRORM_INTMEM +
3287                TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3288                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3289
3290         REG_WR(bp, BAR_USTRORM_INTMEM +
3291                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
3292                U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
3293         REG_WR(bp, BAR_USTRORM_INTMEM +
3294                USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
3295                U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
3296
3297         if (CHIP_IS_E1H(bp)) {
3298                 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
3299                         IS_E1HMF(bp));
3300                 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
3301                         IS_E1HMF(bp));
3302                 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
3303                         IS_E1HMF(bp));
3304                 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
3305                         IS_E1HMF(bp));
3306
3307                 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
3308                          bp->e1hov);
3309         }
3310
3311         /* Init CQ ring mapping and aggregation size; the FW limit is 8 frags */
3312         max_agg_size = min_t(u32, (min_t(u32, 8, MAX_SKB_FRAGS) *
3313                                    SGE_PAGE_SIZE * PAGES_PER_SGE), 0xffff);
3314         for_each_queue(bp, i) {
3315                 struct bnx2x_fastpath *fp = &bp->fp[i];
3316
3317                 REG_WR(bp, BAR_USTRORM_INTMEM +
3318                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
3319                        U64_LO(fp->rx_comp_mapping));
3320                 REG_WR(bp, BAR_USTRORM_INTMEM +
3321                        USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
3322                        U64_HI(fp->rx_comp_mapping));
3323
3324                 /* Next page */
3325                 REG_WR(bp, BAR_USTRORM_INTMEM +
3326                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
3327                        U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3328                 REG_WR(bp, BAR_USTRORM_INTMEM +
3329                        USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
3330                        U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
3331
3332                 REG_WR16(bp, BAR_USTRORM_INTMEM +
3333                          USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
3334                          max_agg_size);
3335         }
3336
3337         /* dropless flow control */
3338         if (CHIP_IS_E1H(bp)) {
3339                 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
3340
3341                 rx_pause.bd_thr_low = 250;
3342                 rx_pause.cqe_thr_low = 250;
3343                 rx_pause.cos = 1;
3344                 rx_pause.sge_thr_low = 0;
3345                 rx_pause.bd_thr_high = 350;
3346                 rx_pause.cqe_thr_high = 350;
3347                 rx_pause.sge_thr_high = 0;
3348
3349                 for_each_queue(bp, i) {
3350                         struct bnx2x_fastpath *fp = &bp->fp[i];
3351
3352                         if (!fp->disable_tpa) {
3353                                 rx_pause.sge_thr_low = 150;
3354                                 rx_pause.sge_thr_high = 250;
3355                         }
3356
3358                         offset = BAR_USTRORM_INTMEM +
3359                                  USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
3360                                                                    fp->cl_id);
3361                         for (j = 0;
3362                              j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
3363                              j++)
3364                                 REG_WR(bp, offset + j*4,
3365                                        ((u32 *)&rx_pause)[j]);
3366                 }
3367         }
3368
3369         memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
3370
3371         /* Init rate shaping and fairness contexts */
3372         if (IS_E1HMF(bp)) {
3373                 int vn;
3374
3375                 /* During init there is no active link.
3376                    Until link is up, set link rate to 10Gbps */
3377                 bp->link_vars.line_speed = SPEED_10000;
3378                 bnx2x_init_port_minmax(bp);
3379
3380                 if (!BP_NOMCP(bp))
3381                         bp->mf_config =
3382                               SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
3383                 bnx2x_calc_vn_weight_sum(bp);
3384
3385                 for (vn = VN_0; vn < E1HVN_MAX; vn++)
3386                         bnx2x_init_vn_minmax(bp, 2*vn + port);
3387
3388                 /* Enable rate shaping and fairness */
3389                 bp->cmng.flags.cmng_enables |=
3390                                         CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
3391
3392         } else {
3393                 /* rate shaping and fairness are disabled */
3394                 DP(NETIF_MSG_IFUP,
3395                    "single function mode  minmax will be disabled\n");
3396         }
3397
3399         /* Store cmng structures to internal memory */
3400         if (bp->port.pmf)
3401                 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
3402                         REG_WR(bp, BAR_XSTRORM_INTMEM +
3403                                XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
3404                                ((u32 *)(&bp->cmng))[i]);
3405 }
3406
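     /* The cases below deliberately fall through: a COMMON load also
      * performs the PORT and FUNCTION init, and a PORT load also performs
      * the FUNCTION init.
      */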
3407 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
3408 {
3409         switch (load_code) {
3410         case FW_MSG_CODE_DRV_LOAD_COMMON:
3411                 bnx2x_init_internal_common(bp);
3412                 /* no break */
3413
3414         case FW_MSG_CODE_DRV_LOAD_PORT:
3415                 bnx2x_init_internal_port(bp);
3416                 /* no break */
3417
3418         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
3419                 bnx2x_init_internal_func(bp);
3420                 break;
3421
3422         default:
3423                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
3424                 break;
3425         }
3426 }
3427
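     /* Bring up the fastpath status blocks, then the default status block,
      * coalescing, Rx/Tx/slowpath rings, contexts and internal memories, in
      * that order; interrupts are enabled only after everything is flushed.
      */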
3428 void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
3429 {
3430         int i;
3431
3432         for_each_queue(bp, i) {
3433                 struct bnx2x_fastpath *fp = &bp->fp[i];
3434
3435                 fp->bp = bp;
3436                 fp->state = BNX2X_FP_STATE_CLOSED;
3437                 fp->index = i;
3438                 fp->cl_id = BP_L_ID(bp) + i;
3439 #ifdef BCM_CNIC
3440                 fp->sb_id = fp->cl_id + 1;
3441 #else
3442                 fp->sb_id = fp->cl_id;
3443 #endif
3444                 DP(NETIF_MSG_IFUP,
3445                    "queue[%d]:  bnx2x_init_sb(%p,%p)  cl_id %d  sb %d\n",
3446                    i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
3447                 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
3448                               fp->sb_id);
3449                 bnx2x_update_fpsb_idx(fp);
3450         }
3451
3452         /* ensure status block indices were read */
3453         rmb();
3454
3456         bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
3457                           DEF_SB_ID);
3458         bnx2x_update_dsb_idx(bp);
3459         bnx2x_update_coalesce(bp);
3460         bnx2x_init_rx_rings(bp);
3461         bnx2x_init_tx_ring(bp);
3462         bnx2x_init_sp_ring(bp);
3463         bnx2x_init_context(bp);
3464         bnx2x_init_internal(bp, load_code);
3465         bnx2x_init_ind_table(bp);
3466         bnx2x_stats_init(bp);
3467
3468         /* At this point, we are ready for interrupts */
3469         atomic_set(&bp->intr_sem, 0);
3470
3471         /* flush all before enabling interrupts */
3472         mb();
3473         mmiowb();
3474
3475         bnx2x_int_enable(bp);
3476
3477         /* Check for SPIO5 */
3478         bnx2x_attn_int_deasserted0(bp,
3479                 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
3480                                    AEU_INPUTS_ATTN_BITS_SPIO5);
3481 }
3482
3483 /* end of nic init */
3484
3485 /*
3486  * gzip service functions
3487  */
3488
3489 static int bnx2x_gunzip_init(struct bnx2x *bp)
3490 {
3491         bp->gunzip_buf = dma_alloc_coherent(&bp->pdev->dev, FW_BUF_SIZE,
3492                                             &bp->gunzip_mapping, GFP_KERNEL);
3493         if (bp->gunzip_buf == NULL)
3494                 goto gunzip_nomem1;
3495
3496         bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
3497         if (bp->strm == NULL)
3498                 goto gunzip_nomem2;
3499
3500         bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
3501                                       GFP_KERNEL);
3502         if (bp->strm->workspace == NULL)
3503                 goto gunzip_nomem3;
3504
3505         return 0;
3506
3507 gunzip_nomem3:
3508         kfree(bp->strm);
3509         bp->strm = NULL;
3510
3511 gunzip_nomem2:
3512         dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3513                           bp->gunzip_mapping);
3514         bp->gunzip_buf = NULL;
3515
3516 gunzip_nomem1:
3517         netdev_err(bp->dev, "Cannot allocate firmware buffer for"
3518                " decompression\n");
3519         return -ENOMEM;
3520 }
3521
3522 static void bnx2x_gunzip_end(struct bnx2x *bp)
3523 {
3524         kfree(bp->strm->workspace);
3525
3526         kfree(bp->strm);
3527         bp->strm = NULL;
3528
3529         if (bp->gunzip_buf) {
3530                 dma_free_coherent(&bp->pdev->dev, FW_BUF_SIZE, bp->gunzip_buf,
3531                                   bp->gunzip_mapping);
3532                 bp->gunzip_buf = NULL;
3533         }
3534 }
3535
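     /* The firmware sections are gzip-compressed: validate the fixed 10-byte
      * gzip header, skip the optional file-name field and run a raw inflate
      * (negative windowBits) into the preallocated gunzip buffer.  On
      * success, gunzip_outlen holds the output length in 32-bit words.
      */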
3536 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
3537 {
3538         int n, rc;
3539
3540         /* check gzip header */
3541         if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
3542                 BNX2X_ERR("Bad gzip header\n");
3543                 return -EINVAL;
3544         }
3545
3546         n = 10;
3547
3548 #define FNAME                           0x8
3549
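             /* skip the optional NUL-terminated original file name field */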
3550         if (zbuf[3] & FNAME)
3551                 while ((n < len) && (zbuf[n++] != 0));
3552
3553         bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
3554         bp->strm->avail_in = len - n;
3555         bp->strm->next_out = bp->gunzip_buf;
3556         bp->strm->avail_out = FW_BUF_SIZE;
3557
3558         rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
3559         if (rc != Z_OK)
3560                 return rc;
3561
3562         rc = zlib_inflate(bp->strm, Z_FINISH);
3563         if ((rc != Z_OK) && (rc != Z_STREAM_END))
3564                 netdev_err(bp->dev, "Firmware decompression error: %s\n",
3565                            bp->strm->msg);
3566
3567         bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
3568         if (bp->gunzip_outlen & 0x3)
3569                 netdev_err(bp->dev, "Firmware decompression error:"
3570                                     " gunzip_outlen (%d) not aligned\n",
3571                                 bp->gunzip_outlen);
3572         bp->gunzip_outlen >>= 2;
3573
3574         zlib_inflateEnd(bp->strm);
3575
3576         if (rc == Z_STREAM_END)
3577                 return 0;
3578
3579         return rc;
3580 }
3581
3582 /* nic load/unload */
3583
3584 /*
3585  * General service functions
3586  */
3587
3588 /* send a NIG loopback debug packet */
3589 static void bnx2x_lb_pckt(struct bnx2x *bp)
3590 {
3591         u32 wb_write[3];
3592
3593         /* Ethernet source and destination addresses */
3594         wb_write[0] = 0x55555555;
3595         wb_write[1] = 0x55555555;
3596         wb_write[2] = 0x20;             /* SOP */
3597         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3598
3599         /* NON-IP protocol */
3600         wb_write[0] = 0x09000000;
3601         wb_write[1] = 0x55555555;
3602         wb_write[2] = 0x10;             /* EOP, eop_bvalid = 0 */
3603         REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
3604 }
3605
3606 /* Some of the internal memories are not directly readable from the
3607  * driver; to test them we send debug packets through the loopback path
3608  * and check the NIG and PRS packet counters.
3609  */
3610 static int bnx2x_int_mem_test(struct bnx2x *bp)
3611 {
3612         int factor;
3613         int count, i;
3614         u32 val = 0;
3615
3616         if (CHIP_REV_IS_FPGA(bp))
3617                 factor = 120;
3618         else if (CHIP_REV_IS_EMUL(bp))
3619                 factor = 200;
3620         else
3621                 factor = 1;
3622
3623         DP(NETIF_MSG_HW, "start part1\n");
3624
3625         /* Disable inputs of parser neighbor blocks */
3626         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3627         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3628         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3629         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3630
3631         /*  Write 0 to parser credits for CFC search request */
3632         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3633
3634         /* send Ethernet packet */
3635         bnx2x_lb_pckt(bp);
3636
3637         /* TODO: should the NIG statistics be reset here? */
3638         /* Wait until NIG register shows 1 packet of size 0x10 */
3639         count = 1000 * factor;
3640         while (count) {
3641
3642                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3643                 val = *bnx2x_sp(bp, wb_data[0]);
3644                 if (val == 0x10)
3645                         break;
3646
3647                 msleep(10);
3648                 count--;
3649         }
3650         if (val != 0x10) {
3651                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
3652                 return -1;
3653         }
3654
3655         /* Wait until PRS register shows 1 packet */
3656         count = 1000 * factor;
3657         while (count) {
3658                 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3659                 if (val == 1)
3660                         break;
3661
3662                 msleep(10);
3663                 count--;
3664         }
3665         if (val != 0x1) {
3666                 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
3667                 return -2;
3668         }
3669
3670         /* Reset and init BRB, PRS */
3671         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3672         msleep(50);
3673         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3674         msleep(50);
3675         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3676         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3677
3678         DP(NETIF_MSG_HW, "part2\n");
3679
3680         /* Disable inputs of parser neighbor blocks */
3681         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
3682         REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
3683         REG_WR(bp, CFC_REG_DEBUG0, 0x1);
3684         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
3685
3686         /* Write 0 to parser credits for CFC search request */
3687         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
3688
3689         /* send 10 Ethernet packets */
3690         for (i = 0; i < 10; i++)
3691                 bnx2x_lb_pckt(bp);
3692
3693         /* Wait until NIG register shows 10 + 1
3694            packets of size 11*0x10 = 0xb0 */
3695         count = 1000 * factor;
3696         while (count) {
3697
3698                 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
3699                 val = *bnx2x_sp(bp, wb_data[0]);
3700                 if (val == 0xb0)
3701                         break;
3702
3703                 msleep(10);
3704                 count--;
3705         }
3706         if (val != 0xb0) {
3707                 BNX2X_ERR("NIG timeout  val = 0x%x\n", val);
3708                 return -3;
3709         }
3710
3711         /* Wait until PRS register shows 2 packets */
3712         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3713         if (val != 2)
3714                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
3715
3716         /* Write 1 to parser credits for CFC search request */
3717         REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
3718
3719         /* Wait until PRS register shows 3 packets */
3720         msleep(10 * factor);
3721         /* the PRS packet counter should now read 3 */
3722         val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
3723         if (val != 3)
3724                 BNX2X_ERR("PRS timeout  val = 0x%x\n", val);
3725
3726         /* clear NIG EOP FIFO */
3727         for (i = 0; i < 11; i++)
3728                 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
3729         val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
3730         if (val != 1) {
3731                 BNX2X_ERR("clear of NIG failed\n");
3732                 return -4;
3733         }
3734
3735         /* Reset and init BRB, PRS, NIG */
3736         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
3737         msleep(50);
3738         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
3739         msleep(50);
3740         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
3741         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
3742 #ifndef BCM_CNIC
3743         /* set NIC mode */
3744         REG_WR(bp, PRS_REG_NIC_MODE, 1);
3745 #endif
3746
3747         /* Enable inputs of parser neighbor blocks */
3748         REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
3749         REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
3750         REG_WR(bp, CFC_REG_DEBUG0, 0x0);
3751         REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
3752
3753         DP(NETIF_MSG_HW, "done\n");
3754
3755         return 0; /* OK */
3756 }
3757
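     /* Unmask the HW attention interrupts of the individual blocks (a mask
      * value of 0 enables everything); the commented-out SEM/MISC masks are
      * intentionally left masked, and PBF keeps bits 3 and 4 masked.
      */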
3758 static void enable_blocks_attention(struct bnx2x *bp)
3759 {
3760         REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3761         REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
3762         REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
3763         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
3764         REG_WR(bp, QM_REG_QM_INT_MASK, 0);
3765         REG_WR(bp, TM_REG_TM_INT_MASK, 0);
3766         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
3767         REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
3768         REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
3769 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
3770 /*      REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
3771         REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
3772         REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
3773         REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
3774 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
3775 /*      REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
3776         REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
3777         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
3778         REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
3779         REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
3780 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
3781 /*      REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
3782         if (CHIP_REV_IS_FPGA(bp))
3783                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
3784         else
3785                 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
3786         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
3787         REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
3788         REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
3789 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
3790 /*      REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
3791         REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
3792         REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
3793 /*      REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
3794         REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18);         /* bit 3,4 masked */
3795 }
3796
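     /* Per-block parity attention masks written by enable_blocks_parity():
      * 0x0 unmasks every parity source in a block, 0xffffffff leaves the
      * block fully masked, and intermediate values keep only the commented
      * bits masked.
      */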
3797 static const struct {
3798         u32 addr;
3799         u32 mask;
3800 } bnx2x_parity_mask[] = {
3801         {PXP_REG_PXP_PRTY_MASK, 0xffffffff},
3802         {PXP2_REG_PXP2_PRTY_MASK_0, 0xffffffff},
3803         {PXP2_REG_PXP2_PRTY_MASK_1, 0xffffffff},
3804         {HC_REG_HC_PRTY_MASK, 0xffffffff},
3805         {MISC_REG_MISC_PRTY_MASK, 0xffffffff},
3806         {QM_REG_QM_PRTY_MASK, 0x0},
3807         {DORQ_REG_DORQ_PRTY_MASK, 0x0},
3808         {GRCBASE_UPB + PB_REG_PB_PRTY_MASK, 0x0},
3809         {GRCBASE_XPB + PB_REG_PB_PRTY_MASK, 0x0},
3810         {SRC_REG_SRC_PRTY_MASK, 0x4}, /* bit 2 */
3811         {CDU_REG_CDU_PRTY_MASK, 0x0},
3812         {CFC_REG_CFC_PRTY_MASK, 0x0},
3813         {DBG_REG_DBG_PRTY_MASK, 0x0},
3814         {DMAE_REG_DMAE_PRTY_MASK, 0x0},
3815         {BRB1_REG_BRB1_PRTY_MASK, 0x0},
3816         {PRS_REG_PRS_PRTY_MASK, (1<<6)}, /* bit 6 */
3817         {TSDM_REG_TSDM_PRTY_MASK, 0x18}, /* bit 3,4 */
3818         {CSDM_REG_CSDM_PRTY_MASK, 0x8},  /* bit 3 */
3819         {USDM_REG_USDM_PRTY_MASK, 0x38}, /* bit 3,4,5 */
3820         {XSDM_REG_XSDM_PRTY_MASK, 0x8},  /* bit 3 */
3821         {TSEM_REG_TSEM_PRTY_MASK_0, 0x0},
3822         {TSEM_REG_TSEM_PRTY_MASK_1, 0x0},
3823         {USEM_REG_USEM_PRTY_MASK_0, 0x0},
3824         {USEM_REG_USEM_PRTY_MASK_1, 0x0},
3825         {CSEM_REG_CSEM_PRTY_MASK_0, 0x0},
3826         {CSEM_REG_CSEM_PRTY_MASK_1, 0x0},
3827         {XSEM_REG_XSEM_PRTY_MASK_0, 0x0},
3828         {XSEM_REG_XSEM_PRTY_MASK_1, 0x0}
3829 };
3830
3831 static void enable_blocks_parity(struct bnx2x *bp)
3832 {
3833         int i;
3834
3835         for (i = 0; i < ARRAY_SIZE(bnx2x_parity_mask); i++)
3836                 REG_WR(bp, bnx2x_parity_mask[i].addr,
3837                         bnx2x_parity_mask[i].mask);
3838 }
3839
3841 static void bnx2x_reset_common(struct bnx2x *bp)
3842 {
3843         /* reset_common */
3844         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
3845                0xd3ffff7f);
3846         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
3847 }
3848
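     /* Derive the PXP arbiter write and read orders from the PCIe Device
      * Control register (max payload size and max read request size), unless
      * an explicit read order was requested via bp->mrrs.
      */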
3849 static void bnx2x_init_pxp(struct bnx2x *bp)
3850 {
3851         u16 devctl;
3852         int r_order, w_order;
3853
3854         pci_read_config_word(bp->pdev,
3855                              bp->pcie_cap + PCI_EXP_DEVCTL, &devctl);
3856         DP(NETIF_MSG_HW, "read 0x%x from devctl\n", devctl);
3857         w_order = ((devctl & PCI_EXP_DEVCTL_PAYLOAD) >> 5);
3858         if (bp->mrrs == -1)
3859                 r_order = ((devctl & PCI_EXP_DEVCTL_READRQ) >> 12);
3860         else {
3861                 DP(NETIF_MSG_HW, "force read order to %d\n", bp->mrrs);
3862                 r_order = bp->mrrs;
3863         }
3864
3865         bnx2x_init_pxp_arb(bp, r_order, w_order);
3866 }
3867
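     /* Fan failure, where the board design requires it, is reported through
      * SPIO5: configure the pin as an input, set it to active-low mode and
      * route its event to the IGU as an attention.
      */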
3868 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
3869 {
3870         int is_required;
3871         u32 val;
3872         int port;
3873
3874         if (BP_NOMCP(bp))
3875                 return;
3876
3877         is_required = 0;
3878         val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
3879               SHARED_HW_CFG_FAN_FAILURE_MASK;
3880
3881         if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
3882                 is_required = 1;
3884         /*
3885          * The fan failure mechanism is usually related to the PHY type since
3886          * the power consumption of the board is affected by the PHY. Currently,
3887          * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
3888          */
3889         else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
3890                 for (port = PORT_0; port < PORT_MAX; port++) {
3891                         is_required |=
3892                                 bnx2x_fan_failure_det_req(
3893                                         bp,
3894                                         bp->common.shmem_base,
3895                                         bp->common.shmem2_base,
3896                                         port);
3897                 }
3898
3899         DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
3900
3901         if (is_required == 0)
3902                 return;
3903
3904         /* Fan failure is indicated by SPIO 5 */
3905         bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
3906                        MISC_REGISTERS_SPIO_INPUT_HI_Z);
3907
3908         /* set to active low mode */
3909         val = REG_RD(bp, MISC_REG_SPIO_INT);
3910         val |= ((1 << MISC_REGISTERS_SPIO_5) <<
3911                                         MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
3912         REG_WR(bp, MISC_REG_SPIO_INT, val);
3913
3914         /* enable interrupt to signal the IGU */
3915         val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
3916         val |= (1 << MISC_REGISTERS_SPIO_5);
3917         REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
3918 }
3919
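     /* One-time chip-wide init performed by the first function to load
      * (COMMON stage): global block resets and init, PXP/QM/DQ setup,
      * clearing of the STORM memories, CFC init polls, an internal memory
      * self-test on the first E1 bring-up since power-up, and common PHY
      * init through the bootcode.
      */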
3920 static int bnx2x_init_common(struct bnx2x *bp)
3921 {
3922         u32 val, i;
3923 #ifdef BCM_CNIC
3924         u32 wb_write[2];
3925 #endif
3926
3927         DP(BNX2X_MSG_MCP, "starting common init  func %d\n", BP_FUNC(bp));
3928
3929         bnx2x_reset_common(bp);
3930         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
3931         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
3932
3933         bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
3934         if (CHIP_IS_E1H(bp))
3935                 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
3936
3937         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
3938         msleep(30);
3939         REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
3940
3941         bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
3942         if (CHIP_IS_E1(bp)) {
3943                 /* enable HW interrupt from PXP on USDM overflow
3944                    bit 16 on INT_MASK_0 */
3945                 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
3946         }
3947
3948         bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
3949         bnx2x_init_pxp(bp);
3950
3951 #ifdef __BIG_ENDIAN
3952         REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
3953         REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
3954         REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
3955         REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
3956         REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
3957         /* make sure this value is 0 */
3958         REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
3959
3960 /*      REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
3961         REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
3962         REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
3963         REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
3964         REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
3965 #endif
3966
3967         REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
3968 #ifdef BCM_CNIC
3969         REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
3970         REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
3971         REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
3972 #endif
3973
3974         if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
3975                 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
3976
3977         /* let the HW do its magic ... */
3978         msleep(100);
3979         /* finish PXP init */
3980         val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
3981         if (val != 1) {
3982                 BNX2X_ERR("PXP2 CFG failed\n");
3983                 return -EBUSY;
3984         }
3985         val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
3986         if (val != 1) {
3987                 BNX2X_ERR("PXP2 RD_INIT failed\n");
3988                 return -EBUSY;
3989         }
3990
3991         REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
3992         REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
3993
3994         bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
3995
3996         /* clean the DMAE memory */
3997         bp->dmae_ready = 1;
3998         bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
3999
4000         bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
4001         bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
4002         bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
4003         bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
4004
4005         bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
4006         bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
4007         bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
4008         bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
4009
4010         bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
4011
4012 #ifdef BCM_CNIC
4013         wb_write[0] = 0;
4014         wb_write[1] = 0;
4015         for (i = 0; i < 64; i++) {
4016                 REG_WR(bp, QM_REG_BASEADDR + i*4, 1024 * 4 * (i%16));
4017                 bnx2x_init_ind_wr(bp, QM_REG_PTRTBL + i*8, wb_write, 2);
4018
4019                 if (CHIP_IS_E1H(bp)) {
4020                         REG_WR(bp, QM_REG_BASEADDR_EXT_A + i*4, 1024*4*(i%16));
4021                         bnx2x_init_ind_wr(bp, QM_REG_PTRTBL_EXT_A + i*8,
4022                                           wb_write, 2);
4023                 }
4024         }
4025 #endif
4026         /* soft reset pulse */
4027         REG_WR(bp, QM_REG_SOFT_RESET, 1);
4028         REG_WR(bp, QM_REG_SOFT_RESET, 0);
4029
4030 #ifdef BCM_CNIC
4031         bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
4032 #endif
4033
4034         bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
4035         REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
4036         if (!CHIP_REV_IS_SLOW(bp)) {
4037                 /* enable hw interrupt from doorbell Q */
4038                 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
4039         }
4040
4041         bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
4042         bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
4043         REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
4044 #ifndef BCM_CNIC
4045         /* set NIC mode */
4046         REG_WR(bp, PRS_REG_NIC_MODE, 1);
4047 #endif
4048         if (CHIP_IS_E1H(bp))
4049                 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
4050
4051         bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
4052         bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
4053         bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
4054         bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
4055
4056         bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4057         bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4058         bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4059         bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
4060
4061         bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
4062         bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
4063         bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
4064         bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
4065
4066         /* sync semi rtc */
4067         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
4068                0x80000000);
4069         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
4070                0x80000000);
4071
4072         bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
4073         bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
4074         bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
4075
4076         REG_WR(bp, SRC_REG_SOFT_RST, 1);
4077         for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4)
4078                 REG_WR(bp, i, random32());
4079         bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
4080 #ifdef BCM_CNIC
4081         REG_WR(bp, SRC_REG_KEYSEARCH_0, 0x63285672);
4082         REG_WR(bp, SRC_REG_KEYSEARCH_1, 0x24b8f2cc);
4083         REG_WR(bp, SRC_REG_KEYSEARCH_2, 0x223aef9b);
4084         REG_WR(bp, SRC_REG_KEYSEARCH_3, 0x26001e3a);
4085         REG_WR(bp, SRC_REG_KEYSEARCH_4, 0x7ae91116);
4086         REG_WR(bp, SRC_REG_KEYSEARCH_5, 0x5ce5230b);
4087         REG_WR(bp, SRC_REG_KEYSEARCH_6, 0x298d8adf);
4088         REG_WR(bp, SRC_REG_KEYSEARCH_7, 0x6eb0ff09);
4089         REG_WR(bp, SRC_REG_KEYSEARCH_8, 0x1830f82f);
4090         REG_WR(bp, SRC_REG_KEYSEARCH_9, 0x01e46be7);
4091 #endif
4092         REG_WR(bp, SRC_REG_SOFT_RST, 0);
4093
4094         if (sizeof(union cdu_context) != 1024)
4095                 /* we currently assume that a context is 1024 bytes */
4096                 dev_alert(&bp->pdev->dev, "please adjust the size "
4097                                           "of cdu_context(%ld)\n",
4098                          (long)sizeof(union cdu_context));
4099
4100         bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
4101         val = (4 << 24) + (0 << 12) + 1024;
4102         REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
4103
4104         bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
4105         REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
4106         /* enable context validation interrupt from CFC */
4107         REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
4108
4109         /* set the thresholds to prevent CFC/CDU race */
4110         REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
4111
4112         bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
4113         bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
4114
4115         bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
4116         /* Reset PCIE errors for debug */
4117         REG_WR(bp, 0x2814, 0xffffffff);
4118         REG_WR(bp, 0x3820, 0xffffffff);
4119
4120         bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
4121         bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
4122         bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
4123         bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
4124
4125         bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
4126         if (CHIP_IS_E1H(bp)) {
4127                 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
4128                 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
4129         }
4130
4131         if (CHIP_REV_IS_SLOW(bp))
4132                 msleep(200);
4133
4134         /* finish CFC init */
4135         val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
4136         if (val != 1) {
4137                 BNX2X_ERR("CFC LL_INIT failed\n");
4138                 return -EBUSY;
4139         }
4140         val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
4141         if (val != 1) {
4142                 BNX2X_ERR("CFC AC_INIT failed\n");
4143                 return -EBUSY;
4144         }
4145         val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
4146         if (val != 1) {
4147                 BNX2X_ERR("CFC CAM_INIT failed\n");
4148                 return -EBUSY;
4149         }
4150         REG_WR(bp, CFC_REG_DEBUG0, 0);
4151
4152         /* read the NIG statistics
4153            to see if this is the first load since power-up */
4154         bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
4155         val = *bnx2x_sp(bp, wb_data[0]);
4156
4157         /* do internal memory self test */
4158         if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
4159                 BNX2X_ERR("internal mem self test failed\n");
4160                 return -EBUSY;
4161         }
4162
4163         bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
4164                                                        bp->common.shmem_base,
4165                                                        bp->common.shmem2_base);
4166
4167         bnx2x_setup_fan_failure_detection(bp);
4168
4169         /* clear PXP2 attentions */
4170         REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
4171
4172         enable_blocks_attention(bp);
4173         if (CHIP_PARITY_SUPPORTED(bp))
4174                 enable_blocks_parity(bp);
4175
4176         if (!BP_NOMCP(bp)) {
4177                 bnx2x_acquire_phy_lock(bp);
4178                 bnx2x_common_init_phy(bp, bp->common.shmem_base,
4179                                       bp->common.shmem2_base);
4180                 bnx2x_release_phy_lock(bp);
4181         } else
4182                 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
4183
4184         return 0;
4185 }
4186
4187 static int bnx2x_init_port(struct bnx2x *bp)
4188 {
4189         int port = BP_PORT(bp);
4190         int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
4191         u32 low, high;
4192         u32 val;
4193
4194         DP(BNX2X_MSG_MCP, "starting port init  port %d\n", port);
4195
4196         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
4197
4198         bnx2x_init_block(bp, PXP_BLOCK, init_stage);
4199         bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
4200
4201         bnx2x_init_block(bp, TCM_BLOCK, init_stage);
4202         bnx2x_init_block(bp, UCM_BLOCK, init_stage);
4203         bnx2x_init_block(bp, CCM_BLOCK, init_stage);
4204         bnx2x_init_block(bp, XCM_BLOCK, init_stage);
4205
4206 #ifdef BCM_CNIC
4207         REG_WR(bp, QM_REG_CONNNUM_0 + port*4, 1024/16 - 1);
4208
4209         bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
4210         REG_WR(bp, TM_REG_LIN0_SCAN_TIME + port*4, 20);
4211         REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + port*4, 31);
4212 #endif
4213
4214         bnx2x_init_block(bp, DQ_BLOCK, init_stage);
4215
4216         bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
4217         if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
4218                 /* no pause for emulation and FPGA */
4219                 low = 0;
4220                 high = 513;
4221         } else {
4222                 if (IS_E1HMF(bp))
4223                         low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
4224                 else if (bp->dev->mtu > 4096) {
4225                         if (bp->flags & ONE_PORT_FLAG)
4226                                 low = 160;
4227                         else {
4228                                 val = bp->dev->mtu;
4229                                 /* (24*1024 + val*4)/256 */
4230                                 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
4231                         }
4232                 } else
4233                         low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
4234                 high = low + 56;        /* 14*1024/256 */
4235         }
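        /* Both thresholds are programmed in units of 256-byte BRB blocks,
         * as the inline arithmetic above shows (e.g. 14*1024/256 = 56). */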
4236         REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
4237         REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
4238
4239
4240         bnx2x_init_block(bp, PRS_BLOCK, init_stage);
4241
4242         bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
4243         bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
4244         bnx2x_init_block(bp, USDM_BLOCK, init_stage);
4245         bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
4246
4247         bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
4248         bnx2x_init_block(bp, USEM_BLOCK, init_stage);
4249         bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
4250         bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
4251
4252         bnx2x_init_block(bp, UPB_BLOCK, init_stage);
4253         bnx2x_init_block(bp, XPB_BLOCK, init_stage);
4254
4255         bnx2x_init_block(bp, PBF_BLOCK, init_stage);
4256
4257         /* configure PBF to work without PAUSE mtu 9000 */
4258         REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
4259
4260         /* update threshold */
4261         REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
4262         /* update init credit */
4263         REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
4264
4265         /* probe changes */
4266         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
4267         msleep(5);
4268         REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
4269
4270 #ifdef BCM_CNIC
4271         bnx2x_init_block(bp, SRCH_BLOCK, init_stage);
4272 #endif
4273         bnx2x_init_block(bp, CDU_BLOCK, init_stage);
4274         bnx2x_init_block(bp, CFC_BLOCK, init_stage);
4275
4276         if (CHIP_IS_E1(bp)) {
4277                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4278                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4279         }
4280         bnx2x_init_block(bp, HC_BLOCK, init_stage);
4281
4282         bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
4283         /* init aeu_mask_attn_func_0/1:
4284          *  - SF mode: bits 3-7 are masked. only bits 0-2 are in use
4285          *  - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
4286          *             bits 4-7 are used for "per vn group attention" */
4287         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
4288                (IS_E1HMF(bp) ? 0xF7 : 0x7));
4289
4290         bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
4291         bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
4292         bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
4293         bnx2x_init_block(bp, DBU_BLOCK, init_stage);
4294         bnx2x_init_block(bp, DBG_BLOCK, init_stage);
4295
4296         bnx2x_init_block(bp, NIG_BLOCK, init_stage);
4297
4298         REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
4299
4300         if (CHIP_IS_E1H(bp)) {
4301                 /* 0x2 disable e1hov, 0x1 enable */
4302                 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
4303                        (IS_E1HMF(bp) ? 0x1 : 0x2));
4304
                REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
                REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
                REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
4310         }
4311
4312         bnx2x_init_block(bp, MCP_BLOCK, init_stage);
4313         bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
4314         bp->port.need_hw_lock = bnx2x_hw_lock_required(bp,
4315                                                        bp->common.shmem_base,
4316                                                        bp->common.shmem2_base);
4317         if (bnx2x_fan_failure_det_req(bp, bp->common.shmem_base,
4318                                       bp->common.shmem2_base, port)) {
4319                 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4320                                        MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4321                 val = REG_RD(bp, reg_addr);
4322                 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
4323                 REG_WR(bp, reg_addr, val);
4324         }
4325         bnx2x__link_reset(bp);
4326
4327         return 0;
4328 }
4329
4330 #define ILT_PER_FUNC            (768/2)
4331 #define FUNC_ILT_BASE(func)     (func * ILT_PER_FUNC)
/* the phys address is shifted right 12 bits and a 1=valid bit is
   added at the 53rd bit;
   then, since this is a wide register(TM),
   we split it into two 32-bit writes
 */
4337 #define ONCHIP_ADDR1(x)         ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
4338 #define ONCHIP_ADDR2(x)         ((u32)((1 << 20) | ((u64)x >> 44)))
4339 #define PXP_ONE_ILT(x)          (((x) << 10) | x)
4340 #define PXP_ILT_RANGE(f, l)     (((l) << 10) | f)
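/* Worked example with an illustrative address: for
 * addr = 0x00123456789ab000, ONCHIP_ADDR1(addr) = 0x456789ab (address
 * bits 12..43) and ONCHIP_ADDR2(addr) = 0x00100123 (valid bit 20 set,
 * address bits 44..63 in bits 0..19). PXP_ILT_RANGE() similarly packs
 * a first/last ILT line pair into one word: last index in bits 10..19,
 * first in bits 0..9. */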
4341
4342 #ifdef BCM_CNIC
4343 #define CNIC_ILT_LINES          127
4344 #define CNIC_CTX_PER_ILT        16
4345 #else
4346 #define CNIC_ILT_LINES          0
4347 #endif
4348
4349 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
4350 {
4351         int reg;
4352
4353         if (CHIP_IS_E1H(bp))
4354                 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
4355         else /* E1 */
4356                 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
4357
4358         bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
4359 }
4360
4361 static int bnx2x_init_func(struct bnx2x *bp)
4362 {
4363         int port = BP_PORT(bp);
4364         int func = BP_FUNC(bp);
4365         u32 addr, val;
4366         int i;
4367
4368         DP(BNX2X_MSG_MCP, "starting func init  func %d\n", func);
4369
4370         /* set MSI reconfigure capability */
4371         addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
4372         val = REG_RD(bp, addr);
4373         val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
4374         REG_WR(bp, addr, val);
4375
4376         i = FUNC_ILT_BASE(func);
4377
4378         bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
4379         if (CHIP_IS_E1H(bp)) {
4380                 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
4381                 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
4382         } else /* E1 */
4383                 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
4384                        PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
4385
4386 #ifdef BCM_CNIC
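        /* Lay out the remaining per-function ILT lines used by CNIC: one
         * line each for the timers block, the QM queues and the searcher
         * T1 table, right after the CDU context lines mapped above. */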
4387         i += 1 + CNIC_ILT_LINES;
4388         bnx2x_ilt_wr(bp, i, bp->timers_mapping);
4389         if (CHIP_IS_E1(bp))
4390                 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
4391         else {
4392                 REG_WR(bp, PXP2_REG_RQ_TM_FIRST_ILT, i);
4393                 REG_WR(bp, PXP2_REG_RQ_TM_LAST_ILT, i);
4394         }
4395
4396         i++;
4397         bnx2x_ilt_wr(bp, i, bp->qm_mapping);
4398         if (CHIP_IS_E1(bp))
4399                 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
4400         else {
4401                 REG_WR(bp, PXP2_REG_RQ_QM_FIRST_ILT, i);
4402                 REG_WR(bp, PXP2_REG_RQ_QM_LAST_ILT, i);
4403         }
4404
4405         i++;
4406         bnx2x_ilt_wr(bp, i, bp->t1_mapping);
4407         if (CHIP_IS_E1(bp))
4408                 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
4409         else {
4410                 REG_WR(bp, PXP2_REG_RQ_SRC_FIRST_ILT, i);
4411                 REG_WR(bp, PXP2_REG_RQ_SRC_LAST_ILT, i);
4412         }
4413
4414         /* tell the searcher where the T2 table is */
4415         REG_WR(bp, SRC_REG_COUNTFREE0 + port*4, 16*1024/64);
4416
4417         bnx2x_wb_wr(bp, SRC_REG_FIRSTFREE0 + port*16,
4418                     U64_LO(bp->t2_mapping), U64_HI(bp->t2_mapping));
4419
4420         bnx2x_wb_wr(bp, SRC_REG_LASTFREE0 + port*16,
4421                     U64_LO((u64)bp->t2_mapping + 16*1024 - 64),
4422                     U64_HI((u64)bp->t2_mapping + 16*1024 - 64));
4423
4424         REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + port*4, 10);
4425 #endif
4426
4427         if (CHIP_IS_E1H(bp)) {
4428                 bnx2x_init_block(bp, MISC_BLOCK, FUNC0_STAGE + func);
4429                 bnx2x_init_block(bp, TCM_BLOCK, FUNC0_STAGE + func);
4430                 bnx2x_init_block(bp, UCM_BLOCK, FUNC0_STAGE + func);
4431                 bnx2x_init_block(bp, CCM_BLOCK, FUNC0_STAGE + func);
4432                 bnx2x_init_block(bp, XCM_BLOCK, FUNC0_STAGE + func);
4433                 bnx2x_init_block(bp, TSEM_BLOCK, FUNC0_STAGE + func);
4434                 bnx2x_init_block(bp, USEM_BLOCK, FUNC0_STAGE + func);
4435                 bnx2x_init_block(bp, CSEM_BLOCK, FUNC0_STAGE + func);
4436                 bnx2x_init_block(bp, XSEM_BLOCK, FUNC0_STAGE + func);
4437
4438                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
4439                 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
4440         }
4441
4442         /* HC init per function */
4443         if (CHIP_IS_E1H(bp)) {
4444                 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
4445
4446                 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
4447                 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
4448         }
4449         bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
4450
4451         /* Reset PCIE errors for debug */
4452         REG_WR(bp, 0x2114, 0xffffffff);
4453         REG_WR(bp, 0x2120, 0xffffffff);
4454         bnx2x_phy_probe(&bp->link_params);
4455         return 0;
4456 }
4457
4458 int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
4459 {
4460         int i, rc = 0;
4461
4462         DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
4463            BP_FUNC(bp), load_code);
4464
4465         bp->dmae_ready = 0;
4466         mutex_init(&bp->dmae_mutex);
4467         rc = bnx2x_gunzip_init(bp);
4468         if (rc)
4469                 return rc;
4470
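        /* The load_code from the MCP cascades: DRV_LOAD_COMMON performs
         * the common init and falls through to the port init, which in
         * turn falls through to the function init, so each level also
         * runs the levels below it. */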
4471         switch (load_code) {
4472         case FW_MSG_CODE_DRV_LOAD_COMMON:
4473                 rc = bnx2x_init_common(bp);
4474                 if (rc)
4475                         goto init_hw_err;
4476                 /* no break */
4477
4478         case FW_MSG_CODE_DRV_LOAD_PORT:
4479                 bp->dmae_ready = 1;
4480                 rc = bnx2x_init_port(bp);
4481                 if (rc)
4482                         goto init_hw_err;
4483                 /* no break */
4484
4485         case FW_MSG_CODE_DRV_LOAD_FUNCTION:
4486                 bp->dmae_ready = 1;
4487                 rc = bnx2x_init_func(bp);
4488                 if (rc)
4489                         goto init_hw_err;
4490                 break;
4491
4492         default:
4493                 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
4494                 break;
4495         }
4496
4497         if (!BP_NOMCP(bp)) {
4498                 int func = BP_FUNC(bp);
4499
4500                 bp->fw_drv_pulse_wr_seq =
4501                                 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
4502                                  DRV_PULSE_SEQ_MASK);
4503                 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x\n", bp->fw_drv_pulse_wr_seq);
4504         }
4505
4506         /* this needs to be done before gunzip end */
4507         bnx2x_zero_def_sb(bp);
4508         for_each_queue(bp, i)
4509                 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4510 #ifdef BCM_CNIC
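        /* The CNIC status block immediately follows the last ethernet
         * queue's; the loop above leaves i one past the last queue, so
         * the same expression indexes it. */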
4511         bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
4512 #endif
4513
4514 init_hw_err:
4515         bnx2x_gunzip_end(bp);
4516
4517         return rc;
4518 }
4519
4520 void bnx2x_free_mem(struct bnx2x *bp)
4521 {
4522
4523 #define BNX2X_PCI_FREE(x, y, size) \
4524         do { \
4525                 if (x) { \
4526                         dma_free_coherent(&bp->pdev->dev, size, x, y); \
4527                         x = NULL; \
4528                         y = 0; \
4529                 } \
4530         } while (0)
4531
4532 #define BNX2X_FREE(x) \
4533         do { \
4534                 if (x) { \
4535                         vfree(x); \
4536                         x = NULL; \
4537                 } \
4538         } while (0)
4539
4540         int i;
4541
4542         /* fastpath */
4543         /* Common */
4544         for_each_queue(bp, i) {
4545
4546                 /* status blocks */
4547                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
4548                                bnx2x_fp(bp, i, status_blk_mapping),
4549                                sizeof(struct host_status_block));
4550         }
4551         /* Rx */
4552         for_each_queue(bp, i) {
4553
4554                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4555                 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
4556                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
4557                                bnx2x_fp(bp, i, rx_desc_mapping),
4558                                sizeof(struct eth_rx_bd) * NUM_RX_BD);
4559
4560                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
4561                                bnx2x_fp(bp, i, rx_comp_mapping),
4562                                sizeof(struct eth_fast_path_rx_cqe) *
4563                                NUM_RCQ_BD);
4564
4565                 /* SGE ring */
4566                 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
4567                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
4568                                bnx2x_fp(bp, i, rx_sge_mapping),
4569                                BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4570         }
4571         /* Tx */
4572         for_each_queue(bp, i) {
4573
4574                 /* fastpath tx rings: tx_buf tx_desc */
4575                 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
4576                 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
4577                                bnx2x_fp(bp, i, tx_desc_mapping),
4578                                sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4579         }
4580         /* end of fastpath */
4581
4582         BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
4583                        sizeof(struct host_def_status_block));
4584
4585         BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
4586                        sizeof(struct bnx2x_slowpath));
4587
4588 #ifdef BCM_CNIC
4589         BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
4590         BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
4591         BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
4592         BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
4593         BNX2X_PCI_FREE(bp->cnic_sb, bp->cnic_sb_mapping,
4594                        sizeof(struct host_status_block));
4595 #endif
4596         BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
4597
4598 #undef BNX2X_PCI_FREE
#undef BNX2X_FREE
4600 }
4601
4602 int bnx2x_alloc_mem(struct bnx2x *bp)
4603 {
4604
4605 #define BNX2X_PCI_ALLOC(x, y, size) \
4606         do { \
4607                 x = dma_alloc_coherent(&bp->pdev->dev, size, y, GFP_KERNEL); \
4608                 if (x == NULL) \
4609                         goto alloc_mem_err; \
4610                 memset(x, 0, size); \
4611         } while (0)
4612
4613 #define BNX2X_ALLOC(x, size) \
4614         do { \
4615                 x = vmalloc(size); \
4616                 if (x == NULL) \
4617                         goto alloc_mem_err; \
4618                 memset(x, 0, size); \
4619         } while (0)
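/* BNX2X_PCI_ALLOC provides coherent DMA memory for rings the chip reads
 * or writes directly; BNX2X_ALLOC (vmalloc) is for host-only shadow
 * rings such as the sw_rx_bd/sw_tx_bd bookkeeping arrays. */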
4620
4621         int i;
4622
4623         /* fastpath */
4624         /* Common */
4625         for_each_queue(bp, i) {
4626                 bnx2x_fp(bp, i, bp) = bp;
4627
4628                 /* status blocks */
4629                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
4630                                 &bnx2x_fp(bp, i, status_blk_mapping),
4631                                 sizeof(struct host_status_block));
4632         }
4633         /* Rx */
4634         for_each_queue(bp, i) {
4635
4636                 /* fastpath rx rings: rx_buf rx_desc rx_comp */
4637                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
4638                                 sizeof(struct sw_rx_bd) * NUM_RX_BD);
4639                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
4640                                 &bnx2x_fp(bp, i, rx_desc_mapping),
4641                                 sizeof(struct eth_rx_bd) * NUM_RX_BD);
4642
4643                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
4644                                 &bnx2x_fp(bp, i, rx_comp_mapping),
4645                                 sizeof(struct eth_fast_path_rx_cqe) *
4646                                 NUM_RCQ_BD);
4647
4648                 /* SGE ring */
4649                 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
4650                                 sizeof(struct sw_rx_page) * NUM_RX_SGE);
4651                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
4652                                 &bnx2x_fp(bp, i, rx_sge_mapping),
4653                                 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
4654         }
4655         /* Tx */
4656         for_each_queue(bp, i) {
4657
4658                 /* fastpath tx rings: tx_buf tx_desc */
4659                 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
4660                                 sizeof(struct sw_tx_bd) * NUM_TX_BD);
4661                 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
4662                                 &bnx2x_fp(bp, i, tx_desc_mapping),
4663                                 sizeof(union eth_tx_bd_types) * NUM_TX_BD);
4664         }
4665         /* end of fastpath */
4666
4667         BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
4668                         sizeof(struct host_def_status_block));
4669
4670         BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
4671                         sizeof(struct bnx2x_slowpath));
4672
4673 #ifdef BCM_CNIC
4674         BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
4675
        /* allocate the searcher T2 table: 1/4 of the T1 allocation
         * (T2 is not entered into the ILT) */
4679         BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
4680
4681         /* Initialize T2 (for 1024 connections) */
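        /* Each 64-byte T2 entry stores, in its last 8 bytes, the physical
         * address of the next entry, chaining the table into a free list
         * for the searcher. */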
4682         for (i = 0; i < 16*1024; i += 64)
4683                 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
4684
4685         /* Timer block array (8*MAX_CONN) phys uncached for now 1024 conns */
4686         BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
4687
4688         /* QM queues (128*MAX_CONN) */
4689         BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
4690
4691         BNX2X_PCI_ALLOC(bp->cnic_sb, &bp->cnic_sb_mapping,
4692                         sizeof(struct host_status_block));
4693 #endif
4694
4695         /* Slow path ring */
4696         BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
4697
4698         return 0;
4699
4700 alloc_mem_err:
4701         bnx2x_free_mem(bp);
4702         return -ENOMEM;
4703
4704 #undef BNX2X_PCI_ALLOC
4705 #undef BNX2X_ALLOC
4706 }
4707
4708
4709 /*
4710  * Init service functions
4711  */
4712
4713 /**
4714  * Sets a MAC in a CAM for a few L2 Clients for E1 chip
4715  *
4716  * @param bp driver descriptor
4717  * @param set set or clear an entry (1 or 0)
4718  * @param mac pointer to a buffer containing a MAC
4719  * @param cl_bit_vec bit vector of clients to register a MAC for
4720  * @param cam_offset offset in a CAM to use
4721  * @param with_bcast set broadcast MAC as well
4722  */
4723 static void bnx2x_set_mac_addr_e1_gen(struct bnx2x *bp, int set, u8 *mac,
4724                                       u32 cl_bit_vec, u8 cam_offset,
4725                                       u8 with_bcast)
4726 {
4727         struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
4728         int port = BP_PORT(bp);
4729
4730         /* CAM allocation
4731          * unicasts 0-31:port0 32-63:port1
4732          * multicast 64-127:port0 128-191:port1
4733          */
4734         config->hdr.length = 1 + (with_bcast ? 1 : 0);
4735         config->hdr.offset = cam_offset;
4736         config->hdr.client_id = 0xff;
4737         config->hdr.reserved1 = 0;
4738
4739         /* primary MAC */
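        /* The CAM holds the MAC as three big-endian 16-bit words, hence
         * the swab16() on each byte pair: e.g. 00:11:22:33:44:55 becomes
         * msb 0x0011, middle 0x2233, lsb 0x4455 on a little-endian host. */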
4740         config->config_table[0].cam_entry.msb_mac_addr =
4741                                         swab16(*(u16 *)&mac[0]);
4742         config->config_table[0].cam_entry.middle_mac_addr =
4743                                         swab16(*(u16 *)&mac[2]);
4744         config->config_table[0].cam_entry.lsb_mac_addr =
4745                                         swab16(*(u16 *)&mac[4]);
4746         config->config_table[0].cam_entry.flags = cpu_to_le16(port);
4747         if (set)
4748                 config->config_table[0].target_table_entry.flags = 0;
4749         else
4750                 CAM_INVALIDATE(config->config_table[0]);
4751         config->config_table[0].target_table_entry.clients_bit_vector =
4752                                                 cpu_to_le32(cl_bit_vec);
4753         config->config_table[0].target_table_entry.vlan_id = 0;
4754
4755         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
4756            (set ? "setting" : "clearing"),
4757            config->config_table[0].cam_entry.msb_mac_addr,
4758            config->config_table[0].cam_entry.middle_mac_addr,
4759            config->config_table[0].cam_entry.lsb_mac_addr);
4760
4761         /* broadcast */
4762         if (with_bcast) {
4763                 config->config_table[1].cam_entry.msb_mac_addr =
4764                         cpu_to_le16(0xffff);
4765                 config->config_table[1].cam_entry.middle_mac_addr =
4766                         cpu_to_le16(0xffff);
4767                 config->config_table[1].cam_entry.lsb_mac_addr =
4768                         cpu_to_le16(0xffff);
4769                 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
4770                 if (set)
4771                         config->config_table[1].target_table_entry.flags =
4772                                         TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
4773                 else
4774                         CAM_INVALIDATE(config->config_table[1]);
4775                 config->config_table[1].target_table_entry.clients_bit_vector =
4776                                                         cpu_to_le32(cl_bit_vec);
4777                 config->config_table[1].target_table_entry.vlan_id = 0;
4778         }
4779
4780         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4781                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4782                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4783 }
4784
4785 /**
4786  * Sets a MAC in a CAM for a few L2 Clients for E1H chip
4787  *
4788  * @param bp driver descriptor
4789  * @param set set or clear an entry (1 or 0)
4790  * @param mac pointer to a buffer containing a MAC
4791  * @param cl_bit_vec bit vector of clients to register a MAC for
4792  * @param cam_offset offset in a CAM to use
4793  */
4794 static void bnx2x_set_mac_addr_e1h_gen(struct bnx2x *bp, int set, u8 *mac,
4795                                        u32 cl_bit_vec, u8 cam_offset)
4796 {
4797         struct mac_configuration_cmd_e1h *config =
4798                 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
4799
4800         config->hdr.length = 1;
4801         config->hdr.offset = cam_offset;
4802         config->hdr.client_id = 0xff;
4803         config->hdr.reserved1 = 0;
4804
4805         /* primary MAC */
4806         config->config_table[0].msb_mac_addr =
4807                                         swab16(*(u16 *)&mac[0]);
4808         config->config_table[0].middle_mac_addr =
4809                                         swab16(*(u16 *)&mac[2]);
4810         config->config_table[0].lsb_mac_addr =
4811                                         swab16(*(u16 *)&mac[4]);
4812         config->config_table[0].clients_bit_vector =
4813                                         cpu_to_le32(cl_bit_vec);
4814         config->config_table[0].vlan_id = 0;
4815         config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
4816         if (set)
4817                 config->config_table[0].flags = BP_PORT(bp);
4818         else
4819                 config->config_table[0].flags =
4820                                 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
4821
4822         DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID mask %d\n",
4823            (set ? "setting" : "clearing"),
4824            config->config_table[0].msb_mac_addr,
4825            config->config_table[0].middle_mac_addr,
4826            config->config_table[0].lsb_mac_addr, bp->e1hov, cl_bit_vec);
4827
4828         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
4829                       U64_HI(bnx2x_sp_mapping(bp, mac_config)),
4830                       U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
4831 }
4832
4833 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
4834                              int *state_p, int poll)
4835 {
4836         /* can take a while if any port is running */
4837         int cnt = 5000;
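        /* 5000 polls with msleep(1) between them bounds the wait at
         * roughly 5 seconds (longer in practice, since msleep() may
         * sleep for more than 1 ms). */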
4838
4839         DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
4840            poll ? "polling" : "waiting", state, idx);
4841
4842         might_sleep();
4843         while (cnt--) {
4844                 if (poll) {
4845                         bnx2x_rx_int(bp->fp, 10);
4846                         /* if index is different from 0
4847                          * the reply for some commands will
4848                          * be on the non default queue
4849                          */
4850                         if (idx)
4851                                 bnx2x_rx_int(&bp->fp[idx], 10);
4852                 }
4853
4854                 mb(); /* state is changed by bnx2x_sp_event() */
4855                 if (*state_p == state) {
4856 #ifdef BNX2X_STOP_ON_ERROR
4857                         DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
4858 #endif
4859                         return 0;
4860                 }
4861
4862                 msleep(1);
4863
4864                 if (bp->panic)
4865                         return -EIO;
4866         }
4867
4868         /* timeout! */
4869         BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
4870                   poll ? "polling" : "waiting", state, idx);
4871 #ifdef BNX2X_STOP_ON_ERROR
4872         bnx2x_panic();
4873 #endif
4874
4875         return -EBUSY;
4876 }
4877
4878 void bnx2x_set_eth_mac_addr_e1h(struct bnx2x *bp, int set)
4879 {
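        /* Mark a SET_MAC ramrod as pending before posting it; the
         * slowpath event handler (bnx2x_sp_event()) drops the counter
         * back to zero on completion, which is the state 0 that
         * bnx2x_wait_ramrod() polls for below. */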
4880         bp->set_mac_pending++;
4881         smp_wmb();
4882
4883         bnx2x_set_mac_addr_e1h_gen(bp, set, bp->dev->dev_addr,
4884                                    (1 << bp->fp->cl_id), BP_FUNC(bp));
4885
4886         /* Wait for a completion */
4887         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4888 }
4889
4890 void bnx2x_set_eth_mac_addr_e1(struct bnx2x *bp, int set)
4891 {
4892         bp->set_mac_pending++;
4893         smp_wmb();
4894
4895         bnx2x_set_mac_addr_e1_gen(bp, set, bp->dev->dev_addr,
4896                                   (1 << bp->fp->cl_id), (BP_PORT(bp) ? 32 : 0),
4897                                   1);
4898
4899         /* Wait for a completion */
4900         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4901 }
4902
4903 #ifdef BCM_CNIC
/**
 * Set iSCSI MAC(s) at the next entries in the CAM after the ETH
 * MAC(s). This function will wait until the ramrod completion
 * returns.
 *
 * @param bp driver handle
 * @param set set or clear the CAM entry
 *
 * @return 0 if success, -ENODEV if the ramrod doesn't return.
 */
4914 int bnx2x_set_iscsi_eth_mac_addr(struct bnx2x *bp, int set)
4915 {
4916         u32 cl_bit_vec = (1 << BCM_ISCSI_ETH_CL_ID);
4917
4918         bp->set_mac_pending++;
4919         smp_wmb();
4920
4921         /* Send a SET_MAC ramrod */
4922         if (CHIP_IS_E1(bp))
4923                 bnx2x_set_mac_addr_e1_gen(bp, set, bp->iscsi_mac,
4924                                   cl_bit_vec, (BP_PORT(bp) ? 32 : 0) + 2,
4925                                   1);
4926         else
                /* CAM allocation for E1H
                 * unicasts: by func number
                 * multicast: 20+FUNC*20, 20 each
                 */
4931                 bnx2x_set_mac_addr_e1h_gen(bp, set, bp->iscsi_mac,
4932                                    cl_bit_vec, E1H_FUNC_MAX + BP_FUNC(bp));
4933
4934         /* Wait for a completion when setting */
4935         bnx2x_wait_ramrod(bp, 0, 0, &bp->set_mac_pending, set ? 0 : 1);
4936
4937         return 0;
4938 }
4939 #endif
4940
4941 int bnx2x_setup_leading(struct bnx2x *bp)
4942 {
4943         int rc;
4944
4945         /* reset IGU state */
4946         bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4947
4948         /* SETUP ramrod */
4949         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
4950
4951         /* Wait for completion */
4952         rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
4953
4954         return rc;
4955 }
4956
4957 int bnx2x_setup_multi(struct bnx2x *bp, int index)
4958 {
4959         struct bnx2x_fastpath *fp = &bp->fp[index];
4960
4961         /* reset IGU state */
4962         bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4963
4964         /* SETUP ramrod */
4965         fp->state = BNX2X_FP_STATE_OPENING;
4966         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
4967                       fp->cl_id, 0);
4968
4969         /* Wait for completion */
4970         return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
4971                                  &(fp->state), 0);
4972 }
4973
4974
4975 void bnx2x_set_num_queues_msix(struct bnx2x *bp)
4976 {
4977
4978         switch (bp->multi_mode) {
4979         case ETH_RSS_MODE_DISABLED:
4980                 bp->num_queues = 1;
4981                 break;
4982
4983         case ETH_RSS_MODE_REGULAR:
4984                 if (num_queues)
4985                         bp->num_queues = min_t(u32, num_queues,
4986                                                   BNX2X_MAX_QUEUES(bp));
4987                 else
4988                         bp->num_queues = min_t(u32, num_online_cpus(),
4989                                                   BNX2X_MAX_QUEUES(bp));
4990                 break;
4991
4992
4993         default:
4994                 bp->num_queues = 1;
4995                 break;
4996         }
4997 }
4998
4999
5000
5001 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
5002 {
5003         struct bnx2x_fastpath *fp = &bp->fp[index];
5004         int rc;
5005
5006         /* halt the connection */
5007         fp->state = BNX2X_FP_STATE_HALTING;
5008         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
5009
5010         /* Wait for completion */
5011         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
5012                                &(fp->state), 1);
5013         if (rc) /* timeout */
5014                 return rc;
5015
5016         /* delete cfc entry */
5017         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
5018
5019         /* Wait for completion */
5020         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
5021                                &(fp->state), 1);
5022         return rc;
5023 }
5024
5025 static int bnx2x_stop_leading(struct bnx2x *bp)
5026 {
5027         __le16 dsb_sp_prod_idx;
5028         /* if the other port is handling traffic,
5029            this can take a lot of time */
5030         int cnt = 500;
5031         int rc;
5032
5033         might_sleep();
5034
5035         /* Send HALT ramrod */
5036         bp->fp[0].state = BNX2X_FP_STATE_HALTING;
5037         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
5038
5039         /* Wait for completion */
5040         rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
5041                                &(bp->fp[0].state), 1);
5042         if (rc) /* timeout */
5043                 return rc;
5044
5045         dsb_sp_prod_idx = *bp->dsb_sp_prod;
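        /* Snapshot the default status block's slowpath producer; the
         * PORT_DEL completion below is detected by this producer
         * advancing. */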
5046
5047         /* Send PORT_DELETE ramrod */
5048         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
5049
        /* Wait for the completion to arrive on the default status block;
           we are going to reset the chip anyway,
           so there is not much to do if this times out
         */
5054         while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
5055                 if (!cnt) {
5056                         DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
5057                            "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
5058                            *bp->dsb_sp_prod, dsb_sp_prod_idx);
5059 #ifdef BNX2X_STOP_ON_ERROR
5060                         bnx2x_panic();
5061 #endif
5062                         rc = -EBUSY;
5063                         break;
5064                 }
5065                 cnt--;
5066                 msleep(1);
5067                 rmb(); /* Refresh the dsb_sp_prod */
5068         }
5069         bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
5070         bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
5071
5072         return rc;
5073 }
5074
5075 static void bnx2x_reset_func(struct bnx2x *bp)
5076 {
5077         int port = BP_PORT(bp);
5078         int func = BP_FUNC(bp);
5079         int base, i;
5080
5081         /* Configure IGU */
5082         REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
5083         REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
5084
5085 #ifdef BCM_CNIC
5086         /* Disable Timer scan */
5087         REG_WR(bp, TM_REG_EN_LINEAR0_TIMER + port*4, 0);
5088         /*
5089          * Wait for at least 10ms and up to 2 second for the timers scan to
5090          * complete
5091          */
5092         for (i = 0; i < 200; i++) {
5093                 msleep(10);
5094                 if (!REG_RD(bp, TM_REG_LIN0_SCAN_ON + port*4))
5095                         break;
5096         }
5097 #endif
5098         /* Clear ILT */
5099         base = FUNC_ILT_BASE(func);
5100         for (i = base; i < base + ILT_PER_FUNC; i++)
5101                 bnx2x_ilt_wr(bp, i, 0);
5102 }
5103
5104 static void bnx2x_reset_port(struct bnx2x *bp)
5105 {
5106         int port = BP_PORT(bp);
5107         u32 val;
5108
5109         REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5110
5111         /* Do not rcv packets to BRB */
5112         REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
5113         /* Do not direct rcv packets that are not for MCP to the BRB */
5114         REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
5115                            NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5116
5117         /* Configure AEU */
5118         REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
5119
5120         msleep(100);
5121         /* Check for BRB port occupancy */
5122         val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
5123         if (val)
5124                 DP(NETIF_MSG_IFDOWN,
5125                    "BRB1 is not empty  %d blocks are occupied\n", val);
5126
5127         /* TODO: Close Doorbell port? */
5128 }
5129
5130 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
5131 {
5132         DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
5133            BP_FUNC(bp), reset_code);
5134
5135         switch (reset_code) {
5136         case FW_MSG_CODE_DRV_UNLOAD_COMMON:
5137                 bnx2x_reset_port(bp);
5138                 bnx2x_reset_func(bp);
5139                 bnx2x_reset_common(bp);
5140                 break;
5141
5142         case FW_MSG_CODE_DRV_UNLOAD_PORT:
5143                 bnx2x_reset_port(bp);
5144                 bnx2x_reset_func(bp);
5145                 break;
5146
5147         case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
5148                 bnx2x_reset_func(bp);
5149                 break;
5150
5151         default:
5152                 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
5153                 break;
5154         }
5155 }
5156
5157 void bnx2x_chip_cleanup(struct bnx2x *bp, int unload_mode)
5158 {
5159         int port = BP_PORT(bp);
5160         u32 reset_code = 0;
5161         int i, cnt, rc;
5162
5163         /* Wait until tx fastpath tasks complete */
5164         for_each_queue(bp, i) {
5165                 struct bnx2x_fastpath *fp = &bp->fp[i];
5166
5167                 cnt = 1000;
5168                 while (bnx2x_has_tx_work_unload(fp)) {
5169
5170                         bnx2x_tx_int(fp);
5171                         if (!cnt) {
5172                                 BNX2X_ERR("timeout waiting for queue[%d]\n",
5173                                           i);
5174 #ifdef BNX2X_STOP_ON_ERROR
5175                                 bnx2x_panic();
                                return; /* void function - just bail after the panic */
5177 #else
5178                                 break;
5179 #endif
5180                         }
5181                         cnt--;
5182                         msleep(1);
5183                 }
5184         }
5185         /* Give HW time to discard old tx messages */
5186         msleep(1);
5187
5188         if (CHIP_IS_E1(bp)) {
5189                 struct mac_configuration_cmd *config =
5190                                                 bnx2x_sp(bp, mcast_config);
5191
5192                 bnx2x_set_eth_mac_addr_e1(bp, 0);
5193
5194                 for (i = 0; i < config->hdr.length; i++)
5195                         CAM_INVALIDATE(config->config_table[i]);
5196
5197                 config->hdr.length = i;
5198                 if (CHIP_REV_IS_SLOW(bp))
5199                         config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
5200                 else
5201                         config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
5202                 config->hdr.client_id = bp->fp->cl_id;
5203                 config->hdr.reserved1 = 0;
5204
5205                 bp->set_mac_pending++;
5206                 smp_wmb();
5207
5208                 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
5209                               U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
5210                               U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
5211
5212         } else { /* E1H */
5213                 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
5214
5215                 bnx2x_set_eth_mac_addr_e1h(bp, 0);
5216
5217                 for (i = 0; i < MC_HASH_SIZE; i++)
5218                         REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
5219
5220                 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
5221         }
5222 #ifdef BCM_CNIC
5223         /* Clear iSCSI L2 MAC */
5224         mutex_lock(&bp->cnic_mutex);
5225         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
5226                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
5227                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
5228         }
5229         mutex_unlock(&bp->cnic_mutex);
5230 #endif
5231
5232         if (unload_mode == UNLOAD_NORMAL)
5233                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5234
5235         else if (bp->flags & NO_WOL_FLAG)
5236                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
5237
5238         else if (bp->wol) {
5239                 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
5240                 u8 *mac_addr = bp->dev->dev_addr;
5241                 u32 val;
5242                 /* The mac address is written to entries 1-4 to
5243                    preserve entry 0 which is used by the PMF */
5244                 u8 entry = (BP_E1HVN(bp) + 1)*8;
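                /* Each MAC_MATCH entry is 8 bytes wide: the first word
                 * takes the top two MAC bytes, the word at entry + 4
                 * the remaining four. */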
5245
5246                 val = (mac_addr[0] << 8) | mac_addr[1];
5247                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
5248
5249                 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
5250                       (mac_addr[4] << 8) | mac_addr[5];
5251                 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
5252
5253                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
5254
5255         } else
5256                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5257
5258         /* Close multi and leading connections
5259            Completions for ramrods are collected in a synchronous way */
5260         for_each_nondefault_queue(bp, i)
5261                 if (bnx2x_stop_multi(bp, i))
5262                         goto unload_error;
5263
5264         rc = bnx2x_stop_leading(bp);
5265         if (rc) {
5266                 BNX2X_ERR("Stop leading failed!\n");
5267 #ifdef BNX2X_STOP_ON_ERROR
                return; /* bnx2x_chip_cleanup() is void */
5269 #else
5270                 goto unload_error;
5271 #endif
5272         }
5273
5274 unload_error:
5275         if (!BP_NOMCP(bp))
5276                 reset_code = bnx2x_fw_command(bp, reset_code, 0);
5277         else {
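                /* No MCP: emulate its bookkeeping locally. load_count[0]
                 * is the device-global load count, load_count[1 + port]
                 * the per-port count; the last function down triggers the
                 * COMMON reset, the last one on a port the PORT reset. */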
5278                 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
5279                    load_count[0], load_count[1], load_count[2]);
5280                 load_count[0]--;
5281                 load_count[1 + port]--;
5282                 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
5283                    load_count[0], load_count[1], load_count[2]);
5284                 if (load_count[0] == 0)
5285                         reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
5286                 else if (load_count[1 + port] == 0)
5287                         reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
5288                 else
5289                         reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
5290         }
5291
5292         if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
5293             (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
5294                 bnx2x__link_reset(bp);
5295
5296         /* Reset the chip */
5297         bnx2x_reset_chip(bp, reset_code);
5298
5299         /* Report UNLOAD_DONE to MCP */
5300         if (!BP_NOMCP(bp))
5301                 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
5302
5303 }
5304
5305 void bnx2x_disable_close_the_gate(struct bnx2x *bp)
5306 {
5307         u32 val;
5308
5309         DP(NETIF_MSG_HW, "Disabling \"close the gates\"\n");
5310
5311         if (CHIP_IS_E1(bp)) {
5312                 int port = BP_PORT(bp);
5313                 u32 addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5314                         MISC_REG_AEU_MASK_ATTN_FUNC_0;
5315
5316                 val = REG_RD(bp, addr);
5317                 val &= ~(0x300);
5318                 REG_WR(bp, addr, val);
5319         } else if (CHIP_IS_E1H(bp)) {
5320                 val = REG_RD(bp, MISC_REG_AEU_GENERAL_MASK);
5321                 val &= ~(MISC_AEU_GENERAL_MASK_REG_AEU_PXP_CLOSE_MASK |
5322                          MISC_AEU_GENERAL_MASK_REG_AEU_NIG_CLOSE_MASK);
5323                 REG_WR(bp, MISC_REG_AEU_GENERAL_MASK, val);
5324         }
5325 }
5326
5327
5328 /* Close gates #2, #3 and #4: */
5329 static void bnx2x_set_234_gates(struct bnx2x *bp, bool close)
5330 {
5331         u32 val, addr;
5332
5333         /* Gates #2 and #4a are closed/opened for "not E1" only */
5334         if (!CHIP_IS_E1(bp)) {
5335                 /* #4 */
5336                 val = REG_RD(bp, PXP_REG_HST_DISCARD_DOORBELLS);
5337                 REG_WR(bp, PXP_REG_HST_DISCARD_DOORBELLS,
5338                        close ? (val | 0x1) : (val & (~(u32)1)));
5339                 /* #2 */
5340                 val = REG_RD(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES);
5341                 REG_WR(bp, PXP_REG_HST_DISCARD_INTERNAL_WRITES,
5342                        close ? (val | 0x1) : (val & (~(u32)1)));
5343         }
5344
        /* #3 - the HC config enable bit; note the inverted polarity
         * relative to the PXP discard bits above: closing the gate
         * clears bit 0, opening it sets bit 0 */
5346         addr = BP_PORT(bp) ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
5347         val = REG_RD(bp, addr);
5348         REG_WR(bp, addr, (!close) ? (val | 0x1) : (val & (~(u32)1)));
5349
5350         DP(NETIF_MSG_HW, "%s gates #2, #3 and #4\n",
5351                 close ? "closing" : "opening");
5352         mmiowb();
5353 }
5354
5355 #define SHARED_MF_CLP_MAGIC  0x80000000 /* `magic' bit */
5356
5357 static void bnx2x_clp_reset_prep(struct bnx2x *bp, u32 *magic_val)
5358 {
        /* Save the current CLP `magic' bit and set it so the MF
         * configuration survives the MCP reset */
5360         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5361         *magic_val = val & SHARED_MF_CLP_MAGIC;
5362         MF_CFG_WR(bp, shared_mf_config.clp_mb, val | SHARED_MF_CLP_MAGIC);
5363 }
5364
/* Restore the value of the `magic' bit.
 *
 * @param bp Driver handle.
 * @param magic_val Old value of the `magic' bit.
 */
5370 static void bnx2x_clp_reset_done(struct bnx2x *bp, u32 magic_val)
5371 {
5372         /* Restore the `magic' bit value... */
5376         u32 val = MF_CFG_RD(bp, shared_mf_config.clp_mb);
5377         MF_CFG_WR(bp, shared_mf_config.clp_mb,
5378                 (val & (~SHARED_MF_CLP_MAGIC)) | magic_val);
5379 }
5380
5381 /* Prepares for MCP reset: takes care of CLP configurations.
5382  *
5383  * @param bp
5384  * @param magic_val Old value of 'magic' bit.
5385  */
5386 static void bnx2x_reset_mcp_prep(struct bnx2x *bp, u32 *magic_val)
5387 {
5388         u32 shmem;
5389         u32 validity_offset;
5390
5391         DP(NETIF_MSG_HW, "Starting\n");
5392
5393         /* Set `magic' bit in order to save MF config */
5394         if (!CHIP_IS_E1(bp))
5395                 bnx2x_clp_reset_prep(bp, magic_val);
5396
5397         /* Get shmem offset */
5398         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5399         validity_offset = offsetof(struct shmem_region, validity_map[0]);
5400
5401         /* Clear validity map flags */
5402         if (shmem > 0)
5403                 REG_WR(bp, shmem + validity_offset, 0);
5404 }
5405
5406 #define MCP_TIMEOUT      5000   /* 5 seconds (in ms) */
5407 #define MCP_ONE_TIMEOUT  100    /* 100 ms */
5408
5409 /* Waits for MCP_ONE_TIMEOUT or MCP_ONE_TIMEOUT*10,
5410  * depending on the HW type.
5411  *
5412  * @param bp
5413  */
5414 static inline void bnx2x_mcp_wait_one(struct bnx2x *bp)
5415 {
5416         /* special handling for emulation and FPGA,
5417            wait 10 times longer */
5418         if (CHIP_REV_IS_SLOW(bp))
5419                 msleep(MCP_ONE_TIMEOUT*10);
5420         else
5421                 msleep(MCP_ONE_TIMEOUT);
5422 }
5423
5424 static int bnx2x_reset_mcp_comp(struct bnx2x *bp, u32 magic_val)
5425 {
5426         u32 shmem, cnt, validity_offset, val;
5427         int rc = 0;
5428
5429         msleep(100);
5430
5431         /* Get shmem offset */
5432         shmem = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5433         if (shmem == 0) {
                BNX2X_ERR("Shmem base address reads as 0\n");
5435                 rc = -ENOTTY;
5436                 goto exit_lbl;
5437         }
5438
5439         validity_offset = offsetof(struct shmem_region, validity_map[0]);
5440
5441         /* Wait for MCP to come up */
5442         for (cnt = 0; cnt < (MCP_TIMEOUT / MCP_ONE_TIMEOUT); cnt++) {
                /* TBD: it's best to check the validity map of the last port;
                 * currently this checks port 0.
5445                  */
5446                 val = REG_RD(bp, shmem + validity_offset);
5447                 DP(NETIF_MSG_HW, "shmem 0x%x validity map(0x%x)=0x%x\n", shmem,
5448                    shmem + validity_offset, val);
5449
5450                 /* check that shared memory is valid. */
5451                 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5452                     == (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5453                         break;
5454
5455                 bnx2x_mcp_wait_one(bp);
5456         }
5457
5458         DP(NETIF_MSG_HW, "Cnt=%d Shmem validity map 0x%x\n", cnt, val);
5459
5460         /* Check that shared memory is valid. This indicates that MCP is up. */
5461         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) !=
5462             (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB)) {
                BNX2X_ERR("Shmem signature not present. MCP is not up!\n");
5464                 rc = -ENOTTY;
5465                 goto exit_lbl;
5466         }
5467
5468 exit_lbl:
5469         /* Restore the `magic' bit value */
5470         if (!CHIP_IS_E1(bp))
5471                 bnx2x_clp_reset_done(bp, magic_val);
5472
5473         return rc;
5474 }
5475
5476 static void bnx2x_pxp_prep(struct bnx2x *bp)
5477 {
5478         if (!CHIP_IS_E1(bp)) {
5479                 REG_WR(bp, PXP2_REG_RD_START_INIT, 0);
5480                 REG_WR(bp, PXP2_REG_RQ_RBC_DONE, 0);
5481                 REG_WR(bp, PXP2_REG_RQ_CFG_DONE, 0);
5482                 mmiowb();
5483         }
5484 }
5485
5486 /*
5487  * Reset the whole chip except for:
5488  *      - PCIE core
5489  *      - PCI Glue, PSWHST, PXP/PXP2 RF (all controlled by
5490  *              one reset bit)
5491  *      - IGU
5492  *      - MISC (including AEU)
5493  *      - GRC
5494  *      - RBCN, RBCP
5495  */
5496 static void bnx2x_process_kill_chip_reset(struct bnx2x *bp)
5497 {
5498         u32 not_reset_mask1, reset_mask1, not_reset_mask2, reset_mask2;
5499
5500         not_reset_mask1 =
5501                 MISC_REGISTERS_RESET_REG_1_RST_HC |
5502                 MISC_REGISTERS_RESET_REG_1_RST_PXPV |
5503                 MISC_REGISTERS_RESET_REG_1_RST_PXP;
5504
5505         not_reset_mask2 =
5506                 MISC_REGISTERS_RESET_REG_2_RST_MDIO |
5507                 MISC_REGISTERS_RESET_REG_2_RST_EMAC0_HARD_CORE |
5508                 MISC_REGISTERS_RESET_REG_2_RST_EMAC1_HARD_CORE |
5509                 MISC_REGISTERS_RESET_REG_2_RST_MISC_CORE |
5510                 MISC_REGISTERS_RESET_REG_2_RST_RBCN |
5511                 MISC_REGISTERS_RESET_REG_2_RST_GRC  |
5512                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_RESET_REG_HARD_CORE |
5513                 MISC_REGISTERS_RESET_REG_2_RST_MCP_N_HARD_CORE_RST_B;
5514
5515         reset_mask1 = 0xffffffff;
5516
5517         if (CHIP_IS_E1(bp))
5518                 reset_mask2 = 0xffff;
5519         else
5520                 reset_mask2 = 0x1ffff;
5521
5522         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5523                reset_mask1 & (~not_reset_mask1));
5524         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5525                reset_mask2 & (~not_reset_mask2));
5526
5527         barrier();
5528         mmiowb();
5529
5530         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, reset_mask1);
5531         REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, reset_mask2);
5532         mmiowb();
5533 }
5534
5535 static int bnx2x_process_kill(struct bnx2x *bp)
5536 {
5537         int cnt = 1000;
5538         u32 val = 0;
5539         u32 sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1, pgl_exp_rom2;
5540
5541
5542         /* Empty the Tetris buffer, wait for 1s */
5543         do {
5544                 sr_cnt  = REG_RD(bp, PXP2_REG_RD_SR_CNT);
5545                 blk_cnt = REG_RD(bp, PXP2_REG_RD_BLK_CNT);
5546                 port_is_idle_0 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_0);
5547                 port_is_idle_1 = REG_RD(bp, PXP2_REG_RD_PORT_IS_IDLE_1);
5548                 pgl_exp_rom2 = REG_RD(bp, PXP2_REG_PGL_EXP_ROM2);
5549                 if ((sr_cnt == 0x7e) && (blk_cnt == 0xa0) &&
5550                     ((port_is_idle_0 & 0x1) == 0x1) &&
5551                     ((port_is_idle_1 & 0x1) == 0x1) &&
5552                     (pgl_exp_rom2 == 0xffffffff))
5553                         break;
5554                 msleep(1);
5555         } while (cnt-- > 0);
5556
5557         if (cnt <= 0) {
                DP(NETIF_MSG_HW, "Tetris buffer didn't get empty or there are"
                          " still outstanding read requests after 1s!\n");
5561                 DP(NETIF_MSG_HW, "sr_cnt=0x%08x, blk_cnt=0x%08x,"
5562                           " port_is_idle_0=0x%08x,"
5563                           " port_is_idle_1=0x%08x, pgl_exp_rom2=0x%08x\n",
5564                           sr_cnt, blk_cnt, port_is_idle_0, port_is_idle_1,
5565                           pgl_exp_rom2);
5566                 return -EAGAIN;
5567         }
5568
5569         barrier();
5570
5571         /* Close gates #2, #3 and #4 */
5572         bnx2x_set_234_gates(bp, true);
5573
5574         /* TBD: Indicate that "process kill" is in progress to MCP */
5575
5576         /* Clear "unprepared" bit */
5577         REG_WR(bp, MISC_REG_UNPREPARED, 0);
5578         barrier();
5579
5580         /* Make sure all is written to the chip before the reset */
5581         mmiowb();
5582
5583         /* Wait for 1ms to empty GLUE and PCI-E core queues,
5584          * PSWHST, GRC and PSWRD Tetris buffer.
5585          */
5586         msleep(1);
5587
5588         /* Prepare to chip reset: */
5589         /* MCP */
5590         bnx2x_reset_mcp_prep(bp, &val);
5591
5592         /* PXP */
5593         bnx2x_pxp_prep(bp);
5594         barrier();
5595
5596         /* reset the chip */
5597         bnx2x_process_kill_chip_reset(bp);
5598         barrier();
5599
5600         /* Recover after reset: */
5601         /* MCP */
5602         if (bnx2x_reset_mcp_comp(bp, val))
5603                 return -EAGAIN;
5604
5605         /* PXP */
5606         bnx2x_pxp_prep(bp);
5607
5608         /* Open the gates #2, #3 and #4 */
5609         bnx2x_set_234_gates(bp, false);
5610
5611         /* TBD: IGU/AEU preparation bring back the AEU/IGU to a
5612          * reset state, re-enable attentions. */
5613
5614         return 0;
5615 }
5616
5617 static int bnx2x_leader_reset(struct bnx2x *bp)
5618 {
5619         int rc = 0;
5620         /* Try to recover after the failure */
5621         if (bnx2x_process_kill(bp)) {
5622                 printk(KERN_ERR "%s: Something bad happened! Aii!\n",
5623                        bp->dev->name);
5624                 rc = -EAGAIN;
5625                 goto exit_leader_reset;
5626         }
5627
5628         /* Clear "reset is in progress" bit and update the driver state */
5629         bnx2x_set_reset_done(bp);
5630         bp->recovery_state = BNX2X_RECOVERY_DONE;
5631
5632 exit_leader_reset:
5633         bp->is_leader = 0;
5634         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_RESERVED_08);
5635         smp_wmb();
5636         return rc;
5637 }
5638
5639 /* Assumption: runs under rtnl lock. This together with the fact
5640  * that it's called only from bnx2x_reset_task() ensures that it
5641  * will never be called when netif_running(bp->dev) is false.
5642  */
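/* Recovery state machine in brief: the function that wins the
 * LEADER_LOCK HW lock becomes the leader; it waits in RECOVERY_WAIT
 * until the global load counter drops to zero, then performs the
 * "process kill" chip reset. Non-leaders unload and simply re-load
 * once the leader clears the "reset is in progress" flag.
 */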
5643 static void bnx2x_parity_recover(struct bnx2x *bp)
5644 {
5645         DP(NETIF_MSG_HW, "Handling parity\n");
5646         while (1) {
5647                 switch (bp->recovery_state) {
5648                 case BNX2X_RECOVERY_INIT:
5649                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_INIT\n");
5650                         /* Try to get a LEADER_LOCK HW lock */
5651                         if (bnx2x_trylock_hw_lock(bp,
5652                                 HW_LOCK_RESOURCE_RESERVED_08))
5653                                 bp->is_leader = 1;
5654
5655                         /* Stop the driver */
5656                         /* If interface has been removed - break */
5657                         if (bnx2x_nic_unload(bp, UNLOAD_RECOVERY))
5658                                 return;
5659
5660                         bp->recovery_state = BNX2X_RECOVERY_WAIT;
5661                         /* Ensure "is_leader" and "recovery_state"
5662                          *  update values are seen on other CPUs
5663                          */
5664                         smp_wmb();
5665                         break;
5666
5667                 case BNX2X_RECOVERY_WAIT:
5668                         DP(NETIF_MSG_HW, "State is BNX2X_RECOVERY_WAIT\n");
5669                         if (bp->is_leader) {
5670                                 u32 load_counter = bnx2x_get_load_cnt(bp);
5671                                 if (load_counter) {
5672                                         /* Wait until all other functions get
5673                                          * down.
5674                                          */
5675                                         schedule_delayed_work(&bp->reset_task,
5676                                                                 HZ/10);
5677                                         return;
5678                                 } else {
5679                                         /* If all other functions got down -
5680                                          * try to bring the chip back to
5681                                          * normal. In any case it's an exit
5682                                          * point for a leader.
5683                                          */
5684                                         if (bnx2x_leader_reset(bp) ||
5685                                         bnx2x_nic_load(bp, LOAD_NORMAL)) {
5686                                                 printk(KERN_ERR"%s: Recovery "
5687                                                 "has failed. Power cycle is "
5688                                                 "needed.\n", bp->dev->name);
5689                                                 /* Disconnect this device */
5690                                                 netif_device_detach(bp->dev);
5691                                                 /* Block ifup for all function
5692                                                  * of this ASIC until
5693                                                  * "process kill" or power
5694                                                  * cycle.
5695                                                  */
5696                                                 bnx2x_set_reset_in_progress(bp);
5697                                                 /* Shut down the power */
5698                                                 bnx2x_set_power_state(bp,
5699                                                                 PCI_D3hot);
5700                                                 return;
5701                                         }
5702
5703                                         return;
5704                                 }
5705                         } else { /* non-leader */
5706                                 if (!bnx2x_reset_is_done(bp)) {
5707                                         /* Try to get the LEADER_LOCK HW
5708                                          * lock, since a former leader may
5709                                          * have been unloaded by the user
5710                                          * or may have released leadership
5711                                          * for some other reason.
5712                                          */
5713                                         if (bnx2x_trylock_hw_lock(bp,
5714                                             HW_LOCK_RESOURCE_RESERVED_08)) {
5715                                                 /* I'm a leader now! Restart a
5716                                                  * switch case.
5717                                                  */
5718                                                 bp->is_leader = 1;
5719                                                 break;
5720                                         }
5721
5722                                         schedule_delayed_work(&bp->reset_task,
5723                                                                 HZ/10);
5724                                         return;
5725
5726                                 } else { /* A leader has completed
5727                                           * the "process kill". It's an exit
5728                                           * point for a non-leader.
5729                                           */
5730                                         bnx2x_nic_load(bp, LOAD_NORMAL);
5731                                         bp->recovery_state =
5732                                                 BNX2X_RECOVERY_DONE;
5733                                         smp_wmb();
5734                                         return;
5735                                 }
5736                         }
5737                 default:
5738                         return;
5739                 }
5740         }
5741 }
5742
5743 /* bnx2x_nic_unload() flushes the bnx2x_wq, thus the reset task is
5744  * scheduled on a generic workqueue in order to prevent a deadlock.
5745  */
5746 static void bnx2x_reset_task(struct work_struct *work)
5747 {
5748         struct bnx2x *bp = container_of(work, struct bnx2x, reset_task.work);
5749
5750 #ifdef BNX2X_STOP_ON_ERROR
5751         BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
5752                   " so reset not done to allow debug dump,\n"
5753                   " you will need to reboot when done\n");
5754         return;
5755 #endif
5756
5757         rtnl_lock();
5758
5759         if (!netif_running(bp->dev))
5760                 goto reset_task_exit;
5761
5762         if (unlikely(bp->recovery_state != BNX2X_RECOVERY_DONE))
5763                 bnx2x_parity_recover(bp);
5764         else {
5765                 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
5766                 bnx2x_nic_load(bp, LOAD_NORMAL);
5767         }
5768
5769 reset_task_exit:
5770         rtnl_unlock();
5771 }
5772
5773 /* end of nic load/unload */
5774
5775 /*
5776  * Init service functions
5777  */
5778
5779 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
5780 {
5781         switch (func) {
5782         case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
5783         case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
5784         case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
5785         case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
5786         case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
5787         case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
5788         case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
5789         case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
5790         default:
5791                 BNX2X_ERR("Unsupported function index: %d\n", func);
5792                 return (u32)(-1);
5793         }
5794 }
5795
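/* The PGL "pretend" register makes subsequent GRC accesses appear to
 * come from the function number written to it. bnx2x_undi_int_disable_e1h()
 * uses this to temporarily masquerade as function 0 while disabling
 * interrupts, then restores the original function.
 */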
5796 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
5797 {
5798         u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
5799
5800         /* Flush all outstanding writes */
5801         mmiowb();
5802
5803         /* Pretend to be function 0 */
5804         REG_WR(bp, reg, 0);
5805         /* Flush the GRC transaction (in the chip) */
5806         new_val = REG_RD(bp, reg);
5807         if (new_val != 0) {
5808                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
5809                           new_val);
5810                 BUG();
5811         }
5812
5813         /* From now we are in the "like-E1" mode */
5814         bnx2x_int_disable(bp);
5815
5816         /* Flush all outstanding writes */
5817         mmiowb();
5818
5819                         /* Restore the original function settings */
5820         REG_WR(bp, reg, orig_func);
5821         new_val = REG_RD(bp, reg);
5822         if (new_val != orig_func) {
5823                 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
5824                           orig_func, new_val);
5825                 BUG();
5826         }
5827 }
5828
5829 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
5830 {
5831         if (CHIP_IS_E1H(bp))
5832                 bnx2x_undi_int_disable_e1h(bp, func);
5833         else
5834                 bnx2x_int_disable(bp);
5835 }
5836
5837 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
5838 {
5839         u32 val;
5840
5841         /* Check if there is any driver already loaded */
5842         val = REG_RD(bp, MISC_REG_UNPREPARED);
5843         if (val == 0x1) {
5844                 /* Check if it is the UNDI driver
5845                  * UNDI driver initializes CID offset for normal bell to 0x7
5846                  */
5847                 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5848                 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
5849                 if (val == 0x7) {
5850                         u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5851                         /* save our func */
5852                         int func = BP_FUNC(bp);
5853                         u32 swap_en;
5854                         u32 swap_val;
5855
5856                         /* clear the UNDI indication */
5857                         REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
5858
5859                         BNX2X_DEV_INFO("UNDI is active! reset device\n");
5860
5861                         /* try to unload UNDI on port 0 */
5862                         bp->func = 0;
5863                         bp->fw_seq =
5864                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5865                                 DRV_MSG_SEQ_NUMBER_MASK);
5866                         reset_code = bnx2x_fw_command(bp, reset_code, 0);
5867
5868                         /* if UNDI is loaded on the other port */
5869                         if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
5870
5871                                 /* send "DONE" for previous unload */
5872                                 bnx2x_fw_command(bp,
5873                                                  DRV_MSG_CODE_UNLOAD_DONE, 0);
5874
5875                                 /* unload UNDI on port 1 */
5876                                 bp->func = 1;
5877                                 bp->fw_seq =
5878                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5879                                         DRV_MSG_SEQ_NUMBER_MASK);
5880                                 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
5881
5882                                 bnx2x_fw_command(bp, reset_code, 0);
5883                         }
5884
5885                         /* now it's safe to release the lock */
5886                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5887
5888                         bnx2x_undi_int_disable(bp, func);
5889
5890                         /* close input traffic and wait for it */
5891                         /* Do not rcv packets to BRB */
5892                         REG_WR(bp,
5893                               (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
5894                                              NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
5895                         /* Do not direct rcv packets that are not for MCP to
5896                          * the BRB */
5897                         REG_WR(bp,
5898                                (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
5899                                               NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
5900                         /* clear AEU */
5901                         REG_WR(bp,
5902                              (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
5903                                             MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
5904                         msleep(10);
5905
5906                         /* save NIG port swap info */
5907                         swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
5908                         swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
5909                         /* reset device */
5910                         REG_WR(bp,
5911                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5912                                0xd3ffffff);
5913                         REG_WR(bp,
5914                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
5915                                0x1403);
5916                         /* take the NIG out of reset and restore swap values */
5917                         REG_WR(bp,
5918                                GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5919                                MISC_REGISTERS_RESET_REG_1_RST_NIG);
5920                         REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
5921                         REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
5922
5923                         /* send unload done to the MCP */
5924                         bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE, 0);
5925
5926                         /* restore our func and fw_seq */
5927                         bp->func = func;
5928                         bp->fw_seq =
5929                                (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
5930                                 DRV_MSG_SEQ_NUMBER_MASK);
5931
5932                 } else
5933                         bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
5934         }
5935 }
5936
5937 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
5938 {
5939         u32 val, val2, val3, val4, id;
5940         u16 pmc;
5941
5942         /* Get the chip revision id and number. */
5943         /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
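	/* Illustrative (hypothetical) example: chip num 0x164e, rev 1,
	 * metal 0, bond 0 would yield chip_id 0x164e1000.
	 */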
5944         val = REG_RD(bp, MISC_REG_CHIP_NUM);
5945         id = ((val & 0xffff) << 16);
5946         val = REG_RD(bp, MISC_REG_CHIP_REV);
5947         id |= ((val & 0xf) << 12);
5948         val = REG_RD(bp, MISC_REG_CHIP_METAL);
5949         id |= ((val & 0xff) << 4);
5950         val = REG_RD(bp, MISC_REG_BOND_ID);
5951         id |= (val & 0xf);
5952         bp->common.chip_id = id;
5953         bp->link_params.chip_id = bp->common.chip_id;
5954         BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
5955
5956         val = (REG_RD(bp, 0x2874) & 0x55);
5957         if ((bp->common.chip_id & 0x1) ||
5958             (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
5959                 bp->flags |= ONE_PORT_FLAG;
5960                 BNX2X_DEV_INFO("single port device\n");
5961         }
5962
5963         val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
5964         bp->common.flash_size = (NVRAM_1MB_SIZE <<
5965                                  (val & MCPR_NVM_CFG4_FLASH_SIZE));
5966         BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
5967                        bp->common.flash_size, bp->common.flash_size);
5968
5969         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
5970         bp->common.shmem2_base = REG_RD(bp, MISC_REG_GENERIC_CR_0);
5971         bp->link_params.shmem_base = bp->common.shmem_base;
5972         bp->link_params.shmem2_base = bp->common.shmem2_base;
5973         BNX2X_DEV_INFO("shmem offset 0x%x  shmem2 offset 0x%x\n",
5974                        bp->common.shmem_base, bp->common.shmem2_base);
5975
5976         if (!bp->common.shmem_base ||
5977             (bp->common.shmem_base < 0xA0000) ||
5978             (bp->common.shmem_base >= 0xC0000)) {
5979                 BNX2X_DEV_INFO("MCP not active\n");
5980                 bp->flags |= NO_MCP_FLAG;
5981                 return;
5982         }
5983
5984         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
5985         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5986                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
5987                 BNX2X_ERROR("BAD MCP validity signature\n");
5988
5989         bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
5990         BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
5991
5992         bp->link_params.hw_led_mode = ((bp->common.hw_config &
5993                                         SHARED_HW_CFG_LED_MODE_MASK) >>
5994                                        SHARED_HW_CFG_LED_MODE_SHIFT);
5995
5996         bp->link_params.feature_config_flags = 0;
5997         val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
5998         if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
5999                 bp->link_params.feature_config_flags |=
6000                                 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6001         else
6002                 bp->link_params.feature_config_flags &=
6003                                 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
6004
6005         val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
6006         bp->common.bc_ver = val;
6007         BNX2X_DEV_INFO("bc_ver %X\n", val);
6008         if (val < BNX2X_BC_VER) {
6009                 /* for now only warn
6010                  * later we might need to enforce this */
6011                 BNX2X_ERROR("This driver needs bc_ver %X but found %X, "
6012                             "please upgrade BC\n", BNX2X_BC_VER, val);
6013         }
6014         bp->link_params.feature_config_flags |=
6015                                 (val >= REQ_BC_VER_4_VRFY_FIRST_PHY_OPT_MDL) ?
6016                 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
6017         bp->link_params.feature_config_flags |=
6018                 (val >= REQ_BC_VER_4_VRFY_SPECIFIC_PHY_OPT_MDL) ?
6019                 FEATURE_CONFIG_BC_SUPPORTS_DUAL_PHY_OPT_MDL_VRFY : 0;
6020
6021         if (BP_E1HVN(bp) == 0) {
6022                 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
6023                 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
6024         } else {
6025                 /* no WOL capability for E1HVN != 0 */
6026                 bp->flags |= NO_WOL_FLAG;
6027         }
6028         BNX2X_DEV_INFO("%sWoL capable\n",
6029                        (bp->flags & NO_WOL_FLAG) ? "not " : "");
6030
6031         val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
6032         val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
6033         val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
6034         val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
6035
6036         dev_info(&bp->pdev->dev, "part number %X-%X-%X-%X\n",
6037                  val, val2, val3, val4);
6038 }
6039
6040 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
6041                                                     u32 switch_cfg)
6042 {
6043         int cfg_size = 0, idx, port = BP_PORT(bp);
6044
6045         /* Aggregation of supported attributes of all external phys */
6046         bp->port.supported[0] = 0;
6047         bp->port.supported[1] = 0;
6048         switch (bp->link_params.num_phys) {
6049         case 1:
6050                 bp->port.supported[0] = bp->link_params.phy[INT_PHY].supported;
6051                 cfg_size = 1;
6052                 break;
6053         case 2:
6054                 bp->port.supported[0] = bp->link_params.phy[EXT_PHY1].supported;
6055                 cfg_size = 1;
6056                 break;
6057         case 3:
6058                 if (bp->link_params.multi_phy_config &
6059                     PORT_HW_CFG_PHY_SWAPPED_ENABLED) {
6060                         bp->port.supported[1] =
6061                                 bp->link_params.phy[EXT_PHY1].supported;
6062                         bp->port.supported[0] =
6063                                 bp->link_params.phy[EXT_PHY2].supported;
6064                 } else {
6065                         bp->port.supported[0] =
6066                                 bp->link_params.phy[EXT_PHY1].supported;
6067                         bp->port.supported[1] =
6068                                 bp->link_params.phy[EXT_PHY2].supported;
6069                 }
6070                 cfg_size = 2;
6071                 break;
6072         }
6073
6074         if (!(bp->port.supported[0] || bp->port.supported[1])) {
6075                 BNX2X_ERR("NVRAM config error. BAD phy config. "
6076                           "PHY1 config 0x%x, PHY2 config 0x%x\n",
6077                            SHMEM_RD(bp,
6078                            dev_info.port_hw_config[port].external_phy_config),
6079                            SHMEM_RD(bp,
6080                            dev_info.port_hw_config[port].external_phy_config2));
6081                 return;
6082         }
6083
6084         switch (switch_cfg) {
6085         case SWITCH_CFG_1G:
6086                 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
6087                                            port*0x10);
6088                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6089                 break;
6090
6091         case SWITCH_CFG_10G:
6092                 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
6093                                            port*0x18);
6094                 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
6095
6096                 break;
6097
6098         default:
6099                 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
6100                           bp->port.link_config[0]);
6101                 return;
6102         }
6103         /* mask what we support according to speed_cap_mask per configuration */
6104         for (idx = 0; idx < cfg_size; idx++) {
6105                 if (!(bp->link_params.speed_cap_mask[idx] &
6106                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
6107                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Half;
6108
6109                 if (!(bp->link_params.speed_cap_mask[idx] &
6110                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
6111                         bp->port.supported[idx] &= ~SUPPORTED_10baseT_Full;
6112
6113                 if (!(bp->link_params.speed_cap_mask[idx] &
6114                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
6115                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Half;
6116
6117                 if (!(bp->link_params.speed_cap_mask[idx] &
6118                                 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
6119                         bp->port.supported[idx] &= ~SUPPORTED_100baseT_Full;
6120
6121                 if (!(bp->link_params.speed_cap_mask[idx] &
6122                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
6123                         bp->port.supported[idx] &= ~(SUPPORTED_1000baseT_Half |
6124                                         SUPPORTED_1000baseT_Full);
6125
6126                 if (!(bp->link_params.speed_cap_mask[idx] &
6127                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
6128                         bp->port.supported[idx] &= ~SUPPORTED_2500baseX_Full;
6129
6130                 if (!(bp->link_params.speed_cap_mask[idx] &
6131                                         PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
6132                         bp->port.supported[idx] &= ~SUPPORTED_10000baseT_Full;
6133
6134         }
6135
6136         BNX2X_DEV_INFO("supported 0x%x 0x%x\n", bp->port.supported[0],
6137                        bp->port.supported[1]);
6138 }
6139
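/* Translate the NVRAM link_config word of each phy configuration into
 * the requested line speed, duplex, flow control and advertised modes,
 * falling back to autoneg on a bad speed selection.
 */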
6140 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
6141 {
6142         u32 link_config, idx, cfg_size = 0;
6143         bp->port.advertising[0] = 0;
6144         bp->port.advertising[1] = 0;
6145         switch (bp->link_params.num_phys) {
6146         case 1:
6147         case 2:
6148                 cfg_size = 1;
6149                 break;
6150         case 3:
6151                 cfg_size = 2;
6152                 break;
6153         }
6154         for (idx = 0; idx < cfg_size; idx++) {
6155                 bp->link_params.req_duplex[idx] = DUPLEX_FULL;
6156                 link_config = bp->port.link_config[idx];
6157                 switch (link_config & PORT_FEATURE_LINK_SPEED_MASK) {
6158                 case PORT_FEATURE_LINK_SPEED_AUTO:
6159                         if (bp->port.supported[idx] & SUPPORTED_Autoneg) {
6160                                 bp->link_params.req_line_speed[idx] =
6161                                         SPEED_AUTO_NEG;
6162                                 bp->port.advertising[idx] |=
6163                                         bp->port.supported[idx];
6164                         } else {
6165                                 /* force 10G, no AN */
6166                                 bp->link_params.req_line_speed[idx] =
6167                                         SPEED_10000;
6168                                 bp->port.advertising[idx] |=
6169                                         (ADVERTISED_10000baseT_Full |
6170                                          ADVERTISED_FIBRE);
6171                                 continue;
6172                         }
6173                         break;
6174
6175                 case PORT_FEATURE_LINK_SPEED_10M_FULL:
6176                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Full) {
6177                                 bp->link_params.req_line_speed[idx] =
6178                                         SPEED_10;
6179                                 bp->port.advertising[idx] |=
6180                                         (ADVERTISED_10baseT_Full |
6181                                          ADVERTISED_TP);
6182                         } else {
6183                                 BNX2X_ERROR("NVRAM config error. "
6184                                             "Invalid link_config 0x%x"
6185                                             "  speed_cap_mask 0x%x\n",
6186                                             link_config,
6187                                             bp->link_params.speed_cap_mask[idx]);
6188                                 return;
6189                         }
6190                         break;
6191
6192                 case PORT_FEATURE_LINK_SPEED_10M_HALF:
6193                         if (bp->port.supported[idx] & SUPPORTED_10baseT_Half) {
6194                                 bp->link_params.req_line_speed[idx] =
6195                                         SPEED_10;
6196                                 bp->link_params.req_duplex[idx] =
6197                                         DUPLEX_HALF;
6198                                 bp->port.advertising[idx] |=
6199                                         (ADVERTISED_10baseT_Half |
6200                                          ADVERTISED_TP);
6201                         } else {
6202                                 BNX2X_ERROR("NVRAM config error. "
6203                                             "Invalid link_config 0x%x"
6204                                             "  speed_cap_mask 0x%x\n",
6205                                             link_config,
6206                                             bp->link_params.speed_cap_mask[idx]);
6207                                 return;
6208                         }
6209                         break;
6210
6211                 case PORT_FEATURE_LINK_SPEED_100M_FULL:
6212                         if (bp->port.supported[idx] & SUPPORTED_100baseT_Full) {
6213                                 bp->link_params.req_line_speed[idx] =
6214                                         SPEED_100;
6215                                 bp->port.advertising[idx] |=
6216                                         (ADVERTISED_100baseT_Full |
6217                                          ADVERTISED_TP);
6218                         } else {
6219                                 BNX2X_ERROR("NVRAM config error. "
6220                                             "Invalid link_config 0x%x"
6221                                             "  speed_cap_mask 0x%x\n",
6222                                             link_config,
6223                                             bp->link_params.speed_cap_mask[idx]);
6224                                 return;
6225                         }
6226                         break;
6227
6228                 case PORT_FEATURE_LINK_SPEED_100M_HALF:
6229                         if (bp->port.supported[idx] & SUPPORTED_100baseT_Half) {
6230                                 bp->link_params.req_line_speed[idx] = SPEED_100;
6231                                 bp->link_params.req_duplex[idx] = DUPLEX_HALF;
6232                                 bp->port.advertising[idx] |=
6233                                         (ADVERTISED_100baseT_Half |
6234                                          ADVERTISED_TP);
6235                         } else {
6236                                 BNX2X_ERROR("NVRAM config error. "
6237                                             "Invalid link_config 0x%x"
6238                                             "  speed_cap_mask 0x%x\n",
6239                                             link_config,
6240                                             bp->link_params.speed_cap_mask[idx]);
6241                                 return;
6242                         }
6243                         break;
6244
6245                 case PORT_FEATURE_LINK_SPEED_1G:
6246                         if (bp->port.supported[idx] &
6247                             SUPPORTED_1000baseT_Full) {
6248                                 bp->link_params.req_line_speed[idx] =
6249                                         SPEED_1000;
6250                                 bp->port.advertising[idx] |=
6251                                         (ADVERTISED_1000baseT_Full |
6252                                          ADVERTISED_TP);
6253                         } else {
6254                                 BNX2X_ERROR("NVRAM config error. "
6255                                             "Invalid link_config 0x%x"
6256                                             "  speed_cap_mask 0x%x\n",
6257                                             link_config,
6258                                             bp->link_params.speed_cap_mask[idx]);
6259                                 return;
6260                         }
6261                         break;
6262
6263                 case PORT_FEATURE_LINK_SPEED_2_5G:
6264                         if (bp->port.supported[idx] &
6265                             SUPPORTED_2500baseX_Full) {
6266                                 bp->link_params.req_line_speed[idx] =
6267                                         SPEED_2500;
6268                                 bp->port.advertising[idx] |=
6269                                         (ADVERTISED_2500baseX_Full |
6270                                          ADVERTISED_TP);
6271                         } else {
6272                                 BNX2X_ERROR("NVRAM config error. "
6273                                             "Invalid link_config 0x%x"
6274                                             "  speed_cap_mask 0x%x\n",
6275                                             link_config,
6276                                             bp->link_params.speed_cap_mask[idx]);
6277                                 return;
6278                         }
6279                         break;
6280
6281                 case PORT_FEATURE_LINK_SPEED_10G_CX4:
6282                 case PORT_FEATURE_LINK_SPEED_10G_KX4:
6283                 case PORT_FEATURE_LINK_SPEED_10G_KR:
6284                         if (bp->port.supported[idx] &
6285                             SUPPORTED_10000baseT_Full) {
6286                                 bp->link_params.req_line_speed[idx] =
6287                                         SPEED_10000;
6288                                 bp->port.advertising[idx] |=
6289                                         (ADVERTISED_10000baseT_Full |
6290                                          ADVERTISED_FIBRE);
6291                         } else {
6292                                 BNX2X_ERROR("NVRAM config error. "
6293                                             "Invalid link_config 0x%x"
6294                                             "  speed_cap_mask 0x%x\n",
6295                                             link_config,
6296                                             bp->link_params.speed_cap_mask[idx]);
6297                                 return;
6298                         }
6299                         break;
6300
6301                 default:
6302                         BNX2X_ERROR("NVRAM config error. "
6303                                     "BAD link speed link_config 0x%x\n",
6304                                     link_config);
6305                         bp->link_params.req_line_speed[idx] = SPEED_AUTO_NEG;
6306                         bp->port.advertising[idx] = bp->port.supported[idx];
6307                         break;
6308                 }
6309
6310                 bp->link_params.req_flow_ctrl[idx] = (link_config &
6311                                          PORT_FEATURE_FLOW_CONTROL_MASK);
6312                 if ((bp->link_params.req_flow_ctrl[idx] ==
6313                      BNX2X_FLOW_CTRL_AUTO) &&
6314                     !(bp->port.supported[idx] & SUPPORTED_Autoneg)) {
6315                         bp->link_params.req_flow_ctrl[idx] =
6316                                 BNX2X_FLOW_CTRL_NONE;
6317                 }
6318
6319                 BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d req_flow_ctrl"
6320                                " 0x%x advertising 0x%x\n",
6321                                bp->link_params.req_line_speed[idx],
6322                                bp->link_params.req_duplex[idx],
6323                                bp->link_params.req_flow_ctrl[idx],
6324                                bp->port.advertising[idx]);
6325         }
6326 }
6327
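/* Illustrative (hypothetical) example: mac_hi = 0x0002 and
 * mac_lo = 0x03040506 produce the address 00:02:03:04:05:06, since
 * both halves are converted to big-endian before being copied into
 * mac_buf.
 */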
6328 static void __devinit bnx2x_set_mac_buf(u8 *mac_buf, u32 mac_lo, u16 mac_hi)
6329 {
6330         mac_hi = cpu_to_be16(mac_hi);
6331         mac_lo = cpu_to_be32(mac_lo);
6332         memcpy(mac_buf, &mac_hi, sizeof(mac_hi));
6333         memcpy(mac_buf + sizeof(mac_hi), &mac_lo, sizeof(mac_lo));
6334 }
6335
6336 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
6337 {
6338         int port = BP_PORT(bp);
6339         u32 val, val2;
6340         u32 config;
6341         u32 ext_phy_type, ext_phy_config;
6342
6343         bp->link_params.bp = bp;
6344         bp->link_params.port = port;
6345
6346         bp->link_params.lane_config =
6347                 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
6348
6349         bp->link_params.speed_cap_mask[0] =
6350                 SHMEM_RD(bp,
6351                          dev_info.port_hw_config[port].speed_capability_mask);
6352         bp->link_params.speed_cap_mask[1] =
6353                 SHMEM_RD(bp,
6354                          dev_info.port_hw_config[port].speed_capability_mask2);
6355         bp->port.link_config[0] =
6356                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
6357
6358         bp->port.link_config[1] =
6359                 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config2);
6360
6361         bp->link_params.multi_phy_config =
6362                 SHMEM_RD(bp, dev_info.port_hw_config[port].multi_phy_config);
6363         /* If the device is capable of WoL, set the default state according
6364          * to the HW
6365          */
6366         config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
6367         bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
6368                    (config & PORT_FEATURE_WOL_ENABLED));
6369
6370         BNX2X_DEV_INFO("lane_config 0x%08x  "
6371                        "speed_cap_mask0 0x%08x  link_config0 0x%08x\n",
6372                        bp->link_params.lane_config,
6373                        bp->link_params.speed_cap_mask[0],
6374                        bp->port.link_config[0]);
6375
6376         bp->link_params.switch_cfg = (bp->port.link_config[0] &
6377                                        PORT_FEATURE_CONNECTED_SWITCH_MASK);
6378         bnx2x_phy_probe(&bp->link_params);
6379         bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
6380
6381         bnx2x_link_settings_requested(bp);
6382
6383         /*
6384          * If connected directly, work with the internal PHY, otherwise, work
6385          * with the external PHY
6386          */
6387         ext_phy_config =
6388                 SHMEM_RD(bp,
6389                          dev_info.port_hw_config[port].external_phy_config);
6390         ext_phy_type = XGXS_EXT_PHY_TYPE(ext_phy_config);
6391         if (ext_phy_type == PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT)
6392                 bp->mdio.prtad = bp->port.phy_addr;
6393
6394         else if ((ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE) &&
6395                  (ext_phy_type != PORT_HW_CFG_XGXS_EXT_PHY_TYPE_NOT_CONN))
6396                 bp->mdio.prtad =
6397                         XGXS_EXT_PHY_ADDR(ext_phy_config);
6398
6399         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
6400         val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
6401         bnx2x_set_mac_buf(bp->dev->dev_addr, val, val2);
6402         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
6403         memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6404
6405 #ifdef BCM_CNIC
6406         val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_upper);
6407         val = SHMEM_RD(bp, dev_info.port_hw_config[port].iscsi_mac_lower);
6408         bnx2x_set_mac_buf(bp->iscsi_mac, val, val2);
6409 #endif
6410 }
6411
6412 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
6413 {
6414         int func = BP_FUNC(bp);
6415         u32 val, val2;
6416         int rc = 0;
6417
6418         bnx2x_get_common_hwinfo(bp);
6419
6420         bp->e1hov = 0;
6421         bp->e1hmf = 0;
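	/* In E1H multi-function mode each function carries an outer-VLAN
	 * tag (E1HOV); a non-default E1HOV tag on function 0 indicates
	 * multi-function mode.
	 */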
6422         if (CHIP_IS_E1H(bp) && !BP_NOMCP(bp)) {
6423                 bp->mf_config =
6424                         SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
6425
6426                 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[FUNC_0].e1hov_tag) &
6427                        FUNC_MF_CFG_E1HOV_TAG_MASK);
6428                 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT)
6429                         bp->e1hmf = 1;
6430                 BNX2X_DEV_INFO("%s function mode\n",
6431                                IS_E1HMF(bp) ? "multi" : "single");
6432
6433                 if (IS_E1HMF(bp)) {
6434                         val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].
6435                                                                 e1hov_tag) &
6436                                FUNC_MF_CFG_E1HOV_TAG_MASK);
6437                         if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
6438                                 bp->e1hov = val;
6439                                 BNX2X_DEV_INFO("E1HOV for func %d is %d "
6440                                                "(0x%04x)\n",
6441                                                func, bp->e1hov, bp->e1hov);
6442                         } else {
6443                                 BNX2X_ERROR("No valid E1HOV for func %d,"
6444                                             "  aborting\n", func);
6445                                 rc = -EPERM;
6446                         }
6447                 } else {
6448                         if (BP_E1HVN(bp)) {
6449                                 BNX2X_ERROR("VN %d in single function mode,"
6450                                             "  aborting\n", BP_E1HVN(bp));
6451                                 rc = -EPERM;
6452                         }
6453                 }
6454         }
6455
6456         if (!BP_NOMCP(bp)) {
6457                 bnx2x_get_port_hwinfo(bp);
6458
6459                 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
6460                               DRV_MSG_SEQ_NUMBER_MASK);
6461                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
6462         }
6463
6464         if (IS_E1HMF(bp)) {
6465                 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
6466                 val = SHMEM_RD(bp,  mf_cfg.func_mf_config[func].mac_lower);
6467                 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
6468                     (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
6469                         bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
6470                         bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
6471                         bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
6472                         bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
6473                         bp->dev->dev_addr[4] = (u8)(val >> 8  & 0xff);
6474                         bp->dev->dev_addr[5] = (u8)(val & 0xff);
6475                         memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
6476                                ETH_ALEN);
6477                         memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
6478                                ETH_ALEN);
6479                 }
6480
6481                 return rc;
6482         }
6483
6484         if (BP_NOMCP(bp)) {
6485                 /* only supposed to happen on emulation/FPGA */
6486                 BNX2X_ERROR("warning: random MAC workaround active\n");
6487                 random_ether_addr(bp->dev->dev_addr);
6488                 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
6489         }
6490
6491         return rc;
6492 }
6493
6494 static void __devinit bnx2x_read_fwinfo(struct bnx2x *bp)
6495 {
6496         int cnt, i, block_end, rodi;
6497         char vpd_data[BNX2X_VPD_LEN+1];
6498         char str_id_reg[VENDOR_ID_LEN+1];
6499         char str_id_cap[VENDOR_ID_LEN+1];
6500         u8 len;
6501
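	/* VPD walk: locate the read-only LRDT tag, then the MFR_ID
	 * keyword; for Dell-branded boards the VENDOR0 keyword carries
	 * a firmware version string that is copied into bp->fw_ver.
	 */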
6502         cnt = pci_read_vpd(bp->pdev, 0, BNX2X_VPD_LEN, vpd_data);
6503         memset(bp->fw_ver, 0, sizeof(bp->fw_ver));
6504
6505         if (cnt < BNX2X_VPD_LEN)
6506                 goto out_not_found;
6507
6508         i = pci_vpd_find_tag(vpd_data, 0, BNX2X_VPD_LEN,
6509                              PCI_VPD_LRDT_RO_DATA);
6510         if (i < 0)
6511                 goto out_not_found;
6512
6513
6514         block_end = i + PCI_VPD_LRDT_TAG_SIZE +
6515                     pci_vpd_lrdt_size(&vpd_data[i]);
6516
6517         i += PCI_VPD_LRDT_TAG_SIZE;
6518
6519         if (block_end > BNX2X_VPD_LEN)
6520                 goto out_not_found;
6521
6522         rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6523                                    PCI_VPD_RO_KEYWORD_MFR_ID);
6524         if (rodi < 0)
6525                 goto out_not_found;
6526
6527         len = pci_vpd_info_field_size(&vpd_data[rodi]);
6528
6529         if (len != VENDOR_ID_LEN)
6530                 goto out_not_found;
6531
6532         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6533
6534         /* vendor specific info */
6535         snprintf(str_id_reg, VENDOR_ID_LEN + 1, "%04x", PCI_VENDOR_ID_DELL);
6536         snprintf(str_id_cap, VENDOR_ID_LEN + 1, "%04X", PCI_VENDOR_ID_DELL);
6537         if (!strncmp(str_id_reg, &vpd_data[rodi], VENDOR_ID_LEN) ||
6538             !strncmp(str_id_cap, &vpd_data[rodi], VENDOR_ID_LEN)) {
6539
6540                 rodi = pci_vpd_find_info_keyword(vpd_data, i, block_end,
6541                                                 PCI_VPD_RO_KEYWORD_VENDOR0);
6542                 if (rodi >= 0) {
6543                         len = pci_vpd_info_field_size(&vpd_data[rodi]);
6544
6545                         rodi += PCI_VPD_INFO_FLD_HDR_SIZE;
6546
6547                         if (len < 32 && (len + rodi) <= BNX2X_VPD_LEN) {
6548                                 memcpy(bp->fw_ver, &vpd_data[rodi], len);
6549                                 bp->fw_ver[len] = ' ';
6550                         }
6551                 }
6552                 return;
6553         }
6554 out_not_found:
6555         return;
6556 }
6557
6558 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
6559 {
6560         int func = BP_FUNC(bp);
6561         int timer_interval;
6562         int rc;
6563
6564         /* Disable interrupt handling until HW is initialized */
6565         atomic_set(&bp->intr_sem, 1);
6566         smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6567
6568         mutex_init(&bp->port.phy_mutex);
6569         mutex_init(&bp->fw_mb_mutex);
6570         spin_lock_init(&bp->stats_lock);
6571 #ifdef BCM_CNIC
6572         mutex_init(&bp->cnic_mutex);
6573 #endif
6574
6575         INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
6576         INIT_DELAYED_WORK(&bp->reset_task, bnx2x_reset_task);
6577
6578         rc = bnx2x_get_hwinfo(bp);
6579
6580         bnx2x_read_fwinfo(bp);
6581         /* need to reset chip if undi was active */
6582         if (!BP_NOMCP(bp))
6583                 bnx2x_undi_unload(bp);
6584
6585         if (CHIP_REV_IS_FPGA(bp))
6586                 dev_err(&bp->pdev->dev, "FPGA detected\n");
6587
6588         if (BP_NOMCP(bp) && (func == 0))
6589                 dev_err(&bp->pdev->dev, "MCP disabled, "
6590                                         "must load devices in order!\n");
6591
6592         /* Set multi queue mode */
6593         if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
6594             ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
6595                 dev_err(&bp->pdev->dev, "Multi disabled since int_mode "
6596                                         "requested is not MSI-X\n");
6597                 multi_mode = ETH_RSS_MODE_DISABLED;
6598         }
6599         bp->multi_mode = multi_mode;
6600         bp->int_mode = int_mode;
6601
6602         bp->dev->features |= NETIF_F_GRO;
6603
6604         /* Set TPA flags */
6605         if (disable_tpa) {
6606                 bp->flags &= ~TPA_ENABLE_FLAG;
6607                 bp->dev->features &= ~NETIF_F_LRO;
6608         } else {
6609                 bp->flags |= TPA_ENABLE_FLAG;
6610                 bp->dev->features |= NETIF_F_LRO;
6611         }
6612         bp->disable_tpa = disable_tpa;
6613
6614         if (CHIP_IS_E1(bp))
6615                 bp->dropless_fc = 0;
6616         else
6617                 bp->dropless_fc = dropless_fc;
6618
6619         bp->mrrs = mrrs;
6620
6621         bp->tx_ring_size = MAX_TX_AVAIL;
6622
6623         bp->rx_csum = 1;
6624
6625         /* make sure that the numbers are in the right granularity */
6626         bp->tx_ticks = (50 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6627         bp->rx_ticks = (25 / (4 * BNX2X_BTR)) * (4 * BNX2X_BTR);
6628
6629         timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
6630         bp->current_interval = (poll ? poll : timer_interval);
6631
6632         init_timer(&bp->timer);
6633         bp->timer.expires = jiffies + bp->current_interval;
6634         bp->timer.data = (unsigned long) bp;
6635         bp->timer.function = bnx2x_timer;
6636
6637         return rc;
6638 }
6639
6640
6641 /****************************************************************************
6642 * General service functions
6643 ****************************************************************************/
6644
6645 /* called with rtnl_lock */
6646 static int bnx2x_open(struct net_device *dev)
6647 {
6648         struct bnx2x *bp = netdev_priv(dev);
6649
6650         netif_carrier_off(dev);
6651
6652         bnx2x_set_power_state(bp, PCI_D0);
6653
6654         if (!bnx2x_reset_is_done(bp)) {
6655                 do {
6656                         /* Reset the MCP mailbox sequence if there is an
6657                          * ongoing recovery
6658                          */
6659                         bp->fw_seq = 0;
6660
6661                         /* If it's the first function to load and "reset
6662                          * done" is still not cleared, a recovery may be
6663                          * pending. We don't check the attention state:
6664                          * it may have already been cleared by a "common"
6665                          * reset, but we shall proceed with "process kill".
6666                          */
6667                         if ((bnx2x_get_load_cnt(bp) == 0) &&
6668                                 bnx2x_trylock_hw_lock(bp,
6669                                 HW_LOCK_RESOURCE_RESERVED_08) &&
6670                                 (!bnx2x_leader_reset(bp))) {
6671                                 DP(NETIF_MSG_HW, "Recovered in open\n");
6672                                 break;
6673                         }
6674
6675                         bnx2x_set_power_state(bp, PCI_D3hot);
6676
6677                         printk(KERN_ERR"%s: Recovery flow hasn't been properly"
6678                         " completed yet. Try again later. If you still see this"
6679                         " message after a few retries then power cycle is"
6680                         " required.\n", bp->dev->name);
6681
6682                         return -EAGAIN;
6683                 } while (0);
6684         }
6685
6686         bp->recovery_state = BNX2X_RECOVERY_DONE;
6687
6688         return bnx2x_nic_load(bp, LOAD_OPEN);
6689 }
6690
6691 /* called with rtnl_lock */
6692 static int bnx2x_close(struct net_device *dev)
6693 {
6694         struct bnx2x *bp = netdev_priv(dev);
6695
6696         /* Unload the driver, release IRQs */
6697         bnx2x_nic_unload(bp, UNLOAD_CLOSE);
6698         bnx2x_set_power_state(bp, PCI_D3hot);
6699
6700         return 0;
6701 }
6702
6703 /* called with netif_tx_lock from dev_mcast.c */
6704 void bnx2x_set_rx_mode(struct net_device *dev)
6705 {
6706         struct bnx2x *bp = netdev_priv(dev);
6707         u32 rx_mode = BNX2X_RX_MODE_NORMAL;
6708         int port = BP_PORT(bp);
6709
6710         if (bp->state != BNX2X_STATE_OPEN) {
6711                 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6712                 return;
6713         }
6714
6715         DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);
6716
6717         if (dev->flags & IFF_PROMISC)
6718                 rx_mode = BNX2X_RX_MODE_PROMISC;
6719
6720         else if ((dev->flags & IFF_ALLMULTI) ||
6721                  ((netdev_mc_count(dev) > BNX2X_MAX_MULTICAST) &&
6722                   CHIP_IS_E1(bp)))
6723                 rx_mode = BNX2X_RX_MODE_ALLMULTI;
6724
6725         else { /* some multicasts */
6726                 if (CHIP_IS_E1(bp)) {
6727                         int i, old, offset;
6728                         struct netdev_hw_addr *ha;
6729                         struct mac_configuration_cmd *config =
6730                                                 bnx2x_sp(bp, mcast_config);
6731
6732                         i = 0;
6733                         netdev_for_each_mc_addr(ha, dev) {
6734                                 config->config_table[i].
6735                                         cam_entry.msb_mac_addr =
6736                                         swab16(*(u16 *)&ha->addr[0]);
6737                                 config->config_table[i].
6738                                         cam_entry.middle_mac_addr =
6739                                         swab16(*(u16 *)&ha->addr[2]);
6740                                 config->config_table[i].
6741                                         cam_entry.lsb_mac_addr =
6742                                         swab16(*(u16 *)&ha->addr[4]);
6743                                 config->config_table[i].cam_entry.flags =
6744                                                         cpu_to_le16(port);
6745                                 config->config_table[i].
6746                                         target_table_entry.flags = 0;
6747                                 config->config_table[i].target_table_entry.
6748                                         clients_bit_vector =
6749                                                 cpu_to_le32(1 << BP_L_ID(bp));
6750                                 config->config_table[i].
6751                                         target_table_entry.vlan_id = 0;
6752
6753                                 DP(NETIF_MSG_IFUP,
6754                                    "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
6755                                    config->config_table[i].
6756                                                 cam_entry.msb_mac_addr,
6757                                    config->config_table[i].
6758                                                 cam_entry.middle_mac_addr,
6759                                    config->config_table[i].
6760                                                 cam_entry.lsb_mac_addr);
6761                                 i++;
6762                         }
6763                         old = config->hdr.length;
6764                         if (old > i) {
6765                                 for (; i < old; i++) {
6766                                         if (CAM_IS_INVALID(config->
6767                                                            config_table[i])) {
6768                                                 /* already invalidated */
6769                                                 break;
6770                                         }
6771                                         /* invalidate */
6772                                         CAM_INVALIDATE(config->
6773                                                        config_table[i]);
6774                                 }
6775                         }
6776
6777                         if (CHIP_REV_IS_SLOW(bp))
6778                                 offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
6779                         else
6780                                 offset = BNX2X_MAX_MULTICAST*(1 + port);
6781
6782                         config->hdr.length = i;
6783                         config->hdr.offset = offset;
6784                         config->hdr.client_id = bp->fp->cl_id;
6785                         config->hdr.reserved1 = 0;
6786
6787                         bp->set_mac_pending++;
6788                         smp_wmb();
6789
6790                         bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6791                                    U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
6792                                    U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
6793                                       0);
6794                 } else { /* E1H */
6795                         /* Accept one or more multicasts */
6796                         struct netdev_hw_addr *ha;
6797                         u32 mc_filter[MC_HASH_SIZE];
6798                         u32 crc, bit, regidx;
6799                         int i;
6800
6801                         memset(mc_filter, 0, 4 * MC_HASH_SIZE);
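			/* Each multicast MAC hashes to one of 256 filter
			 * bits: the top byte of its CRC32c picks the bit,
			 * which is split into a register index (bit >> 5)
			 * and a bit position within that 32-bit register.
			 */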
6802
6803                         netdev_for_each_mc_addr(ha, dev) {
6804                                 DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
6805                                    ha->addr);
6806
6807                                 crc = crc32c_le(0, ha->addr, ETH_ALEN);
6808                                 bit = (crc >> 24) & 0xff;
6809                                 regidx = bit >> 5;
6810                                 bit &= 0x1f;
6811                                 mc_filter[regidx] |= (1 << bit);
6812                         }
6813
6814                         for (i = 0; i < MC_HASH_SIZE; i++)
6815                                 REG_WR(bp, MC_HASH_OFFSET(bp, i),
6816                                        mc_filter[i]);
6817                 }
6818         }
6819
6820         bp->rx_mode = rx_mode;
6821         bnx2x_set_storm_rx_mode(bp);
6822 }
6823
6824
6825 /* called with rtnl_lock */
6826 static int bnx2x_mdio_read(struct net_device *netdev, int prtad,
6827                            int devad, u16 addr)
6828 {
6829         struct bnx2x *bp = netdev_priv(netdev);
6830         u16 value;
6831         int rc;
6832
6833         DP(NETIF_MSG_LINK, "mdio_read: prtad 0x%x, devad 0x%x, addr 0x%x\n",
6834            prtad, devad, addr);
6835
6836         /* The HW expects different devad if CL22 is used */
6837         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6838
6839         bnx2x_acquire_phy_lock(bp);
6840         rc = bnx2x_phy_read(&bp->link_params, prtad, devad, addr, &value);
6841         bnx2x_release_phy_lock(bp);
6842         DP(NETIF_MSG_LINK, "mdio_read_val 0x%x rc = 0x%x\n", value, rc);
6843
6844         if (!rc)
6845                 rc = value;
6846         return rc;
6847 }
6848
6849 /* called with rtnl_lock */
6850 static int bnx2x_mdio_write(struct net_device *netdev, int prtad, int devad,
6851                             u16 addr, u16 value)
6852 {
6853         struct bnx2x *bp = netdev_priv(netdev);
6854         int rc;
6855
6856         DP(NETIF_MSG_LINK, "mdio_write: prtad 0x%x, devad 0x%x, addr 0x%x,"
6857                            " value 0x%x\n", prtad, devad, addr, value);
6858
6859         /* The HW expects different devad if CL22 is used */
6860         devad = (devad == MDIO_DEVAD_NONE) ? DEFAULT_PHY_DEV_ADDR : devad;
6861
6862         bnx2x_acquire_phy_lock(bp);
6863         rc = bnx2x_phy_write(&bp->link_params, prtad, devad, addr, value);
6864         bnx2x_release_phy_lock(bp);
6865         return rc;
6866 }
6867
6868 /* called with rtnl_lock */
6869 static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
6870 {
6871         struct bnx2x *bp = netdev_priv(dev);
6872         struct mii_ioctl_data *mdio = if_mii(ifr);
6873
6874         DP(NETIF_MSG_LINK, "ioctl: phy id 0x%x, reg 0x%x, val_in 0x%x\n",
6875            mdio->phy_id, mdio->reg_num, mdio->val_in);
6876
6877         if (!netif_running(dev))
6878                 return -EAGAIN;
6879
6880         return mdio_mii_ioctl(&bp->mdio, mdio, cmd);
6881 }
6882
6883 #ifdef CONFIG_NET_POLL_CONTROLLER
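     /*
      * Netpoll hook: with the device interrupt masked, invoke the
      * interrupt handler directly so netconsole/netpoll can make
      * progress when normal interrupt delivery is not available.
      */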
6884 static void poll_bnx2x(struct net_device *dev)
6885 {
6886         struct bnx2x *bp = netdev_priv(dev);
6887
6888         disable_irq(bp->pdev->irq);
6889         bnx2x_interrupt(bp->pdev->irq, dev);
6890         enable_irq(bp->pdev->irq);
6891 }
6892 #endif
6893
6894 static const struct net_device_ops bnx2x_netdev_ops = {
6895         .ndo_open               = bnx2x_open,
6896         .ndo_stop               = bnx2x_close,
6897         .ndo_start_xmit         = bnx2x_start_xmit,
6898         .ndo_set_multicast_list = bnx2x_set_rx_mode,
6899         .ndo_set_mac_address    = bnx2x_change_mac_addr,
6900         .ndo_validate_addr      = eth_validate_addr,
6901         .ndo_do_ioctl           = bnx2x_ioctl,
6902         .ndo_change_mtu         = bnx2x_change_mtu,
6903         .ndo_tx_timeout         = bnx2x_tx_timeout,
6904 #ifdef BCM_VLAN
6905         .ndo_vlan_rx_register   = bnx2x_vlan_rx_register,
6906 #endif
6907 #ifdef CONFIG_NET_POLL_CONTROLLER
6908         .ndo_poll_controller    = poll_bnx2x,
6909 #endif
6910 };
6911
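     /*
      * One-time PCI and net_device setup: enable the device, validate
      * and map BAR0 (registers) and BAR2 (doorbells), choose the widest
      * DMA mask the platform supports and wire up the netdev ops,
      * feature flags and MDIO callbacks.  On failure, unwinds through
      * the err_out_* labels.
      */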
6912 static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
6913                                     struct net_device *dev)
6914 {
6915         struct bnx2x *bp;
6916         int rc;
6917
6918         SET_NETDEV_DEV(dev, &pdev->dev);
6919         bp = netdev_priv(dev);
6920
6921         bp->dev = dev;
6922         bp->pdev = pdev;
6923         bp->flags = 0;
6924         bp->func = PCI_FUNC(pdev->devfn);
6925
6926         rc = pci_enable_device(pdev);
6927         if (rc) {
6928                 dev_err(&bp->pdev->dev,
6929                         "Cannot enable PCI device, aborting\n");
6930                 goto err_out;
6931         }
6932
6933         if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
6934                 dev_err(&bp->pdev->dev,
6935                         "Cannot find PCI device base address, aborting\n");
6936                 rc = -ENODEV;
6937                 goto err_out_disable;
6938         }
6939
6940         if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
6941                 dev_err(&bp->pdev->dev, "Cannot find second PCI device"
6942                        " base address, aborting\n");
6943                 rc = -ENODEV;
6944                 goto err_out_disable;
6945         }
6946
6947         if (atomic_read(&pdev->enable_cnt) == 1) {
6948                 rc = pci_request_regions(pdev, DRV_MODULE_NAME);
6949                 if (rc) {
6950                         dev_err(&bp->pdev->dev,
6951                                 "Cannot obtain PCI resources, aborting\n");
6952                         goto err_out_disable;
6953                 }
6954
6955                 pci_set_master(pdev);
6956                 pci_save_state(pdev);
6957         }
6958
6959         bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
6960         if (bp->pm_cap == 0) {
6961                 dev_err(&bp->pdev->dev,
6962                         "Cannot find power management capability, aborting\n");
6963                 rc = -EIO;
6964                 goto err_out_release;
6965         }
6966
6967         bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
6968         if (bp->pcie_cap == 0) {
6969                 dev_err(&bp->pdev->dev,
6970                         "Cannot find PCI Express capability, aborting\n");
6971                 rc = -EIO;
6972                 goto err_out_release;
6973         }
6974
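             /* Prefer 64-bit DMA with a matching coherent mask;
              * fall back to 32-bit if the platform does not support it */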
6975         if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(64)) == 0) {
6976                 bp->flags |= USING_DAC_FLAG;
6977                 if (dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)) != 0) {
6978                         dev_err(&bp->pdev->dev, "dma_set_coherent_mask"
6979                                " failed, aborting\n");
6980                         rc = -EIO;
6981                         goto err_out_release;
6982                 }
6983
6984         } else if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32)) != 0) {
6985                 dev_err(&bp->pdev->dev,
6986                         "System does not support DMA, aborting\n");
6987                 rc = -EIO;
6988                 goto err_out_release;
6989         }
6990
6991         dev->mem_start = pci_resource_start(pdev, 0);
6992         dev->base_addr = dev->mem_start;
6993         dev->mem_end = pci_resource_end(pdev, 0);
6994
6995         dev->irq = pdev->irq;
6996
6997         bp->regview = pci_ioremap_bar(pdev, 0);
6998         if (!bp->regview) {
6999                 dev_err(&bp->pdev->dev,
7000                         "Cannot map register space, aborting\n");
7001                 rc = -ENOMEM;
7002                 goto err_out_release;
7003         }
7004
7005         bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
7006                                         min_t(u64, BNX2X_DB_SIZE,
7007                                               pci_resource_len(pdev, 2)));
7008         if (!bp->doorbells) {
7009                 dev_err(&bp->pdev->dev,
7010                         "Cannot map doorbell space, aborting\n");
7011                 rc = -ENOMEM;
7012                 goto err_out_unmap;
7013         }
7014
7015         bnx2x_set_power_state(bp, PCI_D0);
7016
7017         /* clean indirect addresses */
7018         pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
7019                                PCICFG_VENDOR_ID_OFFSET);
7020         REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
7021         REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
7022         REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
7023         REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);
7024
7025         /* Reset the load counter */
7026         bnx2x_clear_load_cnt(bp);
7027
7028         dev->watchdog_timeo = TX_TIMEOUT;
7029
7030         dev->netdev_ops = &bnx2x_netdev_ops;
7031         bnx2x_set_ethtool_ops(dev);
7032         dev->features |= NETIF_F_SG;
7033         dev->features |= NETIF_F_HW_CSUM;
7034         if (bp->flags & USING_DAC_FLAG)
7035                 dev->features |= NETIF_F_HIGHDMA;
7036         dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7037         dev->features |= NETIF_F_TSO6;
7038 #ifdef BCM_VLAN
7039         dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
7040         bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);
7041
7042         dev->vlan_features |= NETIF_F_SG;
7043         dev->vlan_features |= NETIF_F_HW_CSUM;
7044         if (bp->flags & USING_DAC_FLAG)
7045                 dev->vlan_features |= NETIF_F_HIGHDMA;
7046         dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
7047         dev->vlan_features |= NETIF_F_TSO6;
7048 #endif
7049
7050         /* get_port_hwinfo() will set prtad and mmds properly */
7051         bp->mdio.prtad = MDIO_PRTAD_NONE;
7052         bp->mdio.mmds = 0;
7053         bp->mdio.mode_support = MDIO_SUPPORTS_C45 | MDIO_EMULATE_C22;
7054         bp->mdio.dev = dev;
7055         bp->mdio.mdio_read = bnx2x_mdio_read;
7056         bp->mdio.mdio_write = bnx2x_mdio_write;
7057
7058         return 0;
7059
7060 err_out_unmap:
7061         if (bp->regview) {
7062                 iounmap(bp->regview);
7063                 bp->regview = NULL;
7064         }
7065         if (bp->doorbells) {
7066                 iounmap(bp->doorbells);
7067                 bp->doorbells = NULL;
7068         }
7069
7070 err_out_release:
7071         if (atomic_read(&pdev->enable_cnt) == 1)
7072                 pci_release_regions(pdev);
7073
7074 err_out_disable:
7075         pci_disable_device(pdev);
7076         pci_set_drvdata(pdev, NULL);
7077
7078 err_out:
7079         return rc;
7080 }
7081
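     /* Decode the negotiated PCIe link width and speed from the
      * PCICFG link control/status word */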
7082 static void __devinit bnx2x_get_pcie_width_speed(struct bnx2x *bp,
7083                                                  int *width, int *speed)
7084 {
7085         u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);
7086
7087         *width = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
7088
7089         /* speed is encoded as 1 = 2.5GHz, 2 = 5GHz */
7090         *speed = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
7091 }
7092
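     /* Sanity-check the loaded firmware image: every section must fit
      * inside the file, all init_ops offsets must be in range and the
      * embedded version must match what this driver was built against */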
7093 static int bnx2x_check_firmware(struct bnx2x *bp)
7094 {
7095         const struct firmware *firmware = bp->firmware;
7096         struct bnx2x_fw_file_hdr *fw_hdr;
7097         struct bnx2x_fw_file_section *sections;
7098         u32 offset, len, num_ops;
7099         u16 *ops_offsets;
7100         int i;
7101         const u8 *fw_ver;
7102
7103         if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
7104                 return -EINVAL;
7105
7106         fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
7107         sections = (struct bnx2x_fw_file_section *)fw_hdr;
7108
7109         /* Make sure none of the offsets and sizes make us read beyond
7110          * the end of the firmware data */
7111         for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
7112                 offset = be32_to_cpu(sections[i].offset);
7113                 len = be32_to_cpu(sections[i].len);
7114                 if (offset + len > firmware->size) {
7115                         dev_err(&bp->pdev->dev,
7116                                 "Section %d length is out of bounds\n", i);
7117                         return -EINVAL;
7118                 }
7119         }
7120
7121         /* Likewise for the init_ops offsets */
7122         offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
7123         ops_offsets = (u16 *)(firmware->data + offset);
7124         num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);
7125
7126         for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
7127                 if (be16_to_cpu(ops_offsets[i]) > num_ops) {
7128                         dev_err(&bp->pdev->dev,
7129                                 "Section offset %d is out of bounds\n", i);
7130                         return -EINVAL;
7131                 }
7132         }
7133
7134         /* Check FW version */
7135         offset = be32_to_cpu(fw_hdr->fw_version.offset);
7136         fw_ver = firmware->data + offset;
7137         if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
7138             (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
7139             (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
7140             (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
7141                 dev_err(&bp->pdev->dev,
7142                         "Bad FW version:%d.%d.%d.%d. Should be %d.%d.%d.%d\n",
7143                        fw_ver[0], fw_ver[1], fw_ver[2],
7144                        fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
7145                        BCM_5710_FW_MINOR_VERSION,
7146                        BCM_5710_FW_REVISION_VERSION,
7147                        BCM_5710_FW_ENGINEERING_VERSION);
7148                 return -EINVAL;
7149         }
7150
7151         return 0;
7152 }
7153
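     /* Convert an n-byte array of big-endian 32-bit words to CPU order */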
7154 static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7155 {
7156         const __be32 *source = (const __be32 *)_source;
7157         u32 *target = (u32 *)_target;
7158         u32 i;
7159
7160         for (i = 0; i < n/4; i++)
7161                 target[i] = be32_to_cpu(source[i]);
7162 }
7163
7164 /*
7165  * Ops array is stored in the following format:
7166  * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
7167  */
7168 static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
7169 {
7170         const __be32 *source = (const __be32 *)_source;
7171         struct raw_op *target = (struct raw_op *)_target;
7172         u32 i, j, tmp;
7173
7174         for (i = 0, j = 0; i < n/8; i++, j += 2) {
7175                 tmp = be32_to_cpu(source[j]);
7176                 target[i].op = (tmp >> 24) & 0xff;
7177                 target[i].offset = tmp & 0xffffff;
7178                 target[i].raw_data = be32_to_cpu(source[j + 1]);
7179         }
7180 }
7181
7182 static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
7183 {
7184         const __be16 *source = (const __be16 *)_source;
7185         u16 *target = (u16 *)_target;
7186         u32 i;
7187
7188         for (i = 0; i < n/2; i++)
7189                 target[i] = be16_to_cpu(source[i]);
7190 }
7191
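     /* Allocate bp->arr and fill it from the matching firmware file
      * section, converting endianness with func; jumps to lbl if the
      * allocation fails */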
7192 #define BNX2X_ALLOC_AND_SET(arr, lbl, func)                             \
7193 do {                                                                    \
7194         u32 len = be32_to_cpu(fw_hdr->arr.len);                         \
7195         bp->arr = kmalloc(len, GFP_KERNEL);                             \
7196         if (!bp->arr) {                                                 \
7197                 pr_err("Failed to allocate %d bytes for "#arr"\n", len); \
7198                 goto lbl;                                               \
7199         }                                                               \
7200         func(bp->firmware->data + be32_to_cpu(fw_hdr->arr.offset),      \
7201              (u8 *)bp->arr, len);                                       \
7202 } while (0)
7203
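     /*
      * Request the chip-specific firmware file, validate it, unpack the
      * init data/ops/offsets arrays into host byte order and point the
      * per-STORM init tables at their sections within the file.
      */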
7204 int bnx2x_init_firmware(struct bnx2x *bp)
7205 {
7206         const char *fw_file_name;
7207         struct bnx2x_fw_file_hdr *fw_hdr;
7208         int rc;
7209
7210         if (CHIP_IS_E1(bp))
7211                 fw_file_name = FW_FILE_NAME_E1;
7212         else if (CHIP_IS_E1H(bp))
7213                 fw_file_name = FW_FILE_NAME_E1H;
7214         else {
7215                 BNX2X_ERR("Unsupported chip revision\n");
7216                 return -EINVAL;
7217         }
7218
7219         BNX2X_DEV_INFO("Loading %s\n", fw_file_name);
7220
7221         rc = request_firmware(&bp->firmware, fw_file_name, &bp->pdev->dev);
7222         if (rc) {
7223                 BNX2X_ERR("Can't load firmware file %s\n", fw_file_name);
7224                 goto request_firmware_exit;
7225         }
7226
7227         rc = bnx2x_check_firmware(bp);
7228         if (rc) {
7229                 BNX2X_ERR("Corrupt firmware file %s\n", fw_file_name);
7230                 goto request_firmware_exit;
7231         }
7232
7233         fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;
7234
7235         /* Initialize the pointers to the init arrays */
7236         /* Blob */
7237         BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);
7238
7239         /* Opcodes */
7240         BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);
7241
7242         /* Offsets */
7243         BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err,
7244                             be16_to_cpu_n);
7245
7246         /* STORMs firmware */
7247         INIT_TSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7248                         be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
7249         INIT_TSEM_PRAM_DATA(bp)      = bp->firmware->data +
7250                         be32_to_cpu(fw_hdr->tsem_pram_data.offset);
7251         INIT_USEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7252                         be32_to_cpu(fw_hdr->usem_int_table_data.offset);
7253         INIT_USEM_PRAM_DATA(bp)      = bp->firmware->data +
7254                         be32_to_cpu(fw_hdr->usem_pram_data.offset);
7255         INIT_XSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7256                         be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
7257         INIT_XSEM_PRAM_DATA(bp)      = bp->firmware->data +
7258                         be32_to_cpu(fw_hdr->xsem_pram_data.offset);
7259         INIT_CSEM_INT_TABLE_DATA(bp) = bp->firmware->data +
7260                         be32_to_cpu(fw_hdr->csem_int_table_data.offset);
7261         INIT_CSEM_PRAM_DATA(bp)      = bp->firmware->data +
7262                         be32_to_cpu(fw_hdr->csem_pram_data.offset);
7263
7264         return 0;
7265
7266 init_offsets_alloc_err:
7267         kfree(bp->init_ops);
7268 init_ops_alloc_err:
7269         kfree(bp->init_data);
7270 request_firmware_exit:
7271         release_firmware(bp->firmware);
7272
7273         return rc;
7274 }
7275
7276
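     /* PCI probe: allocate the net_device, initialize the adapter and
      * register it with the network stack */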
7277 static int __devinit bnx2x_init_one(struct pci_dev *pdev,
7278                                     const struct pci_device_id *ent)
7279 {
7280         struct net_device *dev = NULL;
7281         struct bnx2x *bp;
7282         int pcie_width, pcie_speed;
7283         int rc;
7284
7285         /* dev is zeroed by alloc_etherdev_mq */
7286         dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
7287         if (!dev) {
7288                 dev_err(&pdev->dev, "Cannot allocate net device\n");
7289                 return -ENOMEM;
7290         }
7291
7292         bp = netdev_priv(dev);
7293         bp->msg_enable = debug;
7294
7295         pci_set_drvdata(pdev, dev);
7296
7297         rc = bnx2x_init_dev(pdev, dev);
7298         if (rc < 0) {
7299                 free_netdev(dev);
7300                 return rc;
7301         }
7302
7303         rc = bnx2x_init_bp(bp);
7304         if (rc)
7305                 goto init_one_exit;
7306
7307         rc = register_netdev(dev);
7308         if (rc) {
7309                 dev_err(&pdev->dev, "Cannot register net device\n");
7310                 goto init_one_exit;
7311         }
7312
7313         bnx2x_get_pcie_width_speed(bp, &pcie_width, &pcie_speed);
7314         netdev_info(dev, "%s (%c%d) PCI-E x%d %s found at mem %lx,"
7315                " IRQ %d, ", board_info[ent->driver_data].name,
7316                (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
7317                pcie_width, (pcie_speed == 2) ? "5GHz (Gen2)" : "2.5GHz",
7318                dev->base_addr, bp->pdev->irq);
7319         pr_cont("node addr %pM\n", dev->dev_addr);
7320
7321         return 0;
7322
7323 init_one_exit:
7324         if (bp->regview)
7325                 iounmap(bp->regview);
7326
7327         if (bp->doorbells)
7328                 iounmap(bp->doorbells);
7329
7330         free_netdev(dev);
7331
7332         if (atomic_read(&pdev->enable_cnt) == 1)
7333                 pci_release_regions(pdev);
7334
7335         pci_disable_device(pdev);
7336         pci_set_drvdata(pdev, NULL);
7337
7338         return rc;
7339 }
7340
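     /* PCI remove: unregister the netdev, make sure the reset task has
      * finished, then unmap the BARs and release all PCI resources */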
7341 static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
7342 {
7343         struct net_device *dev = pci_get_drvdata(pdev);
7344         struct bnx2x *bp;
7345
7346         if (!dev) {
7347                 dev_err(&pdev->dev, "BAD net device from bnx2x_init_one\n");
7348                 return;
7349         }
7350         bp = netdev_priv(dev);
7351
7352         unregister_netdev(dev);
7353
7354         /* Make sure the RESET task is neither pending nor still running */
7355         cancel_delayed_work_sync(&bp->reset_task);
7356
7357         if (bp->regview)
7358                 iounmap(bp->regview);
7359
7360         if (bp->doorbells)
7361                 iounmap(bp->doorbells);
7362
7363         free_netdev(dev);
7364
7365         if (atomic_read(&pdev->enable_cnt) == 1)
7366                 pci_release_regions(pdev);
7367
7368         pci_disable_device(pdev);
7369         pci_set_drvdata(pdev, NULL);
7370 }
7371
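     /* Minimal unload used on the EEH error path: stop the interface
      * and free driver resources without issuing ramrods, since the
      * device may no longer be accessible */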
7372 static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
7373 {
7374         int i;
7375
7376         bp->state = BNX2X_STATE_ERROR;
7377
7378         bp->rx_mode = BNX2X_RX_MODE_NONE;
7379
7380         bnx2x_netif_stop(bp, 0);
7381         netif_carrier_off(bp->dev);
7382
7383         del_timer_sync(&bp->timer);
7384         bp->stats_state = STATS_STATE_DISABLED;
7385         DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");
7386
7387         /* Release IRQs */
7388         bnx2x_free_irq(bp, false);
7389
7390         if (CHIP_IS_E1(bp)) {
7391                 struct mac_configuration_cmd *config =
7392                                                 bnx2x_sp(bp, mcast_config);
7393
7394                 for (i = 0; i < config->hdr.length; i++)
7395                         CAM_INVALIDATE(config->config_table[i]);
7396         }
7397
7398         /* Free SKBs, SGEs, TPA pool and driver internals */
7399         bnx2x_free_skbs(bp);
7400         for_each_queue(bp, i)
7401                 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7402         for_each_queue(bp, i)
7403                 netif_napi_del(&bnx2x_fp(bp, i, napi));
7404         bnx2x_free_mem(bp);
7405
7406         bp->state = BNX2X_STATE_CLOSED;
7407
7408         return 0;
7409 }
7410
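     /* Re-discover the MCP state after a slot reset: re-read the shmem
      * base, verify the validity signature and refresh fw_seq */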
7411 static void bnx2x_eeh_recover(struct bnx2x *bp)
7412 {
7413         u32 val;
7414
7415         mutex_init(&bp->port.phy_mutex);
7416
7417         bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7418         bp->link_params.shmem_base = bp->common.shmem_base;
7419         BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7420
7421         if (!bp->common.shmem_base ||
7422             (bp->common.shmem_base < 0xA0000) ||
7423             (bp->common.shmem_base >= 0xC0000)) {
7424                 BNX2X_DEV_INFO("MCP not active\n");
7425                 bp->flags |= NO_MCP_FLAG;
7426                 return;
7427         }
7428
7429         val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7430         if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7431                 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7432                 BNX2X_ERR("BAD MCP validity signature\n");
7433
7434         if (!BP_NOMCP(bp)) {
7435                 bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
7436                               & DRV_MSG_SEQ_NUMBER_MASK);
7437                 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
7438         }
7439 }
7440
7441 /**
7442  * bnx2x_io_error_detected - called when PCI error is detected
7443  * @pdev: Pointer to PCI device
7444  * @state: The current pci connection state
7445  *
7446  * This function is called after a PCI bus error affecting
7447  * this device has been detected.
7448  */
7449 static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
7450                                                 pci_channel_state_t state)
7451 {
7452         struct net_device *dev = pci_get_drvdata(pdev);
7453         struct bnx2x *bp = netdev_priv(dev);
7454
7455         rtnl_lock();
7456
7457         netif_device_detach(dev);
7458
7459         if (state == pci_channel_io_perm_failure) {
7460                 rtnl_unlock();
7461                 return PCI_ERS_RESULT_DISCONNECT;
7462         }
7463
7464         if (netif_running(dev))
7465                 bnx2x_eeh_nic_unload(bp);
7466
7467         pci_disable_device(pdev);
7468
7469         rtnl_unlock();
7470
7471         /* Request a slot reset */
7472         return PCI_ERS_RESULT_NEED_RESET;
7473 }
7474
7475 /**
7476  * bnx2x_io_slot_reset - called after the PCI bus has been reset
7477  * @pdev: Pointer to PCI device
7478  *
7479  * Restart the card from scratch, as if from a cold boot.
7480  */
7481 static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
7482 {
7483         struct net_device *dev = pci_get_drvdata(pdev);
7484         struct bnx2x *bp = netdev_priv(dev);
7485
7486         rtnl_lock();
7487
7488         if (pci_enable_device(pdev)) {
7489                 dev_err(&pdev->dev,
7490                         "Cannot re-enable PCI device after reset\n");
7491                 rtnl_unlock();
7492                 return PCI_ERS_RESULT_DISCONNECT;
7493         }
7494
7495         pci_set_master(pdev);
7496         pci_restore_state(pdev);
7497
7498         if (netif_running(dev))
7499                 bnx2x_set_power_state(bp, PCI_D0);
7500
7501         rtnl_unlock();
7502
7503         return PCI_ERS_RESULT_RECOVERED;
7504 }
7505
7506 /**
7507  * bnx2x_io_resume - called when traffic can start flowing again
7508  * @pdev: Pointer to PCI device
7509  *
7510  * This callback is called when the error recovery driver tells us that
7511  * it's OK to resume normal operation.
7512  */
7513 static void bnx2x_io_resume(struct pci_dev *pdev)
7514 {
7515         struct net_device *dev = pci_get_drvdata(pdev);
7516         struct bnx2x *bp = netdev_priv(dev);
7517
7518         if (bp->recovery_state != BNX2X_RECOVERY_DONE) {
7519                 pr_err("Handling parity error recovery. Try again later\n");
7520                 return;
7521         }
7522
7523         rtnl_lock();
7524
7525         bnx2x_eeh_recover(bp);
7526
7527         if (netif_running(dev))
7528                 bnx2x_nic_load(bp, LOAD_NORMAL);
7529
7530         netif_device_attach(dev);
7531
7532         rtnl_unlock();
7533 }
7534
7535 static struct pci_error_handlers bnx2x_err_handler = {
7536         .error_detected = bnx2x_io_error_detected,
7537         .slot_reset     = bnx2x_io_slot_reset,
7538         .resume         = bnx2x_io_resume,
7539 };
7540
7541 static struct pci_driver bnx2x_pci_driver = {
7542         .name        = DRV_MODULE_NAME,
7543         .id_table    = bnx2x_pci_tbl,
7544         .probe       = bnx2x_init_one,
7545         .remove      = __devexit_p(bnx2x_remove_one),
7546         .suspend     = bnx2x_suspend,
7547         .resume      = bnx2x_resume,
7548         .err_handler = &bnx2x_err_handler,
7549 };
7550
7551 static int __init bnx2x_init(void)
7552 {
7553         int ret;
7554
7555         pr_info("%s", version);
7556
7557         bnx2x_wq = create_singlethread_workqueue("bnx2x");
7558         if (bnx2x_wq == NULL) {
7559                 pr_err("Cannot create workqueue\n");
7560                 return -ENOMEM;
7561         }
7562
7563         ret = pci_register_driver(&bnx2x_pci_driver);
7564         if (ret) {
7565                 pr_err("Cannot register driver\n");
7566                 destroy_workqueue(bnx2x_wq);
7567         }
7568         return ret;
7569 }
7570
7571 static void __exit bnx2x_cleanup(void)
7572 {
7573         pci_unregister_driver(&bnx2x_pci_driver);
7574
7575         destroy_workqueue(bnx2x_wq);
7576 }
7577
7578 module_init(bnx2x_init);
7579 module_exit(bnx2x_cleanup);
7580
7581 #ifdef BCM_CNIC
7582
7583 /* count denotes the number of new completions we have seen */
7584 static void bnx2x_cnic_sp_post(struct bnx2x *bp, int count)
7585 {
7586         struct eth_spe *spe;
7587
7588 #ifdef BNX2X_STOP_ON_ERROR
7589         if (unlikely(bp->panic))
7590                 return;
7591 #endif
7592
7593         spin_lock_bh(&bp->spq_lock);
7594         bp->cnic_spq_pending -= count;
7595
7596         for (; bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending;
7597              bp->cnic_spq_pending++) {
7598
7599                 if (!bp->cnic_kwq_pending)
7600                         break;
7601
7602                 spe = bnx2x_sp_get_next(bp);
7603                 *spe = *bp->cnic_kwq_cons;
7604
7605                 bp->cnic_kwq_pending--;
7606
7607                 DP(NETIF_MSG_TIMER, "pending on SPQ %d, on KWQ %d count %d\n",
7608                    bp->cnic_spq_pending, bp->cnic_kwq_pending, count);
7609
7610                 if (bp->cnic_kwq_cons == bp->cnic_kwq_last)
7611                         bp->cnic_kwq_cons = bp->cnic_kwq;
7612                 else
7613                         bp->cnic_kwq_cons++;
7614         }
7615         bnx2x_sp_prod_update(bp);
7616         spin_unlock_bh(&bp->spq_lock);
7617 }
7618
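     /* Entry point for CNIC to submit 16-byte kwqes: buffer them on the
      * driver's kwq ring and post to the slow path queue if there is
      * room; returns the number of kwqes accepted */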
7619 static int bnx2x_cnic_sp_queue(struct net_device *dev,
7620                                struct kwqe_16 *kwqes[], u32 count)
7621 {
7622         struct bnx2x *bp = netdev_priv(dev);
7623         int i;
7624
7625 #ifdef BNX2X_STOP_ON_ERROR
7626         if (unlikely(bp->panic))
7627                 return -EIO;
7628 #endif
7629
7630         spin_lock_bh(&bp->spq_lock);
7631
7632         for (i = 0; i < count; i++) {
7633                 struct eth_spe *spe = (struct eth_spe *)kwqes[i];
7634
7635                 if (bp->cnic_kwq_pending == MAX_SP_DESC_CNT)
7636                         break;
7637
7638                 *bp->cnic_kwq_prod = *spe;
7639
7640                 bp->cnic_kwq_pending++;
7641
7642                 DP(NETIF_MSG_TIMER, "L5 SPQE %x %x %x:%x pos %d\n",
7643                    spe->hdr.conn_and_cmd_data, spe->hdr.type,
7644                    spe->data.mac_config_addr.hi,
7645                    spe->data.mac_config_addr.lo,
7646                    bp->cnic_kwq_pending);
7647
7648                 if (bp->cnic_kwq_prod == bp->cnic_kwq_last)
7649                         bp->cnic_kwq_prod = bp->cnic_kwq;
7650                 else
7651                         bp->cnic_kwq_prod++;
7652         }
7653
7654         spin_unlock_bh(&bp->spq_lock);
7655
7656         if (bp->cnic_spq_pending < bp->cnic_eth_dev.max_kwqe_pending)
7657                 bnx2x_cnic_sp_post(bp, 0);
7658
7659         return i;
7660 }
7661
7662 static int bnx2x_cnic_ctl_send(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7663 {
7664         struct cnic_ops *c_ops;
7665         int rc = 0;
7666
7667         mutex_lock(&bp->cnic_mutex);
7668         c_ops = bp->cnic_ops;
7669         if (c_ops)
7670                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7671         mutex_unlock(&bp->cnic_mutex);
7672
7673         return rc;
7674 }
7675
7676 static int bnx2x_cnic_ctl_send_bh(struct bnx2x *bp, struct cnic_ctl_info *ctl)
7677 {
7678         struct cnic_ops *c_ops;
7679         int rc = 0;
7680
7681         rcu_read_lock();
7682         c_ops = rcu_dereference(bp->cnic_ops);
7683         if (c_ops)
7684                 rc = c_ops->cnic_ctl(bp->cnic_data, ctl);
7685         rcu_read_unlock();
7686
7687         return rc;
7688 }
7689
7690 /*
7691  * Notify CNIC of an event; used for commands that carry no data.
7692  */
7693 int bnx2x_cnic_notify(struct bnx2x *bp, int cmd)
7694 {
7695         struct cnic_ctl_info ctl = {0};
7696
7697         ctl.cmd = cmd;
7698
7699         return bnx2x_cnic_ctl_send(bp, &ctl);
7700 }
7701
7702 static void bnx2x_cnic_cfc_comp(struct bnx2x *bp, int cid)
7703 {
7704         struct cnic_ctl_info ctl;
7705
7706         /* first we tell CNIC and only then we count this as a completion */
7707         ctl.cmd = CNIC_CTL_COMPLETION_CMD;
7708         ctl.data.comp.cid = cid;
7709
7710         bnx2x_cnic_ctl_send_bh(bp, &ctl);
7711         bnx2x_cnic_sp_post(bp, 1);
7712 }
7713
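     /* Dispatch control requests coming down from the CNIC driver */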
7714 static int bnx2x_drv_ctl(struct net_device *dev, struct drv_ctl_info *ctl)
7715 {
7716         struct bnx2x *bp = netdev_priv(dev);
7717         int rc = 0;
7718
7719         switch (ctl->cmd) {
7720         case DRV_CTL_CTXTBL_WR_CMD: {
7721                 u32 index = ctl->data.io.offset;
7722                 dma_addr_t addr = ctl->data.io.dma_addr;
7723
7724                 bnx2x_ilt_wr(bp, index, addr);
7725                 break;
7726         }
7727
7728         case DRV_CTL_COMPLETION_CMD: {
7729                 int count = ctl->data.comp.comp_count;
7730
7731                 bnx2x_cnic_sp_post(bp, count);
7732                 break;
7733         }
7734
7735         /* rtnl_lock is held.  */
7736         case DRV_CTL_START_L2_CMD: {
7737                 u32 cli = ctl->data.ring.client_id;
7738
7739                 bp->rx_mode_cl_mask |= (1 << cli);
7740                 bnx2x_set_storm_rx_mode(bp);
7741                 break;
7742         }
7743
7744         /* rtnl_lock is held.  */
7745         case DRV_CTL_STOP_L2_CMD: {
7746                 u32 cli = ctl->data.ring.client_id;
7747
7748                 bp->rx_mode_cl_mask &= ~(1 << cli);
7749                 bnx2x_set_storm_rx_mode(bp);
7750                 break;
7751         }
7752
7753         default:
7754                 BNX2X_ERR("unknown command %x\n", ctl->cmd);
7755                 rc = -EINVAL;
7756         }
7757
7758         return rc;
7759 }
7760
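     /* Describe the interrupt resources CNIC may use: its own status
      * block (on MSI-X table entry 1 when MSI-X is active) plus the
      * default status block */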
7761 void bnx2x_setup_cnic_irq_info(struct bnx2x *bp)
7762 {
7763         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7764
7765         if (bp->flags & USING_MSIX_FLAG) {
7766                 cp->drv_state |= CNIC_DRV_STATE_USING_MSIX;
7767                 cp->irq_arr[0].irq_flags |= CNIC_IRQ_FL_MSIX;
7768                 cp->irq_arr[0].vector = bp->msix_table[1].vector;
7769         } else {
7770                 cp->drv_state &= ~CNIC_DRV_STATE_USING_MSIX;
7771                 cp->irq_arr[0].irq_flags &= ~CNIC_IRQ_FL_MSIX;
7772         }
7773         cp->irq_arr[0].status_blk = bp->cnic_sb;
7774         cp->irq_arr[0].status_blk_num = CNIC_SB_ID(bp);
7775         cp->irq_arr[1].status_blk = bp->def_status_blk;
7776         cp->irq_arr[1].status_blk_num = DEF_SB_ID;
7777
7778         cp->num_irq = 2;
7779 }
7780
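     /* Called by CNIC to attach: allocate the kwqe ring, init the CNIC
      * status block, program the iSCSI MAC and publish the ops pointer */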
7781 static int bnx2x_register_cnic(struct net_device *dev, struct cnic_ops *ops,
7782                                void *data)
7783 {
7784         struct bnx2x *bp = netdev_priv(dev);
7785         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7786
7787         if (ops == NULL)
7788                 return -EINVAL;
7789
7790         if (atomic_read(&bp->intr_sem) != 0)
7791                 return -EBUSY;
7792
7793         bp->cnic_kwq = kzalloc(PAGE_SIZE, GFP_KERNEL);
7794         if (!bp->cnic_kwq)
7795                 return -ENOMEM;
7796
7797         bp->cnic_kwq_cons = bp->cnic_kwq;
7798         bp->cnic_kwq_prod = bp->cnic_kwq;
7799         bp->cnic_kwq_last = bp->cnic_kwq + MAX_SP_DESC_CNT;
7800
7801         bp->cnic_spq_pending = 0;
7802         bp->cnic_kwq_pending = 0;
7803
7804         bp->cnic_data = data;
7805
7806         cp->num_irq = 0;
7807         cp->drv_state = CNIC_DRV_STATE_REGD;
7808
7809         bnx2x_init_sb(bp, bp->cnic_sb, bp->cnic_sb_mapping, CNIC_SB_ID(bp));
7810
7811         bnx2x_setup_cnic_irq_info(bp);
7812         bnx2x_set_iscsi_eth_mac_addr(bp, 1);
7813         bp->cnic_flags |= BNX2X_CNIC_FLAG_MAC_SET;
7814         rcu_assign_pointer(bp->cnic_ops, ops);
7815
7816         return 0;
7817 }
7818
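     /* Detach CNIC: clear the iSCSI MAC and the ops pointer, then wait
      * for RCU readers to finish before freeing the kwqe ring */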
7819 static int bnx2x_unregister_cnic(struct net_device *dev)
7820 {
7821         struct bnx2x *bp = netdev_priv(dev);
7822         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7823
7824         mutex_lock(&bp->cnic_mutex);
7825         if (bp->cnic_flags & BNX2X_CNIC_FLAG_MAC_SET) {
7826                 bp->cnic_flags &= ~BNX2X_CNIC_FLAG_MAC_SET;
7827                 bnx2x_set_iscsi_eth_mac_addr(bp, 0);
7828         }
7829         cp->drv_state = 0;
7830         rcu_assign_pointer(bp->cnic_ops, NULL);
7831         mutex_unlock(&bp->cnic_mutex);
7832         synchronize_rcu();
7833         kfree(bp->cnic_kwq);
7834         bp->cnic_kwq = NULL;
7835
7836         return 0;
7837 }
7838
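     /* Fill in and return the cnic_eth_dev descriptor that CNIC binds to */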
7839 struct cnic_eth_dev *bnx2x_cnic_probe(struct net_device *dev)
7840 {
7841         struct bnx2x *bp = netdev_priv(dev);
7842         struct cnic_eth_dev *cp = &bp->cnic_eth_dev;
7843
7844         cp->drv_owner = THIS_MODULE;
7845         cp->chip_id = CHIP_ID(bp);
7846         cp->pdev = bp->pdev;
7847         cp->io_base = bp->regview;
7848         cp->io_base2 = bp->doorbells;
7849         cp->max_kwqe_pending = 8;
7850         cp->ctx_blk_size = CNIC_CTX_PER_ILT * sizeof(union cdu_context);
7851         cp->ctx_tbl_offset = FUNC_ILT_BASE(BP_FUNC(bp)) + 1;
7852         cp->ctx_tbl_len = CNIC_ILT_LINES;
7853         cp->starting_cid = BCM_CNIC_CID_START;
7854         cp->drv_submit_kwqes_16 = bnx2x_cnic_sp_queue;
7855         cp->drv_ctl = bnx2x_drv_ctl;
7856         cp->drv_register_cnic = bnx2x_register_cnic;
7857         cp->drv_unregister_cnic = bnx2x_unregister_cnic;
7858
7859         return cp;
7860 }
7861 EXPORT_SYMBOL(bnx2x_cnic_probe);
7862
7863 #endif /* BCM_CNIC */
7864