/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath and fastpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/tcp.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>
#include <linux/io.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
#define DRV_MODULE_VERSION	"1.48.114-1"
#define DRV_MODULE_RELDATE	"2009/07/29"
#define BNX2X_BC_VER		0x040200

#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";

MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Multi queue mode "
			     "(0 Disable; 1 Enable (default))");

static int num_rx_queues;
module_param(num_rx_queues, int, 0);
MODULE_PARM_DESC(num_rx_queues, " Number of Rx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int num_tx_queues;
module_param(num_tx_queues, int, 0);
MODULE_PARM_DESC(num_tx_queues, " Number of Tx queues for multi_mode=1"
				" (default is half number of CPUs)");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}

static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
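/*
 * The two helpers above tunnel GRC register accesses through PCI
 * configuration space: the target GRC address goes into PCICFG_GRC_ADDRESS
 * and the data is moved through PCICFG_GRC_DATA; pointing the window back
 * at PCICFG_VENDOR_ID_OFFSET afterwards leaves it on a harmless offset.
 * This path needs no memory-mapped access, which is why init code falls
 * back to it while the DMAE engine is not yet ready.
 */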
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};

/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
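/*
 * A DMAE command is simply a dmae_command structure copied word by word
 * into the engine's command memory; writing 1 to the matching
 * DMAE_REG_GO_C* register then kicks off the transfer.  Completion is
 * signalled by the engine writing comp_val to the address given in
 * comp_addr_lo/hi, which the two callers below poll for.
 */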
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
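/*
 * Illustrative slowpath usage (a sketch, not a call site from this file):
 * to push a block of words into device internal memory, stage it in the
 * slowpath write-back buffer and hand its DMA mapping to the engine, e.g.
 *
 *	memcpy(bnx2x_sp(bp, wb_data[0]), buf, 2 * sizeof(u32));
 *	bnx2x_write_dmae(bp, bnx2x_sp_mapping(bp, wb_data), grc_addr, 2);
 *
 * where buf and grc_addr are hypothetical.  Both functions serialize on
 * dmae_mutex and may sleep, so they must not be called in atomic context.
 */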
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}

#ifdef USE_WB_RD
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
#endif
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
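/*
 * Each of the four STORM processors keeps its own assert list in internal
 * memory; the four blocks above are identical apart from the BAR and
 * offset constants.  An entry whose first word differs from
 * COMMON_ASM_INVALID_ASSERT_OPCODE is a recorded firmware assert, and the
 * scan stops at the first invalid entry since the list is filled in order.
 */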
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}
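/*
 * The MCP scratchpad holds the firmware log as a circular text buffer:
 * offset 0xf104 stores the current write mark, so the dump prints from
 * the mark to the end of the buffer and then wraps from 0xF108 back up
 * to the mark, 8 words (one NUL-terminated 32-byte chunk) at a time.
 */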
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR("     rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR("     fp_c_idx(%x) *sb_c_idx(%x)"
			  " tx_db_prod(%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  fp->tx_db.data.prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
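/*
 * Teardown order matters here: bumping intr_sem makes the ISRs bail out
 * early, bnx2x_int_disable() stops the HC from raising new interrupts,
 * synchronize_irq() waits out any handler still in flight, and only then
 * is the slowpath task cancelled and the workqueue flushed so nothing can
 * requeue it.
 */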
/* fast path */

/*
 * General service functions
 */

static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
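/*
 * The IGU ack is a single 32-bit write that both acknowledges a status
 * block (sb_id/storm/index) and carries an interrupt-mode command in "op"
 * (e.g. IGU_INT_DISABLE, IGU_INT_NOP or IGU_INT_ENABLE), optionally
 * updating the consumer index when "update" is set.
 */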
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}

static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}
/*
 * fast path service functions
 */

static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_start_bd *tx_start_bd;
	struct eth_tx_bd *tx_data_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_start_bd = &fp->tx_desc_ring[bd_idx].start_bd;
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_start_bd),
			 BD_UNMAP_LEN(tx_start_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_start_bd->nbd) - 1;
#ifdef BNX2X_STOP_ON_ERROR
	if ((nbd - 1) > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif
	new_cons = nbd + tx_buf->first_bd;

	/* Get the next bd */
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* Skip a parse bd... */
	--nbd;
	bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	/* ...and the TSO split header bd since they have no mapping */
	if (tx_buf->flags & BNX2X_TSO_SPLIT_BD) {
		--nbd;
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_data_bd = &fp->tx_desc_ring[bd_idx].reg_bd;
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_data_bd),
			       BD_UNMAP_LEN(tx_data_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb_any(skb);
	tx_buf->first_bd = 0;
	tx_buf->skb = NULL;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
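/*
 * The NUM_TX_RINGS bias above counts every "next-page" chaining BD as
 * permanently in use: with prod == cons an empty ring still reports
 * NUM_TX_RINGS busy entries, so the space returned to the stack never
 * includes the page-chaining descriptors and the producer can never be
 * advanced onto one of them.
 */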
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index - bp->num_rx_queues);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped().  Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);
	}
}
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DISABLED):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}

static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod,
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}

static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
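/*
 * sge_mask is a bitmap with one bit per SGE ring entry, grouped into
 * 64-bit elements.  A set bit means the entry still belongs to the
 * driver; bits are cleared as the firmware consumes pages, and
 * bnx2x_update_sge_prod() advances rx_sge_prod over fully-cleared
 * 64-entry elements, re-setting their bits in the same pass.  The
 * "next-page" entries are pre-cleared here so they never block that scan.
 */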
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
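/*
 * TPA bin lifecycle: bnx2x_tpa_start() parks the partially-aggregated skb
 * in tpa_pool[queue] while the firmware appends SGE pages to the
 * aggregation; bnx2x_tpa_stop() then finishes the skb (fixes the IP
 * checksum, attaches the pages via bnx2x_fill_frag_skb() and hands it to
 * the stack) and returns the bin to the BNX2X_TPA_STOP state.
 */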
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since the FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
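/*
 * The three producers are published to USTORM internal memory as plain
 * 32-bit writes: wmb() orders the BD/SGE payload stores ahead of the
 * producer stores, and mmiowb() keeps the producer writes themselves
 * ordered against later MMIO from other CPUs.
 */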
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
			     (TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on non-TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						    len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(skb);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);


next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   fp->index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	/* Handle Rx or Tx according to MSI-X vector */
	if (fp->is_rx_queue) {
		prefetch(fp->rx_cons_sb);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, fp->index, napi));

	} else {
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);

		bnx2x_update_fpsb_idx(fp);
		rmb();
		bnx2x_tx_int(fp);

		/* Re-enable interrupts */
		bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
			     le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
		bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
			     le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
	}

	return IRQ_HANDLED;
}
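/*
 * With multi-queue each fastpath has its own MSI-X vector: Rx queues just
 * schedule NAPI (the poll routine re-enables the status block when done),
 * while Tx queues are drained directly in the ISR and therefore re-enable
 * the IGU themselves with the two acks above.
 */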
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;
	int i;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	for (i = 0; i < BNX2X_NUM_QUEUES(bp); i++) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		mask = 0x2 << fp->sb_id;
		if (status & mask) {
			/* Handle Rx or Tx according to SB id */
			if (fp->is_rx_queue) {
				prefetch(fp->rx_cons_sb);
				prefetch(&fp->status_blk->u_status_block.
							status_block_index);

				napi_schedule(&bnx2x_fp(bp, fp->index, napi));

			} else {
				prefetch(fp->tx_cons_sb);
				prefetch(&fp->status_blk->c_status_block.
							status_block_index);

				bnx2x_update_fpsb_idx(fp);
				rmb();
				bnx2x_tx_int(fp);

				/* Re-enable interrupts */
				bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
					     le16_to_cpu(fp->fp_u_idx),
					     IGU_INT_NOP, 1);
				bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
					     le16_to_cpu(fp->fp_c_idx),
					     IGU_INT_ENABLE, 1);
			}
			status &= ~mask;
		}
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}
/* end of fast path */

static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/* Link */

/*
 * General service functions
 */
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5) {
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	} else {
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);
	}

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
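/*
 * PHY access is protected at two levels: phy_mutex serializes callers
 * within this driver instance, while the MDIO hardware lock (taken only
 * when need_hw_lock is set) presumably arbitrates against firmware and
 * the other port on boards where the PHYs share an MDIO bus.
 */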
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}

static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}

static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
2208 static void bnx2x_init_port_minmax(struct bnx2x *bp)
2210 u32 r_param = bp->link_vars.line_speed / 8;
2211 u32 fair_periodic_timeout_usec;
2214 memset(&(bp->cmng.rs_vars), 0,
2215 sizeof(struct rate_shaping_vars_per_port));
2216 memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));
2218 /* 100 usec in SDM ticks = 25 since each tick is 4 usec */
2219 bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;
2221 /* this is the threshold below which no timer arming will occur
2222 1.25 coefficient is for the threshold to be a little bigger
2223 than the real time, to compensate for timer in-accuracy */
2224 bp->cmng.rs_vars.rs_threshold =
2225 (RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;
2227 /* resolution of fairness timer */
2228 fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
2229 /* for 10G it is 1000usec. for 1G it is 10000usec. */
2230 t_fair = T_FAIR_COEF / bp->link_vars.line_speed;
2232 /* this is the threshold below which we won't arm the timer anymore */
2233 bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;
2235 /* we multiply by 1e3/8 to get bytes/msec.
2236 We don't want the credits to pass a credit
2237 of the t_fair*FAIR_MEM (algorithm resolution) */
2238 bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
2239 /* since each tick is 4 usec */
2240 bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
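/*
 * Worked example (illustrative only): on a 10G link, line_speed = 10000,
 * so r_param = 10000/8 = 1250 bytes/usec.  With the 100 usec rate-shaping
 * period this gives rs_periodic_timeout = 100/4 = 25 SDM ticks and
 * rs_threshold = (100 * 1250 * 5)/4 = 156250 bytes, i.e. the credit of
 * one real period scaled by the 1.25 safety coefficient; t_fair comes
 * out at 1000 usec, as the comment above notes.
 */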
2243 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2245 struct rate_shaping_vars_per_vn m_rs_vn;
2246 struct fairness_vars_per_vn m_fair_vn;
2247 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2248 u16 vn_min_rate, vn_max_rate;
2251 /* If function is hidden - set min and max to zeroes */
2252 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2253 vn_min_rate = 0;
2254 vn_max_rate = 0;
2256 } else {
2257 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2258 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2259 /* If fairness is enabled (not all min rates are zeroes) and
2260 if current min rate is zero - set it to 1.
2261 This is a requirement of the algorithm. */
2262 if (bp->vn_weight_sum && (vn_min_rate == 0))
2263 vn_min_rate = DEF_MIN_RATE;
2264 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2265 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2269 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2270 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2272 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2273 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2275 /* global vn counter - maximal Mbps for this vn */
2276 m_rs_vn.vn_counter.rate = vn_max_rate;
2278 /* quota - number of bytes transmitted in this period */
2279 m_rs_vn.vn_counter.quota =
2280 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2282 if (bp->vn_weight_sum) {
2283 /* credit for each period of the fairness algorithm:
2284 number of bytes in T_FAIR (the vn share the port rate).
2285 vn_weight_sum should not be larger than 10000, thus
2286 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2287 than zero */
2288 m_fair_vn.vn_credit_delta =
2289 max((u32)(vn_min_rate * (T_FAIR_COEF /
2290 (8 * bp->vn_weight_sum))),
2291 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2292 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2293 m_fair_vn.vn_credit_delta);
2294 }
2296 /* Store it to internal memory */
2297 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2298 REG_WR(bp, BAR_XSTRORM_INTMEM +
2299 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2300 ((u32 *)(&m_rs_vn))[i]);
2302 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2303 REG_WR(bp, BAR_XSTRORM_INTMEM +
2304 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2305 ((u32 *)(&m_fair_vn))[i]);
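/*
 * Worked example (illustrative only): a min/max BW field value of 25
 * decodes to 25 * 100 = 2500 Mbps.  The per-period quota is then
 * (2500 * 100)/8 = 31250 bytes per 100 usec rate-shaping period,
 * which is exactly 2.5 Gbps worth of credit.
 */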
2309 /* This function is called upon link interrupt */
2310 static void bnx2x_link_attn(struct bnx2x *bp)
2312 /* Make sure that we are synced with the current statistics */
2313 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2315 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2317 if (bp->link_vars.link_up) {
2319 /* dropless flow control */
2320 if (CHIP_IS_E1H(bp)) {
2321 int port = BP_PORT(bp);
2322 u32 pause_enabled = 0;
2324 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2325 pause_enabled = 1;
2327 REG_WR(bp, BAR_USTRORM_INTMEM +
2328 USTORM_ETH_PAUSE_ENABLED_OFFSET(port),
2329 pause_enabled);
2332 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2333 struct host_port_stats *pstats;
2335 pstats = bnx2x_sp(bp, port_stats);
2336 /* reset old bmac stats */
2337 memset(&(pstats->mac_stx[0]), 0,
2338 sizeof(struct mac_stx));
2340 if ((bp->state == BNX2X_STATE_OPEN) ||
2341 (bp->state == BNX2X_STATE_DISABLED))
2342 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2345 /* indicate link status */
2346 bnx2x_link_report(bp);
2349 int port = BP_PORT(bp);
2353 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2354 if (vn == BP_E1HVN(bp))
2357 func = ((vn << 1) | port);
2359 /* Set the attention towards other drivers
2360 on the same port */
2361 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2362 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2365 if (bp->link_vars.link_up) {
2368 /* Init rate shaping and fairness contexts */
2369 bnx2x_init_port_minmax(bp);
2371 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2372 bnx2x_init_vn_minmax(bp, 2*vn + port);
2374 /* Store it to internal memory */
2375 for (i = 0;
2376 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2377 REG_WR(bp, BAR_XSTRORM_INTMEM +
2378 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2379 ((u32 *)(&bp->cmng))[i]);
2384 static void bnx2x__link_status_update(struct bnx2x *bp)
2386 if (bp->state != BNX2X_STATE_OPEN)
2387 return;
2389 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2391 if (bp->link_vars.link_up)
2392 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2393 else
2394 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2396 /* indicate link status */
2397 bnx2x_link_report(bp);
2400 static void bnx2x_pmf_update(struct bnx2x *bp)
2402 int port = BP_PORT(bp);
2405 bp->port.pmf = 1;
2406 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2408 /* enable nig attention */
2409 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2410 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2411 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2413 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2420 /****************************************************************************
2421 * General service functions
2422 ****************************************************************************/
2424 /* the slow path queue is odd since completions arrive on the fastpath ring */
2425 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2426 u32 data_hi, u32 data_lo, int common)
2428 int func = BP_FUNC(bp);
2430 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2431 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2432 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2433 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2434 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2436 #ifdef BNX2X_STOP_ON_ERROR
2437 if (unlikely(bp->panic))
2438 return -EIO;
2439 #endif
2441 spin_lock_bh(&bp->spq_lock);
2443 if (!bp->spq_left) {
2444 BNX2X_ERR("BUG! SPQ ring full!\n");
2445 spin_unlock_bh(&bp->spq_lock);
2446 bnx2x_panic();
2447 return -EBUSY;
2448 }
2450 /* CID needs port number to be encoded in it */
2451 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2452 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2453 HW_CID(bp, cid)));
2454 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2455 if (common)
2456 bp->spq_prod_bd->hdr.type |=
2457 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2459 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2460 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2464 if (bp->spq_prod_bd == bp->spq_last_bd) {
2465 bp->spq_prod_bd = bp->spq;
2466 bp->spq_prod_idx = 0;
2467 DP(NETIF_MSG_TIMER, "end of spq\n");
2469 } else {
2470 bp->spq_prod_bd++;
2471 bp->spq_prod_idx++;
2472 }
2474 /* Make sure that BD data is updated before writing the producer */
2475 wmb();
2477 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2478 bp->spq_prod_idx);
2482 spin_unlock_bh(&bp->spq_lock);
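/*
 * Usage sketch (illustrative only; it mirrors bnx2x_storm_stats_post()
 * further down): posting a non-common ETH ramrod on CID 0, with the
 * 64-bit payload split into data_hi/data_lo.
 */
#if 0
rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
((u32 *)&ramrod_data)[1],
((u32 *)&ramrod_data)[0], 0);
#endif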
2486 /* acquire split MCP access lock register */
2487 static int bnx2x_acquire_alr(struct bnx2x *bp)
2494 for (j = 0; j < i*10; j++) {
2495 val = (1UL << 31);
2496 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2497 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2498 if (val & (1L << 31))
2499 break;
2501 msleep(5);
2502 }
2503 if (!(val & (1L << 31))) {
2504 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2511 /* release split MCP access lock register */
2512 static void bnx2x_release_alr(struct bnx2x *bp)
2514 u32 val = 0;
2516 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2519 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2521 struct host_def_status_block *def_sb = bp->def_status_blk;
2524 barrier(); /* status block is written to by the chip */
2525 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2526 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2529 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2530 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2533 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2534 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2537 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2538 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2541 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2542 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
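/*
 * Each branch above also contributes a bit to the u16 this function
 * returns (attention, CSTORM, USTORM, XSTORM and TSTORM index changes
 * respectively); bnx2x_sp_task() reads that mask back as "status" to
 * see which default status block indices moved.
 */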
2548 /****************************************************************************
2549 * slow path service functions
2550 ****************************************************************************/
2552 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2554 int port = BP_PORT(bp);
2555 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2556 COMMAND_REG_ATTN_BITS_SET);
2557 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2558 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2559 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2560 NIG_REG_MASK_INTERRUPT_PORT0;
2564 if (bp->attn_state & asserted)
2565 BNX2X_ERR("IGU ERROR\n");
2567 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2568 aeu_mask = REG_RD(bp, aeu_addr);
2570 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2571 aeu_mask, asserted);
2572 aeu_mask &= ~(asserted & 0xff);
2573 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2575 REG_WR(bp, aeu_addr, aeu_mask);
2576 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2578 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2579 bp->attn_state |= asserted;
2580 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2582 if (asserted & ATTN_HARD_WIRED_MASK) {
2583 if (asserted & ATTN_NIG_FOR_FUNC) {
2585 bnx2x_acquire_phy_lock(bp);
2587 /* save nig interrupt mask */
2588 nig_mask = REG_RD(bp, nig_int_mask_addr);
2589 REG_WR(bp, nig_int_mask_addr, 0);
2591 bnx2x_link_attn(bp);
2593 /* handle unicore attn? */
2595 if (asserted & ATTN_SW_TIMER_4_FUNC)
2596 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2598 if (asserted & GPIO_2_FUNC)
2599 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2601 if (asserted & GPIO_3_FUNC)
2602 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2604 if (asserted & GPIO_4_FUNC)
2605 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2608 if (asserted & ATTN_GENERAL_ATTN_1) {
2609 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2610 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2612 if (asserted & ATTN_GENERAL_ATTN_2) {
2613 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2614 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2616 if (asserted & ATTN_GENERAL_ATTN_3) {
2617 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2618 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2621 if (asserted & ATTN_GENERAL_ATTN_4) {
2622 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2623 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2625 if (asserted & ATTN_GENERAL_ATTN_5) {
2626 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2627 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2629 if (asserted & ATTN_GENERAL_ATTN_6) {
2630 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2631 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2635 } /* if hardwired */
2637 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2639 REG_WR(bp, hc_addr, asserted);
2641 /* now set back the mask */
2642 if (asserted & ATTN_NIG_FOR_FUNC) {
2643 REG_WR(bp, nig_int_mask_addr, nig_mask);
2644 bnx2x_release_phy_lock(bp);
2648 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2650 int port = BP_PORT(bp);
2652 /* mark the failure */
2653 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2654 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2655 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2656 bp->link_params.ext_phy_config);
2658 /* log the failure */
2659 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2660 " the driver to shut down the card to prevent permanent"
2661 " damage. Please contact Dell Support for assistance\n",
2662 bp->dev->name);
2664 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2666 int port = BP_PORT(bp);
2668 u32 val, swap_val, swap_override;
2670 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2671 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2673 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2675 val = REG_RD(bp, reg_offset);
2676 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2677 REG_WR(bp, reg_offset, val);
2679 BNX2X_ERR("SPIO5 hw attention\n");
2681 /* Fan failure attention */
2682 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2683 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2684 /* Low power mode is controlled by GPIO 2 */
2685 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2686 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2687 /* The PHY reset is controlled by GPIO 1 */
2688 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2689 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2692 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2693 /* The PHY reset is controlled by GPIO 1 */
2694 /* fake the port number to cancel the swap done in
2695 set_gpio() */
2696 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2697 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2698 port = (swap_val && swap_override) ^ 1;
2699 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2700 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2706 bnx2x_fan_failure(bp);
2709 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2710 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2711 bnx2x_acquire_phy_lock(bp);
2712 bnx2x_handle_module_detect_int(&bp->link_params);
2713 bnx2x_release_phy_lock(bp);
2716 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2718 val = REG_RD(bp, reg_offset);
2719 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2720 REG_WR(bp, reg_offset, val);
2722 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2723 (attn & HW_INTERRUT_ASSERT_SET_0));
2728 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2732 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2734 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2735 BNX2X_ERR("DB hw attention 0x%x\n", val);
2736 /* DORQ discard attention */
2738 BNX2X_ERR("FATAL error from DORQ\n");
2741 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2743 int port = BP_PORT(bp);
2746 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2747 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2749 val = REG_RD(bp, reg_offset);
2750 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2751 REG_WR(bp, reg_offset, val);
2753 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2754 (attn & HW_INTERRUT_ASSERT_SET_1));
2759 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2763 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2765 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2766 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2767 /* CFC error attention */
2769 BNX2X_ERR("FATAL error from CFC\n");
2772 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2774 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2775 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2776 /* RQ_USDMDP_FIFO_OVERFLOW */
2778 BNX2X_ERR("FATAL error from PXP\n");
2781 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2783 int port = BP_PORT(bp);
2786 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2787 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2789 val = REG_RD(bp, reg_offset);
2790 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2791 REG_WR(bp, reg_offset, val);
2793 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2794 (attn & HW_INTERRUT_ASSERT_SET_2));
2799 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2803 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2805 if (attn & BNX2X_PMF_LINK_ASSERT) {
2806 int func = BP_FUNC(bp);
2808 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2809 bnx2x__link_status_update(bp);
2810 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2811 DRV_STATUS_PMF)
2812 bnx2x_pmf_update(bp);
2814 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2816 BNX2X_ERR("MC assert!\n");
2817 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2818 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2819 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2820 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2823 } else if (attn & BNX2X_MCP_ASSERT) {
2825 BNX2X_ERR("MCP assert!\n");
2826 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2830 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2833 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2834 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2835 if (attn & BNX2X_GRC_TIMEOUT) {
2836 val = CHIP_IS_E1H(bp) ?
2837 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2838 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2840 if (attn & BNX2X_GRC_RSV) {
2841 val = CHIP_IS_E1H(bp) ?
2842 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2843 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2845 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2849 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2851 struct attn_route attn;
2852 struct attn_route group_mask;
2853 int port = BP_PORT(bp);
2859 /* need to take HW lock because MCP or other port might also
2860 try to handle this event */
2861 bnx2x_acquire_alr(bp);
2863 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2864 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2865 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2866 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2867 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2868 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2870 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2871 if (deasserted & (1 << index)) {
2872 group_mask = bp->attn_group[index];
2874 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2875 index, group_mask.sig[0], group_mask.sig[1],
2876 group_mask.sig[2], group_mask.sig[3]);
2878 bnx2x_attn_int_deasserted3(bp,
2879 attn.sig[3] & group_mask.sig[3]);
2880 bnx2x_attn_int_deasserted1(bp,
2881 attn.sig[1] & group_mask.sig[1]);
2882 bnx2x_attn_int_deasserted2(bp,
2883 attn.sig[2] & group_mask.sig[2]);
2884 bnx2x_attn_int_deasserted0(bp,
2885 attn.sig[0] & group_mask.sig[0]);
2887 if ((attn.sig[0] & group_mask.sig[0] &
2888 HW_PRTY_ASSERT_SET_0) ||
2889 (attn.sig[1] & group_mask.sig[1] &
2890 HW_PRTY_ASSERT_SET_1) ||
2891 (attn.sig[2] & group_mask.sig[2] &
2892 HW_PRTY_ASSERT_SET_2))
2893 BNX2X_ERR("FATAL HW block parity attention\n");
2897 bnx2x_release_alr(bp);
2899 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2901 val = ~deasserted;
2902 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2903 val, reg_addr);
2904 REG_WR(bp, reg_addr, val);
2906 if (~bp->attn_state & deasserted)
2907 BNX2X_ERR("IGU ERROR\n");
2909 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2910 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2912 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2913 aeu_mask = REG_RD(bp, reg_addr);
2915 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2916 aeu_mask, deasserted);
2917 aeu_mask |= (deasserted & 0xff);
2918 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2920 REG_WR(bp, reg_addr, aeu_mask);
2921 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2923 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2924 bp->attn_state &= ~deasserted;
2925 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2928 static void bnx2x_attn_int(struct bnx2x *bp)
2930 /* read local copy of bits */
2931 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2932 attn_bits);
2933 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2934 attn_bits_ack);
2935 u32 attn_state = bp->attn_state;
2937 /* look for changed bits */
2938 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2939 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2942 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2943 attn_bits, attn_ack, asserted, deasserted);
2945 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2946 BNX2X_ERR("BAD attention state\n");
2948 /* handle bits that were raised */
2949 if (asserted)
2950 bnx2x_attn_int_asserted(bp, asserted);
2952 if (deasserted)
2953 bnx2x_attn_int_deasserted(bp, deasserted);
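/*
 * Example (illustrative): for a single attention line, attn_bits = 1,
 * attn_ack = 0, attn_state = 0 yields asserted = 1 (newly raised);
 * attn_bits = 0, attn_ack = 1, attn_state = 1 yields deasserted = 1
 * (newly cleared).  The "BAD attention state" check fires when bits
 * and ack agree with each other but disagree with the saved state.
 */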
2956 static void bnx2x_sp_task(struct work_struct *work)
2958 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2962 /* Return here if interrupt is disabled */
2963 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2964 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2965 return;
2966 }
2968 status = bnx2x_update_dsb_idx(bp);
2969 /* if (status == 0) */
2970 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2972 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2978 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2979 IGU_INT_NOP, 1);
2980 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2981 IGU_INT_NOP, 1);
2982 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2983 IGU_INT_NOP, 1);
2984 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2985 IGU_INT_NOP, 1);
2986 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2987 IGU_INT_ENABLE, 1);
2991 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2993 struct net_device *dev = dev_instance;
2994 struct bnx2x *bp = netdev_priv(dev);
2996 /* Return here if interrupt is disabled */
2997 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2998 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2999 return IRQ_HANDLED;
3000 }
3002 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
3004 #ifdef BNX2X_STOP_ON_ERROR
3005 if (unlikely(bp->panic))
3006 return IRQ_HANDLED;
3007 #endif
3009 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
3014 /* end of slow path */
3018 /****************************************************************************
3019 * Macros
3020 ****************************************************************************/
3022 /* sum[hi:lo] += add[hi:lo] */
3023 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
3024 do { \
3025 s_lo += a_lo; \
3026 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3027 } while (0)
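/*
 * Example (illustrative): with s = 0x00000000ffffffff and a = 1,
 * s_lo wraps to 0; the (s_lo < a_lo) test detects the wrap and the
 * carry bumps s_hi, giving s = 0x0000000100000000.
 */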
3029 /* difference = minuend - subtrahend */
3030 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3031 do { \
3032 if (m_lo < s_lo) { \
3033 /* underflow */ \
3034 d_hi = m_hi - s_hi; \
3035 if (d_hi > 0) { \
3036 /* we can borrow 1 */ \
3037 d_hi--; \
3038 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3039 } else { \
3040 /* m_hi <= s_hi */ \
3041 d_hi = 0; \
3042 d_lo = 0; \
3043 } \
3044 } else { \
3045 /* m_lo >= s_lo */ \
3046 if (m_hi < s_hi) { \
3047 d_hi = 0; \
3048 d_lo = 0; \
3049 } else { \
3050 /* m_hi >= s_hi */ \
3051 d_hi = m_hi - s_hi; \
3052 d_lo = m_lo - s_lo; \
3053 } \
3054 } \
3055 } while (0)
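/*
 * Example (illustrative): m = 0x0000000200000000, s = 0x0000000100000001.
 * Since m_lo < s_lo, 1 is borrowed from the high dword:
 * d_hi = 2 - 1 - 1 = 0, d_lo = 0 + (UINT_MAX - 1) + 1 = 0xffffffff,
 * so d = 0x00000000ffffffff.  A would-be negative difference is
 * clamped to zero instead of wrapping.
 */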
3057 #define UPDATE_STAT64(s, t) \
3058 do { \
3059 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3060 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3061 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3062 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3063 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3064 pstats->mac_stx[1].t##_lo, diff.lo); \
3065 } while (0)
3067 #define UPDATE_STAT64_NIG(s, t) \
3068 do { \
3069 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3070 diff.lo, new->s##_lo, old->s##_lo); \
3071 ADD_64(estats->t##_hi, diff.hi, \
3072 estats->t##_lo, diff.lo); \
3073 } while (0)
3075 /* sum[hi:lo] += add */
3076 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3077 do { \
3078 s_lo += a; \
3079 s_hi += (s_lo < a) ? 1 : 0; \
3080 } while (0)
3082 #define UPDATE_EXTEND_STAT(s) \
3083 do { \
3084 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3085 pstats->mac_stx[1].s##_lo, \
3086 new->s); \
3087 } while (0)
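/*
 * The UPDATE_EXTEND_{T,U,X}STAT macros below widen free-running 32-bit
 * storm counters into 64-bit software counters: the delta since the
 * last poll is computed in u32 arithmetic (so a single wrap between
 * polls is still counted correctly) and folded in with ADD_EXTEND_64.
 */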
3089 #define UPDATE_EXTEND_TSTAT(s, t) \
3090 do { \
3091 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3092 old_tclient->s = tclient->s; \
3093 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3094 } while (0)
3096 #define UPDATE_EXTEND_USTAT(s, t) \
3097 do { \
3098 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3099 old_uclient->s = uclient->s; \
3100 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3101 } while (0)
3103 #define UPDATE_EXTEND_XSTAT(s, t) \
3104 do { \
3105 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3106 old_xclient->s = xclient->s; \
3107 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3108 } while (0)
3110 /* minuend -= subtrahend */
3111 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3112 do { \
3113 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3114 } while (0)
3116 /* minuend[hi:lo] -= subtrahend */
3117 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3118 do { \
3119 SUB_64(m_hi, 0, m_lo, s); \
3120 } while (0)
3122 #define SUB_EXTEND_USTAT(s, t) \
3123 do { \
3124 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3125 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3126 } while (0)
3128 /****************************************************************************
3129 * General service functions
3130 ****************************************************************************/
3132 static inline long bnx2x_hilo(u32 *hiref)
3133 {
3134 u32 lo = *(hiref + 1);
3135 #if (BITS_PER_LONG == 64)
3136 u32 hi = *hiref;
3138 return HILO_U64(hi, lo);
3139 #else
3140 return lo;
3141 #endif
3142 }
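/*
 * Example (illustrative): statistics live as adjacent {name_hi, name_lo}
 * u32 pairs, so bnx2x_hilo(&estats->total_bytes_received_hi) reads the
 * _hi word and the _lo word that follows it; on 32-bit kernels only the
 * low dword is returned.
 */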
3144 /****************************************************************************
3145 * Init service functions
3146 ****************************************************************************/
3148 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3150 if (!bp->stats_pending) {
3151 struct eth_query_ramrod_data ramrod_data = {0};
3154 ramrod_data.drv_counter = bp->stats_counter++;
3155 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3156 for_each_queue(bp, i)
3157 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3159 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3160 ((u32 *)&ramrod_data)[1],
3161 ((u32 *)&ramrod_data)[0], 0);
3162 if (rc == 0) {
3163 /* stats ramrod has its own slot on the spq */
3164 bp->spq_left++;
3165 bp->stats_pending = 1;
3166 }
3170 static void bnx2x_stats_init(struct bnx2x *bp)
3172 int port = BP_PORT(bp);
3175 bp->stats_pending = 0;
3176 bp->executer_idx = 0;
3177 bp->stats_counter = 0;
3181 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3183 bp->port.port_stx = 0;
3184 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3186 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3187 bp->port.old_nig_stats.brb_discard =
3188 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3189 bp->port.old_nig_stats.brb_truncate =
3190 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3191 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3192 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3193 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3194 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3196 /* function stats */
3197 for_each_queue(bp, i) {
3198 struct bnx2x_fastpath *fp = &bp->fp[i];
3200 memset(&fp->old_tclient, 0,
3201 sizeof(struct tstorm_per_client_stats));
3202 memset(&fp->old_uclient, 0,
3203 sizeof(struct ustorm_per_client_stats));
3204 memset(&fp->old_xclient, 0,
3205 sizeof(struct xstorm_per_client_stats));
3206 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3209 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3210 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3212 bp->stats_state = STATS_STATE_DISABLED;
3213 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3214 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3217 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3219 struct dmae_command *dmae = &bp->stats_dmae;
3220 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3222 *stats_comp = DMAE_COMP_VAL;
3223 if (CHIP_REV_IS_SLOW(bp))
3227 if (bp->executer_idx) {
3228 int loader_idx = PMF_DMAE_C(bp);
3230 memset(dmae, 0, sizeof(struct dmae_command));
3232 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3233 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3234 DMAE_CMD_DST_RESET |
3236 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3238 DMAE_CMD_ENDIANITY_DW_SWAP |
3240 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3242 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3243 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3244 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3245 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3246 sizeof(struct dmae_command) *
3247 (loader_idx + 1)) >> 2;
3248 dmae->dst_addr_hi = 0;
3249 dmae->len = sizeof(struct dmae_command) >> 2;
3252 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3253 dmae->comp_addr_hi = 0;
3257 bnx2x_post_dmae(bp, dmae, loader_idx);
3259 } else if (bp->func_stx) {
3261 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3265 static int bnx2x_stats_comp(struct bnx2x *bp)
3267 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3271 while (*stats_comp != DMAE_COMP_VAL) {
3273 BNX2X_ERR("timeout waiting for stats finished\n");
3282 /****************************************************************************
3283 * Statistics service functions
3284 ****************************************************************************/
3286 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3288 struct dmae_command *dmae;
3290 int loader_idx = PMF_DMAE_C(bp);
3291 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3294 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3295 BNX2X_ERR("BUG!\n");
3299 bp->executer_idx = 0;
3301 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3303 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3305 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3307 DMAE_CMD_ENDIANITY_DW_SWAP |
3309 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3310 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3312 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3313 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3314 dmae->src_addr_lo = bp->port.port_stx >> 2;
3315 dmae->src_addr_hi = 0;
3316 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3317 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3318 dmae->len = DMAE_LEN32_RD_MAX;
3319 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3320 dmae->comp_addr_hi = 0;
3323 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3324 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3325 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3326 dmae->src_addr_hi = 0;
3327 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3328 DMAE_LEN32_RD_MAX * 4);
3329 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3330 DMAE_LEN32_RD_MAX * 4);
3331 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3332 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3333 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3334 dmae->comp_val = DMAE_COMP_VAL;
3337 bnx2x_hw_stats_post(bp);
3338 bnx2x_stats_comp(bp);
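/*
 * Note: the port stats block is wider than one DMAE read
 * (DMAE_LEN32_RD_MAX dwords), so it is split into two commands above;
 * only the second one completes to the stats_comp word with
 * DMAE_COMP_VAL, which bnx2x_stats_comp() polls for.
 */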
3341 static void bnx2x_port_stats_init(struct bnx2x *bp)
3343 struct dmae_command *dmae;
3344 int port = BP_PORT(bp);
3345 int vn = BP_E1HVN(bp);
3347 int loader_idx = PMF_DMAE_C(bp);
3349 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3352 if (!bp->link_vars.link_up || !bp->port.pmf) {
3353 BNX2X_ERR("BUG!\n");
3357 bp->executer_idx = 0;
3360 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3361 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3362 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3364 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3366 DMAE_CMD_ENDIANITY_DW_SWAP |
3368 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3369 (vn << DMAE_CMD_E1HVN_SHIFT));
3371 if (bp->port.port_stx) {
3373 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3374 dmae->opcode = opcode;
3375 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3376 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3377 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3378 dmae->dst_addr_hi = 0;
3379 dmae->len = sizeof(struct host_port_stats) >> 2;
3380 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3381 dmae->comp_addr_hi = 0;
3387 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3388 dmae->opcode = opcode;
3389 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3390 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3391 dmae->dst_addr_lo = bp->func_stx >> 2;
3392 dmae->dst_addr_hi = 0;
3393 dmae->len = sizeof(struct host_func_stats) >> 2;
3394 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3395 dmae->comp_addr_hi = 0;
3400 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3401 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3402 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3404 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3406 DMAE_CMD_ENDIANITY_DW_SWAP |
3408 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3409 (vn << DMAE_CMD_E1HVN_SHIFT));
3411 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3413 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3414 NIG_REG_INGRESS_BMAC0_MEM);
3416 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3417 BIGMAC_REGISTER_TX_STAT_GTBYT */
3418 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3419 dmae->opcode = opcode;
3420 dmae->src_addr_lo = (mac_addr +
3421 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3422 dmae->src_addr_hi = 0;
3423 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3424 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3425 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3426 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3427 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3428 dmae->comp_addr_hi = 0;
3431 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3432 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3433 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3434 dmae->opcode = opcode;
3435 dmae->src_addr_lo = (mac_addr +
3436 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3437 dmae->src_addr_hi = 0;
3438 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3439 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3440 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3441 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3442 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3443 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3444 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3445 dmae->comp_addr_hi = 0;
3448 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3450 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3452 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3453 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3454 dmae->opcode = opcode;
3455 dmae->src_addr_lo = (mac_addr +
3456 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3457 dmae->src_addr_hi = 0;
3458 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3459 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3460 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3461 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3462 dmae->comp_addr_hi = 0;
3465 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3466 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3467 dmae->opcode = opcode;
3468 dmae->src_addr_lo = (mac_addr +
3469 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3470 dmae->src_addr_hi = 0;
3471 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3472 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3473 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3474 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3476 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3477 dmae->comp_addr_hi = 0;
3480 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3481 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3482 dmae->opcode = opcode;
3483 dmae->src_addr_lo = (mac_addr +
3484 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3485 dmae->src_addr_hi = 0;
3486 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3487 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3488 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3489 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3490 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3491 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3492 dmae->comp_addr_hi = 0;
3497 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3498 dmae->opcode = opcode;
3499 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3500 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3501 dmae->src_addr_hi = 0;
3502 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3503 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3504 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3505 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3506 dmae->comp_addr_hi = 0;
3509 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3510 dmae->opcode = opcode;
3511 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3512 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3513 dmae->src_addr_hi = 0;
3514 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3515 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3516 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3517 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3518 dmae->len = (2*sizeof(u32)) >> 2;
3519 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3520 dmae->comp_addr_hi = 0;
3523 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3524 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3525 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3526 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3528 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3530 DMAE_CMD_ENDIANITY_DW_SWAP |
3532 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3533 (vn << DMAE_CMD_E1HVN_SHIFT));
3534 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3535 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3536 dmae->src_addr_hi = 0;
3537 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3538 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3539 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3540 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3541 dmae->len = (2*sizeof(u32)) >> 2;
3542 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3543 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3544 dmae->comp_val = DMAE_COMP_VAL;
3549 static void bnx2x_func_stats_init(struct bnx2x *bp)
3551 struct dmae_command *dmae = &bp->stats_dmae;
3552 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3555 if (!bp->func_stx) {
3556 BNX2X_ERR("BUG!\n");
3560 bp->executer_idx = 0;
3561 memset(dmae, 0, sizeof(struct dmae_command));
3563 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3564 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3565 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3567 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3569 DMAE_CMD_ENDIANITY_DW_SWAP |
3571 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3572 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3573 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3574 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3575 dmae->dst_addr_lo = bp->func_stx >> 2;
3576 dmae->dst_addr_hi = 0;
3577 dmae->len = sizeof(struct host_func_stats) >> 2;
3578 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3579 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3580 dmae->comp_val = DMAE_COMP_VAL;
3585 static void bnx2x_stats_start(struct bnx2x *bp)
3587 if (bp->port.pmf)
3588 bnx2x_port_stats_init(bp);
3590 else if (bp->func_stx)
3591 bnx2x_func_stats_init(bp);
3593 bnx2x_hw_stats_post(bp);
3594 bnx2x_storm_stats_post(bp);
3597 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3599 bnx2x_stats_comp(bp);
3600 bnx2x_stats_pmf_update(bp);
3601 bnx2x_stats_start(bp);
3604 static void bnx2x_stats_restart(struct bnx2x *bp)
3606 bnx2x_stats_comp(bp);
3607 bnx2x_stats_start(bp);
3610 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3612 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3613 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3614 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3620 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3621 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3622 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3623 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3624 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3625 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3626 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3627 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3628 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3629 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3630 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3631 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3632 UPDATE_STAT64(tx_stat_gt127,
3633 tx_stat_etherstatspkts65octetsto127octets);
3634 UPDATE_STAT64(tx_stat_gt255,
3635 tx_stat_etherstatspkts128octetsto255octets);
3636 UPDATE_STAT64(tx_stat_gt511,
3637 tx_stat_etherstatspkts256octetsto511octets);
3638 UPDATE_STAT64(tx_stat_gt1023,
3639 tx_stat_etherstatspkts512octetsto1023octets);
3640 UPDATE_STAT64(tx_stat_gt1518,
3641 tx_stat_etherstatspkts1024octetsto1522octets);
3642 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3643 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3644 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3645 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3646 UPDATE_STAT64(tx_stat_gterr,
3647 tx_stat_dot3statsinternalmactransmiterrors);
3648 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3650 estats->pause_frames_received_hi =
3651 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3652 estats->pause_frames_received_lo =
3653 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3655 estats->pause_frames_sent_hi =
3656 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3657 estats->pause_frames_sent_lo =
3658 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3661 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3663 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3664 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3665 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3667 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3668 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3669 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3670 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3671 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3672 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3673 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3674 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3675 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3676 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3677 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3678 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3679 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3680 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3681 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3682 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3683 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3684 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3685 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3686 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3687 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3688 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3689 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3690 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3691 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3692 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3693 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3694 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3695 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3696 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3697 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3699 estats->pause_frames_received_hi =
3700 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3701 estats->pause_frames_received_lo =
3702 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3703 ADD_64(estats->pause_frames_received_hi,
3704 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3705 estats->pause_frames_received_lo,
3706 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3708 estats->pause_frames_sent_hi =
3709 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3710 estats->pause_frames_sent_lo =
3711 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3712 ADD_64(estats->pause_frames_sent_hi,
3713 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3714 estats->pause_frames_sent_lo,
3715 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3718 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3720 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3721 struct nig_stats *old = &(bp->port.old_nig_stats);
3722 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3723 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3730 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3731 bnx2x_bmac_stats_update(bp);
3733 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3734 bnx2x_emac_stats_update(bp);
3736 else { /* unreached */
3737 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3741 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3742 new->brb_discard - old->brb_discard);
3743 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3744 new->brb_truncate - old->brb_truncate);
3746 UPDATE_STAT64_NIG(egress_mac_pkt0,
3747 etherstatspkts1024octetsto1522octets);
3748 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3750 memcpy(old, new, sizeof(struct nig_stats));
3752 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3753 sizeof(struct mac_stx));
3754 estats->brb_drop_hi = pstats->brb_drop_hi;
3755 estats->brb_drop_lo = pstats->brb_drop_lo;
3757 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3759 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3760 if (nig_timer_max != estats->nig_timer_max) {
3761 estats->nig_timer_max = nig_timer_max;
3762 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3768 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3770 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3771 struct tstorm_per_port_stats *tport =
3772 &stats->tstorm_common.port_statistics;
3773 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3774 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3777 memset(&(fstats->total_bytes_received_hi), 0,
3778 sizeof(struct host_func_stats) - 2*sizeof(u32));
3779 estats->error_bytes_received_hi = 0;
3780 estats->error_bytes_received_lo = 0;
3781 estats->etherstatsoverrsizepkts_hi = 0;
3782 estats->etherstatsoverrsizepkts_lo = 0;
3783 estats->no_buff_discard_hi = 0;
3784 estats->no_buff_discard_lo = 0;
3786 for_each_rx_queue(bp, i) {
3787 struct bnx2x_fastpath *fp = &bp->fp[i];
3788 int cl_id = fp->cl_id;
3789 struct tstorm_per_client_stats *tclient =
3790 &stats->tstorm_common.client_statistics[cl_id];
3791 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3792 struct ustorm_per_client_stats *uclient =
3793 &stats->ustorm_common.client_statistics[cl_id];
3794 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3795 struct xstorm_per_client_stats *xclient =
3796 &stats->xstorm_common.client_statistics[cl_id];
3797 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3798 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3801 /* are storm stats valid? */
3802 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3803 bp->stats_counter) {
3804 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3805 " xstorm counter (%d) != stats_counter (%d)\n",
3806 i, xclient->stats_counter, bp->stats_counter);
3807 return -1;
3808 }
3809 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3810 bp->stats_counter) {
3811 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3812 " tstorm counter (%d) != stats_counter (%d)\n",
3813 i, tclient->stats_counter, bp->stats_counter);
3814 return -1;
3815 }
3816 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3817 bp->stats_counter) {
3818 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3819 " ustorm counter (%d) != stats_counter (%d)\n",
3820 i, uclient->stats_counter, bp->stats_counter);
3821 return -1;
3822 }
3824 qstats->total_bytes_received_hi =
3825 le32_to_cpu(tclient->rcv_broadcast_bytes.hi);
3826 qstats->total_bytes_received_lo =
3827 le32_to_cpu(tclient->rcv_broadcast_bytes.lo);
3829 ADD_64(qstats->total_bytes_received_hi,
3830 le32_to_cpu(tclient->rcv_multicast_bytes.hi),
3831 qstats->total_bytes_received_lo,
3832 le32_to_cpu(tclient->rcv_multicast_bytes.lo));
3834 ADD_64(qstats->total_bytes_received_hi,
3835 le32_to_cpu(tclient->rcv_unicast_bytes.hi),
3836 qstats->total_bytes_received_lo,
3837 le32_to_cpu(tclient->rcv_unicast_bytes.lo));
3839 qstats->valid_bytes_received_hi =
3840 qstats->total_bytes_received_hi;
3841 qstats->valid_bytes_received_lo =
3842 qstats->total_bytes_received_lo;
3844 qstats->error_bytes_received_hi =
3845 le32_to_cpu(tclient->rcv_error_bytes.hi);
3846 qstats->error_bytes_received_lo =
3847 le32_to_cpu(tclient->rcv_error_bytes.lo);
3849 ADD_64(qstats->total_bytes_received_hi,
3850 qstats->error_bytes_received_hi,
3851 qstats->total_bytes_received_lo,
3852 qstats->error_bytes_received_lo);
3854 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3855 total_unicast_packets_received);
3856 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3857 total_multicast_packets_received);
3858 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3859 total_broadcast_packets_received);
3860 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3861 etherstatsoverrsizepkts);
3862 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3864 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3865 total_unicast_packets_received);
3866 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3867 total_multicast_packets_received);
3868 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3869 total_broadcast_packets_received);
3870 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3871 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3872 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3874 qstats->total_bytes_transmitted_hi =
3875 le32_to_cpu(xclient->unicast_bytes_sent.hi);
3876 qstats->total_bytes_transmitted_lo =
3877 le32_to_cpu(xclient->unicast_bytes_sent.lo);
3879 ADD_64(qstats->total_bytes_transmitted_hi,
3880 le32_to_cpu(xclient->multicast_bytes_sent.hi),
3881 qstats->total_bytes_transmitted_lo,
3882 le32_to_cpu(xclient->multicast_bytes_sent.lo));
3884 ADD_64(qstats->total_bytes_transmitted_hi,
3885 le32_to_cpu(xclient->broadcast_bytes_sent.hi),
3886 qstats->total_bytes_transmitted_lo,
3887 le32_to_cpu(xclient->broadcast_bytes_sent.lo));
3889 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3890 total_unicast_packets_transmitted);
3891 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3892 total_multicast_packets_transmitted);
3893 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3894 total_broadcast_packets_transmitted);
3896 old_tclient->checksum_discard = tclient->checksum_discard;
3897 old_tclient->ttl0_discard = tclient->ttl0_discard;
3899 ADD_64(fstats->total_bytes_received_hi,
3900 qstats->total_bytes_received_hi,
3901 fstats->total_bytes_received_lo,
3902 qstats->total_bytes_received_lo);
3903 ADD_64(fstats->total_bytes_transmitted_hi,
3904 qstats->total_bytes_transmitted_hi,
3905 fstats->total_bytes_transmitted_lo,
3906 qstats->total_bytes_transmitted_lo);
3907 ADD_64(fstats->total_unicast_packets_received_hi,
3908 qstats->total_unicast_packets_received_hi,
3909 fstats->total_unicast_packets_received_lo,
3910 qstats->total_unicast_packets_received_lo);
3911 ADD_64(fstats->total_multicast_packets_received_hi,
3912 qstats->total_multicast_packets_received_hi,
3913 fstats->total_multicast_packets_received_lo,
3914 qstats->total_multicast_packets_received_lo);
3915 ADD_64(fstats->total_broadcast_packets_received_hi,
3916 qstats->total_broadcast_packets_received_hi,
3917 fstats->total_broadcast_packets_received_lo,
3918 qstats->total_broadcast_packets_received_lo);
3919 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3920 qstats->total_unicast_packets_transmitted_hi,
3921 fstats->total_unicast_packets_transmitted_lo,
3922 qstats->total_unicast_packets_transmitted_lo);
3923 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3924 qstats->total_multicast_packets_transmitted_hi,
3925 fstats->total_multicast_packets_transmitted_lo,
3926 qstats->total_multicast_packets_transmitted_lo);
3927 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3928 qstats->total_broadcast_packets_transmitted_hi,
3929 fstats->total_broadcast_packets_transmitted_lo,
3930 qstats->total_broadcast_packets_transmitted_lo);
3931 ADD_64(fstats->valid_bytes_received_hi,
3932 qstats->valid_bytes_received_hi,
3933 fstats->valid_bytes_received_lo,
3934 qstats->valid_bytes_received_lo);
3936 ADD_64(estats->error_bytes_received_hi,
3937 qstats->error_bytes_received_hi,
3938 estats->error_bytes_received_lo,
3939 qstats->error_bytes_received_lo);
3940 ADD_64(estats->etherstatsoverrsizepkts_hi,
3941 qstats->etherstatsoverrsizepkts_hi,
3942 estats->etherstatsoverrsizepkts_lo,
3943 qstats->etherstatsoverrsizepkts_lo);
3944 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3945 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3948 ADD_64(fstats->total_bytes_received_hi,
3949 estats->rx_stat_ifhcinbadoctets_hi,
3950 fstats->total_bytes_received_lo,
3951 estats->rx_stat_ifhcinbadoctets_lo);
3953 memcpy(estats, &(fstats->total_bytes_received_hi),
3954 sizeof(struct host_func_stats) - 2*sizeof(u32));
3956 ADD_64(estats->etherstatsoverrsizepkts_hi,
3957 estats->rx_stat_dot3statsframestoolong_hi,
3958 estats->etherstatsoverrsizepkts_lo,
3959 estats->rx_stat_dot3statsframestoolong_lo);
3960 ADD_64(estats->error_bytes_received_hi,
3961 estats->rx_stat_ifhcinbadoctets_hi,
3962 estats->error_bytes_received_lo,
3963 estats->rx_stat_ifhcinbadoctets_lo);
3966 estats->mac_filter_discard =
3967 le32_to_cpu(tport->mac_filter_discard);
3968 estats->xxoverflow_discard =
3969 le32_to_cpu(tport->xxoverflow_discard);
3970 estats->brb_truncate_discard =
3971 le32_to_cpu(tport->brb_truncate_discard);
3972 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3975 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3977 bp->stats_pending = 0;
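/*
 * A note on the handshake above: bnx2x_storm_stats_post() sends the
 * query ramrod with drv_counter = bp->stats_counter++, and each storm
 * echoes that counter back in its per-client block; any block whose
 * echoed counter + 1 != bp->stats_counter is stale, so the update
 * bails out with -1 and the caller retries on the next tick.
 */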
3982 static void bnx2x_net_stats_update(struct bnx2x *bp)
3984 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3985 struct net_device_stats *nstats = &bp->dev->stats;
3988 nstats->rx_packets =
3989 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3990 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3991 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3993 nstats->tx_packets =
3994 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3995 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3996 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3998 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
4000 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
4002 nstats->rx_dropped = estats->mac_discard;
4003 for_each_rx_queue(bp, i)
4004 nstats->rx_dropped +=
4005 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
4007 nstats->tx_dropped = 0;
4009 nstats->multicast =
4010 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
4012 nstats->collisions =
4013 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
4015 nstats->rx_length_errors =
4016 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
4017 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
4018 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
4019 bnx2x_hilo(&estats->brb_truncate_hi);
4020 nstats->rx_crc_errors =
4021 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
4022 nstats->rx_frame_errors =
4023 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
4024 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
4025 nstats->rx_missed_errors = estats->xxoverflow_discard;
4027 nstats->rx_errors = nstats->rx_length_errors +
4028 nstats->rx_over_errors +
4029 nstats->rx_crc_errors +
4030 nstats->rx_frame_errors +
4031 nstats->rx_fifo_errors +
4032 nstats->rx_missed_errors;
4034 nstats->tx_aborted_errors =
4035 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
4036 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
4037 nstats->tx_carrier_errors =
4038 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
4039 nstats->tx_fifo_errors = 0;
4040 nstats->tx_heartbeat_errors = 0;
4041 nstats->tx_window_errors = 0;
4043 nstats->tx_errors = nstats->tx_aborted_errors +
4044 nstats->tx_carrier_errors +
4045 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
4048 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4050 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4053 estats->driver_xoff = 0;
4054 estats->rx_err_discard_pkt = 0;
4055 estats->rx_skb_alloc_failed = 0;
4056 estats->hw_csum_err = 0;
4057 for_each_rx_queue(bp, i) {
4058 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4060 estats->driver_xoff += qstats->driver_xoff;
4061 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4062 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4063 estats->hw_csum_err += qstats->hw_csum_err;
4067 static void bnx2x_stats_update(struct bnx2x *bp)
4069 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4071 if (*stats_comp != DMAE_COMP_VAL)
4075 bnx2x_hw_stats_update(bp);
4077 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4078 BNX2X_ERR("storm stats were not updated for 3 times\n");
4083 bnx2x_net_stats_update(bp);
4084 bnx2x_drv_stats_update(bp);
4086 if (bp->msglevel & NETIF_MSG_TIMER) {
4087 struct bnx2x_fastpath *fp0_rx = bp->fp;
4088 struct bnx2x_fastpath *fp0_tx = &(bp->fp[bp->num_rx_queues]);
4089 struct tstorm_per_client_stats *old_tclient =
4090 &bp->fp->old_tclient;
4091 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4092 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4093 struct net_device_stats *nstats = &bp->dev->stats;
4096 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4097 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4099 bnx2x_tx_avail(fp0_tx),
4100 le16_to_cpu(*fp0_tx->tx_cons_sb), nstats->tx_packets);
4101 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4103 (u16)(le16_to_cpu(*fp0_rx->rx_cons_sb) -
4104 fp0_rx->rx_comp_cons),
4105 le16_to_cpu(*fp0_rx->rx_cons_sb), nstats->rx_packets);
4106 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4107 "brb truncate %u\n",
4108 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4109 qstats->driver_xoff,
4110 estats->brb_drop_lo, estats->brb_truncate_lo);
4111 printk(KERN_DEBUG "tstats: checksum_discard %u "
4112 "packets_too_big_discard %lu no_buff_discard %lu "
4113 "mac_discard %u mac_filter_discard %u "
4114 "xxovrflow_discard %u brb_truncate_discard %u "
4115 "ttl0_discard %u\n",
4116 le32_to_cpu(old_tclient->checksum_discard),
4117 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4118 bnx2x_hilo(&qstats->no_buff_discard_hi),
4119 estats->mac_discard, estats->mac_filter_discard,
4120 estats->xxoverflow_discard, estats->brb_truncate_discard,
4121 le32_to_cpu(old_tclient->ttl0_discard));
4123 for_each_queue(bp, i) {
4124 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4125 bnx2x_fp(bp, i, tx_pkt),
4126 bnx2x_fp(bp, i, rx_pkt),
4127 bnx2x_fp(bp, i, rx_calls));
4131 bnx2x_hw_stats_post(bp);
4132 bnx2x_storm_stats_post(bp);
4135 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4137 struct dmae_command *dmae;
4139 int loader_idx = PMF_DMAE_C(bp);
4140 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4142 bp->executer_idx = 0;
4144 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4145 DMAE_CMD_C_ENABLE |
4146 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4147 #ifdef __BIG_ENDIAN
4148 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4149 #else
4150 DMAE_CMD_ENDIANITY_DW_SWAP |
4151 #endif
4152 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4153 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4155 if (bp->port.port_stx) {
4157 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4158 if (bp->func_stx)
4159 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4160 else
4161 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4162 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4163 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4164 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4165 dmae->dst_addr_hi = 0;
4166 dmae->len = sizeof(struct host_port_stats) >> 2;
4167 if (bp->func_stx) {
4168 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4169 dmae->comp_addr_hi = 0;
4170 dmae->comp_val = 1;
4171 } else {
4172 dmae->comp_addr_lo =
4173 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4174 dmae->comp_addr_hi =
4175 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4176 dmae->comp_val = DMAE_COMP_VAL;
4178 *stats_comp = 0;
4179 }
4180 }
4182 if (bp->func_stx) {
4184 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4185 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4186 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4187 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4188 dmae->dst_addr_lo = bp->func_stx >> 2;
4189 dmae->dst_addr_hi = 0;
4190 dmae->len = sizeof(struct host_func_stats) >> 2;
4191 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4192 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4193 dmae->comp_val = DMAE_COMP_VAL;
4195 *stats_comp = 0;
4196 }
4197 }
4199 static void bnx2x_stats_stop(struct bnx2x *bp)
4200 {
4201 int update = 0;
4203 bnx2x_stats_comp(bp);
4205 if (bp->port.pmf)
4206 update = (bnx2x_hw_stats_update(bp) == 0);
4208 update |= (bnx2x_storm_stats_update(bp) == 0);
4210 if (update) {
4211 bnx2x_net_stats_update(bp);
4213 if (bp->port.pmf)
4214 bnx2x_port_stats_stop(bp);
4216 bnx2x_hw_stats_post(bp);
4217 bnx2x_stats_comp(bp);
4218 }
4219 }
4221 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4222 {
4223 }
4225 static const struct {
4226 void (*action)(struct bnx2x *bp);
4227 enum bnx2x_stats_state next_state;
4228 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4229 /* state	event */
4230 {
4231 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4232 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4233 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4234 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4235 },
4236 {
4237 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4238 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4239 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4240 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4241 }
4242 };
4244 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4245 {
4246 enum bnx2x_stats_state state = bp->stats_state;
4248 bnx2x_stats_stm[state][event].action(bp);
4249 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4251 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4252 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4253 state, event, bp->stats_state);
4254 }
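/*
 * Illustrative trace (editor's note, not part of the driver): with the
 * state machine table above, a STATS_EVENT_UPDATE arriving while in
 * STATS_STATE_ENABLED runs bnx2x_stats_update() and stays in ENABLED,
 * while a STATS_EVENT_STOP in the same state runs bnx2x_stats_stop() and
 * moves to DISABLED. In DISABLED, UPDATE and STOP fall through to
 * bnx2x_stats_do_nothing(), so stale completions cannot restart the flow.
 */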
4256 static void bnx2x_timer(unsigned long data)
4258 struct bnx2x *bp = (struct bnx2x *) data;
4260 if (!netif_running(bp->dev))
4261 return;
4263 if (atomic_read(&bp->intr_sem) != 0)
4264 goto timer_restart;
4266 if (poll) {
4267 struct bnx2x_fastpath *fp = &bp->fp[0];
4268 int rc;
4270 bnx2x_tx_int(fp);
4271 rc = bnx2x_rx_int(fp, 1000);
4272 }
4274 if (!BP_NOMCP(bp)) {
4275 int func = BP_FUNC(bp);
4276 u32 drv_pulse;
4277 u32 mcp_pulse;
4279 ++bp->fw_drv_pulse_wr_seq;
4280 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4281 /* TBD - add SYSTEM_TIME */
4282 drv_pulse = bp->fw_drv_pulse_wr_seq;
4283 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4285 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4286 MCP_PULSE_SEQ_MASK);
4287 /* The delta between driver pulse and mcp response
4288 * should be 1 (before mcp response) or 0 (after mcp response)
4289 */
4290 if ((drv_pulse != mcp_pulse) &&
4291 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4292 /* someone lost a heartbeat... */
4293 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4294 drv_pulse, mcp_pulse);
4295 }
4296 }
4298 if ((bp->state == BNX2X_STATE_OPEN) ||
4299 (bp->state == BNX2X_STATE_DISABLED))
4300 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4302 timer_restart:
4303 mod_timer(&bp->timer, jiffies + bp->current_interval);
4304 }
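/*
 * Worked example of the heartbeat check above (editor's sketch): pulse
 * sequence numbers wrap modulo MCP_PULSE_SEQ_MASK + 1. If the driver has
 * just written drv_pulse = 0x0005 and the MCP already echoed
 * mcp_pulse = 0x0005, the delta is 0 (after mcp response); if the MCP has
 * not yet responded, mcp_pulse = 0x0004 and drv_pulse equals
 * ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK), giving delta 1. Anything else
 * (say drv_pulse = 0x0005 vs mcp_pulse = 0x0002) means heartbeats were
 * lost and triggers the BNX2X_ERR() above.
 */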
4306 /* end of Statistics */
4311 * nic init service functions
4314 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4315 {
4316 int port = BP_PORT(bp);
4319 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4320 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), 0,
4321 CSTORM_SB_STATUS_BLOCK_U_SIZE / 4);
4322 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4323 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), 0,
4324 CSTORM_SB_STATUS_BLOCK_C_SIZE / 4);
4325 }
4327 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4328 dma_addr_t mapping, int sb_id)
4329 {
4330 int port = BP_PORT(bp);
4331 int func = BP_FUNC(bp);
4333 int index;
4334 u64 section;
4336 section = ((u64)mapping) + offsetof(struct host_status_block,
4337 u_status_block);
4338 sb->u_status_block.status_block_id = sb_id;
4340 REG_WR(bp, BAR_CSTRORM_INTMEM +
4341 CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id), U64_LO(section));
4342 REG_WR(bp, BAR_CSTRORM_INTMEM +
4343 ((CSTORM_SB_HOST_SB_ADDR_U_OFFSET(port, sb_id)) + 4),
4344 U64_HI(section));
4345 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_USB_FUNC_OFF +
4346 CSTORM_SB_HOST_STATUS_BLOCK_U_OFFSET(port, sb_id), func);
4348 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4349 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4350 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id, index), 1);
4353 section = ((u64)mapping) + offsetof(struct host_status_block,
4354 c_status_block);
4355 sb->c_status_block.status_block_id = sb_id;
4357 REG_WR(bp, BAR_CSTRORM_INTMEM +
4358 CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id), U64_LO(section));
4359 REG_WR(bp, BAR_CSTRORM_INTMEM +
4360 ((CSTORM_SB_HOST_SB_ADDR_C_OFFSET(port, sb_id)) + 4),
4361 U64_HI(section));
4362 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4363 CSTORM_SB_HOST_STATUS_BLOCK_C_OFFSET(port, sb_id), func);
4365 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4366 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4367 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id, index), 1);
4369 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4370 }
4372 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4373 {
4374 int func = BP_FUNC(bp);
4376 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY +
4377 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4378 sizeof(struct tstorm_def_status_block)/4);
4379 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4380 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), 0,
4381 sizeof(struct cstorm_def_status_block_u)/4);
4382 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY +
4383 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), 0,
4384 sizeof(struct cstorm_def_status_block_c)/4);
4385 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY +
4386 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4387 sizeof(struct xstorm_def_status_block)/4);
4388 }
4390 static void bnx2x_init_def_sb(struct bnx2x *bp,
4391 struct host_def_status_block *def_sb,
4392 dma_addr_t mapping, int sb_id)
4393 {
4394 int port = BP_PORT(bp);
4395 int func = BP_FUNC(bp);
4396 int index, val, reg_offset;
4397 u64 section;
4400 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4401 atten_status_block);
4402 def_sb->atten_status_block.status_block_id = sb_id;
4404 bp->attn_state = 0;
4406 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4407 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4409 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4410 bp->attn_group[index].sig[0] = REG_RD(bp,
4411 reg_offset + 0x10*index);
4412 bp->attn_group[index].sig[1] = REG_RD(bp,
4413 reg_offset + 0x4 + 0x10*index);
4414 bp->attn_group[index].sig[2] = REG_RD(bp,
4415 reg_offset + 0x8 + 0x10*index);
4416 bp->attn_group[index].sig[3] = REG_RD(bp,
4417 reg_offset + 0xc + 0x10*index);
4420 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4421 HC_REG_ATTN_MSG0_ADDR_L);
4423 REG_WR(bp, reg_offset, U64_LO(section));
4424 REG_WR(bp, reg_offset + 4, U64_HI(section));
4426 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4428 val = REG_RD(bp, reg_offset);
4429 val |= sb_id;
4430 REG_WR(bp, reg_offset, val);
4433 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4434 u_def_status_block);
4435 def_sb->u_def_status_block.status_block_id = sb_id;
4437 REG_WR(bp, BAR_CSTRORM_INTMEM +
4438 CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func), U64_LO(section));
4439 REG_WR(bp, BAR_CSTRORM_INTMEM +
4440 ((CSTORM_DEF_SB_HOST_SB_ADDR_U_OFFSET(func)) + 4),
4441 U64_HI(section));
4442 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_USB_FUNC_OFF +
4443 CSTORM_DEF_SB_HOST_STATUS_BLOCK_U_OFFSET(func), func);
4445 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4446 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4447 CSTORM_DEF_SB_HC_DISABLE_U_OFFSET(func, index), 1);
4450 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4451 c_def_status_block);
4452 def_sb->c_def_status_block.status_block_id = sb_id;
4454 REG_WR(bp, BAR_CSTRORM_INTMEM +
4455 CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func), U64_LO(section));
4456 REG_WR(bp, BAR_CSTRORM_INTMEM +
4457 ((CSTORM_DEF_SB_HOST_SB_ADDR_C_OFFSET(func)) + 4),
4458 U64_HI(section));
4459 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4460 CSTORM_DEF_SB_HOST_STATUS_BLOCK_C_OFFSET(func), func);
4462 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4463 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4464 CSTORM_DEF_SB_HC_DISABLE_C_OFFSET(func, index), 1);
4467 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4468 t_def_status_block);
4469 def_sb->t_def_status_block.status_block_id = sb_id;
4471 REG_WR(bp, BAR_TSTRORM_INTMEM +
4472 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4473 REG_WR(bp, BAR_TSTRORM_INTMEM +
4474 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4475 U64_HI(section));
4476 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4477 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4479 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4480 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4481 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4484 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4485 x_def_status_block);
4486 def_sb->x_def_status_block.status_block_id = sb_id;
4488 REG_WR(bp, BAR_XSTRORM_INTMEM +
4489 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4490 REG_WR(bp, BAR_XSTRORM_INTMEM +
4491 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4492 U64_HI(section));
4493 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4494 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4496 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4497 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4498 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4500 bp->stats_pending = 0;
4501 bp->set_mac_pending = 0;
4503 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4504 }
4506 static void bnx2x_update_coalesce(struct bnx2x *bp)
4507 {
4508 int port = BP_PORT(bp);
4509 int i;
4511 for_each_queue(bp, i) {
4512 int sb_id = bp->fp[i].sb_id;
4514 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4515 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4516 CSTORM_SB_HC_TIMEOUT_U_OFFSET(port, sb_id,
4517 U_SB_ETH_RX_CQ_INDEX),
4518 bp->rx_ticks/12);
4519 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4520 CSTORM_SB_HC_DISABLE_U_OFFSET(port, sb_id,
4521 U_SB_ETH_RX_CQ_INDEX),
4522 (bp->rx_ticks/12) ? 0 : 1);
4524 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4525 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4526 CSTORM_SB_HC_TIMEOUT_C_OFFSET(port, sb_id,
4527 C_SB_ETH_TX_CQ_INDEX),
4528 bp->tx_ticks/12);
4529 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4530 CSTORM_SB_HC_DISABLE_C_OFFSET(port, sb_id,
4531 C_SB_ETH_TX_CQ_INDEX),
4532 (bp->tx_ticks/12) ? 0 : 1);
4533 }
4534 }
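/*
 * Editor's note on the arithmetic above: rx_ticks/tx_ticks are given in
 * microseconds while the HC timeout field apparently counts in 12us
 * units, hence the division by 12. For example, rx_ticks = 25 yields a
 * timeout of 2 units (~24us) with coalescing left enabled (disable flag
 * 0), while any value below 12 truncates to 0 and sets the HC_DISABLE
 * flag to 1, turning the timeout mechanism off for that index.
 */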
4536 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4537 struct bnx2x_fastpath *fp, int last)
4538 {
4539 int i;
4541 for (i = 0; i < last; i++) {
4542 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4543 struct sk_buff *skb = rx_buf->skb;
4545 if (skb == NULL) {
4546 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4547 continue;
4548 }
4550 if (fp->tpa_state[i] == BNX2X_TPA_START)
4551 pci_unmap_single(bp->pdev,
4552 pci_unmap_addr(rx_buf, mapping),
4553 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4555 dev_kfree_skb(skb);
4556 rx_buf->skb = NULL;
4557 }
4558 }
4560 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4561 {
4562 int func = BP_FUNC(bp);
4563 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4564 ETH_MAX_AGGREGATION_QUEUES_E1H;
4565 u16 ring_prod, cqe_ring_prod;
4566 int i, j;
4568 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4570 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4572 if (bp->flags & TPA_ENABLE_FLAG) {
4574 for_each_rx_queue(bp, j) {
4575 struct bnx2x_fastpath *fp = &bp->fp[j];
4577 for (i = 0; i < max_agg_queues; i++) {
4578 fp->tpa_pool[i].skb =
4579 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4580 if (!fp->tpa_pool[i].skb) {
4581 BNX2X_ERR("Failed to allocate TPA "
4582 "skb pool for queue[%d] - "
4583 "disabling TPA on this "
4585 bnx2x_free_tpa_pool(bp, fp, i);
4586 fp->disable_tpa = 1;
4587 break;
4588 }
4589 pci_unmap_addr_set((struct sw_rx_bd *)
4590 &bp->fp->tpa_pool[i],
4591 mapping, 0);
4592 fp->tpa_state[i] = BNX2X_TPA_STOP;
4593 }
4594 }
4595 }
4597 for_each_rx_queue(bp, j) {
4598 struct bnx2x_fastpath *fp = &bp->fp[j];
4601 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4602 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4604 /* Mark queue as Rx */
4605 fp->is_rx_queue = 1;
4607 /* "next page" elements initialization */
4609 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4610 struct eth_rx_sge *sge;
4612 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4613 sge->addr_hi =
4614 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4615 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4616 sge->addr_lo =
4617 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4618 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4621 bnx2x_init_sge_ring_bit_mask(fp);
4624 for (i = 1; i <= NUM_RX_RINGS; i++) {
4625 struct eth_rx_bd *rx_bd;
4627 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4628 rx_bd->addr_hi =
4629 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4630 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4631 rx_bd->addr_lo =
4632 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4633 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4637 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4638 struct eth_rx_cqe_next_page *nextpg;
4640 nextpg = (struct eth_rx_cqe_next_page *)
4641 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4642 nextpg->addr_hi =
4643 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4644 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4645 nextpg->addr_lo =
4646 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4647 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4650 /* Allocate SGEs and initialize the ring elements */
4651 for (i = 0, ring_prod = 0;
4652 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4654 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4655 BNX2X_ERR("was only able to allocate "
4657 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4658 /* Cleanup already allocated elements */
4659 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4660 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4661 fp->disable_tpa = 1;
4662 ring_prod = 0;
4663 break;
4664 }
4665 ring_prod = NEXT_SGE_IDX(ring_prod);
4667 fp->rx_sge_prod = ring_prod;
4669 /* Allocate BDs and initialize BD ring */
4670 fp->rx_comp_cons = 0;
4671 cqe_ring_prod = ring_prod = 0;
4672 for (i = 0; i < bp->rx_ring_size; i++) {
4673 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4674 BNX2X_ERR("was only able to allocate "
4675 "%d rx skbs on queue[%d]\n", i, j);
4676 fp->eth_q_stats.rx_skb_alloc_failed++;
4677 break;
4678 }
4679 ring_prod = NEXT_RX_IDX(ring_prod);
4680 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4681 WARN_ON(ring_prod <= i);
4684 fp->rx_bd_prod = ring_prod;
4685 /* must not have more available CQEs than BDs */
4686 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4687 cqe_ring_prod);
4688 fp->rx_pkt = fp->rx_calls = 0;
4690 /* Warning!
4691 * this will generate an interrupt (to the TSTORM)
4692 * must only be done after chip is initialized
4693 */
4694 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4695 fp->rx_sge_prod);
4697 if (j != 0)
4698 continue;
4699 REG_WR(bp, BAR_USTRORM_INTMEM +
4700 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4701 U64_LO(fp->rx_comp_mapping));
4702 REG_WR(bp, BAR_USTRORM_INTMEM +
4703 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4704 U64_HI(fp->rx_comp_mapping));
4705 }
4706 }
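/*
 * Editor's sketch of the "next page" linkage built above: each ring is a
 * chain of BCM_PAGE_SIZE pages whose final descriptors carry no data but
 * hold the DMA address of the following page. For the Rx BD ring the
 * element at index RX_DESC_CNT*i - 2 of each page points at page
 * (i % NUM_RX_RINGS), so the last page points back at page 0 and closes
 * the ring; the SGE ring is chained the same way, and the RCQ ring uses
 * its last entry (RCQ_DESC_CNT*i - 1) as an eth_rx_cqe_next_page element.
 */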
4708 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4709 {
4710 int i, j;
4712 for_each_tx_queue(bp, j) {
4713 struct bnx2x_fastpath *fp = &bp->fp[j];
4715 for (i = 1; i <= NUM_TX_RINGS; i++) {
4716 struct eth_tx_next_bd *tx_next_bd =
4717 &fp->tx_desc_ring[TX_DESC_CNT * i - 1].next_bd;
4719 tx_next_bd->addr_hi =
4720 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4721 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4722 tx_next_bd->addr_lo =
4723 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4724 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4727 fp->tx_db.data.header.header = DOORBELL_HDR_DB_TYPE;
4728 fp->tx_db.data.zero_fill1 = 0;
4729 fp->tx_db.data.prod = 0;
4731 fp->tx_pkt_prod = 0;
4732 fp->tx_pkt_cons = 0;
4733 fp->tx_bd_prod = 0;
4734 fp->tx_bd_cons = 0;
4735 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4736 fp->tx_pkt = 0;
4737 }
4738 }
4740 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4741 {
4742 int func = BP_FUNC(bp);
4744 spin_lock_init(&bp->spq_lock);
4746 bp->spq_left = MAX_SPQ_PENDING;
4747 bp->spq_prod_idx = 0;
4748 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4749 bp->spq_prod_bd = bp->spq;
4750 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4752 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4753 U64_LO(bp->spq_mapping));
4754 REG_WR(bp,
4755 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4756 U64_HI(bp->spq_mapping));
4758 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4759 bp->spq_prod_idx);
4760 }
4762 static void bnx2x_init_context(struct bnx2x *bp)
4763 {
4764 int i;
4766 for_each_rx_queue(bp, i) {
4767 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4768 struct bnx2x_fastpath *fp = &bp->fp[i];
4769 u8 cl_id = fp->cl_id;
4771 context->ustorm_st_context.common.sb_index_numbers =
4772 BNX2X_RX_SB_INDEX_NUM;
4773 context->ustorm_st_context.common.clientId = cl_id;
4774 context->ustorm_st_context.common.status_block_id = fp->sb_id;
4775 context->ustorm_st_context.common.flags =
4776 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4777 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4778 context->ustorm_st_context.common.statistics_counter_id =
4779 cl_id;
4780 context->ustorm_st_context.common.mc_alignment_log_size =
4781 BNX2X_RX_ALIGN_SHIFT;
4782 context->ustorm_st_context.common.bd_buff_size =
4783 bp->rx_buf_size;
4784 context->ustorm_st_context.common.bd_page_base_hi =
4785 U64_HI(fp->rx_desc_mapping);
4786 context->ustorm_st_context.common.bd_page_base_lo =
4787 U64_LO(fp->rx_desc_mapping);
4788 if (!fp->disable_tpa) {
4789 context->ustorm_st_context.common.flags |=
4790 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA;
4791 context->ustorm_st_context.common.sge_buff_size =
4792 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4793 (u32)0xffff);
4794 context->ustorm_st_context.common.sge_page_base_hi =
4795 U64_HI(fp->rx_sge_mapping);
4796 context->ustorm_st_context.common.sge_page_base_lo =
4797 U64_LO(fp->rx_sge_mapping);
4799 context->ustorm_st_context.common.max_sges_for_packet =
4800 SGE_PAGE_ALIGN(bp->dev->mtu) >> SGE_PAGE_SHIFT;
4801 context->ustorm_st_context.common.max_sges_for_packet =
4802 ((context->ustorm_st_context.common.
4803 max_sges_for_packet + PAGES_PER_SGE - 1) &
4804 (~(PAGES_PER_SGE - 1))) >> PAGES_PER_SGE_SHIFT;
4805 }
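/*
 * Worked example of the rounding above (editor's sketch): assuming a
 * 9000-byte mtu, 4K SGE pages and PAGES_PER_SGE = 2 (so
 * PAGES_PER_SGE_SHIFT = 1): SGE_PAGE_ALIGN(9000) >> SGE_PAGE_SHIFT = 3
 * pages; rounding up to a PAGES_PER_SGE multiple gives 4, and the final
 * shift yields max_sges_for_packet = 2 SGEs.
 */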
4807 context->ustorm_ag_context.cdu_usage =
4808 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4809 CDU_REGION_NUMBER_UCM_AG,
4810 ETH_CONNECTION_TYPE);
4812 context->xstorm_ag_context.cdu_reserved =
4813 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4814 CDU_REGION_NUMBER_XCM_AG,
4815 ETH_CONNECTION_TYPE);
4818 for_each_tx_queue(bp, i) {
4819 struct bnx2x_fastpath *fp = &bp->fp[i];
4820 struct eth_context *context =
4821 bnx2x_sp(bp, context[i - bp->num_rx_queues].eth);
4823 context->cstorm_st_context.sb_index_number =
4824 C_SB_ETH_TX_CQ_INDEX;
4825 context->cstorm_st_context.status_block_id = fp->sb_id;
4827 context->xstorm_st_context.tx_bd_page_base_hi =
4828 U64_HI(fp->tx_desc_mapping);
4829 context->xstorm_st_context.tx_bd_page_base_lo =
4830 U64_LO(fp->tx_desc_mapping);
4831 context->xstorm_st_context.statistics_data = (fp->cl_id |
4832 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4836 static void bnx2x_init_ind_table(struct bnx2x *bp)
4838 int func = BP_FUNC(bp);
4841 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4842 return;
4844 DP(NETIF_MSG_IFUP,
4845 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4846 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4847 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4848 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4849 bp->fp->cl_id + (i % bp->num_rx_queues));
4850 }
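/*
 * Worked example (editor's sketch): with bp->num_rx_queues = 4 and a
 * leading cl_id of 0, the loop above fills the indirection table with the
 * repeating pattern 0,1,2,3,0,1,2,3,... so RSS hash results are spread
 * round-robin over the four Rx clients. With ETH_RSS_MODE_DISABLED the
 * table is left untouched and all traffic lands on the leading client.
 */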
4852 static void bnx2x_set_client_config(struct bnx2x *bp)
4853 {
4854 struct tstorm_eth_client_config tstorm_client = {0};
4855 int port = BP_PORT(bp);
4856 int i;
4858 tstorm_client.mtu = bp->dev->mtu;
4859 tstorm_client.config_flags =
4860 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4861 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4863 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4864 tstorm_client.config_flags |=
4865 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4866 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4870 for_each_queue(bp, i) {
4871 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4873 REG_WR(bp, BAR_TSTRORM_INTMEM +
4874 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4875 ((u32 *)&tstorm_client)[0]);
4876 REG_WR(bp, BAR_TSTRORM_INTMEM +
4877 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4878 ((u32 *)&tstorm_client)[1]);
4879 }
4881 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4882 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
4883 }
4885 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4886 {
4887 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4888 int mode = bp->rx_mode;
4889 int mask = (1 << BP_L_ID(bp));
4890 int func = BP_FUNC(bp);
4891 int port = BP_PORT(bp);
4892 int i;
4893 /* All but management unicast packets should pass to the host as well */
4894 u32 llh_mask =
4895 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4896 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4897 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4898 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4900 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4902 switch (mode) {
4903 case BNX2X_RX_MODE_NONE: /* no Rx */
4904 tstorm_mac_filter.ucast_drop_all = mask;
4905 tstorm_mac_filter.mcast_drop_all = mask;
4906 tstorm_mac_filter.bcast_drop_all = mask;
4907 break;
4909 case BNX2X_RX_MODE_NORMAL:
4910 tstorm_mac_filter.bcast_accept_all = mask;
4911 break;
4913 case BNX2X_RX_MODE_ALLMULTI:
4914 tstorm_mac_filter.mcast_accept_all = mask;
4915 tstorm_mac_filter.bcast_accept_all = mask;
4916 break;
4918 case BNX2X_RX_MODE_PROMISC:
4919 tstorm_mac_filter.ucast_accept_all = mask;
4920 tstorm_mac_filter.mcast_accept_all = mask;
4921 tstorm_mac_filter.bcast_accept_all = mask;
4922 /* pass management unicast packets as well */
4923 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4924 break;
4927 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4931 REG_WR(bp,
4932 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
4933 llh_mask);
4935 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4936 REG_WR(bp, BAR_TSTRORM_INTMEM +
4937 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4938 ((u32 *)&tstorm_mac_filter)[i]);
4940 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4941 ((u32 *)&tstorm_mac_filter)[i]); */
4944 if (mode != BNX2X_RX_MODE_NONE)
4945 bnx2x_set_client_config(bp);
4946 }
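/*
 * Editor's note: the filter masks written above are per-client bit
 * fields, with this function's bit selected by mask = 1 << BP_L_ID(bp).
 * On a multi-function device where BP_L_ID() evaluates to 2, for
 * instance, PROMISC would set bit 2 (0x04) in the ucast/mcast/bcast
 * accept_all fields while leaving the other functions' filtering
 * untouched.
 */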
4948 static void bnx2x_init_internal_common(struct bnx2x *bp)
4949 {
4950 int i;
4952 /* Zero this manually as its initialization is
4953 currently missing in the initTool */
4954 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4955 REG_WR(bp, BAR_USTRORM_INTMEM +
4956 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4957 }
4959 static void bnx2x_init_internal_port(struct bnx2x *bp)
4960 {
4961 int port = BP_PORT(bp);
4963 REG_WR(bp,
4964 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_U_OFFSET(port), BNX2X_BTR);
4965 REG_WR(bp,
4966 BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_C_OFFSET(port), BNX2X_BTR);
4967 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4968 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4969 }
4971 /* Calculates the sum of vn_min_rates.
4972 It's needed for further normalizing of the min_rates.
4973 Returns:
4974 sum of vn_min_rates.
4975 or
4976 0 - if all the min_rates are 0.
4977 In the latter case the fairness algorithm should be deactivated.
4978 If not all min_rates are zero then those that are zeroes will be set to 1.
4979 */
4980 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4981 {
4982 int all_zero = 1;
4983 int port = BP_PORT(bp);
4984 int vn;
4986 bp->vn_weight_sum = 0;
4987 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4988 int func = 2*vn + port;
4989 u32 vn_cfg =
4990 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4991 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4992 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4994 /* Skip hidden vns */
4995 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4996 continue;
4998 /* If min rate is zero - set it to 1 */
4999 if (!vn_min_rate)
5000 vn_min_rate = DEF_MIN_RATE;
5001 else
5002 all_zero = 0;
5004 bp->vn_weight_sum += vn_min_rate;
5005 }
5007 /* ... only if all min rates are zeros - disable fairness */
5008 if (all_zero)
5009 bp->vn_weight_sum = 0;
5010 }
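/*
 * Worked example (editor's sketch): with four VNs whose configured min
 * rates scale to {0, 2500, 0, 7500}, the two zero entries are bumped to
 * DEF_MIN_RATE so they still receive a sliver of bandwidth, giving
 * vn_weight_sum = 2500 + 7500 + 2*DEF_MIN_RATE. Only when every VN is
 * zero (or hidden) does all_zero survive the loop, collapsing the sum to
 * 0, which in turn disables the fairness algorithm.
 */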
5012 static void bnx2x_init_internal_func(struct bnx2x *bp)
5014 struct tstorm_eth_function_common_config tstorm_config = {0};
5015 struct stats_indication_flags stats_flags = {0};
5016 int port = BP_PORT(bp);
5017 int func = BP_FUNC(bp);
5018 int i, j;
5019 u32 offset;
5020 u16 max_agg_size;
5022 if (is_multi(bp)) {
5023 tstorm_config.config_flags = MULTI_FLAGS(bp);
5024 tstorm_config.rss_result_mask = MULTI_MASK;
5025 }
5027 /* Enable TPA if needed */
5028 if (bp->flags & TPA_ENABLE_FLAG)
5029 tstorm_config.config_flags |=
5030 TSTORM_ETH_FUNCTION_COMMON_CONFIG_ENABLE_TPA;
5032 if (IS_E1HMF(bp))
5033 tstorm_config.config_flags |=
5034 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
5036 tstorm_config.leading_client_id = BP_L_ID(bp);
5038 REG_WR(bp, BAR_TSTRORM_INTMEM +
5039 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
5040 (*(u32 *)&tstorm_config));
5042 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
5043 bnx2x_set_storm_rx_mode(bp);
5045 for_each_queue(bp, i) {
5046 u8 cl_id = bp->fp[i].cl_id;
5048 /* reset xstorm per client statistics */
5049 offset = BAR_XSTRORM_INTMEM +
5050 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5051 for (j = 0;
5052 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5053 REG_WR(bp, offset + j*4, 0);
5055 /* reset tstorm per client statistics */
5056 offset = BAR_TSTRORM_INTMEM +
5057 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5058 for (j = 0;
5059 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5060 REG_WR(bp, offset + j*4, 0);
5062 /* reset ustorm per client statistics */
5063 offset = BAR_USTRORM_INTMEM +
5064 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5065 for (j = 0;
5066 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5067 REG_WR(bp, offset + j*4, 0);
5070 /* Init statistics related context */
5071 stats_flags.collect_eth = 1;
5073 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5074 ((u32 *)&stats_flags)[0]);
5075 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5076 ((u32 *)&stats_flags)[1]);
5078 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5079 ((u32 *)&stats_flags)[0]);
5080 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5081 ((u32 *)&stats_flags)[1]);
5083 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5084 ((u32 *)&stats_flags)[0]);
5085 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5086 ((u32 *)&stats_flags)[1]);
5088 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5089 ((u32 *)&stats_flags)[0]);
5090 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5091 ((u32 *)&stats_flags)[1]);
5093 REG_WR(bp, BAR_XSTRORM_INTMEM +
5094 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5095 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5096 REG_WR(bp, BAR_XSTRORM_INTMEM +
5097 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5098 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5100 REG_WR(bp, BAR_TSTRORM_INTMEM +
5101 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5102 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5103 REG_WR(bp, BAR_TSTRORM_INTMEM +
5104 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5105 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5107 REG_WR(bp, BAR_USTRORM_INTMEM +
5108 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5109 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5110 REG_WR(bp, BAR_USTRORM_INTMEM +
5111 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5112 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5114 if (CHIP_IS_E1H(bp)) {
5115 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5116 IS_E1HMF(bp));
5117 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5118 IS_E1HMF(bp));
5119 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5120 IS_E1HMF(bp));
5121 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5122 IS_E1HMF(bp));
5124 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5125 bp->e1hov);
5126 }
5128 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5129 max_agg_size =
5130 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5131 SGE_PAGE_SIZE * PAGES_PER_SGE),
5132 (u32)0xffff);
5133 for_each_rx_queue(bp, i) {
5134 struct bnx2x_fastpath *fp = &bp->fp[i];
5136 REG_WR(bp, BAR_USTRORM_INTMEM +
5137 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5138 U64_LO(fp->rx_comp_mapping));
5139 REG_WR(bp, BAR_USTRORM_INTMEM +
5140 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5141 U64_HI(fp->rx_comp_mapping));
5144 REG_WR(bp, BAR_USTRORM_INTMEM +
5145 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id),
5146 U64_LO(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5147 REG_WR(bp, BAR_USTRORM_INTMEM +
5148 USTORM_CQE_PAGE_NEXT_OFFSET(port, fp->cl_id) + 4,
5149 U64_HI(fp->rx_comp_mapping + BCM_PAGE_SIZE));
5151 REG_WR16(bp, BAR_USTRORM_INTMEM +
5152 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5153 max_agg_size);
5154 }
5156 /* dropless flow control */
5157 if (CHIP_IS_E1H(bp)) {
5158 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5160 rx_pause.bd_thr_low = 250;
5161 rx_pause.cqe_thr_low = 250;
5163 rx_pause.sge_thr_low = 0;
5164 rx_pause.bd_thr_high = 350;
5165 rx_pause.cqe_thr_high = 350;
5166 rx_pause.sge_thr_high = 0;
5168 for_each_rx_queue(bp, i) {
5169 struct bnx2x_fastpath *fp = &bp->fp[i];
5171 if (!fp->disable_tpa) {
5172 rx_pause.sge_thr_low = 150;
5173 rx_pause.sge_thr_high = 250;
5174 }
5177 offset = BAR_USTRORM_INTMEM +
5178 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5179 fp->cl_id);
5180 for (j = 0;
5181 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5182 j++)
5183 REG_WR(bp, offset + j*4,
5184 ((u32 *)&rx_pause)[j]);
5185 }
5186 }
5188 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5190 /* Init rate shaping and fairness contexts */
5191 if (IS_E1HMF(bp)) {
5192 int vn;
5194 /* During init there is no active link
5195 Until link is up, set link rate to 10Gbps */
5196 bp->link_vars.line_speed = SPEED_10000;
5197 bnx2x_init_port_minmax(bp);
5199 bnx2x_calc_vn_weight_sum(bp);
5201 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5202 bnx2x_init_vn_minmax(bp, 2*vn + port);
5204 /* Enable rate shaping and fairness */
5205 bp->cmng.flags.cmng_enables =
5206 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5207 if (bp->vn_weight_sum)
5208 bp->cmng.flags.cmng_enables |=
5209 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5210 else
5211 DP(NETIF_MSG_IFUP, "All MIN values are zeroes"
5212 " fairness will be disabled\n");
5213 } else {
5214 /* rate shaping and fairness are disabled */
5215 DP(NETIF_MSG_IFUP,
5216 "single function mode minmax will be disabled\n");
5217 }
5220 /* Store it to internal memory */
5221 if (bp->port.pmf)
5222 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5223 REG_WR(bp, BAR_XSTRORM_INTMEM +
5224 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5225 ((u32 *)(&bp->cmng))[i]);
5226 }
5228 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5229 {
5230 switch (load_code) {
5231 case FW_MSG_CODE_DRV_LOAD_COMMON:
5232 bnx2x_init_internal_common(bp);
5233 /* no break */
5235 case FW_MSG_CODE_DRV_LOAD_PORT:
5236 bnx2x_init_internal_port(bp);
5237 /* no break */
5239 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5240 bnx2x_init_internal_func(bp);
5241 break;
5243 default:
5244 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5245 break;
5246 }
5247 }
5249 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5250 {
5251 int i;
5253 for_each_queue(bp, i) {
5254 struct bnx2x_fastpath *fp = &bp->fp[i];
5257 fp->state = BNX2X_FP_STATE_CLOSED;
5259 fp->cl_id = BP_L_ID(bp) + i;
5260 fp->sb_id = fp->cl_id;
5261 /* Suitable Rx and Tx SBs are served by the same client */
5262 if (i >= bp->num_rx_queues)
5263 fp->cl_id -= bp->num_rx_queues;
5265 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5266 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5267 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5268 fp->sb_id);
5269 bnx2x_update_fpsb_idx(fp);
5270 }
5272 /* ensure status block indices were read */
5273 rmb();
5276 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5277 DEF_SB_ID);
5278 bnx2x_update_dsb_idx(bp);
5279 bnx2x_update_coalesce(bp);
5280 bnx2x_init_rx_rings(bp);
5281 bnx2x_init_tx_ring(bp);
5282 bnx2x_init_sp_ring(bp);
5283 bnx2x_init_context(bp);
5284 bnx2x_init_internal(bp, load_code);
5285 bnx2x_init_ind_table(bp);
5286 bnx2x_stats_init(bp);
5288 /* At this point, we are ready for interrupts */
5289 atomic_set(&bp->intr_sem, 0);
5291 /* flush all before enabling interrupts */
5292 mb();
5293 mmiowb();
5295 bnx2x_int_enable(bp);
5297 /* Check for SPIO5 */
5298 bnx2x_attn_int_deasserted0(bp,
5299 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5300 AEU_INPUTS_ATTN_BITS_SPIO5);
5301 }
5303 /* end of nic init */
5306 * gzip service functions
5309 static int bnx2x_gunzip_init(struct bnx2x *bp)
5311 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5312 &bp->gunzip_mapping);
5313 if (bp->gunzip_buf == NULL)
5314 goto gunzip_nomem1;
5316 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5317 if (bp->strm == NULL)
5318 goto gunzip_nomem2;
5320 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5321 GFP_KERNEL);
5322 if (bp->strm->workspace == NULL)
5323 goto gunzip_nomem3;
5325 return 0;
5327 gunzip_nomem3:
5328 kfree(bp->strm);
5329 bp->strm = NULL;
5331 gunzip_nomem2:
5332 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5333 bp->gunzip_mapping);
5334 bp->gunzip_buf = NULL;
5336 gunzip_nomem1:
5337 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5338 " un-compression\n", bp->dev->name);
5339 return -ENOMEM;
5340 }
5342 static void bnx2x_gunzip_end(struct bnx2x *bp)
5344 kfree(bp->strm->workspace);
5346 kfree(bp->strm);
5347 bp->strm = NULL;
5349 if (bp->gunzip_buf) {
5350 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5351 bp->gunzip_mapping);
5352 bp->gunzip_buf = NULL;
5353 }
5354 }
5356 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5357 {
5358 int n, rc;
5360 /* check gzip header */
5361 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5362 BNX2X_ERR("Bad gzip header\n");
5363 return -EINVAL;
5364 }
5366 n = 10;
5368 #define FNAME 0x8
5370 if (zbuf[3] & FNAME)
5371 while ((zbuf[n++] != 0) && (n < len));
5373 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5374 bp->strm->avail_in = len - n;
5375 bp->strm->next_out = bp->gunzip_buf;
5376 bp->strm->avail_out = FW_BUF_SIZE;
5378 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5379 if (rc != Z_OK)
5380 return rc;
5382 rc = zlib_inflate(bp->strm, Z_FINISH);
5383 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5384 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5385 bp->dev->name, bp->strm->msg);
5387 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5388 if (bp->gunzip_outlen & 0x3)
5389 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5390 " gunzip_outlen (%d) not aligned\n",
5391 bp->dev->name, bp->gunzip_outlen);
5392 bp->gunzip_outlen >>= 2;
5394 zlib_inflateEnd(bp->strm);
5396 if (rc == Z_STREAM_END)
5397 return 0;
5398 else
5399 return rc;
5400 }
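/*
 * Editor's sketch of the header handling above: a gzip stream begins
 * with the magic bytes 0x1f 0x8b, a method byte (0x08 = Z_DEFLATED), a
 * flag byte, a 4-byte mtime, and XFL and OS bytes -- 10 bytes in all,
 * hence n = 10. If the FNAME flag (bit 3) is set, a NUL-terminated
 * original file name follows and the while loop skips past it. The raw
 * deflate payload is then handed to zlib with a negative window size
 * (-MAX_WBITS), which tells zlib_inflate() there is no zlib wrapper to
 * parse.
 */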
5402 /* nic load/unload */
5405 * General service functions
5408 /* send a NIG loopback debug packet */
5409 static void bnx2x_lb_pckt(struct bnx2x *bp)
5410 {
5411 u32 wb_write[3];
5413 /* Ethernet source and destination addresses */
5414 wb_write[0] = 0x55555555;
5415 wb_write[1] = 0x55555555;
5416 wb_write[2] = 0x20; /* SOP */
5417 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5419 /* NON-IP protocol */
5420 wb_write[0] = 0x09000000;
5421 wb_write[1] = 0x55555555;
5422 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5423 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5424 }
5426 /* some of the internal memories
5427 * are not directly readable from the driver
5428 * to test them we send debug packets
5430 static int bnx2x_int_mem_test(struct bnx2x *bp)
5431 {
5432 int factor;
5433 int count, i;
5434 u32 val = 0;
5436 if (CHIP_REV_IS_FPGA(bp))
5437 factor = 120;
5438 else if (CHIP_REV_IS_EMUL(bp))
5439 factor = 200;
5440 else
5441 factor = 1;
5443 DP(NETIF_MSG_HW, "start part1\n");
5445 /* Disable inputs of parser neighbor blocks */
5446 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5447 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5448 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5449 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5451 /* Write 0 to parser credits for CFC search request */
5452 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5454 /* send Ethernet packet */
5455 bnx2x_lb_pckt(bp);
5457 /* TODO do i reset NIG statistic? */
5458 /* Wait until NIG register shows 1 packet of size 0x10 */
5459 count = 1000 * factor;
5460 while (count) {
5462 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5463 val = *bnx2x_sp(bp, wb_data[0]);
5464 if (val == 0x10)
5465 break;
5467 msleep(10);
5468 count--;
5469 }
5470 if (val != 0x10) {
5471 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5472 return -1;
5473 }
5475 /* Wait until PRS register shows 1 packet */
5476 count = 1000 * factor;
5477 while (count) {
5478 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5479 if (val == 1)
5480 break;
5482 msleep(10);
5483 count--;
5484 }
5485 if (val != 0x1) {
5486 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5487 return -2;
5488 }
5490 /* Reset and init BRB, PRS */
5491 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5492 msleep(50);
5493 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5494 msleep(50);
5495 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5496 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5498 DP(NETIF_MSG_HW, "part2\n");
5500 /* Disable inputs of parser neighbor blocks */
5501 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5502 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5503 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5504 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5506 /* Write 0 to parser credits for CFC search request */
5507 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5509 /* send 10 Ethernet packets */
5510 for (i = 0; i < 10; i++)
5511 bnx2x_lb_pckt(bp);
5513 /* Wait until NIG register shows 10 + 1
5514 packets of size 11*0x10 = 0xb0 */
5515 count = 1000 * factor;
5516 while (count) {
5518 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5519 val = *bnx2x_sp(bp, wb_data[0]);
5520 if (val == 0xb0)
5521 break;
5523 msleep(10);
5524 count--;
5525 }
5526 if (val != 0xb0) {
5527 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5528 return -3;
5529 }
5531 /* Wait until PRS register shows 2 packets */
5532 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5533 if (val != 2)
5534 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5536 /* Write 1 to parser credits for CFC search request */
5537 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5539 /* Wait until PRS register shows 3 packets */
5540 msleep(10 * factor);
5541 /* Wait until NIG register shows 1 packet of size 0x10 */
5542 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5543 if (val != 3)
5544 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5546 /* clear NIG EOP FIFO */
5547 for (i = 0; i < 11; i++)
5548 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5549 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5550 if (val != 1) {
5551 BNX2X_ERR("clear of NIG failed\n");
5552 return -4;
5553 }
5555 /* Reset and init BRB, PRS, NIG */
5556 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5557 msleep(50);
5558 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5559 msleep(50);
5560 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5561 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5563 /* set NIC mode */
5564 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5567 /* Enable inputs of parser neighbor blocks */
5568 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5569 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5570 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5571 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5573 DP(NETIF_MSG_HW, "done\n");
5578 static void enable_blocks_attention(struct bnx2x *bp)
5580 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5581 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5582 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5583 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5584 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5585 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5586 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5587 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5588 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5589 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5590 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5591 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5592 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5593 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5594 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5595 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5596 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5597 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5598 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5599 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5600 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5601 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5602 if (CHIP_REV_IS_FPGA(bp))
5603 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5605 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5606 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5607 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5608 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5609 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5610 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5611 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5612 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5613 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5614 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0X18); /* bit 3,4 masked */
5615 }
5618 static void bnx2x_reset_common(struct bnx2x *bp)
5619 {
5620 /* reset_common */
5621 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5622 0xd3ffff7f);
5623 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5624 }
5627 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5628 {
5629 int is_required = 0;
5630 u32 val;
5631 int port;
5633 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5634 SHARED_HW_CFG_FAN_FAILURE_MASK;
5636 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5637 is_required = 1;
5640 * The fan failure mechanism is usually related to the PHY type since
5641 * the power consumption of the board is affected by the PHY. Currently,
5642 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5644 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5645 for (port = PORT_0; port < PORT_MAX; port++) {
5646 u32 phy_type =
5647 SHMEM_RD(bp, dev_info.port_hw_config[port].
5648 external_phy_config) &
5649 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5650 is_required |=
5651 ((phy_type ==
5652 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5653 (phy_type ==
5654 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5655 (phy_type ==
5656 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5657 }
5659 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5661 if (is_required == 0)
5662 return;
5664 /* Fan failure is indicated by SPIO 5 */
5665 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5666 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5668 /* set to active low mode */
5669 val = REG_RD(bp, MISC_REG_SPIO_INT);
5670 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5671 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5672 REG_WR(bp, MISC_REG_SPIO_INT, val);
5674 /* enable interrupt to signal the IGU */
5675 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5676 val |= (1 << MISC_REGISTERS_SPIO_5);
5677 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5678 }
5680 static int bnx2x_init_common(struct bnx2x *bp)
5681 {
5682 u32 val, i;
5684 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5686 bnx2x_reset_common(bp);
5687 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5688 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5690 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5691 if (CHIP_IS_E1H(bp))
5692 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5694 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5695 msleep(30);
5696 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5698 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5699 if (CHIP_IS_E1(bp)) {
5700 /* enable HW interrupt from PXP on USDM overflow
5701 bit 16 on INT_MASK_0 */
5702 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5703 }
5705 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5707 #ifdef __BIG_ENDIAN
5709 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5710 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5711 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5712 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5713 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5714 /* make sure this value is 0 */
5715 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5717 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5718 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5719 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5720 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5721 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5722 #endif
5724 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5726 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5727 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5728 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5731 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5732 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5734 /* let the HW do its magic ... */
5735 msleep(100);
5736 /* finish PXP init */
5737 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5738 if (val != 1) {
5739 BNX2X_ERR("PXP2 CFG failed\n");
5740 return -EBUSY;
5741 }
5742 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5743 if (val != 1) {
5744 BNX2X_ERR("PXP2 RD_INIT failed\n");
5745 return -EBUSY;
5746 }
5748 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5749 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5751 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5753 /* clean the DMAE memory */
5754 bp->dmae_ready = 1;
5755 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5757 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5758 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5759 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5760 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5762 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5763 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5764 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5765 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5767 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5768 /* soft reset pulse */
5769 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5770 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5773 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5776 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5777 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5778 if (!CHIP_REV_IS_SLOW(bp)) {
5779 /* enable hw interrupt from doorbell Q */
5780 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5783 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5784 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5785 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5786 /* set NIC mode */
5787 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5788 if (CHIP_IS_E1H(bp))
5789 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5791 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5792 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5793 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5794 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5796 bnx2x_init_fill(bp, TSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5797 bnx2x_init_fill(bp, USEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5798 bnx2x_init_fill(bp, CSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5799 bnx2x_init_fill(bp, XSEM_REG_FAST_MEMORY, 0, STORM_INTMEM_SIZE(bp));
5801 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5802 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5803 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5804 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5806 /* sync semi rtc */
5807 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5808 0x80000000);
5809 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5810 0x80000000);
5812 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5813 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5814 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5816 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5817 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5818 REG_WR(bp, i, 0xc0cac01a);
5819 /* TODO: replace with something meaningful */
5821 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5822 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5824 if (sizeof(union cdu_context) != 1024)
5825 /* we currently assume that a context is 1024 bytes */
5826 printk(KERN_ALERT PFX "please adjust the size of"
5827 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5829 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5830 val = (4 << 24) + (0 << 12) + 1024;
5831 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5833 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5834 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5835 /* enable context validation interrupt from CFC */
5836 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5838 /* set the thresholds to prevent CFC/CDU race */
5839 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5841 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5842 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5844 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5845 /* Reset PCIE errors for debug */
5846 REG_WR(bp, 0x2814, 0xffffffff);
5847 REG_WR(bp, 0x3820, 0xffffffff);
5849 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5850 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5851 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5852 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5854 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5855 if (CHIP_IS_E1H(bp)) {
5856 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5857 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5858 }
5860 if (CHIP_REV_IS_SLOW(bp))
5861 msleep(200);
5863 /* finish CFC init */
5864 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5865 if (val != 1) {
5866 BNX2X_ERR("CFC LL_INIT failed\n");
5867 return -EBUSY;
5868 }
5869 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5870 if (val != 1) {
5871 BNX2X_ERR("CFC AC_INIT failed\n");
5872 return -EBUSY;
5873 }
5874 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5875 if (val != 1) {
5876 BNX2X_ERR("CFC CAM_INIT failed\n");
5877 return -EBUSY;
5878 }
5879 REG_WR(bp, CFC_REG_DEBUG0, 0);
5881 /* read NIG statistic
5882 to see if this is our first up since powerup */
5883 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5884 val = *bnx2x_sp(bp, wb_data[0]);
5886 /* do internal memory self test */
5887 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5888 BNX2X_ERR("internal mem self test failed\n");
5892 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5893 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5894 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5895 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5896 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
5897 bp->port.need_hw_lock = 1;
5898 break;
5900 default:
5901 break;
5902 }
5904 bnx2x_setup_fan_failure_detection(bp);
5906 /* clear PXP2 attentions */
5907 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5909 enable_blocks_attention(bp);
5911 if (!BP_NOMCP(bp)) {
5912 bnx2x_acquire_phy_lock(bp);
5913 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5914 bnx2x_release_phy_lock(bp);
5915 } else
5916 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5918 return 0;
5919 }
5921 static int bnx2x_init_port(struct bnx2x *bp)
5922 {
5923 int port = BP_PORT(bp);
5924 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5925 u32 low, high;
5926 u32 val;
5928 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5930 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5932 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5933 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5935 bnx2x_init_block(bp, TCM_BLOCK, init_stage);
5936 bnx2x_init_block(bp, UCM_BLOCK, init_stage);
5937 bnx2x_init_block(bp, CCM_BLOCK, init_stage);
5942 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5943 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5944 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5945 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5950 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5951 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5952 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5953 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5958 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5959 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5960 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5961 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5963 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5966 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5967 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5969 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5971 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5973 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5974 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5975 /* no pause for emulation and FPGA */
5976 low = 0;
5977 high = 513;
5978 } else {
5979 if (IS_E1HMF(bp))
5980 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5981 else if (bp->dev->mtu > 4096) {
5982 if (bp->flags & ONE_PORT_FLAG)
5983 low = 160;
5984 else {
5985 val = bp->dev->mtu;
5986 /* (24*1024 + val*4)/256 */
5987 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5988 }
5989 } else
5990 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5991 high = low + 56; /* 14*1024/256 */
5992 }
5993 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5994 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5997 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5999 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
6000 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
6001 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
6002 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
6004 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
6005 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
6006 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
6007 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
6009 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
6010 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
6012 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
6014 /* configure PBF to work without PAUSE mtu 9000 */
6015 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
6017 /* update threshold */
6018 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
6019 /* update init credit */
6020 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
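/*
 * Editor's note on the constants above: 9040/16 = 565 is the arbitration
 * threshold for a 9000-byte mtu plus overhead, expressed in 16-byte
 * units, and the init credit 565 + 553 - 22 = 1096 appears to add the
 * internal PBF pipeline depth on top of one full jumbo frame.
 */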
6022 /* probe changes */
6023 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
6024 msleep(5);
6025 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
6028 /* tell the searcher where the T2 table is */
6029 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
6031 wb_write[0] = U64_LO(bp->t2_mapping);
6032 wb_write[1] = U64_HI(bp->t2_mapping);
6033 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
6034 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
6035 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
6036 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
6038 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
6040 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
6041 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
6043 if (CHIP_IS_E1(bp)) {
6044 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6045 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6046 }
6047 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6049 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6050 /* init aeu_mask_attn_func_0/1:
6051 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6052 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6053 * bits 4-7 are used for "per vn group attention" */
6054 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6055 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6057 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6058 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6059 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6060 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6061 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6063 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6065 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6067 if (CHIP_IS_E1H(bp)) {
6068 /* 0x2 disable e1hov, 0x1 enable */
6069 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6070 (IS_E1HMF(bp) ? 0x1 : 0x2));
6072 /* support pause requests from USDM, TSDM and BRB */
6073 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6075 {
6076 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6077 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6078 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6079 }
6080 }
6082 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6083 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6085 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6086 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6087 {
6088 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6090 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6091 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6093 /* The GPIO should be swapped if the swap register is
6094 set and active */
6095 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6096 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6098 /* Select function upon port-swap configuration */
6099 if (port == 0) {
6100 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6101 aeu_gpio_mask = (swap_val && swap_override) ?
6102 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6103 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6104 } else {
6105 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6106 aeu_gpio_mask = (swap_val && swap_override) ?
6107 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6108 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6109 }
6110 val = REG_RD(bp, offset);
6111 /* add GPIO3 to group */
6112 val |= aeu_gpio_mask;
6113 REG_WR(bp, offset, val);
6114 }
6115 break;
6117 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6118 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6119 /* add SPIO 5 to group 0 */
6120 {
6121 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6122 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6123 val = REG_RD(bp, reg_addr);
6124 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6125 REG_WR(bp, reg_addr, val);
6126 }
6127 break;
6129 default:
6130 break;
6131 }
6133 bnx2x__link_reset(bp);
6135 return 0;
6136 }
6138 #define ILT_PER_FUNC (768/2)
6139 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6140 /* the phys address is shifted right 12 bits and has a
6141 1=valid bit added to the 53rd bit
6142 then since this is a wide register(TM)
6143 we split it into two 32 bit writes
6144 */
6145 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6146 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6147 #define PXP_ONE_ILT(x) (((x) << 10) | x)
6148 #define PXP_ILT_RANGE(f, l) (((l) << 10) | f)
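/*
 * Worked example (editor's sketch): for a DMA address of
 * 0x0000123456789000, ONCHIP_ADDR1() = (addr >> 12) & 0xFFFFFFFF =
 * 0x34567890 carries the low half of the page-shifted address, and
 * ONCHIP_ADDR2() = (1 << 20) | (addr >> 44) = 0x00100001 carries the
 * remaining high bits plus the valid bit. PXP_ONE_ILT(x) packs the same
 * index into both halves of an ILT range, i.e. a single-line range.
 */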
6150 #define CNIC_ILT_LINES 0
6152 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6153 {
6154 u32 reg;
6156 if (CHIP_IS_E1H(bp))
6157 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6159 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6161 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6164 static int bnx2x_init_func(struct bnx2x *bp)
6165 {
6166 int port = BP_PORT(bp);
6167 int func = BP_FUNC(bp);
6168 u32 addr, val;
6169 int i;
6171 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6173 /* set MSI reconfigure capability */
6174 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6175 val = REG_RD(bp, addr);
6176 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6177 REG_WR(bp, addr, val);
6179 i = FUNC_ILT_BASE(func);
6181 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6182 if (CHIP_IS_E1H(bp)) {
6183 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6184 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6185 } else /* E1 */
6186 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6187 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6190 if (CHIP_IS_E1H(bp)) {
6191 for (i = 0; i < 9; i++)
6192 bnx2x_init_block(bp,
6193 cm_blocks[i], FUNC0_STAGE + func);
6195 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6196 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6197 }
6199 /* HC init per function */
6200 if (CHIP_IS_E1H(bp)) {
6201 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6203 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6204 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6205 }
6206 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6208 /* Reset PCIE errors for debug */
6209 REG_WR(bp, 0x2114, 0xffffffff);
6210 REG_WR(bp, 0x2120, 0xffffffff);
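/* Added note (not in the original source): HW init is staged.  The MCP
 * load response decides how much of the chip this driver instance must
 * bring up: COMMON (first function up), PORT (per-port blocks) or
 * FUNCTION (per-function blocks only); the switch below falls through so
 * COMMON implies PORT and FUNCTION, and PORT implies FUNCTION.
 */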
static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
{
	int i, rc = 0;

	DP(BNX2X_MSG_MCP, "function %d  load_code %x\n",
	   BP_FUNC(bp), load_code);

	mutex_init(&bp->dmae_mutex);
	bnx2x_gunzip_init(bp);

	switch (load_code) {
	case FW_MSG_CODE_DRV_LOAD_COMMON:
		rc = bnx2x_init_common(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_PORT:
		rc = bnx2x_init_port(bp);
		if (rc)
			goto init_hw_err;
		/* no break */

	case FW_MSG_CODE_DRV_LOAD_FUNCTION:
		rc = bnx2x_init_func(bp);
		if (rc)
			goto init_hw_err;
		break;

	default:
		BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
		break;
	}

	if (!BP_NOMCP(bp)) {
		int func = BP_FUNC(bp);

		bp->fw_drv_pulse_wr_seq =
				(SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
				 DRV_PULSE_SEQ_MASK);
		bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
		DP(BNX2X_MSG_MCP, "drv_pulse 0x%x  func_stx 0x%x\n",
		   bp->fw_drv_pulse_wr_seq, bp->func_stx);
	}

	/* this needs to be done before gunzip end */
	bnx2x_zero_def_sb(bp);
	for_each_queue(bp, i)
		bnx2x_zero_sb(bp, BP_L_ID(bp) + i);

init_hw_err:
	bnx2x_gunzip_end(bp);

	return rc;
}
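/* Added note (not in the original source): driver<->MCP commands go
 * through a shared-memory mailbox.  The driver writes the command OR-ed
 * with an incrementing sequence number, then polls the firmware mailbox
 * until the echoed sequence number matches (see bnx2x_fw_command below).
 */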
/* send the MCP a request, block until there is a reply */
u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
{
	int func = BP_FUNC(bp);
	u32 seq = ++bp->fw_seq;
	u32 rc = 0;
	u32 cnt = 1;
	u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;

	SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
	DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));

	do {
		/* let the FW do its magic ... */
		msleep(delay);

		rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);

		/* Give the FW up to 2 seconds (200*10ms) */
	} while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));

	DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
	   cnt*delay, rc, seq);

	/* is this a reply to our command? */
	if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
		rc &= FW_MSG_CODE_MASK;

	} else {
		/* FW BUG! */
		BNX2X_ERR("FW failed to respond!\n");
		bnx2x_fw_dump(bp);
		rc = 0;
	}

	return rc;
}
static void bnx2x_free_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_FREE(x, y, size) \
	do { \
		if (x) { \
			pci_free_consistent(bp->pdev, size, x, y); \
			x = NULL; \
			y = 0; \
		} \
	} while (0)

#define BNX2X_FREE(x) \
	do { \
		if (x) { \
			vfree(x); \
			x = NULL; \
		} \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		/* status blocks */
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
			       bnx2x_fp(bp, i, status_blk_mapping),
			       sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
			       bnx2x_fp(bp, i, rx_desc_mapping),
			       sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
			       bnx2x_fp(bp, i, rx_comp_mapping),
			       sizeof(struct eth_fast_path_rx_cqe) *
			       NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
			       bnx2x_fp(bp, i, rx_sge_mapping),
			       BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {
		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
		BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
			       bnx2x_fp(bp, i, tx_desc_mapping),
			       sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
		       sizeof(struct host_def_status_block));

	BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
		       sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
	BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
	BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
	BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
#endif
	BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);

#undef BNX2X_PCI_FREE
#undef BNX2X_FREE
}
static int bnx2x_alloc_mem(struct bnx2x *bp)
{

#define BNX2X_PCI_ALLOC(x, y, size) \
	do { \
		x = pci_alloc_consistent(bp->pdev, size, y); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

#define BNX2X_ALLOC(x, size) \
	do { \
		x = vmalloc(size); \
		if (x == NULL) \
			goto alloc_mem_err; \
		memset(x, 0, size); \
	} while (0)

	int i;

	/* fastpath */
	/* Common */
	for_each_queue(bp, i) {
		bnx2x_fp(bp, i, bp) = bp;

		/* status blocks */
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
				&bnx2x_fp(bp, i, status_blk_mapping),
				sizeof(struct host_status_block));
	}
	/* Rx */
	for_each_rx_queue(bp, i) {
		/* fastpath rx rings: rx_buf rx_desc rx_comp */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
			    sizeof(struct sw_rx_bd) * NUM_RX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
				&bnx2x_fp(bp, i, rx_desc_mapping),
				sizeof(struct eth_rx_bd) * NUM_RX_BD);

		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
				&bnx2x_fp(bp, i, rx_comp_mapping),
				sizeof(struct eth_fast_path_rx_cqe) *
				NUM_RCQ_BD);

		/* SGE ring */
		BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
			    sizeof(struct sw_rx_page) * NUM_RX_SGE);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
				&bnx2x_fp(bp, i, rx_sge_mapping),
				BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
	}
	/* Tx */
	for_each_tx_queue(bp, i) {
		/* fastpath tx rings: tx_buf tx_desc */
		BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
			    sizeof(struct sw_tx_bd) * NUM_TX_BD);
		BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
				&bnx2x_fp(bp, i, tx_desc_mapping),
				sizeof(union eth_tx_bd_types) * NUM_TX_BD);
	}
	/* end of fastpath */

	BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
			sizeof(struct host_def_status_block));

	BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
			sizeof(struct bnx2x_slowpath));

#ifdef BCM_ISCSI
	/* allocate searcher T1 table */
	BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);

	for (i = 0; i < 64*1024; i += 64) {
		*(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
		*(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
	}

	/* allocate searcher T2 table
	   we allocate 1/4 of alloc num for T2
	   (which is not entered into the ILT) */
	BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);

	for (i = 0; i < 16*1024; i += 64)
		*(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;

	/* now fixup the last line in the block to point to the next block */
	*(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;

	/* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
	BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);

	/* QM queues (128*MAX_CONN) */
	BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
#endif

	/* Slow path ring */
	BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);

	return 0;

alloc_mem_err:
	bnx2x_free_mem(bp);
	return -ENOMEM;

#undef BNX2X_PCI_ALLOC
#undef BNX2X_ALLOC
}
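/* Added note (not in the original source): on any allocation failure the
 * alloc_mem_err path simply calls bnx2x_free_mem(); the free macros are
 * NULL-safe, so a partially completed allocation run is released cleanly.
 */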
static void bnx2x_free_tx_skbs(struct bnx2x *bp)
{
	int i;

	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		u16 bd_cons = fp->tx_bd_cons;
		u16 sw_prod = fp->tx_pkt_prod;
		u16 sw_cons = fp->tx_pkt_cons;

		while (sw_cons != sw_prod) {
			bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
			sw_cons++;
		}
	}
}

static void bnx2x_free_rx_skbs(struct bnx2x *bp)
{
	int i, j;

	for_each_rx_queue(bp, j) {
		struct bnx2x_fastpath *fp = &bp->fp[j];

		for (i = 0; i < NUM_RX_BD; i++) {
			struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
			struct sk_buff *skb = rx_buf->skb;

			if (skb == NULL)
				continue;

			pci_unmap_single(bp->pdev,
					 pci_unmap_addr(rx_buf, mapping),
					 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

			rx_buf->skb = NULL;
			dev_kfree_skb(skb);
		}
		if (!fp->disable_tpa)
			bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
					    ETH_MAX_AGGREGATION_QUEUES_E1 :
					    ETH_MAX_AGGREGATION_QUEUES_E1H);
	}
}

static void bnx2x_free_skbs(struct bnx2x *bp)
{
	bnx2x_free_tx_skbs(bp);
	bnx2x_free_rx_skbs(bp);
}
static void bnx2x_free_msix_irqs(struct bnx2x *bp)
{
	int i, offset = 1;

	free_irq(bp->msix_table[0].vector, bp->dev);
	DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
	   bp->msix_table[0].vector);

	for_each_queue(bp, i) {
		DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq  "
		   "state %x\n", i, bp->msix_table[i + offset].vector,
		   bnx2x_fp(bp, i, state));

		free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
	}
}

static void bnx2x_free_irq(struct bnx2x *bp)
{
	if (bp->flags & USING_MSIX_FLAG) {
		bnx2x_free_msix_irqs(bp);
		pci_disable_msix(bp->pdev);
		bp->flags &= ~USING_MSIX_FLAG;

	} else if (bp->flags & USING_MSI_FLAG) {
		free_irq(bp->pdev->irq, bp->dev);
		pci_disable_msi(bp->pdev);
		bp->flags &= ~USING_MSI_FLAG;

	} else
		free_irq(bp->pdev->irq, bp->dev);
}
static int bnx2x_enable_msix(struct bnx2x *bp)
{
	int i, rc, offset = 1;
	int igu_vec = 0;

	bp->msix_table[0].entry = igu_vec;
	DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);

	for_each_queue(bp, i) {
		igu_vec = BP_L_ID(bp) + offset + i;
		bp->msix_table[i + offset].entry = igu_vec;
		DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
		   "(fastpath #%u)\n", i + offset, igu_vec, i);
	}

	rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
			     BNX2X_NUM_QUEUES(bp) + offset);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI-X is not attainable  rc %d\n", rc);
		return rc;
	}

	bp->flags |= USING_MSIX_FLAG;

	return 0;
}
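/* Added note (not in the original source): entry 0 of the MSI-X table is
 * reserved for the slowpath status block; fastpath queue i uses entry
 * i + 1, matching the request_irq() calls in bnx2x_req_msix_irqs() below.
 */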
static int bnx2x_req_msix_irqs(struct bnx2x *bp)
{
	int i, rc, offset = 1;

	rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
			 bp->dev->name, bp->dev);
	if (rc) {
		BNX2X_ERR("request sp irq failed\n");
		return -EBUSY;
	}

	for_each_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		if (i < bp->num_rx_queues)
			sprintf(fp->name, "%s-rx-%d", bp->dev->name, i);
		else
			sprintf(fp->name, "%s-tx-%d",
				bp->dev->name, i - bp->num_rx_queues);

		rc = request_irq(bp->msix_table[i + offset].vector,
				 bnx2x_msix_fp_int, 0, fp->name, fp);
		if (rc) {
			BNX2X_ERR("request fp #%d irq failed  rc %d\n", i, rc);
			bnx2x_free_msix_irqs(bp);
			return -EBUSY;
		}

		fp->state = BNX2X_FP_STATE_IRQ;
	}

	i = BNX2X_NUM_QUEUES(bp);
	printk(KERN_INFO PFX "%s: using MSI-X  IRQs: sp %d  fp[%d] %d"
	       " ... fp[%d] %d\n",
	       bp->dev->name, bp->msix_table[0].vector,
	       0, bp->msix_table[offset].vector,
	       i - 1, bp->msix_table[offset + i - 1].vector);

	return 0;
}
static int bnx2x_enable_msi(struct bnx2x *bp)
{
	int rc;

	rc = pci_enable_msi(bp->pdev);
	if (rc) {
		DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
		return -1;
	}
	bp->flags |= USING_MSI_FLAG;

	return 0;
}

static int bnx2x_req_irq(struct bnx2x *bp)
{
	unsigned long flags;
	int rc;

	if (bp->flags & USING_MSI_FLAG)
		flags = 0;
	else
		flags = IRQF_SHARED;

	rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
			 bp->dev->name, bp->dev);
	if (!rc)
		bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;

	return rc;
}
static void bnx2x_napi_enable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_enable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_napi_disable(struct bnx2x *bp)
{
	int i;

	for_each_rx_queue(bp, i)
		napi_disable(&bnx2x_fp(bp, i, napi));
}

static void bnx2x_netif_start(struct bnx2x *bp)
{
	int intr_sem;

	intr_sem = atomic_dec_and_test(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (intr_sem) {
		if (netif_running(bp->dev)) {
			bnx2x_napi_enable(bp);
			bnx2x_int_enable(bp);
			if (bp->state == BNX2X_STATE_OPEN)
				netif_tx_wake_all_queues(bp->dev);
		}
	}
}

static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
{
	bnx2x_int_disable_sync(bp, disable_hw);
	bnx2x_napi_disable(bp);
	netif_tx_disable(bp->dev);
	bp->dev->trans_start = jiffies; /* prevent tx timeout */
}
/*
 * Init service functions
 */

static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
	int port = BP_PORT(bp);

	/* CAM allocation
	 * unicasts 0-31:port0 32-63:port1
	 * multicast 64-127:port0 128-191:port1
	 */
	config->hdr.length = 2;
	config->hdr.offset = port ? 32 : 0;
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].cam_entry.msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].cam_entry.middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[0].target_table_entry.flags = 0;
	else
		CAM_INVALIDATE(config->config_table[0]);
	config->config_table[0].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].target_table_entry.vlan_id = 0;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].cam_entry.msb_mac_addr,
	   config->config_table[0].cam_entry.middle_mac_addr,
	   config->config_table[0].cam_entry.lsb_mac_addr);

	/* broadcast */
	config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
	config->config_table[1].cam_entry.flags = cpu_to_le16(port);
	if (set)
		config->config_table[1].target_table_entry.flags =
				TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
	else
		CAM_INVALIDATE(config->config_table[1]);
	config->config_table[1].target_table_entry.clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[1].target_table_entry.vlan_id = 0;

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
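/* Added note (not in the original source): entry 0 programs the primary
 * (unicast) MAC and entry 1 the broadcast address; with set == 0 the same
 * ramrod is reused to invalidate both CAM entries instead.
 */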
static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
{
	struct mac_configuration_cmd_e1h *config =
		(struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);

	/* CAM allocation for E1H
	 * unicasts: by func number
	 * multicast: 20+FUNC*20, 20 each
	 */
	config->hdr.length = 1;
	config->hdr.offset = BP_FUNC(bp);
	config->hdr.client_id = bp->fp->cl_id;
	config->hdr.reserved1 = 0;

	/* primary MAC */
	config->config_table[0].msb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[0]);
	config->config_table[0].middle_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[2]);
	config->config_table[0].lsb_mac_addr =
					swab16(*(u16 *)&bp->dev->dev_addr[4]);
	config->config_table[0].clients_bit_vector =
					cpu_to_le32(1 << BP_L_ID(bp));
	config->config_table[0].vlan_id = 0;
	config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
	if (set)
		config->config_table[0].flags = BP_PORT(bp);
	else
		config->config_table[0].flags =
				MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;

	DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)  E1HOV %d  CLID %d\n",
	   (set ? "setting" : "clearing"),
	   config->config_table[0].msb_mac_addr,
	   config->config_table[0].middle_mac_addr,
	   config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));

	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
		      U64_HI(bnx2x_sp_mapping(bp, mac_config)),
		      U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
}
static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
			     int *state_p, int poll)
{
	/* can take a while if any port is running */
	int cnt = 5000;

	DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
	   poll ? "polling" : "waiting", state, idx);

	might_sleep();
	while (cnt--) {
		if (poll) {
			bnx2x_rx_int(bp->fp, 10);
			/* if index is different from 0
			 * the reply for some commands will
			 * be on the non default queue
			 */
			if (idx)
				bnx2x_rx_int(&bp->fp[idx], 10);
		}

		mb(); /* state is changed by bnx2x_sp_event() */
		if (*state_p == state) {
#ifdef BNX2X_STOP_ON_ERROR
			DP(NETIF_MSG_IFUP, "exit  (cnt %d)\n", 5000 - cnt);
#endif
			return 0;
		}

		msleep(1);
	}

	/* timeout! */
	BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
		  poll ? "polling" : "waiting", state, idx);
#ifdef BNX2X_STOP_ON_ERROR
	bnx2x_panic();
#endif

	return -EBUSY;
}
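/* Added note (not in the original source): ramrod completion is signalled
 * by bnx2x_sp_event() updating *state_p; the loop above polls for up to
 * 5000 iterations with msleep(1), i.e. roughly five seconds.
 */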
static int bnx2x_setup_leading(struct bnx2x *bp)
{
	int rc;

	/* reset IGU state */
	bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);

	return rc;
}

static int bnx2x_setup_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];

	/* reset IGU state */
	bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);

	/* SETUP ramrod */
	fp->state = BNX2X_FP_STATE_OPENING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
		      fp->cl_id, 0);

	/* Wait for completion */
	return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
				 &(fp->state), 0);
}
static int bnx2x_poll(struct napi_struct *napi, int budget);

static void bnx2x_set_int_mode_msix(struct bnx2x *bp, int *num_rx_queues_out,
				    int *num_tx_queues_out)
{
	int _num_rx_queues = 0, _num_tx_queues = 0;

	switch (bp->multi_mode) {
	case ETH_RSS_MODE_DISABLED:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;

	case ETH_RSS_MODE_REGULAR:
		if (num_rx_queues)
			_num_rx_queues = min_t(u32, num_rx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_rx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		if (num_tx_queues)
			_num_tx_queues = min_t(u32, num_tx_queues,
					       BNX2X_MAX_QUEUES(bp));
		else
			_num_tx_queues = min_t(u32, num_online_cpus(),
					       BNX2X_MAX_QUEUES(bp));

		/* There must not be more Tx queues than Rx queues */
		if (_num_tx_queues > _num_rx_queues) {
			BNX2X_ERR("number of tx queues (%d) > "
				  "number of rx queues (%d)"
				  " defaulting to %d\n",
				  _num_tx_queues, _num_rx_queues,
				  _num_rx_queues);
			_num_tx_queues = _num_rx_queues;
		}
		break;

	default:
		_num_rx_queues = 1;
		_num_tx_queues = 1;
		break;
	}

	*num_rx_queues_out = _num_rx_queues;
	*num_tx_queues_out = _num_tx_queues;
}
static int bnx2x_set_int_mode(struct bnx2x *bp)
{
	int rc = 0;

	switch (int_mode) {
	case INT_MODE_INTx:
	case INT_MODE_MSI:
		bp->num_rx_queues = 1;
		bp->num_tx_queues = 1;
		DP(NETIF_MSG_IFUP, "set number of queues to 1\n");
		break;

	default:
		/* Set interrupt mode according to bp->multi_mode value */
		bnx2x_set_int_mode_msix(bp, &bp->num_rx_queues,
					&bp->num_tx_queues);

		DP(NETIF_MSG_IFUP, "set number of queues to: rx %d tx %d\n",
		   bp->num_rx_queues, bp->num_tx_queues);

		/* if we can't use MSI-X we only need one fp,
		 * so try to enable MSI-X with the requested number of fp's
		 * and fallback to MSI or legacy INTx with one fp
		 */
		rc = bnx2x_enable_msix(bp);
		if (rc) {
			/* failed to enable MSI-X */
			if (bp->multi_mode)
				BNX2X_ERR("Multi requested but failed to "
					  "enable MSI-X (rx %d tx %d), "
					  "set number of queues to 1\n",
					  bp->num_rx_queues,
					  bp->num_tx_queues);
			bp->num_rx_queues = 1;
			bp->num_tx_queues = 1;
		}
		break;
	}
	bp->dev->real_num_tx_queues = bp->num_tx_queues;
	return rc;
}
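/* Added note (not in the original source): interrupt-mode selection is
 * best effort: MSI-X with the requested queue count first, then MSI,
 * then legacy INTx, dropping to a single fastpath queue on fallback.
 */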
static void bnx2x_set_rx_mode(struct net_device *dev);

/* must be called with rtnl_lock */
static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
{
	u32 load_code;
	int i, rc;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return -EPERM;
#endif

	bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;

	rc = bnx2x_set_int_mode(bp);

	if (bnx2x_alloc_mem(bp))
		return -ENOMEM;

	for_each_rx_queue(bp, i)
		bnx2x_fp(bp, i, disable_tpa) =
					((bp->flags & TPA_ENABLE_FLAG) == 0);

	for_each_rx_queue(bp, i)
		netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
			       bnx2x_poll, 128);

	bnx2x_napi_enable(bp);

	if (bp->flags & USING_MSIX_FLAG) {
		rc = bnx2x_req_msix_irqs(bp);
		if (rc) {
			pci_disable_msix(bp->pdev);
			goto load_error1;
		}
	} else {
		/* Fall to INTx if failed to enable MSI-X due to lack of
		   memory (in bnx2x_set_int_mode()) */
		if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
			bnx2x_enable_msi(bp);

		rc = bnx2x_req_irq(bp);
		if (rc) {
			BNX2X_ERR("IRQ request failed  rc %d, aborting\n", rc);
			if (bp->flags & USING_MSI_FLAG)
				pci_disable_msi(bp->pdev);
			goto load_error1;
		}
		if (bp->flags & USING_MSI_FLAG) {
			bp->dev->irq = bp->pdev->irq;
			printk(KERN_INFO PFX "%s: using MSI  IRQ %d\n",
			       bp->dev->name, bp->pdev->irq);
		}
	}

	/* Send LOAD_REQUEST command to MCP
	   Returns the type of LOAD command:
	   if it is the first port to be initialized
	   common blocks should be initialized, otherwise - not
	 */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error2;
		}
		if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
			rc = -EBUSY; /* other port in diagnostic mode */
			goto load_error2;
		}

	} else {
		int port = BP_PORT(bp);

		DP(NETIF_MSG_IFUP, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]++;
		load_count[1 + port]++;
		DP(NETIF_MSG_IFUP, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
		else if (load_count[1 + port] == 1)
			load_code = FW_MSG_CODE_DRV_LOAD_PORT;
		else
			load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
	}

	if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
	    (load_code == FW_MSG_CODE_DRV_LOAD_PORT))
		bp->port.pmf = 1;
	else
		bp->port.pmf = 0;
	DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);

	/* Initialize HW */
	rc = bnx2x_init_hw(bp, load_code);
	if (rc) {
		BNX2X_ERR("HW init failed, aborting\n");
		goto load_error2;
	}

	/* Setup NIC internals and enable interrupts */
	bnx2x_nic_init(bp, load_code);

	/* Send LOAD_DONE command to MCP */
	if (!BP_NOMCP(bp)) {
		load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
		if (!load_code) {
			BNX2X_ERR("MCP response failure, aborting\n");
			rc = -EBUSY;
			goto load_error3;
		}
	}

	bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;

	rc = bnx2x_setup_leading(bp);
	if (rc) {
		BNX2X_ERR("Setup leading failed!\n");
		goto load_error3;
	}

	if (CHIP_IS_E1H(bp))
		if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
			DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
			bp->state = BNX2X_STATE_DISABLED;
		}

	if (bp->state == BNX2X_STATE_OPEN) {
		for_each_nondefault_queue(bp, i) {
			rc = bnx2x_setup_multi(bp, i);
			if (rc)
				goto load_error3;
		}

		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	if (bp->port.pmf)
		bnx2x_initial_phy_init(bp, load_mode);

	/* Start fast path */
	switch (load_mode) {
	case LOAD_NORMAL:
		if (bp->state == BNX2X_STATE_OPEN) {
			/* Tx queue should be only reenabled */
			netif_tx_wake_all_queues(bp->dev);
		}
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_OPEN:
		netif_tx_start_all_queues(bp->dev);
		if (bp->state != BNX2X_STATE_OPEN)
			netif_tx_disable(bp->dev);
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		break;

	case LOAD_DIAG:
		/* Initialize the receive filter. */
		bnx2x_set_rx_mode(bp->dev);
		bp->state = BNX2X_STATE_DIAG;
		break;

	default:
		break;
	}

	if (!bp->port.pmf)
		bnx2x__link_status_update(bp);

	/* start the timer */
	mod_timer(&bp->timer, jiffies + bp->current_interval);

	return 0;

load_error3:
	bnx2x_int_disable_sync(bp, 1);
	if (!BP_NOMCP(bp)) {
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
	}
	bp->port.pmf = 0;
	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
load_error2:
	/* Release IRQs */
	bnx2x_free_irq(bp);
load_error1:
	bnx2x_napi_disable(bp);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	return rc;
}
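/* Added note (not in the original source): the load_error labels unwind
 * in reverse order of acquisition: load_error3 undoes HW/port state and
 * SKBs, load_error2 releases IRQs, load_error1 tears down NAPI and frees
 * driver memory.
 */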
static int bnx2x_stop_multi(struct bnx2x *bp, int index)
{
	struct bnx2x_fastpath *fp = &bp->fp[index];
	int rc;

	/* halt the connection */
	fp->state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
			       &(fp->state), 1);
	if (rc) /* timeout */
		return rc;

	/* delete cfc entry */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
			       &(fp->state), 1);
	return rc;
}

static int bnx2x_stop_leading(struct bnx2x *bp)
{
	__le16 dsb_sp_prod_idx;
	/* if the other port is handling traffic,
	   this can take a lot of time */
	int cnt = 500;
	int rc;

	might_sleep();

	/* Send HALT ramrod */
	bp->fp[0].state = BNX2X_FP_STATE_HALTING;
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);

	/* Wait for completion */
	rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
			       &(bp->fp[0].state), 1);
	if (rc) /* timeout */
		return rc;

	dsb_sp_prod_idx = *bp->dsb_sp_prod;

	/* Send PORT_DELETE ramrod */
	bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);

	/* Wait for completion to arrive on default status block
	   we are going to reset the chip anyway
	   so there is not much to do if this times out
	 */
	while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
		if (!cnt) {
			DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
			   "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
			   *bp->dsb_sp_prod, dsb_sp_prod_idx);
#ifdef BNX2X_STOP_ON_ERROR
			bnx2x_panic();
#endif
			rc = -EBUSY;
			break;
		}
		cnt--;
		msleep(1);
		rmb(); /* Refresh the dsb_sp_prod */
	}
	bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
	bp->fp[0].state = BNX2X_FP_STATE_CLOSED;

	return rc;
}
static void bnx2x_reset_func(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	int func = BP_FUNC(bp);
	int base, i;

	/* Configure IGU */
	REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
	REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);

	/* Clear ILT */
	base = FUNC_ILT_BASE(func);
	for (i = base; i < base + ILT_PER_FUNC; i++)
		bnx2x_ilt_wr(bp, i, 0);
}

static void bnx2x_reset_port(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val;

	REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);

	/* Do not rcv packets to BRB */
	REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
	/* Do not direct rcv packets that are not for MCP to the BRB */
	REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
			   NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);

	/* Configure AEU */
	REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);

	msleep(100);
	/* Check for BRB port occupancy */
	val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
	if (val)
		DP(NETIF_MSG_IFDOWN,
		   "BRB1 is not empty  %d blocks are occupied\n", val);

	/* TODO: Close Doorbell port? */
}

static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
{
	DP(BNX2X_MSG_MCP, "function %d  reset_code %x\n",
	   BP_FUNC(bp), reset_code);

	switch (reset_code) {
	case FW_MSG_CODE_DRV_UNLOAD_COMMON:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		bnx2x_reset_common(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_PORT:
		bnx2x_reset_port(bp);
		bnx2x_reset_func(bp);
		break;

	case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
		bnx2x_reset_func(bp);
		break;

	default:
		BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
		break;
	}
}
/* must be called with rtnl_lock */
static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
{
	int port = BP_PORT(bp);
	u32 reset_code = 0;
	int i, cnt, rc;

	bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;

	bp->rx_mode = BNX2X_RX_MODE_NONE;
	bnx2x_set_storm_rx_mode(bp);

	bnx2x_netif_stop(bp, 1);

	del_timer_sync(&bp->timer);
	SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
		 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
	bnx2x_stats_handle(bp, STATS_EVENT_STOP);

	/* Release IRQs */
	bnx2x_free_irq(bp);

	/* Wait until tx fastpath tasks complete */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		cnt = 1000;
		while (bnx2x_has_tx_work_unload(fp)) {

			if (!cnt) {
				BNX2X_ERR("timeout waiting for queue[%d]\n",
					  i);
#ifdef BNX2X_STOP_ON_ERROR
				bnx2x_panic();
				return -EBUSY;
#else
				break;
#endif
			}
			cnt--;
			msleep(1);
		}
	}
	/* Give HW time to discard old tx messages */
	msleep(1);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		bnx2x_set_mac_addr_e1(bp, 0);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);

		config->hdr.length = i;
		if (CHIP_REV_IS_SLOW(bp))
			config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
		else
			config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
		config->hdr.client_id = bp->fp->cl_id;
		config->hdr.reserved1 = 0;

		bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
			      U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
			      U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);

	} else { /* E1H */
		REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);

		bnx2x_set_mac_addr_e1h(bp, 0);

		for (i = 0; i < MC_HASH_SIZE; i++)
			REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);

		REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
	}

	if (unload_mode == UNLOAD_NORMAL)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	else if (bp->flags & NO_WOL_FLAG)
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;

	else if (bp->wol) {
		u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
		u8 *mac_addr = bp->dev->dev_addr;
		u32 val;
		/* The mac address is written to entries 1-4 to
		   preserve entry 0 which is used by the PMF */
		u8 entry = (BP_E1HVN(bp) + 1)*8;

		val = (mac_addr[0] << 8) | mac_addr[1];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);

		val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
		      (mac_addr[4] << 8) | mac_addr[5];
		EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);

		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;

	} else
		reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

	/* Close multi and leading connections
	   Completions for ramrods are collected in a synchronous way */
	for_each_nondefault_queue(bp, i)
		if (bnx2x_stop_multi(bp, i))
			goto unload_error;

	rc = bnx2x_stop_leading(bp);
	if (rc) {
		BNX2X_ERR("Stop leading failed!\n");
#ifdef BNX2X_STOP_ON_ERROR
		return -EBUSY;
#else
		goto unload_error;
#endif
	}

unload_error:
	if (!BP_NOMCP(bp))
		reset_code = bnx2x_fw_command(bp, reset_code);
	else {
		DP(NETIF_MSG_IFDOWN, "NO MCP - load counts      %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		load_count[0]--;
		load_count[1 + port]--;
		DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts  %d, %d, %d\n",
		   load_count[0], load_count[1], load_count[2]);
		if (load_count[0] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
		else if (load_count[1 + port] == 0)
			reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
		else
			reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
	}

	if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
	    (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
		bnx2x__link_reset(bp);

	/* Reset the chip */
	bnx2x_reset_chip(bp, reset_code);

	/* Report UNLOAD_DONE to MCP */
	if (!BP_NOMCP(bp))
		bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}
static void bnx2x_reset_task(struct work_struct *work)
{
	struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);

#ifdef BNX2X_STOP_ON_ERROR
	BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
		  " so reset not done to allow debug dump,\n"
		  " you will need to reboot when done\n");
	return;
#endif

	rtnl_lock();

	if (!netif_running(bp->dev))
		goto reset_task_exit;

	bnx2x_nic_unload(bp, UNLOAD_NORMAL);
	bnx2x_nic_load(bp, LOAD_NORMAL);

reset_task_exit:
	rtnl_unlock();
}

/* end of nic load/unload */

/*
 * Init service functions
 */
static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
{
	switch (func) {
	case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
	case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
	case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
	case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
	case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
	case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
	case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
	case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;

	default:
		BNX2X_ERR("Unsupported function index: %d\n", func);
		return (u32)(-1);
	}
}

static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
{
	u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;

	/* Flush all outstanding writes */
	mmiowb();

	/* Pretend to be function 0 */
	REG_WR(bp, reg, 0);
	/* Flush the GRC transaction (in the chip) */
	new_val = REG_RD(bp, reg);
	if (new_val != 0) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
			  new_val);
		BUG();
	}

	/* From now we are in the "like-E1" mode */
	bnx2x_int_disable(bp);

	/* Flush all outstanding writes */
	mmiowb();

	/* Restore the original function settings */
	REG_WR(bp, reg, orig_func);
	new_val = REG_RD(bp, reg);
	if (new_val != orig_func) {
		BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
			  orig_func, new_val);
		BUG();
	}
}

static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
{
	if (CHIP_IS_E1H(bp))
		bnx2x_undi_int_disable_e1h(bp, func);
	else
		bnx2x_int_disable(bp);
}
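/* Added note (not in the original source): on E1H this appears to rely on
 * the PGL "pretend" window, temporarily making GRC accesses behave as
 * function 0 (the "like-E1" mode mentioned above) while interrupts are
 * disabled, then restoring the original function.
 */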
static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
{
	u32 val;

	/* Check if there is any driver already loaded */
	val = REG_RD(bp, MISC_REG_UNPREPARED);
	if (val == 0x1) {
		/* Check if it is the UNDI driver
		 * UNDI driver initializes CID offset for normal bell to 0x7
		 */
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
		val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
		if (val == 0x7) {
			u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
			/* save our func */
			int func = BP_FUNC(bp);
			u32 swap_en;
			u32 swap_val;

			/* clear the UNDI indication */
			REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);

			BNX2X_DEV_INFO("UNDI is active! reset device\n");

			/* try unload UNDI on port 0 */
			bp->func = 0;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);
			reset_code = bnx2x_fw_command(bp, reset_code);

			/* if UNDI is loaded on the other port */
			if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {

				/* send "DONE" for previous unload */
				bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

				/* unload UNDI on port 1 */
				bp->func = 1;
				bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
					DRV_MSG_SEQ_NUMBER_MASK);
				reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;

				bnx2x_fw_command(bp, reset_code);
			}

			/* now it's safe to release the lock */
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);

			bnx2x_undi_int_disable(bp, func);

			/* close input traffic and wait for it */
			/* Do not rcv packets to BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
					     NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
			/* Do not direct rcv packets that are not for MCP to
			 * the BRB */
			REG_WR(bp,
			      (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
					     NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
			/* clear AEU */
			REG_WR(bp,
			      (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
					     MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
			msleep(10);

			/* save NIG port swap info */
			swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
			swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
			/* reset device */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
			       0xd3ffffff);
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
			       0x1403);
			/* take the NIG out of reset and restore swap values */
			REG_WR(bp,
			       GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
			       MISC_REGISTERS_RESET_REG_1_RST_NIG);
			REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
			REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);

			/* send unload done to the MCP */
			bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);

			/* restore our func and fw_seq */
			bp->func = func;
			bp->fw_seq =
			       (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
				DRV_MSG_SEQ_NUMBER_MASK);

		} else
			bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
	}
}
static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
{
	u32 val, val2, val3, val4, id;
	u16 pmc;

	/* Get the chip revision id and number. */
	/* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
	val = REG_RD(bp, MISC_REG_CHIP_NUM);
	id = ((val & 0xffff) << 16);
	val = REG_RD(bp, MISC_REG_CHIP_REV);
	id |= ((val & 0xf) << 12);
	val = REG_RD(bp, MISC_REG_CHIP_METAL);
	id |= ((val & 0xff) << 4);
	val = REG_RD(bp, MISC_REG_BOND_ID);
	id |= (val & 0xf);
	bp->common.chip_id = id;
	bp->link_params.chip_id = bp->common.chip_id;
	BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
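	/* Added note (not in the original source): with the field layout
	 * above, a hypothetical chip_id of 0x164e1014 would decode as chip
	 * num 0x164e, rev 0x1, metal 0x01, bond_id 0x4.
	 */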
	val = (REG_RD(bp, 0x2874) & 0x55);
	if ((bp->common.chip_id & 0x1) ||
	    (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
		bp->flags |= ONE_PORT_FLAG;
		BNX2X_DEV_INFO("single port device\n");
	}

	val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
	bp->common.flash_size = (NVRAM_1MB_SIZE <<
				 (val & MCPR_NVM_CFG4_FLASH_SIZE));
	BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
		       bp->common.flash_size, bp->common.flash_size);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
	BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);

	bp->link_params.hw_led_mode = ((bp->common.hw_config &
					SHARED_HW_CFG_LED_MODE_MASK) >>
				       SHARED_HW_CFG_LED_MODE_SHIFT);

	bp->link_params.feature_config_flags = 0;
	val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
	if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
		bp->link_params.feature_config_flags |=
				FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
	else
		bp->link_params.feature_config_flags &=
				~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;

	val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
	bp->common.bc_ver = val;
	BNX2X_DEV_INFO("bc_ver %X\n", val);
	if (val < BNX2X_BC_VER) {
		/* for now only warn
		 * later we might need to enforce this */
		BNX2X_ERR("This driver needs bc_ver %X but found %X,"
			  " please upgrade BC\n", BNX2X_BC_VER, val);
	}
	bp->link_params.feature_config_flags |=
		(val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
		FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;

	if (BP_E1HVN(bp) == 0) {
		pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
		bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
	} else
		/* no WOL capability for E1HVN != 0 */
		bp->flags |= NO_WOL_FLAG;

	BNX2X_DEV_INFO("%sWoL capable\n",
		       (bp->flags & NO_WOL_FLAG) ? "not " : "");

	val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
	val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
	val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
	val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);

	printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
	       val, val2, val3, val4);
}
static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
						    u32 switch_cfg)
{
	int port = BP_PORT(bp);
	u32 ext_phy_type;

	switch (switch_cfg) {
	case SWITCH_CFG_1G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);

		ext_phy_type =
			SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD SerDes ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
					   port*0x10);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	case SWITCH_CFG_10G:
		BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);

		ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_2500baseX_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_FIBRE |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
				       ext_phy_type);

			bp->port.supported |= (SUPPORTED_10baseT_Half |
					       SUPPORTED_10baseT_Full |
					       SUPPORTED_100baseT_Half |
					       SUPPORTED_100baseT_Full |
					       SUPPORTED_1000baseT_Full |
					       SUPPORTED_10000baseT_Full |
					       SUPPORTED_TP |
					       SUPPORTED_Autoneg |
					       SUPPORTED_Pause |
					       SUPPORTED_Asym_Pause);
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			BNX2X_ERR("NVRAM config error. "
				  "BAD XGXS ext_phy_config 0x%x\n",
				  bp->link_params.ext_phy_config);
			return;
		}

		bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
					   port*0x18);
		BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
		break;

	default:
		BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
			  bp->port.link_config);
		return;
	}
	bp->link_params.phy_addr = bp->port.phy_addr;

	/* mask what we support according to speed_cap_mask */
	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
		bp->port.supported &= ~SUPPORTED_10baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
		bp->port.supported &= ~SUPPORTED_10baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
		bp->port.supported &= ~SUPPORTED_100baseT_Half;

	if (!(bp->link_params.speed_cap_mask &
				PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
		bp->port.supported &= ~SUPPORTED_100baseT_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
		bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
					SUPPORTED_1000baseT_Full);

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
		bp->port.supported &= ~SUPPORTED_2500baseX_Full;

	if (!(bp->link_params.speed_cap_mask &
					PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
		bp->port.supported &= ~SUPPORTED_10000baseT_Full;

	BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
}
static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
{
	bp->link_params.req_duplex = DUPLEX_FULL;

	switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
	case PORT_FEATURE_LINK_SPEED_AUTO:
		if (bp->port.supported & SUPPORTED_Autoneg) {
			bp->link_params.req_line_speed = SPEED_AUTO_NEG;
			bp->port.advertising = bp->port.supported;
		} else {
			u32 ext_phy_type =
			    XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

			if ((ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
			    (ext_phy_type ==
			     PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
				/* force 10G, no AN */
				bp->link_params.req_line_speed = SPEED_10000;
				bp->port.advertising =
						(ADVERTISED_10000baseT_Full |
						 ADVERTISED_FIBRE);
				break;
			}
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  Autoneg not supported\n",
				  bp->port.link_config);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_FULL:
		if (bp->port.supported & SUPPORTED_10baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->port.advertising = (ADVERTISED_10baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10M_HALF:
		if (bp->port.supported & SUPPORTED_10baseT_Half) {
			bp->link_params.req_line_speed = SPEED_10;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_10baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_FULL:
		if (bp->port.supported & SUPPORTED_100baseT_Full) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->port.advertising = (ADVERTISED_100baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_100M_HALF:
		if (bp->port.supported & SUPPORTED_100baseT_Half) {
			bp->link_params.req_line_speed = SPEED_100;
			bp->link_params.req_duplex = DUPLEX_HALF;
			bp->port.advertising = (ADVERTISED_100baseT_Half |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_1G:
		if (bp->port.supported & SUPPORTED_1000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_1000;
			bp->port.advertising = (ADVERTISED_1000baseT_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_2_5G:
		if (bp->port.supported & SUPPORTED_2500baseX_Full) {
			bp->link_params.req_line_speed = SPEED_2500;
			bp->port.advertising = (ADVERTISED_2500baseX_Full |
						ADVERTISED_TP);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	case PORT_FEATURE_LINK_SPEED_10G_CX4:
	case PORT_FEATURE_LINK_SPEED_10G_KX4:
	case PORT_FEATURE_LINK_SPEED_10G_KR:
		if (bp->port.supported & SUPPORTED_10000baseT_Full) {
			bp->link_params.req_line_speed = SPEED_10000;
			bp->port.advertising = (ADVERTISED_10000baseT_Full |
						ADVERTISED_FIBRE);
		} else {
			BNX2X_ERR("NVRAM config error. "
				  "Invalid link_config 0x%x"
				  "  speed_cap_mask 0x%x\n",
				  bp->port.link_config,
				  bp->link_params.speed_cap_mask);
			return;
		}
		break;

	default:
		BNX2X_ERR("NVRAM config error. "
			  "BAD link speed link_config 0x%x\n",
			  bp->port.link_config);
		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->port.advertising = bp->port.supported;
		break;
	}

	bp->link_params.req_flow_ctrl = (bp->port.link_config &
					 PORT_FEATURE_FLOW_CONTROL_MASK);
	if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
	    !(bp->port.supported & SUPPORTED_Autoneg))
		bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;

	BNX2X_DEV_INFO("req_line_speed %d  req_duplex %d  req_flow_ctrl 0x%x"
		       "  advertising 0x%x\n",
		       bp->link_params.req_line_speed,
		       bp->link_params.req_duplex,
		       bp->link_params.req_flow_ctrl, bp->port.advertising);
}
static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 val, val2;
	u32 config;
	u16 i;

	bp->link_params.bp = bp;
	bp->link_params.port = port;

	bp->link_params.lane_config =
		SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
	bp->link_params.ext_phy_config =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].external_phy_config);
	/* BCM8727_NOC => BCM8727 no over current */
	if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
	    PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
		bp->link_params.ext_phy_config &=
			~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
		bp->link_params.ext_phy_config |=
			PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
		bp->link_params.feature_config_flags |=
			FEATURE_CONFIG_BCM8727_NOC;
	}

	bp->link_params.speed_cap_mask =
		SHMEM_RD(bp,
			 dev_info.port_hw_config[port].speed_capability_mask);

	bp->port.link_config =
		SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);

	/* Get the 4 lanes xgxs config rx and tx */
	for (i = 0; i < 2; i++) {
		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
		bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);

		val = SHMEM_RD(bp,
			   dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
		bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
		bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
	}

	/* If the device is capable of WoL, set the default state according
	 * to the HW
	 */
	config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
	bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
		   (config & PORT_FEATURE_WOL_ENABLED));

	BNX2X_DEV_INFO("lane_config 0x%08x  ext_phy_config 0x%08x"
		       "  speed_cap_mask 0x%08x  link_config 0x%08x\n",
		       bp->link_params.lane_config,
		       bp->link_params.ext_phy_config,
		       bp->link_params.speed_cap_mask, bp->port.link_config);

	bp->link_params.switch_cfg |= (bp->port.link_config &
				       PORT_FEATURE_CONNECTED_SWITCH_MASK);
	bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);

	bnx2x_link_settings_requested(bp);

	val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
	val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
	bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
	bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
	bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
	bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
	bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
	bp->dev->dev_addr[5] = (u8)(val & 0xff);
	memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
	memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
}
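/* Added note (not in the original source): the port MAC lives in shmem as
 * two words; mac_upper carries bytes 0-1 in its low 16 bits and mac_lower
 * carries bytes 2-5, which is what the shifts above unpack into dev_addr.
 */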
static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	u32 val, val2;
	int rc = 0;

	bnx2x_get_common_hwinfo(bp);

	bp->e1hov = 0;
	bp->e1hmf = 0;
	if (CHIP_IS_E1H(bp)) {
		bp->mf_config =
			SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);

		val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
		       FUNC_MF_CFG_E1HOV_TAG_MASK);
		if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
			bp->e1hov = val;
			bp->e1hmf = 1;
			BNX2X_DEV_INFO("MF mode  E1HOV for func %d is %d "
				       "(0x%04x)\n",
				       func, bp->e1hov, bp->e1hov);
		} else {
			BNX2X_DEV_INFO("single function mode\n");
			if (BP_E1HVN(bp)) {
				BNX2X_ERR("!!!  No valid E1HOV for func %d,"
					  "  aborting\n", func);
				rc = -EPERM;
			}
		}
	}

	if (!BP_NOMCP(bp)) {
		bnx2x_get_port_hwinfo(bp);

		bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
			      DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}

	if (IS_E1HMF(bp)) {
		val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
		val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
		if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
		    (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
			bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
			bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
			bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
			bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
			bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
			bp->dev->dev_addr[5] = (u8)(val & 0xff);
			memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
			       ETH_ALEN);
			memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
			       ETH_ALEN);
		}

		return rc;
	}

	if (BP_NOMCP(bp)) {
		/* only supposed to happen on emulation/FPGA */
		BNX2X_ERR("warning random MAC workaround active\n");
		random_ether_addr(bp->dev->dev_addr);
		memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
	}

	return rc;
}
static int __devinit bnx2x_init_bp(struct bnx2x *bp)
{
	int func = BP_FUNC(bp);
	int timer_interval;
	int rc;

	/* Disable interrupt handling until HW is initialized */
	atomic_set(&bp->intr_sem, 1);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	mutex_init(&bp->port.phy_mutex);

	INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
	INIT_WORK(&bp->reset_task, bnx2x_reset_task);

	rc = bnx2x_get_hwinfo(bp);

	/* need to reset chip if undi was active */
	if (!BP_NOMCP(bp))
		bnx2x_undi_unload(bp);

	if (CHIP_REV_IS_FPGA(bp))
		printk(KERN_ERR PFX "FPGA detected\n");

	if (BP_NOMCP(bp) && (func == 0))
		printk(KERN_ERR PFX
		       "MCP disabled, must load devices in order!\n");

	/* Set multi queue mode */
	if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
	    ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
		printk(KERN_ERR PFX
		       "Multi disabled since int_mode requested is not MSI-X\n");
		multi_mode = ETH_RSS_MODE_DISABLED;
	}
	bp->multi_mode = multi_mode;

	/* Set TPA flags */
	if (disable_tpa) {
		bp->flags &= ~TPA_ENABLE_FLAG;
		bp->dev->features &= ~NETIF_F_LRO;
	} else {
		bp->flags |= TPA_ENABLE_FLAG;
		bp->dev->features |= NETIF_F_LRO;
	}

	bp->tx_ring_size = MAX_TX_AVAIL;
	bp->rx_ring_size = MAX_RX_AVAIL;

	timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
	bp->current_interval = (poll ? poll : timer_interval);

	init_timer(&bp->timer);
	bp->timer.expires = jiffies + bp->current_interval;
	bp->timer.data = (unsigned long) bp;
	bp->timer.function = bnx2x_timer;

	return rc;
}
/*
 * ethtool service functions
 */

/* All ethtool functions called with rtnl_lock */

static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);

	cmd->supported = bp->port.supported;
	cmd->advertising = bp->port.advertising;

	if (netif_carrier_ok(dev)) {
		cmd->speed = bp->link_vars.line_speed;
		cmd->duplex = bp->link_vars.duplex;
	} else {
		cmd->speed = bp->link_params.req_line_speed;
		cmd->duplex = bp->link_params.req_duplex;
	}
	if (IS_E1HMF(bp)) {
		u16 vn_max_rate;

		vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
				FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
		if (vn_max_rate < cmd->speed)
			cmd->speed = vn_max_rate;
	}

	if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
		u32 ext_phy_type =
			XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);

		switch (ext_phy_type) {
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
			cmd->port = PORT_FIBRE;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
			cmd->port = PORT_TP;
			break;

		case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
			BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
				  bp->link_params.ext_phy_config);
			break;

		default:
			DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
			   bp->link_params.ext_phy_config);
			break;
		}
	} else
		cmd->port = PORT_TP;

	cmd->phy_address = bp->port.phy_addr;
	cmd->transceiver = XCVR_INTERNAL;

	if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
		cmd->autoneg = AUTONEG_ENABLE;
	else
		cmd->autoneg = AUTONEG_DISABLE;

	cmd->maxtxpkt = 0;
	cmd->maxrxpkt = 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	return 0;
}
static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 advertising;

	if (IS_E1HMF(bp))
		return 0;

	DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
	   DP_LEVEL "  supported 0x%x  advertising 0x%x  speed %d\n"
	   DP_LEVEL "  duplex %d  port %d  phy_address %d  transceiver %d\n"
	   DP_LEVEL "  autoneg %d  maxtxpkt %d  maxrxpkt %d\n",
	   cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
	   cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
	   cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);

	if (cmd->autoneg == AUTONEG_ENABLE) {
		if (!(bp->port.supported & SUPPORTED_Autoneg)) {
			DP(NETIF_MSG_LINK, "Autoneg not supported\n");
			return -EINVAL;
		}

		/* advertise the requested speed and duplex if supported */
		cmd->advertising &= bp->port.supported;

		bp->link_params.req_line_speed = SPEED_AUTO_NEG;
		bp->link_params.req_duplex = DUPLEX_FULL;
		bp->port.advertising |= (ADVERTISED_Autoneg |
					 cmd->advertising);

	} else { /* forced speed */
		/* advertise the requested speed and duplex if supported */
		switch (cmd->speed) {
		case SPEED_10:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "10M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_10baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "10M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_10baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_100:
			if (cmd->duplex == DUPLEX_FULL) {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Full)) {
					DP(NETIF_MSG_LINK,
					   "100M full not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Full |
					       ADVERTISED_TP);
			} else {
				if (!(bp->port.supported &
				      SUPPORTED_100baseT_Half)) {
					DP(NETIF_MSG_LINK,
					   "100M half not supported\n");
					return -EINVAL;
				}

				advertising = (ADVERTISED_100baseT_Half |
					       ADVERTISED_TP);
			}
			break;

		case SPEED_1000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "1G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
				DP(NETIF_MSG_LINK, "1G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_1000baseT_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_2500:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK,
				   "2.5G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
				DP(NETIF_MSG_LINK,
				   "2.5G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_2500baseX_Full |
				       ADVERTISED_TP);
			break;

		case SPEED_10000:
			if (cmd->duplex != DUPLEX_FULL) {
				DP(NETIF_MSG_LINK, "10G half not supported\n");
				return -EINVAL;
			}

			if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
				DP(NETIF_MSG_LINK, "10G full not supported\n");
				return -EINVAL;
			}

			advertising = (ADVERTISED_10000baseT_Full |
				       ADVERTISED_FIBRE);
			break;

		default:
			DP(NETIF_MSG_LINK, "Unsupported speed\n");
			return -EINVAL;
		}

		bp->link_params.req_line_speed = cmd->speed;
		bp->link_params.req_duplex = cmd->duplex;
		bp->port.advertising = advertising;
	}

	DP(NETIF_MSG_LINK, "req_line_speed %d\n"
	   DP_LEVEL "  req_duplex %d  advertising 0x%x\n",
	   bp->link_params.req_line_speed, bp->link_params.req_duplex,
	   bp->port.advertising);

	if (netif_running(dev)) {
		bnx2x_stats_handle(bp, STATS_EVENT_STOP);
		bnx2x_link_set(bp);
	}

	return 0;
}
8653 #define PHY_FW_VER_LEN 10
8655 static void bnx2x_get_drvinfo(struct net_device *dev,
8656 struct ethtool_drvinfo *info)
8658 struct bnx2x *bp = netdev_priv(dev);
8659 u8 phy_fw_ver[PHY_FW_VER_LEN];
8661 strcpy(info->driver, DRV_MODULE_NAME);
8662 strcpy(info->version, DRV_MODULE_VERSION);
8664 phy_fw_ver[0] = '\0';
8666 bnx2x_acquire_phy_lock(bp);
8667 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8668 (bp->state != BNX2X_STATE_CLOSED),
8669 phy_fw_ver, PHY_FW_VER_LEN);
8670 bnx2x_release_phy_lock(bp);
8673 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8674 (bp->common.bc_ver & 0xff0000) >> 16,
8675 (bp->common.bc_ver & 0xff00) >> 8,
8676 (bp->common.bc_ver & 0xff),
8677 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
8678 strcpy(info->bus_info, pci_name(bp->pdev));
8679 info->n_stats = BNX2X_NUM_STATS;
8680 info->testinfo_len = BNX2X_NUM_TESTS;
8681 info->eedump_len = bp->common.flash_size;
8682 info->regdump_len = 0;
8685 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8686 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
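/*
 * The register dump length is the sum of the sizes of all registers
 * marked online for the running chip (E1 vs E1H) plus the dump header;
 * each "wreg" entry contributes size * (1 + read_regs_count) dwords,
 * one for the register itself plus its dependent read registers.
 */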
8688 static int bnx2x_get_regs_len(struct net_device *dev)
8690 static u32 regdump_len;
8691 struct bnx2x *bp = netdev_priv(dev);
8697 if (CHIP_IS_E1(bp)) {
8698 for (i = 0; i < REGS_COUNT; i++)
8699 if (IS_E1_ONLINE(reg_addrs[i].info))
8700 regdump_len += reg_addrs[i].size;
8702 for (i = 0; i < WREGS_COUNT_E1; i++)
8703 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8704 regdump_len += wreg_addrs_e1[i].size *
8705 (1 + wreg_addrs_e1[i].read_regs_count);
8708 for (i = 0; i < REGS_COUNT; i++)
8709 if (IS_E1H_ONLINE(reg_addrs[i].info))
8710 regdump_len += reg_addrs[i].size;
8712 for (i = 0; i < WREGS_COUNT_E1H; i++)
8713 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8714 regdump_len += wreg_addrs_e1h[i].size *
8715 (1 + wreg_addrs_e1h[i].read_regs_count);
8718 regdump_len += sizeof(struct dump_hdr);
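/*
 * The dump laid out below is a dump_hdr (storm WAITP register values
 * plus an online-info mask) followed by the raw 32-bit contents of
 * every online register, read back-to-back with REG_RD.
 */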
8723 static void bnx2x_get_regs(struct net_device *dev,
8724 struct ethtool_regs *regs, void *_p)
8727 struct bnx2x *bp = netdev_priv(dev);
8728 struct dump_hdr dump_hdr = {0};
8731 memset(p, 0, regs->len);
8733 if (!netif_running(bp->dev))
8736 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8737 dump_hdr.dump_sign = dump_sign_all;
8738 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8739 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8740 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8741 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8742 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8744 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8745 p += dump_hdr.hdr_size + 1;
8747 if (CHIP_IS_E1(bp)) {
8748 for (i = 0; i < REGS_COUNT; i++)
8749 if (IS_E1_ONLINE(reg_addrs[i].info))
8750 for (j = 0; j < reg_addrs[i].size; j++)
8752 reg_addrs[i].addr + j*4);
8755 for (i = 0; i < REGS_COUNT; i++)
8756 if (IS_E1H_ONLINE(reg_addrs[i].info))
8757 for (j = 0; j < reg_addrs[i].size; j++)
8759 reg_addrs[i].addr + j*4);
8763 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8765 struct bnx2x *bp = netdev_priv(dev);
8767 if (bp->flags & NO_WOL_FLAG) {
8771 wol->supported = WAKE_MAGIC;
8773 wol->wolopts = WAKE_MAGIC;
8777 memset(&wol->sopass, 0, sizeof(wol->sopass));
8780 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8782 struct bnx2x *bp = netdev_priv(dev);
8784 if (wol->wolopts & ~WAKE_MAGIC)
8787 if (wol->wolopts & WAKE_MAGIC) {
8788 if (bp->flags & NO_WOL_FLAG)
8798 static u32 bnx2x_get_msglevel(struct net_device *dev)
8800 struct bnx2x *bp = netdev_priv(dev);
8802 return bp->msglevel;
8805 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8807 struct bnx2x *bp = netdev_priv(dev);
8809 if (capable(CAP_NET_ADMIN))
8810 bp->msglevel = level;
8813 static int bnx2x_nway_reset(struct net_device *dev)
8815 struct bnx2x *bp = netdev_priv(dev);
8820 if (netif_running(dev)) {
8821 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8829 bnx2x_get_link(struct net_device *dev)
8831 struct bnx2x *bp = netdev_priv(dev);
8833 return bp->link_vars.link_up;
8836 static int bnx2x_get_eeprom_len(struct net_device *dev)
8838 struct bnx2x *bp = netdev_priv(dev);
8840 return bp->common.flash_size;
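/*
 * NVRAM access is arbitrated per port in hardware: a port requests
 * ownership by setting its ARB_REQ_SET bit and polls its ARB bit until
 * the arbiter grants the lock or NVRAM_TIMEOUT_COUNT expires;
 * bnx2x_release_nvram_lock() mirrors this with the ARB_REQ_CLR bit.
 */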
8843 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8845 int port = BP_PORT(bp);
8849 /* adjust timeout for emulation/FPGA */
8850 count = NVRAM_TIMEOUT_COUNT;
8851 if (CHIP_REV_IS_SLOW(bp))
8854 /* request access to nvram interface */
8855 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8856 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8858 for (i = 0; i < count*10; i++) {
8859 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8860 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8866 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8867 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8874 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8876 int port = BP_PORT(bp);
8880 /* adjust timeout for emulation/FPGA */
8881 count = NVRAM_TIMEOUT_COUNT;
8882 if (CHIP_REV_IS_SLOW(bp))
8885 /* relinquish nvram interface */
8886 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8887 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8889 for (i = 0; i < count*10; i++) {
8890 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8891 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8897 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8898 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8905 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8909 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8911 /* enable both bits, even on read */
8912 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8913 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8914 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8917 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8921 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8923 /* disable both bits, even after read */
8924 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8925 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8926 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
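/*
 * A single NVRAM dword access follows one command sequence in both
 * directions: clear the stale DONE bit, program the byte address (and
 * the data word, for writes), issue DOIT together with the FIRST/LAST
 * framing flags, then poll for DONE within the timeout.
 */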
8929 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8935 /* build the command word */
8936 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8938 /* need to clear DONE bit separately */
8939 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8941 /* address of the NVRAM to read from */
8942 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8943 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8945 /* issue a read command */
8946 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8948 /* adjust timeout for emulation/FPGA */
8949 count = NVRAM_TIMEOUT_COUNT;
8950 if (CHIP_REV_IS_SLOW(bp))
8953 /* wait for completion */
8956 for (i = 0; i < count; i++) {
8958 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8960 if (val & MCPR_NVM_COMMAND_DONE) {
8961 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8962 /* we read nvram data in cpu order
8963 * but ethtool sees it as an array of bytes;
8964 * converting to big-endian gives the expected byte order */
8965 *ret_val = cpu_to_be32(val);
8974 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8981 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8983 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8988 if (offset + buf_size > bp->common.flash_size) {
8989 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8990 " buf_size (0x%x) > flash_size (0x%x)\n",
8991 offset, buf_size, bp->common.flash_size);
8995 /* request access to nvram interface */
8996 rc = bnx2x_acquire_nvram_lock(bp);
9000 /* enable access to nvram interface */
9001 bnx2x_enable_nvram_access(bp);
9003 /* read the first word(s) */
9004 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9005 while ((buf_size > sizeof(u32)) && (rc == 0)) {
9006 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9007 memcpy(ret_buf, &val, 4);
9009 /* advance to the next dword */
9010 offset += sizeof(u32);
9011 ret_buf += sizeof(u32);
9012 buf_size -= sizeof(u32);
9017 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9018 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
9019 memcpy(ret_buf, &val, 4);
9022 /* disable access to nvram interface */
9023 bnx2x_disable_nvram_access(bp);
9024 bnx2x_release_nvram_lock(bp);
9029 static int bnx2x_get_eeprom(struct net_device *dev,
9030 struct ethtool_eeprom *eeprom, u8 *eebuf)
9032 struct bnx2x *bp = netdev_priv(dev);
9035 if (!netif_running(dev))
9036 return -EAGAIN;
9038 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9039 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9040 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9041 eeprom->len, eeprom->len);
9043 /* parameters already validated in ethtool_get_eeprom */
9045 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
9050 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
9055 /* build the command word */
9056 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
9058 /* need to clear DONE bit separately */
9059 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
9061 /* write the data */
9062 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
9064 /* address of the NVRAM to write to */
9065 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
9066 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9068 /* issue the write command */
9069 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9071 /* adjust timeout for emulation/FPGA */
9072 count = NVRAM_TIMEOUT_COUNT;
9073 if (CHIP_REV_IS_SLOW(bp))
9076 /* wait for completion */
9078 for (i = 0; i < count; i++) {
9080 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9081 if (val & MCPR_NVM_COMMAND_DONE) {
9090 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
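/*
 * Example: a one-byte write to offset 0x13 reads the dword at the
 * aligned offset 0x10; BYTE_OFFSET(0x13) = 8 * 3 = 24, so the new byte
 * is spliced into bits 31..24 before the dword is written back.
 */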
9092 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9100 if (offset + buf_size > bp->common.flash_size) {
9101 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9102 " buf_size (0x%x) > flash_size (0x%x)\n",
9103 offset, buf_size, bp->common.flash_size);
9107 /* request access to nvram interface */
9108 rc = bnx2x_acquire_nvram_lock(bp);
9112 /* enable access to nvram interface */
9113 bnx2x_enable_nvram_access(bp);
9115 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9116 align_offset = (offset & ~0x03);
9117 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9120 val &= ~(0xff << BYTE_OFFSET(offset));
9121 val |= (*data_buf << BYTE_OFFSET(offset));
9123 /* nvram data is returned as an array of bytes;
9124 * convert it back to cpu order */
9125 val = be32_to_cpu(val);
9127 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9131 /* disable access to nvram interface */
9132 bnx2x_disable_nvram_access(bp);
9133 bnx2x_release_nvram_lock(bp);
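/*
 * Multi-dword writes must be framed per flash page: FIRST is raised on
 * the initial dword and again whenever a write starts a new
 * NVRAM_PAGE_SIZE page, and LAST on the final dword of the buffer or
 * of a page, exactly as the loop below arranges.
 */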
9138 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9146 if (buf_size == 1) /* ethtool */
9147 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9149 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9151 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9156 if (offset + buf_size > bp->common.flash_size) {
9157 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9158 " buf_size (0x%x) > flash_size (0x%x)\n",
9159 offset, buf_size, bp->common.flash_size);
9163 /* request access to nvram interface */
9164 rc = bnx2x_acquire_nvram_lock(bp);
9168 /* enable access to nvram interface */
9169 bnx2x_enable_nvram_access(bp);
9172 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9173 while ((written_so_far < buf_size) && (rc == 0)) {
9174 if (written_so_far == (buf_size - sizeof(u32)))
9175 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9176 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9177 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9178 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9179 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9181 memcpy(&val, data_buf, 4);
9183 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9185 /* advance to the next dword */
9186 offset += sizeof(u32);
9187 data_buf += sizeof(u32);
9188 written_so_far += sizeof(u32);
9192 /* disable access to nvram interface */
9193 bnx2x_disable_nvram_access(bp);
9194 bnx2x_release_nvram_lock(bp);
9199 static int bnx2x_set_eeprom(struct net_device *dev,
9200 struct ethtool_eeprom *eeprom, u8 *eebuf)
9202 struct bnx2x *bp = netdev_priv(dev);
9205 if (!netif_running(dev))
9206 return -EAGAIN;
9208 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9209 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9210 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9211 eeprom->len, eeprom->len);
9213 /* parameters already validated in ethtool_set_eeprom */
9215 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
9216 if (eeprom->magic == 0x00504859)
9219 bnx2x_acquire_phy_lock(bp);
9220 rc = bnx2x_flash_download(bp, BP_PORT(bp),
9221 bp->link_params.ext_phy_config,
9222 (bp->state != BNX2X_STATE_CLOSED),
9223 eebuf, eeprom->len);
9224 if ((bp->state == BNX2X_STATE_OPEN) ||
9225 (bp->state == BNX2X_STATE_DISABLED)) {
9226 rc |= bnx2x_link_reset(&bp->link_params,
9228 rc |= bnx2x_phy_init(&bp->link_params,
9231 bnx2x_release_phy_lock(bp);
9233 } else /* Only the PMF can access the PHY */
9236 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9241 static int bnx2x_get_coalesce(struct net_device *dev,
9242 struct ethtool_coalesce *coal)
9244 struct bnx2x *bp = netdev_priv(dev);
9246 memset(coal, 0, sizeof(struct ethtool_coalesce));
9248 coal->rx_coalesce_usecs = bp->rx_ticks;
9249 coal->tx_coalesce_usecs = bp->tx_ticks;
9254 #define BNX2X_MAX_COALES_TOUT (0xf0*12) /* Maximal coalescing timeout in us */
9255 static int bnx2x_set_coalesce(struct net_device *dev,
9256 struct ethtool_coalesce *coal)
9258 struct bnx2x *bp = netdev_priv(dev);
9260 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9261 if (bp->rx_ticks > BNX2X_MAX_COALES_TOUT)
9262 bp->rx_ticks = BNX2X_MAX_COALES_TOUT;
9264 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9265 if (bp->tx_ticks > BNX2X_MAX_COALES_TOUT)
9266 bp->tx_ticks = BNX2X_MAX_COALES_TOUT;
9268 if (netif_running(dev))
9269 bnx2x_update_coalesce(bp);
9274 static void bnx2x_get_ringparam(struct net_device *dev,
9275 struct ethtool_ringparam *ering)
9277 struct bnx2x *bp = netdev_priv(dev);
9279 ering->rx_max_pending = MAX_RX_AVAIL;
9280 ering->rx_mini_max_pending = 0;
9281 ering->rx_jumbo_max_pending = 0;
9283 ering->rx_pending = bp->rx_ring_size;
9284 ering->rx_mini_pending = 0;
9285 ering->rx_jumbo_pending = 0;
9287 ering->tx_max_pending = MAX_TX_AVAIL;
9288 ering->tx_pending = bp->tx_ring_size;
9291 static int bnx2x_set_ringparam(struct net_device *dev,
9292 struct ethtool_ringparam *ering)
9294 struct bnx2x *bp = netdev_priv(dev);
9297 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9298 (ering->tx_pending > MAX_TX_AVAIL) ||
9299 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9302 bp->rx_ring_size = ering->rx_pending;
9303 bp->tx_ring_size = ering->tx_pending;
9305 if (netif_running(dev)) {
9306 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9307 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9313 static void bnx2x_get_pauseparam(struct net_device *dev,
9314 struct ethtool_pauseparam *epause)
9316 struct bnx2x *bp = netdev_priv(dev);
9318 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9319 BNX2X_FLOW_CTRL_AUTO) &&
9320 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9322 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9323 BNX2X_FLOW_CTRL_RX);
9324 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9325 BNX2X_FLOW_CTRL_TX);
9327 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9328 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9329 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9332 static int bnx2x_set_pauseparam(struct net_device *dev,
9333 struct ethtool_pauseparam *epause)
9335 struct bnx2x *bp = netdev_priv(dev);
9340 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9341 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9342 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9344 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9346 if (epause->rx_pause)
9347 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9349 if (epause->tx_pause)
9350 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9352 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9353 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9355 if (epause->autoneg) {
9356 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9357 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9361 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9362 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9366 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9368 if (netif_running(dev)) {
9369 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9376 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9378 struct bnx2x *bp = netdev_priv(dev);
9382 /* TPA requires Rx CSUM offloading */
9383 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9384 if (!(dev->features & NETIF_F_LRO)) {
9385 dev->features |= NETIF_F_LRO;
9386 bp->flags |= TPA_ENABLE_FLAG;
9390 } else if (dev->features & NETIF_F_LRO) {
9391 dev->features &= ~NETIF_F_LRO;
9392 bp->flags &= ~TPA_ENABLE_FLAG;
9396 if (changed && netif_running(dev)) {
9397 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9398 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9404 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9406 struct bnx2x *bp = netdev_priv(dev);
9411 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9413 struct bnx2x *bp = netdev_priv(dev);
9418 /* Disable TPA when Rx CSUM is disabled; otherwise all
9419 TPA'ed packets will be discarded due to a wrong TCP CSUM */
9421 u32 flags = ethtool_op_get_flags(dev);
9423 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9429 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9432 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9433 dev->features |= NETIF_F_TSO6;
9435 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9436 dev->features &= ~NETIF_F_TSO6;
9442 static const struct {
9443 char string[ETH_GSTRING_LEN];
9444 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9445 { "register_test (offline)" },
9446 { "memory_test (offline)" },
9447 { "loopback_test (offline)" },
9448 { "nvram_test (online)" },
9449 { "interrupt_test (online)" },
9450 { "link_test (online)" },
9451 { "idle check (online)" }
9454 static int bnx2x_self_test_count(struct net_device *dev)
9456 return BNX2X_NUM_TESTS;
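/*
 * The register self-test is non-destructive: for each table entry the
 * current value is saved, a test pattern (all zeros on the first pass,
 * all ones on the second) is written, the register is read back and
 * compared under the entry's writable-bits mask, and the saved value
 * is restored.
 */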
9459 static int bnx2x_test_registers(struct bnx2x *bp)
9461 int idx, i, rc = -ENODEV;
9463 int port = BP_PORT(bp);
9464 static const struct {
9469 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9470 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9471 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9472 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9473 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9474 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9475 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9476 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9477 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9478 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9479 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9480 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9481 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9482 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9483 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9484 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9485 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9486 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9487 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9488 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9489 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9490 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9491 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9492 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9493 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9494 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9495 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9496 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9497 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9498 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9499 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9500 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9501 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9502 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9503 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9504 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9505 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9507 { 0xffffffff, 0, 0x00000000 }
9510 if (!netif_running(bp->dev))
9513 /* Repeat the test twice:
9514 First by writing 0x00000000, second by writing 0xffffffff */
9515 for (idx = 0; idx < 2; idx++) {
9522 wr_val = 0xffffffff;
9526 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9527 u32 offset, mask, save_val, val;
9529 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9530 mask = reg_tbl[i].mask;
9532 save_val = REG_RD(bp, offset);
9534 REG_WR(bp, offset, wr_val);
9535 val = REG_RD(bp, offset);
9537 /* Restore the original register's value */
9538 REG_WR(bp, offset, save_val);
9540 /* verify that the value matches the expected one */
9541 if ((val & mask) != (wr_val & mask))
9552 static int bnx2x_test_memory(struct bnx2x *bp)
9554 int i, j, rc = -ENODEV;
9556 static const struct {
9560 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9561 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9562 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9563 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9564 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9565 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9566 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9570 static const struct {
9576 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9577 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9578 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9579 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9580 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9581 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9583 { NULL, 0xffffffff, 0, 0 }
9586 if (!netif_running(bp->dev))
9589 /* Go through all the memories */
9590 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9591 for (j = 0; j < mem_tbl[i].size; j++)
9592 REG_RD(bp, mem_tbl[i].offset + j*4);
9594 /* Check the parity status */
9595 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9596 val = REG_RD(bp, prty_tbl[i].offset);
9597 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9598 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9600 "%s is 0x%x\n", prty_tbl[i].name, val);
9611 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9616 while (bnx2x_link_test(bp) && cnt--)
9620 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9622 unsigned int pkt_size, num_pkts, i;
9623 struct sk_buff *skb;
9624 unsigned char *packet;
9625 struct bnx2x_fastpath *fp_rx = &bp->fp[0];
9626 struct bnx2x_fastpath *fp_tx = &bp->fp[bp->num_rx_queues];
9627 u16 tx_start_idx, tx_idx;
9628 u16 rx_start_idx, rx_idx;
9629 u16 pkt_prod, bd_prod;
9630 struct sw_tx_bd *tx_buf;
9631 struct eth_tx_start_bd *tx_start_bd;
9632 struct eth_tx_parse_bd *pbd = NULL;
9634 union eth_rx_cqe *cqe;
9636 struct sw_rx_bd *rx_buf;
9640 /* check the loopback mode */
9641 switch (loopback_mode) {
9642 case BNX2X_PHY_LOOPBACK:
9643 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9646 case BNX2X_MAC_LOOPBACK:
9647 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9648 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9654 /* prepare the loopback packet */
9655 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9656 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9657 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9660 goto test_loopback_exit;
9662 packet = skb_put(skb, pkt_size);
9663 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9664 memset(packet + ETH_ALEN, 0, ETH_ALEN);
9665 memset(packet + 2*ETH_ALEN, 0x77, (ETH_HLEN - 2*ETH_ALEN));
9666 for (i = ETH_HLEN; i < pkt_size; i++)
9667 packet[i] = (unsigned char) (i & 0xff);
9669 /* send the loopback packet */
9671 tx_start_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9672 rx_start_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
9674 pkt_prod = fp_tx->tx_pkt_prod++;
9675 tx_buf = &fp_tx->tx_buf_ring[TX_BD(pkt_prod)];
9676 tx_buf->first_bd = fp_tx->tx_bd_prod;
9680 bd_prod = TX_BD(fp_tx->tx_bd_prod);
9681 tx_start_bd = &fp_tx->tx_desc_ring[bd_prod].start_bd;
9682 mapping = pci_map_single(bp->pdev, skb->data,
9683 skb_headlen(skb), PCI_DMA_TODEVICE);
9684 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9685 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9686 tx_start_bd->nbd = cpu_to_le16(2); /* start + pbd */
9687 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9688 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
9689 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
9690 tx_start_bd->general_data = ((UNICAST_ADDRESS <<
9691 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9693 /* turn on parsing and get a BD */
9694 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
9695 pbd = &fp_tx->tx_desc_ring[bd_prod].parse_bd;
9697 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
9701 fp_tx->tx_db.data.prod += 2;
9703 DOORBELL(bp, fp_tx->index - bp->num_rx_queues, fp_tx->tx_db.raw);
9708 fp_tx->tx_bd_prod += 2; /* start + pbd */
9709 bp->dev->trans_start = jiffies;
9713 tx_idx = le16_to_cpu(*fp_tx->tx_cons_sb);
9714 if (tx_idx != tx_start_idx + num_pkts)
9715 goto test_loopback_exit;
9717 rx_idx = le16_to_cpu(*fp_rx->rx_cons_sb);
9718 if (rx_idx != rx_start_idx + num_pkts)
9719 goto test_loopback_exit;
9721 cqe = &fp_rx->rx_comp_ring[RCQ_BD(fp_rx->rx_comp_cons)];
9722 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9723 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9724 goto test_loopback_rx_exit;
9726 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9727 if (len != pkt_size)
9728 goto test_loopback_rx_exit;
9730 rx_buf = &fp_rx->rx_buf_ring[RX_BD(fp_rx->rx_bd_cons)];
9732 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9733 for (i = ETH_HLEN; i < pkt_size; i++)
9734 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9735 goto test_loopback_rx_exit;
9739 test_loopback_rx_exit:
9741 fp_rx->rx_bd_cons = NEXT_RX_IDX(fp_rx->rx_bd_cons);
9742 fp_rx->rx_bd_prod = NEXT_RX_IDX(fp_rx->rx_bd_prod);
9743 fp_rx->rx_comp_cons = NEXT_RCQ_IDX(fp_rx->rx_comp_cons);
9744 fp_rx->rx_comp_prod = NEXT_RCQ_IDX(fp_rx->rx_comp_prod);
9746 /* Update producers */
9747 bnx2x_update_rx_prod(bp, fp_rx, fp_rx->rx_bd_prod, fp_rx->rx_comp_prod,
9748 fp_rx->rx_sge_prod);
9751 bp->link_params.loopback_mode = LOOPBACK_NONE;
9756 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9760 if (!netif_running(bp->dev))
9761 return BNX2X_LOOPBACK_FAILED;
9763 bnx2x_netif_stop(bp, 1);
9764 bnx2x_acquire_phy_lock(bp);
9766 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9768 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9769 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9772 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9774 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9775 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9778 bnx2x_release_phy_lock(bp);
9779 bnx2x_netif_start(bp);
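/*
 * Each nvram_tbl region below stores its CRC32 appended to the data,
 * and running CRC-32 over "data + stored CRC" leaves the algorithm's
 * constant residual; a region therefore checks out exactly when
 * ether_crc_le(size, data) == CRC32_RESIDUAL.
 */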
9784 #define CRC32_RESIDUAL 0xdebb20e3
9786 static int bnx2x_test_nvram(struct bnx2x *bp)
9788 static const struct {
9792 { 0, 0x14 }, /* bootstrap */
9793 { 0x14, 0xec }, /* dir */
9794 { 0x100, 0x350 }, /* manuf_info */
9795 { 0x450, 0xf0 }, /* feature_info */
9796 { 0x640, 0x64 }, /* upgrade_key_info */
9798 { 0x708, 0x70 }, /* manuf_key_info */
9802 __be32 buf[0x350 / 4];
9803 u8 *data = (u8 *)buf;
9807 rc = bnx2x_nvram_read(bp, 0, data, 4);
9809 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9810 goto test_nvram_exit;
9813 magic = be32_to_cpu(buf[0]);
9814 if (magic != 0x669955aa) {
9815 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9817 goto test_nvram_exit;
9820 for (i = 0; nvram_tbl[i].size; i++) {
9822 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9826 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9827 goto test_nvram_exit;
9830 csum = ether_crc_le(nvram_tbl[i].size, data);
9831 if (csum != CRC32_RESIDUAL) {
9833 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9835 goto test_nvram_exit;
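/*
 * The interrupt self-test rides on the slowpath: it posts a harmless
 * SET_MAC ramrod and polls set_mac_pending for up to ~100ms; only a
 * delivered slowpath completion interrupt can clear the flag.
 */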
9843 static int bnx2x_test_intr(struct bnx2x *bp)
9845 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9848 if (!netif_running(bp->dev))
9851 config->hdr.length = 0;
9853 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9855 config->hdr.offset = BP_FUNC(bp);
9856 config->hdr.client_id = bp->fp->cl_id;
9857 config->hdr.reserved1 = 0;
9859 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9860 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9861 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9863 bp->set_mac_pending++;
9864 for (i = 0; i < 10; i++) {
9865 if (!bp->set_mac_pending)
9867 msleep_interruptible(10);
9876 static void bnx2x_self_test(struct net_device *dev,
9877 struct ethtool_test *etest, u64 *buf)
9879 struct bnx2x *bp = netdev_priv(dev);
9881 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9883 if (!netif_running(dev))
9886 /* offline tests are not supported in MF mode */
9888 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9890 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9891 int port = BP_PORT(bp);
9895 /* save current value of input enable for TX port IF */
9896 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
9897 /* disable input for TX port IF */
9898 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
9900 link_up = bp->link_vars.link_up;
9901 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9902 bnx2x_nic_load(bp, LOAD_DIAG);
9903 /* wait until link state is restored */
9904 bnx2x_wait_for_link(bp, link_up);
9906 if (bnx2x_test_registers(bp) != 0) {
9908 etest->flags |= ETH_TEST_FL_FAILED;
9910 if (bnx2x_test_memory(bp) != 0) {
9912 etest->flags |= ETH_TEST_FL_FAILED;
9914 buf[2] = bnx2x_test_loopback(bp, link_up);
9916 etest->flags |= ETH_TEST_FL_FAILED;
9918 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9920 /* restore input for TX port IF */
9921 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
9923 bnx2x_nic_load(bp, LOAD_NORMAL);
9924 /* wait until link state is restored */
9925 bnx2x_wait_for_link(bp, link_up);
9927 if (bnx2x_test_nvram(bp) != 0) {
9929 etest->flags |= ETH_TEST_FL_FAILED;
9931 if (bnx2x_test_intr(bp) != 0) {
9933 etest->flags |= ETH_TEST_FL_FAILED;
9936 if (bnx2x_link_test(bp) != 0) {
9938 etest->flags |= ETH_TEST_FL_FAILED;
9941 #ifdef BNX2X_EXTRA_DEBUG
9942 bnx2x_panic_dump(bp);
9946 static const struct {
9949 u8 string[ETH_GSTRING_LEN];
9950 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9951 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9952 { Q_STATS_OFFSET32(error_bytes_received_hi),
9953 8, "[%d]: rx_error_bytes" },
9954 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9955 8, "[%d]: rx_ucast_packets" },
9956 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9957 8, "[%d]: rx_mcast_packets" },
9958 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9959 8, "[%d]: rx_bcast_packets" },
9960 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9961 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9962 4, "[%d]: rx_phy_ip_err_discards"},
9963 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9964 4, "[%d]: rx_skb_alloc_discard" },
9965 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9967 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9968 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9969 8, "[%d]: tx_packets" }
9972 static const struct {
9976 #define STATS_FLAGS_PORT 1
9977 #define STATS_FLAGS_FUNC 2
9978 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9979 u8 string[ETH_GSTRING_LEN];
9980 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9981 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9982 8, STATS_FLAGS_BOTH, "rx_bytes" },
9983 { STATS_OFFSET32(error_bytes_received_hi),
9984 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9985 { STATS_OFFSET32(total_unicast_packets_received_hi),
9986 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9987 { STATS_OFFSET32(total_multicast_packets_received_hi),
9988 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9989 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9990 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9991 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9992 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9993 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9994 8, STATS_FLAGS_PORT, "rx_align_errors" },
9995 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9996 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9997 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9998 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9999 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
10000 8, STATS_FLAGS_PORT, "rx_fragments" },
10001 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
10002 8, STATS_FLAGS_PORT, "rx_jabbers" },
10003 { STATS_OFFSET32(no_buff_discard_hi),
10004 8, STATS_FLAGS_BOTH, "rx_discards" },
10005 { STATS_OFFSET32(mac_filter_discard),
10006 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
10007 { STATS_OFFSET32(xxoverflow_discard),
10008 4, STATS_FLAGS_PORT, "rx_fw_discards" },
10009 { STATS_OFFSET32(brb_drop_hi),
10010 8, STATS_FLAGS_PORT, "rx_brb_discard" },
10011 { STATS_OFFSET32(brb_truncate_hi),
10012 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
10013 { STATS_OFFSET32(pause_frames_received_hi),
10014 8, STATS_FLAGS_PORT, "rx_pause_frames" },
10015 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
10016 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
10017 { STATS_OFFSET32(nig_timer_max),
10018 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
10019 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
10020 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
10021 { STATS_OFFSET32(rx_skb_alloc_failed),
10022 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
10023 { STATS_OFFSET32(hw_csum_err),
10024 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
10026 { STATS_OFFSET32(total_bytes_transmitted_hi),
10027 8, STATS_FLAGS_BOTH, "tx_bytes" },
10028 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
10029 8, STATS_FLAGS_PORT, "tx_error_bytes" },
10030 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
10031 8, STATS_FLAGS_BOTH, "tx_packets" },
10032 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
10033 8, STATS_FLAGS_PORT, "tx_mac_errors" },
10034 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
10035 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
10036 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
10037 8, STATS_FLAGS_PORT, "tx_single_collisions" },
10038 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
10039 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
10040 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
10041 8, STATS_FLAGS_PORT, "tx_deferred" },
10042 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
10043 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
10044 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
10045 8, STATS_FLAGS_PORT, "tx_late_collisions" },
10046 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
10047 8, STATS_FLAGS_PORT, "tx_total_collisions" },
10048 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
10049 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
10050 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
10051 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
10052 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
10053 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
10054 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
10055 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
10056 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
10057 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
10058 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
10059 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
10060 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
10061 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
10062 { STATS_OFFSET32(pause_frames_sent_hi),
10063 8, STATS_FLAGS_PORT, "tx_pause_frames" }
10066 #define IS_PORT_STAT(i) \
10067 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
10068 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
10069 #define IS_E1HMF_MODE_STAT(bp) \
10070 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
10072 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
10074 struct bnx2x *bp = netdev_priv(dev);
10077 switch (stringset) {
10079 if (is_multi(bp)) {
10081 for_each_rx_queue(bp, i) {
10082 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10083 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10084 bnx2x_q_stats_arr[j].string, i);
10085 k += BNX2X_NUM_Q_STATS;
10087 if (IS_E1HMF_MODE_STAT(bp))
10089 for (j = 0; j < BNX2X_NUM_STATS; j++)
10090 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10091 bnx2x_stats_arr[j].string);
10093 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10094 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10096 strcpy(buf + j*ETH_GSTRING_LEN,
10097 bnx2x_stats_arr[i].string);
10104 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10109 static int bnx2x_get_stats_count(struct net_device *dev)
10111 struct bnx2x *bp = netdev_priv(dev);
10114 if (is_multi(bp)) {
10115 num_stats = BNX2X_NUM_Q_STATS * bp->num_rx_queues;
10116 if (!IS_E1HMF_MODE_STAT(bp))
10117 num_stats += BNX2X_NUM_STATS;
10119 if (IS_E1HMF_MODE_STAT(bp)) {
10121 for (i = 0; i < BNX2X_NUM_STATS; i++)
10122 if (IS_FUNC_STAT(i))
10125 num_stats = BNX2X_NUM_STATS;
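/*
 * Counters are stored as 32-bit words: an entry of size 8 is a hi/lo
 * pair folded into one u64 with HILO_U64(hi, lo), size 4 is simply
 * widened, and size 0 marks a counter that is skipped.
 */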
10131 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10132 struct ethtool_stats *stats, u64 *buf)
10134 struct bnx2x *bp = netdev_priv(dev);
10135 u32 *hw_stats, *offset;
10138 if (is_multi(bp)) {
10140 for_each_rx_queue(bp, i) {
10141 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10142 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10143 if (bnx2x_q_stats_arr[j].size == 0) {
10144 /* skip this counter */
10148 offset = (hw_stats +
10149 bnx2x_q_stats_arr[j].offset);
10150 if (bnx2x_q_stats_arr[j].size == 4) {
10151 /* 4-byte counter */
10152 buf[k + j] = (u64) *offset;
10155 /* 8-byte counter */
10156 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10158 k += BNX2X_NUM_Q_STATS;
10160 if (IS_E1HMF_MODE_STAT(bp))
10162 hw_stats = (u32 *)&bp->eth_stats;
10163 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10164 if (bnx2x_stats_arr[j].size == 0) {
10165 /* skip this counter */
10169 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10170 if (bnx2x_stats_arr[j].size == 4) {
10171 /* 4-byte counter */
10172 buf[k + j] = (u64) *offset;
10175 /* 8-byte counter */
10176 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10179 hw_stats = (u32 *)&bp->eth_stats;
10180 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10181 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10183 if (bnx2x_stats_arr[i].size == 0) {
10184 /* skip this counter */
10189 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10190 if (bnx2x_stats_arr[i].size == 4) {
10191 /* 4-byte counter */
10192 buf[j] = (u64) *offset;
10196 /* 8-byte counter */
10197 buf[j] = HILO_U64(*offset, *(offset + 1));
10203 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10205 struct bnx2x *bp = netdev_priv(dev);
10206 int port = BP_PORT(bp);
10209 if (!netif_running(dev))
10218 for (i = 0; i < (data * 2); i++) {
10220 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10221 bp->link_params.hw_led_mode,
10222 bp->link_params.chip_id);
10224 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10225 bp->link_params.hw_led_mode,
10226 bp->link_params.chip_id);
10228 msleep_interruptible(500);
10229 if (signal_pending(current))
10233 if (bp->link_vars.link_up)
10234 bnx2x_set_led(bp, port, LED_MODE_OPER,
10235 bp->link_vars.line_speed,
10236 bp->link_params.hw_led_mode,
10237 bp->link_params.chip_id);
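/*
 * The ops below are exercised from userspace via ethtool; for example
 * (device name illustrative):
 *
 *   ethtool -i eth0          -> bnx2x_get_drvinfo
 *   ethtool -d eth0          -> bnx2x_get_regs_len / bnx2x_get_regs
 *   ethtool -e eth0          -> bnx2x_get_eeprom
 *   ethtool -S eth0          -> bnx2x_get_stats_count / bnx2x_get_ethtool_stats
 *   ethtool -t eth0 offline  -> bnx2x_self_test
 *   ethtool -p eth0 5        -> bnx2x_phys_id (blink the LED for 5 seconds)
 */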
10242 static struct ethtool_ops bnx2x_ethtool_ops = {
10243 .get_settings = bnx2x_get_settings,
10244 .set_settings = bnx2x_set_settings,
10245 .get_drvinfo = bnx2x_get_drvinfo,
10246 .get_regs_len = bnx2x_get_regs_len,
10247 .get_regs = bnx2x_get_regs,
10248 .get_wol = bnx2x_get_wol,
10249 .set_wol = bnx2x_set_wol,
10250 .get_msglevel = bnx2x_get_msglevel,
10251 .set_msglevel = bnx2x_set_msglevel,
10252 .nway_reset = bnx2x_nway_reset,
10253 .get_link = bnx2x_get_link,
10254 .get_eeprom_len = bnx2x_get_eeprom_len,
10255 .get_eeprom = bnx2x_get_eeprom,
10256 .set_eeprom = bnx2x_set_eeprom,
10257 .get_coalesce = bnx2x_get_coalesce,
10258 .set_coalesce = bnx2x_set_coalesce,
10259 .get_ringparam = bnx2x_get_ringparam,
10260 .set_ringparam = bnx2x_set_ringparam,
10261 .get_pauseparam = bnx2x_get_pauseparam,
10262 .set_pauseparam = bnx2x_set_pauseparam,
10263 .get_rx_csum = bnx2x_get_rx_csum,
10264 .set_rx_csum = bnx2x_set_rx_csum,
10265 .get_tx_csum = ethtool_op_get_tx_csum,
10266 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10267 .set_flags = bnx2x_set_flags,
10268 .get_flags = ethtool_op_get_flags,
10269 .get_sg = ethtool_op_get_sg,
10270 .set_sg = ethtool_op_set_sg,
10271 .get_tso = ethtool_op_get_tso,
10272 .set_tso = bnx2x_set_tso,
10273 .self_test_count = bnx2x_self_test_count,
10274 .self_test = bnx2x_self_test,
10275 .get_strings = bnx2x_get_strings,
10276 .phys_id = bnx2x_phys_id,
10277 .get_stats_count = bnx2x_get_stats_count,
10278 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10281 /* end of ethtool_ops */
10283 /****************************************************************************
10284 * General service functions
10285 ****************************************************************************/
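/*
 * Power state changes go straight through the PM capability's
 * PCI_PM_CTRL register: the STATE_MASK bits select D0/D3hot,
 * PME_ENABLE is set when wake-on-LAN is armed, and a settle delay is
 * required on the way out of D3hot before the device is touched again.
 */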
10287 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10291 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10295 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10296 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10297 PCI_PM_CTRL_PME_STATUS));
10299 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10300 /* delay required during transition out of D3hot */
10305 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10309 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10311 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10314 /* No more memory access after this point until
10315 * device is brought back to D0. */
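/*
 * The rx completion index read from the status block must skip the
 * last entry of each RCQ page, which holds the "next page" pointer
 * rather than a real completion; hence the MAX_RCQ_DESC_CNT check
 * (and increment) below.
 */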
10325 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10329 /* Tell compiler that status block fields can change */
10331 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10332 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10333 rx_cons_sb++;
10334 return (fp->rx_comp_cons != rx_cons_sb);
10338 * net_device service functions
10341 static int bnx2x_poll(struct napi_struct *napi, int budget)
10343 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10345 struct bnx2x *bp = fp->bp;
10348 #ifdef BNX2X_STOP_ON_ERROR
10349 if (unlikely(bp->panic))
10353 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10354 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10356 bnx2x_update_fpsb_idx(fp);
10358 if (bnx2x_has_rx_work(fp)) {
10359 work_done = bnx2x_rx_int(fp, budget);
10361 /* must not complete if we consumed full budget */
10362 if (work_done >= budget)
10366 /* bnx2x_has_rx_work() reads the status block, thus we need to
10367 * ensure that status block indices have been actually read
10368 * (bnx2x_update_fpsb_idx) prior to this check (bnx2x_has_rx_work)
10369 * so that we won't write the "newer" value of the status block to IGU
10370 * (if there was a DMA right after bnx2x_has_rx_work and
10371 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10372 * may be postponed to right before bnx2x_ack_sb). In this case
10373 * there will never be another interrupt until there is another update
10374 * of the status block, while there is still unhandled work.
10378 if (!bnx2x_has_rx_work(fp)) {
10379 #ifdef BNX2X_STOP_ON_ERROR
10382 napi_complete(napi);
10384 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10385 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10386 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10387 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10395 /* we split the first BD into header and data BDs
10396 * to ease the pain of our fellow microcode engineers;
10397 * we use one mapping for both BDs.
10398 * So far this has only been observed to happen
10399 * in Other Operating Systems(TM) */
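/*
 * Concretely: the start BD keeps its existing DMA mapping but is
 * trimmed to the hlen header bytes, and a fresh data BD is pointed at
 * mapping + hlen to carry the remaining old_len - hlen bytes; only
 * bookkeeping changes, no data is copied or remapped.
 */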
10401 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10402 struct bnx2x_fastpath *fp,
10403 struct sw_tx_bd *tx_buf,
10404 struct eth_tx_start_bd **tx_bd, u16 hlen,
10405 u16 bd_prod, int nbd)
10407 struct eth_tx_start_bd *h_tx_bd = *tx_bd;
10408 struct eth_tx_bd *d_tx_bd;
10409 dma_addr_t mapping;
10410 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10412 /* first fix first BD */
10413 h_tx_bd->nbd = cpu_to_le16(nbd);
10414 h_tx_bd->nbytes = cpu_to_le16(hlen);
10416 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10417 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10418 h_tx_bd->addr_lo, h_tx_bd->nbd);
10420 /* now get a new data BD
10421 * (after the pbd) and fill it */
10422 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10423 d_tx_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10425 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10426 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10428 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10429 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10430 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10432 /* this marks the BD as one that has no individual mapping */
10433 tx_buf->flags |= BNX2X_TSO_SPLIT_BD;
10435 DP(NETIF_MSG_TX_QUEUED,
10436 "TSO split data size is %d (%x:%x)\n",
10437 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10440 *tx_bd = (struct eth_tx_start_bd *)d_tx_bd;
10445 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10448 csum = (u16) ~csum_fold(csum_sub(csum,
10449 csum_partial(t_header - fix, fix, 0)));
10452 csum = (u16) ~csum_fold(csum_add(csum,
10453 csum_partial(t_header, -fix, 0)));
10455 return swab16(csum);
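/*
 * bnx2x_xmit_type() reduces an skb to a flag mask: XMIT_CSUM_V4/V6
 * plus XMIT_CSUM_TCP when CHECKSUM_PARTIAL is set, and XMIT_GSO_V4/V6
 * from gso_type; the transmit path keys its BD flag and parsing BD
 * decisions off this mask.
 */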
10458 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10462 if (skb->ip_summed != CHECKSUM_PARTIAL)
10466 if (skb->protocol == htons(ETH_P_IPV6)) {
10468 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10469 rc |= XMIT_CSUM_TCP;
10473 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10474 rc |= XMIT_CSUM_TCP;
10478 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10481 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10487 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10488 /* check if packet requires linearization (packet is too fragmented)
10489 no need to check fragmentation if page size > 8K (there will be no
10490 violation of FW restrictions) */
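/*
 * FW restriction: each MSS worth of LSO payload must be coverable by a
 * bounded number of BDs, so every window of (MAX_FETCH_BD - 3)
 * consecutive frags must supply at least gso_size bytes; the
 * sliding-window sums below flag the skb for skb_linearize() when any
 * window falls short.
 */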
10491 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10496 int first_bd_sz = 0;
10498 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10499 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10501 if (xmit_type & XMIT_GSO) {
10502 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10503 /* Check if LSO packet needs to be copied:
10504 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10505 int wnd_size = MAX_FETCH_BD - 3;
10506 /* Number of windows to check */
10507 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10512 /* Headers length */
10513 hlen = (int)(skb_transport_header(skb) - skb->data) +
10516 /* Amount of data (w/o headers) on linear part of SKB*/
10517 first_bd_sz = skb_headlen(skb) - hlen;
10519 wnd_sum = first_bd_sz;
10521 /* Calculate the first sum - it's special */
10522 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10524 skb_shinfo(skb)->frags[frag_idx].size;
10526 /* If there is data in the linear part of the skb, check it */
10527 if (first_bd_sz > 0) {
10528 if (unlikely(wnd_sum < lso_mss)) {
10533 wnd_sum -= first_bd_sz;
10536 /* Others are easier: run through the frag list and
10537 check all windows */
10538 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10540 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10542 if (unlikely(wnd_sum < lso_mss)) {
10547 skb_shinfo(skb)->frags[wnd_idx].size;
10550 /* in the non-LSO case, a too fragmented packet should always
10551 be linearized */
10552 to_copy = 1;
10557 if (unlikely(to_copy))
10558 DP(NETIF_MSG_TX_QUEUED,
10559 "Linearization IS REQUIRED for %s packet. "
10560 "num_frags %d hlen %d first_bd_sz %d\n",
10561 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10562 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
10568 /* called with netif_tx_lock
10569 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10570 * netif_wake_queue()
10572 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10574 struct bnx2x *bp = netdev_priv(dev);
10575 struct bnx2x_fastpath *fp, *fp_stat;
10576 struct netdev_queue *txq;
10577 struct sw_tx_bd *tx_buf;
10578 struct eth_tx_start_bd *tx_start_bd;
10579 struct eth_tx_bd *tx_data_bd, *total_pkt_bd = NULL;
10580 struct eth_tx_parse_bd *pbd = NULL;
10581 u16 pkt_prod, bd_prod;
10583 dma_addr_t mapping;
10584 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10587 __le16 pkt_size = 0;
10589 #ifdef BNX2X_STOP_ON_ERROR
10590 if (unlikely(bp->panic))
10591 return NETDEV_TX_BUSY;
10594 fp_index = skb_get_queue_mapping(skb);
10595 txq = netdev_get_tx_queue(dev, fp_index);
10597 fp = &bp->fp[fp_index + bp->num_rx_queues];
10598 fp_stat = &bp->fp[fp_index];
10600 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10601 fp_stat->eth_q_stats.driver_xoff++;
10602 netif_tx_stop_queue(txq);
10603 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10604 return NETDEV_TX_BUSY;
10607 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10608 " gso type %x xmit_type %x\n",
10609 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10610 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10612 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10613 /* First, check if we need to linearize the skb (due to FW
10614 restrictions). No need to check fragmentation if page size > 8K
10615 (there will be no violation of FW restrictions) */
10616 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10617 /* Statistics of linearization */
10619 if (skb_linearize(skb) != 0) {
10620 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10621 "silently dropping this SKB\n");
10622 dev_kfree_skb_any(skb);
10623 return NETDEV_TX_OK;
10629 Please read carefully. First we use one BD which we mark as start,
10630 then we have a parsing info BD (used for TSO or xsum),
10631 and only then we have the rest of the TSO BDs.
10632 (don't forget to mark the last one as last,
10633 and to unmap only AFTER you write to the BD ...)
10634 And above all, all pbd sizes are in words - NOT DWORDS!
10637 pkt_prod = fp->tx_pkt_prod++;
10638 bd_prod = TX_BD(fp->tx_bd_prod);
10640 /* get a tx_buf and first BD */
10641 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10642 tx_start_bd = &fp->tx_desc_ring[bd_prod].start_bd;
10644 tx_start_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10645 tx_start_bd->general_data = (UNICAST_ADDRESS <<
10646 ETH_TX_START_BD_ETH_ADDR_TYPE_SHIFT);
10648 tx_start_bd->general_data |= (1 << ETH_TX_START_BD_HDR_NBDS_SHIFT);
10650 /* remember the first BD of the packet */
10651 tx_buf->first_bd = fp->tx_bd_prod;
10655 DP(NETIF_MSG_TX_QUEUED,
10656 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10657 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_start_bd);
10660 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10661 (bp->flags & HW_VLAN_TX_FLAG)) {
10662 tx_start_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10663 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10666 tx_start_bd->vlan = cpu_to_le16(pkt_prod);
10668 /* turn on parsing and get a BD */
10669 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10670 pbd = &fp->tx_desc_ring[bd_prod].parse_bd;
10672 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10674 if (xmit_type & XMIT_CSUM) {
10675 hlen = (skb_network_header(skb) - skb->data) / 2;
10677 /* for now NS flag is not used in Linux */
10679 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10680 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10682 pbd->ip_hlen = (skb_transport_header(skb) -
10683 skb_network_header(skb)) / 2;
10685 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10687 pbd->total_hlen = cpu_to_le16(hlen);
10690 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_L4_CSUM;
10692 if (xmit_type & XMIT_CSUM_V4)
10693 tx_start_bd->bd_flags.as_bitfield |=
10694 ETH_TX_BD_FLAGS_IP_CSUM;
10696 tx_start_bd->bd_flags.as_bitfield |=
10697 ETH_TX_BD_FLAGS_IPV6;
10699 if (xmit_type & XMIT_CSUM_TCP) {
10700 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10703 s8 fix = SKB_CS_OFF(skb); /* signed! */
10705 pbd->global_data |= ETH_TX_PARSE_BD_UDP_CS_FLG;
10707 DP(NETIF_MSG_TX_QUEUED,
10708 "hlen %d fix %d csum before fix %x\n",
10709 le16_to_cpu(pbd->total_hlen), fix, SKB_CS(skb));
10711 /* HW bug: fixup the CSUM */
10712 pbd->tcp_pseudo_csum =
10713 bnx2x_csum_fix(skb_transport_header(skb),
10716 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10717 pbd->tcp_pseudo_csum);
10721 mapping = pci_map_single(bp->pdev, skb->data,
10722 skb_headlen(skb), PCI_DMA_TODEVICE);
10724 tx_start_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10725 tx_start_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10726 nbd = skb_shinfo(skb)->nr_frags + 2; /* start_bd + pbd + frags */
10727 tx_start_bd->nbd = cpu_to_le16(nbd);
10728 tx_start_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10729 pkt_size = tx_start_bd->nbytes;
10731 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10732 " nbytes %d flags %x vlan %x\n",
10733 tx_start_bd, tx_start_bd->addr_hi, tx_start_bd->addr_lo,
10734 le16_to_cpu(tx_start_bd->nbd), le16_to_cpu(tx_start_bd->nbytes),
10735 tx_start_bd->bd_flags.as_bitfield, le16_to_cpu(tx_start_bd->vlan));
10737 if (xmit_type & XMIT_GSO) {
10739 DP(NETIF_MSG_TX_QUEUED,
10740 "TSO packet len %d hlen %d total len %d tso size %d\n",
10741 skb->len, hlen, skb_headlen(skb),
10742 skb_shinfo(skb)->gso_size);
10744 tx_start_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10746 if (unlikely(skb_headlen(skb) > hlen))
10747 bd_prod = bnx2x_tx_split(bp, fp, tx_buf, &tx_start_bd,
10748 hlen, bd_prod, ++nbd);
10750 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10751 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10752 pbd->tcp_flags = pbd_tcp_flags(skb);
10754 if (xmit_type & XMIT_GSO_V4) {
10755 pbd->ip_id = swab16(ip_hdr(skb)->id);
10756 pbd->tcp_pseudo_csum =
10757 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10758 ip_hdr(skb)->daddr,
10759 0, IPPROTO_TCP, 0));
10762 pbd->tcp_pseudo_csum =
10763 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10764 &ipv6_hdr(skb)->daddr,
10765 0, IPPROTO_TCP, 0));
10767 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10769 tx_data_bd = (struct eth_tx_bd *)tx_start_bd;
10771 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10772 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10774 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10775 tx_data_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10776 if (total_pkt_bd == NULL)
10777 total_pkt_bd = &fp->tx_desc_ring[bd_prod].reg_bd;
10779 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10780 frag->size, PCI_DMA_TODEVICE);
10782 tx_data_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10783 tx_data_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10784 tx_data_bd->nbytes = cpu_to_le16(frag->size);
10785 le16_add_cpu(&pkt_size, frag->size);
10787 DP(NETIF_MSG_TX_QUEUED,
10788 "frag %d bd @%p addr (%x:%x) nbytes %d\n",
10789 i, tx_data_bd, tx_data_bd->addr_hi, tx_data_bd->addr_lo,
10790 le16_to_cpu(tx_data_bd->nbytes));
10793 DP(NETIF_MSG_TX_QUEUED, "last bd @%p\n", tx_data_bd);
10795 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10797 /* now send a tx doorbell, counting the next BD
10798 * if the packet contains or ends with it
10800 if (TX_BD_POFF(bd_prod) < nbd)
10803 if (total_pkt_bd != NULL)
10804 total_pkt_bd->total_pkt_bytes = pkt_size;
10807 DP(NETIF_MSG_TX_QUEUED,
10808 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10809 " tcp_flags %x xsum %x seq %u hlen %u\n",
10810 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10811 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10812 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10814 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10817 * Make sure that the BD data is updated before updating the producer
10818 * since FW might read the BD right after the producer is updated.
10819 * This is only applicable for weak-ordered memory model archs such
10820 * as IA-64. The following barrier is also mandatory since the FW
10821 * assumes packets must have BDs.
10825 fp->tx_db.data.prod += nbd;
10827 DOORBELL(bp, fp->index - bp->num_rx_queues, fp->tx_db.raw);
10831 fp->tx_bd_prod += nbd;
10833 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10834 netif_tx_stop_queue(txq);
10835 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10836 if we put Tx into XOFF state. */
10838 fp_stat->eth_q_stats.driver_xoff++;
10839 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10840 netif_tx_wake_queue(txq);
10844 return NETDEV_TX_OK;
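/*
 * Editor's note (illustrative only, not driver code): the transmit path
 * above is an instance of the usual descriptor-ring publish pattern -- fill
 * the BDs, wmb() so the BD writes are visible before the producer update,
 * then ring the doorbell and mmiowb() before the queue lock is dropped.
 * The baseline BD count is restated by the hypothetical helper below; the
 * ++nbd/nbd++ adjustments above additionally cover a split header BD and
 * the next-page BD when the packet wraps a BD page.
 */
static inline int bnx2x_example_bd_count(const struct sk_buff *skb)
{
	/* start_bd + pbd + one BD per page fragment, mirroring nbd above */
	return skb_shinfo(skb)->nr_frags + 2;
}
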
/* called with rtnl_lock */
static int bnx2x_open(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	netif_carrier_off(dev);

	bnx2x_set_power_state(bp, PCI_D0);

	return bnx2x_nic_load(bp, LOAD_OPEN);
}

/* called with rtnl_lock */
static int bnx2x_close(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	/* Unload the driver, release IRQs */
	bnx2x_nic_unload(bp, UNLOAD_CLOSE);
	if (atomic_read(&bp->pdev->enable_cnt) == 1)
		if (!CHIP_REV_IS_SLOW(bp))
			bnx2x_set_power_state(bp, PCI_D3hot);

	return 0;
}

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].target_table_entry.
					clients_bit_vector =
						cpu_to_le32(1 << BP_L_ID(bp));
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}

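/*
 * Editor's note (illustrative sketch, not called anywhere): how the E1H
 * branch of bnx2x_set_rx_mode() above maps a multicast MAC address to one
 * bit of the 256-bit MC_HASH register file.  The top byte of the
 * little-endian CRC32c of the address selects the bit; bits 7:5 pick the
 * 32-bit register and bits 4:0 the position within it.
 */
static inline void bnx2x_example_mc_hash(const u8 *mac, u32 *regidx, u32 *pos)
{
	u32 crc = crc32c_le(0, mac, ETH_ALEN);
	u32 bit = (crc >> 24) & 0xff;

	*regidx = bit >> 5;	/* which MC_HASH register */
	*pos = bit & 0x1f;	/* which bit within that register */
}
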
/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}

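/*
 * Editor's note: the MII ioctls above are serviced via the clause-45 MDIO
 * helpers (bnx2x_cl45_read/write) at DEFAULT_PHY_DEV_ADDR, serialized by
 * port.phy_mutex.  Only the low 5 bits of reg_num are used, so user space
 * effectively sees a 32-register window onto the PHY.
 */
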
/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}

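/*
 * Editor's note: changing the MTU only stores the new value here; the rx
 * buffer sizes that depend on it are recomputed during bnx2x_nic_load(),
 * which is why a running interface is unloaded and reloaded above rather
 * than resized in place.
 */
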
static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif

	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}

#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}

		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}

	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

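/*
 * Editor's note: both helpers above decode the negotiated link state from
 * the PCICFG_LINK_CONTROL register -- width as a lane count (e.g. x8) and
 * speed as an encoded generation (1 = 2.5GHz, 2 = 5GHz) -- solely for the
 * banner printed by bnx2x_init_one() below.
 */
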
static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u16 *ops_offsets;
	u32 offset, len, num_ops;
	int i;
	const struct firmware *firmware = bp->firmware;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}

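/*
 * Editor's note: the firmware file is self-describing -- the header is a
 * table of {offset, len} section descriptors -- so the checks above only
 * have to verify that every section lies inside firmware->size, that each
 * init_ops offset indexes a valid raw_op, and that the 4-byte version
 * section matches the BCM_5710_FW_* constants this driver was built
 * against.
 */
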
static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/* Ops array is stored in the following format:
 * {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	u32 i, j, tmp;
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}

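/*
 * Editor's note, a worked example of the unpacking above: the 8 big-endian
 * source bytes 45 00 12 34 aa bb cc dd decode to op = 0x45,
 * offset = 0x001234 and raw_data = 0xaabbccdd.
 */
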
static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	u16 *target = (u16 *)_target;
	const __be16 *source = (const __be16 *)_source;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func) \
	do { \
		u32 len = be32_to_cpu(fw_hdr->arr.len); \
		bp->arr = kmalloc(len, GFP_KERNEL); \
		if (!bp->arr) { \
			printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
			goto lbl; \
		} \
		func(bp->firmware->data + \
		     be32_to_cpu(fw_hdr->arr.offset), \
		     (u8 *)bp->arr, len); \
	} while (0)

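/*
 * Editor's note, an expansion sketch: BNX2X_ALLOC_AND_SET(init_data,
 * request_firmware_exit, be32_to_cpu_n) kmallocs bp->init_data with the
 * length taken from fw_hdr->init_data.len, jumps to request_firmware_exit
 * if the allocation fails, and otherwise byte-swaps that section out of
 * bp->firmware->data into the new buffer.
 */
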
static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);

	/* STORMs firmware */
	bp->tsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	bp->tsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	bp->usem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	bp->usem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->usem_pram_data.offset);
	bp->xsem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	bp->xsem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	bp->csem_int_table_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	bp->csem_pram_data = bp->firmware->data +
		be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

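/*
 * Editor's note: the requested file name is therefore of the form
 * "bnx2x-e1-<maj>.<min>.<rev>.<eng>.fw" (or with the "bnx2x-e1h-" prefix),
 * the four numbers being the BCM_5710_FW_* constants, so the firmware on
 * disk always matches the version bnx2x_check_firmware() insists on.
 */
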
static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

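/*
 * Editor's note: during EEH recovery the PCI core walks these hooks in
 * order -- error_detected() (detach and unload), then slot_reset() once the
 * link has been reset (re-enable the device and restore config space), then
 * resume() (recover the MCP state and reload the NIC).
 */
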
static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}

	return ret;
}

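/*
 * Editor's note: the workqueue is created before the driver is registered
 * because the slowpath and reset tasks queue work on bnx2x_wq as soon as a
 * device is probed; bnx2x_cleanup() below tears the two down in the
 * reverse order.
 */
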
static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);