/* bnx2x_main.c: Broadcom Everest network driver.
 *
 * Copyright (c) 2007-2009 Broadcom Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 *
 * Maintained by: Eilon Greenstein <eilong@broadcom.com>
 * Written by: Eliezer Tamir
 * Based on code from Michael Chan's bnx2 driver
 * UDP CSUM errata workaround by Arik Gendelman
 * Slowpath rework by Vladislav Zolotarov
 * Statistics and Link management by Yitchak Gertner
 *
 */
#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/kernel.h>
#include <linux/device.h>  /* for dev_info() */
#include <linux/timer.h>
#include <linux/errno.h>
#include <linux/ioport.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
#include <linux/dma-mapping.h>
#include <linux/bitops.h>
#include <linux/irq.h>
#include <linux/delay.h>
#include <asm/byteorder.h>
#include <linux/time.h>
#include <linux/ethtool.h>
#include <linux/mii.h>
#include <linux/if_vlan.h>
#include <net/ip.h>
#include <net/checksum.h>
#include <net/ip6_checksum.h>
#include <linux/workqueue.h>
#include <linux/crc32.h>
#include <linux/crc32c.h>
#include <linux/prefetch.h>
#include <linux/zlib.h>

#include "bnx2x.h"
#include "bnx2x_init.h"
#include "bnx2x_init_ops.h"
#include "bnx2x_dump.h"
#define DRV_MODULE_VERSION	"1.48.114-1"
#define DRV_MODULE_RELDATE	"2009/07/29"
#define BNX2X_BC_VER		0x040200
#include <linux/firmware.h>
#include "bnx2x_fw_file_hdr.h"
/* FW files */
#define FW_FILE_PREFIX_E1	"bnx2x-e1-"
#define FW_FILE_PREFIX_E1H	"bnx2x-e1h-"

/* Time in jiffies before concluding the transmitter is hung */
#define TX_TIMEOUT		(5*HZ)
static char version[] __devinitdata =
	"Broadcom NetXtreme II 5771x 10Gigabit Ethernet Driver "
	DRV_MODULE_NAME " " DRV_MODULE_VERSION " (" DRV_MODULE_RELDATE ")\n";
MODULE_AUTHOR("Eliezer Tamir");
MODULE_DESCRIPTION("Broadcom NetXtreme II BCM57710/57711/57711E Driver");
MODULE_LICENSE("GPL");
MODULE_VERSION(DRV_MODULE_VERSION);
static int multi_mode = 1;
module_param(multi_mode, int, 0);
MODULE_PARM_DESC(multi_mode, " Use per-CPU queues");

static int disable_tpa;
module_param(disable_tpa, int, 0);
MODULE_PARM_DESC(disable_tpa, " Disable the TPA (LRO) feature");

static int int_mode;
module_param(int_mode, int, 0);
MODULE_PARM_DESC(int_mode, " Force interrupt mode (1 INT#x; 2 MSI)");

static int poll;
module_param(poll, int, 0);
MODULE_PARM_DESC(poll, " Use polling (for debug)");

static int mrrs = -1;
module_param(mrrs, int, 0);
MODULE_PARM_DESC(mrrs, " Force Max Read Req Size (0..3) (for debug)");

static int debug;
module_param(debug, int, 0);
MODULE_PARM_DESC(debug, " Default debug msglevel");

static int load_count[3]; /* 0-common, 1-port0, 2-port1 */

static struct workqueue_struct *bnx2x_wq;
enum bnx2x_board_type {
	BCM57710 = 0,
	BCM57711,
	BCM57711E,
};

/* indexed by board_type, above */
static struct {
	char *name;
} board_info[] __devinitdata = {
	{ "Broadcom NetXtreme II BCM57710 XGb" },
	{ "Broadcom NetXtreme II BCM57711 XGb" },
	{ "Broadcom NetXtreme II BCM57711E XGb" }
};

static const struct pci_device_id bnx2x_pci_tbl[] = {
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57710,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57710 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711 },
	{ PCI_VENDOR_ID_BROADCOM, PCI_DEVICE_ID_NX2_57711E,
		PCI_ANY_ID, PCI_ANY_ID, 0, 0, BCM57711E },
	{ 0 }
};

MODULE_DEVICE_TABLE(pci, bnx2x_pci_tbl);
/****************************************************************************
* General service functions
****************************************************************************/

/* used only at init
 * locking is done by mcp
 */
static void bnx2x_reg_wr_ind(struct bnx2x *bp, u32 addr, u32 val)
{
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_DATA, val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
}
static u32 bnx2x_reg_rd_ind(struct bnx2x *bp, u32 addr)
{
	u32 val;

	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS, addr);
	pci_read_config_dword(bp->pdev, PCICFG_GRC_DATA, &val);
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);

	return val;
}
static const u32 dmae_reg_go_c[] = {
	DMAE_REG_GO_C0, DMAE_REG_GO_C1, DMAE_REG_GO_C2, DMAE_REG_GO_C3,
	DMAE_REG_GO_C4, DMAE_REG_GO_C5, DMAE_REG_GO_C6, DMAE_REG_GO_C7,
	DMAE_REG_GO_C8, DMAE_REG_GO_C9, DMAE_REG_GO_C10, DMAE_REG_GO_C11,
	DMAE_REG_GO_C12, DMAE_REG_GO_C13, DMAE_REG_GO_C14, DMAE_REG_GO_C15
};
/* copy command into DMAE command memory and set DMAE command go */
static void bnx2x_post_dmae(struct bnx2x *bp, struct dmae_command *dmae,
			    int idx)
{
	u32 cmd_offset;
	int i;

	cmd_offset = (DMAE_REG_CMD_MEM + sizeof(struct dmae_command) * idx);
	for (i = 0; i < (sizeof(struct dmae_command)/4); i++) {
		REG_WR(bp, cmd_offset + i*4, *(((u32 *)dmae) + i));

		DP(BNX2X_MSG_OFF, "DMAE cmd[%d].%d (0x%08x) : 0x%08x\n",
		   idx, i, cmd_offset + i*4, *(((u32 *)dmae) + i));
	}
	REG_WR(bp, dmae_reg_go_c[idx], 1);
}
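/* Write a host buffer to GRC space through the DMA engine.  While DMAE is
 * not yet ready (early init), fall back to indirect register writes.
 * Completion is detected by polling the wb_comp word in the slowpath area.
 */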
void bnx2x_write_dmae(struct bnx2x *bp, dma_addr_t dma_addr, u32 dst_addr,
		      u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);

		DP(BNX2X_MSG_OFF, "DMAE is not ready (dst_addr %08x len32 %d)"
		   " using indirect\n", dst_addr, len32);
		bnx2x_init_ind_wr(bp, dst_addr, data, len32);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = U64_LO(dma_addr);
	dmae->src_addr_hi = U64_HI(dma_addr);
	dmae->dst_addr_lo = dst_addr >> 2;
	dmae->dst_addr_hi = 0;
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, dst_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {
		DP(BNX2X_MSG_OFF, "wb_comp 0x%08x\n", *wb_comp);

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}

	mutex_unlock(&bp->dmae_mutex);
}
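/* Counterpart of bnx2x_write_dmae(): read a GRC range into the slowpath
 * wb_data buffer, falling back to indirect register reads while DMAE is
 * not ready.
 */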
void bnx2x_read_dmae(struct bnx2x *bp, u32 src_addr, u32 len32)
{
	struct dmae_command *dmae = &bp->init_dmae;
	u32 *wb_comp = bnx2x_sp(bp, wb_comp);
	int cnt = 200;

	if (!bp->dmae_ready) {
		u32 *data = bnx2x_sp(bp, wb_data[0]);
		int i;

		DP(BNX2X_MSG_OFF, "DMAE is not ready (src_addr %08x len32 %d)"
		   " using indirect\n", src_addr, len32);
		for (i = 0; i < len32; i++)
			data[i] = bnx2x_reg_rd_ind(bp, src_addr + i*4);
		return;
	}

	mutex_lock(&bp->dmae_mutex);

	memset(bnx2x_sp(bp, wb_data[0]), 0, sizeof(u32) * 4);
	memset(dmae, 0, sizeof(struct dmae_command));

	dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
			DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
			DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
#ifdef __BIG_ENDIAN
			DMAE_CMD_ENDIANITY_B_DW_SWAP |
#else
			DMAE_CMD_ENDIANITY_DW_SWAP |
#endif
			(BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
			(BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
	dmae->src_addr_lo = src_addr >> 2;
	dmae->src_addr_hi = 0;
	dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_data));
	dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_data));
	dmae->len = len32;
	dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, wb_comp));
	dmae->comp_val = DMAE_COMP_VAL;

	DP(BNX2X_MSG_OFF, "DMAE: opcode 0x%08x\n"
	   DP_LEVEL "src_addr [%x:%08x] len [%d *4] "
		    "dst_addr [%x:%08x (%08x)]\n"
	   DP_LEVEL "comp_addr [%x:%08x] comp_val 0x%08x\n",
	   dmae->opcode, dmae->src_addr_hi, dmae->src_addr_lo,
	   dmae->len, dmae->dst_addr_hi, dmae->dst_addr_lo, src_addr,
	   dmae->comp_addr_hi, dmae->comp_addr_lo, dmae->comp_val);

	*wb_comp = 0;

	bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));

	udelay(5);

	while (*wb_comp != DMAE_COMP_VAL) {

		if (!cnt) {
			BNX2X_ERR("DMAE timeout!\n");
			break;
		}
		cnt--;
		/* adjust delay for emulation/FPGA */
		if (CHIP_REV_IS_SLOW(bp))
			msleep(100);
		else
			udelay(5);
	}
	DP(BNX2X_MSG_OFF, "data [0x%08x 0x%08x 0x%08x 0x%08x]\n",
	   bp->slowpath->wb_data[0], bp->slowpath->wb_data[1],
	   bp->slowpath->wb_data[2], bp->slowpath->wb_data[3]);

	mutex_unlock(&bp->dmae_mutex);
}
/* used only for slowpath so not inlined */
static void bnx2x_wb_wr(struct bnx2x *bp, int reg, u32 val_hi, u32 val_lo)
{
	u32 wb_write[2];

	wb_write[0] = val_hi;
	wb_write[1] = val_lo;
	REG_WR_DMAE(bp, reg, wb_write, 2);
}
static u64 bnx2x_wb_rd(struct bnx2x *bp, int reg)
{
	u32 wb_data[2];

	REG_RD_DMAE(bp, reg, wb_data, 2);

	return HILO_U64(wb_data[0], wb_data[1]);
}
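/* Scan the X/T/C/U storm assert lists in internal memory, print any valid
 * entries and return how many asserts were found.
 */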
static int bnx2x_mc_assert(struct bnx2x *bp)
{
	char last_idx;
	int i, rc = 0;
	u32 row0, row1, row2, row3;

	/* XSTORM */
	last_idx = REG_RD8(bp, BAR_XSTRORM_INTMEM +
			   XSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("XSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_XSTRORM_INTMEM +
			      XSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("XSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* TSTORM */
	last_idx = REG_RD8(bp, BAR_TSTRORM_INTMEM +
			   TSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("TSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_TSTRORM_INTMEM +
			      TSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("TSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* CSTORM */
	last_idx = REG_RD8(bp, BAR_CSTRORM_INTMEM +
			   CSTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("CSTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_CSTRORM_INTMEM +
			      CSTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("CSTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	/* USTORM */
	last_idx = REG_RD8(bp, BAR_USTRORM_INTMEM +
			   USTORM_ASSERT_LIST_INDEX_OFFSET);
	if (last_idx)
		BNX2X_ERR("USTORM_ASSERT_LIST_INDEX 0x%x\n", last_idx);

	/* print the asserts */
	for (i = 0; i < STROM_ASSERT_ARRAY_SIZE; i++) {

		row0 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i));
		row1 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 4);
		row2 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 8);
		row3 = REG_RD(bp, BAR_USTRORM_INTMEM +
			      USTORM_ASSERT_LIST_OFFSET(i) + 12);

		if (row0 != COMMON_ASM_INVALID_ASSERT_OPCODE) {
			BNX2X_ERR("USTORM_ASSERT_INDEX 0x%x = 0x%08x"
				  " 0x%08x 0x%08x 0x%08x\n",
				  i, row3, row2, row1, row0);
			rc++;
		} else {
			break;
		}
	}

	return rc;
}
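/* Print the MCP trace buffer from the scratchpad, using the current mark
 * to dump the log in chronological order.
 */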
static void bnx2x_fw_dump(struct bnx2x *bp)
{
	u32 mark, offset;
	__be32 data[9];
	int word;

	mark = REG_RD(bp, MCP_REG_MCPR_SCRATCH + 0xf104);
	mark = ((mark + 0x3) & ~0x3);
	printk(KERN_ERR PFX "begin fw dump (mark 0x%x)\n", mark);

	printk(KERN_ERR PFX);
	for (offset = mark - 0x08000000; offset <= 0xF900; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	for (offset = 0xF108; offset <= mark - 0x08000000; offset += 0x8*4) {
		for (word = 0; word < 8; word++)
			data[word] = htonl(REG_RD(bp, MCP_REG_MCPR_SCRATCH +
						  offset + 4*word));
		data[8] = 0x0;
		printk(KERN_CONT "%s", (char *)data);
	}
	printk(KERN_ERR PFX "end of fw dump\n");
}
static void bnx2x_panic_dump(struct bnx2x *bp)
{
	int i;
	u16 j, start, end;

	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	BNX2X_ERR("begin crash dump -----------------\n");

	/* Indices */
	/* Common */
	BNX2X_ERR("def_c_idx(%u) def_u_idx(%u) def_x_idx(%u)"
		  " def_t_idx(%u) def_att_idx(%u) attn_state(%u)"
		  " spq_prod_idx(%u)\n",
		  bp->def_c_idx, bp->def_u_idx, bp->def_x_idx, bp->def_t_idx,
		  bp->def_att_idx, bp->attn_state, bp->spq_prod_idx);

	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		BNX2X_ERR("fp%d: rx_bd_prod(%x) rx_bd_cons(%x)"
			  " *rx_bd_cons_sb(%x) rx_comp_prod(%x)"
			  " rx_comp_cons(%x) *rx_cons_sb(%x)\n",
			  i, fp->rx_bd_prod, fp->rx_bd_cons,
			  le16_to_cpu(*fp->rx_bd_cons_sb), fp->rx_comp_prod,
			  fp->rx_comp_cons, le16_to_cpu(*fp->rx_cons_sb));
		BNX2X_ERR(" rx_sge_prod(%x) last_max_sge(%x)"
			  " fp_u_idx(%x) *sb_u_idx(%x)\n",
			  fp->rx_sge_prod, fp->last_max_sge,
			  le16_to_cpu(fp->fp_u_idx),
			  fp->status_blk->u_status_block.status_block_index);
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];
		struct eth_tx_db_data *hw_prods = fp->hw_tx_prods;

		BNX2X_ERR("fp%d: tx_pkt_prod(%x) tx_pkt_cons(%x)"
			  " tx_bd_prod(%x) tx_bd_cons(%x) *tx_cons_sb(%x)\n",
			  i, fp->tx_pkt_prod, fp->tx_pkt_cons, fp->tx_bd_prod,
			  fp->tx_bd_cons, le16_to_cpu(*fp->tx_cons_sb));
		BNX2X_ERR(" fp_c_idx(%x) *sb_c_idx(%x)"
			  " bd data(%x,%x)\n", le16_to_cpu(fp->fp_c_idx),
			  fp->status_blk->c_status_block.status_block_index,
			  hw_prods->packets_prod, hw_prods->bds_prod);
	}

	/* Rings */
	/* Rx */
	for_each_rx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = RX_BD(le16_to_cpu(*fp->rx_cons_sb) - 10);
		end = RX_BD(le16_to_cpu(*fp->rx_cons_sb) + 503);
		for (j = start; j != end; j = RX_BD(j + 1)) {
			u32 *rx_bd = (u32 *)&fp->rx_desc_ring[j];
			struct sw_rx_bd *sw_bd = &fp->rx_buf_ring[j];

			BNX2X_ERR("fp%d: rx_bd[%x]=[%x:%x] sw_bd=[%p]\n",
				  i, j, rx_bd[1], rx_bd[0], sw_bd->skb);
		}

		start = RX_SGE(fp->rx_sge_prod);
		end = RX_SGE(fp->last_max_sge);
		for (j = start; j != end; j = RX_SGE(j + 1)) {
			u32 *rx_sge = (u32 *)&fp->rx_sge_ring[j];
			struct sw_rx_page *sw_page = &fp->rx_page_ring[j];

			BNX2X_ERR("fp%d: rx_sge[%x]=[%x:%x] sw_page=[%p]\n",
				  i, j, rx_sge[1], rx_sge[0], sw_page->page);
		}

		start = RCQ_BD(fp->rx_comp_cons - 10);
		end = RCQ_BD(fp->rx_comp_cons + 503);
		for (j = start; j != end; j = RCQ_BD(j + 1)) {
			u32 *cqe = (u32 *)&fp->rx_comp_ring[j];

			BNX2X_ERR("fp%d: cqe[%x]=[%x:%x:%x:%x]\n",
				  i, j, cqe[0], cqe[1], cqe[2], cqe[3]);
		}
	}

	/* Tx */
	for_each_tx_queue(bp, i) {
		struct bnx2x_fastpath *fp = &bp->fp[i];

		start = TX_BD(le16_to_cpu(*fp->tx_cons_sb) - 10);
		end = TX_BD(le16_to_cpu(*fp->tx_cons_sb) + 245);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			struct sw_tx_bd *sw_bd = &fp->tx_buf_ring[j];

			BNX2X_ERR("fp%d: packet[%x]=[%p,%x]\n",
				  i, j, sw_bd->skb, sw_bd->first_bd);
		}

		start = TX_BD(fp->tx_bd_cons - 10);
		end = TX_BD(fp->tx_bd_cons + 254);
		for (j = start; j != end; j = TX_BD(j + 1)) {
			u32 *tx_bd = (u32 *)&fp->tx_desc_ring[j];

			BNX2X_ERR("fp%d: tx_bd[%x]=[%x:%x:%x:%x]\n",
				  i, j, tx_bd[0], tx_bd[1], tx_bd[2], tx_bd[3]);
		}
	}

	bnx2x_fw_dump(bp);
	bnx2x_mc_assert(bp);
	BNX2X_ERR("end crash dump -----------------\n");
}
static void bnx2x_int_enable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int msi = (bp->flags & USING_MSI_FLAG) ? 1 : 0;

	if (msix) {
		val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			 HC_CONFIG_0_REG_INT_LINE_EN_0);
		val |= (HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else if (msi) {
		val &= ~HC_CONFIG_0_REG_INT_LINE_EN_0;
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);
	} else {
		val |= (HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
			HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
			HC_CONFIG_0_REG_INT_LINE_EN_0 |
			HC_CONFIG_0_REG_ATTN_BIT_EN_0);

		DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
		   val, port, addr);

		REG_WR(bp, addr, val);

		val &= ~HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0;
	}

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x) mode %s\n",
	   val, port, addr, (msix ? "MSI-X" : (msi ? "MSI" : "INTx")));

	REG_WR(bp, addr, val);
	/*
	 * Ensure that HC_CONFIG is written before leading/trailing edge config
	 */
	mmiowb();
	barrier();

	if (CHIP_IS_E1H(bp)) {
		/* init leading/trailing edge */
		if (IS_E1HMF(bp)) {
			val = (0xee0f | (1 << (BP_E1HVN(bp) + 4)));
			if (bp->port.pmf)
				/* enable nig and gpio3 attention */
				val |= 0x1100;
		} else
			val = 0xffff;

		REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
		REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
	}

	/* Make sure that interrupts are indeed enabled from here on */
	mmiowb();
}
static void bnx2x_int_disable(struct bnx2x *bp)
{
	int port = BP_PORT(bp);
	u32 addr = port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0;
	u32 val = REG_RD(bp, addr);

	val &= ~(HC_CONFIG_0_REG_SINGLE_ISR_EN_0 |
		 HC_CONFIG_0_REG_MSI_MSIX_INT_EN_0 |
		 HC_CONFIG_0_REG_INT_LINE_EN_0 |
		 HC_CONFIG_0_REG_ATTN_BIT_EN_0);

	DP(NETIF_MSG_INTR, "write %x to HC %d (addr 0x%x)\n",
	   val, port, addr);

	/* flush all outstanding writes */
	mmiowb();

	REG_WR(bp, addr, val);
	if (REG_RD(bp, addr) != val)
		BNX2X_ERR("BUG! proper val not read from IGU!\n");
}
static void bnx2x_int_disable_sync(struct bnx2x *bp, int disable_hw)
{
	int msix = (bp->flags & USING_MSIX_FLAG) ? 1 : 0;
	int i, offset;

	/* disable interrupt handling */
	atomic_inc(&bp->intr_sem);
	smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */

	if (disable_hw)
		/* prevent the HW from sending interrupts */
		bnx2x_int_disable(bp);

	/* make sure all ISRs are done */
	if (msix) {
		synchronize_irq(bp->msix_table[0].vector);
		offset = 1;
		for_each_queue(bp, i)
			synchronize_irq(bp->msix_table[i + offset].vector);
	} else
		synchronize_irq(bp->pdev->irq);

	/* make sure sp_task is not running */
	cancel_delayed_work(&bp->sp_task);
	flush_workqueue(bnx2x_wq);
}
/*
 * General service functions
 */
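/* Acknowledge a status block towards the IGU: update the given storm index
 * and optionally enable/disable the status block interrupt with a single
 * igu_ack_register write.
 */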
static inline void bnx2x_ack_sb(struct bnx2x *bp, u8 sb_id,
				u8 storm, u16 index, u8 op, u8 update)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_INT_ACK);
	struct igu_ack_register igu_ack;

	igu_ack.status_block_index = index;
	igu_ack.sb_id_and_flags =
			((sb_id << IGU_ACK_REGISTER_STATUS_BLOCK_ID_SHIFT) |
			 (storm << IGU_ACK_REGISTER_STORM_ID_SHIFT) |
			 (update << IGU_ACK_REGISTER_UPDATE_INDEX_SHIFT) |
			 (op << IGU_ACK_REGISTER_INTERRUPT_MODE_SHIFT));

	DP(BNX2X_MSG_OFF, "write 0x%08x to HC addr 0x%x\n",
	   (*(u32 *)&igu_ack), hc_addr);
	REG_WR(bp, hc_addr, (*(u32 *)&igu_ack));

	/* Make sure that ACK is written */
	mmiowb();
	barrier();
}
static inline u16 bnx2x_update_fpsb_idx(struct bnx2x_fastpath *fp)
{
	struct host_status_block *fpsb = fp->status_blk;
	u16 rc = 0;

	barrier(); /* status block is written to by the chip */
	if (fp->fp_c_idx != fpsb->c_status_block.status_block_index) {
		fp->fp_c_idx = fpsb->c_status_block.status_block_index;
		rc |= 1;
	}
	if (fp->fp_u_idx != fpsb->u_status_block.status_block_index) {
		fp->fp_u_idx = fpsb->u_status_block.status_block_index;
		rc |= 2;
	}
	return rc;
}
static u16 bnx2x_ack_int(struct bnx2x *bp)
{
	u32 hc_addr = (HC_REG_COMMAND_REG + BP_PORT(bp)*32 +
		       COMMAND_REG_SIMD_MASK);
	u32 result = REG_RD(bp, hc_addr);

	DP(BNX2X_MSG_OFF, "read 0x%08x from HC addr 0x%x\n",
	   result, hc_addr);

	return result;
}

/*
 * fast path service functions
 */
static inline int bnx2x_has_tx_work(struct bnx2x_fastpath *fp)
{
	u16 tx_cons_sb;

	/* Tell compiler that status block fields can change */
	barrier();
	tx_cons_sb = le16_to_cpu(*fp->tx_cons_sb);
	return (fp->tx_pkt_cons != tx_cons_sb);
}
static inline int bnx2x_has_tx_work_unload(struct bnx2x_fastpath *fp)
{
	/* Tell compiler that consumer and producer can change */
	barrier();
	return (fp->tx_pkt_prod != fp->tx_pkt_cons);
}
/* free skb in the packet ring at pos idx
 * return idx of last bd freed
 */
static u16 bnx2x_free_tx_pkt(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			     u16 idx)
{
	struct sw_tx_bd *tx_buf = &fp->tx_buf_ring[idx];
	struct eth_tx_bd *tx_bd;
	struct sk_buff *skb = tx_buf->skb;
	u16 bd_idx = TX_BD(tx_buf->first_bd), new_cons;
	int nbd;

	DP(BNX2X_MSG_OFF, "pkt_idx %d buff @(%p)->skb %p\n",
	   idx, tx_buf, skb);

	/* unmap first bd */
	DP(BNX2X_MSG_OFF, "free bd_idx %d\n", bd_idx);
	tx_bd = &fp->tx_desc_ring[bd_idx];
	pci_unmap_single(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			 BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);

	nbd = le16_to_cpu(tx_bd->nbd) - 1;
	new_cons = nbd + tx_buf->first_bd;
#ifdef BNX2X_STOP_ON_ERROR
	if (nbd > (MAX_SKB_FRAGS + 2)) {
		BNX2X_ERR("BAD nbd!\n");
		bnx2x_panic();
	}
#endif

	/* Skip a parse bd and the TSO split header bd
	   since they have no mapping */
	if (nbd)
		bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));

	if (tx_bd->bd_flags.as_bitfield & (ETH_TX_BD_FLAGS_IP_CSUM |
					   ETH_TX_BD_FLAGS_TCP_CSUM |
					   ETH_TX_BD_FLAGS_SW_LSO)) {
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		tx_bd = &fp->tx_desc_ring[bd_idx];
		/* is this a TSO split header bd? */
		if (tx_bd->bd_flags.as_bitfield & ETH_TX_BD_FLAGS_SW_LSO) {
			if (--nbd)
				bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
		}
	}

	/* now free frags */
	while (nbd > 0) {

		DP(BNX2X_MSG_OFF, "free frag bd_idx %d\n", bd_idx);
		tx_bd = &fp->tx_desc_ring[bd_idx];
		pci_unmap_page(bp->pdev, BD_UNMAP_ADDR(tx_bd),
			       BD_UNMAP_LEN(tx_bd), PCI_DMA_TODEVICE);
		if (--nbd)
			bd_idx = TX_BD(NEXT_TX_IDX(bd_idx));
	}

	/* release skb */
	WARN_ON(!skb);
	dev_kfree_skb(skb);
	tx_buf->skb = NULL;
	tx_buf->first_bd = 0;

	return new_cons;
}
static inline u16 bnx2x_tx_avail(struct bnx2x_fastpath *fp)
{
	s16 used;
	u16 prod;
	u16 cons;

	barrier(); /* Tell compiler that prod and cons can change */
	prod = fp->tx_bd_prod;
	cons = fp->tx_bd_cons;

	/* NUM_TX_RINGS = number of "next-page" entries
	   It will be used as a threshold */
	used = SUB_S16(prod, cons) + (s16)NUM_TX_RINGS;

#ifdef BNX2X_STOP_ON_ERROR
	WARN_ON(used < 0);
	WARN_ON(used > fp->bp->tx_ring_size);
	WARN_ON((fp->bp->tx_ring_size - used) > MAX_TX_AVAIL);
#endif

	return (s16)(fp->bp->tx_ring_size) - used;
}
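/* Reclaim completed TX packets up to the consumer index in the status
 * block and, if the queue was stopped, wake it once enough BDs are free
 * (see the smp_mb() comment below for the race with start_xmit()).
 */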
static void bnx2x_tx_int(struct bnx2x_fastpath *fp)
{
	struct bnx2x *bp = fp->bp;
	struct netdev_queue *txq;
	u16 hw_cons, sw_cons, bd_cons = fp->tx_bd_cons;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return;
#endif

	txq = netdev_get_tx_queue(bp->dev, fp->index);
	hw_cons = le16_to_cpu(*fp->tx_cons_sb);
	sw_cons = fp->tx_pkt_cons;

	while (sw_cons != hw_cons) {
		u16 pkt_cons;

		pkt_cons = TX_BD(sw_cons);

		/* prefetch(bp->tx_buf_ring[pkt_cons].skb); */

		DP(NETIF_MSG_TX_DONE, "hw_cons %u sw_cons %u pkt_cons %u\n",
		   hw_cons, sw_cons, pkt_cons);

/*		if (NEXT_TX_IDX(sw_cons) != hw_cons) {
			rmb();
			prefetch(fp->tx_buf_ring[NEXT_TX_IDX(sw_cons)].skb);
		}
*/
		bd_cons = bnx2x_free_tx_pkt(bp, fp, pkt_cons);
		sw_cons++;
	}

	fp->tx_pkt_cons = sw_cons;
	fp->tx_bd_cons = bd_cons;

	/* TBD need a thresh? */
	if (unlikely(netif_tx_queue_stopped(txq))) {

		__netif_tx_lock(txq, smp_processor_id());

		/* Need to make the tx_bd_cons update visible to start_xmit()
		 * before checking for netif_tx_queue_stopped(). Without the
		 * memory barrier, there is a small possibility that
		 * start_xmit() will miss it and cause the queue to be stopped
		 * forever.
		 */
		smp_mb();

		if ((netif_tx_queue_stopped(txq)) &&
		    (bp->state == BNX2X_STATE_OPEN) &&
		    (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3))
			netif_tx_wake_queue(txq);

		__netif_tx_unlock(txq);
	}
}
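/* Dispatch a slowpath completion: ramrods complete through the RX
 * completion queue, and the command+state pair selects the fastpath or
 * global state transition.
 */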
static void bnx2x_sp_event(struct bnx2x_fastpath *fp,
			   union eth_rx_cqe *rr_cqe)
{
	struct bnx2x *bp = fp->bp;
	int cid = SW_CID(rr_cqe->ramrod_cqe.conn_and_cmd_data);
	int command = CQE_CMD(rr_cqe->ramrod_cqe.conn_and_cmd_data);

	DP(BNX2X_MSG_SP,
	   "fp %d cid %d got ramrod #%d state is %x type is %d\n",
	   fp->index, cid, command, bp->state,
	   rr_cqe->ramrod_cqe.ramrod_type);

	bp->spq_left++;

	if (fp->index) {
		switch (command | fp->state) {
		case (RAMROD_CMD_ID_ETH_CLIENT_SETUP |
						BNX2X_FP_STATE_OPENING):
			DP(NETIF_MSG_IFUP, "got MULTI[%d] setup ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_OPEN;
			break;

		case (RAMROD_CMD_ID_ETH_HALT | BNX2X_FP_STATE_HALTING):
			DP(NETIF_MSG_IFDOWN, "got MULTI[%d] halt ramrod\n",
			   cid);
			fp->state = BNX2X_FP_STATE_HALTED;
			break;

		default:
			BNX2X_ERR("unexpected MC reply (%d) "
				  "fp->state is %x\n", command, fp->state);
			break;
		}
		mb(); /* force bnx2x_wait_ramrod() to see the change */
		return;
	}

	switch (command | bp->state) {
	case (RAMROD_CMD_ID_ETH_PORT_SETUP | BNX2X_STATE_OPENING_WAIT4_PORT):
		DP(NETIF_MSG_IFUP, "got setup ramrod\n");
		bp->state = BNX2X_STATE_OPEN;
		break;

	case (RAMROD_CMD_ID_ETH_HALT | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got halt ramrod\n");
		bp->state = BNX2X_STATE_CLOSING_WAIT4_DELETE;
		fp->state = BNX2X_FP_STATE_HALTED;
		break;

	case (RAMROD_CMD_ID_ETH_CFC_DEL | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got delete ramrod for MULTI[%d]\n", cid);
		bnx2x_fp(bp, cid, state) = BNX2X_FP_STATE_CLOSED;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_OPEN):
	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_DIAG):
		DP(NETIF_MSG_IFUP, "got set mac ramrod\n");
		bp->set_mac_pending = 0;
		break;

	case (RAMROD_CMD_ID_ETH_SET_MAC | BNX2X_STATE_CLOSING_WAIT4_HALT):
		DP(NETIF_MSG_IFDOWN, "got (un)set mac ramrod\n");
		break;

	default:
		BNX2X_ERR("unexpected MC reply (%d) bp->state is %x\n",
			  command, bp->state);
		break;
	}
	mb(); /* force bnx2x_wait_ramrod() to see the change */
}
static inline void bnx2x_free_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct page *page = sw_buf->page;
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];

	/* Skip "next page" elements */
	if (!page)
		return;

	pci_unmap_page(bp->pdev, pci_unmap_addr(sw_buf, mapping),
		       SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);
	__free_pages(page, PAGES_PER_SGE_SHIFT);

	sw_buf->page = NULL;
	sge->addr_hi = 0;
	sge->addr_lo = 0;
}
static inline void bnx2x_free_rx_sge_range(struct bnx2x *bp,
					   struct bnx2x_fastpath *fp, int last)
{
	int i;

	for (i = 0; i < last; i++)
		bnx2x_free_rx_sge(bp, fp, i);
}
static inline int bnx2x_alloc_rx_sge(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct page *page = alloc_pages(GFP_ATOMIC, PAGES_PER_SGE_SHIFT);
	struct sw_rx_page *sw_buf = &fp->rx_page_ring[index];
	struct eth_rx_sge *sge = &fp->rx_sge_ring[index];
	dma_addr_t mapping;

	if (unlikely(page == NULL))
		return -ENOMEM;

	mapping = pci_map_page(bp->pdev, page, 0, SGE_PAGE_SIZE*PAGES_PER_SGE,
			       PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		__free_pages(page, PAGES_PER_SGE_SHIFT);
		return -ENOMEM;
	}

	sw_buf->page = page;
	pci_unmap_addr_set(sw_buf, mapping, mapping);

	sge->addr_hi = cpu_to_le32(U64_HI(mapping));
	sge->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
static inline int bnx2x_alloc_rx_skb(struct bnx2x *bp,
				     struct bnx2x_fastpath *fp, u16 index)
{
	struct sk_buff *skb;
	struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[index];
	struct eth_rx_bd *rx_bd = &fp->rx_desc_ring[index];
	dma_addr_t mapping;

	skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
	if (unlikely(skb == NULL))
		return -ENOMEM;

	mapping = pci_map_single(bp->pdev, skb->data, bp->rx_buf_size,
				 PCI_DMA_FROMDEVICE);
	if (unlikely(dma_mapping_error(&bp->pdev->dev, mapping))) {
		dev_kfree_skb(skb);
		return -ENOMEM;
	}

	rx_buf->skb = skb;
	pci_unmap_addr_set(rx_buf, mapping, mapping);

	rx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	rx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

	return 0;
}
/* note that we are not allocating a new skb,
 * we are just moving one from cons to prod
 * we are not creating a new mapping,
 * so there is no need to check for dma_mapping_error().
 */
static void bnx2x_reuse_rx_skb(struct bnx2x_fastpath *fp,
			       struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *cons_bd = &fp->rx_desc_ring[cons];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];

	pci_dma_sync_single_for_device(bp->pdev,
				       pci_unmap_addr(cons_rx_buf, mapping),
				       RX_COPY_THRESH, PCI_DMA_FROMDEVICE);

	prod_rx_buf->skb = cons_rx_buf->skb;
	pci_unmap_addr_set(prod_rx_buf, mapping,
			   pci_unmap_addr(cons_rx_buf, mapping));
	*prod_bd = *cons_bd;
}
static inline void bnx2x_update_last_max_sge(struct bnx2x_fastpath *fp,
					     u16 idx)
{
	u16 last_max = fp->last_max_sge;

	if (SUB_S16(idx, last_max) > 0)
		fp->last_max_sge = idx;
}
static void bnx2x_clear_sge_mask_next_elems(struct bnx2x_fastpath *fp)
{
	int i, j;

	for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
		int idx = RX_SGE_CNT * i - 1;

		for (j = 0; j < 2; j++) {
			SGE_MASK_CLEAR_BIT(fp, idx);
			idx--;
		}
	}
}
static void bnx2x_update_sge_prod(struct bnx2x_fastpath *fp,
				  struct eth_fast_path_rx_cqe *fp_cqe)
{
	struct bnx2x *bp = fp->bp;
	u16 sge_len = SGE_PAGE_ALIGN(le16_to_cpu(fp_cqe->pkt_len) -
				     le16_to_cpu(fp_cqe->len_on_bd)) >>
		      SGE_PAGE_SHIFT;
	u16 last_max, last_elem, first_elem;
	u16 delta = 0;
	u16 i;

	if (!sge_len)
		return;

	/* First mark all used pages */
	for (i = 0; i < sge_len; i++)
		SGE_MASK_CLEAR_BIT(fp, RX_SGE(le16_to_cpu(fp_cqe->sgl[i])));

	DP(NETIF_MSG_RX_STATUS, "fp_cqe->sgl[%d] = %d\n",
	   sge_len - 1, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	/* Here we assume that the last SGE index is the biggest */
	prefetch((void *)(fp->sge_mask));
	bnx2x_update_last_max_sge(fp, le16_to_cpu(fp_cqe->sgl[sge_len - 1]));

	last_max = RX_SGE(fp->last_max_sge);
	last_elem = last_max >> RX_SGE_MASK_ELEM_SHIFT;
	first_elem = RX_SGE(fp->rx_sge_prod) >> RX_SGE_MASK_ELEM_SHIFT;

	/* If ring is not full */
	if (last_elem + 1 != first_elem)
		last_elem++;

	/* Now update the prod */
	for (i = first_elem; i != last_elem; i = NEXT_SGE_MASK_ELEM(i)) {
		if (likely(fp->sge_mask[i]))
			break;

		fp->sge_mask[i] = RX_SGE_MASK_ELEM_ONE_MASK;
		delta += RX_SGE_MASK_ELEM_SZ;
	}

	if (delta > 0) {
		fp->rx_sge_prod += delta;
		/* clear page-end entries */
		bnx2x_clear_sge_mask_next_elems(fp);
	}

	DP(NETIF_MSG_RX_STATUS,
	   "fp->last_max_sge = %d fp->rx_sge_prod = %d\n",
	   fp->last_max_sge, fp->rx_sge_prod);
}
static inline void bnx2x_init_sge_ring_bit_mask(struct bnx2x_fastpath *fp)
{
	/* Set the mask to all 1-s: it's faster to compare to 0 than to 0xf-s */
	memset(fp->sge_mask, 0xff,
	       (NUM_RX_SGE >> RX_SGE_MASK_ELEM_SHIFT)*sizeof(u64));

	/* Clear the two last indices in the page to 1:
	   these are the indices that correspond to the "next" element,
	   hence will never be indicated and should be removed from
	   the calculations. */
	bnx2x_clear_sge_mask_next_elems(fp);
}
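/* Start a TPA (LRO) aggregation on the given bin: park the partially
 * received skb in the tpa_pool and hand the bin's spare skb to the
 * producer BD so the ring stays full while the aggregation is open.
 */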
static void bnx2x_tpa_start(struct bnx2x_fastpath *fp, u16 queue,
			    struct sk_buff *skb, u16 cons, u16 prod)
{
	struct bnx2x *bp = fp->bp;
	struct sw_rx_bd *cons_rx_buf = &fp->rx_buf_ring[cons];
	struct sw_rx_bd *prod_rx_buf = &fp->rx_buf_ring[prod];
	struct eth_rx_bd *prod_bd = &fp->rx_desc_ring[prod];
	dma_addr_t mapping;

	/* move empty skb from pool to prod and map it */
	prod_rx_buf->skb = fp->tpa_pool[queue].skb;
	mapping = pci_map_single(bp->pdev, fp->tpa_pool[queue].skb->data,
				 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
	pci_unmap_addr_set(prod_rx_buf, mapping, mapping);

	/* move partial skb from cons to pool (don't unmap yet) */
	fp->tpa_pool[queue] = *cons_rx_buf;

	/* mark bin state as start - print error if current state != stop */
	if (fp->tpa_state[queue] != BNX2X_TPA_STOP)
		BNX2X_ERR("start of bin not in stop [%d]\n", queue);

	fp->tpa_state[queue] = BNX2X_TPA_START;

	/* point prod_bd to new skb */
	prod_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
	prod_bd->addr_lo = cpu_to_le32(U64_LO(mapping));

#ifdef BNX2X_STOP_ON_ERROR
	fp->tpa_queue_used |= (1 << queue);
#ifdef __powerpc64__
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%lx\n",
#else
	DP(NETIF_MSG_RX_STATUS, "fp->tpa_queue_used = 0x%llx\n",
#endif
	   fp->tpa_queue_used);
#endif
}
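/* Attach the SGE pages listed in the CQE to the aggregated skb as page
 * fragments, replenishing each consumed SGE with a freshly allocated page;
 * also sets gso_size so GRO/forwarding can resegment the packet.
 */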
static int bnx2x_fill_frag_skb(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			       struct sk_buff *skb,
			       struct eth_fast_path_rx_cqe *fp_cqe,
			       u16 cqe_idx)
{
	struct sw_rx_page *rx_pg, old_rx_pg;
	u16 len_on_bd = le16_to_cpu(fp_cqe->len_on_bd);
	u32 i, frag_len, frag_size, pages;
	int err;
	int j;

	frag_size = le16_to_cpu(fp_cqe->pkt_len) - len_on_bd;
	pages = SGE_PAGE_ALIGN(frag_size) >> SGE_PAGE_SHIFT;

	/* This is needed in order to enable forwarding support */
	if (frag_size)
		skb_shinfo(skb)->gso_size = min((u32)SGE_PAGE_SIZE,
					       max(frag_size, (u32)len_on_bd));

#ifdef BNX2X_STOP_ON_ERROR
	if (pages >
	    min((u32)8, (u32)MAX_SKB_FRAGS) * SGE_PAGE_SIZE * PAGES_PER_SGE) {
		BNX2X_ERR("SGL length is too long: %d. CQE index is %d\n",
			  pages, cqe_idx);
		BNX2X_ERR("fp_cqe->pkt_len = %d fp_cqe->len_on_bd = %d\n",
			  fp_cqe->pkt_len, len_on_bd);
		bnx2x_panic();
		return -EINVAL;
	}
#endif

	/* Run through the SGL and compose the fragmented skb */
	for (i = 0, j = 0; i < pages; i += PAGES_PER_SGE, j++) {
		u16 sge_idx = RX_SGE(le16_to_cpu(fp_cqe->sgl[j]));

		/* FW gives the indices of the SGE as if the ring is an array
		   (meaning that "next" element will consume 2 indices) */
		frag_len = min(frag_size, (u32)(SGE_PAGE_SIZE*PAGES_PER_SGE));
		rx_pg = &fp->rx_page_ring[sge_idx];
		old_rx_pg = *rx_pg;

		/* If we fail to allocate a substitute page, we simply stop
		   where we are and drop the whole packet */
		err = bnx2x_alloc_rx_sge(bp, fp, sge_idx);
		if (unlikely(err)) {
			fp->eth_q_stats.rx_skb_alloc_failed++;
			return err;
		}

		/* Unmap the page as we are going to pass it to the stack */
		pci_unmap_page(bp->pdev, pci_unmap_addr(&old_rx_pg, mapping),
			      SGE_PAGE_SIZE*PAGES_PER_SGE, PCI_DMA_FROMDEVICE);

		/* Add one frag and update the appropriate fields in the skb */
		skb_fill_page_desc(skb, j, old_rx_pg.page, 0, frag_len);

		skb->data_len += frag_len;
		skb->truesize += frag_len;
		skb->len += frag_len;

		frag_size -= frag_len;
	}

	return 0;
}
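/* Close a TPA aggregation: fix the IP checksum of the linear part, attach
 * the SGE fragments and pass the skb up the stack.  The bin returns to
 * BNX2X_TPA_STOP whether or not a replacement skb could be allocated
 * (the packet is dropped on allocation failure).
 */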
static void bnx2x_tpa_stop(struct bnx2x *bp, struct bnx2x_fastpath *fp,
			   u16 queue, int pad, int len, union eth_rx_cqe *cqe,
			   u16 cqe_idx)
{
	struct sw_rx_bd *rx_buf = &fp->tpa_pool[queue];
	struct sk_buff *skb = rx_buf->skb;
	/* alloc new skb */
	struct sk_buff *new_skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);

	/* Unmap skb in the pool anyway, as we are going to change
	   pool entry status to BNX2X_TPA_STOP even if new skb allocation
	   fails. */
	pci_unmap_single(bp->pdev, pci_unmap_addr(rx_buf, mapping),
			 bp->rx_buf_size, PCI_DMA_FROMDEVICE);

	if (likely(new_skb)) {
		/* fix ip xsum and give it to the stack */
		/* (no need to map the new skb) */
#ifdef BCM_VLAN
		int is_vlan_cqe =
			(le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
			 PARSING_FLAGS_VLAN);
		int is_not_hwaccel_vlan_cqe =
			(is_vlan_cqe && (!(bp->flags & HW_VLAN_RX_FLAG)));
#endif

		prefetch(skb);
		prefetch(((char *)(skb)) + 128);

#ifdef BNX2X_STOP_ON_ERROR
		if (pad + len > bp->rx_buf_size) {
			BNX2X_ERR("skb_put is about to fail... "
				  "pad %d len %d rx_buf_size %d\n",
				  pad, len, bp->rx_buf_size);
			bnx2x_panic();
			return;
		}
#endif

		skb_reserve(skb, pad);
		skb_put(skb, len);

		skb->protocol = eth_type_trans(skb, bp->dev);
		skb->ip_summed = CHECKSUM_UNNECESSARY;

		{
			struct iphdr *iph;

			iph = (struct iphdr *)skb->data;
#ifdef BCM_VLAN
			/* If there is no Rx VLAN offloading -
			   take VLAN tag into an account */
			if (unlikely(is_not_hwaccel_vlan_cqe))
				iph = (struct iphdr *)((u8 *)iph + VLAN_HLEN);
#endif
			iph->check = 0;
			iph->check = ip_fast_csum((u8 *)iph, iph->ihl);
		}

		if (!bnx2x_fill_frag_skb(bp, fp, skb,
					 &cqe->fast_path_cqe, cqe_idx)) {
#ifdef BCM_VLAN
			if ((bp->vlgrp != NULL) && is_vlan_cqe &&
			    (!is_not_hwaccel_vlan_cqe))
				vlan_hwaccel_receive_skb(skb, bp->vlgrp,
						le16_to_cpu(cqe->fast_path_cqe.
							    vlan_tag));
			else
#endif
				netif_receive_skb(skb);
		} else {
			DP(NETIF_MSG_RX_STATUS, "Failed to allocate new pages"
			   " - dropping packet!\n");
			dev_kfree_skb(skb);
		}

		/* put new skb in bin */
		fp->tpa_pool[queue].skb = new_skb;

	} else {
		/* else drop the packet and keep the buffer in the bin */
		DP(NETIF_MSG_RX_STATUS,
		   "Failed to allocate new skb - dropping packet!\n");
		fp->eth_q_stats.rx_skb_alloc_failed++;
	}

	fp->tpa_state[queue] = BNX2X_TPA_STOP;
}
static inline void bnx2x_update_rx_prod(struct bnx2x *bp,
					struct bnx2x_fastpath *fp,
					u16 bd_prod, u16 rx_comp_prod,
					u16 rx_sge_prod)
{
	struct ustorm_eth_rx_producers rx_prods = {0};
	int i;

	/* Update producers */
	rx_prods.bd_prod = bd_prod;
	rx_prods.cqe_prod = rx_comp_prod;
	rx_prods.sge_prod = rx_sge_prod;

	/*
	 * Make sure that the BD and SGE data is updated before updating the
	 * producers since FW might read the BD/SGE right after the producer
	 * is updated.
	 * This is only applicable for weak-ordered memory model archs such
	 * as IA-64. The following barrier is also mandatory since FW
	 * assumes BDs must have buffers.
	 */
	wmb();

	for (i = 0; i < sizeof(struct ustorm_eth_rx_producers)/4; i++)
		REG_WR(bp, BAR_USTRORM_INTMEM +
		       USTORM_RX_PRODS_OFFSET(BP_PORT(bp), fp->cl_id) + i*4,
		       ((u32 *)&rx_prods)[i]);

	mmiowb(); /* keep prod updates ordered */

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: wrote bd_prod %u cqe_prod %u sge_prod %u\n",
	   fp->index, bd_prod, rx_comp_prod, rx_sge_prod);
}
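/* NAPI RX loop: consume completion queue entries up to the status block
 * index, handing slowpath CQEs to bnx2x_sp_event(), driving the TPA state
 * machine for aggregated packets and delivering regular packets (with an
 * optional small-packet copy) up to the stack, bounded by the budget.
 */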
static int bnx2x_rx_int(struct bnx2x_fastpath *fp, int budget)
{
	struct bnx2x *bp = fp->bp;
	u16 bd_cons, bd_prod, bd_prod_fw, comp_ring_cons;
	u16 hw_comp_cons, sw_comp_cons, sw_comp_prod;
	int rx_pkt = 0;

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return 0;
#endif

	/* CQ "next element" is of the size of the regular element,
	   that's why it's ok here */
	hw_comp_cons = le16_to_cpu(*fp->rx_cons_sb);
	if ((hw_comp_cons & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
		hw_comp_cons++;

	bd_cons = fp->rx_bd_cons;
	bd_prod = fp->rx_bd_prod;
	bd_prod_fw = bd_prod;
	sw_comp_cons = fp->rx_comp_cons;
	sw_comp_prod = fp->rx_comp_prod;

	/* Memory barrier necessary as speculative reads of the rx
	 * buffer can be ahead of the index in the status block
	 */
	rmb();

	DP(NETIF_MSG_RX_STATUS,
	   "queue[%d]: hw_comp_cons %u sw_comp_cons %u\n",
	   fp->index, hw_comp_cons, sw_comp_cons);

	while (sw_comp_cons != hw_comp_cons) {
		struct sw_rx_bd *rx_buf = NULL;
		struct sk_buff *skb;
		union eth_rx_cqe *cqe;
		u8 cqe_fp_flags;
		u16 len, pad;

		comp_ring_cons = RCQ_BD(sw_comp_cons);
		bd_prod = RX_BD(bd_prod);
		bd_cons = RX_BD(bd_cons);

		cqe = &fp->rx_comp_ring[comp_ring_cons];
		cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;

		DP(NETIF_MSG_RX_STATUS, "CQE type %x err %x status %x"
		   " queue %x vlan %x len %u\n", CQE_TYPE(cqe_fp_flags),
		   cqe_fp_flags, cqe->fast_path_cqe.status_flags,
		   le32_to_cpu(cqe->fast_path_cqe.rss_hash_result),
		   le16_to_cpu(cqe->fast_path_cqe.vlan_tag),
		   le16_to_cpu(cqe->fast_path_cqe.pkt_len));

		/* is this a slowpath msg? */
		if (unlikely(CQE_TYPE(cqe_fp_flags))) {
			bnx2x_sp_event(fp, cqe);
			goto next_cqe;

		/* this is an rx packet */
		} else {
			rx_buf = &fp->rx_buf_ring[bd_cons];
			skb = rx_buf->skb;
			len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
			pad = cqe->fast_path_cqe.placement_offset;

			/* If CQE is marked both TPA_START and TPA_END
			   it is a non-TPA CQE */
			if ((!fp->disable_tpa) &&
			    (TPA_TYPE(cqe_fp_flags) !=
					(TPA_TYPE_START | TPA_TYPE_END))) {
				u16 queue = cqe->fast_path_cqe.queue_index;

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_START) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_start on queue %d\n",
					   queue);

					bnx2x_tpa_start(fp, queue, skb,
							bd_cons, bd_prod);
					goto next_rx;
				}

				if (TPA_TYPE(cqe_fp_flags) == TPA_TYPE_END) {
					DP(NETIF_MSG_RX_STATUS,
					   "calling tpa_stop on queue %d\n",
					   queue);

					if (!BNX2X_RX_SUM_FIX(cqe))
						BNX2X_ERR("STOP on none TCP "
							  "data\n");

					/* This is a size of the linear data
					   on this skb */
					len = le16_to_cpu(cqe->fast_path_cqe.
								len_on_bd);
					bnx2x_tpa_stop(bp, fp, queue, pad,
						       len, cqe, comp_ring_cons);
#ifdef BNX2X_STOP_ON_ERROR
					if (bp->panic)
						return 0;
#endif

					bnx2x_update_sge_prod(fp,
							&cqe->fast_path_cqe);
					goto next_cqe;
				}
			}

			pci_dma_sync_single_for_device(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						       pad + RX_COPY_THRESH,
						       PCI_DMA_FROMDEVICE);
			prefetch(((char *)(skb)) + 128);

			/* is this an error packet? */
			if (unlikely(cqe_fp_flags & ETH_RX_ERROR_FALGS)) {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR flags %x rx packet %u\n",
				   cqe_fp_flags, sw_comp_cons);
				fp->eth_q_stats.rx_err_discard_pkt++;
				goto reuse_rx;
			}

			/* Since we don't have a jumbo ring
			 * copy small packets if mtu > 1500
			 */
			if ((bp->dev->mtu > ETH_MAX_PACKET_SIZE) &&
			    (len <= RX_COPY_THRESH)) {
				struct sk_buff *new_skb;

				new_skb = netdev_alloc_skb(bp->dev,
							   len + pad);
				if (new_skb == NULL) {
					DP(NETIF_MSG_RX_ERR,
					   "ERROR packet dropped "
					   "because of alloc failure\n");
					fp->eth_q_stats.rx_skb_alloc_failed++;
					goto reuse_rx;
				}

				/* aligned copy */
				skb_copy_from_linear_data_offset(skb, pad,
						    new_skb->data + pad, len);
				skb_reserve(new_skb, pad);
				skb_put(new_skb, len);

				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);

				skb = new_skb;

			} else if (bnx2x_alloc_rx_skb(bp, fp, bd_prod) == 0) {
				pci_unmap_single(bp->pdev,
					pci_unmap_addr(rx_buf, mapping),
						 bp->rx_buf_size,
						 PCI_DMA_FROMDEVICE);
				skb_reserve(skb, pad);
				skb_put(skb, len);

			} else {
				DP(NETIF_MSG_RX_ERR,
				   "ERROR packet dropped because "
				   "of alloc failure\n");
				fp->eth_q_stats.rx_skb_alloc_failed++;
reuse_rx:
				bnx2x_reuse_rx_skb(fp, skb, bd_cons, bd_prod);
				goto next_rx;
			}

			skb->protocol = eth_type_trans(skb, bp->dev);

			skb->ip_summed = CHECKSUM_NONE;
			if (bp->rx_csum) {
				if (likely(BNX2X_RX_CSUM_OK(cqe)))
					skb->ip_summed = CHECKSUM_UNNECESSARY;
				else
					fp->eth_q_stats.hw_csum_err++;
			}
		}

		skb_record_rx_queue(skb, fp->index);
#ifdef BCM_VLAN
		if ((bp->vlgrp != NULL) && (bp->flags & HW_VLAN_RX_FLAG) &&
		    (le16_to_cpu(cqe->fast_path_cqe.pars_flags.flags) &
		     PARSING_FLAGS_VLAN))
			vlan_hwaccel_receive_skb(skb, bp->vlgrp,
				le16_to_cpu(cqe->fast_path_cqe.vlan_tag));
		else
#endif
			netif_receive_skb(skb);

next_rx:
		rx_buf->skb = NULL;

		bd_cons = NEXT_RX_IDX(bd_cons);
		bd_prod = NEXT_RX_IDX(bd_prod);
		bd_prod_fw = NEXT_RX_IDX(bd_prod_fw);
		rx_pkt++;
next_cqe:
		sw_comp_prod = NEXT_RCQ_IDX(sw_comp_prod);
		sw_comp_cons = NEXT_RCQ_IDX(sw_comp_cons);

		if (rx_pkt == budget)
			break;
	} /* while */

	fp->rx_bd_cons = bd_cons;
	fp->rx_bd_prod = bd_prod_fw;
	fp->rx_comp_cons = sw_comp_cons;
	fp->rx_comp_prod = sw_comp_prod;

	/* Update producers */
	bnx2x_update_rx_prod(bp, fp, bd_prod_fw, sw_comp_prod,
			     fp->rx_sge_prod);

	fp->rx_pkt += rx_pkt;
	fp->rx_calls++;

	return rx_pkt;
}
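/* MSI-X fastpath vector handler: disable further status block interrupts
 * via the IGU ack and hand the queue to NAPI.
 */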
static irqreturn_t bnx2x_msix_fp_int(int irq, void *fp_cookie)
{
	struct bnx2x_fastpath *fp = fp_cookie;
	struct bnx2x *bp = fp->bp;
	int index = fp->index;

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

	DP(BNX2X_MSG_FP, "got an MSI-X interrupt on IDX:SB [%d:%d]\n",
	   index, fp->sb_id);
	bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID, 0, IGU_INT_DISABLE, 0);

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	prefetch(fp->rx_cons_sb);
	prefetch(fp->tx_cons_sb);
	prefetch(&fp->status_blk->c_status_block.status_block_index);
	prefetch(&fp->status_blk->u_status_block.status_block_index);

	napi_schedule(&bnx2x_fp(bp, index, napi));

	return IRQ_HANDLED;
}
static irqreturn_t bnx2x_interrupt(int irq, void *dev_instance)
{
	struct bnx2x *bp = netdev_priv(dev_instance);
	u16 status = bnx2x_ack_int(bp);
	u16 mask;

	/* Return here if interrupt is shared and it's not for us */
	if (unlikely(status == 0)) {
		DP(NETIF_MSG_INTR, "not our interrupt!\n");
		return IRQ_NONE;
	}
	DP(NETIF_MSG_INTR, "got an interrupt status 0x%x\n", status);

	/* Return here if interrupt is disabled */
	if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
		DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
		return IRQ_HANDLED;
	}

#ifdef BNX2X_STOP_ON_ERROR
	if (unlikely(bp->panic))
		return IRQ_HANDLED;
#endif

	mask = 0x2 << bp->fp[0].sb_id;
	if (status & mask) {
		struct bnx2x_fastpath *fp = &bp->fp[0];

		prefetch(fp->rx_cons_sb);
		prefetch(fp->tx_cons_sb);
		prefetch(&fp->status_blk->c_status_block.status_block_index);
		prefetch(&fp->status_blk->u_status_block.status_block_index);

		napi_schedule(&bnx2x_fp(bp, 0, napi));

		status &= ~mask;
	}

	if (unlikely(status & 0x1)) {
		queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);

		status &= ~0x1;
		if (!status)
			return IRQ_HANDLED;
	}

	if (status)
		DP(NETIF_MSG_INTR, "got an unknown interrupt! (status %u)\n",
		   status);

	return IRQ_HANDLED;
}

/* end of fast path */
static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event);

/*
 * General service functions
 */
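/* Acquire one of the shared hardware resource locks implemented in the
 * MISC driver-control registers; polls for up to ~5 seconds (1000 * 5ms)
 * before giving up with -EAGAIN.
 */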
static int bnx2x_acquire_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;
	int cnt;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is not already taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (lock_status & resource_bit) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EEXIST;
	}

	/* Try for 5 seconds every 5ms */
	for (cnt = 0; cnt < 1000; cnt++) {
		/* Try to acquire the lock */
		REG_WR(bp, hw_lock_control_reg + 4, resource_bit);
		lock_status = REG_RD(bp, hw_lock_control_reg);
		if (lock_status & resource_bit)
			return 0;

		msleep(5);
	}
	DP(NETIF_MSG_HW, "Timeout\n");
	return -EAGAIN;
}
static int bnx2x_release_hw_lock(struct bnx2x *bp, u32 resource)
{
	u32 lock_status;
	u32 resource_bit = (1 << resource);
	int func = BP_FUNC(bp);
	u32 hw_lock_control_reg;

	/* Validating that the resource is within range */
	if (resource > HW_LOCK_MAX_RESOURCE_VALUE) {
		DP(NETIF_MSG_HW,
		   "resource(0x%x) > HW_LOCK_MAX_RESOURCE_VALUE(0x%x)\n",
		   resource, HW_LOCK_MAX_RESOURCE_VALUE);
		return -EINVAL;
	}

	if (func <= 5)
		hw_lock_control_reg = (MISC_REG_DRIVER_CONTROL_1 + func*8);
	else
		hw_lock_control_reg =
				(MISC_REG_DRIVER_CONTROL_7 + (func - 6)*8);

	/* Validating that the resource is currently taken */
	lock_status = REG_RD(bp, hw_lock_control_reg);
	if (!(lock_status & resource_bit)) {
		DP(NETIF_MSG_HW, "lock_status 0x%x resource_bit 0x%x\n",
		   lock_status, resource_bit);
		return -EFAULT;
	}

	REG_WR(bp, hw_lock_control_reg, resource_bit);
	return 0;
}
/* HW Lock for shared dual port PHYs */
static void bnx2x_acquire_phy_lock(struct bnx2x *bp)
{
	mutex_lock(&bp->port.phy_mutex);

	if (bp->port.need_hw_lock)
		bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);
}

static void bnx2x_release_phy_lock(struct bnx2x *bp)
{
	if (bp->port.need_hw_lock)
		bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_MDIO);

	mutex_unlock(&bp->port.phy_mutex);
}
int bnx2x_get_gpio(struct bnx2x *bp, int gpio_num, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;
	int value;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	/* read GPIO value */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO);

	/* get the requested pin value */
	if ((gpio_reg & gpio_mask) == gpio_mask)
		value = 1;
	else
		value = 0;

	DP(NETIF_MSG_LINK, "pin %d value 0x%x\n", gpio_num, value);

	return value;
}
int bnx2x_set_gpio(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO and mask except the float bits */
	gpio_reg = (REG_RD(bp, MISC_REG_GPIO) & MISC_REGISTERS_GPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output low\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> output high\n",
		   gpio_num, gpio_shift);
		/* clear FLOAT and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_SET_POS);
		break;

	case MISC_REGISTERS_GPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set GPIO %d (shift %d) -> input\n",
		   gpio_num, gpio_shift);
		/* set FLOAT */
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
int bnx2x_set_gpio_int(struct bnx2x *bp, int gpio_num, u32 mode, u8 port)
{
	/* The GPIO should be swapped if swap register is set and active */
	int gpio_port = (REG_RD(bp, NIG_REG_PORT_SWAP) &&
			 REG_RD(bp, NIG_REG_STRAP_OVERRIDE)) ^ port;
	int gpio_shift = gpio_num +
			(gpio_port ? MISC_REGISTERS_GPIO_PORT_SHIFT : 0);
	u32 gpio_mask = (1 << gpio_shift);
	u32 gpio_reg;

	if (gpio_num > MISC_REGISTERS_GPIO_3) {
		BNX2X_ERR("Invalid GPIO %d\n", gpio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);
	/* read GPIO int */
	gpio_reg = REG_RD(bp, MISC_REG_GPIO_INT);

	switch (mode) {
	case MISC_REGISTERS_GPIO_INT_OUTPUT_CLR:
		DP(NETIF_MSG_LINK, "Clear GPIO INT %d (shift %d) -> "
				   "output low\n", gpio_num, gpio_shift);
		/* clear SET and set CLR */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		break;

	case MISC_REGISTERS_GPIO_INT_OUTPUT_SET:
		DP(NETIF_MSG_LINK, "Set GPIO INT %d (shift %d) -> "
				   "output high\n", gpio_num, gpio_shift);
		/* clear CLR and set SET */
		gpio_reg &= ~(gpio_mask << MISC_REGISTERS_GPIO_INT_CLR_POS);
		gpio_reg |= (gpio_mask << MISC_REGISTERS_GPIO_INT_SET_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_GPIO_INT, gpio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_GPIO);

	return 0;
}
static int bnx2x_set_spio(struct bnx2x *bp, int spio_num, u32 mode)
{
	u32 spio_mask = (1 << spio_num);
	u32 spio_reg;

	if ((spio_num < MISC_REGISTERS_SPIO_4) ||
	    (spio_num > MISC_REGISTERS_SPIO_7)) {
		BNX2X_ERR("Invalid SPIO %d\n", spio_num);
		return -EINVAL;
	}

	bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);
	/* read SPIO and mask except the float bits */
	spio_reg = (REG_RD(bp, MISC_REG_SPIO) & MISC_REGISTERS_SPIO_FLOAT);

	switch (mode) {
	case MISC_REGISTERS_SPIO_OUTPUT_LOW:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output low\n", spio_num);
		/* clear FLOAT and set CLR */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_CLR_POS);
		break;

	case MISC_REGISTERS_SPIO_OUTPUT_HIGH:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> output high\n", spio_num);
		/* clear FLOAT and set SET */
		spio_reg &= ~(spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_SET_POS);
		break;

	case MISC_REGISTERS_SPIO_INPUT_HI_Z:
		DP(NETIF_MSG_LINK, "Set SPIO %d -> input\n", spio_num);
		/* set FLOAT */
		spio_reg |= (spio_mask << MISC_REGISTERS_SPIO_FLOAT_POS);
		break;

	default:
		break;
	}

	REG_WR(bp, MISC_REG_SPIO, spio_reg);
	bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_SPIO);

	return 0;
}
static void bnx2x_calc_fc_adv(struct bnx2x *bp)
{
	switch (bp->link_vars.ieee_fc &
		MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_MASK) {
	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_NONE:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_BOTH:
		bp->port.advertising |= (ADVERTISED_Asym_Pause |
					 ADVERTISED_Pause);
		break;

	case MDIO_COMBO_IEEE0_AUTO_NEG_ADV_PAUSE_ASYMMETRIC:
		bp->port.advertising |= ADVERTISED_Asym_Pause;
		break;

	default:
		bp->port.advertising &= ~(ADVERTISED_Asym_Pause |
					  ADVERTISED_Pause);
		break;
	}
}
static void bnx2x_link_report(struct bnx2x *bp)
{
	if (bp->link_vars.link_up) {
		if (bp->state == BNX2X_STATE_OPEN)
			netif_carrier_on(bp->dev);
		printk(KERN_INFO PFX "%s NIC Link is Up, ", bp->dev->name);

		printk("%d Mbps ", bp->link_vars.line_speed);

		if (bp->link_vars.duplex == DUPLEX_FULL)
			printk("full duplex");
		else
			printk("half duplex");

		if (bp->link_vars.flow_ctrl != BNX2X_FLOW_CTRL_NONE) {
			if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) {
				printk(", receive ");
				if (bp->link_vars.flow_ctrl &
				    BNX2X_FLOW_CTRL_TX)
					printk("& transmit ");
			} else {
				printk(", transmit ");
			}
			printk("flow control ON");
		}
		printk("\n");

	} else { /* link_down */
		netif_carrier_off(bp->dev);
		printk(KERN_ERR PFX "%s NIC Link is Down\n", bp->dev->name);
	}
}
static u8 bnx2x_initial_phy_init(struct bnx2x *bp, int load_mode)
{
	if (!BP_NOMCP(bp)) {
		u8 rc;

		/* Initialize link parameters structure variables */
		/* It is recommended to turn off RX FC for jumbo frames
		   for better performance */
		if (IS_E1HMF(bp))
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;
		else if (bp->dev->mtu > 5000)
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_TX;
		else
			bp->link_params.req_fc_auto_adv = BNX2X_FLOW_CTRL_BOTH;

		bnx2x_acquire_phy_lock(bp);

		if (load_mode == LOAD_DIAG)
			bp->link_params.loopback_mode = LOOPBACK_XGXS_10;

		rc = bnx2x_phy_init(&bp->link_params, &bp->link_vars);

		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);

		if (CHIP_REV_IS_SLOW(bp) && bp->link_vars.link_up) {
			bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
			bnx2x_link_report(bp);
		}

		return rc;
	}
	BNX2X_ERR("Bootcode is missing - can not initialize link\n");
	return -EINVAL;
}
static void bnx2x_link_set(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_phy_init(&bp->link_params, &bp->link_vars);
		bnx2x_release_phy_lock(bp);

		bnx2x_calc_fc_adv(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not set link\n");
}
static void bnx2x__link_reset(struct bnx2x *bp)
{
	if (!BP_NOMCP(bp)) {
		bnx2x_acquire_phy_lock(bp);
		bnx2x_link_reset(&bp->link_params, &bp->link_vars, 1);
		bnx2x_release_phy_lock(bp);
	} else
		BNX2X_ERR("Bootcode is missing - can not reset link\n");
}
static u8 bnx2x_link_test(struct bnx2x *bp)
{
	u8 rc;

	bnx2x_acquire_phy_lock(bp);
	rc = bnx2x_test_link(&bp->link_params, &bp->link_vars);
	bnx2x_release_phy_lock(bp);

	return rc;
}
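/* Set up the per-port rate shaping and fairness (CMNG) timers and
 * thresholds from the current line speed; the per-VN counterpart below
 * programs each virtual function's min/max bandwidth share into XSTORM
 * internal memory.
 */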
static void bnx2x_init_port_minmax(struct bnx2x *bp)
{
	u32 r_param = bp->link_vars.line_speed / 8;
	u32 fair_periodic_timeout_usec;
	u32 t_fair;

	memset(&(bp->cmng.rs_vars), 0,
	       sizeof(struct rate_shaping_vars_per_port));
	memset(&(bp->cmng.fair_vars), 0, sizeof(struct fairness_vars_per_port));

	/* 100 usec in SDM ticks = 25 since each tick is 4 usec */
	bp->cmng.rs_vars.rs_periodic_timeout = RS_PERIODIC_TIMEOUT_USEC / 4;

	/* this is the threshold below which no timer arming will occur
	   1.25 coefficient is for the threshold to be a little bigger
	   than the real time, to compensate for timer inaccuracy */
	bp->cmng.rs_vars.rs_threshold =
				(RS_PERIODIC_TIMEOUT_USEC * r_param * 5) / 4;

	/* resolution of fairness timer */
	fair_periodic_timeout_usec = QM_ARB_BYTES / r_param;
	/* for 10G it is 1000usec. for 1G it is 10000usec. */
	t_fair = T_FAIR_COEF / bp->link_vars.line_speed;

	/* this is the threshold below which we won't arm the timer anymore */
	bp->cmng.fair_vars.fair_threshold = QM_ARB_BYTES;

	/* we multiply by 1e3/8 to get bytes/msec.
	   We don't want the credits to pass a credit
	   of the t_fair*FAIR_MEM (algorithm resolution) */
	bp->cmng.fair_vars.upper_bound = r_param * t_fair * FAIR_MEM;
	/* since each tick is 4 usec */
	bp->cmng.fair_vars.fairness_timeout = fair_periodic_timeout_usec / 4;
}
2216 static void bnx2x_init_vn_minmax(struct bnx2x *bp, int func)
2218 struct rate_shaping_vars_per_vn m_rs_vn;
2219 struct fairness_vars_per_vn m_fair_vn;
2220 u32 vn_cfg = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
2221 u16 vn_min_rate, vn_max_rate;
2224 /* If function is hidden - set min and max to zeroes */
2225 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE) {
2230 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
2231 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
2232 /* If fairness is enabled (not all min rates are zeroes) and
2233 if current min rate is zero - set it to 1.
2234 This is a requirement of the algorithm. */
2235 if (bp->vn_weight_sum && (vn_min_rate == 0))
2236 vn_min_rate = DEF_MIN_RATE;
2237 vn_max_rate = ((vn_cfg & FUNC_MF_CFG_MAX_BW_MASK) >>
2238 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
2242 "func %d: vn_min_rate=%d vn_max_rate=%d vn_weight_sum=%d\n",
2243 func, vn_min_rate, vn_max_rate, bp->vn_weight_sum);
2245 memset(&m_rs_vn, 0, sizeof(struct rate_shaping_vars_per_vn));
2246 memset(&m_fair_vn, 0, sizeof(struct fairness_vars_per_vn));
2248 /* global vn counter - maximal Mbps for this vn */
2249 m_rs_vn.vn_counter.rate = vn_max_rate;
2251 /* quota - number of bytes transmitted in this period */
2252 m_rs_vn.vn_counter.quota =
2253 (vn_max_rate * RS_PERIODIC_TIMEOUT_USEC) / 8;
2255 if (bp->vn_weight_sum) {
2256 /* credit for each period of the fairness algorithm:
2257 number of bytes in T_FAIR (the vns share the port rate).
2258 vn_weight_sum should not be larger than 10000, thus
2259 T_FAIR_COEF / (8 * vn_weight_sum) will always be greater
2260 than zero */
2261 m_fair_vn.vn_credit_delta =
2262 max((u32)(vn_min_rate * (T_FAIR_COEF /
2263 (8 * bp->vn_weight_sum))),
2264 (u32)(bp->cmng.fair_vars.fair_threshold * 2));
2265 DP(NETIF_MSG_IFUP, "m_fair_vn.vn_credit_delta=%d\n",
2266 m_fair_vn.vn_credit_delta);
2269 /* Store it to internal memory */
2270 for (i = 0; i < sizeof(struct rate_shaping_vars_per_vn)/4; i++)
2271 REG_WR(bp, BAR_XSTRORM_INTMEM +
2272 XSTORM_RATE_SHAPING_PER_VN_VARS_OFFSET(func) + i * 4,
2273 ((u32 *)(&m_rs_vn))[i]);
2275 for (i = 0; i < sizeof(struct fairness_vars_per_vn)/4; i++)
2276 REG_WR(bp, BAR_XSTRORM_INTMEM +
2277 XSTORM_FAIRNESS_PER_VN_VARS_OFFSET(func) + i * 4,
2278 ((u32 *)(&m_fair_vn))[i]);
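/*
 * The two loops above use a recurring pattern in this driver: a host
 * struct is mirrored into device internal memory one 32-bit word at a
 * time.  A minimal sketch of the pattern (illustrative only; reg_write
 * stands in for REG_WR and is not a real driver helper):
 */
#if 0 /* illustration only */
static void copy_to_dev_mem(void (*reg_write)(u32 addr, u32 val),
			    u32 base, const void *src, size_t bytes)
{
	size_t i;

	/* assumes the struct size is a multiple of 4 bytes */
	for (i = 0; i < bytes / 4; i++)
		reg_write(base + i * 4, ((const u32 *)src)[i]);
}
#endif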
2282 /* This function is called upon link interrupt */
2283 static void bnx2x_link_attn(struct bnx2x *bp)
2285 /* Make sure that we are synced with the current statistics */
2286 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2288 bnx2x_link_update(&bp->link_params, &bp->link_vars);
2290 if (bp->link_vars.link_up) {
2292 /* dropless flow control */
2293 if (CHIP_IS_E1H(bp)) {
2294 int port = BP_PORT(bp);
2295 u32 pause_enabled = 0;
2297 if (bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX)
2300 REG_WR(bp, BAR_USTRORM_INTMEM +
2301 USTORM_PAUSE_ENABLED_OFFSET(port),
2305 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
2306 struct host_port_stats *pstats;
2308 pstats = bnx2x_sp(bp, port_stats);
2309 /* reset old bmac stats */
2310 memset(&(pstats->mac_stx[0]), 0,
2311 sizeof(struct mac_stx));
2313 if ((bp->state == BNX2X_STATE_OPEN) ||
2314 (bp->state == BNX2X_STATE_DISABLED))
2315 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2318 /* indicate link status */
2319 bnx2x_link_report(bp);
2322 int port = BP_PORT(bp);
2326 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
2327 if (vn == BP_E1HVN(bp))
2330 func = ((vn << 1) | port);
2332 /* Set the attention towards other drivers
2333 on the same port */
2334 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_0 +
2335 (LINK_SYNC_ATTENTION_BIT_FUNC_0 + func)*4, 1);
2338 if (bp->link_vars.link_up) {
2341 /* Init rate shaping and fairness contexts */
2342 bnx2x_init_port_minmax(bp);
2344 for (vn = VN_0; vn < E1HVN_MAX; vn++)
2345 bnx2x_init_vn_minmax(bp, 2*vn + port);
2347 /* Store it to internal memory */
2349 i < sizeof(struct cmng_struct_per_port) / 4; i++)
2350 REG_WR(bp, BAR_XSTRORM_INTMEM +
2351 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i*4,
2352 ((u32 *)(&bp->cmng))[i]);
2357 static void bnx2x__link_status_update(struct bnx2x *bp)
2359 if (bp->state != BNX2X_STATE_OPEN)
2362 bnx2x_link_status_update(&bp->link_params, &bp->link_vars);
2364 if (bp->link_vars.link_up)
2365 bnx2x_stats_handle(bp, STATS_EVENT_LINK_UP);
2367 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
2369 /* indicate link status */
2370 bnx2x_link_report(bp);
2373 static void bnx2x_pmf_update(struct bnx2x *bp)
2375 int port = BP_PORT(bp);
2379 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
2381 /* enable nig attention */
2382 val = (0xff0f | (1 << (BP_E1HVN(bp) + 4)));
2383 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, val);
2384 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, val);
2386 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
2394 * General service functions
2397 /* the slow path queue is odd since completions arrive on the fastpath ring */
2398 static int bnx2x_sp_post(struct bnx2x *bp, int command, int cid,
2399 u32 data_hi, u32 data_lo, int common)
2401 int func = BP_FUNC(bp);
2403 DP(BNX2X_MSG_SP/*NETIF_MSG_TIMER*/,
2404 "SPQE (%x:%x) command %d hw_cid %x data (%x:%x) left %x\n",
2405 (u32)U64_HI(bp->spq_mapping), (u32)(U64_LO(bp->spq_mapping) +
2406 (void *)bp->spq_prod_bd - (void *)bp->spq), command,
2407 HW_CID(bp, cid), data_hi, data_lo, bp->spq_left);
2409 #ifdef BNX2X_STOP_ON_ERROR
2410 if (unlikely(bp->panic))
2414 spin_lock_bh(&bp->spq_lock);
2416 if (!bp->spq_left) {
2417 BNX2X_ERR("BUG! SPQ ring full!\n");
2418 spin_unlock_bh(&bp->spq_lock);
2423 /* CID needs port number to be encoded in it */
2424 bp->spq_prod_bd->hdr.conn_and_cmd_data =
2425 cpu_to_le32(((command << SPE_HDR_CMD_ID_SHIFT) |
2427 bp->spq_prod_bd->hdr.type = cpu_to_le16(ETH_CONNECTION_TYPE);
2429 bp->spq_prod_bd->hdr.type |=
2430 cpu_to_le16((1 << SPE_HDR_COMMON_RAMROD_SHIFT));
2432 bp->spq_prod_bd->data.mac_config_addr.hi = cpu_to_le32(data_hi);
2433 bp->spq_prod_bd->data.mac_config_addr.lo = cpu_to_le32(data_lo);
2437 if (bp->spq_prod_bd == bp->spq_last_bd) {
2438 bp->spq_prod_bd = bp->spq;
2439 bp->spq_prod_idx = 0;
2440 DP(NETIF_MSG_TIMER, "end of spq\n");
2447 /* Make sure that BD data is updated before writing the producer */
2450 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_SPQ_PROD_OFFSET(func),
2455 spin_unlock_bh(&bp->spq_lock);
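/*
 * Sketch of the single-producer ring discipline bnx2x_sp_post() uses
 * above (illustration only; the struct and field names are invented,
 * not the driver's): write the BD, wrap the producer at the last BD,
 * make the BD globally visible, then ring the doorbell.
 */
#if 0 /* illustration only */
struct bd { u64 data; };

struct sp_ring {
	struct bd *base, *last, *prod;
	u16 prod_idx;
	void __iomem *doorbell;
};

static void sp_ring_post(struct sp_ring *r, const struct bd *bd)
{
	*r->prod = *bd;
	if (r->prod == r->last) {	/* wrap back to the ring base */
		r->prod = r->base;
		r->prod_idx = 0;
	} else {
		r->prod++;
		r->prod_idx++;
	}
	wmb();		/* BD must be visible before the producer update */
	writel(r->prod_idx, r->doorbell);
}
#endif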
2459 /* acquire split MCP access lock register */
2460 static int bnx2x_acquire_alr(struct bnx2x *bp)
2467 for (j = 0; j < i*10; j++) {
2469 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
2470 val = REG_RD(bp, GRCBASE_MCP + 0x9c);
2471 if (val & (1L << 31))
2476 if (!(val & (1L << 31))) {
2477 BNX2X_ERR("Cannot acquire MCP access lock register\n");
2484 /* release split MCP access lock register */
2485 static void bnx2x_release_alr(struct bnx2x *bp)
2489 REG_WR(bp, GRCBASE_MCP + 0x9c, val);
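/*
 * The ALR acquire above is a set-and-poll hardware lock: keep writing
 * the ownership bit and re-reading until the hardware reflects it, or
 * give up.  Generic sketch of the same shape (illustration only; the
 * helper name and parameters are invented):
 */
#if 0 /* illustration only */
static int hw_bitlock_acquire(struct bnx2x *bp, u32 reg, u32 own_bit,
			      int attempts)
{
	u32 val;

	while (attempts--) {
		REG_WR(bp, reg, own_bit);	/* request ownership */
		val = REG_RD(bp, reg);
		if (val & own_bit)
			return 0;		/* granted */
		msleep(5);
	}
	return -EBUSY;
}
#endif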
2492 static inline u16 bnx2x_update_dsb_idx(struct bnx2x *bp)
2494 struct host_def_status_block *def_sb = bp->def_status_blk;
2497 barrier(); /* status block is written to by the chip */
2498 if (bp->def_att_idx != def_sb->atten_status_block.attn_bits_index) {
2499 bp->def_att_idx = def_sb->atten_status_block.attn_bits_index;
2502 if (bp->def_c_idx != def_sb->c_def_status_block.status_block_index) {
2503 bp->def_c_idx = def_sb->c_def_status_block.status_block_index;
2506 if (bp->def_u_idx != def_sb->u_def_status_block.status_block_index) {
2507 bp->def_u_idx = def_sb->u_def_status_block.status_block_index;
2510 if (bp->def_x_idx != def_sb->x_def_status_block.status_block_index) {
2511 bp->def_x_idx = def_sb->x_def_status_block.status_block_index;
2514 if (bp->def_t_idx != def_sb->t_def_status_block.status_block_index) {
2515 bp->def_t_idx = def_sb->t_def_status_block.status_block_index;
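/*
 * The helper above snapshots each default-status-block index and
 * returns a bitmask with one bit set per index that moved, so the
 * caller acks only the storms that produced new events.  Minimal
 * sketch of the per-index step (illustration only; names invented):
 */
#if 0 /* illustration only */
static u16 update_one_idx(u16 *cached, u16 hw_idx, u16 changed_bit)
{
	if (*cached != hw_idx) {
		*cached = hw_idx;	/* remember the new index */
		return changed_bit;	/* report "this block moved" */
	}
	return 0;
}
#endif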
2522 * slow path service functions
2525 static void bnx2x_attn_int_asserted(struct bnx2x *bp, u32 asserted)
2527 int port = BP_PORT(bp);
2528 u32 hc_addr = (HC_REG_COMMAND_REG + port*32 +
2529 COMMAND_REG_ATTN_BITS_SET);
2530 u32 aeu_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2531 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2532 u32 nig_int_mask_addr = port ? NIG_REG_MASK_INTERRUPT_PORT1 :
2533 NIG_REG_MASK_INTERRUPT_PORT0;
2537 if (bp->attn_state & asserted)
2538 BNX2X_ERR("IGU ERROR\n");
2540 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2541 aeu_mask = REG_RD(bp, aeu_addr);
2543 DP(NETIF_MSG_HW, "aeu_mask %x newly asserted %x\n",
2544 aeu_mask, asserted);
2545 aeu_mask &= ~(asserted & 0xff);
2546 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2548 REG_WR(bp, aeu_addr, aeu_mask);
2549 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2551 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2552 bp->attn_state |= asserted;
2553 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2555 if (asserted & ATTN_HARD_WIRED_MASK) {
2556 if (asserted & ATTN_NIG_FOR_FUNC) {
2558 bnx2x_acquire_phy_lock(bp);
2560 /* save nig interrupt mask */
2561 nig_mask = REG_RD(bp, nig_int_mask_addr);
2562 REG_WR(bp, nig_int_mask_addr, 0);
2564 bnx2x_link_attn(bp);
2566 /* handle unicore attn? */
2568 if (asserted & ATTN_SW_TIMER_4_FUNC)
2569 DP(NETIF_MSG_HW, "ATTN_SW_TIMER_4_FUNC!\n");
2571 if (asserted & GPIO_2_FUNC)
2572 DP(NETIF_MSG_HW, "GPIO_2_FUNC!\n");
2574 if (asserted & GPIO_3_FUNC)
2575 DP(NETIF_MSG_HW, "GPIO_3_FUNC!\n");
2577 if (asserted & GPIO_4_FUNC)
2578 DP(NETIF_MSG_HW, "GPIO_4_FUNC!\n");
2581 if (asserted & ATTN_GENERAL_ATTN_1) {
2582 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_1!\n");
2583 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_1, 0x0);
2585 if (asserted & ATTN_GENERAL_ATTN_2) {
2586 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_2!\n");
2587 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_2, 0x0);
2589 if (asserted & ATTN_GENERAL_ATTN_3) {
2590 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_3!\n");
2591 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_3, 0x0);
2594 if (asserted & ATTN_GENERAL_ATTN_4) {
2595 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_4!\n");
2596 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_4, 0x0);
2598 if (asserted & ATTN_GENERAL_ATTN_5) {
2599 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_5!\n");
2600 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_5, 0x0);
2602 if (asserted & ATTN_GENERAL_ATTN_6) {
2603 DP(NETIF_MSG_HW, "ATTN_GENERAL_ATTN_6!\n");
2604 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_6, 0x0);
2608 } /* if hardwired */
2610 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2612 REG_WR(bp, hc_addr, asserted);
2614 /* now set back the mask */
2615 if (asserted & ATTN_NIG_FOR_FUNC) {
2616 REG_WR(bp, nig_int_mask_addr, nig_mask);
2617 bnx2x_release_phy_lock(bp);
2621 static inline void bnx2x_fan_failure(struct bnx2x *bp)
2623 int port = BP_PORT(bp);
2625 /* mark the failure */
2626 bp->link_params.ext_phy_config &= ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
2627 bp->link_params.ext_phy_config |= PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE;
2628 SHMEM_WR(bp, dev_info.port_hw_config[port].external_phy_config,
2629 bp->link_params.ext_phy_config);
2631 /* log the failure */
2632 printk(KERN_ERR PFX "Fan Failure on Network Controller %s has caused"
2633 " the driver to shutdown the card to prevent permanent"
2634 " damage. Please contact Dell Support for assistance\n",
2637 static inline void bnx2x_attn_int_deasserted0(struct bnx2x *bp, u32 attn)
2639 int port = BP_PORT(bp);
2641 u32 val, swap_val, swap_override;
2643 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
2644 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
2646 if (attn & AEU_INPUTS_ATTN_BITS_SPIO5) {
2648 val = REG_RD(bp, reg_offset);
2649 val &= ~AEU_INPUTS_ATTN_BITS_SPIO5;
2650 REG_WR(bp, reg_offset, val);
2652 BNX2X_ERR("SPIO5 hw attention\n");
2654 /* Fan failure attention */
2655 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
2656 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
2657 /* Low power mode is controlled by GPIO 2 */
2658 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_2,
2659 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2660 /* The PHY reset is controlled by GPIO 1 */
2661 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2662 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2665 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
2666 /* The PHY reset is controlled by GPIO 1 */
2667 /* fake the port number to cancel the swap done in
2668 set_gpio() */
2669 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
2670 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
2671 port = (swap_val && swap_override) ^ 1;
2672 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_1,
2673 MISC_REGISTERS_GPIO_OUTPUT_LOW, port);
2679 bnx2x_fan_failure(bp);
2682 if (attn & (AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 |
2683 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1)) {
2684 bnx2x_acquire_phy_lock(bp);
2685 bnx2x_handle_module_detect_int(&bp->link_params);
2686 bnx2x_release_phy_lock(bp);
2689 if (attn & HW_INTERRUT_ASSERT_SET_0) {
2691 val = REG_RD(bp, reg_offset);
2692 val &= ~(attn & HW_INTERRUT_ASSERT_SET_0);
2693 REG_WR(bp, reg_offset, val);
2695 BNX2X_ERR("FATAL HW block attention set0 0x%x\n",
2696 (attn & HW_INTERRUT_ASSERT_SET_0));
2701 static inline void bnx2x_attn_int_deasserted1(struct bnx2x *bp, u32 attn)
2705 if (attn & AEU_INPUTS_ATTN_BITS_DOORBELLQ_HW_INTERRUPT) {
2707 val = REG_RD(bp, DORQ_REG_DORQ_INT_STS_CLR);
2708 BNX2X_ERR("DB hw attention 0x%x\n", val);
2709 /* DORQ discard attention */
2711 BNX2X_ERR("FATAL error from DORQ\n");
2714 if (attn & HW_INTERRUT_ASSERT_SET_1) {
2716 int port = BP_PORT(bp);
2719 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_1 :
2720 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_1);
2722 val = REG_RD(bp, reg_offset);
2723 val &= ~(attn & HW_INTERRUT_ASSERT_SET_1);
2724 REG_WR(bp, reg_offset, val);
2726 BNX2X_ERR("FATAL HW block attention set1 0x%x\n",
2727 (attn & HW_INTERRUT_ASSERT_SET_1));
2732 static inline void bnx2x_attn_int_deasserted2(struct bnx2x *bp, u32 attn)
2736 if (attn & AEU_INPUTS_ATTN_BITS_CFC_HW_INTERRUPT) {
2738 val = REG_RD(bp, CFC_REG_CFC_INT_STS_CLR);
2739 BNX2X_ERR("CFC hw attention 0x%x\n", val);
2740 /* CFC error attention */
2742 BNX2X_ERR("FATAL error from CFC\n");
2745 if (attn & AEU_INPUTS_ATTN_BITS_PXP_HW_INTERRUPT) {
2747 val = REG_RD(bp, PXP_REG_PXP_INT_STS_CLR_0);
2748 BNX2X_ERR("PXP hw attention 0x%x\n", val);
2749 /* RQ_USDMDP_FIFO_OVERFLOW */
2751 BNX2X_ERR("FATAL error from PXP\n");
2754 if (attn & HW_INTERRUT_ASSERT_SET_2) {
2756 int port = BP_PORT(bp);
2759 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_2 :
2760 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_2);
2762 val = REG_RD(bp, reg_offset);
2763 val &= ~(attn & HW_INTERRUT_ASSERT_SET_2);
2764 REG_WR(bp, reg_offset, val);
2766 BNX2X_ERR("FATAL HW block attention set2 0x%x\n",
2767 (attn & HW_INTERRUT_ASSERT_SET_2));
2772 static inline void bnx2x_attn_int_deasserted3(struct bnx2x *bp, u32 attn)
2776 if (attn & EVEREST_GEN_ATTN_IN_USE_MASK) {
2778 if (attn & BNX2X_PMF_LINK_ASSERT) {
2779 int func = BP_FUNC(bp);
2781 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
2782 bnx2x__link_status_update(bp);
2783 if (SHMEM_RD(bp, func_mb[func].drv_status) &
2785 bnx2x_pmf_update(bp);
2787 } else if (attn & BNX2X_MC_ASSERT_BITS) {
2789 BNX2X_ERR("MC assert!\n");
2790 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_10, 0);
2791 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_9, 0);
2792 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_8, 0);
2793 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_7, 0);
2796 } else if (attn & BNX2X_MCP_ASSERT) {
2798 BNX2X_ERR("MCP assert!\n");
2799 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_11, 0);
2803 BNX2X_ERR("Unknown HW assert! (attn 0x%x)\n", attn);
2806 if (attn & EVEREST_LATCHED_ATTN_IN_USE_MASK) {
2807 BNX2X_ERR("LATCHED attention 0x%08x (masked)\n", attn);
2808 if (attn & BNX2X_GRC_TIMEOUT) {
2809 val = CHIP_IS_E1H(bp) ?
2810 REG_RD(bp, MISC_REG_GRC_TIMEOUT_ATTN) : 0;
2811 BNX2X_ERR("GRC time-out 0x%08x\n", val);
2813 if (attn & BNX2X_GRC_RSV) {
2814 val = CHIP_IS_E1H(bp) ?
2815 REG_RD(bp, MISC_REG_GRC_RSV_ATTN) : 0;
2816 BNX2X_ERR("GRC reserved 0x%08x\n", val);
2818 REG_WR(bp, MISC_REG_AEU_CLR_LATCH_SIGNAL, 0x7ff);
2822 static void bnx2x_attn_int_deasserted(struct bnx2x *bp, u32 deasserted)
2824 struct attn_route attn;
2825 struct attn_route group_mask;
2826 int port = BP_PORT(bp);
2832 /* need to take HW lock because MCP or other port might also
2833 try to handle this event */
2834 bnx2x_acquire_alr(bp);
2836 attn.sig[0] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + port*4);
2837 attn.sig[1] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_2_FUNC_0 + port*4);
2838 attn.sig[2] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_3_FUNC_0 + port*4);
2839 attn.sig[3] = REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_4_FUNC_0 + port*4);
2840 DP(NETIF_MSG_HW, "attn: %08x %08x %08x %08x\n",
2841 attn.sig[0], attn.sig[1], attn.sig[2], attn.sig[3]);
2843 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
2844 if (deasserted & (1 << index)) {
2845 group_mask = bp->attn_group[index];
2847 DP(NETIF_MSG_HW, "group[%d]: %08x %08x %08x %08x\n",
2848 index, group_mask.sig[0], group_mask.sig[1],
2849 group_mask.sig[2], group_mask.sig[3]);
2851 bnx2x_attn_int_deasserted3(bp,
2852 attn.sig[3] & group_mask.sig[3]);
2853 bnx2x_attn_int_deasserted1(bp,
2854 attn.sig[1] & group_mask.sig[1]);
2855 bnx2x_attn_int_deasserted2(bp,
2856 attn.sig[2] & group_mask.sig[2]);
2857 bnx2x_attn_int_deasserted0(bp,
2858 attn.sig[0] & group_mask.sig[0]);
2860 if ((attn.sig[0] & group_mask.sig[0] &
2861 HW_PRTY_ASSERT_SET_0) ||
2862 (attn.sig[1] & group_mask.sig[1] &
2863 HW_PRTY_ASSERT_SET_1) ||
2864 (attn.sig[2] & group_mask.sig[2] &
2865 HW_PRTY_ASSERT_SET_2))
2866 BNX2X_ERR("FATAL HW block parity attention\n");
2870 bnx2x_release_alr(bp);
2872 reg_addr = (HC_REG_COMMAND_REG + port*32 + COMMAND_REG_ATTN_BITS_CLR);
2875 DP(NETIF_MSG_HW, "about to mask 0x%08x at HC addr 0x%x\n",
2877 REG_WR(bp, reg_addr, val);
2879 if (~bp->attn_state & deasserted)
2880 BNX2X_ERR("IGU ERROR\n");
2882 reg_addr = port ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
2883 MISC_REG_AEU_MASK_ATTN_FUNC_0;
2885 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2886 aeu_mask = REG_RD(bp, reg_addr);
2888 DP(NETIF_MSG_HW, "aeu_mask %x newly deasserted %x\n",
2889 aeu_mask, deasserted);
2890 aeu_mask |= (deasserted & 0xff);
2891 DP(NETIF_MSG_HW, "new mask %x\n", aeu_mask);
2893 REG_WR(bp, reg_addr, aeu_mask);
2894 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_PORT0_ATT_MASK + port);
2896 DP(NETIF_MSG_HW, "attn_state %x\n", bp->attn_state);
2897 bp->attn_state &= ~deasserted;
2898 DP(NETIF_MSG_HW, "new state %x\n", bp->attn_state);
2901 static void bnx2x_attn_int(struct bnx2x *bp)
2903 /* read local copy of bits */
2904 u32 attn_bits = le32_to_cpu(bp->def_status_blk->atten_status_block.
2906 u32 attn_ack = le32_to_cpu(bp->def_status_blk->atten_status_block.
2908 u32 attn_state = bp->attn_state;
2910 /* look for changed bits */
2911 u32 asserted = attn_bits & ~attn_ack & ~attn_state;
2912 u32 deasserted = ~attn_bits & attn_ack & attn_state;
2915 "attn_bits %x attn_ack %x asserted %x deasserted %x\n",
2916 attn_bits, attn_ack, asserted, deasserted);
2918 if (~(attn_bits ^ attn_ack) & (attn_bits ^ attn_state))
2919 BNX2X_ERR("BAD attention state\n");
2921 /* handle bits that were raised */
2923 bnx2x_attn_int_asserted(bp, asserted);
2926 bnx2x_attn_int_deasserted(bp, deasserted);
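/*
 * The bit algebra above in one line each: a bit is "asserted" when it
 * is newly raised (set in attn_bits, not yet acked, not in our state)
 * and "deasserted" when it is newly dropped (clear in attn_bits, still
 * acked and still in our state).  A tiny standalone check (illustration
 * only, not driver code):
 */
#if 0 /* illustration only */
#include <assert.h>
#include <stdint.h>

int main(void)
{
	uint32_t bits = 0x5, ack = 0x1, state = 0x1;
	uint32_t asserted = bits & ~ack & ~state;	/* 0x4: bit 2 raised */
	uint32_t deasserted = ~bits & ack & state;	/* 0x0: none dropped */

	assert(asserted == 0x4 && deasserted == 0x0);
	return 0;
}
#endif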
2929 static void bnx2x_sp_task(struct work_struct *work)
2931 struct bnx2x *bp = container_of(work, struct bnx2x, sp_task.work);
2935 /* Return here if interrupt is disabled */
2936 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2937 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2941 status = bnx2x_update_dsb_idx(bp);
2942 /* if (status == 0) */
2943 /* BNX2X_ERR("spurious slowpath interrupt!\n"); */
2945 DP(NETIF_MSG_INTR, "got a slowpath interrupt (updated %x)\n", status);
2951 bnx2x_ack_sb(bp, DEF_SB_ID, ATTENTION_ID, le16_to_cpu(bp->def_att_idx),
2953 bnx2x_ack_sb(bp, DEF_SB_ID, USTORM_ID, le16_to_cpu(bp->def_u_idx),
2955 bnx2x_ack_sb(bp, DEF_SB_ID, CSTORM_ID, le16_to_cpu(bp->def_c_idx),
2957 bnx2x_ack_sb(bp, DEF_SB_ID, XSTORM_ID, le16_to_cpu(bp->def_x_idx),
2959 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, le16_to_cpu(bp->def_t_idx),
2964 static irqreturn_t bnx2x_msix_sp_int(int irq, void *dev_instance)
2966 struct net_device *dev = dev_instance;
2967 struct bnx2x *bp = netdev_priv(dev);
2969 /* Return here if interrupt is disabled */
2970 if (unlikely(atomic_read(&bp->intr_sem) != 0)) {
2971 DP(NETIF_MSG_INTR, "called but intr_sem not 0, returning\n");
2975 bnx2x_ack_sb(bp, DEF_SB_ID, TSTORM_ID, 0, IGU_INT_DISABLE, 0);
2977 #ifdef BNX2X_STOP_ON_ERROR
2978 if (unlikely(bp->panic))
2982 queue_delayed_work(bnx2x_wq, &bp->sp_task, 0);
2987 /* end of slow path */
2991 /****************************************************************************
2993 ****************************************************************************/
2995 /* sum[hi:lo] += add[hi:lo] */
2996 #define ADD_64(s_hi, a_hi, s_lo, a_lo) \
2999 s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0); \
3002 /* difference = minuend - subtrahend */
3003 #define DIFF_64(d_hi, m_hi, s_hi, d_lo, m_lo, s_lo) \
3005 if (m_lo < s_lo) { \
3007 d_hi = m_hi - s_hi; \
3009 /* we can 'loan' 1 */ \
3011 d_lo = m_lo + (UINT_MAX - s_lo) + 1; \
3013 /* m_hi <= s_hi */ \
3018 /* m_lo >= s_lo */ \
3019 if (m_hi < s_hi) { \
3023 /* m_hi >= s_hi */ \
3024 d_hi = m_hi - s_hi; \
3025 d_lo = m_lo - s_lo; \
3030 #define UPDATE_STAT64(s, t) \
3032 DIFF_64(diff.hi, new->s##_hi, pstats->mac_stx[0].t##_hi, \
3033 diff.lo, new->s##_lo, pstats->mac_stx[0].t##_lo); \
3034 pstats->mac_stx[0].t##_hi = new->s##_hi; \
3035 pstats->mac_stx[0].t##_lo = new->s##_lo; \
3036 ADD_64(pstats->mac_stx[1].t##_hi, diff.hi, \
3037 pstats->mac_stx[1].t##_lo, diff.lo); \
3040 #define UPDATE_STAT64_NIG(s, t) \
3042 DIFF_64(diff.hi, new->s##_hi, old->s##_hi, \
3043 diff.lo, new->s##_lo, old->s##_lo); \
3044 ADD_64(estats->t##_hi, diff.hi, \
3045 estats->t##_lo, diff.lo); \
3048 /* sum[hi:lo] += add */
3049 #define ADD_EXTEND_64(s_hi, s_lo, a) \
3052 s_hi += (s_lo < a) ? 1 : 0; \
3055 #define UPDATE_EXTEND_STAT(s) \
3057 ADD_EXTEND_64(pstats->mac_stx[1].s##_hi, \
3058 pstats->mac_stx[1].s##_lo, \
3062 #define UPDATE_EXTEND_TSTAT(s, t) \
3064 diff = le32_to_cpu(tclient->s) - le32_to_cpu(old_tclient->s); \
3065 old_tclient->s = tclient->s; \
3066 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3069 #define UPDATE_EXTEND_USTAT(s, t) \
3071 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3072 old_uclient->s = uclient->s; \
3073 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3076 #define UPDATE_EXTEND_XSTAT(s, t) \
3078 diff = le32_to_cpu(xclient->s) - le32_to_cpu(old_xclient->s); \
3079 old_xclient->s = xclient->s; \
3080 ADD_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3083 /* minuend -= subtrahend */
3084 #define SUB_64(m_hi, s_hi, m_lo, s_lo) \
3086 DIFF_64(m_hi, m_hi, s_hi, m_lo, m_lo, s_lo); \
3089 /* minuend[hi:lo] -= subtrahend */
3090 #define SUB_EXTEND_64(m_hi, m_lo, s) \
3092 SUB_64(m_hi, 0, m_lo, s); \
3095 #define SUB_EXTEND_USTAT(s, t) \
3097 diff = le32_to_cpu(uclient->s) - le32_to_cpu(old_uclient->s); \
3098 SUB_EXTEND_64(qstats->t##_hi, qstats->t##_lo, diff); \
3102 * General service functions
3105 static inline long bnx2x_hilo(u32 *hiref)
3107 u32 lo = *(hiref + 1);
3108 #if (BITS_PER_LONG == 64)
3111 return HILO_U64(hi, lo);
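/*
 * Standalone check of the split hi/lo arithmetic used by the macros
 * above (illustration only, not driver code): ADD_64 carries when the
 * low word wraps, and DIFF_64 "loans" 1 from the high word when the
 * subtrahend's low word is larger.
 */
#if 0 /* illustration only */
#include <assert.h>
#include <limits.h>

int main(void)
{
	unsigned int s_hi = 0, s_lo = UINT_MAX, a_hi = 0, a_lo = 1;
	unsigned int m_hi = 1, m_lo = 0, x_hi = 0, x_lo = 1, d_hi, d_lo;

	/* ADD_64: the low half wraps to 0, so a carry reaches the high half */
	s_lo += a_lo;
	s_hi += a_hi + ((s_lo < a_lo) ? 1 : 0);
	assert(s_hi == 1 && s_lo == 0);

	/* DIFF_64 borrow case: 1:0 - 0:1 == 0:UINT_MAX */
	d_hi = m_hi - x_hi - 1;			/* loan 1 to the low word */
	d_lo = m_lo + (UINT_MAX - x_lo) + 1;
	assert(d_hi == 0 && d_lo == UINT_MAX);
	return 0;
}
#endif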
3118 * Init service functions
3121 static void bnx2x_storm_stats_post(struct bnx2x *bp)
3123 if (!bp->stats_pending) {
3124 struct eth_query_ramrod_data ramrod_data = {0};
3127 ramrod_data.drv_counter = bp->stats_counter++;
3128 ramrod_data.collect_port = bp->port.pmf ? 1 : 0;
3129 for_each_queue(bp, i)
3130 ramrod_data.ctr_id_vector |= (1 << bp->fp[i].cl_id);
3132 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_STAT_QUERY, 0,
3133 ((u32 *)&ramrod_data)[1],
3134 ((u32 *)&ramrod_data)[0], 0);
3136 /* stats ramrod has its own slot on the spq */
3138 bp->stats_pending = 1;
3143 static void bnx2x_stats_init(struct bnx2x *bp)
3145 int port = BP_PORT(bp);
3148 bp->stats_pending = 0;
3149 bp->executer_idx = 0;
3150 bp->stats_counter = 0;
3154 bp->port.port_stx = SHMEM_RD(bp, port_mb[port].port_stx);
3156 bp->port.port_stx = 0;
3157 DP(BNX2X_MSG_STATS, "port_stx 0x%x\n", bp->port.port_stx);
3159 memset(&(bp->port.old_nig_stats), 0, sizeof(struct nig_stats));
3160 bp->port.old_nig_stats.brb_discard =
3161 REG_RD(bp, NIG_REG_STAT0_BRB_DISCARD + port*0x38);
3162 bp->port.old_nig_stats.brb_truncate =
3163 REG_RD(bp, NIG_REG_STAT0_BRB_TRUNCATE + port*0x38);
3164 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT0 + port*0x50,
3165 &(bp->port.old_nig_stats.egress_mac_pkt0_lo), 2);
3166 REG_RD_DMAE(bp, NIG_REG_STAT0_EGRESS_MAC_PKT1 + port*0x50,
3167 &(bp->port.old_nig_stats.egress_mac_pkt1_lo), 2);
3169 /* function stats */
3170 for_each_queue(bp, i) {
3171 struct bnx2x_fastpath *fp = &bp->fp[i];
3173 memset(&fp->old_tclient, 0,
3174 sizeof(struct tstorm_per_client_stats));
3175 memset(&fp->old_uclient, 0,
3176 sizeof(struct ustorm_per_client_stats));
3177 memset(&fp->old_xclient, 0,
3178 sizeof(struct xstorm_per_client_stats));
3179 memset(&fp->eth_q_stats, 0, sizeof(struct bnx2x_eth_q_stats));
3182 memset(&bp->dev->stats, 0, sizeof(struct net_device_stats));
3183 memset(&bp->eth_stats, 0, sizeof(struct bnx2x_eth_stats));
3185 bp->stats_state = STATS_STATE_DISABLED;
3186 if (IS_E1HMF(bp) && bp->port.pmf && bp->port.port_stx)
3187 bnx2x_stats_handle(bp, STATS_EVENT_PMF);
3190 static void bnx2x_hw_stats_post(struct bnx2x *bp)
3192 struct dmae_command *dmae = &bp->stats_dmae;
3193 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3195 *stats_comp = DMAE_COMP_VAL;
3196 if (CHIP_REV_IS_SLOW(bp))
3200 if (bp->executer_idx) {
3201 int loader_idx = PMF_DMAE_C(bp);
3203 memset(dmae, 0, sizeof(struct dmae_command));
3205 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3206 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3207 DMAE_CMD_DST_RESET |
3209 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3211 DMAE_CMD_ENDIANITY_DW_SWAP |
3213 (BP_PORT(bp) ? DMAE_CMD_PORT_1 :
3215 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3216 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, dmae[0]));
3217 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, dmae[0]));
3218 dmae->dst_addr_lo = (DMAE_REG_CMD_MEM +
3219 sizeof(struct dmae_command) *
3220 (loader_idx + 1)) >> 2;
3221 dmae->dst_addr_hi = 0;
3222 dmae->len = sizeof(struct dmae_command) >> 2;
3225 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx + 1] >> 2;
3226 dmae->comp_addr_hi = 0;
3230 bnx2x_post_dmae(bp, dmae, loader_idx);
3232 } else if (bp->func_stx) {
3234 bnx2x_post_dmae(bp, dmae, INIT_DMAE_C(bp));
3238 static int bnx2x_stats_comp(struct bnx2x *bp)
3240 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3244 while (*stats_comp != DMAE_COMP_VAL) {
3246 BNX2X_ERR("timeout waiting for stats to finish\n");
3256 * Statistics service functions
3259 static void bnx2x_stats_pmf_update(struct bnx2x *bp)
3261 struct dmae_command *dmae;
3263 int loader_idx = PMF_DMAE_C(bp);
3264 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3267 if (!IS_E1HMF(bp) || !bp->port.pmf || !bp->port.port_stx) {
3268 BNX2X_ERR("BUG!\n");
3272 bp->executer_idx = 0;
3274 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3276 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3278 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3280 DMAE_CMD_ENDIANITY_DW_SWAP |
3282 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3283 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3285 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3286 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
3287 dmae->src_addr_lo = bp->port.port_stx >> 2;
3288 dmae->src_addr_hi = 0;
3289 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3290 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3291 dmae->len = DMAE_LEN32_RD_MAX;
3292 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3293 dmae->comp_addr_hi = 0;
3296 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3297 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
3298 dmae->src_addr_lo = (bp->port.port_stx >> 2) + DMAE_LEN32_RD_MAX;
3299 dmae->src_addr_hi = 0;
3300 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats) +
3301 DMAE_LEN32_RD_MAX * 4);
3302 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats) +
3303 DMAE_LEN32_RD_MAX * 4);
3304 dmae->len = (sizeof(struct host_port_stats) >> 2) - DMAE_LEN32_RD_MAX;
3305 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3306 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3307 dmae->comp_val = DMAE_COMP_VAL;
3310 bnx2x_hw_stats_post(bp);
3311 bnx2x_stats_comp(bp);
3314 static void bnx2x_port_stats_init(struct bnx2x *bp)
3316 struct dmae_command *dmae;
3317 int port = BP_PORT(bp);
3318 int vn = BP_E1HVN(bp);
3320 int loader_idx = PMF_DMAE_C(bp);
3322 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3325 if (!bp->link_vars.link_up || !bp->port.pmf) {
3326 BNX2X_ERR("BUG!\n");
3330 bp->executer_idx = 0;
3333 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3334 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3335 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3337 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3339 DMAE_CMD_ENDIANITY_DW_SWAP |
3341 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3342 (vn << DMAE_CMD_E1HVN_SHIFT));
3344 if (bp->port.port_stx) {
3346 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3347 dmae->opcode = opcode;
3348 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
3349 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
3350 dmae->dst_addr_lo = bp->port.port_stx >> 2;
3351 dmae->dst_addr_hi = 0;
3352 dmae->len = sizeof(struct host_port_stats) >> 2;
3353 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3354 dmae->comp_addr_hi = 0;
3360 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3361 dmae->opcode = opcode;
3362 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3363 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3364 dmae->dst_addr_lo = bp->func_stx >> 2;
3365 dmae->dst_addr_hi = 0;
3366 dmae->len = sizeof(struct host_func_stats) >> 2;
3367 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3368 dmae->comp_addr_hi = 0;
3373 opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3374 DMAE_CMD_C_DST_GRC | DMAE_CMD_C_ENABLE |
3375 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3377 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3379 DMAE_CMD_ENDIANITY_DW_SWAP |
3381 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3382 (vn << DMAE_CMD_E1HVN_SHIFT));
3384 if (bp->link_vars.mac_type == MAC_TYPE_BMAC) {
3386 mac_addr = (port ? NIG_REG_INGRESS_BMAC1_MEM :
3387 NIG_REG_INGRESS_BMAC0_MEM);
3389 /* BIGMAC_REGISTER_TX_STAT_GTPKT ..
3390 BIGMAC_REGISTER_TX_STAT_GTBYT */
3391 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3392 dmae->opcode = opcode;
3393 dmae->src_addr_lo = (mac_addr +
3394 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3395 dmae->src_addr_hi = 0;
3396 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3397 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3398 dmae->len = (8 + BIGMAC_REGISTER_TX_STAT_GTBYT -
3399 BIGMAC_REGISTER_TX_STAT_GTPKT) >> 2;
3400 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3401 dmae->comp_addr_hi = 0;
3404 /* BIGMAC_REGISTER_RX_STAT_GR64 ..
3405 BIGMAC_REGISTER_RX_STAT_GRIPJ */
3406 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3407 dmae->opcode = opcode;
3408 dmae->src_addr_lo = (mac_addr +
3409 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3410 dmae->src_addr_hi = 0;
3411 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3412 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3413 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3414 offsetof(struct bmac_stats, rx_stat_gr64_lo));
3415 dmae->len = (8 + BIGMAC_REGISTER_RX_STAT_GRIPJ -
3416 BIGMAC_REGISTER_RX_STAT_GR64) >> 2;
3417 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3418 dmae->comp_addr_hi = 0;
3421 } else if (bp->link_vars.mac_type == MAC_TYPE_EMAC) {
3423 mac_addr = (port ? GRCBASE_EMAC1 : GRCBASE_EMAC0);
3425 /* EMAC_REG_EMAC_RX_STAT_AC (EMAC_REG_EMAC_RX_STAT_AC_COUNT)*/
3426 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3427 dmae->opcode = opcode;
3428 dmae->src_addr_lo = (mac_addr +
3429 EMAC_REG_EMAC_RX_STAT_AC) >> 2;
3430 dmae->src_addr_hi = 0;
3431 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats));
3432 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats));
3433 dmae->len = EMAC_REG_EMAC_RX_STAT_AC_COUNT;
3434 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3435 dmae->comp_addr_hi = 0;
3438 /* EMAC_REG_EMAC_RX_STAT_AC_28 */
3439 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3440 dmae->opcode = opcode;
3441 dmae->src_addr_lo = (mac_addr +
3442 EMAC_REG_EMAC_RX_STAT_AC_28) >> 2;
3443 dmae->src_addr_hi = 0;
3444 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3445 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3446 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3447 offsetof(struct emac_stats, rx_stat_falsecarriererrors));
3449 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3450 dmae->comp_addr_hi = 0;
3453 /* EMAC_REG_EMAC_TX_STAT_AC (EMAC_REG_EMAC_TX_STAT_AC_COUNT)*/
3454 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3455 dmae->opcode = opcode;
3456 dmae->src_addr_lo = (mac_addr +
3457 EMAC_REG_EMAC_TX_STAT_AC) >> 2;
3458 dmae->src_addr_hi = 0;
3459 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, mac_stats) +
3460 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3461 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, mac_stats) +
3462 offsetof(struct emac_stats, tx_stat_ifhcoutoctets));
3463 dmae->len = EMAC_REG_EMAC_TX_STAT_AC_COUNT;
3464 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3465 dmae->comp_addr_hi = 0;
3470 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3471 dmae->opcode = opcode;
3472 dmae->src_addr_lo = (port ? NIG_REG_STAT1_BRB_DISCARD :
3473 NIG_REG_STAT0_BRB_DISCARD) >> 2;
3474 dmae->src_addr_hi = 0;
3475 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats));
3476 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats));
3477 dmae->len = (sizeof(struct nig_stats) - 4*sizeof(u32)) >> 2;
3478 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3479 dmae->comp_addr_hi = 0;
3482 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3483 dmae->opcode = opcode;
3484 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT0 :
3485 NIG_REG_STAT0_EGRESS_MAC_PKT0) >> 2;
3486 dmae->src_addr_hi = 0;
3487 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3488 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3489 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3490 offsetof(struct nig_stats, egress_mac_pkt0_lo));
3491 dmae->len = (2*sizeof(u32)) >> 2;
3492 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
3493 dmae->comp_addr_hi = 0;
3496 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
3497 dmae->opcode = (DMAE_CMD_SRC_GRC | DMAE_CMD_DST_PCI |
3498 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3499 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3501 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3503 DMAE_CMD_ENDIANITY_DW_SWAP |
3505 (port ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3506 (vn << DMAE_CMD_E1HVN_SHIFT));
3507 dmae->src_addr_lo = (port ? NIG_REG_STAT1_EGRESS_MAC_PKT1 :
3508 NIG_REG_STAT0_EGRESS_MAC_PKT1) >> 2;
3509 dmae->src_addr_hi = 0;
3510 dmae->dst_addr_lo = U64_LO(bnx2x_sp_mapping(bp, nig_stats) +
3511 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3512 dmae->dst_addr_hi = U64_HI(bnx2x_sp_mapping(bp, nig_stats) +
3513 offsetof(struct nig_stats, egress_mac_pkt1_lo));
3514 dmae->len = (2*sizeof(u32)) >> 2;
3515 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3516 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3517 dmae->comp_val = DMAE_COMP_VAL;
3522 static void bnx2x_func_stats_init(struct bnx2x *bp)
3524 struct dmae_command *dmae = &bp->stats_dmae;
3525 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
3528 if (!bp->func_stx) {
3529 BNX2X_ERR("BUG!\n");
3533 bp->executer_idx = 0;
3534 memset(dmae, 0, sizeof(struct dmae_command));
3536 dmae->opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
3537 DMAE_CMD_C_DST_PCI | DMAE_CMD_C_ENABLE |
3538 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
3540 DMAE_CMD_ENDIANITY_B_DW_SWAP |
3542 DMAE_CMD_ENDIANITY_DW_SWAP |
3544 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
3545 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
3546 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
3547 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
3548 dmae->dst_addr_lo = bp->func_stx >> 2;
3549 dmae->dst_addr_hi = 0;
3550 dmae->len = sizeof(struct host_func_stats) >> 2;
3551 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
3552 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
3553 dmae->comp_val = DMAE_COMP_VAL;
3558 static void bnx2x_stats_start(struct bnx2x *bp)
3561 bnx2x_port_stats_init(bp);
3563 else if (bp->func_stx)
3564 bnx2x_func_stats_init(bp);
3566 bnx2x_hw_stats_post(bp);
3567 bnx2x_storm_stats_post(bp);
3570 static void bnx2x_stats_pmf_start(struct bnx2x *bp)
3572 bnx2x_stats_comp(bp);
3573 bnx2x_stats_pmf_update(bp);
3574 bnx2x_stats_start(bp);
3577 static void bnx2x_stats_restart(struct bnx2x *bp)
3579 bnx2x_stats_comp(bp);
3580 bnx2x_stats_start(bp);
3583 static void bnx2x_bmac_stats_update(struct bnx2x *bp)
3585 struct bmac_stats *new = bnx2x_sp(bp, mac_stats.bmac_stats);
3586 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3587 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3593 UPDATE_STAT64(rx_stat_grerb, rx_stat_ifhcinbadoctets);
3594 UPDATE_STAT64(rx_stat_grfcs, rx_stat_dot3statsfcserrors);
3595 UPDATE_STAT64(rx_stat_grund, rx_stat_etherstatsundersizepkts);
3596 UPDATE_STAT64(rx_stat_grovr, rx_stat_dot3statsframestoolong);
3597 UPDATE_STAT64(rx_stat_grfrg, rx_stat_etherstatsfragments);
3598 UPDATE_STAT64(rx_stat_grjbr, rx_stat_etherstatsjabbers);
3599 UPDATE_STAT64(rx_stat_grxcf, rx_stat_maccontrolframesreceived);
3600 UPDATE_STAT64(rx_stat_grxpf, rx_stat_xoffstateentered);
3601 UPDATE_STAT64(rx_stat_grxpf, rx_stat_bmac_xpf);
3602 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_outxoffsent);
3603 UPDATE_STAT64(tx_stat_gtxpf, tx_stat_flowcontroldone);
3604 UPDATE_STAT64(tx_stat_gt64, tx_stat_etherstatspkts64octets);
3605 UPDATE_STAT64(tx_stat_gt127,
3606 tx_stat_etherstatspkts65octetsto127octets);
3607 UPDATE_STAT64(tx_stat_gt255,
3608 tx_stat_etherstatspkts128octetsto255octets);
3609 UPDATE_STAT64(tx_stat_gt511,
3610 tx_stat_etherstatspkts256octetsto511octets);
3611 UPDATE_STAT64(tx_stat_gt1023,
3612 tx_stat_etherstatspkts512octetsto1023octets);
3613 UPDATE_STAT64(tx_stat_gt1518,
3614 tx_stat_etherstatspkts1024octetsto1522octets);
3615 UPDATE_STAT64(tx_stat_gt2047, tx_stat_bmac_2047);
3616 UPDATE_STAT64(tx_stat_gt4095, tx_stat_bmac_4095);
3617 UPDATE_STAT64(tx_stat_gt9216, tx_stat_bmac_9216);
3618 UPDATE_STAT64(tx_stat_gt16383, tx_stat_bmac_16383);
3619 UPDATE_STAT64(tx_stat_gterr,
3620 tx_stat_dot3statsinternalmactransmiterrors);
3621 UPDATE_STAT64(tx_stat_gtufl, tx_stat_bmac_ufl);
3623 estats->pause_frames_received_hi =
3624 pstats->mac_stx[1].rx_stat_bmac_xpf_hi;
3625 estats->pause_frames_received_lo =
3626 pstats->mac_stx[1].rx_stat_bmac_xpf_lo;
3628 estats->pause_frames_sent_hi =
3629 pstats->mac_stx[1].tx_stat_outxoffsent_hi;
3630 estats->pause_frames_sent_lo =
3631 pstats->mac_stx[1].tx_stat_outxoffsent_lo;
3634 static void bnx2x_emac_stats_update(struct bnx2x *bp)
3636 struct emac_stats *new = bnx2x_sp(bp, mac_stats.emac_stats);
3637 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3638 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3640 UPDATE_EXTEND_STAT(rx_stat_ifhcinbadoctets);
3641 UPDATE_EXTEND_STAT(tx_stat_ifhcoutbadoctets);
3642 UPDATE_EXTEND_STAT(rx_stat_dot3statsfcserrors);
3643 UPDATE_EXTEND_STAT(rx_stat_dot3statsalignmenterrors);
3644 UPDATE_EXTEND_STAT(rx_stat_dot3statscarriersenseerrors);
3645 UPDATE_EXTEND_STAT(rx_stat_falsecarriererrors);
3646 UPDATE_EXTEND_STAT(rx_stat_etherstatsundersizepkts);
3647 UPDATE_EXTEND_STAT(rx_stat_dot3statsframestoolong);
3648 UPDATE_EXTEND_STAT(rx_stat_etherstatsfragments);
3649 UPDATE_EXTEND_STAT(rx_stat_etherstatsjabbers);
3650 UPDATE_EXTEND_STAT(rx_stat_maccontrolframesreceived);
3651 UPDATE_EXTEND_STAT(rx_stat_xoffstateentered);
3652 UPDATE_EXTEND_STAT(rx_stat_xonpauseframesreceived);
3653 UPDATE_EXTEND_STAT(rx_stat_xoffpauseframesreceived);
3654 UPDATE_EXTEND_STAT(tx_stat_outxonsent);
3655 UPDATE_EXTEND_STAT(tx_stat_outxoffsent);
3656 UPDATE_EXTEND_STAT(tx_stat_flowcontroldone);
3657 UPDATE_EXTEND_STAT(tx_stat_etherstatscollisions);
3658 UPDATE_EXTEND_STAT(tx_stat_dot3statssinglecollisionframes);
3659 UPDATE_EXTEND_STAT(tx_stat_dot3statsmultiplecollisionframes);
3660 UPDATE_EXTEND_STAT(tx_stat_dot3statsdeferredtransmissions);
3661 UPDATE_EXTEND_STAT(tx_stat_dot3statsexcessivecollisions);
3662 UPDATE_EXTEND_STAT(tx_stat_dot3statslatecollisions);
3663 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts64octets);
3664 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts65octetsto127octets);
3665 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts128octetsto255octets);
3666 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts256octetsto511octets);
3667 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts512octetsto1023octets);
3668 UPDATE_EXTEND_STAT(tx_stat_etherstatspkts1024octetsto1522octets);
3669 UPDATE_EXTEND_STAT(tx_stat_etherstatspktsover1522octets);
3670 UPDATE_EXTEND_STAT(tx_stat_dot3statsinternalmactransmiterrors);
3672 estats->pause_frames_received_hi =
3673 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_hi;
3674 estats->pause_frames_received_lo =
3675 pstats->mac_stx[1].rx_stat_xonpauseframesreceived_lo;
3676 ADD_64(estats->pause_frames_received_hi,
3677 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_hi,
3678 estats->pause_frames_received_lo,
3679 pstats->mac_stx[1].rx_stat_xoffpauseframesreceived_lo);
3681 estats->pause_frames_sent_hi =
3682 pstats->mac_stx[1].tx_stat_outxonsent_hi;
3683 estats->pause_frames_sent_lo =
3684 pstats->mac_stx[1].tx_stat_outxonsent_lo;
3685 ADD_64(estats->pause_frames_sent_hi,
3686 pstats->mac_stx[1].tx_stat_outxoffsent_hi,
3687 estats->pause_frames_sent_lo,
3688 pstats->mac_stx[1].tx_stat_outxoffsent_lo);
3691 static int bnx2x_hw_stats_update(struct bnx2x *bp)
3693 struct nig_stats *new = bnx2x_sp(bp, nig_stats);
3694 struct nig_stats *old = &(bp->port.old_nig_stats);
3695 struct host_port_stats *pstats = bnx2x_sp(bp, port_stats);
3696 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3703 if (bp->link_vars.mac_type == MAC_TYPE_BMAC)
3704 bnx2x_bmac_stats_update(bp);
3706 else if (bp->link_vars.mac_type == MAC_TYPE_EMAC)
3707 bnx2x_emac_stats_update(bp);
3709 else { /* unreached */
3710 BNX2X_ERR("stats updated by DMAE but no MAC active\n");
3714 ADD_EXTEND_64(pstats->brb_drop_hi, pstats->brb_drop_lo,
3715 new->brb_discard - old->brb_discard);
3716 ADD_EXTEND_64(estats->brb_truncate_hi, estats->brb_truncate_lo,
3717 new->brb_truncate - old->brb_truncate);
3719 UPDATE_STAT64_NIG(egress_mac_pkt0,
3720 etherstatspkts1024octetsto1522octets);
3721 UPDATE_STAT64_NIG(egress_mac_pkt1, etherstatspktsover1522octets);
3723 memcpy(old, new, sizeof(struct nig_stats));
3725 memcpy(&(estats->rx_stat_ifhcinbadoctets_hi), &(pstats->mac_stx[1]),
3726 sizeof(struct mac_stx));
3727 estats->brb_drop_hi = pstats->brb_drop_hi;
3728 estats->brb_drop_lo = pstats->brb_drop_lo;
3730 pstats->host_port_stats_start = ++pstats->host_port_stats_end;
3732 nig_timer_max = SHMEM_RD(bp, port_mb[BP_PORT(bp)].stat_nig_timer);
3733 if (nig_timer_max != estats->nig_timer_max) {
3734 estats->nig_timer_max = nig_timer_max;
3735 BNX2X_ERR("NIG timer max (%u)\n", estats->nig_timer_max);
3741 static int bnx2x_storm_stats_update(struct bnx2x *bp)
3743 struct eth_stats_query *stats = bnx2x_sp(bp, fw_stats);
3744 struct tstorm_per_port_stats *tport =
3745 &stats->tstorm_common.port_statistics;
3746 struct host_func_stats *fstats = bnx2x_sp(bp, func_stats);
3747 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3750 memset(&(fstats->total_bytes_received_hi), 0,
3751 sizeof(struct host_func_stats) - 2*sizeof(u32));
3752 estats->error_bytes_received_hi = 0;
3753 estats->error_bytes_received_lo = 0;
3754 estats->etherstatsoverrsizepkts_hi = 0;
3755 estats->etherstatsoverrsizepkts_lo = 0;
3756 estats->no_buff_discard_hi = 0;
3757 estats->no_buff_discard_lo = 0;
3759 for_each_queue(bp, i) {
3760 struct bnx2x_fastpath *fp = &bp->fp[i];
3761 int cl_id = fp->cl_id;
3762 struct tstorm_per_client_stats *tclient =
3763 &stats->tstorm_common.client_statistics[cl_id];
3764 struct tstorm_per_client_stats *old_tclient = &fp->old_tclient;
3765 struct ustorm_per_client_stats *uclient =
3766 &stats->ustorm_common.client_statistics[cl_id];
3767 struct ustorm_per_client_stats *old_uclient = &fp->old_uclient;
3768 struct xstorm_per_client_stats *xclient =
3769 &stats->xstorm_common.client_statistics[cl_id];
3770 struct xstorm_per_client_stats *old_xclient = &fp->old_xclient;
3771 struct bnx2x_eth_q_stats *qstats = &fp->eth_q_stats;
3774 /* are storm stats valid? */
3775 if ((u16)(le16_to_cpu(xclient->stats_counter) + 1) !=
3776 bp->stats_counter) {
3777 DP(BNX2X_MSG_STATS, "[%d] stats not updated by xstorm"
3778 " xstorm counter (%d) != stats_counter (%d)\n",
3779 i, xclient->stats_counter, bp->stats_counter);
3782 if ((u16)(le16_to_cpu(tclient->stats_counter) + 1) !=
3783 bp->stats_counter) {
3784 DP(BNX2X_MSG_STATS, "[%d] stats not updated by tstorm"
3785 " tstorm counter (%d) != stats_counter (%d)\n",
3786 i, tclient->stats_counter, bp->stats_counter);
3789 if ((u16)(le16_to_cpu(uclient->stats_counter) + 1) !=
3790 bp->stats_counter) {
3791 DP(BNX2X_MSG_STATS, "[%d] stats not updated by ustorm"
3792 " ustorm counter (%d) != stats_counter (%d)\n",
3793 i, uclient->stats_counter, bp->stats_counter);
3797 qstats->total_bytes_received_hi =
3798 qstats->valid_bytes_received_hi =
3799 le32_to_cpu(tclient->total_rcv_bytes.hi);
3800 qstats->total_bytes_received_lo =
3801 qstats->valid_bytes_received_lo =
3802 le32_to_cpu(tclient->total_rcv_bytes.lo);
3804 qstats->error_bytes_received_hi =
3805 le32_to_cpu(tclient->rcv_error_bytes.hi);
3806 qstats->error_bytes_received_lo =
3807 le32_to_cpu(tclient->rcv_error_bytes.lo);
3809 ADD_64(qstats->total_bytes_received_hi,
3810 qstats->error_bytes_received_hi,
3811 qstats->total_bytes_received_lo,
3812 qstats->error_bytes_received_lo);
3814 UPDATE_EXTEND_TSTAT(rcv_unicast_pkts,
3815 total_unicast_packets_received);
3816 UPDATE_EXTEND_TSTAT(rcv_multicast_pkts,
3817 total_multicast_packets_received);
3818 UPDATE_EXTEND_TSTAT(rcv_broadcast_pkts,
3819 total_broadcast_packets_received);
3820 UPDATE_EXTEND_TSTAT(packets_too_big_discard,
3821 etherstatsoverrsizepkts);
3822 UPDATE_EXTEND_TSTAT(no_buff_discard, no_buff_discard);
3824 SUB_EXTEND_USTAT(ucast_no_buff_pkts,
3825 total_unicast_packets_received);
3826 SUB_EXTEND_USTAT(mcast_no_buff_pkts,
3827 total_multicast_packets_received);
3828 SUB_EXTEND_USTAT(bcast_no_buff_pkts,
3829 total_broadcast_packets_received);
3830 UPDATE_EXTEND_USTAT(ucast_no_buff_pkts, no_buff_discard);
3831 UPDATE_EXTEND_USTAT(mcast_no_buff_pkts, no_buff_discard);
3832 UPDATE_EXTEND_USTAT(bcast_no_buff_pkts, no_buff_discard);
3834 qstats->total_bytes_transmitted_hi =
3835 le32_to_cpu(xclient->total_sent_bytes.hi);
3836 qstats->total_bytes_transmitted_lo =
3837 le32_to_cpu(xclient->total_sent_bytes.lo);
3839 UPDATE_EXTEND_XSTAT(unicast_pkts_sent,
3840 total_unicast_packets_transmitted);
3841 UPDATE_EXTEND_XSTAT(multicast_pkts_sent,
3842 total_multicast_packets_transmitted);
3843 UPDATE_EXTEND_XSTAT(broadcast_pkts_sent,
3844 total_broadcast_packets_transmitted);
3846 old_tclient->checksum_discard = tclient->checksum_discard;
3847 old_tclient->ttl0_discard = tclient->ttl0_discard;
3849 ADD_64(fstats->total_bytes_received_hi,
3850 qstats->total_bytes_received_hi,
3851 fstats->total_bytes_received_lo,
3852 qstats->total_bytes_received_lo);
3853 ADD_64(fstats->total_bytes_transmitted_hi,
3854 qstats->total_bytes_transmitted_hi,
3855 fstats->total_bytes_transmitted_lo,
3856 qstats->total_bytes_transmitted_lo);
3857 ADD_64(fstats->total_unicast_packets_received_hi,
3858 qstats->total_unicast_packets_received_hi,
3859 fstats->total_unicast_packets_received_lo,
3860 qstats->total_unicast_packets_received_lo);
3861 ADD_64(fstats->total_multicast_packets_received_hi,
3862 qstats->total_multicast_packets_received_hi,
3863 fstats->total_multicast_packets_received_lo,
3864 qstats->total_multicast_packets_received_lo);
3865 ADD_64(fstats->total_broadcast_packets_received_hi,
3866 qstats->total_broadcast_packets_received_hi,
3867 fstats->total_broadcast_packets_received_lo,
3868 qstats->total_broadcast_packets_received_lo);
3869 ADD_64(fstats->total_unicast_packets_transmitted_hi,
3870 qstats->total_unicast_packets_transmitted_hi,
3871 fstats->total_unicast_packets_transmitted_lo,
3872 qstats->total_unicast_packets_transmitted_lo);
3873 ADD_64(fstats->total_multicast_packets_transmitted_hi,
3874 qstats->total_multicast_packets_transmitted_hi,
3875 fstats->total_multicast_packets_transmitted_lo,
3876 qstats->total_multicast_packets_transmitted_lo);
3877 ADD_64(fstats->total_broadcast_packets_transmitted_hi,
3878 qstats->total_broadcast_packets_transmitted_hi,
3879 fstats->total_broadcast_packets_transmitted_lo,
3880 qstats->total_broadcast_packets_transmitted_lo);
3881 ADD_64(fstats->valid_bytes_received_hi,
3882 qstats->valid_bytes_received_hi,
3883 fstats->valid_bytes_received_lo,
3884 qstats->valid_bytes_received_lo);
3886 ADD_64(estats->error_bytes_received_hi,
3887 qstats->error_bytes_received_hi,
3888 estats->error_bytes_received_lo,
3889 qstats->error_bytes_received_lo);
3890 ADD_64(estats->etherstatsoverrsizepkts_hi,
3891 qstats->etherstatsoverrsizepkts_hi,
3892 estats->etherstatsoverrsizepkts_lo,
3893 qstats->etherstatsoverrsizepkts_lo);
3894 ADD_64(estats->no_buff_discard_hi, qstats->no_buff_discard_hi,
3895 estats->no_buff_discard_lo, qstats->no_buff_discard_lo);
3898 ADD_64(fstats->total_bytes_received_hi,
3899 estats->rx_stat_ifhcinbadoctets_hi,
3900 fstats->total_bytes_received_lo,
3901 estats->rx_stat_ifhcinbadoctets_lo);
3903 memcpy(estats, &(fstats->total_bytes_received_hi),
3904 sizeof(struct host_func_stats) - 2*sizeof(u32));
3906 ADD_64(estats->etherstatsoverrsizepkts_hi,
3907 estats->rx_stat_dot3statsframestoolong_hi,
3908 estats->etherstatsoverrsizepkts_lo,
3909 estats->rx_stat_dot3statsframestoolong_lo);
3910 ADD_64(estats->error_bytes_received_hi,
3911 estats->rx_stat_ifhcinbadoctets_hi,
3912 estats->error_bytes_received_lo,
3913 estats->rx_stat_ifhcinbadoctets_lo);
3916 estats->mac_filter_discard =
3917 le32_to_cpu(tport->mac_filter_discard);
3918 estats->xxoverflow_discard =
3919 le32_to_cpu(tport->xxoverflow_discard);
3920 estats->brb_truncate_discard =
3921 le32_to_cpu(tport->brb_truncate_discard);
3922 estats->mac_discard = le32_to_cpu(tport->mac_discard);
3925 fstats->host_func_stats_start = ++fstats->host_func_stats_end;
3927 bp->stats_pending = 0;
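/*
 * Standalone check of the per-storm freshness test used in the loop
 * above (illustration only, not driver code): a client block is
 * current when its 16-bit counter is exactly one behind the driver's
 * counter, with natural u16 wraparound.
 */
#if 0 /* illustration only */
#include <assert.h>
#include <stdint.h>

static int stats_stale(uint16_t storm_counter, uint16_t drv_counter)
{
	return (uint16_t)(storm_counter + 1) != drv_counter;
}

int main(void)
{
	assert(!stats_stale(41, 42));		/* fresh */
	assert(stats_stale(40, 42));		/* one update behind */
	assert(!stats_stale(0xffff, 0));	/* wraparound still fresh */
	return 0;
}
#endif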
3932 static void bnx2x_net_stats_update(struct bnx2x *bp)
3934 struct bnx2x_eth_stats *estats = &bp->eth_stats;
3935 struct net_device_stats *nstats = &bp->dev->stats;
3938 nstats->rx_packets =
3939 bnx2x_hilo(&estats->total_unicast_packets_received_hi) +
3940 bnx2x_hilo(&estats->total_multicast_packets_received_hi) +
3941 bnx2x_hilo(&estats->total_broadcast_packets_received_hi);
3943 nstats->tx_packets =
3944 bnx2x_hilo(&estats->total_unicast_packets_transmitted_hi) +
3945 bnx2x_hilo(&estats->total_multicast_packets_transmitted_hi) +
3946 bnx2x_hilo(&estats->total_broadcast_packets_transmitted_hi);
3948 nstats->rx_bytes = bnx2x_hilo(&estats->total_bytes_received_hi);
3950 nstats->tx_bytes = bnx2x_hilo(&estats->total_bytes_transmitted_hi);
3952 nstats->rx_dropped = estats->mac_discard;
3953 for_each_queue(bp, i)
3954 nstats->rx_dropped +=
3955 le32_to_cpu(bp->fp[i].old_tclient.checksum_discard);
3957 nstats->tx_dropped = 0;
3960 bnx2x_hilo(&estats->total_multicast_packets_received_hi);
3962 nstats->collisions =
3963 bnx2x_hilo(&estats->tx_stat_etherstatscollisions_hi);
3965 nstats->rx_length_errors =
3966 bnx2x_hilo(&estats->rx_stat_etherstatsundersizepkts_hi) +
3967 bnx2x_hilo(&estats->etherstatsoverrsizepkts_hi);
3968 nstats->rx_over_errors = bnx2x_hilo(&estats->brb_drop_hi) +
3969 bnx2x_hilo(&estats->brb_truncate_hi);
3970 nstats->rx_crc_errors =
3971 bnx2x_hilo(&estats->rx_stat_dot3statsfcserrors_hi);
3972 nstats->rx_frame_errors =
3973 bnx2x_hilo(&estats->rx_stat_dot3statsalignmenterrors_hi);
3974 nstats->rx_fifo_errors = bnx2x_hilo(&estats->no_buff_discard_hi);
3975 nstats->rx_missed_errors = estats->xxoverflow_discard;
3977 nstats->rx_errors = nstats->rx_length_errors +
3978 nstats->rx_over_errors +
3979 nstats->rx_crc_errors +
3980 nstats->rx_frame_errors +
3981 nstats->rx_fifo_errors +
3982 nstats->rx_missed_errors;
3984 nstats->tx_aborted_errors =
3985 bnx2x_hilo(&estats->tx_stat_dot3statslatecollisions_hi) +
3986 bnx2x_hilo(&estats->tx_stat_dot3statsexcessivecollisions_hi);
3987 nstats->tx_carrier_errors =
3988 bnx2x_hilo(&estats->rx_stat_dot3statscarriersenseerrors_hi);
3989 nstats->tx_fifo_errors = 0;
3990 nstats->tx_heartbeat_errors = 0;
3991 nstats->tx_window_errors = 0;
3993 nstats->tx_errors = nstats->tx_aborted_errors +
3994 nstats->tx_carrier_errors +
3995 bnx2x_hilo(&estats->tx_stat_dot3statsinternalmactransmiterrors_hi);
3998 static void bnx2x_drv_stats_update(struct bnx2x *bp)
4000 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4003 estats->driver_xoff = 0;
4004 estats->rx_err_discard_pkt = 0;
4005 estats->rx_skb_alloc_failed = 0;
4006 estats->hw_csum_err = 0;
4007 for_each_queue(bp, i) {
4008 struct bnx2x_eth_q_stats *qstats = &bp->fp[i].eth_q_stats;
4010 estats->driver_xoff += qstats->driver_xoff;
4011 estats->rx_err_discard_pkt += qstats->rx_err_discard_pkt;
4012 estats->rx_skb_alloc_failed += qstats->rx_skb_alloc_failed;
4013 estats->hw_csum_err += qstats->hw_csum_err;
4017 static void bnx2x_stats_update(struct bnx2x *bp)
4019 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4021 if (*stats_comp != DMAE_COMP_VAL)
4025 bnx2x_hw_stats_update(bp);
4027 if (bnx2x_storm_stats_update(bp) && (bp->stats_pending++ == 3)) {
4028 BNX2X_ERR("storm stats were not updated 3 times in a row\n");
4033 bnx2x_net_stats_update(bp);
4034 bnx2x_drv_stats_update(bp);
4036 if (bp->msglevel & NETIF_MSG_TIMER) {
4037 struct tstorm_per_client_stats *old_tclient =
4038 &bp->fp->old_tclient;
4039 struct bnx2x_eth_q_stats *qstats = &bp->fp->eth_q_stats;
4040 struct bnx2x_eth_stats *estats = &bp->eth_stats;
4041 struct net_device_stats *nstats = &bp->dev->stats;
4044 printk(KERN_DEBUG "%s:\n", bp->dev->name);
4045 printk(KERN_DEBUG " tx avail (%4x) tx hc idx (%x)"
4047 bnx2x_tx_avail(bp->fp),
4048 le16_to_cpu(*bp->fp->tx_cons_sb), nstats->tx_packets);
4049 printk(KERN_DEBUG " rx usage (%4x) rx hc idx (%x)"
4051 (u16)(le16_to_cpu(*bp->fp->rx_cons_sb) -
4052 bp->fp->rx_comp_cons),
4053 le16_to_cpu(*bp->fp->rx_cons_sb), nstats->rx_packets);
4054 printk(KERN_DEBUG " %s (Xoff events %u) brb drops %u "
4055 "brb truncate %u\n",
4056 (netif_queue_stopped(bp->dev) ? "Xoff" : "Xon"),
4057 qstats->driver_xoff,
4058 estats->brb_drop_lo, estats->brb_truncate_lo);
4059 printk(KERN_DEBUG "tstats: checksum_discard %u "
4060 "packets_too_big_discard %lu no_buff_discard %lu "
4061 "mac_discard %u mac_filter_discard %u "
4062 "xxovrflow_discard %u brb_truncate_discard %u "
4063 "ttl0_discard %u\n",
4064 le32_to_cpu(old_tclient->checksum_discard),
4065 bnx2x_hilo(&qstats->etherstatsoverrsizepkts_hi),
4066 bnx2x_hilo(&qstats->no_buff_discard_hi),
4067 estats->mac_discard, estats->mac_filter_discard,
4068 estats->xxoverflow_discard, estats->brb_truncate_discard,
4069 le32_to_cpu(old_tclient->ttl0_discard));
4071 for_each_queue(bp, i) {
4072 printk(KERN_DEBUG "[%d]: %lu\t%lu\t%lu\n", i,
4073 bnx2x_fp(bp, i, tx_pkt),
4074 bnx2x_fp(bp, i, rx_pkt),
4075 bnx2x_fp(bp, i, rx_calls));
4079 bnx2x_hw_stats_post(bp);
4080 bnx2x_storm_stats_post(bp);
4083 static void bnx2x_port_stats_stop(struct bnx2x *bp)
4085 struct dmae_command *dmae;
4087 int loader_idx = PMF_DMAE_C(bp);
4088 u32 *stats_comp = bnx2x_sp(bp, stats_comp);
4090 bp->executer_idx = 0;
4092 opcode = (DMAE_CMD_SRC_PCI | DMAE_CMD_DST_GRC |
4094 DMAE_CMD_SRC_RESET | DMAE_CMD_DST_RESET |
4096 DMAE_CMD_ENDIANITY_B_DW_SWAP |
4098 DMAE_CMD_ENDIANITY_DW_SWAP |
4100 (BP_PORT(bp) ? DMAE_CMD_PORT_1 : DMAE_CMD_PORT_0) |
4101 (BP_E1HVN(bp) << DMAE_CMD_E1HVN_SHIFT));
4103 if (bp->port.port_stx) {
4105 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4107 dmae->opcode = (opcode | DMAE_CMD_C_DST_GRC);
4109 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4110 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, port_stats));
4111 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, port_stats));
4112 dmae->dst_addr_lo = bp->port.port_stx >> 2;
4113 dmae->dst_addr_hi = 0;
4114 dmae->len = sizeof(struct host_port_stats) >> 2;
4116 dmae->comp_addr_lo = dmae_reg_go_c[loader_idx] >> 2;
4117 dmae->comp_addr_hi = 0;
4120 dmae->comp_addr_lo =
4121 U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4122 dmae->comp_addr_hi =
4123 U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4124 dmae->comp_val = DMAE_COMP_VAL;
4132 dmae = bnx2x_sp(bp, dmae[bp->executer_idx++]);
4133 dmae->opcode = (opcode | DMAE_CMD_C_DST_PCI);
4134 dmae->src_addr_lo = U64_LO(bnx2x_sp_mapping(bp, func_stats));
4135 dmae->src_addr_hi = U64_HI(bnx2x_sp_mapping(bp, func_stats));
4136 dmae->dst_addr_lo = bp->func_stx >> 2;
4137 dmae->dst_addr_hi = 0;
4138 dmae->len = sizeof(struct host_func_stats) >> 2;
4139 dmae->comp_addr_lo = U64_LO(bnx2x_sp_mapping(bp, stats_comp));
4140 dmae->comp_addr_hi = U64_HI(bnx2x_sp_mapping(bp, stats_comp));
4141 dmae->comp_val = DMAE_COMP_VAL;
4147 static void bnx2x_stats_stop(struct bnx2x *bp)
4151 bnx2x_stats_comp(bp);
4154 update = (bnx2x_hw_stats_update(bp) == 0);
4156 update |= (bnx2x_storm_stats_update(bp) == 0);
4159 bnx2x_net_stats_update(bp);
4162 bnx2x_port_stats_stop(bp);
4164 bnx2x_hw_stats_post(bp);
4165 bnx2x_stats_comp(bp);
4169 static void bnx2x_stats_do_nothing(struct bnx2x *bp)
4173 static const struct {
4174 void (*action)(struct bnx2x *bp);
4175 enum bnx2x_stats_state next_state;
4176 } bnx2x_stats_stm[STATS_STATE_MAX][STATS_EVENT_MAX] = {
4179 /* DISABLED PMF */ {bnx2x_stats_pmf_update, STATS_STATE_DISABLED},
4180 /* LINK_UP */ {bnx2x_stats_start, STATS_STATE_ENABLED},
4181 /* UPDATE */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED},
4182 /* STOP */ {bnx2x_stats_do_nothing, STATS_STATE_DISABLED}
4185 /* ENABLED PMF */ {bnx2x_stats_pmf_start, STATS_STATE_ENABLED},
4186 /* LINK_UP */ {bnx2x_stats_restart, STATS_STATE_ENABLED},
4187 /* UPDATE */ {bnx2x_stats_update, STATS_STATE_ENABLED},
4188 /* STOP */ {bnx2x_stats_stop, STATS_STATE_DISABLED}
4192 static void bnx2x_stats_handle(struct bnx2x *bp, enum bnx2x_stats_event event)
4194 enum bnx2x_stats_state state = bp->stats_state;
4196 bnx2x_stats_stm[state][event].action(bp);
4197 bp->stats_state = bnx2x_stats_stm[state][event].next_state;
4199 if ((event != STATS_EVENT_UPDATE) || (bp->msglevel & NETIF_MSG_TIMER))
4200 DP(BNX2X_MSG_STATS, "state %d -> event %d -> state %d\n",
4201 state, event, bp->stats_state);
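/* Illustration (not driver code): the table above is a classic
 * table-driven state machine -- each (state, event) cell names an
 * action and the successor state.  A minimal sketch of the same
 * pattern with hypothetical states and events:
 */
#if 0	/* example only */
enum ex_state { EX_OFF, EX_ON, EX_STATE_MAX };
enum ex_event { EX_START, EX_STOP, EX_EVENT_MAX };

static void ex_nop(void) { }

static const struct {
	void (*action)(void);
	enum ex_state next_state;
} ex_stm[EX_STATE_MAX][EX_EVENT_MAX] = {
	/* OFF */ { /* START */ {ex_nop, EX_ON}, /* STOP */ {ex_nop, EX_OFF} },
	/* ON  */ { /* START */ {ex_nop, EX_ON}, /* STOP */ {ex_nop, EX_OFF} },
};

static enum ex_state ex_handle(enum ex_state state, enum ex_event event)
{
	ex_stm[state][event].action();
	return ex_stm[state][event].next_state;
}
#endif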
4204 static void bnx2x_timer(unsigned long data)
4206 struct bnx2x *bp = (struct bnx2x *) data;
4208 if (!netif_running(bp->dev))
4211 if (atomic_read(&bp->intr_sem) != 0)
4215 struct bnx2x_fastpath *fp = &bp->fp[0];
4219 rc = bnx2x_rx_int(fp, 1000);
4222 if (!BP_NOMCP(bp)) {
4223 int func = BP_FUNC(bp);
4227 ++bp->fw_drv_pulse_wr_seq;
4228 bp->fw_drv_pulse_wr_seq &= DRV_PULSE_SEQ_MASK;
4229 /* TBD - add SYSTEM_TIME */
4230 drv_pulse = bp->fw_drv_pulse_wr_seq;
4231 SHMEM_WR(bp, func_mb[func].drv_pulse_mb, drv_pulse);
4233 mcp_pulse = (SHMEM_RD(bp, func_mb[func].mcp_pulse_mb) &
4234 MCP_PULSE_SEQ_MASK);
4235 /* The delta between driver pulse and mcp response
4236 * should be 1 (before mcp response) or 0 (after mcp response)
4238 if ((drv_pulse != mcp_pulse) &&
4239 (drv_pulse != ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK))) {
4240 /* someone lost a heartbeat... */
4241 BNX2X_ERR("drv_pulse (0x%x) != mcp_pulse (0x%x)\n",
4242 drv_pulse, mcp_pulse);
4246 if ((bp->state == BNX2X_STATE_OPEN) ||
4247 (bp->state == BNX2X_STATE_DISABLED))
4248 bnx2x_stats_handle(bp, STATS_EVENT_UPDATE);
4251 mod_timer(&bp->timer, jiffies + bp->current_interval);
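/* A hedged sketch (example only) of the heartbeat rule enforced in
 * the timer above: the driver's pulse must equal the MCP's, or be
 * exactly one ahead of it, modulo the sequence mask.
 */
#if 0	/* example only */
static int ex_pulse_ok(u16 drv_pulse, u16 mcp_pulse)
{
	return (drv_pulse == mcp_pulse) ||
	       (drv_pulse == ((mcp_pulse + 1) & MCP_PULSE_SEQ_MASK));
}
#endif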
4254 /* end of Statistics */
4259 * nic init service functions
4262 static void bnx2x_zero_sb(struct bnx2x *bp, int sb_id)
4264 int port = BP_PORT(bp);
4266 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4267 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4268 sizeof(struct ustorm_status_block)/4);
4269 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4270 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), 0,
4271 sizeof(struct cstorm_status_block)/4);
4274 static void bnx2x_init_sb(struct bnx2x *bp, struct host_status_block *sb,
4275 dma_addr_t mapping, int sb_id)
4277 int port = BP_PORT(bp);
4278 int func = BP_FUNC(bp);
4283 section = ((u64)mapping) + offsetof(struct host_status_block,
4285 sb->u_status_block.status_block_id = sb_id;
4287 REG_WR(bp, BAR_USTRORM_INTMEM +
4288 USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4289 REG_WR(bp, BAR_USTRORM_INTMEM +
4290 ((USTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4292 REG_WR8(bp, BAR_USTRORM_INTMEM + FP_USB_FUNC_OFF +
4293 USTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4295 for (index = 0; index < HC_USTORM_SB_NUM_INDICES; index++)
4296 REG_WR16(bp, BAR_USTRORM_INTMEM +
4297 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4300 section = ((u64)mapping) + offsetof(struct host_status_block,
4302 sb->c_status_block.status_block_id = sb_id;
4304 REG_WR(bp, BAR_CSTRORM_INTMEM +
4305 CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id), U64_LO(section));
4306 REG_WR(bp, BAR_CSTRORM_INTMEM +
4307 ((CSTORM_SB_HOST_SB_ADDR_OFFSET(port, sb_id)) + 4),
4309 REG_WR8(bp, BAR_CSTRORM_INTMEM + FP_CSB_FUNC_OFF +
4310 CSTORM_SB_HOST_STATUS_BLOCK_OFFSET(port, sb_id), func);
4312 for (index = 0; index < HC_CSTORM_SB_NUM_INDICES; index++)
4313 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4314 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id, index), 1);
4316 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
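/* Illustration (not the driver's actual macros): U64_LO()/U64_HI()
 * used above split a 64-bit DMA address into the two 32-bit halves
 * that the status block address registers expect:
 */
#if 0	/* example only */
#define EX_U64_LO(x)	((u32)((u64)(x) & 0xffffffff))
#define EX_U64_HI(x)	((u32)((u64)(x) >> 32))
/* e.g. for a mapping of 0x0000000123456000:
 * EX_U64_LO() == 0x23456000, EX_U64_HI() == 0x00000001 */
#endif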
4319 static void bnx2x_zero_def_sb(struct bnx2x *bp)
4321 int func = BP_FUNC(bp);
4323 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR +
4324 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4325 sizeof(struct tstorm_def_status_block)/4);
4326 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR +
4327 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4328 sizeof(struct ustorm_def_status_block)/4);
4329 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR +
4330 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4331 sizeof(struct cstorm_def_status_block)/4);
4332 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR +
4333 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), 0,
4334 sizeof(struct xstorm_def_status_block)/4);
4337 static void bnx2x_init_def_sb(struct bnx2x *bp,
4338 struct host_def_status_block *def_sb,
4339 dma_addr_t mapping, int sb_id)
4341 int port = BP_PORT(bp);
4342 int func = BP_FUNC(bp);
4343 int index, val, reg_offset;
4347 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4348 atten_status_block);
4349 def_sb->atten_status_block.status_block_id = sb_id;
4353 reg_offset = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
4354 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
4356 for (index = 0; index < MAX_DYNAMIC_ATTN_GRPS; index++) {
4357 bp->attn_group[index].sig[0] = REG_RD(bp,
4358 reg_offset + 0x10*index);
4359 bp->attn_group[index].sig[1] = REG_RD(bp,
4360 reg_offset + 0x4 + 0x10*index);
4361 bp->attn_group[index].sig[2] = REG_RD(bp,
4362 reg_offset + 0x8 + 0x10*index);
4363 bp->attn_group[index].sig[3] = REG_RD(bp,
4364 reg_offset + 0xc + 0x10*index);
4367 reg_offset = (port ? HC_REG_ATTN_MSG1_ADDR_L :
4368 HC_REG_ATTN_MSG0_ADDR_L);
4370 REG_WR(bp, reg_offset, U64_LO(section));
4371 REG_WR(bp, reg_offset + 4, U64_HI(section));
4373 reg_offset = (port ? HC_REG_ATTN_NUM_P1 : HC_REG_ATTN_NUM_P0);
4375 val = REG_RD(bp, reg_offset);
4377 REG_WR(bp, reg_offset, val);
4380 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4381 u_def_status_block);
4382 def_sb->u_def_status_block.status_block_id = sb_id;
4384 REG_WR(bp, BAR_USTRORM_INTMEM +
4385 USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4386 REG_WR(bp, BAR_USTRORM_INTMEM +
4387 ((USTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4389 REG_WR8(bp, BAR_USTRORM_INTMEM + DEF_USB_FUNC_OFF +
4390 USTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4392 for (index = 0; index < HC_USTORM_DEF_SB_NUM_INDICES; index++)
4393 REG_WR16(bp, BAR_USTRORM_INTMEM +
4394 USTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4397 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4398 c_def_status_block);
4399 def_sb->c_def_status_block.status_block_id = sb_id;
4401 REG_WR(bp, BAR_CSTRORM_INTMEM +
4402 CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4403 REG_WR(bp, BAR_CSTRORM_INTMEM +
4404 ((CSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4406 REG_WR8(bp, BAR_CSTRORM_INTMEM + DEF_CSB_FUNC_OFF +
4407 CSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4409 for (index = 0; index < HC_CSTORM_DEF_SB_NUM_INDICES; index++)
4410 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4411 CSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4414 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4415 t_def_status_block);
4416 def_sb->t_def_status_block.status_block_id = sb_id;
4418 REG_WR(bp, BAR_TSTRORM_INTMEM +
4419 TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4420 REG_WR(bp, BAR_TSTRORM_INTMEM +
4421 ((TSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4423 REG_WR8(bp, BAR_TSTRORM_INTMEM + DEF_TSB_FUNC_OFF +
4424 TSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4426 for (index = 0; index < HC_TSTORM_DEF_SB_NUM_INDICES; index++)
4427 REG_WR16(bp, BAR_TSTRORM_INTMEM +
4428 TSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4431 section = ((u64)mapping) + offsetof(struct host_def_status_block,
4432 x_def_status_block);
4433 def_sb->x_def_status_block.status_block_id = sb_id;
4435 REG_WR(bp, BAR_XSTRORM_INTMEM +
4436 XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func), U64_LO(section));
4437 REG_WR(bp, BAR_XSTRORM_INTMEM +
4438 ((XSTORM_DEF_SB_HOST_SB_ADDR_OFFSET(func)) + 4),
4440 REG_WR8(bp, BAR_XSTRORM_INTMEM + DEF_XSB_FUNC_OFF +
4441 XSTORM_DEF_SB_HOST_STATUS_BLOCK_OFFSET(func), func);
4443 for (index = 0; index < HC_XSTORM_DEF_SB_NUM_INDICES; index++)
4444 REG_WR16(bp, BAR_XSTRORM_INTMEM +
4445 XSTORM_DEF_SB_HC_DISABLE_OFFSET(func, index), 1);
4447 bp->stats_pending = 0;
4448 bp->set_mac_pending = 0;
4450 bnx2x_ack_sb(bp, sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
4453 static void bnx2x_update_coalesce(struct bnx2x *bp)
4455 int port = BP_PORT(bp);
4458 for_each_queue(bp, i) {
4459 int sb_id = bp->fp[i].sb_id;
4461 /* HC_INDEX_U_ETH_RX_CQ_CONS */
4462 REG_WR8(bp, BAR_USTRORM_INTMEM +
4463 USTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4464 U_SB_ETH_RX_CQ_INDEX),
4466 REG_WR16(bp, BAR_USTRORM_INTMEM +
4467 USTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4468 U_SB_ETH_RX_CQ_INDEX),
4469 (bp->rx_ticks/12) ? 0 : 1);
4471 /* HC_INDEX_C_ETH_TX_CQ_CONS */
4472 REG_WR8(bp, BAR_CSTRORM_INTMEM +
4473 CSTORM_SB_HC_TIMEOUT_OFFSET(port, sb_id,
4474 C_SB_ETH_TX_CQ_INDEX),
4476 REG_WR16(bp, BAR_CSTRORM_INTMEM +
4477 CSTORM_SB_HC_DISABLE_OFFSET(port, sb_id,
4478 C_SB_ETH_TX_CQ_INDEX),
4479 (bp->tx_ticks/12) ? 0 : 1);
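/* Sketch (example only, assuming the 12 usec HC granularity visible
 * above): the coalescing writes convert bp->rx_ticks/bp->tx_ticks
 * from microseconds into HC timeout units and disable the index
 * entirely when the value rounds down to zero.
 */
#if 0	/* example only */
static void ex_coal_params(u16 ticks_usec, u8 *timeout, u16 *disable)
{
	*timeout = ticks_usec / 12;		/* usec -> HC timeout units */
	*disable = (ticks_usec / 12) ? 0 : 1;	/* 0 -> coalescing off */
}
#endif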
4483 static inline void bnx2x_free_tpa_pool(struct bnx2x *bp,
4484 struct bnx2x_fastpath *fp, int last)
4488 for (i = 0; i < last; i++) {
4489 struct sw_rx_bd *rx_buf = &(fp->tpa_pool[i]);
4490 struct sk_buff *skb = rx_buf->skb;
4493 DP(NETIF_MSG_IFDOWN, "tpa bin %d empty on free\n", i);
4497 if (fp->tpa_state[i] == BNX2X_TPA_START)
4498 pci_unmap_single(bp->pdev,
4499 pci_unmap_addr(rx_buf, mapping),
4500 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
4507 static void bnx2x_init_rx_rings(struct bnx2x *bp)
4509 int func = BP_FUNC(bp);
4510 int max_agg_queues = CHIP_IS_E1(bp) ? ETH_MAX_AGGREGATION_QUEUES_E1 :
4511 ETH_MAX_AGGREGATION_QUEUES_E1H;
4512 u16 ring_prod, cqe_ring_prod;
4515 bp->rx_buf_size = bp->dev->mtu + ETH_OVREHEAD + BNX2X_RX_ALIGN;
4517 "mtu %d rx_buf_size %d\n", bp->dev->mtu, bp->rx_buf_size);
4519 if (bp->flags & TPA_ENABLE_FLAG) {
4521 for_each_rx_queue(bp, j) {
4522 struct bnx2x_fastpath *fp = &bp->fp[j];
4524 for (i = 0; i < max_agg_queues; i++) {
4525 fp->tpa_pool[i].skb =
4526 netdev_alloc_skb(bp->dev, bp->rx_buf_size);
4527 if (!fp->tpa_pool[i].skb) {
4528 BNX2X_ERR("Failed to allocate TPA "
4529 "skb pool for queue[%d] - "
4530 "disabling TPA on this "
4532 bnx2x_free_tpa_pool(bp, fp, i);
4533 fp->disable_tpa = 1;
4536 pci_unmap_addr_set((struct sw_rx_bd *)
4537 &bp->fp->tpa_pool[i],
4539 fp->tpa_state[i] = BNX2X_TPA_STOP;
4544 for_each_rx_queue(bp, j) {
4545 struct bnx2x_fastpath *fp = &bp->fp[j];
4548 fp->rx_cons_sb = BNX2X_RX_SB_INDEX;
4549 fp->rx_bd_cons_sb = BNX2X_RX_SB_BD_INDEX;
4551 /* "next page" elements initialization */
4553 for (i = 1; i <= NUM_RX_SGE_PAGES; i++) {
4554 struct eth_rx_sge *sge;
4556 sge = &fp->rx_sge_ring[RX_SGE_CNT * i - 2];
4558 cpu_to_le32(U64_HI(fp->rx_sge_mapping +
4559 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4561 cpu_to_le32(U64_LO(fp->rx_sge_mapping +
4562 BCM_PAGE_SIZE*(i % NUM_RX_SGE_PAGES)));
4565 bnx2x_init_sge_ring_bit_mask(fp);
4568 for (i = 1; i <= NUM_RX_RINGS; i++) {
4569 struct eth_rx_bd *rx_bd;
4571 rx_bd = &fp->rx_desc_ring[RX_DESC_CNT * i - 2];
4573 cpu_to_le32(U64_HI(fp->rx_desc_mapping +
4574 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4576 cpu_to_le32(U64_LO(fp->rx_desc_mapping +
4577 BCM_PAGE_SIZE*(i % NUM_RX_RINGS)));
4581 for (i = 1; i <= NUM_RCQ_RINGS; i++) {
4582 struct eth_rx_cqe_next_page *nextpg;
4584 nextpg = (struct eth_rx_cqe_next_page *)
4585 &fp->rx_comp_ring[RCQ_DESC_CNT * i - 1];
4587 cpu_to_le32(U64_HI(fp->rx_comp_mapping +
4588 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4590 cpu_to_le32(U64_LO(fp->rx_comp_mapping +
4591 BCM_PAGE_SIZE*(i % NUM_RCQ_RINGS)));
4594 /* Allocate SGEs and initialize the ring elements */
4595 for (i = 0, ring_prod = 0;
4596 i < MAX_RX_SGE_CNT*NUM_RX_SGE_PAGES; i++) {
4598 if (bnx2x_alloc_rx_sge(bp, fp, ring_prod) < 0) {
4599 BNX2X_ERR("was only able to allocate "
4601 BNX2X_ERR("disabling TPA for queue[%d]\n", j);
4602 /* Cleanup already allocated elements */
4603 bnx2x_free_rx_sge_range(bp, fp, ring_prod);
4604 bnx2x_free_tpa_pool(bp, fp, max_agg_queues);
4605 fp->disable_tpa = 1;
4609 ring_prod = NEXT_SGE_IDX(ring_prod);
4611 fp->rx_sge_prod = ring_prod;
4613 /* Allocate BDs and initialize BD ring */
4614 fp->rx_comp_cons = 0;
4615 cqe_ring_prod = ring_prod = 0;
4616 for (i = 0; i < bp->rx_ring_size; i++) {
4617 if (bnx2x_alloc_rx_skb(bp, fp, ring_prod) < 0) {
4618 BNX2X_ERR("was only able to allocate "
4619 "%d rx skbs on queue[%d]\n", i, j);
4620 fp->eth_q_stats.rx_skb_alloc_failed++;
4623 ring_prod = NEXT_RX_IDX(ring_prod);
4624 cqe_ring_prod = NEXT_RCQ_IDX(cqe_ring_prod);
4625 WARN_ON(ring_prod <= i);
4628 fp->rx_bd_prod = ring_prod;
4629 /* must not have more available CQEs than BDs */
4630 fp->rx_comp_prod = min((u16)(NUM_RCQ_RINGS*RCQ_DESC_CNT),
4632 fp->rx_pkt = fp->rx_calls = 0;
4635 * this will generate an interrupt (to the TSTORM)
4636 * must only be done after chip is initialized
4638 bnx2x_update_rx_prod(bp, fp, ring_prod, fp->rx_comp_prod,
4643 REG_WR(bp, BAR_USTRORM_INTMEM +
4644 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func),
4645 U64_LO(fp->rx_comp_mapping));
4646 REG_WR(bp, BAR_USTRORM_INTMEM +
4647 USTORM_MEM_WORKAROUND_ADDRESS_OFFSET(func) + 4,
4648 U64_HI(fp->rx_comp_mapping));
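/* Illustration (not driver code) of the page chaining set up above:
 * with NUM_RX_RINGS == 4 pages of RX_DESC_CNT descriptors each,
 * iteration i writes a "next page" element into the tail slots of
 * page i-1 (index RX_DESC_CNT * i - 2) pointing at page (i % 4), so
 * the pages link 0 -> 1 -> 2 -> 3 -> 0 and the ring wraps in place.
 * The SGE and RCQ rings are chained the same way.
 */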
4652 static void bnx2x_init_tx_ring(struct bnx2x *bp)
4656 for_each_tx_queue(bp, j) {
4657 struct bnx2x_fastpath *fp = &bp->fp[j];
4659 for (i = 1; i <= NUM_TX_RINGS; i++) {
4660 struct eth_tx_bd *tx_bd =
4661 &fp->tx_desc_ring[TX_DESC_CNT * i - 1];
4664 cpu_to_le32(U64_HI(fp->tx_desc_mapping +
4665 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4667 cpu_to_le32(U64_LO(fp->tx_desc_mapping +
4668 BCM_PAGE_SIZE*(i % NUM_TX_RINGS)));
4671 fp->tx_pkt_prod = 0;
4672 fp->tx_pkt_cons = 0;
4675 fp->tx_cons_sb = BNX2X_TX_SB_INDEX;
4680 static void bnx2x_init_sp_ring(struct bnx2x *bp)
4682 int func = BP_FUNC(bp);
4684 spin_lock_init(&bp->spq_lock);
4686 bp->spq_left = MAX_SPQ_PENDING;
4687 bp->spq_prod_idx = 0;
4688 bp->dsb_sp_prod = BNX2X_SP_DSB_INDEX;
4689 bp->spq_prod_bd = bp->spq;
4690 bp->spq_last_bd = bp->spq_prod_bd + MAX_SP_DESC_CNT;
4692 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func),
4693 U64_LO(bp->spq_mapping));
4695 XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PAGE_BASE_OFFSET(func) + 4,
4696 U64_HI(bp->spq_mapping));
4698 REG_WR(bp, XSEM_REG_FAST_MEMORY + XSTORM_SPQ_PROD_OFFSET(func),
4702 static void bnx2x_init_context(struct bnx2x *bp)
4706 for_each_queue(bp, i) {
4707 struct eth_context *context = bnx2x_sp(bp, context[i].eth);
4708 struct bnx2x_fastpath *fp = &bp->fp[i];
4709 u8 cl_id = fp->cl_id;
4710 u8 sb_id = fp->sb_id;
4712 context->ustorm_st_context.common.sb_index_numbers =
4713 BNX2X_RX_SB_INDEX_NUM;
4714 context->ustorm_st_context.common.clientId = cl_id;
4715 context->ustorm_st_context.common.status_block_id = sb_id;
4716 context->ustorm_st_context.common.flags =
4717 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_MC_ALIGNMENT |
4718 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_STATISTICS);
4719 context->ustorm_st_context.common.statistics_counter_id =
4721 context->ustorm_st_context.common.mc_alignment_log_size =
4722 BNX2X_RX_ALIGN_SHIFT;
4723 context->ustorm_st_context.common.bd_buff_size =
4725 context->ustorm_st_context.common.bd_page_base_hi =
4726 U64_HI(fp->rx_desc_mapping);
4727 context->ustorm_st_context.common.bd_page_base_lo =
4728 U64_LO(fp->rx_desc_mapping);
4729 if (!fp->disable_tpa) {
4730 context->ustorm_st_context.common.flags |=
4731 (USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_TPA |
4732 USTORM_ETH_ST_CONTEXT_CONFIG_ENABLE_SGE_RING);
4733 context->ustorm_st_context.common.sge_buff_size =
4734 (u16)min((u32)SGE_PAGE_SIZE*PAGES_PER_SGE,
4736 context->ustorm_st_context.common.sge_page_base_hi =
4737 U64_HI(fp->rx_sge_mapping);
4738 context->ustorm_st_context.common.sge_page_base_lo =
4739 U64_LO(fp->rx_sge_mapping);
4742 context->ustorm_ag_context.cdu_usage =
4743 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4744 CDU_REGION_NUMBER_UCM_AG,
4745 ETH_CONNECTION_TYPE);
4747 context->xstorm_st_context.tx_bd_page_base_hi =
4748 U64_HI(fp->tx_desc_mapping);
4749 context->xstorm_st_context.tx_bd_page_base_lo =
4750 U64_LO(fp->tx_desc_mapping);
4751 context->xstorm_st_context.db_data_addr_hi =
4752 U64_HI(fp->tx_prods_mapping);
4753 context->xstorm_st_context.db_data_addr_lo =
4754 U64_LO(fp->tx_prods_mapping);
4755 context->xstorm_st_context.statistics_data = (cl_id |
4756 XSTORM_ETH_ST_CONTEXT_STATISTICS_ENABLE);
4757 context->cstorm_st_context.sb_index_number =
4758 C_SB_ETH_TX_CQ_INDEX;
4759 context->cstorm_st_context.status_block_id = sb_id;
4761 context->xstorm_ag_context.cdu_reserved =
4762 CDU_RSRVD_VALUE_TYPE_A(HW_CID(bp, i),
4763 CDU_REGION_NUMBER_XCM_AG,
4764 ETH_CONNECTION_TYPE);
4768 static void bnx2x_init_ind_table(struct bnx2x *bp)
4770 int func = BP_FUNC(bp);
4773 if (bp->multi_mode == ETH_RSS_MODE_DISABLED)
4777 "Initializing indirection table multi_mode %d\n", bp->multi_mode);
4778 for (i = 0; i < TSTORM_INDIRECTION_TABLE_SIZE; i++)
4779 REG_WR8(bp, BAR_TSTRORM_INTMEM +
4780 TSTORM_INDIRECTION_TABLE_OFFSET(func) + i,
4781 bp->fp->cl_id + (i % bp->num_rx_queues));
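/* Sketch (example only) of the round-robin mapping programmed above:
 * each of the TSTORM_INDIRECTION_TABLE_SIZE entries selects a client
 * id, cycling over the RX queues.
 */
#if 0	/* example only */
static u8 ex_ind_entry(u8 base_cl_id, int i, int num_rx_queues)
{
	/* with 4 RX queues and base 0: 0,1,2,3,0,1,2,3,... */
	return base_cl_id + (i % num_rx_queues);
}
#endif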
4784 static void bnx2x_set_client_config(struct bnx2x *bp)
4786 struct tstorm_eth_client_config tstorm_client = {0};
4787 int port = BP_PORT(bp);
4790 tstorm_client.mtu = bp->dev->mtu;
4791 tstorm_client.config_flags =
4792 (TSTORM_ETH_CLIENT_CONFIG_STATSITICS_ENABLE |
4793 TSTORM_ETH_CLIENT_CONFIG_E1HOV_REM_ENABLE);
4795 if (bp->rx_mode && bp->vlgrp && (bp->flags & HW_VLAN_RX_FLAG)) {
4796 tstorm_client.config_flags |=
4797 TSTORM_ETH_CLIENT_CONFIG_VLAN_REM_ENABLE;
4798 DP(NETIF_MSG_IFUP, "vlan removal enabled\n");
4802 if (bp->flags & TPA_ENABLE_FLAG) {
4803 tstorm_client.max_sges_for_packet =
4804 SGE_PAGE_ALIGN(tstorm_client.mtu) >> SGE_PAGE_SHIFT;
4805 tstorm_client.max_sges_for_packet =
4806 ((tstorm_client.max_sges_for_packet +
4807 PAGES_PER_SGE - 1) & (~(PAGES_PER_SGE - 1))) >>
4808 PAGES_PER_SGE_SHIFT;
4810 tstorm_client.config_flags |=
4811 TSTORM_ETH_CLIENT_CONFIG_ENABLE_SGE_RING;
4814 for_each_queue(bp, i) {
4815 tstorm_client.statistics_counter_id = bp->fp[i].cl_id;
4817 REG_WR(bp, BAR_TSTRORM_INTMEM +
4818 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id),
4819 ((u32 *)&tstorm_client)[0]);
4820 REG_WR(bp, BAR_TSTRORM_INTMEM +
4821 TSTORM_CLIENT_CONFIG_OFFSET(port, bp->fp[i].cl_id) + 4,
4822 ((u32 *)&tstorm_client)[1]);
4825 DP(BNX2X_MSG_OFF, "tstorm_client: 0x%08x 0x%08x\n",
4826 ((u32 *)&tstorm_client)[0], ((u32 *)&tstorm_client)[1]);
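/* Worked example (illustration only, assuming 4K SGE pages,
 * PAGES_PER_SGE == 2 and PAGES_PER_SGE_SHIFT == 1) for the
 * max_sges_for_packet math above with mtu == 9000:
 *   page-align:  9000 -> 12288 bytes -> 3 pages
 *   round up to a PAGES_PER_SGE multiple and divide:
 *   ((3 + 1) & ~1) >> 1 == 2 SGEs per aggregated packet
 */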
4829 static void bnx2x_set_storm_rx_mode(struct bnx2x *bp)
4831 struct tstorm_eth_mac_filter_config tstorm_mac_filter = {0};
4832 int mode = bp->rx_mode;
4833 int mask = (1 << BP_L_ID(bp));
4834 int func = BP_FUNC(bp);
4835 int port = BP_PORT(bp);
4837 /* All but management unicast packets should pass to the host as well */
4839 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_BRCST |
4840 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_MLCST |
4841 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_VLAN |
4842 NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_NO_VLAN;
4844 DP(NETIF_MSG_IFUP, "rx mode %d mask 0x%x\n", mode, mask);
4847 case BNX2X_RX_MODE_NONE: /* no Rx */
4848 tstorm_mac_filter.ucast_drop_all = mask;
4849 tstorm_mac_filter.mcast_drop_all = mask;
4850 tstorm_mac_filter.bcast_drop_all = mask;
4853 case BNX2X_RX_MODE_NORMAL:
4854 tstorm_mac_filter.bcast_accept_all = mask;
4857 case BNX2X_RX_MODE_ALLMULTI:
4858 tstorm_mac_filter.mcast_accept_all = mask;
4859 tstorm_mac_filter.bcast_accept_all = mask;
4862 case BNX2X_RX_MODE_PROMISC:
4863 tstorm_mac_filter.ucast_accept_all = mask;
4864 tstorm_mac_filter.mcast_accept_all = mask;
4865 tstorm_mac_filter.bcast_accept_all = mask;
4866 /* pass management unicast packets as well */
4867 llh_mask |= NIG_LLH0_BRB1_DRV_MASK_REG_LLH0_BRB1_DRV_MASK_UNCST;
4871 BNX2X_ERR("BAD rx mode (%d)\n", mode);
4876 (port ? NIG_REG_LLH1_BRB1_DRV_MASK : NIG_REG_LLH0_BRB1_DRV_MASK),
4879 for (i = 0; i < sizeof(struct tstorm_eth_mac_filter_config)/4; i++) {
4880 REG_WR(bp, BAR_TSTRORM_INTMEM +
4881 TSTORM_MAC_FILTER_CONFIG_OFFSET(func) + i * 4,
4882 ((u32 *)&tstorm_mac_filter)[i]);
4884 /* DP(NETIF_MSG_IFUP, "tstorm_mac_filter[%d]: 0x%08x\n", i,
4885 ((u32 *)&tstorm_mac_filter)[i]); */
4888 if (mode != BNX2X_RX_MODE_NONE)
4889 bnx2x_set_client_config(bp);
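/* Illustration (not driver code): "mask" above is a one-hot bit per
 * logical client, so for BP_L_ID(bp) == 3 the NONE mode ORs 0x8 into
 * each of the ucast/mcast/bcast_drop_all fields, dropping only that
 * client's traffic while leaving other functions' filter bits alone.
 */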
4892 static void bnx2x_init_internal_common(struct bnx2x *bp)
4896 if (bp->flags & TPA_ENABLE_FLAG) {
4897 struct tstorm_eth_tpa_exist tpa = {0};
4901 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET,
4903 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_TPA_EXIST_OFFSET + 4,
4907 /* Zero this manually as its initialization is
4908 currently missing in the initTool */
4909 for (i = 0; i < (USTORM_AGG_DATA_SIZE >> 2); i++)
4910 REG_WR(bp, BAR_USTRORM_INTMEM +
4911 USTORM_AGG_DATA_OFFSET + i * 4, 0);
4914 static void bnx2x_init_internal_port(struct bnx2x *bp)
4916 int port = BP_PORT(bp);
4918 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4919 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4920 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4921 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_HC_BTR_OFFSET(port), BNX2X_BTR);
4924 /* Calculates the sum of vn_min_rates.
4925 It's needed for further normalizing of the min_rates.
4927 Returns: sum of vn_min_rates,
4929 or 0 - if all the min_rates are 0.
4930 In the latter case the fairness algorithm should be deactivated.
4931 If not all min_rates are zero then those that are zeroes will be set to 1.
4933 static void bnx2x_calc_vn_weight_sum(struct bnx2x *bp)
4936 int port = BP_PORT(bp);
4939 bp->vn_weight_sum = 0;
4940 for (vn = VN_0; vn < E1HVN_MAX; vn++) {
4941 int func = 2*vn + port;
4943 SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
4944 u32 vn_min_rate = ((vn_cfg & FUNC_MF_CFG_MIN_BW_MASK) >>
4945 FUNC_MF_CFG_MIN_BW_SHIFT) * 100;
4947 /* Skip hidden vns */
4948 if (vn_cfg & FUNC_MF_CFG_FUNC_HIDE)
4951 /* If min rate is zero - set it to 1 */
4953 vn_min_rate = DEF_MIN_RATE;
4957 bp->vn_weight_sum += vn_min_rate;
4960 /* ... only if all min rates are zeros - disable fairness */
4962 bp->vn_weight_sum = 0;
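/* Worked example (illustration only): with four vnics whose
 * configured min rates come out as {2500, 0, 5000, 0} after the *100
 * scaling, the zero entries are bumped to DEF_MIN_RATE, giving
 * vn_weight_sum = 2500 + 5000 + 2*DEF_MIN_RATE -- unless every rate
 * was zero, in which case the sum is forced back to 0 and fairness
 * stays disabled.
 */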
4965 static void bnx2x_init_internal_func(struct bnx2x *bp)
4967 struct tstorm_eth_function_common_config tstorm_config = {0};
4968 struct stats_indication_flags stats_flags = {0};
4969 int port = BP_PORT(bp);
4970 int func = BP_FUNC(bp);
4976 tstorm_config.config_flags = MULTI_FLAGS(bp);
4977 tstorm_config.rss_result_mask = MULTI_MASK;
4980 tstorm_config.config_flags |=
4981 TSTORM_ETH_FUNCTION_COMMON_CONFIG_E1HOV_IN_CAM;
4983 tstorm_config.leading_client_id = BP_L_ID(bp);
4985 REG_WR(bp, BAR_TSTRORM_INTMEM +
4986 TSTORM_FUNCTION_COMMON_CONFIG_OFFSET(func),
4987 (*(u32 *)&tstorm_config));
4989 bp->rx_mode = BNX2X_RX_MODE_NONE; /* no rx until link is up */
4990 bnx2x_set_storm_rx_mode(bp);
4992 for_each_queue(bp, i) {
4993 u8 cl_id = bp->fp[i].cl_id;
4995 /* reset xstorm per client statistics */
4996 offset = BAR_XSTRORM_INTMEM +
4997 XSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
4999 j < sizeof(struct xstorm_per_client_stats) / 4; j++)
5000 REG_WR(bp, offset + j*4, 0);
5002 /* reset tstorm per client statistics */
5003 offset = BAR_TSTRORM_INTMEM +
5004 TSTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5006 j < sizeof(struct tstorm_per_client_stats) / 4; j++)
5007 REG_WR(bp, offset + j*4, 0);
5009 /* reset ustorm per client statistics */
5010 offset = BAR_USTRORM_INTMEM +
5011 USTORM_PER_COUNTER_ID_STATS_OFFSET(port, cl_id);
5013 j < sizeof(struct ustorm_per_client_stats) / 4; j++)
5014 REG_WR(bp, offset + j*4, 0);
5017 /* Init statistics related context */
5018 stats_flags.collect_eth = 1;
5020 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func),
5021 ((u32 *)&stats_flags)[0]);
5022 REG_WR(bp, BAR_XSTRORM_INTMEM + XSTORM_STATS_FLAGS_OFFSET(func) + 4,
5023 ((u32 *)&stats_flags)[1]);
5025 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func),
5026 ((u32 *)&stats_flags)[0]);
5027 REG_WR(bp, BAR_TSTRORM_INTMEM + TSTORM_STATS_FLAGS_OFFSET(func) + 4,
5028 ((u32 *)&stats_flags)[1]);
5030 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func),
5031 ((u32 *)&stats_flags)[0]);
5032 REG_WR(bp, BAR_USTRORM_INTMEM + USTORM_STATS_FLAGS_OFFSET(func) + 4,
5033 ((u32 *)&stats_flags)[1]);
5035 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func),
5036 ((u32 *)&stats_flags)[0]);
5037 REG_WR(bp, BAR_CSTRORM_INTMEM + CSTORM_STATS_FLAGS_OFFSET(func) + 4,
5038 ((u32 *)&stats_flags)[1]);
5040 REG_WR(bp, BAR_XSTRORM_INTMEM +
5041 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5042 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5043 REG_WR(bp, BAR_XSTRORM_INTMEM +
5044 XSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5045 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5047 REG_WR(bp, BAR_TSTRORM_INTMEM +
5048 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5049 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5050 REG_WR(bp, BAR_TSTRORM_INTMEM +
5051 TSTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5052 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5054 REG_WR(bp, BAR_USTRORM_INTMEM +
5055 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func),
5056 U64_LO(bnx2x_sp_mapping(bp, fw_stats)));
5057 REG_WR(bp, BAR_USTRORM_INTMEM +
5058 USTORM_ETH_STATS_QUERY_ADDR_OFFSET(func) + 4,
5059 U64_HI(bnx2x_sp_mapping(bp, fw_stats)));
5061 if (CHIP_IS_E1H(bp)) {
5062 REG_WR8(bp, BAR_XSTRORM_INTMEM + XSTORM_FUNCTION_MODE_OFFSET,
5064 REG_WR8(bp, BAR_TSTRORM_INTMEM + TSTORM_FUNCTION_MODE_OFFSET,
5066 REG_WR8(bp, BAR_CSTRORM_INTMEM + CSTORM_FUNCTION_MODE_OFFSET,
5068 REG_WR8(bp, BAR_USTRORM_INTMEM + USTORM_FUNCTION_MODE_OFFSET,
5071 REG_WR16(bp, BAR_XSTRORM_INTMEM + XSTORM_E1HOV_OFFSET(func),
5075 /* Init CQ ring mapping and aggregation size, the FW limit is 8 frags */
5077 min((u32)(min((u32)8, (u32)MAX_SKB_FRAGS) *
5078 SGE_PAGE_SIZE * PAGES_PER_SGE),
5080 for_each_rx_queue(bp, i) {
5081 struct bnx2x_fastpath *fp = &bp->fp[i];
5083 REG_WR(bp, BAR_USTRORM_INTMEM +
5084 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id),
5085 U64_LO(fp->rx_comp_mapping));
5086 REG_WR(bp, BAR_USTRORM_INTMEM +
5087 USTORM_CQE_PAGE_BASE_OFFSET(port, fp->cl_id) + 4,
5088 U64_HI(fp->rx_comp_mapping));
5090 REG_WR16(bp, BAR_USTRORM_INTMEM +
5091 USTORM_MAX_AGG_SIZE_OFFSET(port, fp->cl_id),
5095 /* dropless flow control */
5096 if (CHIP_IS_E1H(bp)) {
5097 struct ustorm_eth_rx_pause_data_e1h rx_pause = {0};
5099 rx_pause.bd_thr_low = 250;
5100 rx_pause.cqe_thr_low = 250;
5102 rx_pause.sge_thr_low = 0;
5103 rx_pause.bd_thr_high = 350;
5104 rx_pause.cqe_thr_high = 350;
5105 rx_pause.sge_thr_high = 0;
5107 for_each_rx_queue(bp, i) {
5108 struct bnx2x_fastpath *fp = &bp->fp[i];
5110 if (!fp->disable_tpa) {
5111 rx_pause.sge_thr_low = 150;
5112 rx_pause.sge_thr_high = 250;
5116 offset = BAR_USTRORM_INTMEM +
5117 USTORM_ETH_RING_PAUSE_DATA_OFFSET(port,
5120 j < sizeof(struct ustorm_eth_rx_pause_data_e1h)/4;
5122 REG_WR(bp, offset + j*4,
5123 ((u32 *)&rx_pause)[j]);
5127 memset(&(bp->cmng), 0, sizeof(struct cmng_struct_per_port));
5129 /* Init rate shaping and fairness contexts */
5133 /* During init there is no active link
5134 Until link is up, set link rate to 10Gbps */
5135 bp->link_vars.line_speed = SPEED_10000;
5136 bnx2x_init_port_minmax(bp);
5138 bnx2x_calc_vn_weight_sum(bp);
5140 for (vn = VN_0; vn < E1HVN_MAX; vn++)
5141 bnx2x_init_vn_minmax(bp, 2*vn + port);
5143 /* Enable rate shaping and fairness */
5144 bp->cmng.flags.cmng_enables =
5145 CMNG_FLAGS_PER_PORT_RATE_SHAPING_VN;
5146 if (bp->vn_weight_sum)
5147 bp->cmng.flags.cmng_enables |=
5148 CMNG_FLAGS_PER_PORT_FAIRNESS_VN;
5150 DP(NETIF_MSG_IFUP, "All MIN values are zeroes;"
5151 " fairness will be disabled\n");
5153 /* rate shaping and fairness are disabled */
5155 "single function mode minmax will be disabled\n");
5159 /* Store it to internal memory */
5161 for (i = 0; i < sizeof(struct cmng_struct_per_port) / 4; i++)
5162 REG_WR(bp, BAR_XSTRORM_INTMEM +
5163 XSTORM_CMNG_PER_PORT_VARS_OFFSET(port) + i * 4,
5164 ((u32 *)(&bp->cmng))[i]);
5167 static void bnx2x_init_internal(struct bnx2x *bp, u32 load_code)
5169 switch (load_code) {
5170 case FW_MSG_CODE_DRV_LOAD_COMMON:
5171 bnx2x_init_internal_common(bp);
5174 case FW_MSG_CODE_DRV_LOAD_PORT:
5175 bnx2x_init_internal_port(bp);
5178 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
5179 bnx2x_init_internal_func(bp);
5183 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
5188 static void bnx2x_nic_init(struct bnx2x *bp, u32 load_code)
5192 for_each_queue(bp, i) {
5193 struct bnx2x_fastpath *fp = &bp->fp[i];
5196 fp->state = BNX2X_FP_STATE_CLOSED;
5198 fp->cl_id = BP_L_ID(bp) + i;
5199 fp->sb_id = fp->cl_id;
5201 "queue[%d]: bnx2x_init_sb(%p,%p) cl_id %d sb %d\n",
5202 i, bp, fp->status_blk, fp->cl_id, fp->sb_id);
5203 bnx2x_init_sb(bp, fp->status_blk, fp->status_blk_mapping,
5205 bnx2x_update_fpsb_idx(fp);
5208 /* ensure status block indices were read */
5212 bnx2x_init_def_sb(bp, bp->def_status_blk, bp->def_status_blk_mapping,
5214 bnx2x_update_dsb_idx(bp);
5215 bnx2x_update_coalesce(bp);
5216 bnx2x_init_rx_rings(bp);
5217 bnx2x_init_tx_ring(bp);
5218 bnx2x_init_sp_ring(bp);
5219 bnx2x_init_context(bp);
5220 bnx2x_init_internal(bp, load_code);
5221 bnx2x_init_ind_table(bp);
5222 bnx2x_stats_init(bp);
5224 /* At this point, we are ready for interrupts */
5225 atomic_set(&bp->intr_sem, 0);
5227 /* flush all before enabling interrupts */
5231 bnx2x_int_enable(bp);
5233 /* Check for SPIO5 */
5234 bnx2x_attn_int_deasserted0(bp,
5235 REG_RD(bp, MISC_REG_AEU_AFTER_INVERT_1_FUNC_0 + BP_PORT(bp)*4) &
5236 AEU_INPUTS_ATTN_BITS_SPIO5);
5239 /* end of nic init */
5242 * gzip service functions
5245 static int bnx2x_gunzip_init(struct bnx2x *bp)
5247 bp->gunzip_buf = pci_alloc_consistent(bp->pdev, FW_BUF_SIZE,
5248 &bp->gunzip_mapping);
5249 if (bp->gunzip_buf == NULL)
5252 bp->strm = kmalloc(sizeof(*bp->strm), GFP_KERNEL);
5253 if (bp->strm == NULL)
5256 bp->strm->workspace = kmalloc(zlib_inflate_workspacesize(),
5258 if (bp->strm->workspace == NULL)
5268 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5269 bp->gunzip_mapping);
5270 bp->gunzip_buf = NULL;
5273 printk(KERN_ERR PFX "%s: Cannot allocate firmware buffer for"
5274 " un-compression\n", bp->dev->name);
5278 static void bnx2x_gunzip_end(struct bnx2x *bp)
5280 kfree(bp->strm->workspace);
5285 if (bp->gunzip_buf) {
5286 pci_free_consistent(bp->pdev, FW_BUF_SIZE, bp->gunzip_buf,
5287 bp->gunzip_mapping);
5288 bp->gunzip_buf = NULL;
5292 static int bnx2x_gunzip(struct bnx2x *bp, const u8 *zbuf, int len)
5296 /* check gzip header */
5297 if ((zbuf[0] != 0x1f) || (zbuf[1] != 0x8b) || (zbuf[2] != Z_DEFLATED)) {
5298 BNX2X_ERR("Bad gzip header\n");
5306 if (zbuf[3] & FNAME)
5307 while ((zbuf[n++] != 0) && (n < len));
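/* Per RFC 1952 the fixed gzip header is 10 bytes; when the FNAME
 * flag (0x08) is set, a NUL-terminated original file name follows
 * it before the raw deflate stream -- e.g. for the name "fw" the
 * payload starts at offset 10 + 3 = 13.  The loop above skips
 * that name field.
 */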
5309 bp->strm->next_in = (typeof(bp->strm->next_in))zbuf + n;
5310 bp->strm->avail_in = len - n;
5311 bp->strm->next_out = bp->gunzip_buf;
5312 bp->strm->avail_out = FW_BUF_SIZE;
5314 rc = zlib_inflateInit2(bp->strm, -MAX_WBITS);
5318 rc = zlib_inflate(bp->strm, Z_FINISH);
5319 if ((rc != Z_OK) && (rc != Z_STREAM_END))
5320 printk(KERN_ERR PFX "%s: Firmware decompression error: %s\n",
5321 bp->dev->name, bp->strm->msg);
5323 bp->gunzip_outlen = (FW_BUF_SIZE - bp->strm->avail_out);
5324 if (bp->gunzip_outlen & 0x3)
5325 printk(KERN_ERR PFX "%s: Firmware decompression error:"
5326 " gunzip_outlen (%d) not aligned\n",
5327 bp->dev->name, bp->gunzip_outlen);
5328 bp->gunzip_outlen >>= 2;
5330 zlib_inflateEnd(bp->strm);
5332 if (rc == Z_STREAM_END)
5338 /* nic load/unload */
5341 * General service functions
5344 /* send a NIG loopback debug packet */
5345 static void bnx2x_lb_pckt(struct bnx2x *bp)
5349 /* Ethernet source and destination addresses */
5350 wb_write[0] = 0x55555555;
5351 wb_write[1] = 0x55555555;
5352 wb_write[2] = 0x20; /* SOP */
5353 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5355 /* NON-IP protocol */
5356 wb_write[0] = 0x09000000;
5357 wb_write[1] = 0x55555555;
5358 wb_write[2] = 0x10; /* EOP, eop_bvalid = 0 */
5359 REG_WR_DMAE(bp, NIG_REG_DEBUG_PACKET_LB, wb_write, 3);
5362 /* some of the internal memories
5363 * are not directly readable from the driver;
5364 * to test them we send debug packets
5366 static int bnx2x_int_mem_test(struct bnx2x *bp)
5372 if (CHIP_REV_IS_FPGA(bp))
5374 else if (CHIP_REV_IS_EMUL(bp))
5379 DP(NETIF_MSG_HW, "start part1\n");
5381 /* Disable inputs of parser neighbor blocks */
5382 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5383 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5384 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5385 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5387 /* Write 0 to parser credits for CFC search request */
5388 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5390 /* send Ethernet packet */
5393 /* TODO: do I reset the NIG statistics? */
5394 /* Wait until NIG register shows 1 packet of size 0x10 */
5395 count = 1000 * factor;
5398 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5399 val = *bnx2x_sp(bp, wb_data[0]);
5407 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5411 /* Wait until PRS register shows 1 packet */
5412 count = 1000 * factor;
5414 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5422 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5426 /* Reset and init BRB, PRS */
5427 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5429 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5431 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5432 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5434 DP(NETIF_MSG_HW, "part2\n");
5436 /* Disable inputs of parser neighbor blocks */
5437 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x0);
5438 REG_WR(bp, TCM_REG_PRS_IFEN, 0x0);
5439 REG_WR(bp, CFC_REG_DEBUG0, 0x1);
5440 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x0);
5442 /* Write 0 to parser credits for CFC search request */
5443 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x0);
5445 /* send 10 Ethernet packets */
5446 for (i = 0; i < 10; i++)
5449 /* Wait until NIG register shows 10 + 1
5450 packets of size 11*0x10 = 0xb0 */
5451 count = 1000 * factor;
5454 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5455 val = *bnx2x_sp(bp, wb_data[0]);
5463 BNX2X_ERR("NIG timeout val = 0x%x\n", val);
5467 /* Wait until PRS register shows 2 packets */
5468 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5470 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5472 /* Write 1 to parser credits for CFC search request */
5473 REG_WR(bp, PRS_REG_CFC_SEARCH_INITIAL_CREDIT, 0x1);
5475 /* Wait until PRS register shows 3 packets */
5476 msleep(10 * factor);
5477 /* Wait until NIG register shows 1 packet of size 0x10 */
5478 val = REG_RD(bp, PRS_REG_NUM_OF_PACKETS);
5480 BNX2X_ERR("PRS timeout val = 0x%x\n", val);
5482 /* clear NIG EOP FIFO */
5483 for (i = 0; i < 11; i++)
5484 REG_RD(bp, NIG_REG_INGRESS_EOP_LB_FIFO);
5485 val = REG_RD(bp, NIG_REG_INGRESS_EOP_LB_EMPTY);
5487 BNX2X_ERR("clear of NIG failed\n");
5491 /* Reset and init BRB, PRS, NIG */
5492 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR, 0x03);
5494 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0x03);
5496 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5497 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5500 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5503 /* Enable inputs of parser neighbor blocks */
5504 REG_WR(bp, TSDM_REG_ENABLE_IN1, 0x7fffffff);
5505 REG_WR(bp, TCM_REG_PRS_IFEN, 0x1);
5506 REG_WR(bp, CFC_REG_DEBUG0, 0x0);
5507 REG_WR(bp, NIG_REG_PRS_REQ_IN_EN, 0x1);
5509 DP(NETIF_MSG_HW, "done\n");
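/* Sketch (example only) of the poll-with-timeout idiom used
 * throughout the test above: read a register up to "count" times,
 * sleeping between reads, with "count" scaled by a factor on
 * FPGA/emulation platforms.
 */
#if 0	/* example only */
static int ex_poll_reg(struct bnx2x *bp, u32 reg, u32 expected, int count)
{
	while (count--) {
		if (REG_RD(bp, reg) == expected)
			return 0;
		msleep(10);
	}
	return -EAGAIN;		/* timed out */
}
#endif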
5514 static void enable_blocks_attention(struct bnx2x *bp)
5516 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5517 REG_WR(bp, PXP_REG_PXP_INT_MASK_1, 0);
5518 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5519 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5520 REG_WR(bp, QM_REG_QM_INT_MASK, 0);
5521 REG_WR(bp, TM_REG_TM_INT_MASK, 0);
5522 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_0, 0);
5523 REG_WR(bp, XSDM_REG_XSDM_INT_MASK_1, 0);
5524 REG_WR(bp, XCM_REG_XCM_INT_MASK, 0);
5525 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_0, 0); */
5526 /* REG_WR(bp, XSEM_REG_XSEM_INT_MASK_1, 0); */
5527 REG_WR(bp, USDM_REG_USDM_INT_MASK_0, 0);
5528 REG_WR(bp, USDM_REG_USDM_INT_MASK_1, 0);
5529 REG_WR(bp, UCM_REG_UCM_INT_MASK, 0);
5530 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_0, 0); */
5531 /* REG_WR(bp, USEM_REG_USEM_INT_MASK_1, 0); */
5532 REG_WR(bp, GRCBASE_UPB + PB_REG_PB_INT_MASK, 0);
5533 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_0, 0);
5534 REG_WR(bp, CSDM_REG_CSDM_INT_MASK_1, 0);
5535 REG_WR(bp, CCM_REG_CCM_INT_MASK, 0);
5536 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_0, 0); */
5537 /* REG_WR(bp, CSEM_REG_CSEM_INT_MASK_1, 0); */
5538 if (CHIP_REV_IS_FPGA(bp))
5539 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x580000);
5541 REG_WR(bp, PXP2_REG_PXP2_INT_MASK_0, 0x480000);
5542 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_0, 0);
5543 REG_WR(bp, TSDM_REG_TSDM_INT_MASK_1, 0);
5544 REG_WR(bp, TCM_REG_TCM_INT_MASK, 0);
5545 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_0, 0); */
5546 /* REG_WR(bp, TSEM_REG_TSEM_INT_MASK_1, 0); */
5547 REG_WR(bp, CDU_REG_CDU_INT_MASK, 0);
5548 REG_WR(bp, DMAE_REG_DMAE_INT_MASK, 0);
5549 /* REG_WR(bp, MISC_REG_MISC_INT_MASK, 0); */
5550 REG_WR(bp, PBF_REG_PBF_INT_MASK, 0x18); /* bits 3 and 4 masked */
5554 static void bnx2x_reset_common(struct bnx2x *bp)
5557 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5559 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR, 0x1403);
5563 static void bnx2x_setup_fan_failure_detection(struct bnx2x *bp)
5569 val = SHMEM_RD(bp, dev_info.shared_hw_config.config2) &
5570 SHARED_HW_CFG_FAN_FAILURE_MASK;
5572 if (val == SHARED_HW_CFG_FAN_FAILURE_ENABLED)
5576 * The fan failure mechanism is usually related to the PHY type since
5577 * the power consumption of the board is affected by the PHY. Currently,
5578 * fan is required for most designs with SFX7101, BCM8727 and BCM8481.
5580 else if (val == SHARED_HW_CFG_FAN_FAILURE_PHY_TYPE)
5581 for (port = PORT_0; port < PORT_MAX; port++) {
5583 SHMEM_RD(bp, dev_info.port_hw_config[port].
5584 external_phy_config) &
5585 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
5588 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101) ||
5590 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727) ||
5592 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481));
5595 DP(NETIF_MSG_HW, "fan detection setting: %d\n", is_required);
5597 if (is_required == 0)
5600 /* Fan failure is indicated by SPIO 5 */
5601 bnx2x_set_spio(bp, MISC_REGISTERS_SPIO_5,
5602 MISC_REGISTERS_SPIO_INPUT_HI_Z);
5604 /* set to active low mode */
5605 val = REG_RD(bp, MISC_REG_SPIO_INT);
5606 val |= ((1 << MISC_REGISTERS_SPIO_5) <<
5607 MISC_REGISTERS_SPIO_INT_OLD_SET_POS);
5608 REG_WR(bp, MISC_REG_SPIO_INT, val);
5610 /* enable interrupt to signal the IGU */
5611 val = REG_RD(bp, MISC_REG_SPIO_EVENT_EN);
5612 val |= (1 << MISC_REGISTERS_SPIO_5);
5613 REG_WR(bp, MISC_REG_SPIO_EVENT_EN, val);
5616 static int bnx2x_init_common(struct bnx2x *bp)
5620 DP(BNX2X_MSG_MCP, "starting common init func %d\n", BP_FUNC(bp));
5622 bnx2x_reset_common(bp);
5623 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET, 0xffffffff);
5624 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_SET, 0xfffc);
5626 bnx2x_init_block(bp, MISC_BLOCK, COMMON_STAGE);
5627 if (CHIP_IS_E1H(bp))
5628 REG_WR(bp, MISC_REG_E1HMF_MODE, IS_E1HMF(bp));
5630 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x100);
5632 REG_WR(bp, MISC_REG_LCPLL_CTRL_REG_2, 0x0);
5634 bnx2x_init_block(bp, PXP_BLOCK, COMMON_STAGE);
5635 if (CHIP_IS_E1(bp)) {
5636 /* enable HW interrupt from PXP on USDM overflow
5637 bit 16 on INT_MASK_0 */
5638 REG_WR(bp, PXP_REG_PXP_INT_MASK_0, 0);
5641 bnx2x_init_block(bp, PXP2_BLOCK, COMMON_STAGE);
5645 REG_WR(bp, PXP2_REG_RQ_QM_ENDIAN_M, 1);
5646 REG_WR(bp, PXP2_REG_RQ_TM_ENDIAN_M, 1);
5647 REG_WR(bp, PXP2_REG_RQ_SRC_ENDIAN_M, 1);
5648 REG_WR(bp, PXP2_REG_RQ_CDU_ENDIAN_M, 1);
5649 REG_WR(bp, PXP2_REG_RQ_DBG_ENDIAN_M, 1);
5650 /* make sure this value is 0 */
5651 REG_WR(bp, PXP2_REG_RQ_HC_ENDIAN_M, 0);
5653 /* REG_WR(bp, PXP2_REG_RD_PBF_SWAP_MODE, 1); */
5654 REG_WR(bp, PXP2_REG_RD_QM_SWAP_MODE, 1);
5655 REG_WR(bp, PXP2_REG_RD_TM_SWAP_MODE, 1);
5656 REG_WR(bp, PXP2_REG_RD_SRC_SWAP_MODE, 1);
5657 REG_WR(bp, PXP2_REG_RD_CDURD_SWAP_MODE, 1);
5660 REG_WR(bp, PXP2_REG_RQ_CDU_P_SIZE, 2);
5662 REG_WR(bp, PXP2_REG_RQ_TM_P_SIZE, 5);
5663 REG_WR(bp, PXP2_REG_RQ_QM_P_SIZE, 5);
5664 REG_WR(bp, PXP2_REG_RQ_SRC_P_SIZE, 5);
5667 if (CHIP_REV_IS_FPGA(bp) && CHIP_IS_E1H(bp))
5668 REG_WR(bp, PXP2_REG_PGL_TAGS_LIMIT, 0x1);
5670 /* let the HW do its magic ... */
5672 /* finish PXP init */
5673 val = REG_RD(bp, PXP2_REG_RQ_CFG_DONE);
5675 BNX2X_ERR("PXP2 CFG failed\n");
5678 val = REG_RD(bp, PXP2_REG_RD_INIT_DONE);
5680 BNX2X_ERR("PXP2 RD_INIT failed\n");
5684 REG_WR(bp, PXP2_REG_RQ_DISABLE_INPUTS, 0);
5685 REG_WR(bp, PXP2_REG_RD_DISABLE_INPUTS, 0);
5687 bnx2x_init_block(bp, DMAE_BLOCK, COMMON_STAGE);
5689 /* clean the DMAE memory */
5691 bnx2x_init_fill(bp, TSEM_REG_PRAM, 0, 8);
5693 bnx2x_init_block(bp, TCM_BLOCK, COMMON_STAGE);
5694 bnx2x_init_block(bp, UCM_BLOCK, COMMON_STAGE);
5695 bnx2x_init_block(bp, CCM_BLOCK, COMMON_STAGE);
5696 bnx2x_init_block(bp, XCM_BLOCK, COMMON_STAGE);
5698 bnx2x_read_dmae(bp, XSEM_REG_PASSIVE_BUFFER, 3);
5699 bnx2x_read_dmae(bp, CSEM_REG_PASSIVE_BUFFER, 3);
5700 bnx2x_read_dmae(bp, TSEM_REG_PASSIVE_BUFFER, 3);
5701 bnx2x_read_dmae(bp, USEM_REG_PASSIVE_BUFFER, 3);
5703 bnx2x_init_block(bp, QM_BLOCK, COMMON_STAGE);
5704 /* soft reset pulse */
5705 REG_WR(bp, QM_REG_SOFT_RESET, 1);
5706 REG_WR(bp, QM_REG_SOFT_RESET, 0);
5709 bnx2x_init_block(bp, TIMERS_BLOCK, COMMON_STAGE);
5712 bnx2x_init_block(bp, DQ_BLOCK, COMMON_STAGE);
5713 REG_WR(bp, DORQ_REG_DPM_CID_OFST, BCM_PAGE_SHIFT);
5714 if (!CHIP_REV_IS_SLOW(bp)) {
5715 /* enable hw interrupt from doorbell Q */
5716 REG_WR(bp, DORQ_REG_DORQ_INT_MASK, 0);
5719 bnx2x_init_block(bp, BRB1_BLOCK, COMMON_STAGE);
5720 bnx2x_init_block(bp, PRS_BLOCK, COMMON_STAGE);
5721 REG_WR(bp, PRS_REG_A_PRSU_20, 0xf);
5723 REG_WR(bp, PRS_REG_NIC_MODE, 1);
5724 if (CHIP_IS_E1H(bp))
5725 REG_WR(bp, PRS_REG_E1HOV_MODE, IS_E1HMF(bp));
5727 bnx2x_init_block(bp, TSDM_BLOCK, COMMON_STAGE);
5728 bnx2x_init_block(bp, CSDM_BLOCK, COMMON_STAGE);
5729 bnx2x_init_block(bp, USDM_BLOCK, COMMON_STAGE);
5730 bnx2x_init_block(bp, XSDM_BLOCK, COMMON_STAGE);
5732 bnx2x_init_fill(bp, TSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5733 bnx2x_init_fill(bp, USTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5734 bnx2x_init_fill(bp, CSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5735 bnx2x_init_fill(bp, XSTORM_INTMEM_ADDR, 0, STORM_INTMEM_SIZE(bp));
5737 bnx2x_init_block(bp, TSEM_BLOCK, COMMON_STAGE);
5738 bnx2x_init_block(bp, USEM_BLOCK, COMMON_STAGE);
5739 bnx2x_init_block(bp, CSEM_BLOCK, COMMON_STAGE);
5740 bnx2x_init_block(bp, XSEM_BLOCK, COMMON_STAGE);
5743 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
5745 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
5748 bnx2x_init_block(bp, UPB_BLOCK, COMMON_STAGE);
5749 bnx2x_init_block(bp, XPB_BLOCK, COMMON_STAGE);
5750 bnx2x_init_block(bp, PBF_BLOCK, COMMON_STAGE);
5752 REG_WR(bp, SRC_REG_SOFT_RST, 1);
5753 for (i = SRC_REG_KEYRSS0_0; i <= SRC_REG_KEYRSS1_9; i += 4) {
5754 REG_WR(bp, i, 0xc0cac01a);
5755 /* TODO: replace with something meaningful */
5757 bnx2x_init_block(bp, SRCH_BLOCK, COMMON_STAGE);
5758 REG_WR(bp, SRC_REG_SOFT_RST, 0);
5760 if (sizeof(union cdu_context) != 1024)
5761 /* we currently assume that a context is 1024 bytes */
5762 printk(KERN_ALERT PFX "please adjust the size of"
5763 " cdu_context(%ld)\n", (long)sizeof(union cdu_context));
5765 bnx2x_init_block(bp, CDU_BLOCK, COMMON_STAGE);
5766 val = (4 << 24) + (0 << 12) + 1024;
5767 REG_WR(bp, CDU_REG_CDU_GLOBAL_PARAMS, val);
5768 if (CHIP_IS_E1(bp)) {
5769 /* !!! fix pxp client credit until excel update */
5770 REG_WR(bp, CDU_REG_CDU_DEBUG, 0x264);
5771 REG_WR(bp, CDU_REG_CDU_DEBUG, 0);
5774 bnx2x_init_block(bp, CFC_BLOCK, COMMON_STAGE);
5775 REG_WR(bp, CFC_REG_INIT_REG, 0x7FF);
5776 /* enable context validation interrupt from CFC */
5777 REG_WR(bp, CFC_REG_CFC_INT_MASK, 0);
5779 /* set the thresholds to prevent CFC/CDU race */
5780 REG_WR(bp, CFC_REG_DEBUG0, 0x20020000);
5782 bnx2x_init_block(bp, HC_BLOCK, COMMON_STAGE);
5783 bnx2x_init_block(bp, MISC_AEU_BLOCK, COMMON_STAGE);
5785 /* PXPCS COMMON comes here */
5786 bnx2x_init_block(bp, PXPCS_BLOCK, COMMON_STAGE);
5787 /* Reset PCIE errors for debug */
5788 REG_WR(bp, 0x2814, 0xffffffff);
5789 REG_WR(bp, 0x3820, 0xffffffff);
5791 /* EMAC0 COMMON comes here */
5792 bnx2x_init_block(bp, EMAC0_BLOCK, COMMON_STAGE);
5793 /* EMAC1 COMMON comes here */
5794 bnx2x_init_block(bp, EMAC1_BLOCK, COMMON_STAGE);
5795 /* DBU COMMON comes here */
5796 bnx2x_init_block(bp, DBU_BLOCK, COMMON_STAGE);
5797 /* DBG COMMON comes here */
5798 bnx2x_init_block(bp, DBG_BLOCK, COMMON_STAGE);
5800 bnx2x_init_block(bp, NIG_BLOCK, COMMON_STAGE);
5801 if (CHIP_IS_E1H(bp)) {
5802 REG_WR(bp, NIG_REG_LLH_MF_MODE, IS_E1HMF(bp));
5803 REG_WR(bp, NIG_REG_LLH_E1HOV_MODE, IS_E1HMF(bp));
5806 if (CHIP_REV_IS_SLOW(bp))
5809 /* finish CFC init */
5810 val = reg_poll(bp, CFC_REG_LL_INIT_DONE, 1, 100, 10);
5812 BNX2X_ERR("CFC LL_INIT failed\n");
5815 val = reg_poll(bp, CFC_REG_AC_INIT_DONE, 1, 100, 10);
5817 BNX2X_ERR("CFC AC_INIT failed\n");
5820 val = reg_poll(bp, CFC_REG_CAM_INIT_DONE, 1, 100, 10);
5822 BNX2X_ERR("CFC CAM_INIT failed\n");
5825 REG_WR(bp, CFC_REG_DEBUG0, 0);
5827 /* read the NIG statistics
5828 to see if this is our first up since powerup */
5829 bnx2x_read_dmae(bp, NIG_REG_STAT2_BRB_OCTET, 2);
5830 val = *bnx2x_sp(bp, wb_data[0]);
5832 /* do internal memory self test */
5833 if ((CHIP_IS_E1(bp)) && (val == 0) && bnx2x_int_mem_test(bp)) {
5834 BNX2X_ERR("internal mem self test failed\n");
5838 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
5839 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
5840 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
5841 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
5842 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
5843 bp->port.need_hw_lock = 1;
5850 bnx2x_setup_fan_failure_detection(bp);
5852 /* clear PXP2 attentions */
5853 REG_RD(bp, PXP2_REG_PXP2_INT_STS_CLR_0);
5855 enable_blocks_attention(bp);
5857 if (!BP_NOMCP(bp)) {
5858 bnx2x_acquire_phy_lock(bp);
5859 bnx2x_common_init_phy(bp, bp->common.shmem_base);
5860 bnx2x_release_phy_lock(bp);
5862 BNX2X_ERR("Bootcode is missing - can not initialize link\n");
5867 static int bnx2x_init_port(struct bnx2x *bp)
5869 int port = BP_PORT(bp);
5870 int init_stage = port ? PORT1_STAGE : PORT0_STAGE;
5874 DP(BNX2X_MSG_MCP, "starting port init port %x\n", port);
5876 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
5878 /* Port PXP comes here */
5879 bnx2x_init_block(bp, PXP_BLOCK, init_stage);
5880 /* Port PXP2 comes here */
5881 bnx2x_init_block(bp, PXP2_BLOCK, init_stage);
5886 wb_write[0] = ONCHIP_ADDR1(bp->timers_mapping);
5887 wb_write[1] = ONCHIP_ADDR2(bp->timers_mapping);
5888 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5889 REG_WR(bp, PXP2_REG_PSWRQ_TM0_L2P + func*4, PXP_ONE_ILT(i));
5894 wb_write[0] = ONCHIP_ADDR1(bp->qm_mapping);
5895 wb_write[1] = ONCHIP_ADDR2(bp->qm_mapping);
5896 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5897 REG_WR(bp, PXP2_REG_PSWRQ_QM0_L2P + func*4, PXP_ONE_ILT(i));
5902 wb_write[0] = ONCHIP_ADDR1(bp->t1_mapping);
5903 wb_write[1] = ONCHIP_ADDR2(bp->t1_mapping);
5904 REG_WR_DMAE(bp, PXP2_REG_RQ_ONCHIP_AT + i*8, wb_write, 2);
5905 REG_WR(bp, PXP2_REG_PSWRQ_SRC0_L2P + func*4, PXP_ONE_ILT(i));
5907 /* Port CMs come here */
5908 bnx2x_init_block(bp, XCM_BLOCK, init_stage);
5910 /* Port QM comes here */
5912 REG_WR(bp, TM_REG_LIN0_SCAN_TIME + func*4, 1024/64*20);
5913 REG_WR(bp, TM_REG_LIN0_MAX_ACTIVE_CID + func*4, 31);
5915 bnx2x_init_block(bp, TIMERS_BLOCK, init_stage);
5917 /* Port DQ comes here */
5918 bnx2x_init_block(bp, DQ_BLOCK, init_stage);
5920 bnx2x_init_block(bp, BRB1_BLOCK, init_stage);
5921 if (CHIP_REV_IS_SLOW(bp) && !CHIP_IS_E1H(bp)) {
5922 /* no pause for emulation and FPGA */
5927 low = ((bp->flags & ONE_PORT_FLAG) ? 160 : 246);
5928 else if (bp->dev->mtu > 4096) {
5929 if (bp->flags & ONE_PORT_FLAG)
5933 /* (24*1024 + val*4)/256 */
5934 low = 96 + (val/64) + ((val % 64) ? 1 : 0);
5937 low = ((bp->flags & ONE_PORT_FLAG) ? 80 : 160);
5938 high = low + 56; /* 14*1024/256 */
5940 REG_WR(bp, BRB1_REG_PAUSE_LOW_THRESHOLD_0 + port*4, low);
5941 REG_WR(bp, BRB1_REG_PAUSE_HIGH_THRESHOLD_0 + port*4, high);
5944 /* Port PRS comes here */
5945 bnx2x_init_block(bp, PRS_BLOCK, init_stage);
5946 /* Port TSDM comes here */
5947 bnx2x_init_block(bp, TSDM_BLOCK, init_stage);
5948 /* Port CSDM comes here */
5949 bnx2x_init_block(bp, CSDM_BLOCK, init_stage);
5950 /* Port USDM comes here */
5951 bnx2x_init_block(bp, USDM_BLOCK, init_stage);
5952 /* Port XSDM comes here */
5953 bnx2x_init_block(bp, XSDM_BLOCK, init_stage);
5955 bnx2x_init_block(bp, TSEM_BLOCK, init_stage);
5956 bnx2x_init_block(bp, USEM_BLOCK, init_stage);
5957 bnx2x_init_block(bp, CSEM_BLOCK, init_stage);
5958 bnx2x_init_block(bp, XSEM_BLOCK, init_stage);
5960 /* Port UPB comes here */
5961 bnx2x_init_block(bp, UPB_BLOCK, init_stage);
5962 /* Port XPB comes here */
5963 bnx2x_init_block(bp, XPB_BLOCK, init_stage);
5965 bnx2x_init_block(bp, PBF_BLOCK, init_stage);
5967 /* configure PBF to work without PAUSE mtu 9000 */
5968 REG_WR(bp, PBF_REG_P0_PAUSE_ENABLE + port*4, 0);
5970 /* update threshold */
5971 REG_WR(bp, PBF_REG_P0_ARB_THRSH + port*4, (9040/16));
5972 /* update init credit */
5973 REG_WR(bp, PBF_REG_P0_INIT_CRD + port*4, (9040/16) + 553 - 22);
5976 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 1);
5978 REG_WR(bp, PBF_REG_INIT_P0 + port*4, 0);
5981 /* tell the searcher where the T2 table is */
5982 REG_WR(bp, SRC_REG_COUNTFREE0 + func*4, 16*1024/64);
5984 wb_write[0] = U64_LO(bp->t2_mapping);
5985 wb_write[1] = U64_HI(bp->t2_mapping);
5986 REG_WR_DMAE(bp, SRC_REG_FIRSTFREE0 + func*4, wb_write, 2);
5987 wb_write[0] = U64_LO((u64)bp->t2_mapping + 16*1024 - 64);
5988 wb_write[1] = U64_HI((u64)bp->t2_mapping + 16*1024 - 64);
5989 REG_WR_DMAE(bp, SRC_REG_LASTFREE0 + func*4, wb_write, 2);
5991 REG_WR(bp, SRC_REG_NUMBER_HASH_BITS0 + func*4, 10);
5992 /* Port SRCH comes here */
5994 /* Port CDU comes here */
5995 bnx2x_init_block(bp, CDU_BLOCK, init_stage);
5996 /* Port CFC comes here */
5997 bnx2x_init_block(bp, CFC_BLOCK, init_stage);
5999 if (CHIP_IS_E1(bp)) {
6000 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6001 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6003 bnx2x_init_block(bp, HC_BLOCK, init_stage);
6005 bnx2x_init_block(bp, MISC_AEU_BLOCK, init_stage);
6006 /* init aeu_mask_attn_func_0/1:
6007 * - SF mode: bits 3-7 are masked. only bits 0-2 are in use
6008 * - MF mode: bit 3 is masked. bits 0-2 are in use as in SF
6009 * bits 4-7 are used for "per vn group attention" */
6010 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4,
6011 (IS_E1HMF(bp) ? 0xF7 : 0x7));
6013 /* Port PXPCS comes here */
6014 bnx2x_init_block(bp, PXPCS_BLOCK, init_stage);
6015 /* Port EMAC0 comes here */
6016 bnx2x_init_block(bp, EMAC0_BLOCK, init_stage);
6017 /* Port EMAC1 comes here */
6018 bnx2x_init_block(bp, EMAC1_BLOCK, init_stage);
6019 /* Port DBU comes here */
6020 bnx2x_init_block(bp, DBU_BLOCK, init_stage);
6021 /* Port DBG comes here */
6022 bnx2x_init_block(bp, DBG_BLOCK, init_stage);
6024 bnx2x_init_block(bp, NIG_BLOCK, init_stage);
6026 REG_WR(bp, NIG_REG_XGXS_SERDES0_MODE_SEL + port*4, 1);
6028 if (CHIP_IS_E1H(bp)) {
6029 /* 0x2 disable e1hov, 0x1 enable */
6030 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK_MF + port*4,
6031 (IS_E1HMF(bp) ? 0x1 : 0x2));
6033 /* support pause requests from USDM, TSDM and BRB */
6034 REG_WR(bp, NIG_REG_LLFC_EGRESS_SRC_ENABLE_0 + port*4, 0x7);
6037 REG_WR(bp, NIG_REG_LLFC_ENABLE_0 + port*4, 0);
6038 REG_WR(bp, NIG_REG_LLFC_OUT_EN_0 + port*4, 0);
6039 REG_WR(bp, NIG_REG_PAUSE_ENABLE_0 + port*4, 1);
6043 /* Port MCP comes here */
6044 bnx2x_init_block(bp, MCP_BLOCK, init_stage);
6045 /* Port DMAE comes here */
6046 bnx2x_init_block(bp, DMAE_BLOCK, init_stage);
6048 switch (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config)) {
6049 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
6051 u32 swap_val, swap_override, aeu_gpio_mask, offset;
6053 bnx2x_set_gpio(bp, MISC_REGISTERS_GPIO_3,
6054 MISC_REGISTERS_GPIO_INPUT_HI_Z, port);
6056 /* The GPIO should be swapped if the swap register is set and active */
6058 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
6059 swap_override = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
6061 /* Select function upon port-swap configuration */
6063 offset = MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0;
6064 aeu_gpio_mask = (swap_val && swap_override) ?
6065 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1 :
6066 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0;
6068 offset = MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0;
6069 aeu_gpio_mask = (swap_val && swap_override) ?
6070 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_0 :
6071 AEU_INPUTS_ATTN_BITS_GPIO3_FUNCTION_1;
6073 val = REG_RD(bp, offset);
6074 /* add GPIO3 to group */
6075 val |= aeu_gpio_mask;
6076 REG_WR(bp, offset, val);
6080 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
6081 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
6082 /* add SPIO 5 to group 0 */
6084 u32 reg_addr = (port ? MISC_REG_AEU_ENABLE1_FUNC_1_OUT_0 :
6085 MISC_REG_AEU_ENABLE1_FUNC_0_OUT_0);
6086 val = REG_RD(bp, reg_addr);
6087 val |= AEU_INPUTS_ATTN_BITS_SPIO5;
6088 REG_WR(bp, reg_addr, val);
6096 bnx2x__link_reset(bp);
6101 #define ILT_PER_FUNC (768/2)
6102 #define FUNC_ILT_BASE(func) (func * ILT_PER_FUNC)
6103 /* the phys address is shifted right 12 bits and has a
6104 valid bit (1) added as the 53rd bit;
6105 then, since this is a wide register(TM),
6106 we split it into two 32 bit writes
6108 #define ONCHIP_ADDR1(x) ((u32)(((u64)x >> 12) & 0xFFFFFFFF))
6109 #define ONCHIP_ADDR2(x) ((u32)((1 << 20) | ((u64)x >> 44)))
6110 #define PXP_ONE_ILT(x) (((x) << 10) | (x))
6111 #define PXP_ILT_RANGE(f, l) (((l) << 10) | (f))
6113 #define CNIC_ILT_LINES 0
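/* Worked example (illustration only) for the ONCHIP_ADDR macros
 * above: for a physical address of 0x0000001234567000,
 *   ONCHIP_ADDR1() == (u32)(0x1234567000 >> 12) == 0x01234567
 *   ONCHIP_ADDR2() == (1 << 20) | (0x1234567000 >> 44) == 0x00100000
 * i.e. the low write carries address bits 12..43 and the high write
 * carries the remaining upper bits plus the valid bit.
 */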
6115 static void bnx2x_ilt_wr(struct bnx2x *bp, u32 index, dma_addr_t addr)
6119 if (CHIP_IS_E1H(bp))
6120 reg = PXP2_REG_RQ_ONCHIP_AT_B0 + index*8;
6122 reg = PXP2_REG_RQ_ONCHIP_AT + index*8;
6124 bnx2x_wb_wr(bp, reg, ONCHIP_ADDR1(addr), ONCHIP_ADDR2(addr));
6127 static int bnx2x_init_func(struct bnx2x *bp)
6129 int port = BP_PORT(bp);
6130 int func = BP_FUNC(bp);
6134 DP(BNX2X_MSG_MCP, "starting func init func %x\n", func);
6136 /* set MSI reconfigure capability */
6137 addr = (port ? HC_REG_CONFIG_1 : HC_REG_CONFIG_0);
6138 val = REG_RD(bp, addr);
6139 val |= HC_CONFIG_0_REG_MSI_ATTN_EN_0;
6140 REG_WR(bp, addr, val);
6142 i = FUNC_ILT_BASE(func);
6144 bnx2x_ilt_wr(bp, i, bnx2x_sp_mapping(bp, context));
6145 if (CHIP_IS_E1H(bp)) {
6146 REG_WR(bp, PXP2_REG_RQ_CDU_FIRST_ILT, i);
6147 REG_WR(bp, PXP2_REG_RQ_CDU_LAST_ILT, i + CNIC_ILT_LINES);
6149 REG_WR(bp, PXP2_REG_PSWRQ_CDU0_L2P + func*4,
6150 PXP_ILT_RANGE(i, i + CNIC_ILT_LINES));
6153 if (CHIP_IS_E1H(bp)) {
6154 for (i = 0; i < 9; i++)
6155 bnx2x_init_block(bp,
6156 cm_blocks[i], FUNC0_STAGE + func);
6158 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 1);
6159 REG_WR(bp, NIG_REG_LLH0_FUNC_VLAN_ID + port*8, bp->e1hov);
6162 /* HC init per function */
6163 if (CHIP_IS_E1H(bp)) {
6164 REG_WR(bp, MISC_REG_AEU_GENERAL_ATTN_12 + func*4, 0);
6166 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
6167 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
6169 bnx2x_init_block(bp, HC_BLOCK, FUNC0_STAGE + func);
6171 /* Reset PCIE errors for debug */
6172 REG_WR(bp, 0x2114, 0xffffffff);
6173 REG_WR(bp, 0x2120, 0xffffffff);
6178 static int bnx2x_init_hw(struct bnx2x *bp, u32 load_code)
6182 DP(BNX2X_MSG_MCP, "function %d load_code %x\n",
6183 BP_FUNC(bp), load_code);
6186 mutex_init(&bp->dmae_mutex);
6187 bnx2x_gunzip_init(bp);
6189 switch (load_code) {
6190 case FW_MSG_CODE_DRV_LOAD_COMMON:
6191 rc = bnx2x_init_common(bp);
6196 case FW_MSG_CODE_DRV_LOAD_PORT:
6198 rc = bnx2x_init_port(bp);
6203 case FW_MSG_CODE_DRV_LOAD_FUNCTION:
6205 rc = bnx2x_init_func(bp);
6211 BNX2X_ERR("Unknown load_code (0x%x) from MCP\n", load_code);
6215 if (!BP_NOMCP(bp)) {
6216 int func = BP_FUNC(bp);
6218 bp->fw_drv_pulse_wr_seq =
6219 (SHMEM_RD(bp, func_mb[func].drv_pulse_mb) &
6220 DRV_PULSE_SEQ_MASK);
6221 bp->func_stx = SHMEM_RD(bp, func_mb[func].fw_mb_param);
6222 DP(BNX2X_MSG_MCP, "drv_pulse 0x%x func_stx 0x%x\n",
6223 bp->fw_drv_pulse_wr_seq, bp->func_stx);
6227 /* this needs to be done before gunzip end */
6228 bnx2x_zero_def_sb(bp);
6229 for_each_queue(bp, i)
6230 bnx2x_zero_sb(bp, BP_L_ID(bp) + i);
6233 bnx2x_gunzip_end(bp);
6238 /* send the MCP a request, block until there is a reply */
6239 u32 bnx2x_fw_command(struct bnx2x *bp, u32 command)
6241 int func = BP_FUNC(bp);
6242 u32 seq = ++bp->fw_seq;
6245 u8 delay = CHIP_REV_IS_SLOW(bp) ? 100 : 10;
6247 SHMEM_WR(bp, func_mb[func].drv_mb_header, (command | seq));
6248 DP(BNX2X_MSG_MCP, "wrote command (%x) to FW MB\n", (command | seq));
6251 /* let the FW do its magic ... */
6254 rc = SHMEM_RD(bp, func_mb[func].fw_mb_header);
6256 /* Give the FW up to 2 second (200*10ms) */
6257 } while ((seq != (rc & FW_MSG_SEQ_NUMBER_MASK)) && (cnt++ < 200));
6259 DP(BNX2X_MSG_MCP, "[after %d ms] read (%x) seq is (%x) from FW MB\n",
6260 cnt*delay, rc, seq);
6262 /* is this a reply to our command? */
6263 if (seq == (rc & FW_MSG_SEQ_NUMBER_MASK)) {
6264 rc &= FW_MSG_CODE_MASK;
6268 BNX2X_ERR("FW failed to respond!\n");
6276 static void bnx2x_free_mem(struct bnx2x *bp)
6279 #define BNX2X_PCI_FREE(x, y, size) \
6282 do { if (x) { pci_free_consistent(bp->pdev, size, x, y); x = NULL; } } while (0)
6288 #define BNX2X_FREE(x) do { if (x) { vfree(x); x = NULL; } } while (0)
6300 for_each_queue(bp, i) {
6303 BNX2X_PCI_FREE(bnx2x_fp(bp, i, status_blk),
6304 bnx2x_fp(bp, i, status_blk_mapping),
6305 sizeof(struct host_status_block) +
6306 sizeof(struct eth_tx_db_data));
6309 for_each_rx_queue(bp, i) {
6311 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6312 BNX2X_FREE(bnx2x_fp(bp, i, rx_buf_ring));
6313 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_desc_ring),
6314 bnx2x_fp(bp, i, rx_desc_mapping),
6315 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6317 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_comp_ring),
6318 bnx2x_fp(bp, i, rx_comp_mapping),
6319 sizeof(struct eth_fast_path_rx_cqe) *
6323 BNX2X_FREE(bnx2x_fp(bp, i, rx_page_ring));
6324 BNX2X_PCI_FREE(bnx2x_fp(bp, i, rx_sge_ring),
6325 bnx2x_fp(bp, i, rx_sge_mapping),
6326 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6329 for_each_tx_queue(bp, i) {
6331 /* fastpath tx rings: tx_buf tx_desc */
6332 BNX2X_FREE(bnx2x_fp(bp, i, tx_buf_ring));
6333 BNX2X_PCI_FREE(bnx2x_fp(bp, i, tx_desc_ring),
6334 bnx2x_fp(bp, i, tx_desc_mapping),
6335 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6337 /* end of fastpath */
6339 BNX2X_PCI_FREE(bp->def_status_blk, bp->def_status_blk_mapping,
6340 sizeof(struct host_def_status_block));
6342 BNX2X_PCI_FREE(bp->slowpath, bp->slowpath_mapping,
6343 sizeof(struct bnx2x_slowpath));
6346 BNX2X_PCI_FREE(bp->t1, bp->t1_mapping, 64*1024);
6347 BNX2X_PCI_FREE(bp->t2, bp->t2_mapping, 16*1024);
6348 BNX2X_PCI_FREE(bp->timers, bp->timers_mapping, 8*1024);
6349 BNX2X_PCI_FREE(bp->qm, bp->qm_mapping, 128*1024);
6351 BNX2X_PCI_FREE(bp->spq, bp->spq_mapping, BCM_PAGE_SIZE);
6353 #undef BNX2X_PCI_FREE
6357 static int bnx2x_alloc_mem(struct bnx2x *bp)
6360 #define BNX2X_PCI_ALLOC(x, y, size) \
6362 do { x = pci_alloc_consistent(bp->pdev, size, y); \
6364 if (x == NULL) goto alloc_mem_err; \
6365 memset(x, 0, size); } while (0)
6368 #define BNX2X_ALLOC(x, size) \
6370 do { x = vmalloc(size); \
6372 if (x == NULL) goto alloc_mem_err; \
6373 memset(x, 0, size); } while (0)
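/* Note (editorial): the do { } while (0) wrappers above make each macro a
 * single C statement, so a sketch like
 *
 *	if (need_rings)
 *		BNX2X_ALLOC(ptr, len);
 *	else
 *		ptr = NULL;
 *
 * (need_rings/ptr/len are hypothetical names) expands without dangling-else
 * or stray-semicolon surprises; on failure both macros jump to the local
 * alloc_mem_err label.
 */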
6380 for_each_queue(bp, i) {
6381 bnx2x_fp(bp, i, bp) = bp;
6384 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, status_blk),
6385 &bnx2x_fp(bp, i, status_blk_mapping),
6386 sizeof(struct host_status_block) +
6387 sizeof(struct eth_tx_db_data));
6390 for_each_rx_queue(bp, i) {
6392 /* fastpath rx rings: rx_buf rx_desc rx_comp */
6393 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_buf_ring),
6394 sizeof(struct sw_rx_bd) * NUM_RX_BD);
6395 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_desc_ring),
6396 &bnx2x_fp(bp, i, rx_desc_mapping),
6397 sizeof(struct eth_rx_bd) * NUM_RX_BD);
6399 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_comp_ring),
6400 &bnx2x_fp(bp, i, rx_comp_mapping),
6401 sizeof(struct eth_fast_path_rx_cqe) *
6405 BNX2X_ALLOC(bnx2x_fp(bp, i, rx_page_ring),
6406 sizeof(struct sw_rx_page) * NUM_RX_SGE);
6407 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, rx_sge_ring),
6408 &bnx2x_fp(bp, i, rx_sge_mapping),
6409 BCM_PAGE_SIZE * NUM_RX_SGE_PAGES);
6412 for_each_tx_queue(bp, i) {
6414 bnx2x_fp(bp, i, hw_tx_prods) =
6415 (void *)(bnx2x_fp(bp, i, status_blk) + 1);
6417 bnx2x_fp(bp, i, tx_prods_mapping) =
6418 bnx2x_fp(bp, i, status_blk_mapping) +
6419 sizeof(struct host_status_block);
6421 /* fastpath tx rings: tx_buf tx_desc */
6422 BNX2X_ALLOC(bnx2x_fp(bp, i, tx_buf_ring),
6423 sizeof(struct sw_tx_bd) * NUM_TX_BD);
6424 BNX2X_PCI_ALLOC(bnx2x_fp(bp, i, tx_desc_ring),
6425 &bnx2x_fp(bp, i, tx_desc_mapping),
6426 sizeof(struct eth_tx_bd) * NUM_TX_BD);
6428 /* end of fastpath */
6430 BNX2X_PCI_ALLOC(bp->def_status_blk, &bp->def_status_blk_mapping,
6431 sizeof(struct host_def_status_block));
6433 BNX2X_PCI_ALLOC(bp->slowpath, &bp->slowpath_mapping,
6434 sizeof(struct bnx2x_slowpath));
6437 BNX2X_PCI_ALLOC(bp->t1, &bp->t1_mapping, 64*1024);
6440 for (i = 0; i < 64*1024; i += 64) {
6441 *(u64 *)((char *)bp->t1 + i + 56) = 0x0UL;
6442 *(u64 *)((char *)bp->t1 + i + 3) = 0x0UL;
6445 /* allocate searcher T2 table
6446 we allocate 1/4 of alloc num for T2
6447 (which is not entered into the ILT) */
6448 BNX2X_PCI_ALLOC(bp->t2, &bp->t2_mapping, 16*1024);
6451 for (i = 0; i < 16*1024; i += 64)
6452 *(u64 *)((char *)bp->t2 + i + 56) = bp->t2_mapping + i + 64;
6454 /* now fixup the last line in the block to point to the next block */
6455 *(u64 *)((char *)bp->t2 + 1024*16-8) = bp->t2_mapping;
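/* Illustrative layout (values assumed): with t2_mapping = P, every 64-byte
 * T2 line ends in an 8-byte DMA pointer, forming a ring the hardware can
 * walk:
 *	line 0 @ P      : bytes 56..63 = P + 64
 *	line 1 @ P + 64 : bytes 56..63 = P + 128
 *	...
 *	last line       : bytes 56..63 = P   (the fixup above wraps it)
 */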
6457 /* Timer block array (MAX_CONN*8) phys uncached for now 1024 conns */
6458 BNX2X_PCI_ALLOC(bp->timers, &bp->timers_mapping, 8*1024);
6460 /* QM queues (128*MAX_CONN) */
6461 BNX2X_PCI_ALLOC(bp->qm, &bp->qm_mapping, 128*1024);
6464 /* Slow path ring */
6465 BNX2X_PCI_ALLOC(bp->spq, &bp->spq_mapping, BCM_PAGE_SIZE);
6473 #undef BNX2X_PCI_ALLOC
6477 static void bnx2x_free_tx_skbs(struct bnx2x *bp)
6481 for_each_tx_queue(bp, i) {
6482 struct bnx2x_fastpath *fp = &bp->fp[i];
6484 u16 bd_cons = fp->tx_bd_cons;
6485 u16 sw_prod = fp->tx_pkt_prod;
6486 u16 sw_cons = fp->tx_pkt_cons;
6488 while (sw_cons != sw_prod) {
6489 bd_cons = bnx2x_free_tx_pkt(bp, fp, TX_BD(sw_cons));
6495 static void bnx2x_free_rx_skbs(struct bnx2x *bp)
6499 for_each_rx_queue(bp, j) {
6500 struct bnx2x_fastpath *fp = &bp->fp[j];
6502 for (i = 0; i < NUM_RX_BD; i++) {
6503 struct sw_rx_bd *rx_buf = &fp->rx_buf_ring[i];
6504 struct sk_buff *skb = rx_buf->skb;
6509 pci_unmap_single(bp->pdev,
6510 pci_unmap_addr(rx_buf, mapping),
6511 bp->rx_buf_size, PCI_DMA_FROMDEVICE);
6516 if (!fp->disable_tpa)
6517 bnx2x_free_tpa_pool(bp, fp, CHIP_IS_E1(bp) ?
6518 ETH_MAX_AGGREGATION_QUEUES_E1 :
6519 ETH_MAX_AGGREGATION_QUEUES_E1H);
6523 static void bnx2x_free_skbs(struct bnx2x *bp)
6525 bnx2x_free_tx_skbs(bp);
6526 bnx2x_free_rx_skbs(bp);
6529 static void bnx2x_free_msix_irqs(struct bnx2x *bp)
6533 free_irq(bp->msix_table[0].vector, bp->dev);
6534 DP(NETIF_MSG_IFDOWN, "released sp irq (%d)\n",
6535 bp->msix_table[0].vector);
6537 for_each_queue(bp, i) {
6538 DP(NETIF_MSG_IFDOWN, "about to release fp #%d->%d irq "
6539 "state %x\n", i, bp->msix_table[i + offset].vector,
6540 bnx2x_fp(bp, i, state));
6542 free_irq(bp->msix_table[i + offset].vector, &bp->fp[i]);
6546 static void bnx2x_free_irq(struct bnx2x *bp)
6548 if (bp->flags & USING_MSIX_FLAG) {
6549 bnx2x_free_msix_irqs(bp);
6550 pci_disable_msix(bp->pdev);
6551 bp->flags &= ~USING_MSIX_FLAG;
6553 } else if (bp->flags & USING_MSI_FLAG) {
6554 free_irq(bp->pdev->irq, bp->dev);
6555 pci_disable_msi(bp->pdev);
6556 bp->flags &= ~USING_MSI_FLAG;
6559 free_irq(bp->pdev->irq, bp->dev);
6562 static int bnx2x_enable_msix(struct bnx2x *bp)
6564 int i, rc, offset = 1, igu_vec = 0;
6567 bp->msix_table[0].entry = igu_vec;
6568 DP(NETIF_MSG_IFUP, "msix_table[0].entry = %d (slowpath)\n", igu_vec);
6570 for_each_queue(bp, i) {
6571 igu_vec = BP_L_ID(bp) + offset + i;
6572 bp->msix_table[i + offset].entry = igu_vec;
6573 DP(NETIF_MSG_IFUP, "msix_table[%d].entry = %d "
6574 "(fastpath #%u)\n", i + offset, igu_vec, i);
6577 rc = pci_enable_msix(bp->pdev, &bp->msix_table[0],
6578 BNX2X_NUM_QUEUES(bp) + offset);
6580 DP(NETIF_MSG_IFUP, "MSI-X is not attainable rc %d\n", rc);
6584 bp->flags |= USING_MSIX_FLAG;
6589 static int bnx2x_req_msix_irqs(struct bnx2x *bp)
6591 int i, rc, offset = 1;
6593 rc = request_irq(bp->msix_table[0].vector, bnx2x_msix_sp_int, 0,
6594 bp->dev->name, bp->dev);
6596 BNX2X_ERR("request sp irq failed\n");
6600 for_each_queue(bp, i) {
6601 struct bnx2x_fastpath *fp = &bp->fp[i];
6603 sprintf(fp->name, "%s.fp%d", bp->dev->name, i);
6604 rc = request_irq(bp->msix_table[i + offset].vector,
6605 bnx2x_msix_fp_int, 0, fp->name, fp);
6607 BNX2X_ERR("request fp #%d irq failed rc %d\n", i, rc);
6608 bnx2x_free_msix_irqs(bp);
6612 fp->state = BNX2X_FP_STATE_IRQ;
6615 i = BNX2X_NUM_QUEUES(bp);
6617 printk(KERN_INFO PFX
6618 "%s: using MSI-X IRQs: sp %d fp %d - %d\n",
6619 bp->dev->name, bp->msix_table[0].vector,
6620 bp->msix_table[offset].vector,
6621 bp->msix_table[offset + i - 1].vector);
6623 printk(KERN_INFO PFX "%s: using MSI-X IRQs: sp %d fp %d\n",
6624 bp->dev->name, bp->msix_table[0].vector,
6625 bp->msix_table[offset + i - 1].vector);
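/* Illustrative vector layout (BP_L_ID(bp) == 0 assumed): with offset = 1
 * and four queues, bnx2x_enable_msix() fills
 *	msix_table[0]    -> the slowpath vector
 *	msix_table[1..4] -> igu_vec 1..4 (fastpath queues 0..3)
 * and bnx2x_req_msix_irqs() requests them in the same order, which is why
 * both loops index the table with i + offset.
 */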
6630 static int bnx2x_enable_msi(struct bnx2x *bp)
6634 rc = pci_enable_msi(bp->pdev);
6636 DP(NETIF_MSG_IFUP, "MSI is not attainable\n");
6639 bp->flags |= USING_MSI_FLAG;
6644 static int bnx2x_req_irq(struct bnx2x *bp)
6646 unsigned long flags;
6649 if (bp->flags & USING_MSI_FLAG) flags = 0;
6652 else flags = IRQF_SHARED;
6654 rc = request_irq(bp->pdev->irq, bnx2x_interrupt, flags,
6655 bp->dev->name, bp->dev);
6657 if (!rc) bnx2x_fp(bp, 0, state) = BNX2X_FP_STATE_IRQ;
6662 static void bnx2x_napi_enable(struct bnx2x *bp)
6666 for_each_rx_queue(bp, i)
6667 napi_enable(&bnx2x_fp(bp, i, napi));
6670 static void bnx2x_napi_disable(struct bnx2x *bp)
6674 for_each_rx_queue(bp, i)
6675 napi_disable(&bnx2x_fp(bp, i, napi));
6678 static void bnx2x_netif_start(struct bnx2x *bp)
6682 intr_sem = atomic_dec_and_test(&bp->intr_sem);
6683 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
6686 if (netif_running(bp->dev)) {
6687 bnx2x_napi_enable(bp);
6688 bnx2x_int_enable(bp);
6689 if (bp->state == BNX2X_STATE_OPEN)
6690 netif_tx_wake_all_queues(bp->dev);
6695 static void bnx2x_netif_stop(struct bnx2x *bp, int disable_hw)
6697 bnx2x_int_disable_sync(bp, disable_hw);
6698 bnx2x_napi_disable(bp);
6699 netif_tx_disable(bp->dev);
6700 bp->dev->trans_start = jiffies; /* prevent tx timeout */
6704 * Init service functions
6707 static void bnx2x_set_mac_addr_e1(struct bnx2x *bp, int set)
6709 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
6710 int port = BP_PORT(bp);
6713 /* CAM allocation: unicasts 0-31:port0 32-63:port1
6714  * multicast 64-127:port0 128-191:port1 */
6716 config->hdr.length = 2;
6717 config->hdr.offset = port ? 32 : 0;
6718 config->hdr.client_id = bp->fp->cl_id;
6719 config->hdr.reserved1 = 0;
6722 config->config_table[0].cam_entry.msb_mac_addr =
6723 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6724 config->config_table[0].cam_entry.middle_mac_addr =
6725 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6726 config->config_table[0].cam_entry.lsb_mac_addr =
6727 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6728 config->config_table[0].cam_entry.flags = cpu_to_le16(port);
6730 if (set) config->config_table[0].target_table_entry.flags = 0;
6732 else CAM_INVALIDATE(config->config_table[0]);
6733 config->config_table[0].target_table_entry.client_id = 0;
6734 config->config_table[0].target_table_entry.vlan_id = 0;
6736 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x)\n",
6737 (set ? "setting" : "clearing"),
6738 config->config_table[0].cam_entry.msb_mac_addr,
6739 config->config_table[0].cam_entry.middle_mac_addr,
6740 config->config_table[0].cam_entry.lsb_mac_addr);
6743 config->config_table[1].cam_entry.msb_mac_addr = cpu_to_le16(0xffff);
6744 config->config_table[1].cam_entry.middle_mac_addr = cpu_to_le16(0xffff);
6745 config->config_table[1].cam_entry.lsb_mac_addr = cpu_to_le16(0xffff);
6746 config->config_table[1].cam_entry.flags = cpu_to_le16(port);
6748 if (set) config->config_table[1].target_table_entry.flags =
6749 TSTORM_CAM_TARGET_TABLE_ENTRY_BROADCAST;
6751 else CAM_INVALIDATE(config->config_table[1]);
6752 config->config_table[1].target_table_entry.client_id = 0;
6753 config->config_table[1].target_table_entry.vlan_id = 0;
6755 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6756 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6757 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
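/* Worked example (illustrative): for dev_addr 00:11:22:33:44:55 on a
 * little-endian host, *(u16 *)&dev_addr[0] reads 0x1100, so
 *	msb_mac_addr    = swab16(0x1100) = 0x0011
 *	middle_mac_addr = swab16(0x3322) = 0x2233
 *	lsb_mac_addr    = swab16(0x5544) = 0x4455
 * i.e. the CAM fields end up holding the address bytes in wire order.
 */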
6760 static void bnx2x_set_mac_addr_e1h(struct bnx2x *bp, int set)
6762 struct mac_configuration_cmd_e1h *config =
6763 (struct mac_configuration_cmd_e1h *)bnx2x_sp(bp, mac_config);
6765 if (set && (bp->state != BNX2X_STATE_OPEN)) {
6766 DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
6770 /* CAM allocation for E1H
6771 * unicasts: by func number
6772  * multicast: 20+FUNC*20, 20 each */
6774 config->hdr.length = 1;
6775 config->hdr.offset = BP_FUNC(bp);
6776 config->hdr.client_id = bp->fp->cl_id;
6777 config->hdr.reserved1 = 0;
6780 config->config_table[0].msb_mac_addr =
6781 swab16(*(u16 *)&bp->dev->dev_addr[0]);
6782 config->config_table[0].middle_mac_addr =
6783 swab16(*(u16 *)&bp->dev->dev_addr[2]);
6784 config->config_table[0].lsb_mac_addr =
6785 swab16(*(u16 *)&bp->dev->dev_addr[4]);
6786 config->config_table[0].client_id = BP_L_ID(bp);
6787 config->config_table[0].vlan_id = 0;
6788 config->config_table[0].e1hov_id = cpu_to_le16(bp->e1hov);
6790 if (set) config->config_table[0].flags = BP_PORT(bp);
6792 else config->config_table[0].flags =
6793 MAC_CONFIGURATION_ENTRY_E1H_ACTION_TYPE;
6795 DP(NETIF_MSG_IFUP, "%s MAC (%04x:%04x:%04x) E1HOV %d CLID %d\n",
6796 (set ? "setting" : "clearing"),
6797 config->config_table[0].msb_mac_addr,
6798 config->config_table[0].middle_mac_addr,
6799 config->config_table[0].lsb_mac_addr, bp->e1hov, BP_L_ID(bp));
6801 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
6802 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
6803 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
6806 static int bnx2x_wait_ramrod(struct bnx2x *bp, int state, int idx,
6807 int *state_p, int poll)
6809 /* can take a while if any port is running */
6812 DP(NETIF_MSG_IFUP, "%s for state to become %x on IDX [%d]\n",
6813 poll ? "polling" : "waiting", state, idx);
6818 bnx2x_rx_int(bp->fp, 10);
6819 /* if index is different from 0
6820  * the reply for some commands will
6821  * be on the non default queue */
6824 if (idx) bnx2x_rx_int(&bp->fp[idx], 10);
6827 mb(); /* state is changed by bnx2x_sp_event() */
6828 if (*state_p == state) {
6829 #ifdef BNX2X_STOP_ON_ERROR
6830 DP(NETIF_MSG_IFUP, "exit (cnt %d)\n", 5000 - cnt);
6839 BNX2X_ERR("timeout %s for state %x on IDX [%d]\n",
6840 poll ? "polling" : "waiting", state, idx);
6841 #ifdef BNX2X_STOP_ON_ERROR
6848 static int bnx2x_setup_leading(struct bnx2x *bp)
6852 /* reset IGU state */
6853 bnx2x_ack_sb(bp, bp->fp[0].sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6856 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_SETUP, 0, 0, 0, 0);
6858 /* Wait for completion */
6859 rc = bnx2x_wait_ramrod(bp, BNX2X_STATE_OPEN, 0, &(bp->state), 0);
6864 static int bnx2x_setup_multi(struct bnx2x *bp, int index)
6866 struct bnx2x_fastpath *fp = &bp->fp[index];
6868 /* reset IGU state */
6869 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID, 0, IGU_INT_ENABLE, 0);
6872 fp->state = BNX2X_FP_STATE_OPENING;
6873 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CLIENT_SETUP, index, 0,
6876 /* Wait for completion */
6877 return bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_OPEN, index,
6881 static int bnx2x_poll(struct napi_struct *napi, int budget);
6883 static void bnx2x_set_int_mode(struct bnx2x *bp)
6891 bp->num_rx_queues = num_queues;
6892 bp->num_tx_queues = num_queues;
6894 "set number of queues to %d\n", num_queues);
6899 if (bp->multi_mode == ETH_RSS_MODE_REGULAR)
6900 num_queues = min_t(u32, num_online_cpus(),
6901 BNX2X_MAX_QUEUES(bp));
6904 bp->num_rx_queues = num_queues;
6905 bp->num_tx_queues = num_queues;
6906 DP(NETIF_MSG_IFUP, "set number of rx queues to %d"
6907 " number of tx queues to %d\n",
6908 bp->num_rx_queues, bp->num_tx_queues);
6909 /* if we can't use MSI-X we only need one fp,
6910 * so try to enable MSI-X with the requested number of fp's
6911 * and fall back to MSI or legacy INTx with one fp */
6913 if (bnx2x_enable_msix(bp)) {
6914 /* failed to enable MSI-X */
6916 bp->num_rx_queues = num_queues;
6917 bp->num_tx_queues = num_queues;
6919 BNX2X_ERR("Multi requested but failed to "
6920 "enable MSI-X set number of "
6921 "queues to %d\n", num_queues);
6925 bp->dev->real_num_tx_queues = bp->num_tx_queues;
6928 static void bnx2x_set_rx_mode(struct net_device *dev);
6930 /* must be called with rtnl_lock */
6931 static int bnx2x_nic_load(struct bnx2x *bp, int load_mode)
6935 #ifdef BNX2X_STOP_ON_ERROR
6936 DP(NETIF_MSG_IFUP, "enter load_mode %d\n", load_mode);
6937 if (unlikely(bp->panic)) return -EPERM;
6941 bp->state = BNX2X_STATE_OPENING_WAIT4_LOAD;
6943 bnx2x_set_int_mode(bp);
6945 if (bnx2x_alloc_mem(bp)) return -ENOMEM;
6948 for_each_rx_queue(bp, i)
6949 bnx2x_fp(bp, i, disable_tpa) =
6950 ((bp->flags & TPA_ENABLE_FLAG) == 0);
6952 for_each_rx_queue(bp, i)
6953 netif_napi_add(bp->dev, &bnx2x_fp(bp, i, napi),
6956 #ifdef BNX2X_STOP_ON_ERROR
6957 for_each_rx_queue(bp, i) {
6958 struct bnx2x_fastpath *fp = &bp->fp[i];
6960 fp->poll_no_work = 0;
6962 fp->poll_max_calls = 0;
6963 fp->poll_complete = 0;
6967 bnx2x_napi_enable(bp);
6969 if (bp->flags & USING_MSIX_FLAG) {
6970 rc = bnx2x_req_msix_irqs(bp);
6972 pci_disable_msix(bp->pdev);
6976 if ((rc != -ENOMEM) && (int_mode != INT_MODE_INTx))
6977 bnx2x_enable_msi(bp);
6979 rc = bnx2x_req_irq(bp);
6981 BNX2X_ERR("IRQ request failed rc %d, aborting\n", rc);
6982 if (bp->flags & USING_MSI_FLAG)
6983 pci_disable_msi(bp->pdev);
6986 if (bp->flags & USING_MSI_FLAG) {
6987 bp->dev->irq = bp->pdev->irq;
6988 printk(KERN_INFO PFX "%s: using MSI IRQ %d\n",
6989 bp->dev->name, bp->pdev->irq);
6993 /* Send LOAD_REQUEST command to MCP.
6994    Returns the type of LOAD command:
6995    if it is the first port to be initialized,
6996    common blocks should be initialized, otherwise - not */
6998 if (!BP_NOMCP(bp)) {
6999 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_REQ);
7001 BNX2X_ERR("MCP response failure, aborting\n");
7005 if (load_code == FW_MSG_CODE_DRV_LOAD_REFUSED) {
7006 rc = -EBUSY; /* other port in diagnostic mode */
7011 int port = BP_PORT(bp);
7013 DP(NETIF_MSG_IFUP, "NO MCP - load counts %d, %d, %d\n",
7014 load_count[0], load_count[1], load_count[2]);
7016 load_count[0]++; load_count[1 + port]++;
7017 DP(NETIF_MSG_IFUP, "NO MCP - new load counts %d, %d, %d\n",
7018 load_count[0], load_count[1], load_count[2]);
7019 if (load_count[0] == 1)
7020 load_code = FW_MSG_CODE_DRV_LOAD_COMMON;
7021 else if (load_count[1 + port] == 1)
7022 load_code = FW_MSG_CODE_DRV_LOAD_PORT;
7024 load_code = FW_MSG_CODE_DRV_LOAD_FUNCTION;
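/* Illustrative trace (values assumed): load_count[0] counts all loaded
 * functions and load_count[1 + port] those on one port. First load on
 * port 0: {1, 1, 0} -> LOAD_COMMON; first load on port 1: {2, 1, 1} ->
 * LOAD_PORT; a second function on port 1: {3, 1, 2} -> LOAD_FUNCTION.
 */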
7027 if ((load_code == FW_MSG_CODE_DRV_LOAD_COMMON) ||
7028 (load_code == FW_MSG_CODE_DRV_LOAD_PORT)) bp->port.pmf = 1; else bp->port.pmf = 0;
7032 DP(NETIF_MSG_LINK, "pmf %d\n", bp->port.pmf);
7035 rc = bnx2x_init_hw(bp, load_code);
7037 BNX2X_ERR("HW init failed, aborting\n");
7041 /* Setup NIC internals and enable interrupts */
7042 bnx2x_nic_init(bp, load_code);
7044 /* Send LOAD_DONE command to MCP */
7045 if (!BP_NOMCP(bp)) {
7046 load_code = bnx2x_fw_command(bp, DRV_MSG_CODE_LOAD_DONE);
7048 BNX2X_ERR("MCP response failure, aborting\n");
7054 bp->state = BNX2X_STATE_OPENING_WAIT4_PORT;
7056 rc = bnx2x_setup_leading(bp);
7058 BNX2X_ERR("Setup leading failed!\n");
7062 if (CHIP_IS_E1H(bp))
7063 if (bp->mf_config & FUNC_MF_CFG_FUNC_DISABLED) {
7064 DP(NETIF_MSG_IFUP, "mf_cfg function disabled\n");
7065 bp->state = BNX2X_STATE_DISABLED;
7068 if (bp->state == BNX2X_STATE_OPEN)
7069 for_each_nondefault_queue(bp, i) {
7070 rc = bnx2x_setup_multi(bp, i);
7076 bnx2x_set_mac_addr_e1(bp, 1);
7078 bnx2x_set_mac_addr_e1h(bp, 1);
7081 bnx2x_initial_phy_init(bp, load_mode);
7083 /* Start fast path */
7084 switch (load_mode) {
7086 /* Tx queues should only be re-enabled */
7087 netif_tx_wake_all_queues(bp->dev);
7088 /* Initialize the receive filter. */
7089 bnx2x_set_rx_mode(bp->dev);
7093 netif_tx_start_all_queues(bp->dev);
7094 /* Initialize the receive filter. */
7095 bnx2x_set_rx_mode(bp->dev);
7099 /* Initialize the receive filter. */
7100 bnx2x_set_rx_mode(bp->dev);
7101 bp->state = BNX2X_STATE_DIAG;
7109 bnx2x__link_status_update(bp);
7111 /* start the timer */
7112 mod_timer(&bp->timer, jiffies + bp->current_interval);
7118 bnx2x_int_disable_sync(bp, 1);
7119 if (!BP_NOMCP(bp)) {
7120 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP);
7121 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7124 /* Free SKBs, SGEs, TPA pool and driver internals */
7125 bnx2x_free_skbs(bp);
7126 for_each_rx_queue(bp, i)
7127 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7132 bnx2x_napi_disable(bp);
7133 for_each_rx_queue(bp, i)
7134 netif_napi_del(&bnx2x_fp(bp, i, napi));
7140 static int bnx2x_stop_multi(struct bnx2x *bp, int index)
7142 struct bnx2x_fastpath *fp = &bp->fp[index];
7145 /* halt the connection */
7146 fp->state = BNX2X_FP_STATE_HALTING;
7147 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, index, 0, fp->cl_id, 0);
7149 /* Wait for completion */
7150 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, index,
7152 if (rc) /* timeout */
7155 /* delete cfc entry */
7156 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_CFC_DEL, index, 0, 0, 1);
7158 /* Wait for completion */
7159 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_CLOSED, index,
7164 static int bnx2x_stop_leading(struct bnx2x *bp)
7166 __le16 dsb_sp_prod_idx;
7167 /* if the other port is handling traffic,
7168 this can take a lot of time */
7174 /* Send HALT ramrod */
7175 bp->fp[0].state = BNX2X_FP_STATE_HALTING;
7176 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_HALT, 0, 0, bp->fp->cl_id, 0);
7178 /* Wait for completion */
7179 rc = bnx2x_wait_ramrod(bp, BNX2X_FP_STATE_HALTED, 0,
7180 &(bp->fp[0].state), 1);
7181 if (rc) /* timeout */
7184 dsb_sp_prod_idx = *bp->dsb_sp_prod;
7186 /* Send PORT_DELETE ramrod */
7187 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_PORT_DEL, 0, 0, 0, 1);
7189 /* Wait for completion to arrive on default status block
7190 we are going to reset the chip anyway
7191 so there is not much to do if this times out */
7193 while (dsb_sp_prod_idx == *bp->dsb_sp_prod) {
7195 DP(NETIF_MSG_IFDOWN, "timeout waiting for port del "
7196 "dsb_sp_prod 0x%x != dsb_sp_prod_idx 0x%x\n",
7197 *bp->dsb_sp_prod, dsb_sp_prod_idx);
7198 #ifdef BNX2X_STOP_ON_ERROR
7206 rmb(); /* Refresh the dsb_sp_prod */
7208 bp->state = BNX2X_STATE_CLOSING_WAIT4_UNLOAD;
7209 bp->fp[0].state = BNX2X_FP_STATE_CLOSED;
7214 static void bnx2x_reset_func(struct bnx2x *bp)
7216 int port = BP_PORT(bp);
7217 int func = BP_FUNC(bp);
7221 REG_WR(bp, HC_REG_LEADING_EDGE_0 + port*8, 0);
7222 REG_WR(bp, HC_REG_TRAILING_EDGE_0 + port*8, 0);
7225 base = FUNC_ILT_BASE(func);
7226 for (i = base; i < base + ILT_PER_FUNC; i++)
7227 bnx2x_ilt_wr(bp, i, 0);
7230 static void bnx2x_reset_port(struct bnx2x *bp)
7232 int port = BP_PORT(bp);
7235 REG_WR(bp, NIG_REG_MASK_INTERRUPT_PORT0 + port*4, 0);
7237 /* Do not rcv packets to BRB */
7238 REG_WR(bp, NIG_REG_LLH0_BRB1_DRV_MASK + port*4, 0x0);
7239 /* Do not direct rcv packets that are not for MCP to the BRB */
7240 REG_WR(bp, (port ? NIG_REG_LLH1_BRB1_NOT_MCP :
7241 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7244 REG_WR(bp, MISC_REG_AEU_MASK_ATTN_FUNC_0 + port*4, 0);
7247 /* Check for BRB port occupancy */
7248 val = REG_RD(bp, BRB1_REG_PORT_NUM_OCC_BLOCKS_0 + port*4);
7250 DP(NETIF_MSG_IFDOWN,
7251 "BRB1 is not empty %d blocks are occupied\n", val);
7253 /* TODO: Close Doorbell port? */
7256 static void bnx2x_reset_chip(struct bnx2x *bp, u32 reset_code)
7258 DP(BNX2X_MSG_MCP, "function %d reset_code %x\n",
7259 BP_FUNC(bp), reset_code);
7261 switch (reset_code) {
7262 case FW_MSG_CODE_DRV_UNLOAD_COMMON:
7263 bnx2x_reset_port(bp);
7264 bnx2x_reset_func(bp);
7265 bnx2x_reset_common(bp);
7268 case FW_MSG_CODE_DRV_UNLOAD_PORT:
7269 bnx2x_reset_port(bp);
7270 bnx2x_reset_func(bp);
7273 case FW_MSG_CODE_DRV_UNLOAD_FUNCTION:
7274 bnx2x_reset_func(bp);
7278 BNX2X_ERR("Unknown reset_code (0x%x) from MCP\n", reset_code);
7283 /* must be called with rtnl_lock */
7284 static int bnx2x_nic_unload(struct bnx2x *bp, int unload_mode)
7286 int port = BP_PORT(bp);
7290 bp->state = BNX2X_STATE_CLOSING_WAIT4_HALT;
7292 bp->rx_mode = BNX2X_RX_MODE_NONE;
7293 bnx2x_set_storm_rx_mode(bp);
7295 bnx2x_netif_stop(bp, 1);
7297 del_timer_sync(&bp->timer);
7298 SHMEM_WR(bp, func_mb[BP_FUNC(bp)].drv_pulse_mb,
7299 (DRV_PULSE_ALWAYS_ALIVE | bp->fw_drv_pulse_wr_seq));
7300 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
7305 /* Wait until tx fastpath tasks complete */
7306 for_each_tx_queue(bp, i) {
7307 struct bnx2x_fastpath *fp = &bp->fp[i];
7310 while (bnx2x_has_tx_work_unload(fp)) {
7314 BNX2X_ERR("timeout waiting for queue[%d]\n",
7316 #ifdef BNX2X_STOP_ON_ERROR
7327 /* Give HW time to discard old tx messages */
7330 if (CHIP_IS_E1(bp)) {
7331 struct mac_configuration_cmd *config =
7332 bnx2x_sp(bp, mcast_config);
7334 bnx2x_set_mac_addr_e1(bp, 0);
7336 for (i = 0; i < config->hdr.length; i++)
7337 CAM_INVALIDATE(config->config_table[i]);
7339 config->hdr.length = i;
7340 if (CHIP_REV_IS_SLOW(bp))
7341 config->hdr.offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
7343 config->hdr.offset = BNX2X_MAX_MULTICAST*(1 + port);
7344 config->hdr.client_id = bp->fp->cl_id;
7345 config->hdr.reserved1 = 0;
7347 bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
7348 U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
7349 U64_LO(bnx2x_sp_mapping(bp, mcast_config)), 0);
7352 REG_WR(bp, NIG_REG_LLH0_FUNC_EN + port*8, 0);
7354 bnx2x_set_mac_addr_e1h(bp, 0);
7356 for (i = 0; i < MC_HASH_SIZE; i++)
7357 REG_WR(bp, MC_HASH_OFFSET(bp, i), 0);
7359 REG_WR(bp, MISC_REG_E1HMF_MODE, 0);
7362 if (unload_mode == UNLOAD_NORMAL)
7363 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7365 else if (bp->flags & NO_WOL_FLAG)
7366 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_MCP;
7369 u32 emac_base = port ? GRCBASE_EMAC1 : GRCBASE_EMAC0;
7370 u8 *mac_addr = bp->dev->dev_addr;
7372 /* The mac address is written to entries 1-4 to
7373 preserve entry 0 which is used by the PMF */
7374 u8 entry = (BP_E1HVN(bp) + 1)*8;
7376 val = (mac_addr[0] << 8) | mac_addr[1];
7377 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry, val);
7379 val = (mac_addr[2] << 24) | (mac_addr[3] << 16) |
7380 (mac_addr[4] << 8) | mac_addr[5];
7381 EMAC_WR(bp, EMAC_REG_EMAC_MAC_MATCH + entry + 4, val);
7383 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_EN;
7386 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7388 /* Close multi and leading connections
7389 Completions for ramrods are collected in a synchronous way */
7390 for_each_nondefault_queue(bp, i)
7391 if (bnx2x_stop_multi(bp, i))
7394 rc = bnx2x_stop_leading(bp);
7396 BNX2X_ERR("Stop leading failed!\n");
7397 #ifdef BNX2X_STOP_ON_ERROR
7406 reset_code = bnx2x_fw_command(bp, reset_code);
7408 DP(NETIF_MSG_IFDOWN, "NO MCP - load counts %d, %d, %d\n",
7409 load_count[0], load_count[1], load_count[2]);
7411 load_count[1 + port]--;
7412 DP(NETIF_MSG_IFDOWN, "NO MCP - new load counts %d, %d, %d\n",
7413 load_count[0], load_count[1], load_count[2]);
7414 if (load_count[0] == 0)
7415 reset_code = FW_MSG_CODE_DRV_UNLOAD_COMMON;
7416 else if (load_count[1 + port] == 0)
7417 reset_code = FW_MSG_CODE_DRV_UNLOAD_PORT;
7419 reset_code = FW_MSG_CODE_DRV_UNLOAD_FUNCTION;
7422 if ((reset_code == FW_MSG_CODE_DRV_UNLOAD_COMMON) ||
7423 (reset_code == FW_MSG_CODE_DRV_UNLOAD_PORT))
7424 bnx2x__link_reset(bp);
7426 /* Reset the chip */
7427 bnx2x_reset_chip(bp, reset_code);
7429 /* Report UNLOAD_DONE to MCP */
7431 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7435 /* Free SKBs, SGEs, TPA pool and driver internals */
7436 bnx2x_free_skbs(bp);
7437 for_each_rx_queue(bp, i)
7438 bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
7439 for_each_rx_queue(bp, i)
7440 netif_napi_del(&bnx2x_fp(bp, i, napi));
7443 bp->state = BNX2X_STATE_CLOSED;
7445 netif_carrier_off(bp->dev);
7450 static void bnx2x_reset_task(struct work_struct *work)
7452 struct bnx2x *bp = container_of(work, struct bnx2x, reset_task);
7454 #ifdef BNX2X_STOP_ON_ERROR
7455 BNX2X_ERR("reset task called but STOP_ON_ERROR defined"
7456 " so reset not done to allow debug dump,\n"
7457 " you will need to reboot when done\n");
7463 if (!netif_running(bp->dev))
7464 goto reset_task_exit;
7466 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
7467 bnx2x_nic_load(bp, LOAD_NORMAL);
7473 /* end of nic load/unload */
7478 * Init service functions
7481 static inline u32 bnx2x_get_pretend_reg(struct bnx2x *bp, int func)
7484 case 0: return PXP2_REG_PGL_PRETEND_FUNC_F0;
7485 case 1: return PXP2_REG_PGL_PRETEND_FUNC_F1;
7486 case 2: return PXP2_REG_PGL_PRETEND_FUNC_F2;
7487 case 3: return PXP2_REG_PGL_PRETEND_FUNC_F3;
7488 case 4: return PXP2_REG_PGL_PRETEND_FUNC_F4;
7489 case 5: return PXP2_REG_PGL_PRETEND_FUNC_F5;
7490 case 6: return PXP2_REG_PGL_PRETEND_FUNC_F6;
7491 case 7: return PXP2_REG_PGL_PRETEND_FUNC_F7;
7493 BNX2X_ERR("Unsupported function index: %d\n", func);
7498 static void bnx2x_undi_int_disable_e1h(struct bnx2x *bp, int orig_func)
7500 u32 reg = bnx2x_get_pretend_reg(bp, orig_func), new_val;
7502 /* Flush all outstanding writes */
7505 REG_WR(bp, reg, 0); /* Pretend to be function 0 */
7507 /* Flush the GRC transaction (in the chip) */
7508 new_val = REG_RD(bp, reg);
7510 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (0,%d)!\n",
7515 /* From now we are in the "like-E1" mode */
7516 bnx2x_int_disable(bp);
7518 /* Flush all outstanding writes */
7521 /* Restore the original function settings */
7522 REG_WR(bp, reg, orig_func);
7523 new_val = REG_RD(bp, reg);
7524 if (new_val != orig_func) {
7525 BNX2X_ERR("Hmmm... Pretend register wasn't updated: (%d,%d)!\n",
7526 orig_func, new_val);
7531 static inline void bnx2x_undi_int_disable(struct bnx2x *bp, int func)
7533 if (CHIP_IS_E1H(bp))
7534 bnx2x_undi_int_disable_e1h(bp, func);
7536 bnx2x_int_disable(bp);
7539 static void __devinit bnx2x_undi_unload(struct bnx2x *bp)
7543 /* Check if there is any driver already loaded */
7544 val = REG_RD(bp, MISC_REG_UNPREPARED);
7546 /* Check if it is the UNDI driver
7547 * UNDI driver initializes CID offset for normal bell to 0x7 */
7549 bnx2x_acquire_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7550 val = REG_RD(bp, DORQ_REG_NORM_CID_OFST);
7552 u32 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7554 int func = BP_FUNC(bp);
7558 /* clear the UNDI indication */
7559 REG_WR(bp, DORQ_REG_NORM_CID_OFST, 0);
7561 BNX2X_DEV_INFO("UNDI is active! reset device\n");
7563 /* try unload UNDI on port 0 */
7566 bp->fw_seq = (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7567 DRV_MSG_SEQ_NUMBER_MASK);
7568 reset_code = bnx2x_fw_command(bp, reset_code);
7570 /* if UNDI is loaded on the other port */
7571 if (reset_code != FW_MSG_CODE_DRV_UNLOAD_COMMON) {
7573 /* send "DONE" for previous unload */
7574 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7576 /* unload UNDI on port 1 */
7579 bp->fw_seq = (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7580 DRV_MSG_SEQ_NUMBER_MASK);
7581 reset_code = DRV_MSG_CODE_UNLOAD_REQ_WOL_DIS;
7583 bnx2x_fw_command(bp, reset_code);
7586 /* now it's safe to release the lock */
7587 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7589 bnx2x_undi_int_disable(bp, func);
7591 /* close input traffic and wait for it */
7592 /* Do not rcv packets to BRB */
7594 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_DRV_MASK :
7595 NIG_REG_LLH0_BRB1_DRV_MASK), 0x0);
7596 /* Do not direct rcv packets that are not for MCP to the BRB */
7599 REG_WR(bp, (BP_PORT(bp) ? NIG_REG_LLH1_BRB1_NOT_MCP :
7600 NIG_REG_LLH0_BRB1_NOT_MCP), 0x0);
7603 REG_WR(bp, (BP_PORT(bp) ? MISC_REG_AEU_MASK_ATTN_FUNC_1 :
7604 MISC_REG_AEU_MASK_ATTN_FUNC_0), 0);
7607 /* save NIG port swap info */
7608 swap_val = REG_RD(bp, NIG_REG_PORT_SWAP);
7609 swap_en = REG_RD(bp, NIG_REG_STRAP_OVERRIDE);
7612 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_CLEAR,
7615 GRCBASE_MISC + MISC_REGISTERS_RESET_REG_2_CLEAR,
7617 /* take the NIG out of reset and restore swap values */
7619 REG_WR(bp, GRCBASE_MISC + MISC_REGISTERS_RESET_REG_1_SET,
7620 MISC_REGISTERS_RESET_REG_1_RST_NIG);
7621 REG_WR(bp, NIG_REG_PORT_SWAP, swap_val);
7622 REG_WR(bp, NIG_REG_STRAP_OVERRIDE, swap_en);
7624 /* send unload done to the MCP */
7625 bnx2x_fw_command(bp, DRV_MSG_CODE_UNLOAD_DONE);
7627 /* restore our func and fw_seq */
7630 bp->fw_seq = (SHMEM_RD(bp, func_mb[bp->func].drv_mb_header) &
7631 DRV_MSG_SEQ_NUMBER_MASK);
7634 bnx2x_release_hw_lock(bp, HW_LOCK_RESOURCE_UNDI);
7638 static void __devinit bnx2x_get_common_hwinfo(struct bnx2x *bp)
7640 u32 val, val2, val3, val4, id;
7643 /* Get the chip revision id and number. */
7644 /* chip num:16-31, rev:12-15, metal:4-11, bond_id:0-3 */
7645 val = REG_RD(bp, MISC_REG_CHIP_NUM);
7646 id = ((val & 0xffff) << 16);
7647 val = REG_RD(bp, MISC_REG_CHIP_REV);
7648 id |= ((val & 0xf) << 12);
7649 val = REG_RD(bp, MISC_REG_CHIP_METAL);
7650 id |= ((val & 0xff) << 4);
7651 val = REG_RD(bp, MISC_REG_BOND_ID);
7653 id |= (val & 0xf); bp->common.chip_id = id;
7654 bp->link_params.chip_id = bp->common.chip_id;
7655 BNX2X_DEV_INFO("chip ID is 0x%x\n", id);
7657 val = (REG_RD(bp, 0x2874) & 0x55);
7658 if ((bp->common.chip_id & 0x1) ||
7659 (CHIP_IS_E1(bp) && val) || (CHIP_IS_E1H(bp) && (val == 0x55))) {
7660 bp->flags |= ONE_PORT_FLAG;
7661 BNX2X_DEV_INFO("single port device\n");
7664 val = REG_RD(bp, MCP_REG_MCPR_NVM_CFG4);
7665 bp->common.flash_size = (NVRAM_1MB_SIZE <<
7666 (val & MCPR_NVM_CFG4_FLASH_SIZE));
7667 BNX2X_DEV_INFO("flash_size 0x%x (%d)\n",
7668 bp->common.flash_size, bp->common.flash_size);
7670 bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
7671 bp->link_params.shmem_base = bp->common.shmem_base;
7672 BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);
7674 if (!bp->common.shmem_base ||
7675 (bp->common.shmem_base < 0xA0000) ||
7676 (bp->common.shmem_base >= 0xC0000)) {
7677 BNX2X_DEV_INFO("MCP not active\n");
7678 bp->flags |= NO_MCP_FLAG;
7682 val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
7683 if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7684 != (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
7685 BNX2X_ERR("BAD MCP validity signature\n");
7687 bp->common.hw_config = SHMEM_RD(bp, dev_info.shared_hw_config.config);
7688 BNX2X_DEV_INFO("hw_config 0x%08x\n", bp->common.hw_config);
7690 bp->link_params.hw_led_mode = ((bp->common.hw_config &
7691 SHARED_HW_CFG_LED_MODE_MASK) >>
7692 SHARED_HW_CFG_LED_MODE_SHIFT);
7694 bp->link_params.feature_config_flags = 0;
7695 val = SHMEM_RD(bp, dev_info.shared_feature_config.config);
7696 if (val & SHARED_FEAT_CFG_OVERRIDE_PREEMPHASIS_CFG_ENABLED)
7697 bp->link_params.feature_config_flags |=
7698 FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7700 bp->link_params.feature_config_flags &=
7701 ~FEATURE_CONFIG_OVERRIDE_PREEMPHASIS_ENABLED;
7703 val = SHMEM_RD(bp, dev_info.bc_rev) >> 8;
7704 bp->common.bc_ver = val;
7705 BNX2X_DEV_INFO("bc_ver %X\n", val);
7706 if (val < BNX2X_BC_VER) {
7707 /* for now only warn;
7708 * later we might need to enforce this */
7709 BNX2X_ERR("This driver needs bc_ver %X but found %X,"
7710 " please upgrade BC\n", BNX2X_BC_VER, val);
7712 bp->link_params.feature_config_flags |=
7713 (val >= REQ_BC_VER_4_VRFY_OPT_MDL) ?
7714 FEATURE_CONFIG_BC_SUPPORTS_OPT_MDL_VRFY : 0;
7716 if (BP_E1HVN(bp) == 0) {
7717 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_PMC, &pmc);
7718 bp->flags |= (pmc & PCI_PM_CAP_PME_D3cold) ? 0 : NO_WOL_FLAG;
7720 /* no WOL capability for E1HVN != 0 */
7721 bp->flags |= NO_WOL_FLAG;
7723 BNX2X_DEV_INFO("%sWoL capable\n",
7724 (bp->flags & NO_WOL_FLAG) ? "not " : "");
7726 val = SHMEM_RD(bp, dev_info.shared_hw_config.part_num);
7727 val2 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[4]);
7728 val3 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[8]);
7729 val4 = SHMEM_RD(bp, dev_info.shared_hw_config.part_num[12]);
7731 printk(KERN_INFO PFX "part number %X-%X-%X-%X\n",
7732 val, val2, val3, val4);
7735 static void __devinit bnx2x_link_settings_supported(struct bnx2x *bp,
7738 int port = BP_PORT(bp);
7741 switch (switch_cfg) {
7743 BNX2X_DEV_INFO("switch_cfg 0x%x (1G)\n", switch_cfg);
7746 ext_phy_type = SERDES_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7747 switch (ext_phy_type) {
7748 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_DIRECT:
7749 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7752 bp->port.supported |= (SUPPORTED_10baseT_Half |
7753 SUPPORTED_10baseT_Full |
7754 SUPPORTED_100baseT_Half |
7755 SUPPORTED_100baseT_Full |
7756 SUPPORTED_1000baseT_Full |
7757 SUPPORTED_2500baseX_Full |
7762 SUPPORTED_Asym_Pause);
7765 case PORT_HW_CFG_SERDES_EXT_PHY_TYPE_BCM5482:
7766 BNX2X_DEV_INFO("ext_phy_type 0x%x (5482)\n",
7769 bp->port.supported |= (SUPPORTED_10baseT_Half |
7770 SUPPORTED_10baseT_Full |
7771 SUPPORTED_100baseT_Half |
7772 SUPPORTED_100baseT_Full |
7773 SUPPORTED_1000baseT_Full |
7778 SUPPORTED_Asym_Pause);
7782 BNX2X_ERR("NVRAM config error. "
7783 "BAD SerDes ext_phy_config 0x%x\n",
7784 bp->link_params.ext_phy_config);
7788 bp->port.phy_addr = REG_RD(bp, NIG_REG_SERDES0_CTRL_PHY_ADDR +
7790 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7793 case SWITCH_CFG_10G:
7794 BNX2X_DEV_INFO("switch_cfg 0x%x (10G)\n", switch_cfg);
7797 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7798 switch (ext_phy_type) {
7799 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
7800 BNX2X_DEV_INFO("ext_phy_type 0x%x (Direct)\n",
7803 bp->port.supported |= (SUPPORTED_10baseT_Half |
7804 SUPPORTED_10baseT_Full |
7805 SUPPORTED_100baseT_Half |
7806 SUPPORTED_100baseT_Full |
7807 SUPPORTED_1000baseT_Full |
7808 SUPPORTED_2500baseX_Full |
7809 SUPPORTED_10000baseT_Full |
7814 SUPPORTED_Asym_Pause);
7817 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
7818 BNX2X_DEV_INFO("ext_phy_type 0x%x (8072)\n",
7821 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7822 SUPPORTED_1000baseT_Full |
7826 SUPPORTED_Asym_Pause);
7829 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
7830 BNX2X_DEV_INFO("ext_phy_type 0x%x (8073)\n",
7833 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7834 SUPPORTED_2500baseX_Full |
7835 SUPPORTED_1000baseT_Full |
7839 SUPPORTED_Asym_Pause);
7842 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
7843 BNX2X_DEV_INFO("ext_phy_type 0x%x (8705)\n",
7846 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7849 SUPPORTED_Asym_Pause);
7852 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
7853 BNX2X_DEV_INFO("ext_phy_type 0x%x (8706)\n",
7856 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7857 SUPPORTED_1000baseT_Full |
7860 SUPPORTED_Asym_Pause);
7863 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
7864 BNX2X_DEV_INFO("ext_phy_type 0x%x (8726)\n",
7867 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7868 SUPPORTED_1000baseT_Full |
7872 SUPPORTED_Asym_Pause);
7875 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
7876 BNX2X_DEV_INFO("ext_phy_type 0x%x (8727)\n",
7879 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7880 SUPPORTED_1000baseT_Full |
7884 SUPPORTED_Asym_Pause);
7887 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
7888 BNX2X_DEV_INFO("ext_phy_type 0x%x (SFX7101)\n",
7891 bp->port.supported |= (SUPPORTED_10000baseT_Full |
7895 SUPPORTED_Asym_Pause);
7898 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
7899 BNX2X_DEV_INFO("ext_phy_type 0x%x (BCM8481)\n",
7902 bp->port.supported |= (SUPPORTED_10baseT_Half |
7903 SUPPORTED_10baseT_Full |
7904 SUPPORTED_100baseT_Half |
7905 SUPPORTED_100baseT_Full |
7906 SUPPORTED_1000baseT_Full |
7907 SUPPORTED_10000baseT_Full |
7911 SUPPORTED_Asym_Pause);
7914 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
7915 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
7916 bp->link_params.ext_phy_config);
7920 BNX2X_ERR("NVRAM config error. "
7921 "BAD XGXS ext_phy_config 0x%x\n",
7922 bp->link_params.ext_phy_config);
7926 bp->port.phy_addr = REG_RD(bp, NIG_REG_XGXS0_CTRL_PHY_ADDR +
7928 BNX2X_DEV_INFO("phy_addr 0x%x\n", bp->port.phy_addr);
7933 BNX2X_ERR("BAD switch_cfg link_config 0x%x\n",
7934 bp->port.link_config);
7937 bp->link_params.phy_addr = bp->port.phy_addr;
7939 /* mask what we support according to speed_cap_mask */
7940 if (!(bp->link_params.speed_cap_mask &
7941 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_HALF))
7942 bp->port.supported &= ~SUPPORTED_10baseT_Half;
7944 if (!(bp->link_params.speed_cap_mask &
7945 PORT_HW_CFG_SPEED_CAPABILITY_D0_10M_FULL))
7946 bp->port.supported &= ~SUPPORTED_10baseT_Full;
7948 if (!(bp->link_params.speed_cap_mask &
7949 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_HALF))
7950 bp->port.supported &= ~SUPPORTED_100baseT_Half;
7952 if (!(bp->link_params.speed_cap_mask &
7953 PORT_HW_CFG_SPEED_CAPABILITY_D0_100M_FULL))
7954 bp->port.supported &= ~SUPPORTED_100baseT_Full;
7956 if (!(bp->link_params.speed_cap_mask &
7957 PORT_HW_CFG_SPEED_CAPABILITY_D0_1G))
7958 bp->port.supported &= ~(SUPPORTED_1000baseT_Half |
7959 SUPPORTED_1000baseT_Full);
7961 if (!(bp->link_params.speed_cap_mask &
7962 PORT_HW_CFG_SPEED_CAPABILITY_D0_2_5G))
7963 bp->port.supported &= ~SUPPORTED_2500baseX_Full;
7965 if (!(bp->link_params.speed_cap_mask &
7966 PORT_HW_CFG_SPEED_CAPABILITY_D0_10G))
7967 bp->port.supported &= ~SUPPORTED_10000baseT_Full;
7969 BNX2X_DEV_INFO("supported 0x%x\n", bp->port.supported);
7972 static void __devinit bnx2x_link_settings_requested(struct bnx2x *bp)
7974 bp->link_params.req_duplex = DUPLEX_FULL;
7976 switch (bp->port.link_config & PORT_FEATURE_LINK_SPEED_MASK) {
7977 case PORT_FEATURE_LINK_SPEED_AUTO:
7978 if (bp->port.supported & SUPPORTED_Autoneg) {
7979 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
7980 bp->port.advertising = bp->port.supported;
7983 } else { u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
7985 if ((ext_phy_type ==
7986 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705) ||
7988 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706)) {
7989 /* force 10G, no AN */
7990 bp->link_params.req_line_speed = SPEED_10000;
7991 bp->port.advertising =
7992 (ADVERTISED_10000baseT_Full |
7996 BNX2X_ERR("NVRAM config error. "
7997 "Invalid link_config 0x%x"
7998 " Autoneg not supported\n",
7999 bp->port.link_config);
8004 case PORT_FEATURE_LINK_SPEED_10M_FULL:
8005 if (bp->port.supported & SUPPORTED_10baseT_Full) {
8006 bp->link_params.req_line_speed = SPEED_10;
8007 bp->port.advertising = (ADVERTISED_10baseT_Full |
8010 BNX2X_ERR("NVRAM config error. "
8011 "Invalid link_config 0x%x"
8012 " speed_cap_mask 0x%x\n",
8013 bp->port.link_config,
8014 bp->link_params.speed_cap_mask);
8019 case PORT_FEATURE_LINK_SPEED_10M_HALF:
8020 if (bp->port.supported & SUPPORTED_10baseT_Half) {
8021 bp->link_params.req_line_speed = SPEED_10;
8022 bp->link_params.req_duplex = DUPLEX_HALF;
8023 bp->port.advertising = (ADVERTISED_10baseT_Half |
8026 BNX2X_ERR("NVRAM config error. "
8027 "Invalid link_config 0x%x"
8028 " speed_cap_mask 0x%x\n",
8029 bp->port.link_config,
8030 bp->link_params.speed_cap_mask);
8035 case PORT_FEATURE_LINK_SPEED_100M_FULL:
8036 if (bp->port.supported & SUPPORTED_100baseT_Full) {
8037 bp->link_params.req_line_speed = SPEED_100;
8038 bp->port.advertising = (ADVERTISED_100baseT_Full |
8041 BNX2X_ERR("NVRAM config error. "
8042 "Invalid link_config 0x%x"
8043 " speed_cap_mask 0x%x\n",
8044 bp->port.link_config,
8045 bp->link_params.speed_cap_mask);
8050 case PORT_FEATURE_LINK_SPEED_100M_HALF:
8051 if (bp->port.supported & SUPPORTED_100baseT_Half) {
8052 bp->link_params.req_line_speed = SPEED_100;
8053 bp->link_params.req_duplex = DUPLEX_HALF;
8054 bp->port.advertising = (ADVERTISED_100baseT_Half |
8057 BNX2X_ERR("NVRAM config error. "
8058 "Invalid link_config 0x%x"
8059 " speed_cap_mask 0x%x\n",
8060 bp->port.link_config,
8061 bp->link_params.speed_cap_mask);
8066 case PORT_FEATURE_LINK_SPEED_1G:
8067 if (bp->port.supported & SUPPORTED_1000baseT_Full) {
8068 bp->link_params.req_line_speed = SPEED_1000;
8069 bp->port.advertising = (ADVERTISED_1000baseT_Full |
8072 BNX2X_ERR("NVRAM config error. "
8073 "Invalid link_config 0x%x"
8074 " speed_cap_mask 0x%x\n",
8075 bp->port.link_config,
8076 bp->link_params.speed_cap_mask);
8081 case PORT_FEATURE_LINK_SPEED_2_5G:
8082 if (bp->port.supported & SUPPORTED_2500baseX_Full) {
8083 bp->link_params.req_line_speed = SPEED_2500;
8084 bp->port.advertising = (ADVERTISED_2500baseX_Full |
8087 BNX2X_ERR("NVRAM config error. "
8088 "Invalid link_config 0x%x"
8089 " speed_cap_mask 0x%x\n",
8090 bp->port.link_config,
8091 bp->link_params.speed_cap_mask);
8096 case PORT_FEATURE_LINK_SPEED_10G_CX4:
8097 case PORT_FEATURE_LINK_SPEED_10G_KX4:
8098 case PORT_FEATURE_LINK_SPEED_10G_KR:
8099 if (bp->port.supported & SUPPORTED_10000baseT_Full) {
8100 bp->link_params.req_line_speed = SPEED_10000;
8101 bp->port.advertising = (ADVERTISED_10000baseT_Full |
8104 BNX2X_ERR("NVRAM config error. "
8105 "Invalid link_config 0x%x"
8106 " speed_cap_mask 0x%x\n",
8107 bp->port.link_config,
8108 bp->link_params.speed_cap_mask);
8114 BNX2X_ERR("NVRAM config error. "
8115 "BAD link speed link_config 0x%x\n",
8116 bp->port.link_config);
8117 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8118 bp->port.advertising = bp->port.supported;
8122 bp->link_params.req_flow_ctrl = (bp->port.link_config &
8123 PORT_FEATURE_FLOW_CONTROL_MASK);
8124 if ((bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO) &&
8125 !(bp->port.supported & SUPPORTED_Autoneg))
8126 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
8128 BNX2X_DEV_INFO("req_line_speed %d req_duplex %d req_flow_ctrl 0x%x"
8129 " advertising 0x%x\n",
8130 bp->link_params.req_line_speed,
8131 bp->link_params.req_duplex,
8132 bp->link_params.req_flow_ctrl, bp->port.advertising);
8135 static void __devinit bnx2x_get_port_hwinfo(struct bnx2x *bp)
8137 int port = BP_PORT(bp);
8142 bp->link_params.bp = bp;
8143 bp->link_params.port = port;
8145 bp->link_params.lane_config =
8146 SHMEM_RD(bp, dev_info.port_hw_config[port].lane_config);
8147 bp->link_params.ext_phy_config =
8149 SHMEM_RD(bp, dev_info.port_hw_config[port].external_phy_config);
8150 /* BCM8727_NOC => BCM8727 no over current */
8151 if (XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config) ==
8152 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727_NOC) {
8153 bp->link_params.ext_phy_config &=
8154 ~PORT_HW_CFG_XGXS_EXT_PHY_TYPE_MASK;
8155 bp->link_params.ext_phy_config |=
8156 PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727;
8157 bp->link_params.feature_config_flags |=
8158 FEATURE_CONFIG_BCM8727_NOC;
8161 bp->link_params.speed_cap_mask =
8163 SHMEM_RD(bp, dev_info.port_hw_config[port].speed_capability_mask);
8165 bp->port.link_config =
8166 SHMEM_RD(bp, dev_info.port_feature_config[port].link_config);
8168 /* Get the 4 lanes xgxs config rx and tx */
8169 for (i = 0; i < 2; i++) {
8171 val = SHMEM_RD(bp, dev_info.port_hw_config[port].xgxs_config_rx[i<<1]);
8172 bp->link_params.xgxs_config_rx[i << 1] = ((val>>16) & 0xffff);
8173 bp->link_params.xgxs_config_rx[(i << 1) + 1] = (val & 0xffff);
8176 val = SHMEM_RD(bp, dev_info.port_hw_config[port].xgxs_config_tx[i<<1]);
8177 bp->link_params.xgxs_config_tx[i << 1] = ((val>>16) & 0xffff);
8178 bp->link_params.xgxs_config_tx[(i << 1) + 1] = (val & 0xffff);
8181 /* If the device is capable of WoL, set the default state according to the HW config */
8184 config = SHMEM_RD(bp, dev_info.port_feature_config[port].config);
8185 bp->wol = (!(bp->flags & NO_WOL_FLAG) &&
8186 (config & PORT_FEATURE_WOL_ENABLED));
8188 BNX2X_DEV_INFO("lane_config 0x%08x ext_phy_config 0x%08x"
8189 " speed_cap_mask 0x%08x link_config 0x%08x\n",
8190 bp->link_params.lane_config,
8191 bp->link_params.ext_phy_config,
8192 bp->link_params.speed_cap_mask, bp->port.link_config);
8194 bp->link_params.switch_cfg |= (bp->port.link_config &
8195 PORT_FEATURE_CONNECTED_SWITCH_MASK);
8196 bnx2x_link_settings_supported(bp, bp->link_params.switch_cfg);
8198 bnx2x_link_settings_requested(bp);
8200 val2 = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_upper);
8201 val = SHMEM_RD(bp, dev_info.port_hw_config[port].mac_lower);
8202 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8203 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8204 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8205 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8206 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8207 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8208 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr, ETH_ALEN);
8209 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
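/* Worked example (illustrative values): mac_upper = 0x00000011 and
 * mac_lower = 0x22334455 unpack to dev_addr 00:11:22:33:44:55; the upper
 * word carries bytes 0-1 and the lower word bytes 2-5.
 */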
8212 static int __devinit bnx2x_get_hwinfo(struct bnx2x *bp)
8214 int func = BP_FUNC(bp);
8218 bnx2x_get_common_hwinfo(bp);
8222 if (CHIP_IS_E1H(bp)) {
8224 bp->mf_config = SHMEM_RD(bp, mf_cfg.func_mf_config[func].config);
8226 val = (SHMEM_RD(bp, mf_cfg.func_mf_config[func].e1hov_tag) &
8227 FUNC_MF_CFG_E1HOV_TAG_MASK);
8228 if (val != FUNC_MF_CFG_E1HOV_TAG_DEFAULT) {
8232 BNX2X_DEV_INFO("MF mode E1HOV for func %d is %d "
8234 func, bp->e1hov, bp->e1hov);
8236 BNX2X_DEV_INFO("single function mode\n");
8238 BNX2X_ERR("!!! No valid E1HOV for func %d,"
8239 " aborting\n", func);
8245 if (!BP_NOMCP(bp)) {
8246 bnx2x_get_port_hwinfo(bp);
8248 bp->fw_seq = (SHMEM_RD(bp, func_mb[func].drv_mb_header) &
8249 DRV_MSG_SEQ_NUMBER_MASK);
8250 BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
8254 val2 = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_upper);
8255 val = SHMEM_RD(bp, mf_cfg.func_mf_config[func].mac_lower);
8256 if ((val2 != FUNC_MF_CFG_UPPERMAC_DEFAULT) &&
8257 (val != FUNC_MF_CFG_LOWERMAC_DEFAULT)) {
8258 bp->dev->dev_addr[0] = (u8)(val2 >> 8 & 0xff);
8259 bp->dev->dev_addr[1] = (u8)(val2 & 0xff);
8260 bp->dev->dev_addr[2] = (u8)(val >> 24 & 0xff);
8261 bp->dev->dev_addr[3] = (u8)(val >> 16 & 0xff);
8262 bp->dev->dev_addr[4] = (u8)(val >> 8 & 0xff);
8263 bp->dev->dev_addr[5] = (u8)(val & 0xff);
8264 memcpy(bp->link_params.mac_addr, bp->dev->dev_addr,
8266 memcpy(bp->dev->perm_addr, bp->dev->dev_addr,
8274 /* only supposed to happen on emulation/FPGA */
8275 BNX2X_ERR("warning random MAC workaround active\n");
8276 random_ether_addr(bp->dev->dev_addr);
8277 memcpy(bp->dev->perm_addr, bp->dev->dev_addr, ETH_ALEN);
8283 static int __devinit bnx2x_init_bp(struct bnx2x *bp)
8285 int func = BP_FUNC(bp);
8289 /* Disable interrupt handling until HW is initialized */
8290 atomic_set(&bp->intr_sem, 1);
8291 smp_wmb(); /* Ensure that bp->intr_sem update is SMP-safe */
8293 mutex_init(&bp->port.phy_mutex);
8295 INIT_DELAYED_WORK(&bp->sp_task, bnx2x_sp_task);
8296 INIT_WORK(&bp->reset_task, bnx2x_reset_task);
8298 rc = bnx2x_get_hwinfo(bp);
8300 /* need to reset chip if undi was active */
8302 bnx2x_undi_unload(bp);
8304 if (CHIP_REV_IS_FPGA(bp))
8305 printk(KERN_ERR PFX "FPGA detected\n");
8307 if (BP_NOMCP(bp) && (func == 0))
8309 "MCP disabled, must load devices in order!\n");
8311 /* Set multi queue mode */
8312 if ((multi_mode != ETH_RSS_MODE_DISABLED) &&
8313 ((int_mode == INT_MODE_INTx) || (int_mode == INT_MODE_MSI))) {
8315 "Multi disabled since int_mode requested is not MSI-X\n");
8316 multi_mode = ETH_RSS_MODE_DISABLED;
8318 bp->multi_mode = multi_mode;
8323 if (disable_tpa) { bp->flags &= ~TPA_ENABLE_FLAG;
8324 bp->dev->features &= ~NETIF_F_LRO; }
8326 else { bp->flags |= TPA_ENABLE_FLAG;
8327 bp->dev->features |= NETIF_F_LRO; }
8332 bp->tx_ring_size = MAX_TX_AVAIL;
8333 bp->rx_ring_size = MAX_RX_AVAIL;
8340 timer_interval = (CHIP_REV_IS_SLOW(bp) ? 5*HZ : HZ);
8341 bp->current_interval = (poll ? poll : timer_interval);
8343 init_timer(&bp->timer);
8344 bp->timer.expires = jiffies + bp->current_interval;
8345 bp->timer.data = (unsigned long) bp;
8346 bp->timer.function = bnx2x_timer;
8352 * ethtool service functions
8355 /* All ethtool functions called with rtnl_lock */
8357 static int bnx2x_get_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8359 struct bnx2x *bp = netdev_priv(dev);
8361 cmd->supported = bp->port.supported;
8362 cmd->advertising = bp->port.advertising;
8364 if (netif_carrier_ok(dev)) {
8365 cmd->speed = bp->link_vars.line_speed;
8366 cmd->duplex = bp->link_vars.duplex;
8368 cmd->speed = bp->link_params.req_line_speed;
8369 cmd->duplex = bp->link_params.req_duplex;
8374 vn_max_rate = ((bp->mf_config & FUNC_MF_CFG_MAX_BW_MASK) >>
8375 FUNC_MF_CFG_MAX_BW_SHIFT) * 100;
8376 if (vn_max_rate < cmd->speed)
8377 cmd->speed = vn_max_rate;
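/* Worked example (field value assumed): a max BW field of 25 gives
 * vn_max_rate = 25 * 100 = 2500, so ethtool reports 2500 Mb/s for this
 * virtual function even when the physical link runs at 10G.
 */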
8380 if (bp->link_params.switch_cfg == SWITCH_CFG_10G) {
8382 u32 ext_phy_type = XGXS_EXT_PHY_TYPE(bp->link_params.ext_phy_config);
8384 switch (ext_phy_type) {
8385 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_DIRECT:
8386 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8072:
8387 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8073:
8388 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8705:
8389 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8706:
8390 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8726:
8391 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8727:
8392 cmd->port = PORT_FIBRE;
8395 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_SFX7101:
8396 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_BCM8481:
8397 cmd->port = PORT_TP;
8400 case PORT_HW_CFG_XGXS_EXT_PHY_TYPE_FAILURE:
8401 BNX2X_ERR("XGXS PHY Failure detected 0x%x\n",
8402 bp->link_params.ext_phy_config);
8406 DP(NETIF_MSG_LINK, "BAD XGXS ext_phy_config 0x%x\n",
8407 bp->link_params.ext_phy_config);
8411 cmd->port = PORT_TP;
8413 cmd->phy_address = bp->port.phy_addr;
8414 cmd->transceiver = XCVR_INTERNAL;
8416 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
8417 cmd->autoneg = AUTONEG_ENABLE;
8419 cmd->autoneg = AUTONEG_DISABLE;
8424 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8425 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8426 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8427 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8428 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8429 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8430 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8435 static int bnx2x_set_settings(struct net_device *dev, struct ethtool_cmd *cmd)
8437 struct bnx2x *bp = netdev_priv(dev);
8443 DP(NETIF_MSG_LINK, "ethtool_cmd: cmd %d\n"
8444 DP_LEVEL " supported 0x%x advertising 0x%x speed %d\n"
8445 DP_LEVEL " duplex %d port %d phy_address %d transceiver %d\n"
8446 DP_LEVEL " autoneg %d maxtxpkt %d maxrxpkt %d\n",
8447 cmd->cmd, cmd->supported, cmd->advertising, cmd->speed,
8448 cmd->duplex, cmd->port, cmd->phy_address, cmd->transceiver,
8449 cmd->autoneg, cmd->maxtxpkt, cmd->maxrxpkt);
8451 if (cmd->autoneg == AUTONEG_ENABLE) {
8452 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
8453 DP(NETIF_MSG_LINK, "Autoneg not supported\n");
8457 /* advertise the requested speed and duplex if supported */
8458 cmd->advertising &= bp->port.supported;
8460 bp->link_params.req_line_speed = SPEED_AUTO_NEG;
8461 bp->link_params.req_duplex = DUPLEX_FULL;
8462 bp->port.advertising |= (ADVERTISED_Autoneg |
8465 } else { /* forced speed */
8466 /* advertise the requested speed and duplex if supported */
8467 switch (cmd->speed) {
8469 if (cmd->duplex == DUPLEX_FULL) {
8470 if (!(bp->port.supported &
8471 SUPPORTED_10baseT_Full)) {
8473 "10M full not supported\n");
8477 advertising = (ADVERTISED_10baseT_Full |
8480 if (!(bp->port.supported &
8481 SUPPORTED_10baseT_Half)) {
8483 "10M half not supported\n");
8487 advertising = (ADVERTISED_10baseT_Half |
8493 if (cmd->duplex == DUPLEX_FULL) {
8494 if (!(bp->port.supported &
8495 SUPPORTED_100baseT_Full)) {
8497 "100M full not supported\n");
8501 advertising = (ADVERTISED_100baseT_Full |
8504 if (!(bp->port.supported &
8505 SUPPORTED_100baseT_Half)) {
8507 "100M half not supported\n");
8511 advertising = (ADVERTISED_100baseT_Half |
8517 if (cmd->duplex != DUPLEX_FULL) {
8518 DP(NETIF_MSG_LINK, "1G half not supported\n");
8522 if (!(bp->port.supported & SUPPORTED_1000baseT_Full)) {
8523 DP(NETIF_MSG_LINK, "1G full not supported\n");
8527 advertising = (ADVERTISED_1000baseT_Full |
8532 if (cmd->duplex != DUPLEX_FULL) {
8534 "2.5G half not supported\n");
8538 if (!(bp->port.supported & SUPPORTED_2500baseX_Full)) {
8540 "2.5G full not supported\n");
8544 advertising = (ADVERTISED_2500baseX_Full |
8549 if (cmd->duplex != DUPLEX_FULL) {
8550 DP(NETIF_MSG_LINK, "10G half not supported\n");
8554 if (!(bp->port.supported & SUPPORTED_10000baseT_Full)) {
8555 DP(NETIF_MSG_LINK, "10G full not supported\n");
8559 advertising = (ADVERTISED_10000baseT_Full |
8564 DP(NETIF_MSG_LINK, "Unsupported speed\n");
8568 bp->link_params.req_line_speed = cmd->speed;
8569 bp->link_params.req_duplex = cmd->duplex;
8570 bp->port.advertising = advertising;
8573 DP(NETIF_MSG_LINK, "req_line_speed %d\n"
8574 DP_LEVEL " req_duplex %d advertising 0x%x\n",
8575 bp->link_params.req_line_speed, bp->link_params.req_duplex,
8576 bp->port.advertising);
8578 if (netif_running(dev)) {
8579 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8586 #define PHY_FW_VER_LEN 10
8588 static void bnx2x_get_drvinfo(struct net_device *dev,
8589 struct ethtool_drvinfo *info)
8591 struct bnx2x *bp = netdev_priv(dev);
8592 u8 phy_fw_ver[PHY_FW_VER_LEN];
8594 strcpy(info->driver, DRV_MODULE_NAME);
8595 strcpy(info->version, DRV_MODULE_VERSION);
8597 phy_fw_ver[0] = '\0';
8599 bnx2x_acquire_phy_lock(bp);
8600 bnx2x_get_ext_phy_fw_version(&bp->link_params,
8601 (bp->state != BNX2X_STATE_CLOSED),
8602 phy_fw_ver, PHY_FW_VER_LEN);
8603 bnx2x_release_phy_lock(bp);
8606 snprintf(info->fw_version, 32, "BC:%d.%d.%d%s%s",
8607 (bp->common.bc_ver & 0xff0000) >> 16,
8608 (bp->common.bc_ver & 0xff00) >> 8,
8609 (bp->common.bc_ver & 0xff),
8610 ((phy_fw_ver[0] != '\0') ? " PHY:" : ""), phy_fw_ver);
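/* Worked example (value assumed): bp->common.bc_ver = 0x040200 yields
 * (0x040200 & 0xff0000) >> 16 = 4, (0x040200 & 0xff00) >> 8 = 2 and
 * 0x040200 & 0xff = 0, i.e. fw_version starts with "BC:4.2.0".
 */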
8611 strcpy(info->bus_info, pci_name(bp->pdev));
8612 info->n_stats = BNX2X_NUM_STATS;
8613 info->testinfo_len = BNX2X_NUM_TESTS;
8614 info->eedump_len = bp->common.flash_size;
8615 info->regdump_len = 0;
8618 #define IS_E1_ONLINE(info) (((info) & RI_E1_ONLINE) == RI_E1_ONLINE)
8619 #define IS_E1H_ONLINE(info) (((info) & RI_E1H_ONLINE) == RI_E1H_ONLINE)
8621 static int bnx2x_get_regs_len(struct net_device *dev)
8623 static u32 regdump_len;
8624 struct bnx2x *bp = netdev_priv(dev);
8630 if (CHIP_IS_E1(bp)) {
8631 for (i = 0; i < REGS_COUNT; i++)
8632 if (IS_E1_ONLINE(reg_addrs[i].info))
8633 regdump_len += reg_addrs[i].size;
8635 for (i = 0; i < WREGS_COUNT_E1; i++)
8636 if (IS_E1_ONLINE(wreg_addrs_e1[i].info))
8637 regdump_len += wreg_addrs_e1[i].size *
8638 (1 + wreg_addrs_e1[i].read_regs_count);
8641 for (i = 0; i < REGS_COUNT; i++)
8642 if (IS_E1H_ONLINE(reg_addrs[i].info))
8643 regdump_len += reg_addrs[i].size;
8645 for (i = 0; i < WREGS_COUNT_E1H; i++)
8646 if (IS_E1H_ONLINE(wreg_addrs_e1h[i].info))
8647 regdump_len += wreg_addrs_e1h[i].size *
8648 (1 + wreg_addrs_e1h[i].read_regs_count);
8651 regdump_len += sizeof(struct dump_hdr);
8656 static void bnx2x_get_regs(struct net_device *dev,
8657 struct ethtool_regs *regs, void *_p)
8660 struct bnx2x *bp = netdev_priv(dev);
8661 struct dump_hdr dump_hdr = {0};
8664 memset(p, 0, regs->len);
8666 if (!netif_running(bp->dev))
8669 dump_hdr.hdr_size = (sizeof(struct dump_hdr) / 4) - 1;
8670 dump_hdr.dump_sign = dump_sign_all;
8671 dump_hdr.xstorm_waitp = REG_RD(bp, XSTORM_WAITP_ADDR);
8672 dump_hdr.tstorm_waitp = REG_RD(bp, TSTORM_WAITP_ADDR);
8673 dump_hdr.ustorm_waitp = REG_RD(bp, USTORM_WAITP_ADDR);
8674 dump_hdr.cstorm_waitp = REG_RD(bp, CSTORM_WAITP_ADDR);
8675 dump_hdr.info = CHIP_IS_E1(bp) ? RI_E1_ONLINE : RI_E1H_ONLINE;
8677 memcpy(p, &dump_hdr, sizeof(struct dump_hdr));
8678 p += dump_hdr.hdr_size + 1;
8680 if (CHIP_IS_E1(bp)) {
8681 for (i = 0; i < REGS_COUNT; i++)
8682 if (IS_E1_ONLINE(reg_addrs[i].info))
8683 for (j = 0; j < reg_addrs[i].size; j++)
8685 reg_addrs[i].addr + j*4);
8688 for (i = 0; i < REGS_COUNT; i++)
8689 if (IS_E1H_ONLINE(reg_addrs[i].info))
8690 for (j = 0; j < reg_addrs[i].size; j++)
8692 reg_addrs[i].addr + j*4);
8696 static void bnx2x_get_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8698 struct bnx2x *bp = netdev_priv(dev);
8700 if (bp->flags & NO_WOL_FLAG) {
8704 wol->supported = WAKE_MAGIC;
8706 wol->wolopts = WAKE_MAGIC;
8710 memset(&wol->sopass, 0, sizeof(wol->sopass));
8713 static int bnx2x_set_wol(struct net_device *dev, struct ethtool_wolinfo *wol)
8715 struct bnx2x *bp = netdev_priv(dev);
8717 if (wol->wolopts & ~WAKE_MAGIC)
8720 if (wol->wolopts & WAKE_MAGIC) {
8721 if (bp->flags & NO_WOL_FLAG)
8731 static u32 bnx2x_get_msglevel(struct net_device *dev)
8733 struct bnx2x *bp = netdev_priv(dev);
8735 return bp->msglevel;
8738 static void bnx2x_set_msglevel(struct net_device *dev, u32 level)
8740 struct bnx2x *bp = netdev_priv(dev);
8742 if (capable(CAP_NET_ADMIN))
8743 bp->msglevel = level;
8746 static int bnx2x_nway_reset(struct net_device *dev)
8748 struct bnx2x *bp = netdev_priv(dev);
8753 if (netif_running(dev)) {
8754 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
8762 bnx2x_get_link(struct net_device *dev)
8764 struct bnx2x *bp = netdev_priv(dev);
8766 return bp->link_vars.link_up;
8769 static int bnx2x_get_eeprom_len(struct net_device *dev)
8771 struct bnx2x *bp = netdev_priv(dev);
8773 return bp->common.flash_size;
8776 static int bnx2x_acquire_nvram_lock(struct bnx2x *bp)
8778 int port = BP_PORT(bp);
8782 /* adjust timeout for emulation/FPGA */
8783 count = NVRAM_TIMEOUT_COUNT;
8784 if (CHIP_REV_IS_SLOW(bp))
8787 /* request access to nvram interface */
8788 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8789 (MCPR_NVM_SW_ARB_ARB_REQ_SET1 << port));
8791 for (i = 0; i < count*10; i++) {
8792 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8793 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))
8799 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port))) {
8800 DP(BNX2X_MSG_NVM, "cannot get access to nvram interface\n");
8807 static int bnx2x_release_nvram_lock(struct bnx2x *bp)
8809 int port = BP_PORT(bp);
8813 /* adjust timeout for emulation/FPGA */
8814 count = NVRAM_TIMEOUT_COUNT;
8815 if (CHIP_REV_IS_SLOW(bp))
8818 /* relinquish nvram interface */
8819 REG_WR(bp, MCP_REG_MCPR_NVM_SW_ARB,
8820 (MCPR_NVM_SW_ARB_ARB_REQ_CLR1 << port));
8822 for (i = 0; i < count*10; i++) {
8823 val = REG_RD(bp, MCP_REG_MCPR_NVM_SW_ARB);
8824 if (!(val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)))
8830 if (val & (MCPR_NVM_SW_ARB_ARB_ARB1 << port)) {
8831 DP(BNX2X_MSG_NVM, "cannot free access to nvram interface\n");
8838 static void bnx2x_enable_nvram_access(struct bnx2x *bp)
8842 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8844 /* enable both bits, even on read */
8845 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8846 (val | MCPR_NVM_ACCESS_ENABLE_EN |
8847 MCPR_NVM_ACCESS_ENABLE_WR_EN));
8850 static void bnx2x_disable_nvram_access(struct bnx2x *bp)
8854 val = REG_RD(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE);
8856 /* disable both bits, even after read */
8857 REG_WR(bp, MCP_REG_MCPR_NVM_ACCESS_ENABLE,
8858 (val & ~(MCPR_NVM_ACCESS_ENABLE_EN |
8859 MCPR_NVM_ACCESS_ENABLE_WR_EN)));
8862 static int bnx2x_nvram_read_dword(struct bnx2x *bp, u32 offset, __be32 *ret_val,
8868 /* build the command word */
8869 cmd_flags |= MCPR_NVM_COMMAND_DOIT;
8871 /* need to clear DONE bit separately */
8872 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8874 /* address of the NVRAM to read from */
8875 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8876 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
8878 /* issue a read command */
8879 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
8881 /* adjust timeout for emulation/FPGA */
8882 count = NVRAM_TIMEOUT_COUNT;
8883 if (CHIP_REV_IS_SLOW(bp))
8886 /* wait for completion */
8889 for (i = 0; i < count; i++) {
8891 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
8893 if (val & MCPR_NVM_COMMAND_DONE) {
8894 val = REG_RD(bp, MCP_REG_MCPR_NVM_READ);
8895 /* we read nvram data in cpu order,
8896  * but ethtool sees it as an array of bytes;
8897  * converting to big-endian does the job */
8898 *ret_val = cpu_to_be32(val);
8907 static int bnx2x_nvram_read(struct bnx2x *bp, u32 offset, u8 *ret_buf,
8914 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
8916 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
8921 if (offset + buf_size > bp->common.flash_size) {
8922 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
8923 " buf_size (0x%x) > flash_size (0x%x)\n",
8924 offset, buf_size, bp->common.flash_size);
8928 /* request access to nvram interface */
8929 rc = bnx2x_acquire_nvram_lock(bp);
8933 /* enable access to nvram interface */
8934 bnx2x_enable_nvram_access(bp);
8936 /* read the first word(s) */
8937 cmd_flags = MCPR_NVM_COMMAND_FIRST;
8938 while ((buf_size > sizeof(u32)) && (rc == 0)) {
8939 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8940 memcpy(ret_buf, &val, 4);
8942 /* advance to the next dword */
8943 offset += sizeof(u32);
8944 ret_buf += sizeof(u32);
8945 buf_size -= sizeof(u32);
8950 cmd_flags |= MCPR_NVM_COMMAND_LAST;
8951 rc = bnx2x_nvram_read_dword(bp, offset, &val, cmd_flags);
8952 memcpy(ret_buf, &val, 4);
8955 /* disable access to nvram interface */
8956 bnx2x_disable_nvram_access(bp);
8957 bnx2x_release_nvram_lock(bp);
8962 static int bnx2x_get_eeprom(struct net_device *dev,
8963 struct ethtool_eeprom *eeprom, u8 *eebuf)
8965 struct bnx2x *bp = netdev_priv(dev);
8968 if (!netif_running(dev))
8971 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
8972 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
8973 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
8974 eeprom->len, eeprom->len);
8976 /* parameters already validated in ethtool_get_eeprom */
8978 rc = bnx2x_nvram_read(bp, eeprom->offset, eebuf, eeprom->len);
8983 static int bnx2x_nvram_write_dword(struct bnx2x *bp, u32 offset, u32 val,
8988 /* build the command word */
8989 cmd_flags |= MCPR_NVM_COMMAND_DOIT | MCPR_NVM_COMMAND_WR;
8991 /* need to clear DONE bit separately */
8992 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, MCPR_NVM_COMMAND_DONE);
8994 /* write the data */
8995 REG_WR(bp, MCP_REG_MCPR_NVM_WRITE, val);
8997 /* address of the NVRAM to write to */
8998 REG_WR(bp, MCP_REG_MCPR_NVM_ADDR,
8999 (offset & MCPR_NVM_ADDR_NVM_ADDR_VALUE));
9001 /* issue the write command */
9002 REG_WR(bp, MCP_REG_MCPR_NVM_COMMAND, cmd_flags);
9004 /* adjust timeout for emulation/FPGA */
9005 count = NVRAM_TIMEOUT_COUNT;
9006 if (CHIP_REV_IS_SLOW(bp))
9009 /* wait for completion */
9011 for (i = 0; i < count; i++) {
9013 val = REG_RD(bp, MCP_REG_MCPR_NVM_COMMAND);
9014 if (val & MCPR_NVM_COMMAND_DONE) {
9023 #define BYTE_OFFSET(offset) (8 * (offset & 0x03))
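/* For illustration: BYTE_OFFSET() yields the shift of a byte within
 * its aligned dword, e.g. offset 0x13 -> 8 * (0x13 & 3) = 24, so
 * bnx2x_nvram_write1() below clears 0xff << 24 in the dword read from
 * 0x10 and merges the new byte in - a read-modify-write of a single
 * NVRAM byte.
 */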
9025 static int bnx2x_nvram_write1(struct bnx2x *bp, u32 offset, u8 *data_buf,
9033 if (offset + buf_size > bp->common.flash_size) {
9034 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9035 " buf_size (0x%x) > flash_size (0x%x)\n",
9036 offset, buf_size, bp->common.flash_size);
9040 /* request access to nvram interface */
9041 rc = bnx2x_acquire_nvram_lock(bp);
9045 /* enable access to nvram interface */
9046 bnx2x_enable_nvram_access(bp);
9048 cmd_flags = (MCPR_NVM_COMMAND_FIRST | MCPR_NVM_COMMAND_LAST);
9049 align_offset = (offset & ~0x03);
9050 rc = bnx2x_nvram_read_dword(bp, align_offset, &val, cmd_flags);
9053 val &= ~(0xff << BYTE_OFFSET(offset));
9054 val |= (*data_buf << BYTE_OFFSET(offset));
9056 /* nvram data is returned as an array of bytes;
9057  * convert it back to cpu order */
9058 val = be32_to_cpu(val);
9060 rc = bnx2x_nvram_write_dword(bp, align_offset, val,
9064 /* disable access to nvram interface */
9065 bnx2x_disable_nvram_access(bp);
9066 bnx2x_release_nvram_lock(bp);
9071 static int bnx2x_nvram_write(struct bnx2x *bp, u32 offset, u8 *data_buf,
9079 if (buf_size == 1) /* ethtool */
9080 return bnx2x_nvram_write1(bp, offset, data_buf, buf_size);
9082 if ((offset & 0x03) || (buf_size & 0x03) || (buf_size == 0)) {
9084 "Invalid parameter: offset 0x%x buf_size 0x%x\n",
9089 if (offset + buf_size > bp->common.flash_size) {
9090 DP(BNX2X_MSG_NVM, "Invalid parameter: offset (0x%x) +"
9091 " buf_size (0x%x) > flash_size (0x%x)\n",
9092 offset, buf_size, bp->common.flash_size);
9096 /* request access to nvram interface */
9097 rc = bnx2x_acquire_nvram_lock(bp);
9101 /* enable access to nvram interface */
9102 bnx2x_enable_nvram_access(bp);
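/* NVRAM is written dword by dword, with FIRST/LAST command flags
 * delimiting a burst. Judging by the checks below, a burst must not
 * cross an NVRAM_PAGE_SIZE boundary: LAST is set on the final dword of
 * the buffer or of a page, and FIRST is set again at each page start.
 */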
9105 cmd_flags = MCPR_NVM_COMMAND_FIRST;
9106 while ((written_so_far < buf_size) && (rc == 0)) {
9107 if (written_so_far == (buf_size - sizeof(u32)))
9108 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9109 else if (((offset + 4) % NVRAM_PAGE_SIZE) == 0)
9110 cmd_flags |= MCPR_NVM_COMMAND_LAST;
9111 else if ((offset % NVRAM_PAGE_SIZE) == 0)
9112 cmd_flags |= MCPR_NVM_COMMAND_FIRST;
9114 memcpy(&val, data_buf, 4);
9116 rc = bnx2x_nvram_write_dword(bp, offset, val, cmd_flags);
9118 /* advance to the next dword */
9119 offset += sizeof(u32);
9120 data_buf += sizeof(u32);
9121 written_so_far += sizeof(u32);
9125 /* disable access to nvram interface */
9126 bnx2x_disable_nvram_access(bp);
9127 bnx2x_release_nvram_lock(bp);
9132 static int bnx2x_set_eeprom(struct net_device *dev,
9133 struct ethtool_eeprom *eeprom, u8 *eebuf)
9135 struct bnx2x *bp = netdev_priv(dev);
9138 if (!netif_running(dev))
9141 DP(BNX2X_MSG_NVM, "ethtool_eeprom: cmd %d\n"
9142 DP_LEVEL " magic 0x%x offset 0x%x (%d) len 0x%x (%d)\n",
9143 eeprom->cmd, eeprom->magic, eeprom->offset, eeprom->offset,
9144 eeprom->len, eeprom->len);
9146 /* parameters already validated in ethtool_set_eeprom */
9148 /* If the magic number is PHY (0x00504859) upgrade the PHY FW */
9149 if (eeprom->magic == 0x00504859)
9152 bnx2x_acquire_phy_lock(bp);
9153 rc = bnx2x_flash_download(bp, BP_PORT(bp),
9154 bp->link_params.ext_phy_config,
9155 (bp->state != BNX2X_STATE_CLOSED),
9156 eebuf, eeprom->len);
9157 if ((bp->state == BNX2X_STATE_OPEN) ||
9158 (bp->state == BNX2X_STATE_DISABLED)) {
9159 rc |= bnx2x_link_reset(&bp->link_params,
9161 rc |= bnx2x_phy_init(&bp->link_params,
9164 bnx2x_release_phy_lock(bp);
9166 } else /* Only the PMF can access the PHY */
9169 rc = bnx2x_nvram_write(bp, eeprom->offset, eebuf, eeprom->len);
9174 static int bnx2x_get_coalesce(struct net_device *dev,
9175 struct ethtool_coalesce *coal)
9177 struct bnx2x *bp = netdev_priv(dev);
9179 memset(coal, 0, sizeof(struct ethtool_coalesce));
9181 coal->rx_coalesce_usecs = bp->rx_ticks;
9182 coal->tx_coalesce_usecs = bp->tx_ticks;
9187 static int bnx2x_set_coalesce(struct net_device *dev,
9188 struct ethtool_coalesce *coal)
9190 struct bnx2x *bp = netdev_priv(dev);
9192 bp->rx_ticks = (u16) coal->rx_coalesce_usecs;
9193 if (bp->rx_ticks > BNX2X_MAX_COALESCE_TOUT)
9194 bp->rx_ticks = BNX2X_MAX_COALESCE_TOUT;
9196 bp->tx_ticks = (u16) coal->tx_coalesce_usecs;
9197 if (bp->tx_ticks > BNX2X_MAX_COALESCE_TOUT)
9198 bp->tx_ticks = BNX2X_MAX_COALESCE_TOUT;
9200 if (netif_running(dev))
9201 bnx2x_update_coalesce(bp);
9206 static void bnx2x_get_ringparam(struct net_device *dev,
9207 struct ethtool_ringparam *ering)
9209 struct bnx2x *bp = netdev_priv(dev);
9211 ering->rx_max_pending = MAX_RX_AVAIL;
9212 ering->rx_mini_max_pending = 0;
9213 ering->rx_jumbo_max_pending = 0;
9215 ering->rx_pending = bp->rx_ring_size;
9216 ering->rx_mini_pending = 0;
9217 ering->rx_jumbo_pending = 0;
9219 ering->tx_max_pending = MAX_TX_AVAIL;
9220 ering->tx_pending = bp->tx_ring_size;
9223 static int bnx2x_set_ringparam(struct net_device *dev,
9224 struct ethtool_ringparam *ering)
9226 struct bnx2x *bp = netdev_priv(dev);
9229 if ((ering->rx_pending > MAX_RX_AVAIL) ||
9230 (ering->tx_pending > MAX_TX_AVAIL) ||
9231 (ering->tx_pending <= MAX_SKB_FRAGS + 4))
9234 bp->rx_ring_size = ering->rx_pending;
9235 bp->tx_ring_size = ering->tx_pending;
9237 if (netif_running(dev)) {
9238 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9239 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9245 static void bnx2x_get_pauseparam(struct net_device *dev,
9246 struct ethtool_pauseparam *epause)
9248 struct bnx2x *bp = netdev_priv(dev);
9250 epause->autoneg = (bp->link_params.req_flow_ctrl ==
9251 BNX2X_FLOW_CTRL_AUTO) &&
9252 (bp->link_params.req_line_speed == SPEED_AUTO_NEG);
9254 epause->rx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_RX) ==
9255 BNX2X_FLOW_CTRL_RX);
9256 epause->tx_pause = ((bp->link_vars.flow_ctrl & BNX2X_FLOW_CTRL_TX) ==
9257 BNX2X_FLOW_CTRL_TX);
9259 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9260 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9261 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9264 static int bnx2x_set_pauseparam(struct net_device *dev,
9265 struct ethtool_pauseparam *epause)
9267 struct bnx2x *bp = netdev_priv(dev);
9272 DP(NETIF_MSG_LINK, "ethtool_pauseparam: cmd %d\n"
9273 DP_LEVEL " autoneg %d rx_pause %d tx_pause %d\n",
9274 epause->cmd, epause->autoneg, epause->rx_pause, epause->tx_pause);
9276 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9278 if (epause->rx_pause)
9279 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_RX;
9281 if (epause->tx_pause)
9282 bp->link_params.req_flow_ctrl |= BNX2X_FLOW_CTRL_TX;
9284 if (bp->link_params.req_flow_ctrl == BNX2X_FLOW_CTRL_AUTO)
9285 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_NONE;
9287 if (epause->autoneg) {
9288 if (!(bp->port.supported & SUPPORTED_Autoneg)) {
9289 DP(NETIF_MSG_LINK, "autoneg not supported\n");
9293 if (bp->link_params.req_line_speed == SPEED_AUTO_NEG)
9294 bp->link_params.req_flow_ctrl = BNX2X_FLOW_CTRL_AUTO;
9298 "req_flow_ctrl 0x%x\n", bp->link_params.req_flow_ctrl);
9300 if (netif_running(dev)) {
9301 bnx2x_stats_handle(bp, STATS_EVENT_STOP);
9308 static int bnx2x_set_flags(struct net_device *dev, u32 data)
9310 struct bnx2x *bp = netdev_priv(dev);
9314 /* TPA requires Rx CSUM offloading */
9315 if ((data & ETH_FLAG_LRO) && bp->rx_csum) {
9316 if (!(dev->features & NETIF_F_LRO)) {
9317 dev->features |= NETIF_F_LRO;
9318 bp->flags |= TPA_ENABLE_FLAG;
9322 } else if (dev->features & NETIF_F_LRO) {
9323 dev->features &= ~NETIF_F_LRO;
9324 bp->flags &= ~TPA_ENABLE_FLAG;
9328 if (changed && netif_running(dev)) {
9329 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9330 rc = bnx2x_nic_load(bp, LOAD_NORMAL);
9336 static u32 bnx2x_get_rx_csum(struct net_device *dev)
9338 struct bnx2x *bp = netdev_priv(dev);
9343 static int bnx2x_set_rx_csum(struct net_device *dev, u32 data)
9345 struct bnx2x *bp = netdev_priv(dev);
9350 /* Disable TPA when Rx CSUM is disabled; otherwise all
9351    TPA'ed packets will be discarded due to a wrong TCP CSUM */
9353 u32 flags = ethtool_op_get_flags(dev);
9355 rc = bnx2x_set_flags(dev, (flags & ~ETH_FLAG_LRO));
9361 static int bnx2x_set_tso(struct net_device *dev, u32 data)
9364 dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
9365 dev->features |= NETIF_F_TSO6;
9367 dev->features &= ~(NETIF_F_TSO | NETIF_F_TSO_ECN);
9368 dev->features &= ~NETIF_F_TSO6;
9374 static const struct {
9375 char string[ETH_GSTRING_LEN];
9376 } bnx2x_tests_str_arr[BNX2X_NUM_TESTS] = {
9377 { "register_test (offline)" },
9378 { "memory_test (offline)" },
9379 { "loopback_test (offline)" },
9380 { "nvram_test (online)" },
9381 { "interrupt_test (online)" },
9382 { "link_test (online)" },
9383 { "idle check (online)" }
9386 static int bnx2x_self_test_count(struct net_device *dev)
9388 return BNX2X_NUM_TESTS;
9391 static int bnx2x_test_registers(struct bnx2x *bp)
9393 int idx, i, rc = -ENODEV;
9395 int port = BP_PORT(bp);
9396 static const struct {
9401 /* 0 */ { BRB1_REG_PAUSE_LOW_THRESHOLD_0, 4, 0x000003ff },
9402 { DORQ_REG_DB_ADDR0, 4, 0xffffffff },
9403 { HC_REG_AGG_INT_0, 4, 0x000003ff },
9404 { PBF_REG_MAC_IF0_ENABLE, 4, 0x00000001 },
9405 { PBF_REG_P0_INIT_CRD, 4, 0x000007ff },
9406 { PRS_REG_CID_PORT_0, 4, 0x00ffffff },
9407 { PXP2_REG_PSWRQ_CDU0_L2P, 4, 0x000fffff },
9408 { PXP2_REG_RQ_CDU0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9409 { PXP2_REG_PSWRQ_TM0_L2P, 4, 0x000fffff },
9410 { PXP2_REG_RQ_USDM0_EFIRST_MEM_ADDR, 8, 0x0003ffff },
9411 /* 10 */ { PXP2_REG_PSWRQ_TSDM0_L2P, 4, 0x000fffff },
9412 { QM_REG_CONNNUM_0, 4, 0x000fffff },
9413 { TM_REG_LIN0_MAX_ACTIVE_CID, 4, 0x0003ffff },
9414 { SRC_REG_KEYRSS0_0, 40, 0xffffffff },
9415 { SRC_REG_KEYRSS0_7, 40, 0xffffffff },
9416 { XCM_REG_WU_DA_SET_TMR_CNT_FLG_CMD00, 4, 0x00000001 },
9417 { XCM_REG_WU_DA_CNT_CMD00, 4, 0x00000003 },
9418 { XCM_REG_GLB_DEL_ACK_MAX_CNT_0, 4, 0x000000ff },
9419 { NIG_REG_LLH0_T_BIT, 4, 0x00000001 },
9420 { NIG_REG_EMAC0_IN_EN, 4, 0x00000001 },
9421 /* 20 */ { NIG_REG_BMAC0_IN_EN, 4, 0x00000001 },
9422 { NIG_REG_XCM0_OUT_EN, 4, 0x00000001 },
9423 { NIG_REG_BRB0_OUT_EN, 4, 0x00000001 },
9424 { NIG_REG_LLH0_XCM_MASK, 4, 0x00000007 },
9425 { NIG_REG_LLH0_ACPI_PAT_6_LEN, 68, 0x000000ff },
9426 { NIG_REG_LLH0_ACPI_PAT_0_CRC, 68, 0xffffffff },
9427 { NIG_REG_LLH0_DEST_MAC_0_0, 160, 0xffffffff },
9428 { NIG_REG_LLH0_DEST_IP_0_1, 160, 0xffffffff },
9429 { NIG_REG_LLH0_IPV4_IPV6_0, 160, 0x00000001 },
9430 { NIG_REG_LLH0_DEST_UDP_0, 160, 0x0000ffff },
9431 /* 30 */ { NIG_REG_LLH0_DEST_TCP_0, 160, 0x0000ffff },
9432 { NIG_REG_LLH0_VLAN_ID_0, 160, 0x00000fff },
9433 { NIG_REG_XGXS_SERDES0_MODE_SEL, 4, 0x00000001 },
9434 { NIG_REG_LED_CONTROL_OVERRIDE_TRAFFIC_P0, 4, 0x00000001 },
9435 { NIG_REG_STATUS_INTERRUPT_PORT0, 4, 0x07ffffff },
9436 { NIG_REG_XGXS0_CTRL_EXTREMOTEMDIOST, 24, 0x00000001 },
9437 { NIG_REG_SERDES0_CTRL_PHY_ADDR, 16, 0x0000001f },
9439 { 0xffffffff, 0, 0x00000000 }
9442 if (!netif_running(bp->dev))
9445 /* Repeat the test twice:
9446 first by writing 0x00000000, then by writing 0xffffffff */
9447 for (idx = 0; idx < 2; idx++) {
9454 wr_val = 0xffffffff;
9458 for (i = 0; reg_tbl[i].offset0 != 0xffffffff; i++) {
9459 u32 offset, mask, save_val, val;
9461 offset = reg_tbl[i].offset0 + port*reg_tbl[i].offset1;
9462 mask = reg_tbl[i].mask;
9464 save_val = REG_RD(bp, offset);
9466 REG_WR(bp, offset, wr_val);
9467 val = REG_RD(bp, offset);
9469 /* Restore the original register's value */
9470 REG_WR(bp, offset, save_val);
9472 /* verify that the value is as expected */
9473 if ((val & mask) != (wr_val & mask))
9484 static int bnx2x_test_memory(struct bnx2x *bp)
9486 int i, j, rc = -ENODEV;
9488 static const struct {
9492 { CCM_REG_XX_DESCR_TABLE, CCM_REG_XX_DESCR_TABLE_SIZE },
9493 { CFC_REG_ACTIVITY_COUNTER, CFC_REG_ACTIVITY_COUNTER_SIZE },
9494 { CFC_REG_LINK_LIST, CFC_REG_LINK_LIST_SIZE },
9495 { DMAE_REG_CMD_MEM, DMAE_REG_CMD_MEM_SIZE },
9496 { TCM_REG_XX_DESCR_TABLE, TCM_REG_XX_DESCR_TABLE_SIZE },
9497 { UCM_REG_XX_DESCR_TABLE, UCM_REG_XX_DESCR_TABLE_SIZE },
9498 { XCM_REG_XX_DESCR_TABLE, XCM_REG_XX_DESCR_TABLE_SIZE },
9502 static const struct {
9508 { "CCM_PRTY_STS", CCM_REG_CCM_PRTY_STS, 0x3ffc0, 0 },
9509 { "CFC_PRTY_STS", CFC_REG_CFC_PRTY_STS, 0x2, 0x2 },
9510 { "DMAE_PRTY_STS", DMAE_REG_DMAE_PRTY_STS, 0, 0 },
9511 { "TCM_PRTY_STS", TCM_REG_TCM_PRTY_STS, 0x3ffc0, 0 },
9512 { "UCM_PRTY_STS", UCM_REG_UCM_PRTY_STS, 0x3ffc0, 0 },
9513 { "XCM_PRTY_STS", XCM_REG_XCM_PRTY_STS, 0x3ffc1, 0 },
9515 { NULL, 0xffffffff, 0, 0 }
9518 if (!netif_running(bp->dev))
9521 /* Go through all the memories */
9522 for (i = 0; mem_tbl[i].offset != 0xffffffff; i++)
9523 for (j = 0; j < mem_tbl[i].size; j++)
9524 REG_RD(bp, mem_tbl[i].offset + j*4);
9526 /* Check the parity status */
9527 for (i = 0; prty_tbl[i].offset != 0xffffffff; i++) {
9528 val = REG_RD(bp, prty_tbl[i].offset);
9529 if ((CHIP_IS_E1(bp) && (val & ~(prty_tbl[i].e1_mask))) ||
9530 (CHIP_IS_E1H(bp) && (val & ~(prty_tbl[i].e1h_mask)))) {
9532 "%s is 0x%x\n", prty_tbl[i].name, val);
9543 static void bnx2x_wait_for_link(struct bnx2x *bp, u8 link_up)
9548 while (bnx2x_link_test(bp) && cnt--)
9552 static int bnx2x_run_loopback(struct bnx2x *bp, int loopback_mode, u8 link_up)
9554 unsigned int pkt_size, num_pkts, i;
9555 struct sk_buff *skb;
9556 unsigned char *packet;
9557 struct bnx2x_fastpath *fp = &bp->fp[0];
9558 u16 tx_start_idx, tx_idx;
9559 u16 rx_start_idx, rx_idx;
9561 struct sw_tx_bd *tx_buf;
9562 struct eth_tx_bd *tx_bd;
9564 union eth_rx_cqe *cqe;
9566 struct sw_rx_bd *rx_buf;
9570 /* check the loopback mode */
9571 switch (loopback_mode) {
9572 case BNX2X_PHY_LOOPBACK:
9573 if (bp->link_params.loopback_mode != LOOPBACK_XGXS_10)
9576 case BNX2X_MAC_LOOPBACK:
9577 bp->link_params.loopback_mode = LOOPBACK_BMAC;
9578 bnx2x_phy_init(&bp->link_params, &bp->link_vars);
9584 /* prepare the loopback packet */
9585 pkt_size = (((bp->dev->mtu < ETH_MAX_PACKET_SIZE) ?
9586 bp->dev->mtu : ETH_MAX_PACKET_SIZE) + ETH_HLEN);
9587 skb = netdev_alloc_skb(bp->dev, bp->rx_buf_size);
9590 goto test_loopback_exit;
9592 packet = skb_put(skb, pkt_size);
9593 memcpy(packet, bp->dev->dev_addr, ETH_ALEN);
9594 memset(packet + ETH_ALEN, 0, (ETH_HLEN - ETH_ALEN));
9595 for (i = ETH_HLEN; i < pkt_size; i++)
9596 packet[i] = (unsigned char) (i & 0xff);
9598 /* send the loopback packet */
9600 tx_start_idx = le16_to_cpu(*fp->tx_cons_sb);
9601 rx_start_idx = le16_to_cpu(*fp->rx_cons_sb);
9603 pkt_prod = fp->tx_pkt_prod++;
9604 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
9605 tx_buf->first_bd = fp->tx_bd_prod;
9608 tx_bd = &fp->tx_desc_ring[TX_BD(fp->tx_bd_prod)];
9609 mapping = pci_map_single(bp->pdev, skb->data,
9610 skb_headlen(skb), PCI_DMA_TODEVICE);
9611 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
9612 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
9613 tx_bd->nbd = cpu_to_le16(1);
9614 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
9615 tx_bd->vlan = cpu_to_le16(pkt_prod);
9616 tx_bd->bd_flags.as_bitfield = (ETH_TX_BD_FLAGS_START_BD |
9617 ETH_TX_BD_FLAGS_END_BD);
9618 tx_bd->general_data = ((UNICAST_ADDRESS <<
9619 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT) | 1);
9623 le16_add_cpu(&fp->hw_tx_prods->bds_prod, 1);
9624 mb(); /* FW restriction: must not reorder writing nbd and packets */
9625 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
9626 DOORBELL(bp, fp->index, 0);
9632 bp->dev->trans_start = jiffies;
9636 tx_idx = le16_to_cpu(*fp->tx_cons_sb);
9637 if (tx_idx != tx_start_idx + num_pkts)
9638 goto test_loopback_exit;
9640 rx_idx = le16_to_cpu(*fp->rx_cons_sb);
9641 if (rx_idx != rx_start_idx + num_pkts)
9642 goto test_loopback_exit;
9644 cqe = &fp->rx_comp_ring[RCQ_BD(fp->rx_comp_cons)];
9645 cqe_fp_flags = cqe->fast_path_cqe.type_error_flags;
9646 if (CQE_TYPE(cqe_fp_flags) || (cqe_fp_flags & ETH_RX_ERROR_FALGS))
9647 goto test_loopback_rx_exit;
9649 len = le16_to_cpu(cqe->fast_path_cqe.pkt_len);
9650 if (len != pkt_size)
9651 goto test_loopback_rx_exit;
9653 rx_buf = &fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)];
9655 skb_reserve(skb, cqe->fast_path_cqe.placement_offset);
9656 for (i = ETH_HLEN; i < pkt_size; i++)
9657 if (*(skb->data + i) != (unsigned char) (i & 0xff))
9658 goto test_loopback_rx_exit;
9662 test_loopback_rx_exit:
9664 fp->rx_bd_cons = NEXT_RX_IDX(fp->rx_bd_cons);
9665 fp->rx_bd_prod = NEXT_RX_IDX(fp->rx_bd_prod);
9666 fp->rx_comp_cons = NEXT_RCQ_IDX(fp->rx_comp_cons);
9667 fp->rx_comp_prod = NEXT_RCQ_IDX(fp->rx_comp_prod);
9669 /* Update producers */
9670 bnx2x_update_rx_prod(bp, fp, fp->rx_bd_prod, fp->rx_comp_prod,
9674 bp->link_params.loopback_mode = LOOPBACK_NONE;
9679 static int bnx2x_test_loopback(struct bnx2x *bp, u8 link_up)
9683 if (!netif_running(bp->dev))
9684 return BNX2X_LOOPBACK_FAILED;
9686 bnx2x_netif_stop(bp, 1);
9687 bnx2x_acquire_phy_lock(bp);
9689 res = bnx2x_run_loopback(bp, BNX2X_PHY_LOOPBACK, link_up);
9691 DP(NETIF_MSG_PROBE, " PHY loopback failed (res %d)\n", res);
9692 rc |= BNX2X_PHY_LOOPBACK_FAILED;
9695 res = bnx2x_run_loopback(bp, BNX2X_MAC_LOOPBACK, link_up);
9697 DP(NETIF_MSG_PROBE, " MAC loopback failed (res %d)\n", res);
9698 rc |= BNX2X_MAC_LOOPBACK_FAILED;
9701 bnx2x_release_phy_lock(bp);
9702 bnx2x_netif_start(bp);
9707 #define CRC32_RESIDUAL 0xdebb20e3
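/* 0xdebb20e3 is the standard CRC-32 residue: running ether_crc_le()
 * over a block that ends with its own stored CRC always yields this
 * constant, so each NVRAM section can be validated below without
 * knowing where inside it the checksum sits.
 */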
9709 static int bnx2x_test_nvram(struct bnx2x *bp)
9711 static const struct {
9715 { 0, 0x14 }, /* bootstrap */
9716 { 0x14, 0xec }, /* dir */
9717 { 0x100, 0x350 }, /* manuf_info */
9718 { 0x450, 0xf0 }, /* feature_info */
9719 { 0x640, 0x64 }, /* upgrade_key_info */
9721 { 0x708, 0x70 }, /* manuf_key_info */
9725 __be32 buf[0x350 / 4];
9726 u8 *data = (u8 *)buf;
9730 rc = bnx2x_nvram_read(bp, 0, data, 4);
9732 DP(NETIF_MSG_PROBE, "magic value read (rc %d)\n", rc);
9733 goto test_nvram_exit;
9736 magic = be32_to_cpu(buf[0]);
9737 if (magic != 0x669955aa) {
9738 DP(NETIF_MSG_PROBE, "magic value (0x%08x)\n", magic);
9740 goto test_nvram_exit;
9743 for (i = 0; nvram_tbl[i].size; i++) {
9745 rc = bnx2x_nvram_read(bp, nvram_tbl[i].offset, data,
9749 "nvram_tbl[%d] read data (rc %d)\n", i, rc);
9750 goto test_nvram_exit;
9753 csum = ether_crc_le(nvram_tbl[i].size, data);
9754 if (csum != CRC32_RESIDUAL) {
9756 "nvram_tbl[%d] csum value (0x%08x)\n", i, csum);
9758 goto test_nvram_exit;
9766 static int bnx2x_test_intr(struct bnx2x *bp)
9768 struct mac_configuration_cmd *config = bnx2x_sp(bp, mac_config);
9771 if (!netif_running(bp->dev))
9774 config->hdr.length = 0;
9776 config->hdr.offset = (BP_PORT(bp) ? 32 : 0);
9778 config->hdr.offset = BP_FUNC(bp);
9779 config->hdr.client_id = bp->fp->cl_id;
9780 config->hdr.reserved1 = 0;
9782 rc = bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
9783 U64_HI(bnx2x_sp_mapping(bp, mac_config)),
9784 U64_LO(bnx2x_sp_mapping(bp, mac_config)), 0);
9786 bp->set_mac_pending++;
9787 for (i = 0; i < 10; i++) {
9788 if (!bp->set_mac_pending)
9790 msleep_interruptible(10);
9799 static void bnx2x_self_test(struct net_device *dev,
9800 struct ethtool_test *etest, u64 *buf)
9802 struct bnx2x *bp = netdev_priv(dev);
9804 memset(buf, 0, sizeof(u64) * BNX2X_NUM_TESTS);
9806 if (!netif_running(dev))
9809 /* offline tests are not supported in MF mode */
9811 etest->flags &= ~ETH_TEST_FL_OFFLINE;
9813 if (etest->flags & ETH_TEST_FL_OFFLINE) {
9814 int port = BP_PORT(bp);
9818 /* save current value of input enable for TX port IF */
9819 val = REG_RD(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4);
9820 /* disable input for TX port IF */
9821 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, 0);
9823 link_up = bp->link_vars.link_up;
9824 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9825 bnx2x_nic_load(bp, LOAD_DIAG);
9826 /* wait until link state is restored */
9827 bnx2x_wait_for_link(bp, link_up);
9829 if (bnx2x_test_registers(bp) != 0) {
9831 etest->flags |= ETH_TEST_FL_FAILED;
9833 if (bnx2x_test_memory(bp) != 0) {
9835 etest->flags |= ETH_TEST_FL_FAILED;
9837 buf[2] = bnx2x_test_loopback(bp, link_up);
9839 etest->flags |= ETH_TEST_FL_FAILED;
9841 bnx2x_nic_unload(bp, UNLOAD_NORMAL);
9843 /* restore input for TX port IF */
9844 REG_WR(bp, NIG_REG_EGRESS_UMP0_IN_EN + port*4, val);
9846 bnx2x_nic_load(bp, LOAD_NORMAL);
9847 /* wait until link state is restored */
9848 bnx2x_wait_for_link(bp, link_up);
9850 if (bnx2x_test_nvram(bp) != 0) {
9852 etest->flags |= ETH_TEST_FL_FAILED;
9854 if (bnx2x_test_intr(bp) != 0) {
9856 etest->flags |= ETH_TEST_FL_FAILED;
9859 if (bnx2x_link_test(bp) != 0) {
9861 etest->flags |= ETH_TEST_FL_FAILED;
9864 #ifdef BNX2X_EXTRA_DEBUG
9865 bnx2x_panic_dump(bp);
9869 static const struct {
9872 u8 string[ETH_GSTRING_LEN];
9873 } bnx2x_q_stats_arr[BNX2X_NUM_Q_STATS] = {
9874 /* 1 */ { Q_STATS_OFFSET32(total_bytes_received_hi), 8, "[%d]: rx_bytes" },
9875 { Q_STATS_OFFSET32(error_bytes_received_hi),
9876 8, "[%d]: rx_error_bytes" },
9877 { Q_STATS_OFFSET32(total_unicast_packets_received_hi),
9878 8, "[%d]: rx_ucast_packets" },
9879 { Q_STATS_OFFSET32(total_multicast_packets_received_hi),
9880 8, "[%d]: rx_mcast_packets" },
9881 { Q_STATS_OFFSET32(total_broadcast_packets_received_hi),
9882 8, "[%d]: rx_bcast_packets" },
9883 { Q_STATS_OFFSET32(no_buff_discard_hi), 8, "[%d]: rx_discards" },
9884 { Q_STATS_OFFSET32(rx_err_discard_pkt),
9885 4, "[%d]: rx_phy_ip_err_discards"},
9886 { Q_STATS_OFFSET32(rx_skb_alloc_failed),
9887 4, "[%d]: rx_skb_alloc_discard" },
9888 { Q_STATS_OFFSET32(hw_csum_err), 4, "[%d]: rx_csum_offload_errors" },
9890 /* 10 */{ Q_STATS_OFFSET32(total_bytes_transmitted_hi), 8, "[%d]: tx_bytes" },
9891 { Q_STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9892 8, "[%d]: tx_packets" }
9895 static const struct {
9899 #define STATS_FLAGS_PORT 1
9900 #define STATS_FLAGS_FUNC 2
9901 #define STATS_FLAGS_BOTH (STATS_FLAGS_FUNC | STATS_FLAGS_PORT)
9902 u8 string[ETH_GSTRING_LEN];
9903 } bnx2x_stats_arr[BNX2X_NUM_STATS] = {
9904 /* 1 */ { STATS_OFFSET32(total_bytes_received_hi),
9905 8, STATS_FLAGS_BOTH, "rx_bytes" },
9906 { STATS_OFFSET32(error_bytes_received_hi),
9907 8, STATS_FLAGS_BOTH, "rx_error_bytes" },
9908 { STATS_OFFSET32(total_unicast_packets_received_hi),
9909 8, STATS_FLAGS_BOTH, "rx_ucast_packets" },
9910 { STATS_OFFSET32(total_multicast_packets_received_hi),
9911 8, STATS_FLAGS_BOTH, "rx_mcast_packets" },
9912 { STATS_OFFSET32(total_broadcast_packets_received_hi),
9913 8, STATS_FLAGS_BOTH, "rx_bcast_packets" },
9914 { STATS_OFFSET32(rx_stat_dot3statsfcserrors_hi),
9915 8, STATS_FLAGS_PORT, "rx_crc_errors" },
9916 { STATS_OFFSET32(rx_stat_dot3statsalignmenterrors_hi),
9917 8, STATS_FLAGS_PORT, "rx_align_errors" },
9918 { STATS_OFFSET32(rx_stat_etherstatsundersizepkts_hi),
9919 8, STATS_FLAGS_PORT, "rx_undersize_packets" },
9920 { STATS_OFFSET32(etherstatsoverrsizepkts_hi),
9921 8, STATS_FLAGS_PORT, "rx_oversize_packets" },
9922 /* 10 */{ STATS_OFFSET32(rx_stat_etherstatsfragments_hi),
9923 8, STATS_FLAGS_PORT, "rx_fragments" },
9924 { STATS_OFFSET32(rx_stat_etherstatsjabbers_hi),
9925 8, STATS_FLAGS_PORT, "rx_jabbers" },
9926 { STATS_OFFSET32(no_buff_discard_hi),
9927 8, STATS_FLAGS_BOTH, "rx_discards" },
9928 { STATS_OFFSET32(mac_filter_discard),
9929 4, STATS_FLAGS_PORT, "rx_filtered_packets" },
9930 { STATS_OFFSET32(xxoverflow_discard),
9931 4, STATS_FLAGS_PORT, "rx_fw_discards" },
9932 { STATS_OFFSET32(brb_drop_hi),
9933 8, STATS_FLAGS_PORT, "rx_brb_discard" },
9934 { STATS_OFFSET32(brb_truncate_hi),
9935 8, STATS_FLAGS_PORT, "rx_brb_truncate" },
9936 { STATS_OFFSET32(pause_frames_received_hi),
9937 8, STATS_FLAGS_PORT, "rx_pause_frames" },
9938 { STATS_OFFSET32(rx_stat_maccontrolframesreceived_hi),
9939 8, STATS_FLAGS_PORT, "rx_mac_ctrl_frames" },
9940 { STATS_OFFSET32(nig_timer_max),
9941 4, STATS_FLAGS_PORT, "rx_constant_pause_events" },
9942 /* 20 */{ STATS_OFFSET32(rx_err_discard_pkt),
9943 4, STATS_FLAGS_BOTH, "rx_phy_ip_err_discards"},
9944 { STATS_OFFSET32(rx_skb_alloc_failed),
9945 4, STATS_FLAGS_BOTH, "rx_skb_alloc_discard" },
9946 { STATS_OFFSET32(hw_csum_err),
9947 4, STATS_FLAGS_BOTH, "rx_csum_offload_errors" },
9949 { STATS_OFFSET32(total_bytes_transmitted_hi),
9950 8, STATS_FLAGS_BOTH, "tx_bytes" },
9951 { STATS_OFFSET32(tx_stat_ifhcoutbadoctets_hi),
9952 8, STATS_FLAGS_PORT, "tx_error_bytes" },
9953 { STATS_OFFSET32(total_unicast_packets_transmitted_hi),
9954 8, STATS_FLAGS_BOTH, "tx_packets" },
9955 { STATS_OFFSET32(tx_stat_dot3statsinternalmactransmiterrors_hi),
9956 8, STATS_FLAGS_PORT, "tx_mac_errors" },
9957 { STATS_OFFSET32(rx_stat_dot3statscarriersenseerrors_hi),
9958 8, STATS_FLAGS_PORT, "tx_carrier_errors" },
9959 { STATS_OFFSET32(tx_stat_dot3statssinglecollisionframes_hi),
9960 8, STATS_FLAGS_PORT, "tx_single_collisions" },
9961 { STATS_OFFSET32(tx_stat_dot3statsmultiplecollisionframes_hi),
9962 8, STATS_FLAGS_PORT, "tx_multi_collisions" },
9963 /* 30 */{ STATS_OFFSET32(tx_stat_dot3statsdeferredtransmissions_hi),
9964 8, STATS_FLAGS_PORT, "tx_deferred" },
9965 { STATS_OFFSET32(tx_stat_dot3statsexcessivecollisions_hi),
9966 8, STATS_FLAGS_PORT, "tx_excess_collisions" },
9967 { STATS_OFFSET32(tx_stat_dot3statslatecollisions_hi),
9968 8, STATS_FLAGS_PORT, "tx_late_collisions" },
9969 { STATS_OFFSET32(tx_stat_etherstatscollisions_hi),
9970 8, STATS_FLAGS_PORT, "tx_total_collisions" },
9971 { STATS_OFFSET32(tx_stat_etherstatspkts64octets_hi),
9972 8, STATS_FLAGS_PORT, "tx_64_byte_packets" },
9973 { STATS_OFFSET32(tx_stat_etherstatspkts65octetsto127octets_hi),
9974 8, STATS_FLAGS_PORT, "tx_65_to_127_byte_packets" },
9975 { STATS_OFFSET32(tx_stat_etherstatspkts128octetsto255octets_hi),
9976 8, STATS_FLAGS_PORT, "tx_128_to_255_byte_packets" },
9977 { STATS_OFFSET32(tx_stat_etherstatspkts256octetsto511octets_hi),
9978 8, STATS_FLAGS_PORT, "tx_256_to_511_byte_packets" },
9979 { STATS_OFFSET32(tx_stat_etherstatspkts512octetsto1023octets_hi),
9980 8, STATS_FLAGS_PORT, "tx_512_to_1023_byte_packets" },
9981 { STATS_OFFSET32(etherstatspkts1024octetsto1522octets_hi),
9982 8, STATS_FLAGS_PORT, "tx_1024_to_1522_byte_packets" },
9983 /* 40 */{ STATS_OFFSET32(etherstatspktsover1522octets_hi),
9984 8, STATS_FLAGS_PORT, "tx_1523_to_9022_byte_packets" },
9985 { STATS_OFFSET32(pause_frames_sent_hi),
9986 8, STATS_FLAGS_PORT, "tx_pause_frames" }
9989 #define IS_PORT_STAT(i) \
9990 ((bnx2x_stats_arr[i].flags & STATS_FLAGS_BOTH) == STATS_FLAGS_PORT)
9991 #define IS_FUNC_STAT(i) (bnx2x_stats_arr[i].flags & STATS_FLAGS_FUNC)
9992 #define IS_E1HMF_MODE_STAT(bp) \
9993 (IS_E1HMF(bp) && !(bp->msglevel & BNX2X_MSG_STATS))
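/* In E1H multi-function mode several functions share one port, so
 * port-wide MAC counters are not attributable to a single function;
 * unless the BNX2X_MSG_STATS debug level is set, only per-function
 * statistics are exposed to ethtool below.
 */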
9995 static void bnx2x_get_strings(struct net_device *dev, u32 stringset, u8 *buf)
9997 struct bnx2x *bp = netdev_priv(dev);
10000 switch (stringset) {
10002 if (is_multi(bp)) {
10004 for_each_queue(bp, i) {
10005 for (j = 0; j < BNX2X_NUM_Q_STATS; j++)
10006 sprintf(buf + (k + j)*ETH_GSTRING_LEN,
10007 bnx2x_q_stats_arr[j].string, i);
10008 k += BNX2X_NUM_Q_STATS;
10010 if (IS_E1HMF_MODE_STAT(bp))
10012 for (j = 0; j < BNX2X_NUM_STATS; j++)
10013 strcpy(buf + (k + j)*ETH_GSTRING_LEN,
10014 bnx2x_stats_arr[j].string);
10016 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10017 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10019 strcpy(buf + j*ETH_GSTRING_LEN,
10020 bnx2x_stats_arr[i].string);
10027 memcpy(buf, bnx2x_tests_str_arr, sizeof(bnx2x_tests_str_arr));
10032 static int bnx2x_get_stats_count(struct net_device *dev)
10034 struct bnx2x *bp = netdev_priv(dev);
10037 if (is_multi(bp)) {
10038 num_stats = BNX2X_NUM_Q_STATS * BNX2X_NUM_QUEUES(bp);
10039 if (!IS_E1HMF_MODE_STAT(bp))
10040 num_stats += BNX2X_NUM_STATS;
10042 if (IS_E1HMF_MODE_STAT(bp)) {
10044 for (i = 0; i < BNX2X_NUM_STATS; i++)
10045 if (IS_FUNC_STAT(i))
10048 num_stats = BNX2X_NUM_STATS;
10054 static void bnx2x_get_ethtool_stats(struct net_device *dev,
10055 struct ethtool_stats *stats, u64 *buf)
10057 struct bnx2x *bp = netdev_priv(dev);
10058 u32 *hw_stats, *offset;
10061 if (is_multi(bp)) {
10063 for_each_queue(bp, i) {
10064 hw_stats = (u32 *)&bp->fp[i].eth_q_stats;
10065 for (j = 0; j < BNX2X_NUM_Q_STATS; j++) {
10066 if (bnx2x_q_stats_arr[j].size == 0) {
10067 /* skip this counter */
10071 offset = (hw_stats +
10072 bnx2x_q_stats_arr[j].offset);
10073 if (bnx2x_q_stats_arr[j].size == 4) {
10074 /* 4-byte counter */
10075 buf[k + j] = (u64) *offset;
10078 /* 8-byte counter */
10079 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10081 k += BNX2X_NUM_Q_STATS;
10083 if (IS_E1HMF_MODE_STAT(bp))
10085 hw_stats = (u32 *)&bp->eth_stats;
10086 for (j = 0; j < BNX2X_NUM_STATS; j++) {
10087 if (bnx2x_stats_arr[j].size == 0) {
10088 /* skip this counter */
10092 offset = (hw_stats + bnx2x_stats_arr[j].offset);
10093 if (bnx2x_stats_arr[j].size == 4) {
10094 /* 4-byte counter */
10095 buf[k + j] = (u64) *offset;
10098 /* 8-byte counter */
10099 buf[k + j] = HILO_U64(*offset, *(offset + 1));
10102 hw_stats = (u32 *)&bp->eth_stats;
10103 for (i = 0, j = 0; i < BNX2X_NUM_STATS; i++) {
10104 if (IS_E1HMF_MODE_STAT(bp) && IS_PORT_STAT(i))
10106 if (bnx2x_stats_arr[i].size == 0) {
10107 /* skip this counter */
10112 offset = (hw_stats + bnx2x_stats_arr[i].offset);
10113 if (bnx2x_stats_arr[i].size == 4) {
10114 /* 4-byte counter */
10115 buf[j] = (u64) *offset;
10119 /* 8-byte counter */
10120 buf[j] = HILO_U64(*offset, *(offset + 1));
10126 static int bnx2x_phys_id(struct net_device *dev, u32 data)
10128 struct bnx2x *bp = netdev_priv(dev);
10129 int port = BP_PORT(bp);
10132 if (!netif_running(dev))
10141 for (i = 0; i < (data * 2); i++) {
10143 bnx2x_set_led(bp, port, LED_MODE_OPER, SPEED_1000,
10144 bp->link_params.hw_led_mode,
10145 bp->link_params.chip_id);
10147 bnx2x_set_led(bp, port, LED_MODE_OFF, 0,
10148 bp->link_params.hw_led_mode,
10149 bp->link_params.chip_id);
10151 msleep_interruptible(500);
10152 if (signal_pending(current))
10156 if (bp->link_vars.link_up)
10157 bnx2x_set_led(bp, port, LED_MODE_OPER,
10158 bp->link_vars.line_speed,
10159 bp->link_params.hw_led_mode,
10160 bp->link_params.chip_id);
10165 static struct ethtool_ops bnx2x_ethtool_ops = {
10166 .get_settings = bnx2x_get_settings,
10167 .set_settings = bnx2x_set_settings,
10168 .get_drvinfo = bnx2x_get_drvinfo,
10169 .get_regs_len = bnx2x_get_regs_len,
10170 .get_regs = bnx2x_get_regs,
10171 .get_wol = bnx2x_get_wol,
10172 .set_wol = bnx2x_set_wol,
10173 .get_msglevel = bnx2x_get_msglevel,
10174 .set_msglevel = bnx2x_set_msglevel,
10175 .nway_reset = bnx2x_nway_reset,
10176 .get_link = bnx2x_get_link,
10177 .get_eeprom_len = bnx2x_get_eeprom_len,
10178 .get_eeprom = bnx2x_get_eeprom,
10179 .set_eeprom = bnx2x_set_eeprom,
10180 .get_coalesce = bnx2x_get_coalesce,
10181 .set_coalesce = bnx2x_set_coalesce,
10182 .get_ringparam = bnx2x_get_ringparam,
10183 .set_ringparam = bnx2x_set_ringparam,
10184 .get_pauseparam = bnx2x_get_pauseparam,
10185 .set_pauseparam = bnx2x_set_pauseparam,
10186 .get_rx_csum = bnx2x_get_rx_csum,
10187 .set_rx_csum = bnx2x_set_rx_csum,
10188 .get_tx_csum = ethtool_op_get_tx_csum,
10189 .set_tx_csum = ethtool_op_set_tx_hw_csum,
10190 .set_flags = bnx2x_set_flags,
10191 .get_flags = ethtool_op_get_flags,
10192 .get_sg = ethtool_op_get_sg,
10193 .set_sg = ethtool_op_set_sg,
10194 .get_tso = ethtool_op_get_tso,
10195 .set_tso = bnx2x_set_tso,
10196 .self_test_count = bnx2x_self_test_count,
10197 .self_test = bnx2x_self_test,
10198 .get_strings = bnx2x_get_strings,
10199 .phys_id = bnx2x_phys_id,
10200 .get_stats_count = bnx2x_get_stats_count,
10201 .get_ethtool_stats = bnx2x_get_ethtool_stats,
10204 /* end of ethtool_ops */
10206 /****************************************************************************
10207 * General service functions
10208 ****************************************************************************/
10210 static int bnx2x_set_power_state(struct bnx2x *bp, pci_power_t state)
10214 pci_read_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL, &pmcsr);
10218 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10219 ((pmcsr & ~PCI_PM_CTRL_STATE_MASK) |
10220 PCI_PM_CTRL_PME_STATUS));
10222 if (pmcsr & PCI_PM_CTRL_STATE_MASK)
10223 /* delay required during transition out of D3hot */
10228 pmcsr &= ~PCI_PM_CTRL_STATE_MASK;
10232 pmcsr |= PCI_PM_CTRL_PME_ENABLE;
10234 pci_write_config_word(bp->pdev, bp->pm_cap + PCI_PM_CTRL,
10237 /* No more memory access after this point until
10238 * device is brought back to D0.
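/* A note on the helper below: the RCQ consumer index taken from the
 * status block may land on the last entry of a page, which by ring
 * layout is a "next page" pointer rather than a real completion; the
 * MAX_RCQ_DESC_CNT check accounts for that entry before comparing
 * against the driver's own consumer.
 */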
10248 static inline int bnx2x_has_rx_work(struct bnx2x_fastpath *fp)
10252 /* Tell compiler that status block fields can change */
10254 rx_cons_sb = le16_to_cpu(*fp->rx_cons_sb);
10255 if ((rx_cons_sb & MAX_RCQ_DESC_CNT) == MAX_RCQ_DESC_CNT)
10257 return (fp->rx_comp_cons != rx_cons_sb);
10261 * net_device service functions
10264 static int bnx2x_poll(struct napi_struct *napi, int budget)
10266 struct bnx2x_fastpath *fp = container_of(napi, struct bnx2x_fastpath,
10268 struct bnx2x *bp = fp->bp;
10271 #ifdef BNX2X_STOP_ON_ERROR
10272 if (unlikely(bp->panic))
10276 prefetch(fp->tx_buf_ring[TX_BD(fp->tx_pkt_cons)].skb);
10277 prefetch(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb);
10278 prefetch((char *)(fp->rx_buf_ring[RX_BD(fp->rx_bd_cons)].skb) + 256);
10280 bnx2x_update_fpsb_idx(fp);
10282 if (bnx2x_has_tx_work(fp))
10285 if (bnx2x_has_rx_work(fp)) {
10286 work_done = bnx2x_rx_int(fp, budget);
10288 /* must not complete if we consumed the full budget */
10289 if (work_done >= budget)
10293 /* BNX2X_HAS_WORK() reads the status block, thus we need to
10294 * ensure that the status block indices have actually been read
10295 * (bnx2x_update_fpsb_idx) prior to this check (BNX2X_HAS_WORK)
10296 * so that we won't write the "newer" value of the status block to IGU
10297 * (if there was a DMA right after BNX2X_HAS_WORK and
10298 * if there is no rmb, the memory reading (bnx2x_update_fpsb_idx)
10299 * may be postponed to right before bnx2x_ack_sb). In this case
10300 * there will never be another interrupt until there is another update
10301 * of the status block, while there is still unhandled work.
10305 if (!BNX2X_HAS_WORK(fp)) {
10306 #ifdef BNX2X_STOP_ON_ERROR
10309 napi_complete(napi);
10311 bnx2x_ack_sb(bp, fp->sb_id, USTORM_ID,
10312 le16_to_cpu(fp->fp_u_idx), IGU_INT_NOP, 1);
10313 bnx2x_ack_sb(bp, fp->sb_id, CSTORM_ID,
10314 le16_to_cpu(fp->fp_c_idx), IGU_INT_ENABLE, 1);
10322 /* we split the first BD into header and data BDs
10323  * to ease the pain of our fellow microcode engineers;
10324  * we use one mapping for both BDs.
10325  * So far this has only been observed to happen
10326  * in Other Operating Systems(TM)
10328 static noinline u16 bnx2x_tx_split(struct bnx2x *bp,
10329 struct bnx2x_fastpath *fp,
10330 struct eth_tx_bd **tx_bd, u16 hlen,
10331 u16 bd_prod, int nbd)
10333 struct eth_tx_bd *h_tx_bd = *tx_bd;
10334 struct eth_tx_bd *d_tx_bd;
10335 dma_addr_t mapping;
10336 int old_len = le16_to_cpu(h_tx_bd->nbytes);
10338 /* first, fix the first BD */
10339 h_tx_bd->nbd = cpu_to_le16(nbd);
10340 h_tx_bd->nbytes = cpu_to_le16(hlen);
10342 DP(NETIF_MSG_TX_QUEUED, "TSO split header size is %d "
10343 "(%x:%x) nbd %d\n", h_tx_bd->nbytes, h_tx_bd->addr_hi,
10344 h_tx_bd->addr_lo, h_tx_bd->nbd);
10346 /* now get a new data BD
10347 * (after the pbd) and fill it */
10348 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10349 d_tx_bd = &fp->tx_desc_ring[bd_prod];
10351 mapping = HILO_U64(le32_to_cpu(h_tx_bd->addr_hi),
10352 le32_to_cpu(h_tx_bd->addr_lo)) + hlen;
10354 d_tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10355 d_tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10356 d_tx_bd->nbytes = cpu_to_le16(old_len - hlen);
10358 /* this marks the BD as one that has no individual mapping;
10359  * the FW ignores this flag in a BD not marked start
10361 d_tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_SW_LSO;
10362 DP(NETIF_MSG_TX_QUEUED,
10363 "TSO split data size is %d (%x:%x)\n",
10364 d_tx_bd->nbytes, d_tx_bd->addr_hi, d_tx_bd->addr_lo);
10366 /* update tx_bd for marking the last BD flag */
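/* bnx2x_csum_fix() works around the HW checksum bug flagged in
 * bnx2x_start_xmit(): when the region the HW checksums is off by 'fix'
 * bytes from the real transport header, the partial checksum of those
 * bytes is subtracted (fix > 0) or added (fix < 0), then the result is
 * folded and byte-swapped into the order the parsing BD expects.
 */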
10372 static inline u16 bnx2x_csum_fix(unsigned char *t_header, u16 csum, s8 fix)
10375 csum = (u16) ~csum_fold(csum_sub(csum,
10376 csum_partial(t_header - fix, fix, 0)));
10379 csum = (u16) ~csum_fold(csum_add(csum,
10380 csum_partial(t_header, -fix, 0)));
10382 return swab16(csum);
10385 static inline u32 bnx2x_xmit_type(struct bnx2x *bp, struct sk_buff *skb)
10389 if (skb->ip_summed != CHECKSUM_PARTIAL)
10393 if (skb->protocol == htons(ETH_P_IPV6)) {
10395 if (ipv6_hdr(skb)->nexthdr == IPPROTO_TCP)
10396 rc |= XMIT_CSUM_TCP;
10400 if (ip_hdr(skb)->protocol == IPPROTO_TCP)
10401 rc |= XMIT_CSUM_TCP;
10405 if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
10408 else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
10414 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10415 /* check if the packet requires linearization (it is too fragmented);
10416    no need to check fragmentation if page size > 8K (there will be no
10417    violation of FW restrictions) */
10418 static int bnx2x_pkt_req_lin(struct bnx2x *bp, struct sk_buff *skb,
10423 int first_bd_sz = 0;
10425 /* 3 = 1 (for linear data BD) + 2 (for PBD and last BD) */
10426 if (skb_shinfo(skb)->nr_frags >= (MAX_FETCH_BD - 3)) {
10428 if (xmit_type & XMIT_GSO) {
10429 unsigned short lso_mss = skb_shinfo(skb)->gso_size;
10430 /* Check if LSO packet needs to be copied:
10431 3 = 1 (for headers BD) + 2 (for PBD and last BD) */
10432 int wnd_size = MAX_FETCH_BD - 3;
10433 /* Number of windows to check */
10434 int num_wnds = skb_shinfo(skb)->nr_frags - wnd_size;
10439 /* Headers length */
10440 hlen = (int)(skb_transport_header(skb) - skb->data) +
10443 /* Amount of data (w/o headers) on the linear part of the SKB */
10444 first_bd_sz = skb_headlen(skb) - hlen;
10446 wnd_sum = first_bd_sz;
10448 /* Calculate the first sum - it's special */
10449 for (frag_idx = 0; frag_idx < wnd_size - 1; frag_idx++)
10451 skb_shinfo(skb)->frags[frag_idx].size;
10453 /* If there was data in the linear part of the skb - check it */
10454 if (first_bd_sz > 0) {
10455 if (unlikely(wnd_sum < lso_mss)) {
10460 wnd_sum -= first_bd_sz;
10463 /* Others are easier: run through the frag list and
10464 check all windows */
10465 for (wnd_idx = 0; wnd_idx <= num_wnds; wnd_idx++) {
10467 skb_shinfo(skb)->frags[wnd_idx + wnd_size - 1].size;
10469 if (unlikely(wnd_sum < lso_mss)) {
10474 skb_shinfo(skb)->frags[wnd_idx].size;
10477 /* in non-LSO a too fragmented packet must always be linearized */
10484 if (unlikely(to_copy))
10485 DP(NETIF_MSG_TX_QUEUED,
10486 "Linearization IS REQUIRED for %s packet. "
10487 "num_frags %d hlen %d first_bd_sz %d\n",
10488 (xmit_type & XMIT_GSO) ? "LSO" : "non-LSO",
10489 skb_shinfo(skb)->nr_frags, hlen, first_bd_sz);
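/* In short: the window sums above model the bytes the FW would fetch
 * for one MSS-sized frame; whenever a window of wnd_size frags sums to
 * less than the MSS, a single frame would need more than MAX_FETCH_BD
 * BDs, which the FW cannot handle, so the skb is flagged for
 * linearization.
 */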
10495 /* called with netif_tx_lock
10496 * bnx2x_tx_int() runs without netif_tx_lock unless it needs to call
10497 * netif_wake_queue()
10499 static int bnx2x_start_xmit(struct sk_buff *skb, struct net_device *dev)
10501 struct bnx2x *bp = netdev_priv(dev);
10502 struct bnx2x_fastpath *fp;
10503 struct netdev_queue *txq;
10504 struct sw_tx_bd *tx_buf;
10505 struct eth_tx_bd *tx_bd;
10506 struct eth_tx_parse_bd *pbd = NULL;
10507 u16 pkt_prod, bd_prod;
10509 dma_addr_t mapping;
10510 u32 xmit_type = bnx2x_xmit_type(bp, skb);
10511 int vlan_off = (bp->e1hov ? 4 : 0);
10515 #ifdef BNX2X_STOP_ON_ERROR
10516 if (unlikely(bp->panic))
10517 return NETDEV_TX_BUSY;
10520 fp_index = skb_get_queue_mapping(skb);
10521 txq = netdev_get_tx_queue(dev, fp_index);
10523 fp = &bp->fp[fp_index];
10525 if (unlikely(bnx2x_tx_avail(fp) < (skb_shinfo(skb)->nr_frags + 3))) {
10526 fp->eth_q_stats.driver_xoff++;
10527 netif_tx_stop_queue(txq);
10528 BNX2X_ERR("BUG! Tx ring full when queue awake!\n");
10529 return NETDEV_TX_BUSY;
10532 DP(NETIF_MSG_TX_QUEUED, "SKB: summed %x protocol %x protocol(%x,%x)"
10533 " gso type %x xmit_type %x\n",
10534 skb->ip_summed, skb->protocol, ipv6_hdr(skb)->nexthdr,
10535 ip_hdr(skb)->protocol, skb_shinfo(skb)->gso_type, xmit_type);
10537 #if (MAX_SKB_FRAGS >= MAX_FETCH_BD - 3)
10538 /* First, check if we need to linearize the skb (due to FW
10539    restrictions). No need to check fragmentation if page size > 8K
10540    (there will be no violation of FW restrictions) */
10541 if (bnx2x_pkt_req_lin(bp, skb, xmit_type)) {
10542 /* Statistics of linearization */
10544 if (skb_linearize(skb) != 0) {
10545 DP(NETIF_MSG_TX_QUEUED, "SKB linearization failed - "
10546 "silently dropping this SKB\n");
10547 dev_kfree_skb_any(skb);
10548 return NETDEV_TX_OK;
10554 Please read carefully. First we use one BD which we mark as start,
10555 then for TSO or xsum we have a parsing info BD,
10556 and only then we have the rest of the TSO BDs.
10557 (don't forget to mark the last one as last,
10558 and to unmap only AFTER you write to the BD ...)
10559 And above all, all pbd sizes are in words - NOT DWORDS!
10562 pkt_prod = fp->tx_pkt_prod++;
10563 bd_prod = TX_BD(fp->tx_bd_prod);
10565 /* get a tx_buf and first BD */
10566 tx_buf = &fp->tx_buf_ring[TX_BD(pkt_prod)];
10567 tx_bd = &fp->tx_desc_ring[bd_prod];
10569 tx_bd->bd_flags.as_bitfield = ETH_TX_BD_FLAGS_START_BD;
10570 tx_bd->general_data = (UNICAST_ADDRESS <<
10571 ETH_TX_BD_ETH_ADDR_TYPE_SHIFT);
10573 tx_bd->general_data |= (1 << ETH_TX_BD_HDR_NBDS_SHIFT);
10575 /* remember the first BD of the packet */
10576 tx_buf->first_bd = fp->tx_bd_prod;
10579 DP(NETIF_MSG_TX_QUEUED,
10580 "sending pkt %u @%p next_idx %u bd %u @%p\n",
10581 pkt_prod, tx_buf, fp->tx_pkt_prod, bd_prod, tx_bd);
10584 if ((bp->vlgrp != NULL) && vlan_tx_tag_present(skb) &&
10585 (bp->flags & HW_VLAN_TX_FLAG)) {
10586 tx_bd->vlan = cpu_to_le16(vlan_tx_tag_get(skb));
10587 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_VLAN_TAG;
10591 tx_bd->vlan = cpu_to_le16(pkt_prod);
10594 /* turn on parsing and get a BD */
10595 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10596 pbd = (void *)&fp->tx_desc_ring[bd_prod];
10598 memset(pbd, 0, sizeof(struct eth_tx_parse_bd));
10601 if (xmit_type & XMIT_CSUM) {
10602 hlen = (skb_network_header(skb) - skb->data + vlan_off) / 2;
10604 /* for now NS flag is not used in Linux */
10606 (hlen | ((skb->protocol == cpu_to_be16(ETH_P_8021Q)) <<
10607 ETH_TX_PARSE_BD_LLC_SNAP_EN_SHIFT));
10609 pbd->ip_hlen = (skb_transport_header(skb) -
10610 skb_network_header(skb)) / 2;
10612 hlen += pbd->ip_hlen + tcp_hdrlen(skb) / 2;
10614 pbd->total_hlen = cpu_to_le16(hlen);
10615 hlen = hlen*2 - vlan_off;
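/* hlen was accumulated in 16-bit words (pbd sizes are in words, as
 * noted above); doubling converts it back to bytes for the
 * skb_headlen() comparison in the TSO path below.
 */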
10617 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_TCP_CSUM;
10619 if (xmit_type & XMIT_CSUM_V4)
10620 tx_bd->bd_flags.as_bitfield |=
10621 ETH_TX_BD_FLAGS_IP_CSUM;
10623 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_IPV6;
10625 if (xmit_type & XMIT_CSUM_TCP) {
10626 pbd->tcp_pseudo_csum = swab16(tcp_hdr(skb)->check);
10629 s8 fix = SKB_CS_OFF(skb); /* signed! */
10631 pbd->global_data |= ETH_TX_PARSE_BD_CS_ANY_FLG;
10632 pbd->cs_offset = fix / 2;
10634 DP(NETIF_MSG_TX_QUEUED,
10635 "hlen %d offset %d fix %d csum before fix %x\n",
10636 le16_to_cpu(pbd->total_hlen), pbd->cs_offset, fix,
10639 /* HW bug: fixup the CSUM */
10640 pbd->tcp_pseudo_csum =
10641 bnx2x_csum_fix(skb_transport_header(skb),
10644 DP(NETIF_MSG_TX_QUEUED, "csum after fix %x\n",
10645 pbd->tcp_pseudo_csum);
10649 mapping = pci_map_single(bp->pdev, skb->data,
10650 skb_headlen(skb), PCI_DMA_TODEVICE);
10652 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10653 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10654 nbd = skb_shinfo(skb)->nr_frags + ((pbd == NULL) ? 1 : 2);
10655 tx_bd->nbd = cpu_to_le16(nbd);
10656 tx_bd->nbytes = cpu_to_le16(skb_headlen(skb));
10658 DP(NETIF_MSG_TX_QUEUED, "first bd @%p addr (%x:%x) nbd %d"
10659 " nbytes %d flags %x vlan %x\n",
10660 tx_bd, tx_bd->addr_hi, tx_bd->addr_lo, le16_to_cpu(tx_bd->nbd),
10661 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield,
10662 le16_to_cpu(tx_bd->vlan));
10664 if (xmit_type & XMIT_GSO) {
10666 DP(NETIF_MSG_TX_QUEUED,
10667 "TSO packet len %d hlen %d total len %d tso size %d\n",
10668 skb->len, hlen, skb_headlen(skb),
10669 skb_shinfo(skb)->gso_size);
10671 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_SW_LSO;
10673 if (unlikely(skb_headlen(skb) > hlen))
10674 bd_prod = bnx2x_tx_split(bp, fp, &tx_bd, hlen,
10677 pbd->lso_mss = cpu_to_le16(skb_shinfo(skb)->gso_size);
10678 pbd->tcp_send_seq = swab32(tcp_hdr(skb)->seq);
10679 pbd->tcp_flags = pbd_tcp_flags(skb);
10681 if (xmit_type & XMIT_GSO_V4) {
10682 pbd->ip_id = swab16(ip_hdr(skb)->id);
10683 pbd->tcp_pseudo_csum =
10684 swab16(~csum_tcpudp_magic(ip_hdr(skb)->saddr,
10685 ip_hdr(skb)->daddr,
10686 0, IPPROTO_TCP, 0));
10689 pbd->tcp_pseudo_csum =
10690 swab16(~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
10691 &ipv6_hdr(skb)->daddr,
10692 0, IPPROTO_TCP, 0));
10694 pbd->global_data |= ETH_TX_PARSE_BD_PSEUDO_CS_WITHOUT_LEN;
10697 for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
10698 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
10700 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10701 tx_bd = &fp->tx_desc_ring[bd_prod];
10703 mapping = pci_map_page(bp->pdev, frag->page, frag->page_offset,
10704 frag->size, PCI_DMA_TODEVICE);
10706 tx_bd->addr_hi = cpu_to_le32(U64_HI(mapping));
10707 tx_bd->addr_lo = cpu_to_le32(U64_LO(mapping));
10708 tx_bd->nbytes = cpu_to_le16(frag->size);
10709 tx_bd->vlan = cpu_to_le16(pkt_prod);
10710 tx_bd->bd_flags.as_bitfield = 0;
10712 DP(NETIF_MSG_TX_QUEUED,
10713 "frag %d bd @%p addr (%x:%x) nbytes %d flags %x\n",
10714 i, tx_bd, tx_bd->addr_hi, tx_bd->addr_lo,
10715 le16_to_cpu(tx_bd->nbytes), tx_bd->bd_flags.as_bitfield);
10718 /* now, at last, mark the BD as the last BD */
10719 tx_bd->bd_flags.as_bitfield |= ETH_TX_BD_FLAGS_END_BD;
10721 DP(NETIF_MSG_TX_QUEUED, "last bd @%p flags %x\n",
10722 tx_bd, tx_bd->bd_flags.as_bitfield);
10724 bd_prod = TX_BD(NEXT_TX_IDX(bd_prod));
10726 /* now send a tx doorbell, counting the next BD
10727 * if the packet contains or ends with it
10729 if (TX_BD_POFF(bd_prod) < nbd)
10733 DP(NETIF_MSG_TX_QUEUED,
10734 "PBD @%p ip_data %x ip_hlen %u ip_id %u lso_mss %u"
10735 " tcp_flags %x xsum %x seq %u hlen %u\n",
10736 pbd, pbd->global_data, pbd->ip_hlen, pbd->ip_id,
10737 pbd->lso_mss, pbd->tcp_flags, pbd->tcp_pseudo_csum,
10738 pbd->tcp_send_seq, le16_to_cpu(pbd->total_hlen));
10740 DP(NETIF_MSG_TX_QUEUED, "doorbell: nbd %d bd %u\n", nbd, bd_prod);
10743 * Make sure that the BD data is updated before updating the producer
10744 * since FW might read the BD right after the producer is updated.
10745 * This is only applicable for weak-ordered memory model archs such
10746 * as IA-64. The following barrier is also mandatory since the FW
10747 * assumes packets must have BDs.
10751 le16_add_cpu(&fp->hw_tx_prods->bds_prod, nbd);
10752 mb(); /* FW restriction: must not reorder writing nbd and packets */
10753 le32_add_cpu(&fp->hw_tx_prods->packets_prod, 1);
10754 DOORBELL(bp, fp->index, 0);
10758 fp->tx_bd_prod += nbd;
10760 if (unlikely(bnx2x_tx_avail(fp) < MAX_SKB_FRAGS + 3)) {
10761 /* We want bnx2x_tx_int to "see" the updated tx_bd_prod
10762 if we put Tx into XOFF state. */
10764 netif_tx_stop_queue(txq);
10765 fp->eth_q_stats.driver_xoff++;
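/* paired check: bnx2x_tx_int() may have freed BDs after we tested
 * availability but before the queue was actually stopped, so re-test
 * and wake to avoid leaving the queue stopped while space is free.
 */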
10766 if (bnx2x_tx_avail(fp) >= MAX_SKB_FRAGS + 3)
10767 netif_tx_wake_queue(txq);
10771 return NETDEV_TX_OK;
10774 /* called with rtnl_lock */
10775 static int bnx2x_open(struct net_device *dev)
10777 struct bnx2x *bp = netdev_priv(dev);
10779 netif_carrier_off(dev);
10781 bnx2x_set_power_state(bp, PCI_D0);
10783 return bnx2x_nic_load(bp, LOAD_OPEN);
10786 /* called with rtnl_lock */
10787 static int bnx2x_close(struct net_device *dev)
10789 struct bnx2x *bp = netdev_priv(dev);
10791 /* Unload the driver, release IRQs */
10792 bnx2x_nic_unload(bp, UNLOAD_CLOSE);
10793 if (atomic_read(&bp->pdev->enable_cnt) == 1)
10794 if (!CHIP_REV_IS_SLOW(bp))
10795 bnx2x_set_power_state(bp, PCI_D3hot);

/* called with netif_tx_lock from dev_mcast.c */
static void bnx2x_set_rx_mode(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);
	u32 rx_mode = BNX2X_RX_MODE_NORMAL;
	int port = BP_PORT(bp);

	if (bp->state != BNX2X_STATE_OPEN) {
		DP(NETIF_MSG_IFUP, "state is %x, returning\n", bp->state);
		return;
	}

	DP(NETIF_MSG_IFUP, "dev->flags = %x\n", dev->flags);

	if (dev->flags & IFF_PROMISC)
		rx_mode = BNX2X_RX_MODE_PROMISC;

	else if ((dev->flags & IFF_ALLMULTI) ||
		 ((dev->mc_count > BNX2X_MAX_MULTICAST) && CHIP_IS_E1(bp)))
		rx_mode = BNX2X_RX_MODE_ALLMULTI;

	else { /* some multicasts */
		if (CHIP_IS_E1(bp)) {
			int i, old, offset;
			struct dev_mc_list *mclist;
			struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				config->config_table[i].
					cam_entry.msb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[0]);
				config->config_table[i].
					cam_entry.middle_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[2]);
				config->config_table[i].
					cam_entry.lsb_mac_addr =
					swab16(*(u16 *)&mclist->dmi_addr[4]);
				config->config_table[i].cam_entry.flags =
							cpu_to_le16(port);
				config->config_table[i].
					target_table_entry.flags = 0;
				config->config_table[i].
					target_table_entry.client_id = 0;
				config->config_table[i].
					target_table_entry.vlan_id = 0;

				DP(NETIF_MSG_IFUP,
				   "setting MCAST[%d] (%04x:%04x:%04x)\n", i,
				   config->config_table[i].
						cam_entry.msb_mac_addr,
				   config->config_table[i].
						cam_entry.middle_mac_addr,
				   config->config_table[i].
						cam_entry.lsb_mac_addr);
			}
			old = config->hdr.length;
			if (old > i) {
				for (; i < old; i++) {
					if (CAM_IS_INVALID(config->
							   config_table[i])) {
						/* already invalidated */
						break;
					}
					/* invalidate */
					CAM_INVALIDATE(config->
						       config_table[i]);
				}
			}

			if (CHIP_REV_IS_SLOW(bp))
				offset = BNX2X_MAX_EMUL_MULTI*(1 + port);
			else
				offset = BNX2X_MAX_MULTICAST*(1 + port);

			config->hdr.length = i;
			config->hdr.offset = offset;
			config->hdr.client_id = bp->fp->cl_id;
			config->hdr.reserved1 = 0;

			bnx2x_sp_post(bp, RAMROD_CMD_ID_ETH_SET_MAC, 0,
				   U64_HI(bnx2x_sp_mapping(bp, mcast_config)),
				   U64_LO(bnx2x_sp_mapping(bp, mcast_config)),
				      0);
		} else { /* E1H */
			/* Accept one or more multicasts */
			struct dev_mc_list *mclist;
			u32 mc_filter[MC_HASH_SIZE];
			u32 crc, bit, regidx;
			int i;

			memset(mc_filter, 0, 4 * MC_HASH_SIZE);

			for (i = 0, mclist = dev->mc_list;
			     mclist && (i < dev->mc_count);
			     i++, mclist = mclist->next) {

				DP(NETIF_MSG_IFUP, "Adding mcast MAC: %pM\n",
				   mclist->dmi_addr);

				crc = crc32c_le(0, mclist->dmi_addr, ETH_ALEN);
				bit = (crc >> 24) & 0xff;
				regidx = bit >> 5;
				bit &= 0x1f;
				mc_filter[regidx] |= (1 << bit);
			}

			for (i = 0; i < MC_HASH_SIZE; i++)
				REG_WR(bp, MC_HASH_OFFSET(bp, i),
				       mc_filter[i]);
		}
	}

	bp->rx_mode = rx_mode;
	bnx2x_set_storm_rx_mode(bp);
}
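
/*
 * Worked example (editor's illustration, not part of the driver): on E1H
 * chips the multicast filter is a 256-bit hash table spread over eight
 * 32-bit MC_HASH registers ((crc >> 24) & 0xff can reach 255, so regidx
 * runs 0..7).  If crc32c_le() of some MAC yields 0xa7......, then
 *
 *	bit    = 0xa7 = 167
 *	regidx = 167 >> 5 = 5
 *	bit   &= 0x1f     = 7
 *
 * and the MAC sets bit 7 of MC_HASH_OFFSET(bp, 5).  Distinct MACs can
 * share a bit, so the hash filter may pass extra multicasts; exact match
 * filtering exists only in the E1 CAM path above.
 */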

/* called with rtnl_lock */
static int bnx2x_change_mac_addr(struct net_device *dev, void *p)
{
	struct sockaddr *addr = p;
	struct bnx2x *bp = netdev_priv(dev);

	if (!is_valid_ether_addr((u8 *)(addr->sa_data)))
		return -EINVAL;

	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
	if (netif_running(dev)) {
		if (CHIP_IS_E1(bp))
			bnx2x_set_mac_addr_e1(bp, 1);
		else
			bnx2x_set_mac_addr_e1h(bp, 1);
	}

	return 0;
}

/* called with rtnl_lock */
static int bnx2x_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
{
	struct mii_ioctl_data *data = if_mii(ifr);
	struct bnx2x *bp = netdev_priv(dev);
	int port = BP_PORT(bp);
	int err;

	switch (cmd) {
	case SIOCGMIIPHY:
		data->phy_id = bp->port.phy_addr;

		/* fallthrough */

	case SIOCGMIIREG: {
		u16 mii_regval;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_read(bp, port, 0, bp->port.phy_addr,
				      DEFAULT_PHY_DEV_ADDR,
				      (data->reg_num & 0x1f), &mii_regval);
		data->val_out = mii_regval;
		mutex_unlock(&bp->port.phy_mutex);
		return err;
	}

	case SIOCSMIIREG:
		if (!capable(CAP_NET_ADMIN))
			return -EPERM;

		if (!netif_running(dev))
			return -EAGAIN;

		mutex_lock(&bp->port.phy_mutex);
		err = bnx2x_cl45_write(bp, port, 0, bp->port.phy_addr,
				       DEFAULT_PHY_DEV_ADDR,
				       (data->reg_num & 0x1f), data->val_in);
		mutex_unlock(&bp->port.phy_mutex);
		return err;

	default:
		/* do nothing */
		break;
	}

	return -EOPNOTSUPP;
}
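
/*
 * Note (editor's illustration): both MII ioctls above are serviced with
 * clause 45 MDIO cycles (bnx2x_cl45_read/write) against
 * DEFAULT_PHY_DEV_ADDR while holding phy_mutex, and reg_num is masked to
 * 5 bits, so userspace MII tools can only reach registers 0..31 of that
 * device address.
 */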

/* called with rtnl_lock */
static int bnx2x_change_mtu(struct net_device *dev, int new_mtu)
{
	struct bnx2x *bp = netdev_priv(dev);
	int rc = 0;

	if ((new_mtu > ETH_MAX_JUMBO_PACKET_SIZE) ||
	    ((new_mtu + ETH_HLEN) < ETH_MIN_PACKET_SIZE))
		return -EINVAL;

	/* This does not race with packet allocation
	 * because the actual alloc size is
	 * only updated as part of load
	 */
	dev->mtu = new_mtu;

	if (netif_running(dev)) {
		bnx2x_nic_unload(bp, UNLOAD_NORMAL);
		rc = bnx2x_nic_load(bp, LOAD_NORMAL);
	}

	return rc;
}
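
/*
 * Usage note (editor's illustration): a runtime MTU change, e.g.
 * "ip link set dev ethX mtu 9000" from userspace, reaches this handler
 * via ndo_change_mtu; on a running interface the NIC is fully unloaded
 * and reloaded so rx buffer sizes are recomputed for the new MTU during
 * load.
 */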

static void bnx2x_tx_timeout(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

#ifdef BNX2X_STOP_ON_ERROR
	if (!bp->panic)
		bnx2x_panic();
#endif
	/* This allows the netif to be shutdown gracefully before resetting */
	schedule_work(&bp->reset_task);
}

#ifdef BCM_VLAN
/* called with rtnl_lock */
static void bnx2x_vlan_rx_register(struct net_device *dev,
				   struct vlan_group *vlgrp)
{
	struct bnx2x *bp = netdev_priv(dev);

	bp->vlgrp = vlgrp;

	/* Set flags according to the required capabilities */
	bp->flags &= ~(HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	if (dev->features & NETIF_F_HW_VLAN_TX)
		bp->flags |= HW_VLAN_TX_FLAG;

	if (dev->features & NETIF_F_HW_VLAN_RX)
		bp->flags |= HW_VLAN_RX_FLAG;

	if (netif_running(dev))
		bnx2x_set_client_config(bp);
}
#endif

#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
static void poll_bnx2x(struct net_device *dev)
{
	struct bnx2x *bp = netdev_priv(dev);

	disable_irq(bp->pdev->irq);
	bnx2x_interrupt(bp->pdev->irq, dev);
	enable_irq(bp->pdev->irq);
}
#endif

static const struct net_device_ops bnx2x_netdev_ops = {
	.ndo_open		= bnx2x_open,
	.ndo_stop		= bnx2x_close,
	.ndo_start_xmit		= bnx2x_start_xmit,
	.ndo_set_multicast_list	= bnx2x_set_rx_mode,
	.ndo_set_mac_address	= bnx2x_change_mac_addr,
	.ndo_validate_addr	= eth_validate_addr,
	.ndo_do_ioctl		= bnx2x_ioctl,
	.ndo_change_mtu		= bnx2x_change_mtu,
	.ndo_tx_timeout		= bnx2x_tx_timeout,
#ifdef BCM_VLAN
	.ndo_vlan_rx_register	= bnx2x_vlan_rx_register,
#endif
#if defined(HAVE_POLL_CONTROLLER) || defined(CONFIG_NET_POLL_CONTROLLER)
	.ndo_poll_controller	= poll_bnx2x,
#endif
};

static int __devinit bnx2x_init_dev(struct pci_dev *pdev,
				    struct net_device *dev)
{
	struct bnx2x *bp;
	int rc;

	SET_NETDEV_DEV(dev, &pdev->dev);
	bp = netdev_priv(dev);

	bp->dev = dev;
	bp->pdev = pdev;
	bp->flags = 0;
	bp->func = PCI_FUNC(pdev->devfn);

	rc = pci_enable_device(pdev);
	if (rc) {
		printk(KERN_ERR PFX "Cannot enable PCI device, aborting\n");
		goto err_out;
	}

	if (!(pci_resource_flags(pdev, 0) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find PCI device base address,"
		       " aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (!(pci_resource_flags(pdev, 2) & IORESOURCE_MEM)) {
		printk(KERN_ERR PFX "Cannot find second PCI device"
		       " base address, aborting\n");
		rc = -ENODEV;
		goto err_out_disable;
	}

	if (atomic_read(&pdev->enable_cnt) == 1) {
		rc = pci_request_regions(pdev, DRV_MODULE_NAME);
		if (rc) {
			printk(KERN_ERR PFX "Cannot obtain PCI resources,"
			       " aborting\n");
			goto err_out_disable;
		}
		pci_set_master(pdev);
		pci_save_state(pdev);
	}

	bp->pm_cap = pci_find_capability(pdev, PCI_CAP_ID_PM);
	if (bp->pm_cap == 0) {
		printk(KERN_ERR PFX "Cannot find power management"
		       " capability, aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	bp->pcie_cap = pci_find_capability(pdev, PCI_CAP_ID_EXP);
	if (bp->pcie_cap == 0) {
		printk(KERN_ERR PFX "Cannot find PCI Express capability,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}

	if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
		bp->flags |= USING_DAC_FLAG;
		if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
			printk(KERN_ERR PFX "pci_set_consistent_dma_mask"
			       " failed, aborting\n");
			rc = -EIO;
			goto err_out_release;
		}
	} else if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
		printk(KERN_ERR PFX "System does not support DMA,"
		       " aborting\n");
		rc = -EIO;
		goto err_out_release;
	}
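
	/*
	 * Note (editor's illustration): a successful 64-bit mask records
	 * USING_DAC_FLAG, which is later turned into NETIF_F_HIGHDMA so the
	 * stack may hand us pages above 4GB; with only the 32-bit fallback,
	 * the stack bounce-buffers such pages for us instead.
	 */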

	dev->mem_start = pci_resource_start(pdev, 0);
	dev->base_addr = dev->mem_start;
	dev->mem_end = pci_resource_end(pdev, 0);

	dev->irq = pdev->irq;

	bp->regview = pci_ioremap_bar(pdev, 0);
	if (!bp->regview) {
		printk(KERN_ERR PFX "Cannot map register space, aborting\n");
		rc = -ENOMEM;
		goto err_out_release;
	}

	bp->doorbells = ioremap_nocache(pci_resource_start(pdev, 2),
					min_t(u64, BNX2X_DB_SIZE,
					      pci_resource_len(pdev, 2)));
	if (!bp->doorbells) {
		printk(KERN_ERR PFX "Cannot map doorbell space, aborting\n");
		rc = -ENOMEM;
		goto err_out_unmap;
	}

	bnx2x_set_power_state(bp, PCI_D0);

	/* clean indirect addresses */
	pci_write_config_dword(bp->pdev, PCICFG_GRC_ADDRESS,
			       PCICFG_VENDOR_ID_OFFSET);
	REG_WR(bp, PXP2_REG_PGL_ADDR_88_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_8C_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_90_F0 + BP_PORT(bp)*16, 0);
	REG_WR(bp, PXP2_REG_PGL_ADDR_94_F0 + BP_PORT(bp)*16, 0);

	dev->watchdog_timeo = TX_TIMEOUT;

	dev->netdev_ops = &bnx2x_netdev_ops;
	dev->ethtool_ops = &bnx2x_ethtool_ops;
	dev->features |= NETIF_F_SG;
	dev->features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->features |= NETIF_F_HIGHDMA;
	dev->features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->features |= NETIF_F_TSO6;
#ifdef BCM_VLAN
	dev->features |= (NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
	bp->flags |= (HW_VLAN_RX_FLAG | HW_VLAN_TX_FLAG);

	dev->vlan_features |= NETIF_F_SG;
	dev->vlan_features |= NETIF_F_HW_CSUM;
	if (bp->flags & USING_DAC_FLAG)
		dev->vlan_features |= NETIF_F_HIGHDMA;
	dev->vlan_features |= (NETIF_F_TSO | NETIF_F_TSO_ECN);
	dev->vlan_features |= NETIF_F_TSO6;
#endif

	return 0;

err_out_unmap:
	if (bp->regview) {
		iounmap(bp->regview);
		bp->regview = NULL;
	}
	if (bp->doorbells) {
		iounmap(bp->doorbells);
		bp->doorbells = NULL;
	}

err_out_release:
	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

err_out_disable:
	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

err_out:
	return rc;
}

static int __devinit bnx2x_get_pcie_width(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_WIDTH) >> PCICFG_LINK_WIDTH_SHIFT;
	return val;
}

/* return value of 1=2.5GHz 2=5GHz */
static int __devinit bnx2x_get_pcie_speed(struct bnx2x *bp)
{
	u32 val = REG_RD(bp, PCICFG_OFFSET + PCICFG_LINK_CONTROL);

	val = (val & PCICFG_LINK_SPEED) >> PCICFG_LINK_SPEED_SHIFT;
	return val;
}

static int __devinit bnx2x_check_firmware(struct bnx2x *bp)
{
	struct bnx2x_fw_file_hdr *fw_hdr;
	struct bnx2x_fw_file_section *sections;
	u16 *ops_offsets;
	u32 offset, len, num_ops;
	int i;
	const struct firmware *firmware = bp->firmware;
	const u8 *fw_ver;

	if (firmware->size < sizeof(struct bnx2x_fw_file_hdr))
		return -EINVAL;

	fw_hdr = (struct bnx2x_fw_file_hdr *)firmware->data;
	sections = (struct bnx2x_fw_file_section *)fw_hdr;

	/* Make sure none of the offsets and sizes make us read beyond
	 * the end of the firmware data */
	for (i = 0; i < sizeof(*fw_hdr) / sizeof(*sections); i++) {
		offset = be32_to_cpu(sections[i].offset);
		len = be32_to_cpu(sections[i].len);
		if (offset + len > firmware->size) {
			printk(KERN_ERR PFX "Section %d length is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Likewise for the init_ops offsets */
	offset = be32_to_cpu(fw_hdr->init_ops_offsets.offset);
	ops_offsets = (u16 *)(firmware->data + offset);
	num_ops = be32_to_cpu(fw_hdr->init_ops.len) / sizeof(struct raw_op);

	for (i = 0; i < be32_to_cpu(fw_hdr->init_ops_offsets.len) / 2; i++) {
		if (be16_to_cpu(ops_offsets[i]) > num_ops) {
			printk(KERN_ERR PFX "Section offset %d is out of bounds\n", i);
			return -EINVAL;
		}
	}

	/* Check FW version */
	offset = be32_to_cpu(fw_hdr->fw_version.offset);
	fw_ver = firmware->data + offset;
	if ((fw_ver[0] != BCM_5710_FW_MAJOR_VERSION) ||
	    (fw_ver[1] != BCM_5710_FW_MINOR_VERSION) ||
	    (fw_ver[2] != BCM_5710_FW_REVISION_VERSION) ||
	    (fw_ver[3] != BCM_5710_FW_ENGINEERING_VERSION)) {
		printk(KERN_ERR PFX "Bad FW version:%d.%d.%d.%d."
		       " Should be %d.%d.%d.%d\n",
		       fw_ver[0], fw_ver[1], fw_ver[2],
		       fw_ver[3], BCM_5710_FW_MAJOR_VERSION,
		       BCM_5710_FW_MINOR_VERSION,
		       BCM_5710_FW_REVISION_VERSION,
		       BCM_5710_FW_ENGINEERING_VERSION);
		return -EINVAL;
	}

	return 0;
}
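
/*
 * Worked example (editor's illustration): for a 1000-byte firmware file, a
 * section claiming offset 0x300 and len 0x200 is rejected above because
 * 0x300 + 0x200 = 1280 > 1000.  Every multi-byte header field arrives big
 * endian from the file, hence the be32_to_cpu()/be16_to_cpu() before any
 * comparison; the blob comes from userspace via request_firmware() and
 * must be treated as untrusted until these checks pass.
 */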

static inline void be32_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	const __be32 *source = (const __be32 *)_source;
	u32 *target = (u32 *)_target;

	for (i = 0; i < n/4; i++)
		target[i] = be32_to_cpu(source[i]);
}

/*
   Ops array is stored in the following format:
   {op(8bit), offset(24bit, big endian), data(32bit, big endian)}
 */
static inline void bnx2x_prep_ops(const u8 *_source, u8 *_target, u32 n)
{
	u32 i, j, tmp;
	const __be32 *source = (const __be32 *)_source;
	struct raw_op *target = (struct raw_op *)_target;

	for (i = 0, j = 0; i < n/8; i++, j += 2) {
		tmp = be32_to_cpu(source[j]);
		target[i].op = (tmp >> 24) & 0xff;
		target[i].offset = tmp & 0xffffff;
		target[i].raw_data = be32_to_cpu(source[j + 1]);
	}
}
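
/*
 * Worked example (editor's illustration): decoding one 8-byte op record
 * with bnx2x_prep_ops() above.  Given the big-endian source bytes
 *
 *	02 01 23 45  00 00 be ef
 *
 * the first word becomes tmp = 0x02012345, so
 *
 *	target[i].op       = 0x02     (top 8 bits)
 *	target[i].offset   = 0x012345 (low 24 bits)
 *	target[i].raw_data = 0x0000beef
 */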

static inline void be16_to_cpu_n(const u8 *_source, u8 *_target, u32 n)
{
	u32 i;
	u16 *target = (u16 *)_target;
	const __be16 *source = (const __be16 *)_source;

	for (i = 0; i < n/2; i++)
		target[i] = be16_to_cpu(source[i]);
}

#define BNX2X_ALLOC_AND_SET(arr, lbl, func)				\
	do {								\
		u32 len = be32_to_cpu(fw_hdr->arr.len);			\
		bp->arr = kmalloc(len, GFP_KERNEL);			\
		if (!bp->arr) {						\
			printk(KERN_ERR PFX "Failed to allocate %d bytes for "#arr"\n", len); \
			goto lbl;					\
		}							\
		func(bp->firmware->data +				\
		     be32_to_cpu(fw_hdr->arr.offset),			\
		     (u8 *)bp->arr, len);				\
	} while (0)
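
/*
 * Illustrative expansion (editor's note, for reading convenience): inside
 * bnx2x_init_firmware(), BNX2X_ALLOC_AND_SET(init_data,
 * request_firmware_exit, be32_to_cpu_n) allocates bp->init_data with
 * kmalloc(be32_to_cpu(fw_hdr->init_data.len), GFP_KERNEL), byte-swaps the
 * blob that starts at fw_hdr->init_data.offset into it, and jumps to
 * request_firmware_exit if the allocation fails.
 */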

static int __devinit bnx2x_init_firmware(struct bnx2x *bp, struct device *dev)
{
	char fw_file_name[40] = {0};
	int rc, offset;
	struct bnx2x_fw_file_hdr *fw_hdr;

	/* Create a FW file name */
	if (CHIP_IS_E1(bp))
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1);
	else
		offset = sprintf(fw_file_name, FW_FILE_PREFIX_E1H);

	sprintf(fw_file_name + offset, "%d.%d.%d.%d.fw",
		BCM_5710_FW_MAJOR_VERSION,
		BCM_5710_FW_MINOR_VERSION,
		BCM_5710_FW_REVISION_VERSION,
		BCM_5710_FW_ENGINEERING_VERSION);
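
	/*
	 * Editor's note (illustrative): the resulting name has the form
	 * "bnx2x-e1-a.b.c.d.fw" or "bnx2x-e1h-a.b.c.d.fw", where the digits
	 * come from the BCM_5710_FW_* macros the driver was built against,
	 * so a matching firmware blob is always requested.
	 */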

	printk(KERN_INFO PFX "Loading %s\n", fw_file_name);

	rc = request_firmware(&bp->firmware, fw_file_name, dev);
	if (rc) {
		printk(KERN_ERR PFX "Can't load firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	rc = bnx2x_check_firmware(bp);
	if (rc) {
		printk(KERN_ERR PFX "Corrupt firmware file %s\n", fw_file_name);
		goto request_firmware_exit;
	}

	fw_hdr = (struct bnx2x_fw_file_hdr *)bp->firmware->data;

	/* Initialize the pointers to the init arrays */
	/* Blob */
	BNX2X_ALLOC_AND_SET(init_data, request_firmware_exit, be32_to_cpu_n);

	/* Opcodes */
	BNX2X_ALLOC_AND_SET(init_ops, init_ops_alloc_err, bnx2x_prep_ops);

	/* Offsets */
	BNX2X_ALLOC_AND_SET(init_ops_offsets, init_offsets_alloc_err, be16_to_cpu_n);

	/* STORMs firmware */
	bp->tsem_int_table_data = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_int_table_data.offset);
	bp->tsem_pram_data = bp->firmware->data +
			be32_to_cpu(fw_hdr->tsem_pram_data.offset);
	bp->usem_int_table_data = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_int_table_data.offset);
	bp->usem_pram_data = bp->firmware->data +
			be32_to_cpu(fw_hdr->usem_pram_data.offset);
	bp->xsem_int_table_data = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_int_table_data.offset);
	bp->xsem_pram_data = bp->firmware->data +
			be32_to_cpu(fw_hdr->xsem_pram_data.offset);
	bp->csem_int_table_data = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_int_table_data.offset);
	bp->csem_pram_data = bp->firmware->data +
			be32_to_cpu(fw_hdr->csem_pram_data.offset);

	return 0;

init_offsets_alloc_err:
	kfree(bp->init_ops);
init_ops_alloc_err:
	kfree(bp->init_data);
request_firmware_exit:
	release_firmware(bp->firmware);

	return rc;
}

static int __devinit bnx2x_init_one(struct pci_dev *pdev,
				    const struct pci_device_id *ent)
{
	static int version_printed;
	struct net_device *dev = NULL;
	struct bnx2x *bp;
	int rc;

	if (version_printed++ == 0)
		printk(KERN_INFO "%s", version);

	/* dev zeroed in init_etherdev */
	dev = alloc_etherdev_mq(sizeof(*bp), MAX_CONTEXT);
	if (!dev) {
		printk(KERN_ERR PFX "Cannot allocate net device\n");
		return -ENOMEM;
	}

	bp = netdev_priv(dev);
	bp->msglevel = debug;

	rc = bnx2x_init_dev(pdev, dev);
	if (rc < 0) {
		free_netdev(dev);
		return rc;
	}

	pci_set_drvdata(pdev, dev);

	rc = bnx2x_init_bp(bp);
	if (rc)
		goto init_one_exit;

	/* Set init arrays */
	rc = bnx2x_init_firmware(bp, &pdev->dev);
	if (rc) {
		printk(KERN_ERR PFX "Error loading firmware\n");
		goto init_one_exit;
	}

	rc = register_netdev(dev);
	if (rc) {
		dev_err(&pdev->dev, "Cannot register net device\n");
		goto init_one_exit;
	}

	printk(KERN_INFO "%s: %s (%c%d) PCI-E x%d %s found at mem %lx,"
	       " IRQ %d, ", dev->name, board_info[ent->driver_data].name,
	       (CHIP_REV(bp) >> 12) + 'A', (CHIP_METAL(bp) >> 4),
	       bnx2x_get_pcie_width(bp),
	       (bnx2x_get_pcie_speed(bp) == 2) ? "5GHz (Gen2)" : "2.5GHz",
	       dev->base_addr, bp->pdev->irq);
	printk(KERN_CONT "node addr %pM\n", dev->dev_addr);

	return 0;

init_one_exit:
	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);

	return rc;
}

static void __devexit bnx2x_remove_one(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return;
	}
	bp = netdev_priv(dev);

	unregister_netdev(dev);

	kfree(bp->init_ops_offsets);
	kfree(bp->init_ops);
	kfree(bp->init_data);
	release_firmware(bp->firmware);

	if (bp->regview)
		iounmap(bp->regview);

	if (bp->doorbells)
		iounmap(bp->doorbells);

	free_netdev(dev);

	if (atomic_read(&pdev->enable_cnt) == 1)
		pci_release_regions(pdev);

	pci_disable_device(pdev);
	pci_set_drvdata(pdev, NULL);
}

static int bnx2x_suspend(struct pci_dev *pdev, pm_message_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_save_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	netif_device_detach(dev);

	bnx2x_nic_unload(bp, UNLOAD_CLOSE);

	bnx2x_set_power_state(bp, pci_choose_state(pdev, state));

	rtnl_unlock();

	return 0;
}

static int bnx2x_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp;
	int rc;

	if (!dev) {
		printk(KERN_ERR PFX "BAD net device from bnx2x_init_one\n");
		return -ENODEV;
	}
	bp = netdev_priv(dev);

	rtnl_lock();

	pci_restore_state(pdev);

	if (!netif_running(dev)) {
		rtnl_unlock();
		return 0;
	}

	bnx2x_set_power_state(bp, PCI_D0);
	netif_device_attach(dev);

	rc = bnx2x_nic_load(bp, LOAD_OPEN);

	rtnl_unlock();

	return rc;
}

static int bnx2x_eeh_nic_unload(struct bnx2x *bp)
{
	int i;

	bp->state = BNX2X_STATE_ERROR;

	bp->rx_mode = BNX2X_RX_MODE_NONE;

	bnx2x_netif_stop(bp, 0);

	del_timer_sync(&bp->timer);
	bp->stats_state = STATS_STATE_DISABLED;
	DP(BNX2X_MSG_STATS, "stats_state - DISABLED\n");

	/* Release IRQs */
	bnx2x_free_irq(bp);

	if (CHIP_IS_E1(bp)) {
		struct mac_configuration_cmd *config =
						bnx2x_sp(bp, mcast_config);

		for (i = 0; i < config->hdr.length; i++)
			CAM_INVALIDATE(config->config_table[i]);
	}

	/* Free SKBs, SGEs, TPA pool and driver internals */
	bnx2x_free_skbs(bp);
	for_each_rx_queue(bp, i)
		bnx2x_free_rx_sge_range(bp, bp->fp + i, NUM_RX_SGE);
	for_each_rx_queue(bp, i)
		netif_napi_del(&bnx2x_fp(bp, i, napi));
	bnx2x_free_mem(bp);

	bp->state = BNX2X_STATE_CLOSED;

	netif_carrier_off(bp->dev);

	return 0;
}

static void bnx2x_eeh_recover(struct bnx2x *bp)
{
	u32 val;

	mutex_init(&bp->port.phy_mutex);

	bp->common.shmem_base = REG_RD(bp, MISC_REG_SHARED_MEM_ADDR);
	bp->link_params.shmem_base = bp->common.shmem_base;
	BNX2X_DEV_INFO("shmem offset is 0x%x\n", bp->common.shmem_base);

	if (!bp->common.shmem_base ||
	    (bp->common.shmem_base < 0xA0000) ||
	    (bp->common.shmem_base >= 0xC0000)) {
		BNX2X_DEV_INFO("MCP not active\n");
		bp->flags |= NO_MCP_FLAG;
		return;
	}

	val = SHMEM_RD(bp, validity_map[BP_PORT(bp)]);
	if ((val & (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		!= (SHR_MEM_VALIDITY_DEV_INFO | SHR_MEM_VALIDITY_MB))
		BNX2X_ERR("BAD MCP validity signature\n");

	if (!BP_NOMCP(bp)) {
		bp->fw_seq = (SHMEM_RD(bp, func_mb[BP_FUNC(bp)].drv_mb_header)
			      & DRV_MSG_SEQ_NUMBER_MASK);
		BNX2X_DEV_INFO("fw_seq 0x%08x\n", bp->fw_seq);
	}
}
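
/*
 * Note (editor's illustration): recovery re-reads the shmem base from the
 * chip and treats anything outside the expected window (0xA0000..0xC0000
 * above) as "MCP not active", setting NO_MCP_FLAG so the driver continues
 * without management firmware; a bad validity signature is only logged,
 * it does not abort recovery.
 */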

/**
 * bnx2x_io_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t bnx2x_io_error_detected(struct pci_dev *pdev,
						pci_channel_state_t state)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	netif_device_detach(dev);

	if (state == pci_channel_io_perm_failure) {
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	if (netif_running(dev))
		bnx2x_eeh_nic_unload(bp);

	pci_disable_device(pdev);

	rtnl_unlock();

	/* Request a slot reset */
	return PCI_ERS_RESULT_NEED_RESET;
}

/**
 * bnx2x_io_slot_reset - called after the PCI bus has been reset
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot.
 */
static pci_ers_result_t bnx2x_io_slot_reset(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	if (pci_enable_device(pdev)) {
		dev_err(&pdev->dev,
			"Cannot re-enable PCI device after reset\n");
		rtnl_unlock();
		return PCI_ERS_RESULT_DISCONNECT;
	}

	pci_set_master(pdev);
	pci_restore_state(pdev);

	if (netif_running(dev))
		bnx2x_set_power_state(bp, PCI_D0);

	rtnl_unlock();

	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * bnx2x_io_resume - called when traffic can start flowing again
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation.
 */
static void bnx2x_io_resume(struct pci_dev *pdev)
{
	struct net_device *dev = pci_get_drvdata(pdev);
	struct bnx2x *bp = netdev_priv(dev);

	rtnl_lock();

	bnx2x_eeh_recover(bp);

	if (netif_running(dev))
		bnx2x_nic_load(bp, LOAD_NORMAL);

	netif_device_attach(dev);

	rtnl_unlock();
}

static struct pci_error_handlers bnx2x_err_handler = {
	.error_detected = bnx2x_io_error_detected,
	.slot_reset     = bnx2x_io_slot_reset,
	.resume         = bnx2x_io_resume,
};

static struct pci_driver bnx2x_pci_driver = {
	.name        = DRV_MODULE_NAME,
	.id_table    = bnx2x_pci_tbl,
	.probe       = bnx2x_init_one,
	.remove      = __devexit_p(bnx2x_remove_one),
	.suspend     = bnx2x_suspend,
	.resume      = bnx2x_resume,
	.err_handler = &bnx2x_err_handler,
};

static int __init bnx2x_init(void)
{
	int ret;

	bnx2x_wq = create_singlethread_workqueue("bnx2x");
	if (bnx2x_wq == NULL) {
		printk(KERN_ERR PFX "Cannot create workqueue\n");
		return -ENOMEM;
	}

	ret = pci_register_driver(&bnx2x_pci_driver);
	if (ret) {
		printk(KERN_ERR PFX "Cannot register driver\n");
		destroy_workqueue(bnx2x_wq);
	}
	return ret;
}

static void __exit bnx2x_cleanup(void)
{
	pci_unregister_driver(&bnx2x_pci_driver);

	destroy_workqueue(bnx2x_wq);
}

module_init(bnx2x_init);
module_exit(bnx2x_cleanup);