1 /************************************************************************
2 * s2io.c: A Linux PCI-X Ethernet driver for S2IO 10GbE Server NIC
3 * Copyright(c) 2002-2005 S2IO Technologies
5 * This software may be used and distributed according to the terms of
6 * the GNU General Public License (GPL), incorporated herein by reference.
7 * Drivers based on or derived from this code fall under the GPL and must
8 * retain the authorship, copyright and license notice. This file is not
9 * a complete program and may only be used when the entire operating
10 * system is licensed under the GPL.
11 * See the file COPYING in this distribution for more information.
14 * Jeff Garzik : For pointing out the improper error condition
15 * check in the s2io_xmit routine and also some
16 * issues in the Tx watch dog function. Also for
17 * patiently answering all those innumerable
18 * questions regarding the 2.6 porting issues.
19 * Stephen Hemminger : Providing proper 2.6 porting mechanism for some
20 * macros available only in 2.6 Kernel.
21 * Francois Romieu : For pointing out all code part that were
22 * deprecated and also styling related comments.
23 * Grant Grundler : For helping me get rid of some Architecture
25 * Christopher Hellwig : Some more 2.6 specific issues in the driver.
27 * The module loadable parameters that are supported by the driver and a brief
28 * explanation of all the variables.
29 * rx_ring_num : This can be used to program the number of receive rings used
31 * rx_ring_len: This defines the number of descriptors each ring can have. This
32 * is also an array of size 8.
33 * tx_fifo_num: This defines the number of Tx FIFOs that are used in the driver.
34 * tx_fifo_len: This too is an array of 8. Each element defines the number of
35 * Tx descriptors that can be associated with each corresponding FIFO.
36 * in PCI Configuration space.
37 ************************************************************************/
39 #include<linux/config.h>
40 #include<linux/module.h>
41 #include<linux/types.h>
42 #include<linux/errno.h>
43 #include<linux/ioport.h>
45 #include<linux/kernel.h>
46 #include<linux/netdevice.h>
47 #include<linux/etherdevice.h>
48 #include<linux/skbuff.h>
49 #include<linux/init.h>
50 #include<linux/delay.h>
51 #include<linux/stddef.h>
52 #include<linux/ioctl.h>
53 #include<linux/timex.h>
54 #include<linux/sched.h>
55 #include<linux/ethtool.h>
56 #include<asm/system.h>
57 #include<asm/uaccess.h>
58 #include<linux/version.h>
60 #include<linux/workqueue.h>
64 #include "s2io-regs.h"
66 /* S2io Driver name & version. */
67 static char s2io_driver_name[] = "s2io";
68 static char s2io_driver_version[] = "Version 1.7.5.1";
71 * Cards with following subsystem_id have a link state indication
72 * problem, 600B, 600C, 600D, 640B, 640C and 640D.
73 * macro below identifies these cards given the subsystem_id.
/* NOTE(review): 'subid' is expanded unparenthesized below, so this macro is
 * only safe for simple (non-expression) arguments -- confirm at call sites. */
75 #define CARDS_WITH_FAULTY_LINK_INDICATORS(subid) \
76 (((subid >= 0x600B) && (subid <= 0x600D)) || \
77 ((subid >= 0x640B) && (subid <= 0x640D))) ? 1 : 0
/* True when neither the remote nor the local RMAC fault bit is set in the
 * adapter-status value passed in. */
79 #define LINK_IS_UP(val64) (!(val64 & (ADAPTER_STATUS_RMAC_REMOTE_FAULT | \
80 ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
/* Atomically claims the tasklet; a non-zero result means it was already in
 * use. Expects a variable 'sp' (per-NIC private struct) in the caller's scope. */
81 #define TASKLET_IN_USE test_and_set_bit(0, (&sp->tasklet_status))
/*
 * rx_buffer_level - classify how depleted Rx ring @ring of @sp is, given the
 * current number of posted Rx buffers @rxb_size. Compares the shortfall
 * (sp->pkt_cnt[ring] - rxb_size) against fixed thresholds.
 * NOTE(review): this listing elides the branch bodies and return statements,
 * so the exact level encoding returned cannot be confirmed from here.
 */
84 static inline int rx_buffer_level(nic_t * sp, int rxb_size, int ring)
87 if ((sp->pkt_cnt[ring] - rxb_size) > 16) {
89 if ((sp->pkt_cnt[ring] - rxb_size) < MAX_RXDS_PER_BLOCK) {
97 /* Ethtool related variables and Macros. */
/* Names reported to ethtool for the driver's self-test entries. */
98 static char s2io_gstrings[][ETH_GSTRING_LEN] = {
99 "Register test\t(offline)",
100 "Eeprom test\t(offline)",
101 "Link test\t(online)",
102 "RLDRAM test\t(offline)",
103 "BIST Test\t(offline)"
/* Names of the hardware statistics exported via ethtool -S; order must
 * match the order in which the counters are copied out. */
106 static char ethtool_stats_keys[][ETH_GSTRING_LEN] = {
108 {"tmac_data_octets"},
112 {"tmac_pause_ctrl_frms"},
113 {"tmac_any_err_frms"},
114 {"tmac_vld_ip_octets"},
122 {"rmac_data_octets"},
123 {"rmac_fcs_err_frms"},
125 {"rmac_vld_mcst_frms"},
126 {"rmac_vld_bcst_frms"},
127 {"rmac_in_rng_len_err_frms"},
129 {"rmac_pause_ctrl_frms"},
130 {"rmac_discarded_frms"},
131 {"rmac_usized_frms"},
132 {"rmac_osized_frms"},
134 {"rmac_jabber_frms"},
142 {"rmac_err_drp_udp"},
144 {"rmac_accepted_ip"},
/* Derived entry counts / byte lengths for the ethtool string tables.
 * NOTE(review): these expansions are unparenthesized; only use them where
 * operator precedence cannot change the meaning. */
148 #define S2IO_STAT_LEN sizeof(ethtool_stats_keys)/ ETH_GSTRING_LEN
149 #define S2IO_STAT_STRINGS_LEN S2IO_STAT_LEN * ETH_GSTRING_LEN
151 #define S2IO_TEST_LEN sizeof(s2io_gstrings) / ETH_GSTRING_LEN
152 #define S2IO_STRINGS_LEN S2IO_TEST_LEN * ETH_GSTRING_LEN
156 * Constants to be programmed into the Xena's registers, to configure
/* Sentinel value inside the config tables below: tells the programming loop
 * in init_nic() to switch from writing dtx_control to mdio_control. */
160 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
/* 64-bit values written one-by-one into the MDIO control register. */
163 static u64 default_mdio_cfg[] = {
165 0xC001010000000000ULL, 0xC0010100000000E0ULL,
166 0xC0010100008000E4ULL,
167 /* Remove Reset from PMA PLL */
168 0xC001010000000000ULL, 0xC0010100000000E0ULL,
169 0xC0010100000000E4ULL,
/* 64-bit values written one-by-one into the DTX control register to bring
 * up the XAUI interface. */
173 static u64 default_dtx_cfg[] = {
174 0x8000051500000000ULL, 0x80000515000000E0ULL,
175 0x80000515D93500E4ULL, 0x8001051500000000ULL,
176 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
177 0x8002051500000000ULL, 0x80020515000000E0ULL,
178 0x80020515F21000E4ULL,
179 /* Set PADLOOPBACKN */
180 0x8002051500000000ULL, 0x80020515000000E0ULL,
181 0x80020515B20000E4ULL, 0x8003051500000000ULL,
182 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
183 0x8004051500000000ULL, 0x80040515000000E0ULL,
184 0x80040515B20000E4ULL, 0x8005051500000000ULL,
185 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
187 /* Remove PADLOOPBACKN */
188 0x8002051500000000ULL, 0x80020515000000E0ULL,
189 0x80020515F20000E4ULL, 0x8003051500000000ULL,
190 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
191 0x8004051500000000ULL, 0x80040515000000E0ULL,
192 0x80040515F20000E4ULL, 0x8005051500000000ULL,
193 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
199 * Constants for Fixing the MacAddress problem seen mostly on
/* Register write sequence used as a workaround for the MAC-address
 * read problem mentioned above. */
202 static u64 fix_mac[] = {
203 0x0060000000000000ULL, 0x0060600000000000ULL,
204 0x0040600000000000ULL, 0x0000600000000000ULL,
205 0x0020600000000000ULL, 0x0060600000000000ULL,
206 0x0020600000000000ULL, 0x0060600000000000ULL,
207 0x0020600000000000ULL, 0x0060600000000000ULL,
208 0x0020600000000000ULL, 0x0060600000000000ULL,
209 0x0020600000000000ULL, 0x0060600000000000ULL,
210 0x0020600000000000ULL, 0x0060600000000000ULL,
211 0x0020600000000000ULL, 0x0060600000000000ULL,
212 0x0020600000000000ULL, 0x0060600000000000ULL,
213 0x0020600000000000ULL, 0x0060600000000000ULL,
214 0x0020600000000000ULL, 0x0060600000000000ULL,
215 0x0020600000000000ULL, 0x0000600000000000ULL,
216 0x0040600000000000ULL, 0x0060600000000000ULL,
220 /* Module Loadable parameters. */
/* Number of Tx FIFOs actually used (per-FIFO lengths in tx_fifo_len[]). */
221 static unsigned int tx_fifo_num = 1;
222 static unsigned int tx_fifo_len[MAX_TX_FIFOS] =
223 {[0 ...(MAX_TX_FIFOS - 1)] = 0 };
/* Number of Rx rings actually used (per-ring sizes in rx_ring_sz[]). */
224 static unsigned int rx_ring_num = 1;
225 static unsigned int rx_ring_sz[MAX_RX_RINGS] =
226 {[0 ...(MAX_RX_RINGS - 1)] = 0 };
/* Hardware statistics auto-refresh period, programmed via SET_UPDT_PERIOD(). */
227 static unsigned int Stats_refresh_time = 4;
/* Pause-frame time value (max 0xFFFF) inserted into generated pause frames. */
228 static unsigned int rmac_pause_time = 65535;
/* Per-queue pause-generation thresholds (out of 256) for Rx queues 0-3 / 4-7. */
229 static unsigned int mc_pause_threshold_q0q3 = 187;
230 static unsigned int mc_pause_threshold_q4q7 = 187;
/* Read-split limit fed to PIC_CNTL_SHARED_SPLITS(); zero-initialized. */
231 static unsigned int shared_splits;
/* Sampling periods for the Tx/Rx link-utilization calculation. */
232 static unsigned int tmac_util_period = 5;
233 static unsigned int rmac_util_period = 5;
234 #ifndef CONFIG_S2IO_NAPI
235 static unsigned int indicate_max_pkts;
240 * This table lists all the devices that this driver supports.
242 static struct pci_device_id s2io_tbl[] __devinitdata = {
243 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_WIN,
244 PCI_ANY_ID, PCI_ANY_ID},
245 {PCI_VENDOR_ID_S2IO, PCI_DEVICE_ID_S2IO_UNI,
246 PCI_ANY_ID, PCI_ANY_ID},
250 MODULE_DEVICE_TABLE(pci, s2io_tbl);
/* PCI driver hookup: probe/remove entry points for the devices above. */
252 static struct pci_driver s2io_driver = {
254 .id_table = s2io_tbl,
255 .probe = s2io_init_nic,
256 .remove = __devexit_p(s2io_rem_nic),
259 /* A simplifier macro used both by init and free shared_mem Fns():
 * number of PAGE-sized chunks needed for 'len' items at 'per_each' per page
 * (ceiling division). NOTE(review): arguments are unparenthesized. */
260 #define TXD_MEM_PAGE_CNT(len, per_each) ((len+per_each - 1) / per_each)
263 * init_shared_mem - Allocation and Initialization of Memory
264 * @nic: Device private variable.
265 * Description: The function allocates all the memory areas shared
266 * between the NIC and the driver. This includes Tx descriptors,
267 * Rx descriptors and the statistics block.
/* NOTE(review): this listing is a sampled excerpt -- braces, error paths and
 * several statements are elided between the visible lines; the code below is
 * kept verbatim and only comments were added. */
270 static int init_shared_mem(struct s2io_nic *nic)
273 void *tmp_v_addr, *tmp_v_addr_next;
274 dma_addr_t tmp_p_addr, tmp_p_addr_next;
275 RxD_block_t *pre_rxd_blk = NULL;
277 int lst_size, lst_per_page;
278 struct net_device *dev = nic->dev;
279 #ifdef CONFIG_2BUFF_MODE
284 mac_info_t *mac_control;
285 struct config_param *config;
287 mac_control = &nic->mac_control;
288 config = &nic->config;
291 /* Allocation and initialization of TXDLs in FIFOs */
/* Reject configurations whose total descriptor count exceeds the HW limit. */
293 for (i = 0; i < config->tx_fifo_num; i++) {
294 size += config->tx_cfg[i].fifo_len;
296 if (size > MAX_AVAILABLE_TXDS) {
297 DBG_PRINT(ERR_DBG, "%s: Total number of Tx FIFOs ",
299 DBG_PRINT(ERR_DBG, "exceeds the maximum value ");
300 DBG_PRINT(ERR_DBG, "that can be used\n");
/* One TxD list entry is max_txds descriptors; lst_per_page lists fit a page. */
304 lst_size = (sizeof(TxD_t) * config->max_txds);
305 lst_per_page = PAGE_SIZE / lst_size;
/* Per-FIFO bookkeeping array (virtual/DMA address pairs for each list). */
307 for (i = 0; i < config->tx_fifo_num; i++) {
308 int fifo_len = config->tx_cfg[i].fifo_len;
309 int list_holder_size = fifo_len * sizeof(list_info_hold_t);
310 nic->list_info[i] = kmalloc(list_holder_size, GFP_KERNEL);
311 if (!nic->list_info[i]) {
313 "Malloc failed for list_info\n");
316 memset(nic->list_info[i], 0, list_holder_size);
/* Carve each DMA-coherent page into lst_per_page TxD lists and record the
 * virtual and bus address of every list. */
318 for (i = 0; i < config->tx_fifo_num; i++) {
319 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
321 mac_control->tx_curr_put_info[i].offset = 0;
322 mac_control->tx_curr_put_info[i].fifo_len =
323 config->tx_cfg[i].fifo_len - 1;
324 mac_control->tx_curr_get_info[i].offset = 0;
325 mac_control->tx_curr_get_info[i].fifo_len =
326 config->tx_cfg[i].fifo_len - 1;
327 for (j = 0; j < page_num; j++) {
331 tmp_v = pci_alloc_consistent(nic->pdev,
335 "pci_alloc_consistent ");
336 DBG_PRINT(ERR_DBG, "failed for TxDL\n");
339 while (k < lst_per_page) {
340 int l = (j * lst_per_page) + k;
341 if (l == config->tx_cfg[i].fifo_len)
343 nic->list_info[i][l].list_virt_addr =
344 tmp_v + (k * lst_size);
345 nic->list_info[i][l].list_phy_addr =
346 tmp_p + (k * lst_size);
353 /* Allocation and initialization of RXDs in Rings */
/* Each Rx ring must hold a whole number of RxD blocks. */
355 for (i = 0; i < config->rx_ring_num; i++) {
356 if (config->rx_cfg[i].num_rxd % (MAX_RXDS_PER_BLOCK + 1)) {
357 DBG_PRINT(ERR_DBG, "%s: RxD count of ", dev->name);
358 DBG_PRINT(ERR_DBG, "Ring%d is not a multiple of ",
360 DBG_PRINT(ERR_DBG, "RxDs per Block");
363 size += config->rx_cfg[i].num_rxd;
364 nic->block_count[i] =
365 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
367 config->rx_cfg[i].num_rxd - nic->block_count[i];
370 for (i = 0; i < config->rx_ring_num; i++) {
371 mac_control->rx_curr_get_info[i].block_index = 0;
372 mac_control->rx_curr_get_info[i].offset = 0;
373 mac_control->rx_curr_get_info[i].ring_len =
374 config->rx_cfg[i].num_rxd - 1;
375 mac_control->rx_curr_put_info[i].block_index = 0;
376 mac_control->rx_curr_put_info[i].offset = 0;
377 mac_control->rx_curr_put_info[i].ring_len =
378 config->rx_cfg[i].num_rxd - 1;
380 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
381 /* Allocating all the Rx blocks */
382 for (j = 0; j < blk_cnt; j++) {
383 #ifndef CONFIG_2BUFF_MODE
384 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
386 size = SIZE_OF_BLOCK;
388 tmp_v_addr = pci_alloc_consistent(nic->pdev, size,
390 if (tmp_v_addr == NULL) {
392 * In case of failure, free_shared_mem()
393 * is called, which should free any
394 * memory that was alloced till the
397 nic->rx_blocks[i][j].block_virt_addr =
401 memset(tmp_v_addr, 0, size);
402 nic->rx_blocks[i][j].block_virt_addr = tmp_v_addr;
403 nic->rx_blocks[i][j].block_dma_addr = tmp_p_addr;
405 /* Interlinking all Rx Blocks (circularly: last links to first) */
406 for (j = 0; j < blk_cnt; j++) {
407 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
409 nic->rx_blocks[i][(j + 1) %
410 blk_cnt].block_virt_addr;
411 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
413 nic->rx_blocks[i][(j + 1) %
414 blk_cnt].block_dma_addr;
416 pre_rxd_blk = (RxD_block_t *) tmp_v_addr;
417 pre_rxd_blk->reserved_1 = END_OF_BLOCK; /* last RxD
420 #ifndef CONFIG_2BUFF_MODE
421 pre_rxd_blk->reserved_2_pNext_RxD_block =
422 (unsigned long) tmp_v_addr_next;
424 pre_rxd_blk->pNext_RxD_Blk_physical =
425 (u64) tmp_p_addr_next;
429 #ifdef CONFIG_2BUFF_MODE
431 * Allocation of Storages for buffer addresses in 2BUFF mode
432 * and the buffers as well.
434 for (i = 0; i < config->rx_ring_num; i++) {
436 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
437 nic->ba[i] = kmalloc((sizeof(buffAdd_t *) * blk_cnt),
441 for (j = 0; j < blk_cnt; j++) {
443 nic->ba[i][j] = kmalloc((sizeof(buffAdd_t) *
444 (MAX_RXDS_PER_BLOCK + 1)),
448 while (k != MAX_RXDS_PER_BLOCK) {
449 ba = &nic->ba[i][j][k];
/* Over-allocate by ALIGN_SIZE, then round the pointer down so ba_0/ba_1
 * are aligned copies of the raw ba_0_org/ba_1_org allocations. */
451 ba->ba_0_org = kmalloc
452 (BUF0_LEN + ALIGN_SIZE, GFP_KERNEL);
455 tmp = (unsigned long) ba->ba_0_org;
457 tmp &= ~((unsigned long) ALIGN_SIZE);
458 ba->ba_0 = (void *) tmp;
460 ba->ba_1_org = kmalloc
461 (BUF1_LEN + ALIGN_SIZE, GFP_KERNEL);
464 tmp = (unsigned long) ba->ba_1_org;
466 tmp &= ~((unsigned long) ALIGN_SIZE);
467 ba->ba_1 = (void *) tmp;
474 /* Allocation and initialization of Statistics block */
475 size = sizeof(StatInfo_t);
476 mac_control->stats_mem = pci_alloc_consistent
477 (nic->pdev, size, &mac_control->stats_mem_phy);
479 if (!mac_control->stats_mem) {
481 * In case of failure, free_shared_mem() is called, which
482 * should free any memory that was alloced till the
487 mac_control->stats_mem_sz = size;
489 tmp_v_addr = mac_control->stats_mem;
490 mac_control->stats_info = (StatInfo_t *) tmp_v_addr;
491 memset(tmp_v_addr, 0, size);
493 DBG_PRINT(INIT_DBG, "%s:Ring Mem PHY: 0x%llx\n", dev->name,
494 (unsigned long long) tmp_p_addr);
500 * free_shared_mem - Free the allocated Memory
501 * @nic: Device private variable.
502 * Description: This function is to free all memory locations allocated by
503 * the init_shared_mem() function and return it to the kernel.
/* NOTE(review): sampled excerpt -- braces and several statements are elided;
 * code kept verbatim, comments only. Must tolerate partial allocations,
 * since init_shared_mem() calls it on any failure path. */
506 static void free_shared_mem(struct s2io_nic *nic)
508 int i, j, blk_cnt, size;
510 dma_addr_t tmp_p_addr;
511 mac_info_t *mac_control;
512 struct config_param *config;
513 int lst_size, lst_per_page;
519 mac_control = &nic->mac_control;
520 config = &nic->config;
/* Mirror of the sizing arithmetic in init_shared_mem(). */
522 lst_size = (sizeof(TxD_t) * config->max_txds);
523 lst_per_page = PAGE_SIZE / lst_size;
/* Free one DMA-coherent page per group of lst_per_page TxD lists; the
 * first list of each page holds the page's base addresses. */
525 for (i = 0; i < config->tx_fifo_num; i++) {
526 int page_num = TXD_MEM_PAGE_CNT(config->tx_cfg[i].fifo_len,
528 for (j = 0; j < page_num; j++) {
529 int mem_blks = (j * lst_per_page);
530 if (!nic->list_info[i][mem_blks].list_virt_addr)
532 pci_free_consistent(nic->pdev, PAGE_SIZE,
533 nic->list_info[i][mem_blks].
535 nic->list_info[i][mem_blks].
538 kfree(nic->list_info[i]);
541 #ifndef CONFIG_2BUFF_MODE
542 size = (MAX_RXDS_PER_BLOCK + 1) * (sizeof(RxD_t));
544 size = SIZE_OF_BLOCK;
/* Free every successfully allocated Rx block in every ring. */
546 for (i = 0; i < config->rx_ring_num; i++) {
547 blk_cnt = nic->block_count[i];
548 for (j = 0; j < blk_cnt; j++) {
549 tmp_v_addr = nic->rx_blocks[i][j].block_virt_addr;
550 tmp_p_addr = nic->rx_blocks[i][j].block_dma_addr;
551 if (tmp_v_addr == NULL)
553 pci_free_consistent(nic->pdev, size,
554 tmp_v_addr, tmp_p_addr);
558 #ifdef CONFIG_2BUFF_MODE
559 /* Freeing buffer storage addresses in 2BUFF mode. */
560 for (i = 0; i < config->rx_ring_num; i++) {
562 config->rx_cfg[i].num_rxd / (MAX_RXDS_PER_BLOCK + 1);
565 for (j = 0; j < blk_cnt; j++) {
567 if (!nic->ba[i][j]) {
571 while (k != MAX_RXDS_PER_BLOCK) {
572 buffAdd_t *ba = &nic->ba[i][j][k];
573 if (!ba || !ba->ba_0_org || !ba->ba_1_org)
576 kfree(nic->ba[i][j]);
587 kfree(nic->ba[i][j]);
/* Finally release the DMA-coherent statistics block, if it was allocated. */
594 if (mac_control->stats_mem) {
595 pci_free_consistent(nic->pdev,
596 mac_control->stats_mem_sz,
597 mac_control->stats_mem,
598 mac_control->stats_mem_phy);
603 * init_nic - Initialization of hardware
604 * @nic: device private variable
605 * Description: The function sequentially configures every block
606 * of the H/W from their reset values.
607 * Return Value: SUCCESS on success and
608 * '-1' on failure (endian settings incorrect).
/* NOTE(review): sampled excerpt -- braces, #else/#endif lines and several
 * statements are elided between visible lines; code kept verbatim. */
611 static int init_nic(struct s2io_nic *nic)
613 XENA_dev_config_t __iomem *bar0 = nic->bar0;
614 struct net_device *dev = nic->dev;
615 register u64 val64 = 0;
619 mac_info_t *mac_control;
620 struct config_param *config;
621 int mdio_cnt = 0, dtx_cnt = 0;
622 unsigned long long print_var, mem_share;
624 mac_control = &nic->mac_control;
625 config = &nic->config;
628 * Set proper endian settings and verify the same by
629 * reading the PIF Feed-back register.
633 * The device by default set to a big endian format, so
634 * a big endian driver need not set anything.
636 writeq(0xffffffffffffffffULL, &bar0->swapper_ctrl);
637 val64 = (SWAPPER_CTRL_PIF_R_FE |
638 SWAPPER_CTRL_PIF_R_SE |
639 SWAPPER_CTRL_PIF_W_FE |
640 SWAPPER_CTRL_PIF_W_SE |
641 SWAPPER_CTRL_TXP_FE |
642 SWAPPER_CTRL_TXP_SE |
643 SWAPPER_CTRL_TXD_R_FE |
644 SWAPPER_CTRL_TXD_W_FE |
645 SWAPPER_CTRL_TXF_R_FE |
646 SWAPPER_CTRL_RXD_R_FE |
647 SWAPPER_CTRL_RXD_W_FE |
648 SWAPPER_CTRL_RXF_W_FE |
649 SWAPPER_CTRL_XMSI_FE |
650 SWAPPER_CTRL_XMSI_SE |
651 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
652 writeq(val64, &bar0->swapper_ctrl);
655 * Initially we enable all bits to make it accessible by
656 * the driver, then we selectively enable only those bits
657 * that we want to set.
/* NOTE(review): second swapper-control write uses a slightly larger bit set
 * (adds the *_SE variants for TXD/RXD) than the first -- per-endianness
 * variants of the same programming sequence. */
659 writeq(0xffffffffffffffffULL, &bar0->swapper_ctrl);
660 val64 = (SWAPPER_CTRL_PIF_R_FE |
661 SWAPPER_CTRL_PIF_R_SE |
662 SWAPPER_CTRL_PIF_W_FE |
663 SWAPPER_CTRL_PIF_W_SE |
664 SWAPPER_CTRL_TXP_FE |
665 SWAPPER_CTRL_TXP_SE |
666 SWAPPER_CTRL_TXD_R_FE |
667 SWAPPER_CTRL_TXD_R_SE |
668 SWAPPER_CTRL_TXD_W_FE |
669 SWAPPER_CTRL_TXD_W_SE |
670 SWAPPER_CTRL_TXF_R_FE |
671 SWAPPER_CTRL_RXD_R_FE |
672 SWAPPER_CTRL_RXD_R_SE |
673 SWAPPER_CTRL_RXD_W_FE |
674 SWAPPER_CTRL_RXD_W_SE |
675 SWAPPER_CTRL_RXF_W_FE |
676 SWAPPER_CTRL_XMSI_FE |
677 SWAPPER_CTRL_XMSI_SE |
678 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
679 writeq(val64, &bar0->swapper_ctrl);
683 * Verifying if endian settings are accurate by
684 * reading a feedback register.
686 val64 = readq(&bar0->pif_rd_swapper_fb);
687 if (val64 != 0x0123456789ABCDEFULL) {
688 /* Endian settings are incorrect, calls for another dekko. */
689 print_var = (unsigned long long) val64;
690 DBG_PRINT(INIT_DBG, "%s: Endian settings are wrong",
692 DBG_PRINT(ERR_DBG, ", feedback read %llx\n", print_var);
697 /* Remove XGXS from reset state */
699 writeq(val64, &bar0->sw_reset);
700 val64 = readq(&bar0->sw_reset);
/* Give the XGXS block time to come out of reset (~500ms). */
701 set_current_state(TASK_UNINTERRUPTIBLE);
702 schedule_timeout(HZ / 2);
704 /* Enable Receiving broadcasts */
/* mac_cfg is key-protected: each 32-bit half-write must be preceded by
 * writing the unlock key into rmac_cfg_key. */
705 add = &bar0->mac_cfg;
706 val64 = readq(&bar0->mac_cfg);
707 val64 |= MAC_RMAC_BCAST_ENABLE;
708 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
709 writel((u32) val64, add);
710 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
711 writel((u32) (val64 >> 32), (add + 4));
713 /* Read registers in all blocks */
714 val64 = readq(&bar0->mac_int_mask);
715 val64 = readq(&bar0->mc_int_mask);
716 val64 = readq(&bar0->xgxs_int_mask);
720 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
723 * Configuring the XAUI Interface of Xena.
724 * ***************************************
725 * To Configure the Xena's XAUI, one has to write a series
726 * of 64 bit values into two registers in a particular
727 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
728 * which will be defined in the array of configuration values
729 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
730 * to switch writing from one register to another. We continue
731 * writing these values until we encounter the 'END_SIGN' macro.
732 * For example, After making a series of 21 writes into
733 * dtx_control register the 'SWITCH_SIGN' appears and hence we
734 * start writing into mdio_control until we encounter END_SIGN.
738 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
739 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
743 SPECIAL_REG_WRITE(default_dtx_cfg[dtx_cnt],
744 &bar0->dtx_control, UF);
745 val64 = readq(&bar0->dtx_control);
749 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
750 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
754 SPECIAL_REG_WRITE(default_mdio_cfg[mdio_cnt],
755 &bar0->mdio_control, UF);
756 val64 = readq(&bar0->mdio_control);
/* Done once BOTH tables have reached their END_SIGN terminator. */
759 if ((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
760 (default_mdio_cfg[mdio_cnt] == END_SIGN)) {
767 /* Tx DMA Initialization */
769 writeq(val64, &bar0->tx_fifo_partition_0);
770 writeq(val64, &bar0->tx_fifo_partition_1);
771 writeq(val64, &bar0->tx_fifo_partition_2);
772 writeq(val64, &bar0->tx_fifo_partition_3);
/* Pack each FIFO's length and priority into the partition registers,
 * two FIFOs per 64-bit register (32 bits each). */
775 for (i = 0, j = 0; i < config->tx_fifo_num; i++) {
777 vBIT(config->tx_cfg[i].fifo_len - 1, ((i * 32) + 19),
778 13) | vBIT(config->tx_cfg[i].fifo_priority,
781 if (i == (config->tx_fifo_num - 1)) {
788 writeq(val64, &bar0->tx_fifo_partition_0);
792 writeq(val64, &bar0->tx_fifo_partition_1);
796 writeq(val64, &bar0->tx_fifo_partition_2);
800 writeq(val64, &bar0->tx_fifo_partition_3);
805 /* Enable Tx FIFO partition 0. */
806 val64 = readq(&bar0->tx_fifo_partition_0);
807 val64 |= BIT(0); /* To enable the FIFO partition. */
808 writeq(val64, &bar0->tx_fifo_partition_0);
810 val64 = readq(&bar0->tx_fifo_partition_0);
811 DBG_PRINT(INIT_DBG, "Fifo partition at: 0x%p is: 0x%llx\n",
812 &bar0->tx_fifo_partition_0, (unsigned long long) val64);
815 * Initialization of Tx_PA_CONFIG register to ignore packet
816 * integrity checking.
818 val64 = readq(&bar0->tx_pa_cfg);
819 val64 |= TX_PA_CFG_IGNORE_FRM_ERR | TX_PA_CFG_IGNORE_SNAP_OUI |
820 TX_PA_CFG_IGNORE_LLC_CTRL | TX_PA_CFG_IGNORE_L2_ERR;
821 writeq(val64, &bar0->tx_pa_cfg);
823 /* Rx DMA initialization. */
/* Program the per-ring priorities into rx_queue_priority. */
825 for (i = 0; i < config->rx_ring_num; i++) {
827 vBIT(config->rx_cfg[i].ring_priority, (5 + (i * 8)),
830 writeq(val64, &bar0->rx_queue_priority);
833 * Allocating equal share of memory to all the
/* Split the 64 memory units evenly across the configured rings; queue 0
 * additionally absorbs the remainder (64 % rx_ring_num). */
837 for (i = 0; i < config->rx_ring_num; i++) {
840 mem_share = (64 / config->rx_ring_num +
841 64 % config->rx_ring_num);
842 val64 |= RX_QUEUE_CFG_Q0_SZ(mem_share);
845 mem_share = (64 / config->rx_ring_num);
846 val64 |= RX_QUEUE_CFG_Q1_SZ(mem_share);
849 mem_share = (64 / config->rx_ring_num);
850 val64 |= RX_QUEUE_CFG_Q2_SZ(mem_share);
853 mem_share = (64 / config->rx_ring_num);
854 val64 |= RX_QUEUE_CFG_Q3_SZ(mem_share);
857 mem_share = (64 / config->rx_ring_num);
858 val64 |= RX_QUEUE_CFG_Q4_SZ(mem_share);
861 mem_share = (64 / config->rx_ring_num);
862 val64 |= RX_QUEUE_CFG_Q5_SZ(mem_share);
865 mem_share = (64 / config->rx_ring_num);
866 val64 |= RX_QUEUE_CFG_Q6_SZ(mem_share);
869 mem_share = (64 / config->rx_ring_num);
870 val64 |= RX_QUEUE_CFG_Q7_SZ(mem_share);
874 writeq(val64, &bar0->rx_queue_cfg);
877 * Initializing the Tx round robin registers to 0.
878 * Filling Tx and Rx round robin registers as per the
879 * number of FIFOs and Rings is still TODO.
881 writeq(0, &bar0->tx_w_round_robin_0);
882 writeq(0, &bar0->tx_w_round_robin_1);
883 writeq(0, &bar0->tx_w_round_robin_2);
884 writeq(0, &bar0->tx_w_round_robin_3);
885 writeq(0, &bar0->tx_w_round_robin_4);
889 * Disable Rx steering. Hard coding all packets be steered to
892 val64 = 0x8080808080808080ULL;
893 writeq(val64, &bar0->rts_qos_steering);
897 for (i = 1; i < 8; i++)
898 writeq(val64, &bar0->rts_frm_len_n[i]);
900 /* Set rts_frm_len register for fifo 0 */
/* MTU + 22 covers the L2 header/trailer overhead on top of the payload. */
901 writeq(MAC_RTS_FRM_LEN_SET(dev->mtu + 22),
902 &bar0->rts_frm_len_n[0]);
904 /* Enable statistics */
905 writeq(mac_control->stats_mem_phy, &bar0->stat_addr);
906 val64 = SET_UPDT_PERIOD(Stats_refresh_time) |
907 STAT_CFG_STAT_RO | STAT_CFG_STAT_EN;
908 writeq(val64, &bar0->stat_cfg);
911 * Initializing the sampling rate for the device to calculate the
912 * bandwidth utilization.
914 val64 = MAC_TX_LINK_UTIL_VAL(tmac_util_period) |
915 MAC_RX_LINK_UTIL_VAL(rmac_util_period);
916 writeq(val64, &bar0->mac_link_util);
920 * Initializing the Transmit and Receive Traffic Interrupt
923 /* TTI Initialization */
924 val64 = TTI_DATA1_MEM_TX_TIMER_VAL(0xFFF) |
925 TTI_DATA1_MEM_TX_URNG_A(0xA) |
926 TTI_DATA1_MEM_TX_URNG_B(0x10) |
927 TTI_DATA1_MEM_TX_URNG_C(0x30) | TTI_DATA1_MEM_TX_TIMER_AC_EN;
928 writeq(val64, &bar0->tti_data1_mem);
930 val64 = TTI_DATA2_MEM_TX_UFC_A(0x10) |
931 TTI_DATA2_MEM_TX_UFC_B(0x20) |
932 TTI_DATA2_MEM_TX_UFC_C(0x40) | TTI_DATA2_MEM_TX_UFC_D(0x80);
933 writeq(val64, &bar0->tti_data2_mem);
935 val64 = TTI_CMD_MEM_WE | TTI_CMD_MEM_STROBE_NEW_CMD;
936 writeq(val64, &bar0->tti_command_mem);
939 * Once the operation completes, the Strobe bit of the command
940 * register will be reset. We poll for this particular condition
941 * We wait for a maximum of 500ms for the operation to complete,
942 * if it's not complete by then we return error.
946 val64 = readq(&bar0->tti_command_mem);
947 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
951 DBG_PRINT(ERR_DBG, "%s: TTI init Failed\n",
955 set_current_state(TASK_UNINTERRUPTIBLE);
956 schedule_timeout(HZ / 20);
960 /* RTI Initialization */
961 val64 = RTI_DATA1_MEM_RX_TIMER_VAL(0xFFF) |
962 RTI_DATA1_MEM_RX_URNG_A(0xA) |
963 RTI_DATA1_MEM_RX_URNG_B(0x10) |
964 RTI_DATA1_MEM_RX_URNG_C(0x30) | RTI_DATA1_MEM_RX_TIMER_AC_EN;
966 writeq(val64, &bar0->rti_data1_mem);
968 val64 = RTI_DATA2_MEM_RX_UFC_A(0x1) |
969 RTI_DATA2_MEM_RX_UFC_B(0x2) |
970 RTI_DATA2_MEM_RX_UFC_C(0x40) | RTI_DATA2_MEM_RX_UFC_D(0x80);
971 writeq(val64, &bar0->rti_data2_mem);
973 val64 = RTI_CMD_MEM_WE | RTI_CMD_MEM_STROBE_NEW_CMD;
974 writeq(val64, &bar0->rti_command_mem);
977 * Once the operation completes, the Strobe bit of the command
978 * register will be reset. We poll for this particular condition
979 * We wait for a maximum of 500ms for the operation to complete,
980 * if it's not complete by then we return error.
984 val64 = readq(&bar0->rti_command_mem);
/* NOTE(review): this RTI poll tests the TTI strobe macro against the RTI
 * command register -- verify TTI/RTI strobe bit positions are identical. */
985 if (!(val64 & TTI_CMD_MEM_STROBE_NEW_CMD)) {
989 DBG_PRINT(ERR_DBG, "%s: RTI init Failed\n",
994 set_current_state(TASK_UNINTERRUPTIBLE);
995 schedule_timeout(HZ / 20);
999 * Initializing proper values as Pause threshold into all
1000 * the 8 Queues on Rx side.
1002 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q0q3);
1003 writeq(0xffbbffbbffbbffbbULL, &bar0->mc_pause_thresh_q4q7);
1005 /* Disable RMAC PAD STRIPPING */
/* Same key-protected two-halves write protocol as the broadcast enable. */
1006 add = &bar0->mac_cfg;
1007 val64 = readq(&bar0->mac_cfg);
1008 val64 &= ~(MAC_CFG_RMAC_STRIP_PAD);
1009 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1010 writel((u32) (val64), add);
1011 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
1012 writel((u32) (val64 >> 32), (add + 4));
1013 val64 = readq(&bar0->mac_cfg);
1016 * Set the time value to be inserted in the pause frame
1017 * generated by xena.
1019 val64 = readq(&bar0->rmac_pause_cfg);
1020 val64 &= ~(RMAC_PAUSE_HG_PTIME(0xffff));
1021 val64 |= RMAC_PAUSE_HG_PTIME(nic->mac_control.rmac_pause_time);
1022 writeq(val64, &bar0->rmac_pause_cfg);
1025 * Set the Threshold Limit for Generating the pause frame
1026 * If the amount of data in any Queue exceeds ratio of
1027 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
1028 * pause frame is generated
1031 for (i = 0; i < 4; i++) {
1033 (((u64) 0xFF00 | nic->mac_control.
1034 mc_pause_threshold_q0q3)
1037 writeq(val64, &bar0->mc_pause_thresh_q0q3);
1040 for (i = 0; i < 4; i++) {
1042 (((u64) 0xFF00 | nic->mac_control.
1043 mc_pause_threshold_q4q7)
1046 writeq(val64, &bar0->mc_pause_thresh_q4q7);
1049 * TxDMA will stop Read request if the number of read split has
1050 * exceeded the limit pointed by shared_splits
1052 val64 = readq(&bar0->pic_control);
1053 val64 |= PIC_CNTL_SHARED_SPLITS(shared_splits);
1054 writeq(val64, &bar0->pic_control);
1060 * en_dis_able_nic_intrs - Enable or Disable the interrupts
1061 * @nic: device private variable,
1062 * @mask: A mask indicating which Intr block must be modified and,
1063 * @flag: A flag indicating whether to enable or disable the Intrs.
1064 * Description: This function will either disable or enable the interrupts
1065 * depending on the flag argument. The mask argument can be used to
1066 * enable/disable any Intr block.
1067 * Return Value: NONE.
1070 static void en_dis_able_nic_intrs(struct s2io_nic *nic, u16 mask, int flag)
1072 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1073 register u64 val64 = 0, temp64 = 0;
1075 /* Top level interrupt classification */
1076 /* PIC Interrupts */
1077 if ((mask & (TX_PIC_INTR | RX_PIC_INTR))) {
1078 /* Enable PIC Intrs in the general intr mask register */
1079 val64 = TXPIC_INT_M | PIC_RX_INT_M;
1080 if (flag == ENABLE_INTRS) {
1081 temp64 = readq(&bar0->general_int_mask);
1082 temp64 &= ~((u64) val64);
1083 writeq(temp64, &bar0->general_int_mask);
1085 * Disabled all PCIX, Flash, MDIO, IIC and GPIO
1086 * interrupts for now.
1089 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1091 * No MSI Support is available presently, so TTI and
1092 * RTI interrupts are also disabled.
1094 } else if (flag == DISABLE_INTRS) {
1096 * Disable PIC Intrs in the general
1097 * intr mask register
1099 writeq(DISABLE_ALL_INTRS, &bar0->pic_int_mask);
1100 temp64 = readq(&bar0->general_int_mask);
1102 writeq(val64, &bar0->general_int_mask);
1106 /* DMA Interrupts */
1107 /* Enabling/Disabling Tx DMA interrupts */
1108 if (mask & TX_DMA_INTR) {
1109 /* Enable TxDMA Intrs in the general intr mask register */
1110 val64 = TXDMA_INT_M;
1111 if (flag == ENABLE_INTRS) {
1112 temp64 = readq(&bar0->general_int_mask);
1113 temp64 &= ~((u64) val64);
1114 writeq(temp64, &bar0->general_int_mask);
1116 * Keep all interrupts other than PFC interrupt
1117 * and PCC interrupt disabled in DMA level.
1119 val64 = DISABLE_ALL_INTRS & ~(TXDMA_PFC_INT_M |
1121 writeq(val64, &bar0->txdma_int_mask);
1123 * Enable only the MISC error 1 interrupt in PFC block
1125 val64 = DISABLE_ALL_INTRS & (~PFC_MISC_ERR_1);
1126 writeq(val64, &bar0->pfc_err_mask);
1128 * Enable only the FB_ECC error interrupt in PCC block
1130 val64 = DISABLE_ALL_INTRS & (~PCC_FB_ECC_ERR);
1131 writeq(val64, &bar0->pcc_err_mask);
1132 } else if (flag == DISABLE_INTRS) {
1134 * Disable TxDMA Intrs in the general intr mask
1137 writeq(DISABLE_ALL_INTRS, &bar0->txdma_int_mask);
1138 writeq(DISABLE_ALL_INTRS, &bar0->pfc_err_mask);
1139 temp64 = readq(&bar0->general_int_mask);
1141 writeq(val64, &bar0->general_int_mask);
1145 /* Enabling/Disabling Rx DMA interrupts */
1146 if (mask & RX_DMA_INTR) {
1147 /* Enable RxDMA Intrs in the general intr mask register */
1148 val64 = RXDMA_INT_M;
1149 if (flag == ENABLE_INTRS) {
1150 temp64 = readq(&bar0->general_int_mask);
1151 temp64 &= ~((u64) val64);
1152 writeq(temp64, &bar0->general_int_mask);
1154 * All RxDMA block interrupts are disabled for now
1157 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1158 } else if (flag == DISABLE_INTRS) {
1160 * Disable RxDMA Intrs in the general intr mask
1163 writeq(DISABLE_ALL_INTRS, &bar0->rxdma_int_mask);
1164 temp64 = readq(&bar0->general_int_mask);
1166 writeq(val64, &bar0->general_int_mask);
1170 /* MAC Interrupts */
1171 /* Enabling/Disabling MAC interrupts */
1172 if (mask & (TX_MAC_INTR | RX_MAC_INTR)) {
1173 val64 = TXMAC_INT_M | RXMAC_INT_M;
1174 if (flag == ENABLE_INTRS) {
1175 temp64 = readq(&bar0->general_int_mask);
1176 temp64 &= ~((u64) val64);
1177 writeq(temp64, &bar0->general_int_mask);
1179 * All MAC block error interrupts are disabled for now
1180 * except the link status change interrupt.
1183 val64 = MAC_INT_STATUS_RMAC_INT;
1184 temp64 = readq(&bar0->mac_int_mask);
1185 temp64 &= ~((u64) val64);
1186 writeq(temp64, &bar0->mac_int_mask);
1188 val64 = readq(&bar0->mac_rmac_err_mask);
1189 val64 &= ~((u64) RMAC_LINK_STATE_CHANGE_INT);
1190 writeq(val64, &bar0->mac_rmac_err_mask);
1191 } else if (flag == DISABLE_INTRS) {
1193 * Disable MAC Intrs in the general intr mask register
1195 writeq(DISABLE_ALL_INTRS, &bar0->mac_int_mask);
1196 writeq(DISABLE_ALL_INTRS,
1197 &bar0->mac_rmac_err_mask);
1199 temp64 = readq(&bar0->general_int_mask);
1201 writeq(val64, &bar0->general_int_mask);
1205 /* XGXS Interrupts */
1206 if (mask & (TX_XGXS_INTR | RX_XGXS_INTR)) {
1207 val64 = TXXGXS_INT_M | RXXGXS_INT_M;
1208 if (flag == ENABLE_INTRS) {
1209 temp64 = readq(&bar0->general_int_mask);
1210 temp64 &= ~((u64) val64);
1211 writeq(temp64, &bar0->general_int_mask);
1213 * All XGXS block error interrupts are disabled for now
1216 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1217 } else if (flag == DISABLE_INTRS) {
1219 * Disable MC Intrs in the general intr mask register
1221 writeq(DISABLE_ALL_INTRS, &bar0->xgxs_int_mask);
1222 temp64 = readq(&bar0->general_int_mask);
1224 writeq(val64, &bar0->general_int_mask);
1228 /* Memory Controller(MC) interrupts */
1229 if (mask & MC_INTR) {
1231 if (flag == ENABLE_INTRS) {
1232 temp64 = readq(&bar0->general_int_mask);
1233 temp64 &= ~((u64) val64);
1234 writeq(temp64, &bar0->general_int_mask);
1236 * All MC block error interrupts are disabled for now
1239 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1240 } else if (flag == DISABLE_INTRS) {
1242 * Disable MC Intrs in the general intr mask register
1244 writeq(DISABLE_ALL_INTRS, &bar0->mc_int_mask);
1245 temp64 = readq(&bar0->general_int_mask);
1247 writeq(val64, &bar0->general_int_mask);
1252 /* Tx traffic interrupts */
1253 if (mask & TX_TRAFFIC_INTR) {
1254 val64 = TXTRAFFIC_INT_M;
1255 if (flag == ENABLE_INTRS) {
1256 temp64 = readq(&bar0->general_int_mask);
1257 temp64 &= ~((u64) val64);
1258 writeq(temp64, &bar0->general_int_mask);
1260 * Enable all the Tx side interrupts
1261 * writing 0 Enables all 64 TX interrupt levels
1263 writeq(0x0, &bar0->tx_traffic_mask);
1264 } else if (flag == DISABLE_INTRS) {
1266 * Disable Tx Traffic Intrs in the general intr mask
1269 writeq(DISABLE_ALL_INTRS, &bar0->tx_traffic_mask);
1270 temp64 = readq(&bar0->general_int_mask);
1272 writeq(val64, &bar0->general_int_mask);
1276 /* Rx traffic interrupts */
1277 if (mask & RX_TRAFFIC_INTR) {
1278 val64 = RXTRAFFIC_INT_M;
1279 if (flag == ENABLE_INTRS) {
1280 temp64 = readq(&bar0->general_int_mask);
1281 temp64 &= ~((u64) val64);
1282 writeq(temp64, &bar0->general_int_mask);
1283 /* writing 0 Enables all 8 RX interrupt levels */
1284 writeq(0x0, &bar0->rx_traffic_mask);
1285 } else if (flag == DISABLE_INTRS) {
1287 * Disable Rx Traffic Intrs in the general intr mask
1290 writeq(DISABLE_ALL_INTRS, &bar0->rx_traffic_mask);
1291 temp64 = readq(&bar0->general_int_mask);
1293 writeq(val64, &bar0->general_int_mask);
1299 * verify_xena_quiescence - Checks whether the H/W is ready
1300 * @val64 : Value read from adapter status register.
1301 * @flag : indicates if the adapter enable bit was ever written once
1303 * Description: Returns whether the H/W is ready to go or not. Depending
1304 * on whether adapter enable bit was written or not the comparison
1305 * differs and the calling function passes the input argument flag to
1307 * Return: 1 If xena is quiescence
1308 * 0 If Xena is not quiescence
/*
 * NOTE(review): this listing is lossy (embedded line numbers, missing
 * statements/braces); comments below describe only the visible code.
 */
1311 static int verify_xena_quiescence(u64 val64, int flag)
/* Invert the status word: a "ready" bit set in val64 reads as 0 in tmp64,
 * so masking tmp64 with the ready-bit set tests all of them at once. */
1314 u64 tmp64 = ~((u64) val64);
/* All of these readiness/quiescence bits must be set in val64 for the
 * adapter to be considered ready. */
1318 (ADAPTER_STATUS_TDMA_READY | ADAPTER_STATUS_RDMA_READY |
1319 ADAPTER_STATUS_PFC_READY | ADAPTER_STATUS_TMAC_BUF_EMPTY |
1320 ADAPTER_STATUS_PIC_QUIESCENT | ADAPTER_STATUS_MC_DRAM_READY |
1321 ADAPTER_STATUS_MC_QUEUES_READY | ADAPTER_STATUS_M_PLL_LOCK |
1322 ADAPTER_STATUS_P_PLL_LOCK))) {
/* flag == FALSE: adapter enable bit has never been written.  In that
 * state RMAC_PCC_IDLE is expected to be clear while the RC/PRC
 * quiescent bits must all be set. */
1323 if (flag == FALSE) {
1324 if (!(val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1325 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1326 ADAPTER_STATUS_RC_PRC_QUIESCENT)) {
/* flag == TRUE (enable bit written at least once): RMAC_PCC_IDLE must
 * be fully set; RC/PRC quiescent is accepted either fully set or
 * fully clear. */
1332 if (((val64 & ADAPTER_STATUS_RMAC_PCC_IDLE) ==
1333 ADAPTER_STATUS_RMAC_PCC_IDLE) &&
1334 (!(val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ||
1335 ((val64 & ADAPTER_STATUS_RC_PRC_QUIESCENT) ==
1336 ADAPTER_STATUS_RC_PRC_QUIESCENT))) {
1348 * fix_mac_address - Fix for Mac addr problem on Alpha platforms
1349 * @sp: Pointer to device specifc structure
1351 * New procedure to clear mac address reading problems on Alpha platforms
/*
 * Replays the fix_mac[] magic sequence into the GPIO control register,
 * reading back after each write to force the posted write to complete.
 * The table is terminated by the END_SIGN sentinel value.
 */
1355 void fix_mac_address(nic_t * sp)
1357 XENA_dev_config_t __iomem *bar0 = sp->bar0;
1361 while (fix_mac[i] != END_SIGN) {
1362 writeq(fix_mac[i++], &bar0->gpio_control);
/* Read-back flushes the preceding write out to the device. */
1363 val64 = readq(&bar0->gpio_control);
1368 * start_nic - Turns the device on
1369 * @nic : device private variable.
1371 * This function actually turns the device on. Before this function is
1372 * called,all Registers are configured from their reset states
1373 * and shared memory is allocated but the NIC is still quiescent. On
1374 * calling this function, the device interrupts are cleared and the NIC is
1375 * literally switched on by writing into the adapter control register.
1377 * SUCCESS on success and -1 on failure.
1380 static int start_nic(struct s2io_nic *nic)
1382 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1383 struct net_device *dev = nic->dev;
1384 register u64 val64 = 0;
1385 u16 interruptible, i;
1387 mac_info_t *mac_control;
1388 struct config_param *config;
1390 mac_control = &nic->mac_control;
1391 config = &nic->config;
/* Point each PRC (receive controller) at the first Rx block of its ring
 * and enable it; 2-buffer mode additionally selects ring mode 3. */
1393 /* PRC Initialization and configuration */
1394 for (i = 0; i < config->rx_ring_num; i++) {
1395 writeq((u64) nic->rx_blocks[i][0].block_dma_addr,
1396 &bar0->prc_rxd0_n[i]);
1398 val64 = readq(&bar0->prc_ctrl_n[i]);
1399 #ifndef CONFIG_2BUFF_MODE
1400 val64 |= PRC_CTRL_RC_ENABLED;
1402 val64 |= PRC_CTRL_RC_ENABLED | PRC_CTRL_RING_MODE_3;
1404 writeq(val64, &bar0->prc_ctrl_n[i]);
1407 #ifdef CONFIG_2BUFF_MODE
1408 /* Enabling 2 buffer mode by writing into Rx_pa_cfg reg. */
1409 val64 = readq(&bar0->rx_pa_cfg);
1410 val64 |= RX_PA_CFG_IGNORE_L2_ERR;
1411 writeq(val64, &bar0->rx_pa_cfg);
1415 * Enabling MC-RLDRAM. After enabling the device, we timeout
1416 * for around 100ms, which is approximately the time required
1417 * for the device to be ready for operation.
1419 val64 = readq(&bar0->mc_rldram_mrs);
1420 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE | MC_RLDRAM_MRS_ENABLE;
/* SPECIAL_REG_WRITE with UF: write performed in the special
 * upper/lower-fragment order this register requires. */
1421 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
1422 val64 = readq(&bar0->mc_rldram_mrs);
1424 set_current_state(TASK_UNINTERRUPTIBLE);
1425 schedule_timeout(HZ / 10); /* Delay by around 100 ms. */
/* NOTE(review): the comment says "Enabling ECC Protection" but the code
 * CLEARS ADAPTER_ECC_EN. Either the bit is active-low or the comment is
 * misleading — confirm against the Xena register spec. */
1427 /* Enabling ECC Protection. */
1428 val64 = readq(&bar0->adapter_control);
1429 val64 &= ~ADAPTER_ECC_EN;
1430 writeq(val64, &bar0->adapter_control);
1433 * Clearing any possible Link state change interrupts that
1434 * could have popped up just before Enabling the card.
/* mac_rmac_err_reg is write-1-to-clear: writing back the value read
 * acknowledges all currently-latched conditions. */
1436 val64 = readq(&bar0->mac_rmac_err_reg);
1438 writeq(val64, &bar0->mac_rmac_err_reg);
1441 * Verify if the device is ready to be enabled, if so enable
1444 val64 = readq(&bar0->adapter_status);
1445 if (!verify_xena_quiescence(val64, nic->device_enabled_once)) {
1446 DBG_PRINT(ERR_DBG, "%s: device is not ready, ", dev->name);
1447 DBG_PRINT(ERR_DBG, "Adapter status reads: 0x%llx\n",
1448 (unsigned long long) val64);
1452 /* Enable select interrupts */
1453 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1455 en_dis_able_nic_intrs(nic, interruptible, ENABLE_INTRS);
1458 * With some switches, link might be already up at this point.
1459 * Because of this weird behavior, when we enable laser,
1460 * we may not get link. We need to handle this. We cannot
1461 * figure out which switch is misbehaving. So we are forced to
1462 * make a global change.
1465 /* Enabling Laser. */
1466 val64 = readq(&bar0->adapter_control);
1467 val64 |= ADAPTER_EOI_TX_ON;
1468 writeq(val64, &bar0->adapter_control);
/* Subsystem IDs with low byte >= 0x07 identify boards whose link and
 * activity LEDs need the SXE-002 GPIO workaround. */
1470 /* SXE-002: Initialize link and activity LED */
1471 subid = nic->pdev->subsystem_device;
1472 if ((subid & 0xFF) >= 0x07) {
1473 val64 = readq(&bar0->gpio_control);
1474 val64 |= 0x0000800000000000ULL;
1475 writeq(val64, &bar0->gpio_control);
1476 val64 = 0x0411040400000000ULL;
/* Raw offset 0x2700 is not part of the XENA_dev_config_t layout here,
 * hence the explicit pointer arithmetic. */
1477 writeq(val64, (void __iomem *) bar0 + 0x2700);
1481 * Don't see link state interrupts on certain switches, so
1482 * directly scheduling a link state task from here.
1484 schedule_work(&nic->set_link_task);
1487 * Here we are performing soft reset on XGXS to
1488 * force link down. Since link is already up, we will get
1489 * link state change interrupt after this reset
/* Magic DTX control sequence: assert, then de-assert, the XGXS reset. */
1491 SPECIAL_REG_WRITE(0x80010515001E0000ULL, &bar0->dtx_control, UF);
1492 val64 = readq(&bar0->dtx_control);
1494 SPECIAL_REG_WRITE(0x80010515001E00E0ULL, &bar0->dtx_control, UF);
1495 val64 = readq(&bar0->dtx_control);
1497 SPECIAL_REG_WRITE(0x80070515001F00E4ULL, &bar0->dtx_control, UF);
1498 val64 = readq(&bar0->dtx_control);
1505 * free_tx_buffers - Free all queued Tx buffers
1506 * @nic : device private variable.
1508 * Free all queued Tx buffers.
1509 * Return Value: void
/*
 * Walks every descriptor of every Tx FIFO, frees any skb still attached
 * (stored in txdp->Host_Control), zeroes the descriptor, and resets the
 * per-FIFO get/put offsets so the FIFO is empty afterwards.
 */
1512 void free_tx_buffers(struct s2io_nic *nic)
1514 struct net_device *dev = nic->dev;
1515 struct sk_buff *skb;
1518 mac_info_t *mac_control;
1519 struct config_param *config;
1522 mac_control = &nic->mac_control;
1523 config = &nic->config;
1525 for (i = 0; i < config->tx_fifo_num; i++) {
/* fifo_len - 1 usable descriptors per FIFO. */
1526 for (j = 0; j < config->tx_cfg[i].fifo_len - 1; j++) {
1527 txdp = (TxD_t *) nic->list_info[i][j].
/* Host_Control holds the skb pointer stashed at xmit time. */
1530 (struct sk_buff *) ((unsigned long) txdp->
1533 memset(txdp, 0, sizeof(TxD_t));
1537 memset(txdp, 0, sizeof(TxD_t));
1541 "%s:forcibly freeing %d skbs on FIFO%d\n",
/* Reset ring indices so the FIFO restarts from descriptor 0. */
1543 mac_control->tx_curr_get_info[i].offset = 0;
1544 mac_control->tx_curr_put_info[i].offset = 0;
1549 * stop_nic - To stop the nic
1550 * @nic ; device private variable.
1552 * This function does exactly the opposite of what the start_nic()
1553 * function does. This function is called to stop the device.
/*
 * Masks the interrupt sources start_nic() enabled, then clears the
 * RC-enable bit of every PRC so the receive controllers stop fetching
 * descriptors.
 */
1558 static void stop_nic(struct s2io_nic *nic)
1560 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1561 register u64 val64 = 0;
1562 u16 interruptible, i;
1563 mac_info_t *mac_control;
1564 struct config_param *config;
1566 mac_control = &nic->mac_control;
1567 config = &nic->config;
1569 /* Disable all interrupts */
1570 interruptible = TX_TRAFFIC_INTR | RX_TRAFFIC_INTR | TX_MAC_INTR |
1572 en_dis_able_nic_intrs(nic, interruptible, DISABLE_INTRS);
/* Stop each receive ring controller (inverse of the PRC setup done in
 * start_nic()). */
1575 for (i = 0; i < config->rx_ring_num; i++) {
1576 val64 = readq(&bar0->prc_ctrl_n[i]);
1577 val64 &= ~((u64) PRC_CTRL_RC_ENABLED);
1578 writeq(val64, &bar0->prc_ctrl_n[i]);
1583 * fill_rx_buffers - Allocates the Rx side skbs
1584 * @nic: device private variable
1585 * @ring_no: ring number
1587 * The function allocates Rx side skbs and puts the physical
1588 * address of these buffers into the RxD buffer pointers, so that the NIC
1589 * can DMA the received frame into these locations.
1590 * The NIC supports 3 receive modes, viz
1592 * 2. three buffer and
1593 * 3. Five buffer modes.
1594 * Each mode defines how many fragments the received frame will be split
1595 * up into by the NIC. The frame is split into L3 header, L4 Header,
1596 * L4 payload in three buffer mode and in 5 buffer mode, L4 payload itself
1597 * is split into 3 fragments. As of now only single buffer mode is
1600 * SUCCESS on success or an appropriate -ve value on failure.
1603 int fill_rx_buffers(struct s2io_nic *nic, int ring_no)
1605 struct net_device *dev = nic->dev;
1606 struct sk_buff *skb;
1608 int off, off1, size, block_no, block_no1;
1609 int offset, offset1;
/* Replenish only as many RxDs as are currently without a buffer. */
1611 u32 alloc_cnt = nic->pkt_cnt[ring_no] -
1612 atomic_read(&nic->rx_bufs_left[ring_no]);
1613 mac_info_t *mac_control;
1614 struct config_param *config;
1615 #ifdef CONFIG_2BUFF_MODE
1620 dma_addr_t rxdpphys;
1622 #ifndef CONFIG_S2IO_NAPI
1623 unsigned long flags;
1626 mac_control = &nic->mac_control;
1627 config = &nic->config;
/* Single-buffer Rx size: MTU plus all possible L2 header overheads. */
1629 size = dev->mtu + HEADER_ETHERNET_II_802_3_SIZE +
1630 HEADER_802_2_SIZE + HEADER_SNAP_SIZE;
1632 while (alloc_tab < alloc_cnt) {
/* Locate the producer (put) and consumer (get) positions; the ring
 * must never be filled past the consumer. */
1633 block_no = mac_control->rx_curr_put_info[ring_no].
1635 block_no1 = mac_control->rx_curr_get_info[ring_no].
1637 off = mac_control->rx_curr_put_info[ring_no].offset;
1638 off1 = mac_control->rx_curr_get_info[ring_no].offset;
1639 #ifndef CONFIG_2BUFF_MODE
/* +1: single-buffer blocks carry an extra end-of-block descriptor. */
1640 offset = block_no * (MAX_RXDS_PER_BLOCK + 1) + off;
1641 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK + 1) + off1;
1643 offset = block_no * (MAX_RXDS_PER_BLOCK) + off;
1644 offset1 = block_no1 * (MAX_RXDS_PER_BLOCK) + off1;
1647 rxdp = nic->rx_blocks[ring_no][block_no].
1648 block_virt_addr + off;
/* Producer caught up with consumer on an in-use RxD: ring full. */
1649 if ((offset == offset1) && (rxdp->Host_Control)) {
1650 DBG_PRINT(INTR_DBG, "%s: Get and Put", dev->name);
1651 DBG_PRINT(INTR_DBG, " info equated\n");
1654 #ifndef CONFIG_2BUFF_MODE
/* End-of-block marker: advance to the next Rx block (wrapping) and
 * follow the link stored in Control_2 to its first RxD. */
1655 if (rxdp->Control_1 == END_OF_BLOCK) {
1656 mac_control->rx_curr_put_info[ring_no].
1658 mac_control->rx_curr_put_info[ring_no].
1659 block_index %= nic->block_count[ring_no];
1660 block_no = mac_control->rx_curr_put_info
1661 [ring_no].block_index;
1663 off %= (MAX_RXDS_PER_BLOCK + 1);
1664 mac_control->rx_curr_put_info[ring_no].offset =
1666 rxdp = (RxD_t *) ((unsigned long) rxdp->Control_2);
1667 DBG_PRINT(INTR_DBG, "%s: Next block at: %p\n",
/* Non-NAPI: publish the absolute put position under put_lock so the
 * interrupt handler sees a consistent value. */
1670 #ifndef CONFIG_S2IO_NAPI
1671 spin_lock_irqsave(&nic->put_lock, flags);
1672 nic->put_pos[ring_no] =
1673 (block_no * (MAX_RXDS_PER_BLOCK + 1)) + off;
1674 spin_unlock_irqrestore(&nic->put_lock, flags);
/* 2-buffer mode: end-of-block marker lives in Host_Control instead. */
1677 if (rxdp->Host_Control == END_OF_BLOCK) {
1678 mac_control->rx_curr_put_info[ring_no].
1680 mac_control->rx_curr_put_info[ring_no].
1681 block_index %= nic->block_count[ring_no];
1682 block_no = mac_control->rx_curr_put_info
1683 [ring_no].block_index;
1685 DBG_PRINT(INTR_DBG, "%s: block%d at: 0x%llx\n",
1686 dev->name, block_no,
1687 (unsigned long long) rxdp->Control_1);
1688 mac_control->rx_curr_put_info[ring_no].offset =
1690 rxdp = nic->rx_blocks[ring_no][block_no].
1693 #ifndef CONFIG_S2IO_NAPI
1694 spin_lock_irqsave(&nic->put_lock, flags);
1695 nic->put_pos[ring_no] = (block_no *
1696 (MAX_RXDS_PER_BLOCK + 1)) + off;
1697 spin_unlock_irqrestore(&nic->put_lock, flags);
/* Stop if this descriptor is still owned by the NIC (or, in 2-buffer
 * mode, still flagged buffer-empty in Control_2 bit 0). */
1701 #ifndef CONFIG_2BUFF_MODE
1702 if (rxdp->Control_1 & RXD_OWN_XENA)
1704 if (rxdp->Control_2 & BIT(0))
1707 mac_control->rx_curr_put_info[ring_no].
1711 #ifdef CONFIG_2BUFF_MODE
1713 * RxDs Spanning cache lines will be replenished only
1714 * if the succeeding RxD is also owned by Host. It
1715 * will always be the ((8*i)+3) and ((8*i)+6)
1716 * descriptors for the 48 byte descriptor. The offending
1717 * decsriptor is of-course the 3rd descriptor.
1719 rxdpphys = nic->rx_blocks[ring_no][block_no].
1720 block_dma_addr + (off * sizeof(RxD_t));
/* Descriptor straddles a 128-byte cache line (offset within line
 * beyond 80 bytes): also require the next RxD to be host-owned. */
1721 if (((u64) (rxdpphys)) % 128 > 80) {
1722 rxdpnext = nic->rx_blocks[ring_no][block_no].
1723 block_virt_addr + (off + 1);
1724 if (rxdpnext->Host_Control == END_OF_BLOCK) {
1725 nextblk = (block_no + 1) %
1726 (nic->block_count[ring_no]);
1727 rxdpnext = nic->rx_blocks[ring_no]
1728 [nextblk].block_virt_addr;
1730 if (rxdpnext->Control_2 & BIT(0))
1735 #ifndef CONFIG_2BUFF_MODE
1736 skb = dev_alloc_skb(size + NET_IP_ALIGN);
1738 skb = dev_alloc_skb(dev->mtu + ALIGN_SIZE + BUF0_LEN + 4);
1741 DBG_PRINT(ERR_DBG, "%s: Out of ", dev->name);
1742 DBG_PRINT(ERR_DBG, "memory to allocate SKBs\n");
1745 #ifndef CONFIG_2BUFF_MODE
/* Single-buffer: map the whole frame buffer into Buffer0 and hand the
 * descriptor to the NIC by setting RXD_OWN_XENA last. */
1746 skb_reserve(skb, NET_IP_ALIGN);
1747 memset(rxdp, 0, sizeof(RxD_t));
1748 rxdp->Buffer0_ptr = pci_map_single
1749 (nic->pdev, skb->data, size, PCI_DMA_FROMDEVICE);
1750 rxdp->Control_2 &= (~MASK_BUFFER0_SIZE);
1751 rxdp->Control_2 |= SET_BUFFER0_SIZE(size);
1752 rxdp->Host_Control = (unsigned long) (skb);
1753 rxdp->Control_1 |= RXD_OWN_XENA;
1755 off %= (MAX_RXDS_PER_BLOCK + 1);
1756 mac_control->rx_curr_put_info[ring_no].offset = off;
/* 2-buffer: ba holds the side buffers for L3/L4 headers (Buffer0/1);
 * the skb data area (aligned below) receives the payload (Buffer2). */
1758 ba = &nic->ba[ring_no][block_no][off];
1759 skb_reserve(skb, BUF0_LEN);
1760 tmp = (unsigned long) skb->data;
/* NOTE(review): the alignment arithmetic on 'tmp' between these lines
 * is missing from this listing. */
1763 skb->data = (void *) tmp;
1764 skb->tail = (void *) tmp;
1766 memset(rxdp, 0, sizeof(RxD_t));
1767 rxdp->Buffer2_ptr = pci_map_single
1768 (nic->pdev, skb->data, dev->mtu + BUF0_LEN + 4,
1769 PCI_DMA_FROMDEVICE);
1771 pci_map_single(nic->pdev, ba->ba_0, BUF0_LEN,
1772 PCI_DMA_FROMDEVICE);
1774 pci_map_single(nic->pdev, ba->ba_1, BUF1_LEN,
1775 PCI_DMA_FROMDEVICE);
1777 rxdp->Control_2 = SET_BUFFER2_SIZE(dev->mtu + 4);
1778 rxdp->Control_2 |= SET_BUFFER0_SIZE(BUF0_LEN);
1779 rxdp->Control_2 |= SET_BUFFER1_SIZE(1); /* dummy. */
1780 rxdp->Control_2 |= BIT(0); /* Set Buffer_Empty bit. */
1781 rxdp->Host_Control = (u64) ((unsigned long) (skb));
1782 rxdp->Control_1 |= RXD_OWN_XENA;
1784 mac_control->rx_curr_put_info[ring_no].offset = off;
1786 atomic_inc(&nic->rx_bufs_left[ring_no]);
1795 * free_rx_buffers - Frees all Rx buffers
1796 * @sp: device private variable.
1798 * This function will free all Rx buffers allocated by host.
/*
 * Inverse of fill_rx_buffers(): walks every RxD of every ring, unmaps
 * the DMA buffers, frees the attached skb, zeroes the descriptor, and
 * resets the ring's get/put bookkeeping.
 */
1803 static void free_rx_buffers(struct s2io_nic *sp)
1805 struct net_device *dev = sp->dev;
1806 int i, j, blk = 0, off, buf_cnt = 0;
1808 struct sk_buff *skb;
1809 mac_info_t *mac_control;
1810 struct config_param *config;
1811 #ifdef CONFIG_2BUFF_MODE
1815 mac_control = &sp->mac_control;
1816 config = &sp->config;
1818 for (i = 0; i < config->rx_ring_num; i++) {
1819 for (j = 0, blk = 0; j < config->rx_cfg[i].num_rxd; j++) {
1820 off = j % (MAX_RXDS_PER_BLOCK + 1);
1821 rxdp = sp->rx_blocks[i][blk].block_virt_addr + off;
/* End-of-block markers: follow the block link (single-buffer mode
 * keeps it in Control_1, 2-buffer mode in Host_Control). */
1823 #ifndef CONFIG_2BUFF_MODE
1824 if (rxdp->Control_1 == END_OF_BLOCK) {
1826 (RxD_t *) ((unsigned long) rxdp->
1832 if (rxdp->Host_Control == END_OF_BLOCK) {
/* Descriptor not owned by the NIC means no buffer attached: just
 * clear it and move on. */
1838 if (!(rxdp->Control_1 & RXD_OWN_XENA)) {
1839 memset(rxdp, 0, sizeof(RxD_t));
1844 (struct sk_buff *) ((unsigned long) rxdp->
1847 #ifndef CONFIG_2BUFF_MODE
/* Unmap with the same size fill_rx_buffers() mapped. */
1848 pci_unmap_single(sp->pdev, (dma_addr_t)
1851 HEADER_ETHERNET_II_802_3_SIZE
1852 + HEADER_802_2_SIZE +
1854 PCI_DMA_FROMDEVICE);
1856 ba = &sp->ba[i][blk][off];
1857 pci_unmap_single(sp->pdev, (dma_addr_t)
1860 PCI_DMA_FROMDEVICE);
1861 pci_unmap_single(sp->pdev, (dma_addr_t)
1864 PCI_DMA_FROMDEVICE);
1865 pci_unmap_single(sp->pdev, (dma_addr_t)
1867 dev->mtu + BUF0_LEN + 4,
1868 PCI_DMA_FROMDEVICE);
1871 atomic_dec(&sp->rx_bufs_left[i]);
1874 memset(rxdp, 0, sizeof(RxD_t));
/* Ring is now empty: reset producer/consumer state. */
1876 mac_control->rx_curr_put_info[i].block_index = 0;
1877 mac_control->rx_curr_get_info[i].block_index = 0;
1878 mac_control->rx_curr_put_info[i].offset = 0;
1879 mac_control->rx_curr_get_info[i].offset = 0;
1880 atomic_set(&sp->rx_bufs_left[i], 0);
1881 DBG_PRINT(INIT_DBG, "%s:Freed 0x%x Rx Buffers on ring%d\n",
1882 dev->name, buf_cnt, i);
1887 * s2io_poll - Rx interrupt handler for NAPI support
1888 * @dev : pointer to the device structure.
1889 * @budget : The number of packets that were budgeted to be processed
1890 * during one pass through the 'Poll" function.
1892 * Comes into picture only if NAPI support has been incorporated. It does
1893 * the same thing that rx_intr_handler does, but not in a interrupt context
1894 * also It will process only a given number of packets.
1896 * 0 on success and 1 if there are No Rx packets to be processed.
1899 #ifdef CONFIG_S2IO_NAPI
/* Old-style NAPI poll (pre-napi_struct API): budget-limited variant of
 * rx_intr_handler(). */
1900 static int s2io_poll(struct net_device *dev, int *budget)
1902 nic_t *nic = dev->priv;
1903 XENA_dev_config_t __iomem *bar0 = nic->bar0;
1904 int pkts_to_process = *budget, pkt_cnt = 0;
1905 register u64 val64 = 0;
1906 rx_curr_get_info_t get_info, put_info;
1907 int i, get_block, put_block, get_offset, put_offset, ring_bufs;
1908 #ifndef CONFIG_2BUFF_MODE
1911 struct sk_buff *skb;
1913 mac_info_t *mac_control;
1914 struct config_param *config;
1915 #ifdef CONFIG_2BUFF_MODE
1919 mac_control = &nic->mac_control;
1920 config = &nic->config;
/* Never process more than the per-device quota. */
1922 if (pkts_to_process > dev->quota)
1923 pkts_to_process = dev->quota;
/* rx_traffic_int is R1 (write-1-to-clear): writing back the value read
 * acknowledges the pending Rx interrupt. */
1925 val64 = readq(&bar0->rx_traffic_int);
1926 writeq(val64, &bar0->rx_traffic_int);
1928 for (i = 0; i < config->rx_ring_num; i++) {
1929 get_info = mac_control->rx_curr_get_info[i];
1930 get_block = get_info.block_index;
1931 put_info = mac_control->rx_curr_put_info[i];
1932 put_block = put_info.block_index;
1933 ring_bufs = config->rx_cfg[i].num_rxd;
1934 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
1936 #ifndef CONFIG_2BUFF_MODE
1937 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1939 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
/* Consume host-owned descriptors until we hit the producer position
 * or exhaust the budget. */
1941 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1942 (((get_offset + 1) % ring_bufs) != put_offset)) {
1943 if (--pkts_to_process < 0) {
/* End-of-block: hop to the next block via the link descriptor. */
1946 if (rxdp->Control_1 == END_OF_BLOCK) {
1948 (RxD_t *) ((unsigned long) rxdp->
1952 (MAX_RXDS_PER_BLOCK + 1);
1954 get_block %= nic->block_count[i];
1955 mac_control->rx_curr_get_info[i].
1956 offset = get_info.offset;
1957 mac_control->rx_curr_get_info[i].
1958 block_index = get_block;
1962 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1965 (struct sk_buff *) ((unsigned long) rxdp->
1968 DBG_PRINT(ERR_DBG, "%s: The skb is ",
1970 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
/* Recover the frame length from BUFFER0_SIZE (top 16 bits). */
1973 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
1974 val16 = (u16) (val64 >> 48);
1975 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
1976 pci_unmap_single(nic->pdev, (dma_addr_t)
1979 HEADER_ETHERNET_II_802_3_SIZE +
1982 PCI_DMA_FROMDEVICE);
1983 rx_osm_handler(nic, val16, rxdp, i);
1986 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
1988 nic->rx_blocks[i][get_block].block_virt_addr +
1990 mac_control->rx_curr_get_info[i].offset =
1994 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
1996 put_offset = (put_block * (MAX_RXDS_PER_BLOCK + 1)) +
/* 2-buffer mode additionally requires the buffer-empty bit clear. */
1998 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
1999 !(rxdp->Control_2 & BIT(0))) &&
2000 (((get_offset + 1) % ring_bufs) != put_offset)) {
2001 if (--pkts_to_process < 0) {
2004 skb = (struct sk_buff *) ((unsigned long)
2005 rxdp->Host_Control);
2007 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2009 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2013 pci_unmap_single(nic->pdev, (dma_addr_t)
2015 BUF0_LEN, PCI_DMA_FROMDEVICE);
2016 pci_unmap_single(nic->pdev, (dma_addr_t)
2018 BUF1_LEN, PCI_DMA_FROMDEVICE);
2019 pci_unmap_single(nic->pdev, (dma_addr_t)
2021 dev->mtu + BUF0_LEN + 4,
2022 PCI_DMA_FROMDEVICE);
2023 ba = &nic->ba[i][get_block][get_info.offset];
2025 rx_osm_handler(nic, rxdp, i, ba);
2028 mac_control->rx_curr_get_info[i].offset =
2031 nic->rx_blocks[i][get_block].block_virt_addr +
2034 if (get_info.offset &&
2035 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2036 get_info.offset = 0;
2037 mac_control->rx_curr_get_info[i].
2038 offset = get_info.offset;
2040 get_block %= nic->block_count[i];
2041 mac_control->rx_curr_get_info[i].
2042 block_index = get_block;
2044 nic->rx_blocks[i][get_block].
2048 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
/* Budget not exhausted: account packets, complete NAPI, replenish
 * buffers, and re-enable Rx interrupts. */
2057 dev->quota -= pkt_cnt;
2059 netif_rx_complete(dev);
2061 for (i = 0; i < config->rx_ring_num; i++) {
2062 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2063 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2064 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2068 /* Re enable the Rx interrupts. */
2069 en_dis_able_nic_intrs(nic, RX_TRAFFIC_INTR, ENABLE_INTRS);
/* Budget exhausted path: account and replenish, but stay in polling
 * mode (no netif_rx_complete, interrupts remain masked). */
2073 dev->quota -= pkt_cnt;
2076 for (i = 0; i < config->rx_ring_num; i++) {
2077 if (fill_rx_buffers(nic, i) == -ENOMEM) {
2078 DBG_PRINT(ERR_DBG, "%s:Out of memory", dev->name);
2079 DBG_PRINT(ERR_DBG, " in Rx Poll!!\n");
2087 * rx_intr_handler - Rx interrupt handler
2088 * @nic: device private variable.
2090 * If the interrupt is because of a received frame or if the
2091 * receive ring contains fresh as yet un-processed frames,this function is
2092 * called. It picks out the RxD at which place the last Rx processing had
2093 * stopped and sends the skb to the OSM's Rx handler and then increments
/* Non-NAPI Rx path; runs in interrupt context, bounded only by the
 * indicate_max_pkts module parameter. */
2099 static void rx_intr_handler(struct s2io_nic *nic)
2101 struct net_device *dev = (struct net_device *) nic->dev;
2102 XENA_dev_config_t *bar0 = (XENA_dev_config_t *) nic->bar0;
2103 rx_curr_get_info_t get_info, put_info;
2105 struct sk_buff *skb;
2106 #ifndef CONFIG_2BUFF_MODE
2109 register u64 val64 = 0;
2110 int get_block, get_offset, put_block, put_offset, ring_bufs;
2112 mac_info_t *mac_control;
2113 struct config_param *config;
2114 #ifdef CONFIG_2BUFF_MODE
2118 mac_control = &nic->mac_control;
2119 config = &nic->config;
2122 * rx_traffic_int reg is an R1 register, hence we read and write back
2123 * the samevalue in the register to clear it.
2125 val64 = readq(&bar0->rx_traffic_int);
2126 writeq(val64, &bar0->rx_traffic_int);
2128 for (i = 0; i < config->rx_ring_num; i++) {
2129 get_info = mac_control->rx_curr_get_info[i];
2130 get_block = get_info.block_index;
2131 put_info = mac_control->rx_curr_put_info[i];
2132 put_block = put_info.block_index;
2133 ring_bufs = config->rx_cfg[i].num_rxd;
2134 rxdp = nic->rx_blocks[i][get_block].block_virt_addr +
2136 #ifndef CONFIG_2BUFF_MODE
2137 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
/* put_pos is written by fill_rx_buffers() under the same lock; read
 * it consistently before scanning the ring. */
2139 spin_lock(&nic->put_lock);
2140 put_offset = nic->put_pos[i];
2141 spin_unlock(&nic->put_lock);
2142 while ((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
2143 (((get_offset + 1) % ring_bufs) != put_offset)) {
/* End-of-block: follow the link descriptor to the next block. */
2144 if (rxdp->Control_1 == END_OF_BLOCK) {
2145 rxdp = (RxD_t *) ((unsigned long)
2149 (MAX_RXDS_PER_BLOCK + 1);
2151 get_block %= nic->block_count[i];
2152 mac_control->rx_curr_get_info[i].
2153 offset = get_info.offset;
2154 mac_control->rx_curr_get_info[i].
2155 block_index = get_block;
2159 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2161 skb = (struct sk_buff *) ((unsigned long)
2162 rxdp->Host_Control);
2164 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2166 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
/* Frame length is carried in the top 16 bits of BUFFER0_SIZE. */
2169 val64 = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
2170 val16 = (u16) (val64 >> 48);
2171 cksum = RXD_GET_L4_CKSUM(rxdp->Control_1);
2172 pci_unmap_single(nic->pdev, (dma_addr_t)
2175 HEADER_ETHERNET_II_802_3_SIZE +
2178 PCI_DMA_FROMDEVICE);
2179 rx_osm_handler(nic, val16, rxdp, i);
2181 get_info.offset %= (MAX_RXDS_PER_BLOCK + 1);
2183 nic->rx_blocks[i][get_block].block_virt_addr +
2185 mac_control->rx_curr_get_info[i].offset =
/* Cap work per invocation if indicate_max_pkts is set. */
2188 if ((indicate_max_pkts)
2189 && (pkt_cnt > indicate_max_pkts))
2193 get_offset = (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2195 spin_lock(&nic->put_lock);
2196 put_offset = nic->put_pos[i];
2197 spin_unlock(&nic->put_lock);
/* 2-buffer mode: descriptor is complete only when both the OWN bit
 * and the buffer-empty bit (Control_2 bit 0) are clear. */
2198 while (((!(rxdp->Control_1 & RXD_OWN_XENA)) &&
2199 !(rxdp->Control_2 & BIT(0))) &&
2200 (((get_offset + 1) % ring_bufs) != put_offset)) {
2201 skb = (struct sk_buff *) ((unsigned long)
2202 rxdp->Host_Control);
2204 DBG_PRINT(ERR_DBG, "%s: The skb is ",
2206 DBG_PRINT(ERR_DBG, "Null in Rx Intr\n");
2210 pci_unmap_single(nic->pdev, (dma_addr_t)
2212 BUF0_LEN, PCI_DMA_FROMDEVICE);
2213 pci_unmap_single(nic->pdev, (dma_addr_t)
2215 BUF1_LEN, PCI_DMA_FROMDEVICE);
2216 pci_unmap_single(nic->pdev, (dma_addr_t)
2218 dev->mtu + BUF0_LEN + 4,
2219 PCI_DMA_FROMDEVICE);
2220 ba = &nic->ba[i][get_block][get_info.offset];
2222 rx_osm_handler(nic, rxdp, i, ba);
2225 mac_control->rx_curr_get_info[i].offset =
2228 nic->rx_blocks[i][get_block].block_virt_addr +
2231 if (get_info.offset &&
2232 (!(get_info.offset % MAX_RXDS_PER_BLOCK))) {
2233 get_info.offset = 0;
2234 mac_control->rx_curr_get_info[i].
2235 offset = get_info.offset;
2237 get_block %= nic->block_count[i];
2238 mac_control->rx_curr_get_info[i].
2239 block_index = get_block;
2241 nic->rx_blocks[i][get_block].
2245 (get_block * (MAX_RXDS_PER_BLOCK + 1)) +
2248 if ((indicate_max_pkts)
2249 && (pkt_cnt > indicate_max_pkts))
2253 if ((indicate_max_pkts) && (pkt_cnt > indicate_max_pkts))
2259 * tx_intr_handler - Transmit interrupt handler
2260 * @nic : device private variable
2262 * If an interrupt was raised to indicate DMA complete of the
2263 * Tx packet, this function is called. It identifies the last TxD
2264 * whose buffer was freed and frees all skbs whose data have already
2265 * DMA'ed into the NICs internal memory.
2270 static void tx_intr_handler(struct s2io_nic *nic)
2272 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2273 struct net_device *dev = (struct net_device *) nic->dev;
2274 tx_curr_get_info_t get_info, put_info;
2275 struct sk_buff *skb;
2277 register u64 val64 = 0;
2280 mac_info_t *mac_control;
2281 struct config_param *config;
2283 mac_control = &nic->mac_control;
2284 config = &nic->config;
2287 * tx_traffic_int reg is an R1 register, hence we read and write
2288 * back the samevalue in the register to clear it.
2290 val64 = readq(&bar0->tx_traffic_int);
2291 writeq(val64, &bar0->tx_traffic_int);
2293 for (i = 0; i < config->tx_fifo_num; i++) {
2294 get_info = mac_control->tx_curr_get_info[i];
2295 put_info = mac_control->tx_curr_put_info[i];
2296 txdlp = (TxD_t *) nic->list_info[i][get_info.offset].
/* Reap completed TxD lists: stop at the first descriptor still owned
 * by the NIC, at the producer position, or at an empty slot. */
2298 while ((!(txdlp->Control_1 & TXD_LIST_OWN_XENA)) &&
2299 (get_info.offset != put_info.offset) &&
2300 (txdlp->Host_Control)) {
2301 /* Check for TxD errors */
2302 if (txdlp->Control_1 & TXD_T_CODE) {
2303 unsigned long long err;
2304 err = txdlp->Control_1 & TXD_T_CODE;
2305 DBG_PRINT(ERR_DBG, "***TxD error %llx\n",
2309 skb = (struct sk_buff *) ((unsigned long)
2310 txdlp->Host_Control);
2312 DBG_PRINT(ERR_DBG, "%s: Null skb ",
2314 DBG_PRINT(ERR_DBG, "in Tx Free Intr\n");
2317 nic->tx_pkt_count++;
2319 frg_cnt = skb_shinfo(skb)->nr_frags;
/* Linear part of the skb first ... */
2321 /* For unfragmented skb */
2322 pci_unmap_single(nic->pdev, (dma_addr_t)
2323 txdlp->Buffer_Pointer,
2324 skb->len - skb->data_len,
/* ... then one TxD per page fragment. */
2327 TxD_t *temp = txdlp;
2329 for (j = 0; j < frg_cnt; j++, txdlp++) {
2331 &skb_shinfo(skb)->frags[j];
2332 pci_unmap_page(nic->pdev,
/* Clear the whole descriptor list for reuse. */
2342 (sizeof(TxD_t) * config->max_txds);
2344 /* Updating the statistics block */
2345 nic->stats.tx_packets++;
2346 nic->stats.tx_bytes += skb->len;
/* In-interrupt skb free. */
2347 dev_kfree_skb_irq(skb);
2350 get_info.offset %= get_info.fifo_len + 1;
2351 txdlp = (TxD_t *) nic->list_info[i]
2352 [get_info.offset].list_virt_addr;
2353 mac_control->tx_curr_get_info[i].offset =
/* Descriptors were reclaimed: restart the queue if the xmit path had
 * stopped it for lack of TxDs. */
2358 spin_lock(&nic->tx_lock);
2359 if (netif_queue_stopped(dev))
2360 netif_wake_queue(dev);
2361 spin_unlock(&nic->tx_lock);
2365 * alarm_intr_handler - Alarm Interrrupt handler
2366 * @nic: device private variable
2367 * Description: If the interrupt was neither because of Rx packet or Tx
2368 * complete, this function is called. If the interrupt was to indicate
2369 * a loss of link, the OSM link status handler is invoked for any other
2370 * alarm interrupt the block that raised the interrupt is displayed
2371 * and a H/W reset is issued.
2376 static void alarm_intr_handler(struct s2io_nic *nic)
2378 struct net_device *dev = (struct net_device *) nic->dev;
2379 XENA_dev_config_t __iomem *bar0 = nic->bar0;
2380 register u64 val64 = 0, err_reg = 0;
/* mac_rmac_err_reg is write-1-to-clear: the write-back acknowledges
 * whatever was latched. */
2382 /* Handling link status change error Intr */
2383 err_reg = readq(&bar0->mac_rmac_err_reg);
2384 writeq(err_reg, &bar0->mac_rmac_err_reg);
2385 if (err_reg & RMAC_LINK_STATE_CHANGE_INT) {
/* Defer link handling to process context via the set_link workqueue. */
2386 schedule_work(&nic->set_link_task);
2389 /* In case of a serious error, the device will be Reset. */
2390 val64 = readq(&bar0->serr_source);
2391 if (val64 & SERR_SOURCE_ANY) {
2392 DBG_PRINT(ERR_DBG, "%s: Device indicates ", dev->name);
2393 DBG_PRINT(ERR_DBG, "serious error!!\n");
2394 netif_stop_queue(dev);
/* Full reset is also deferred to process context. */
2395 schedule_work(&nic->rst_timer_task);
2399 * Also as mentioned in the latest Errata sheets if the PCC_FB_ECC
2400 * Error occurs, the adapter will be recycled by disabling the
2401 * adapter enable bit and enabling it again after the device
2402 * becomes Quiescent.
2404 val64 = readq(&bar0->pcc_err_reg);
2405 writeq(val64, &bar0->pcc_err_reg);
2406 if (val64 & PCC_FB_ECC_DB_ERR) {
2407 u64 ac = readq(&bar0->adapter_control);
2408 ac &= ~(ADAPTER_CNTL_EN);
2409 writeq(ac, &bar0->adapter_control);
/* Read-back flushes the disable; set_link_task re-enables later. */
2410 ac = readq(&bar0->adapter_control);
2411 schedule_work(&nic->set_link_task);
2414 /* Other type of interrupts are not being handled now, TODO */
2418 * wait_for_cmd_complete - waits for a command to complete.
2419 * @sp : private member of the device structure, which is a pointer to the
2420 * s2io_nic structure.
2421 * Description: Function that waits for a command to Write into RMAC
2422 * ADDR DATA registers to be completed and returns either success or
2423 * error depending on whether the command was complete or not.
2425 * SUCCESS on success and FAILURE on failure.
/* Polls the STROBE_CMD_EXECUTING bit, sleeping ~50ms (HZ/20) between
 * polls; must therefore be called from process context. */
2428 int wait_for_cmd_complete(nic_t * sp)
2430 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2431 int ret = FAILURE, cnt = 0;
2435 val64 = readq(&bar0->rmac_addr_cmd_mem);
/* Strobe bit clear => the RMAC address command has finished. */
2436 if (!(val64 & RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING)) {
2440 set_current_state(TASK_UNINTERRUPTIBLE);
2441 schedule_timeout(HZ / 20);
2450 * s2io_reset - Resets the card.
2451 * @sp : private member of the device structure.
2452 * Description: Function to Reset the card. This function then also
2453 * restores the previously saved PCI configuration space registers as
2454 * the card reset also resets the configuration space.
2459 void s2io_reset(nic_t * sp)
2461 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* Writing SW_RESET_ALL resets the whole chip including its PCI config
 * space; no register may be read until the delay below elapses. */
2465 val64 = SW_RESET_ALL;
2466 writeq(val64, &bar0->sw_reset);
2469 * At this stage, if the PCI write is indeed completed, the
2470 * card is reset and so is the PCI Config space of the device.
2471 * So a read cannot be issued at this stage on any of the
2472 * registers to ensure the write into "sw_reset" register
2474 * Question: Is there any system call that will explicitly force
2475 * all the write commands still pending on the bus to be pushed
2477 * As of now I'am just giving a 250ms delay and hoping that the
2478 * PCI write to sw_reset register is done by this time.
2480 set_current_state(TASK_UNINTERRUPTIBLE);
2481 schedule_timeout(HZ / 4);
/* Config space was wiped by the reset; restore the copy saved at
 * probe time, then give the card another settle period. */
2483 /* Restore the PCI state saved during initializarion. */
2484 pci_restore_state(sp->pdev);
2487 set_current_state(TASK_UNINTERRUPTIBLE);
2488 schedule_timeout(HZ / 4);
/* Same SXE-002 LED workaround as in start_nic(), applied post-reset. */
2490 /* SXE-002: Configure link and activity LED to turn it off */
2491 subid = sp->pdev->subsystem_device;
2492 if ((subid & 0xFF) >= 0x07) {
2493 val64 = readq(&bar0->gpio_control);
2494 val64 |= 0x0000800000000000ULL;
2495 writeq(val64, &bar0->gpio_control);
2496 val64 = 0x0411040400000000ULL;
2497 writeq(val64, (void __iomem *) bar0 + 0x2700);
/* Reset clears the enable history used by verify_xena_quiescence(). */
2500 sp->device_enabled_once = FALSE;
2504 * s2io_set_swapper - to set the swapper controle on the card
2505 * @sp : private member of the device structure,
2506 * pointer to the s2io_nic structure.
2507 * Description: Function to set the swapper control on the card
2508 * correctly depending on the 'endianness' of the system.
2510 * SUCCESS on success and FAILURE on failure.
/*
 * Programs the XENA swapper-control register to match host endianness,
 * then verifies the setting by reading the PIF feed-back register,
 * which must read back 0x0123456789ABCDEF.
 * Returns SUCCESS or FAILURE (return statements elided in this chunk).
 * NOTE(review): extraction gaps — the #ifdef that selects between the
 * two SWAPPER_CTRL bit sets (presumably big- vs little-endian — TODO
 * confirm) is missing, so the two writeq sequences appear back to back.
 */
2513 int s2io_set_swapper(nic_t * sp)
2515 struct net_device *dev = sp->dev;
2516 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2520 * Set proper endian settings and verify the same by reading
2521 * the PIF Feed-back register.
2525 * The device by default set to a big endian format, so a
2526 * big endian driver need not set anything.
/* First enable everything, then narrow to the required swap bits. */
2528 writeq(0xffffffffffffffffULL, &bar0->swapper_ctrl);
2529 val64 = (SWAPPER_CTRL_PIF_R_FE |
2530 SWAPPER_CTRL_PIF_R_SE |
2531 SWAPPER_CTRL_PIF_W_FE |
2532 SWAPPER_CTRL_PIF_W_SE |
2533 SWAPPER_CTRL_TXP_FE |
2534 SWAPPER_CTRL_TXP_SE |
2535 SWAPPER_CTRL_TXD_R_FE |
2536 SWAPPER_CTRL_TXD_W_FE |
2537 SWAPPER_CTRL_TXF_R_FE |
2538 SWAPPER_CTRL_RXD_R_FE |
2539 SWAPPER_CTRL_RXD_W_FE |
2540 SWAPPER_CTRL_RXF_W_FE |
2541 SWAPPER_CTRL_XMSI_FE |
2542 SWAPPER_CTRL_XMSI_SE |
2543 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2544 writeq(val64, &bar0->swapper_ctrl);
2547 * Initially we enable all bits to make it accessible by the
2548 * driver, then we selectively enable only those bits that
/* Alternate bit set: adds the *_SE variants for TXD/RXD R/W paths. */
2551 writeq(0xffffffffffffffffULL, &bar0->swapper_ctrl);
2552 val64 = (SWAPPER_CTRL_PIF_R_FE |
2553 SWAPPER_CTRL_PIF_R_SE |
2554 SWAPPER_CTRL_PIF_W_FE |
2555 SWAPPER_CTRL_PIF_W_SE |
2556 SWAPPER_CTRL_TXP_FE |
2557 SWAPPER_CTRL_TXP_SE |
2558 SWAPPER_CTRL_TXD_R_FE |
2559 SWAPPER_CTRL_TXD_R_SE |
2560 SWAPPER_CTRL_TXD_W_FE |
2561 SWAPPER_CTRL_TXD_W_SE |
2562 SWAPPER_CTRL_TXF_R_FE |
2563 SWAPPER_CTRL_RXD_R_FE |
2564 SWAPPER_CTRL_RXD_R_SE |
2565 SWAPPER_CTRL_RXD_W_FE |
2566 SWAPPER_CTRL_RXD_W_SE |
2567 SWAPPER_CTRL_RXF_W_FE |
2568 SWAPPER_CTRL_XMSI_FE |
2569 SWAPPER_CTRL_XMSI_SE |
2570 SWAPPER_CTRL_STATS_FE | SWAPPER_CTRL_STATS_SE);
2571 writeq(val64, &bar0->swapper_ctrl);
2575 * Verifying if endian settings are accurate by reading a
2576 * feedback register.
2578 val64 = readq(&bar0->pif_rd_swapper_fb);
2579 if (val64 != 0x0123456789ABCDEFULL) {
2580 /* Endian settings are incorrect, calls for another dekko. */
2581 DBG_PRINT(ERR_DBG, "%s: Endian settings are wrong, ",
2583 DBG_PRINT(ERR_DBG, "feedback read %llx\n",
2584 (unsigned long long) val64);
2591 /* ********************************************************* *
2592 * Functions defined below concern the OS part of the driver *
2593 * ********************************************************* */
2596 * s2io_open - open entry point of the driver
2597 * @dev : pointer to the device structure.
2599 * This function is the open entry point of the driver. It mainly calls a
2600 * function to allocate Rx buffers and inserts them into the buffer
2601 * descriptors and then enables the Rx part of the NIC.
2603 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * netdev open entry point: forces carrier off, brings the card up
 * (s2io_card_up), registers the shared ISR, programs the unicast MAC
 * filter, and finally starts the Tx queue.
 * NOTE(review): extraction gaps — error-path cleanup (goto labels /
 * returns after each failure DBG_PRINT) is missing from this chunk.
 */
2607 int s2io_open(struct net_device *dev)
2609 nic_t *sp = dev->priv;
2613 * Make sure you have link off by default every time
2614 * Nic is initialized
2616 netif_carrier_off(dev);
2617 sp->last_link_state = LINK_DOWN;
2619 /* Initialize H/W and enable interrupts */
2620 if (s2io_card_up(sp)) {
2621 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
2626 /* After proper initialization of H/W, register ISR */
/* SA_SHIRQ: the IRQ line may be shared with other devices. */
2627 err = request_irq((int) sp->irq, s2io_isr, SA_SHIRQ,
2631 DBG_PRINT(ERR_DBG, "%s: ISR registration failed\n",
2636 if (s2io_set_mac_addr(dev, dev->dev_addr) == FAILURE) {
2637 DBG_PRINT(ERR_DBG, "Set Mac Address Failed\n");
2642 netif_start_queue(dev);
2647 * s2io_close -close entry point of the driver
2648 * @dev : device pointer.
2650 * This is the stop entry point of the driver. It needs to undo exactly
2651 * whatever was done by the open entry point,thus it's usually referred to
2652 * as the close function.Among other things this function mainly stops the
2653 * Rx side of the NIC and frees all the Rx buffers in the Rx rings.
2655 * 0 on success and an appropriate (-)ve integer as defined in errno.h
/*
 * netdev stop entry point: flushes deferred work, stops the Tx queue,
 * frees the IRQ and flags the device closed. The card-down call that
 * resets the NIC and frees Rx/Tx buffers sits in an elided line.
 */
2659 int s2io_close(struct net_device *dev)
2661 nic_t *sp = dev->priv;
/* Ensure rst_timer_task/set_link_task scheduled earlier have finished. */
2663 flush_scheduled_work();
2664 netif_stop_queue(dev);
2665 /* Reset card, kill tasklet and free Tx and Rx buffers. */
2668 free_irq(dev->irq, dev);
2669 sp->device_close_flag = TRUE; /* Device is shut down. */
2674 * s2io_xmit - Tx entry point of the driver
2675 * @skb : the socket buffer containing the Tx data.
2676 * @dev : device pointer.
2678 * This function is the Tx entry point of the driver. S2IO NIC supports
2679 * certain protocol assist features on Tx side, namely CSO, S/G, LSO.
2680 * NOTE: when device cant queue the pkt,just the trans_start variable will
2683 * 0 on success & 1 on failure.
/*
 * hard_start_xmit: builds a TxD list for the skb (linear part plus
 * page fragments), sets LSO/checksum-offload bits, hands the list to
 * the chosen Tx FIFO and advances the ring "put" pointer. Stops the
 * queue when no free TxDs remain. Serialized by sp->tx_lock.
 * NOTE(review): extraction gaps — queue selection, per-fragment txdp
 * advance, and several returns are missing; code left byte-identical.
 */
2686 int s2io_xmit(struct sk_buff *skb, struct net_device *dev)
2688 nic_t *sp = dev->priv;
2689 u16 frg_cnt, frg_len, i, queue, queue_len, put_off, get_off;
2692 TxFIFO_element_t __iomem *tx_fifo;
2693 unsigned long flags;
2697 mac_info_t *mac_control;
2698 struct config_param *config;
2699 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2701 mac_control = &sp->mac_control;
2702 config = &sp->config;
2704 DBG_PRINT(TX_DBG, "%s: In S2IO Tx routine\n", dev->name);
2705 spin_lock_irqsave(&sp->tx_lock, flags);
/* Drop the packet if the adapter is mid-reset. */
2707 if (atomic_read(&sp->card_state) == CARD_DOWN) {
2708 DBG_PRINT(ERR_DBG, "%s: Card going down for reset\n",
2710 spin_unlock_irqrestore(&sp->tx_lock, flags);
2715 put_off = (u16) mac_control->tx_curr_put_info[queue].offset;
2716 get_off = (u16) mac_control->tx_curr_get_info[queue].offset;
2717 txdp = (TxD_t *) sp->list_info[queue][put_off].list_virt_addr;
2719 queue_len = mac_control->tx_curr_put_info[queue].fifo_len + 1;
2720 /* Avoid "put" pointer going beyond "get" pointer */
/* Non-zero Host_Control means this TxD still owns an in-flight skb. */
2721 if (txdp->Host_Control || (((put_off + 1) % queue_len) == get_off)) {
2722 DBG_PRINT(ERR_DBG, "Error in xmit, No free TXDs.\n");
2723 netif_stop_queue(dev);
2725 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* Large-send offload: program the MSS from the skb's TSO size. */
2729 mss = skb_shinfo(skb)->tso_size;
2731 txdp->Control_1 |= TXD_TCP_LSO_EN;
2732 txdp->Control_1 |= TXD_TCP_LSO_MSS(mss);
2736 frg_cnt = skb_shinfo(skb)->nr_frags;
2737 frg_len = skb->len - skb->data_len;
/* Stash the skb pointer so the Tx-complete handler can free it. */
2739 txdp->Host_Control = (unsigned long) skb;
2740 txdp->Buffer_Pointer = pci_map_single
2741 (sp->pdev, skb->data, frg_len, PCI_DMA_TODEVICE);
2742 if (skb->ip_summed == CHECKSUM_HW) {
2744 (TXD_TX_CKO_IPV4_EN | TXD_TX_CKO_TCP_EN |
2748 txdp->Control_2 |= config->tx_intr_type;
2750 txdp->Control_1 |= (TXD_BUFFER0_SIZE(frg_len) |
2751 TXD_GATHER_CODE_FIRST);
2752 txdp->Control_1 |= TXD_LIST_OWN_XENA;
2754 /* For fragmented SKB. */
2755 for (i = 0; i < frg_cnt; i++) {
2756 skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
2758 txdp->Buffer_Pointer = (u64) pci_map_page
2759 (sp->pdev, frag->page, frag->page_offset,
2760 frag->size, PCI_DMA_TODEVICE);
2761 txdp->Control_1 |= TXD_BUFFER0_SIZE(frag->size);
2763 txdp->Control_1 |= TXD_GATHER_CODE_LAST;
/* Kick the FIFO: point it at the descriptor list's physical address. */
2765 tx_fifo = mac_control->tx_FIFO_start[queue];
2766 val64 = sp->list_info[queue][put_off].list_phy_addr;
2767 writeq(val64, &tx_fifo->TxDL_Pointer);
2769 val64 = (TX_FIFO_LAST_TXD_NUM(frg_cnt) | TX_FIFO_FIRST_LIST |
2773 val64 |= TX_FIFO_SPECIAL_FUNC;
2775 writeq(val64, &tx_fifo->List_Control);
2777 /* Perform a PCI read to flush previous writes */
2778 val64 = readq(&bar0->general_int_status);
2781 put_off %= mac_control->tx_curr_put_info[queue].fifo_len + 1;
2782 mac_control->tx_curr_put_info[queue].offset = put_off;
2784 /* Avoid "put" pointer going beyond "get" pointer */
2785 if (((put_off + 1) % queue_len) == get_off) {
2787 "No free TxDs for xmit, Put: 0x%x Get:0x%x\n",
2789 netif_stop_queue(dev);
2792 dev->trans_start = jiffies;
2793 spin_unlock_irqrestore(&sp->tx_lock, flags);
2799 * s2io_isr - ISR handler of the device .
2800 * @irq: the irq of the device.
2801 * @dev_id: a void pointer to the dev structure of the NIC.
2802 * @pt_regs: pointer to the registers pushed on the stack.
2803 * Description: This function is the ISR handler of the device. It
2804 * identifies the reason for the interrupt and calls the relevant
2805 * service routines. As a contingency measure, this ISR allocates the
2806 * recv buffers, if their numbers are below the panic value which is
2807 * presently set to 25% of the original number of rcv buffers allocated.
2809 * IRQ_HANDLED: will be returned if IRQ was handled by this routine
2810 * IRQ_NONE: will be returned if interrupt is not from our device
/*
 * Shared interrupt handler: reads general_int_status to find the
 * cause (Tx traffic, Rx traffic, alarms), dispatches to the matching
 * service routine, and — in non-NAPI builds — refills Rx buffers from
 * the ISR when the ring drops to PANIC level, else kicks the tasklet.
 * Returns IRQ_HANDLED / IRQ_NONE (return lines elided in this chunk).
 */
2812 static irqreturn_t s2io_isr(int irq, void *dev_id, struct pt_regs *regs)
2814 struct net_device *dev = (struct net_device *) dev_id;
2815 nic_t *sp = dev->priv;
2816 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2817 #ifndef CONFIG_S2IO_NAPI
2821 mac_info_t *mac_control;
2822 struct config_param *config;
2824 mac_control = &sp->mac_control;
2825 config = &sp->config;
2828 * Identify the cause for interrupt and call the appropriate
2829 * interrupt handler. Causes for the interrupt could be;
2833 * 4. Error in any functional blocks of the NIC.
2835 reason = readq(&bar0->general_int_status);
2838 /* The interrupt was not raised by Xena. */
2842 /* If Intr is because of Tx Traffic */
2843 if (reason & GEN_INTR_TXTRAFFIC) {
2844 tx_intr_handler(sp);
2847 /* If Intr is because of an error */
2848 if (reason & (GEN_ERROR_INTR))
2849 alarm_intr_handler(sp);
2851 #ifdef CONFIG_S2IO_NAPI
/* NAPI: mask Rx interrupts and let the poll routine do the work. */
2852 if (reason & GEN_INTR_RXTRAFFIC) {
2853 if (netif_rx_schedule_prep(dev)) {
2854 en_dis_able_nic_intrs(sp, RX_TRAFFIC_INTR,
2856 __netif_rx_schedule(dev);
2860 /* If Intr is because of Rx Traffic */
2861 if (reason & GEN_INTR_RXTRAFFIC) {
2862 rx_intr_handler(sp);
2867 * If the Rx buffer count is below the panic threshold then
2868 * reallocate the buffers from the interrupt handler itself,
2869 * else schedule a tasklet to reallocate the buffers.
2871 #ifndef CONFIG_S2IO_NAPI
2872 for (i = 0; i < config->rx_ring_num; i++) {
2873 int rxb_size = atomic_read(&sp->rx_bufs_left[i]);
2874 int level = rx_buffer_level(sp, rxb_size, i);
/* TASKLET_IN_USE doubles as a lock against the refill tasklet. */
2876 if ((level == PANIC) && (!TASKLET_IN_USE)) {
2877 DBG_PRINT(INTR_DBG, "%s: Rx BD hit ", dev->name);
2878 DBG_PRINT(INTR_DBG, "PANIC levels\n");
2879 if ((ret = fill_rx_buffers(sp, i)) == -ENOMEM) {
2880 DBG_PRINT(ERR_DBG, "%s:Out of memory",
2882 DBG_PRINT(ERR_DBG, " in ISR!!\n");
2883 clear_bit(0, (&sp->tasklet_status));
2886 clear_bit(0, (&sp->tasklet_status));
2887 } else if (level == LOW) {
2888 tasklet_schedule(&sp->task);
2897 * s2io_get_stats - Updates the device statistics structure.
2898 * @dev : pointer to the device structure.
2900 * This function updates the device statistics structure in the s2io_nic
2901 * structure and returns a pointer to the same.
2903 * pointer to the updated net_device_stats structure.
/*
 * netdev get_stats entry point: copies selected hardware MAC counters
 * (Tx errors, Rx drops, multicast, oversize frames) into the cached
 * net_device_stats structure and returns a pointer to it.
 */
2906 struct net_device_stats *s2io_get_stats(struct net_device *dev)
2908 nic_t *sp = dev->priv;
2909 mac_info_t *mac_control;
2910 struct config_param *config;
2912 mac_control = &sp->mac_control;
2913 config = &sp->config;
2915 sp->stats.tx_errors = mac_control->stats_info->tmac_any_err_frms;
2916 sp->stats.rx_errors = mac_control->stats_info->rmac_drop_frms;
2917 sp->stats.multicast = mac_control->stats_info->rmac_vld_mcst_frms;
/* Long frames are the only length-error class reported here. */
2918 sp->stats.rx_length_errors =
2919 mac_control->stats_info->rmac_long_frms;
2921 return (&sp->stats);
2925 * s2io_set_multicast - entry point for multicast address enable/disable.
2926 * @dev : pointer to the device structure
2928 * This function is a driver entry point which gets called by the kernel
2929 * whenever multicast addresses must be enabled/disabled. This also gets
2930 * called to set/reset promiscuous mode. Depending on the device flag, we
2931 * determine, if multicast address must be enabled or if promiscuous mode
2932 * is to be disabled etc.
/*
 * netdev set_multicast_list entry point: programs the RMAC address
 * filter memory to (a) enable/disable all-multicast, (b) enter/leave
 * promiscuous mode via the key-protected mac_cfg register, and (c)
 * mirror the kernel's per-device multicast list into the hardware
 * filter slots. Each filter write is a WE+STROBE command on
 * rmac_addr_cmd_mem, completed via wait_for_cmd_complete().
 * NOTE(review): extraction gaps — m_cast_flg updates, the mc_count
 * overflow comparison and per-address byte-shift lines are elided.
 */
2937 static void s2io_set_multicast(struct net_device *dev)
2940 struct dev_mc_list *mclist;
2941 nic_t *sp = dev->priv;
2942 XENA_dev_config_t __iomem *bar0 = sp->bar0;
2943 u64 val64 = 0, multi_mac = 0x010203040506ULL, mask =
2945 u64 dis_addr = 0xffffffffffffULL, mac_addr = 0;
2948 if ((dev->flags & IFF_ALLMULTI) && (!sp->m_cast_flg)) {
2949 /* Enable all Multicast addresses */
2950 writeq(RMAC_ADDR_DATA0_MEM_ADDR(multi_mac),
2951 &bar0->rmac_addr_data0_mem);
2952 writeq(RMAC_ADDR_DATA1_MEM_MASK(mask),
2953 &bar0->rmac_addr_data1_mem);
2954 val64 = RMAC_ADDR_CMD_MEM_WE |
2955 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2956 RMAC_ADDR_CMD_MEM_OFFSET(MAC_MC_ALL_MC_ADDR_OFFSET);
2957 writeq(val64, &bar0->rmac_addr_cmd_mem);
2958 /* Wait till command completes */
2959 wait_for_cmd_complete(sp);
2962 sp->all_multi_pos = MAC_MC_ALL_MC_ADDR_OFFSET;
2963 } else if ((dev->flags & IFF_ALLMULTI) && (sp->m_cast_flg)) {
2964 /* Disable all Multicast addresses */
2965 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
2966 &bar0->rmac_addr_data0_mem);
2967 val64 = RMAC_ADDR_CMD_MEM_WE |
2968 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
2969 RMAC_ADDR_CMD_MEM_OFFSET(sp->all_multi_pos);
2970 writeq(val64, &bar0->rmac_addr_cmd_mem);
2971 /* Wait till command completes */
2972 wait_for_cmd_complete(sp);
2975 sp->all_multi_pos = 0;
2978 if ((dev->flags & IFF_PROMISC) && (!sp->promisc_flg)) {
2979 /* Put the NIC into promiscuous mode */
2980 add = &bar0->mac_cfg;
2981 val64 = readq(&bar0->mac_cfg);
2982 val64 |= MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: unlock with RMAC_CFG_KEY before each
 * 32-bit half-write. */
2984 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2985 writel((u32) val64, add);
2986 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
2987 writel((u32) (val64 >> 32), (add + 4));
2989 val64 = readq(&bar0->mac_cfg);
2990 sp->promisc_flg = 1;
2991 DBG_PRINT(ERR_DBG, "%s: entered promiscuous mode\n",
2993 } else if (!(dev->flags & IFF_PROMISC) && (sp->promisc_flg)) {
2994 /* Remove the NIC from promiscuous mode */
2995 add = &bar0->mac_cfg;
2996 val64 = readq(&bar0->mac_cfg);
2997 val64 &= ~MAC_CFG_RMAC_PROM_ENABLE;
2999 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3000 writel((u32) val64, add);
3001 writeq(RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
3002 writel((u32) (val64 >> 32), (add + 4));
3004 val64 = readq(&bar0->mac_cfg);
3005 sp->promisc_flg = 0;
3006 DBG_PRINT(ERR_DBG, "%s: left promiscuous mode\n",
3010 /* Update individual M_CAST address list */
3011 if ((!sp->m_cast_flg) && dev->mc_count) {
3013 (MAX_ADDRS_SUPPORTED - MAC_MC_ADDR_START_OFFSET - 1)) {
3014 DBG_PRINT(ERR_DBG, "%s: No more Rx filters ",
3016 DBG_PRINT(ERR_DBG, "can be added, please enable ");
3017 DBG_PRINT(ERR_DBG, "ALL_MULTI instead\n");
3021 prev_cnt = sp->mc_addr_count;
3022 sp->mc_addr_count = dev->mc_count;
3024 /* Clear out the previous list of Mc in the H/W. */
3025 for (i = 0; i < prev_cnt; i++) {
3026 writeq(RMAC_ADDR_DATA0_MEM_ADDR(dis_addr),
3027 &bar0->rmac_addr_data0_mem);
3028 val64 = RMAC_ADDR_CMD_MEM_WE |
3029 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3030 RMAC_ADDR_CMD_MEM_OFFSET
3031 (MAC_MC_ADDR_START_OFFSET + i);
3032 writeq(val64, &bar0->rmac_addr_cmd_mem);
3034 /* Wait for command completes */
3035 if (wait_for_cmd_complete(sp)) {
3036 DBG_PRINT(ERR_DBG, "%s: Adding ",
3038 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3043 /* Create the new Rx filter list and update the same in H/W. */
3044 for (i = 0, mclist = dev->mc_list; i < dev->mc_count;
3045 i++, mclist = mclist->next) {
3046 memcpy(sp->usr_addrs[i].addr, mclist->dmi_addr,
/* Pack the 6 address bytes into the low 48 bits of mac_addr. */
3048 for (j = 0; j < ETH_ALEN; j++) {
3049 mac_addr |= mclist->dmi_addr[j];
3052 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3053 &bar0->rmac_addr_data0_mem);
3055 val64 = RMAC_ADDR_CMD_MEM_WE |
3056 RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3057 RMAC_ADDR_CMD_MEM_OFFSET
3058 (i + MAC_MC_ADDR_START_OFFSET);
3059 writeq(val64, &bar0->rmac_addr_cmd_mem);
3061 /* Wait for command completes */
3062 if (wait_for_cmd_complete(sp)) {
3063 DBG_PRINT(ERR_DBG, "%s: Adding ",
3065 DBG_PRINT(ERR_DBG, "Multicasts failed\n");
3073 * s2io_set_mac_addr - Programs the Xframe mac address
3074 * @dev : pointer to the device structure.
3075 * @addr: a uchar pointer to the new mac address which is to be set.
3076 * Description : This procedure will program the Xframe to receive
3077 * frames with new Mac Address
3078 * Return value: SUCCESS on success and an appropriate (-)ve integer
3079 * as defined in errno.h file on failure.
/*
 * Programs filter slot 0 (the unicast filter) with the given MAC
 * address via a WE+STROBE command on rmac_addr_cmd_mem. Returns
 * SUCCESS/FAILURE (success return elided in this chunk).
 */
3082 int s2io_set_mac_addr(struct net_device *dev, u8 * addr)
3084 nic_t *sp = dev->priv;
3085 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3086 register u64 val64, mac_addr = 0;
3090 * Set the new MAC address as the new unicast filter and reflect this
3091 * change on the device address registered with the OS. It will be
/* Pack the 6 address bytes into the low 48 bits (shift line elided). */
3094 for (i = 0; i < ETH_ALEN; i++) {
3096 mac_addr |= addr[i];
3099 writeq(RMAC_ADDR_DATA0_MEM_ADDR(mac_addr),
3100 &bar0->rmac_addr_data0_mem);
3103 RMAC_ADDR_CMD_MEM_WE | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
3104 RMAC_ADDR_CMD_MEM_OFFSET(0);
3105 writeq(val64, &bar0->rmac_addr_cmd_mem);
3106 /* Wait till command completes */
3107 if (wait_for_cmd_complete(sp)) {
3108 DBG_PRINT(ERR_DBG, "%s: set_mac_addr failed\n", dev->name);
3116 * s2io_ethtool_sset - Sets different link parameters.
3117 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3118 * @info: pointer to the structure with parameters given by ethtool to set
3121 * The function sets different link parameters provided by the user onto
/*
 * ethtool set_settings: the Xframe link is fixed at 10G full duplex
 * with no autoneg, so any other request is rejected (return elided);
 * an accepted request bounces the interface via s2io_close/open.
 */
3127 static int s2io_ethtool_sset(struct net_device *dev,
3128 struct ethtool_cmd *info)
3130 nic_t *sp = dev->priv;
3131 if ((info->autoneg == AUTONEG_ENABLE) ||
3132 (info->speed != SPEED_10000) || (info->duplex != DUPLEX_FULL))
3135 s2io_close(sp->dev);
3143 * s2io_ethtool_gset - Return link specific information.
3144 * @sp : private member of the device structure, pointer to the
3145 * s2io_nic structure.
3146 * @info : pointer to the structure with parameters given by ethtool
3147 * to return link information.
3149 * Returns link specific information like speed, duplex etc.. to ethtool.
3151 * return 0 on success.
/*
 * ethtool get_settings: reports a fixed 10GBase fibre link; speed and
 * duplex are filled in only while the carrier is up, autoneg is
 * always reported disabled.
 */
3154 int s2io_ethtool_gset(struct net_device *dev, struct ethtool_cmd *info)
3156 nic_t *sp = dev->priv;
3157 info->supported = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
/* NOTE(review): uses SUPPORTED_* for .advertising; the ADVERTISED_*
 * constants have identical values here, but confirm intent. */
3158 info->advertising = (SUPPORTED_10000baseT_Full | SUPPORTED_FIBRE);
3159 info->port = PORT_FIBRE;
3160 /* info->transceiver?? TODO */
3162 if (netif_carrier_ok(sp->dev)) {
3163 info->speed = 10000;
3164 info->duplex = DUPLEX_FULL;
3170 info->autoneg = AUTONEG_DISABLE;
3175 * s2io_ethtool_gdrvinfo - Returns driver specific information.
3176 * @sp : private member of the device structure, which is a pointer to the
3177 * s2io_nic structure.
3178 * @info : pointer to the structure with parameters given by ethtool to
3179 * return driver information.
3181 * Returns driver specefic information like name, version etc.. to ethtool.
/*
 * ethtool get_drvinfo: fills in driver name/version, PCI slot name,
 * and the sizes of the register dump, EEPROM dump, self-test and
 * statistics arrays.
 */
3186 static void s2io_ethtool_gdrvinfo(struct net_device *dev,
3187 struct ethtool_drvinfo *info)
3189 nic_t *sp = dev->priv;
3191 strncpy(info->driver, s2io_driver_name, sizeof(s2io_driver_name));
3192 strncpy(info->version, s2io_driver_version,
3193 sizeof(s2io_driver_version));
/* No readable firmware version on this hardware — report empty. */
3194 strncpy(info->fw_version, "", 32);
3195 strncpy(info->bus_info, sp->pdev->slot_name, 32);
3196 info->regdump_len = XENA_REG_SPACE;
3197 info->eedump_len = XENA_EEPROM_SPACE;
3198 info->testinfo_len = S2IO_TEST_LEN;
3199 info->n_stats = S2IO_STAT_LEN;
3203 * s2io_ethtool_gregs - dumps the entire space of Xframe into the buffer.
3204 * @sp: private member of the device structure, which is a pointer to the
3205 * s2io_nic structure.
3206 * @regs : pointer to the structure with parameters given by ethtool for
3207 * dumping the registers.
3208 * @reg_space: The input argument into which all the registers are dumped.
3210 * Dumps the entire register space of xFrame NIC into the user given
/*
 * ethtool get_regs: dumps the entire BAR0 register space into the
 * user buffer, 8 bytes (one readq) at a time.
 */
3216 static void s2io_ethtool_gregs(struct net_device *dev,
3217 struct ethtool_regs *regs, void *space)
3221 u8 *reg_space = (u8 *) space;
3222 nic_t *sp = dev->priv;
3224 regs->len = XENA_REG_SPACE;
3225 regs->version = sp->pdev->subsystem_device;
3227 for (i = 0; i < regs->len; i += 8) {
3228 reg = readq(sp->bar0 + i);
3229 memcpy((reg_space + i), &reg, 8);
3234 * s2io_phy_id - timer function that alternates adapter LED.
3235 * @data : address of the private member of the device structure, which
3236 * is a pointer to the s2io_nic structure, provided as an u32.
3237 * Description: This is actually the timer function that alternates the
3238 * adapter LED bit of the adapter control bit to set/reset every time on
3239 * invocation. The timer is set for 1/2 a second, hence the NIC blinks
3240 * once every second.
/*
 * Timer callback for the ethtool "identify NIC" blink: toggles the
 * link LED (via gpio_control on newer subsystem IDs, adapter_control
 * otherwise) and re-arms itself every HZ/2, i.e. one blink per second.
 */
3242 static void s2io_phy_id(unsigned long data)
3244 nic_t *sp = (nic_t *) data;
3245 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3249 subid = sp->pdev->subsystem_device;
/* Subsystem IDs >= 0x07 drive the LED through GPIO 0. */
3250 if ((subid & 0xFF) >= 0x07) {
3251 val64 = readq(&bar0->gpio_control);
3252 val64 ^= GPIO_CTRL_GPIO_0;
3253 writeq(val64, &bar0->gpio_control);
3255 val64 = readq(&bar0->adapter_control);
3256 val64 ^= ADAPTER_LED_ON;
3257 writeq(val64, &bar0->adapter_control);
3260 mod_timer(&sp->id_timer, jiffies + HZ / 2);
3264 * s2io_ethtool_idnic - To physically identify the nic on the system.
3265 * @sp : private member of the device structure, which is a pointer to the
3266 * s2io_nic structure.
3267 * @id : pointer to the structure with identification parameters given by
3269 * Description: Used to physically identify the NIC on the system.
3270 * The Link LED will blink for a time specified by the user for
3272 * NOTE: The Link has to be Up to be able to blink the LED. Hence
3273 * identification is possible only if it's link is up.
3275 * int , returns 0 on success
/*
 * ethtool phys_id: blinks the link LED for `data` seconds (or until
 * interrupted when data == 0) by arming the s2io_phy_id timer, then
 * restores the saved gpio_control value on cards with faulty link
 * indicators. Requires the adapter to be enabled on older cards.
 */
3278 static int s2io_ethtool_idnic(struct net_device *dev, u32 data)
3280 u64 val64 = 0, last_gpio_ctrl_val;
3281 nic_t *sp = dev->priv;
3282 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3285 subid = sp->pdev->subsystem_device;
/* Save LED state so it can be restored after the blink finishes. */
3286 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3287 if ((subid & 0xFF) < 0x07) {
3288 val64 = readq(&bar0->adapter_control);
3289 if (!(val64 & ADAPTER_CNTL_EN)) {
3291 "Adapter Link down, cannot blink LED\n");
3295 if (sp->id_timer.function == NULL) {
3296 init_timer(&sp->id_timer);
3297 sp->id_timer.function = s2io_phy_id;
3298 sp->id_timer.data = (unsigned long) sp;
3300 mod_timer(&sp->id_timer, jiffies);
/* Sleep for the requested duration while the timer toggles the LED. */
3301 set_current_state(TASK_INTERRUPTIBLE);
3303 schedule_timeout(data * HZ);
3305 schedule_timeout(MAX_SCHEDULE_TIMEOUT);
3306 del_timer_sync(&sp->id_timer);
3308 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
3309 writeq(last_gpio_ctrl_val, &bar0->gpio_control);
3310 last_gpio_ctrl_val = readq(&bar0->gpio_control);
3317 * s2io_ethtool_getpause_data -Pause frame frame generation and reception.
3318 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3319 * @ep : pointer to the structure with pause parameters given by ethtool.
3321 * Returns the Pause frame generation and reception capability of the NIC.
/*
 * ethtool get_pauseparam: reports Tx/Rx pause-frame capability from
 * rmac_pause_cfg; pause autonegotiation is never supported.
 */
3325 static void s2io_ethtool_getpause_data(struct net_device *dev,
3326 struct ethtool_pauseparam *ep)
3329 nic_t *sp = dev->priv;
3330 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3332 val64 = readq(&bar0->rmac_pause_cfg);
3333 if (val64 & RMAC_PAUSE_GEN_ENABLE)
3334 ep->tx_pause = TRUE;
3335 if (val64 & RMAC_PAUSE_RX_ENABLE)
3336 ep->rx_pause = TRUE;
3337 ep->autoneg = FALSE;
3341 * s2io_ethtool_setpause_data - set/reset pause frame generation.
3342 * @sp : private member of the device structure, which is a pointer to the
3343 * s2io_nic structure.
3344 * @ep : pointer to the structure with pause parameters given by ethtool.
3346 * It can be used to set or reset Pause frame generation or reception
3347 * support of the NIC.
3349 * int, returns 0 on Success
/*
 * ethtool set_pauseparam: read-modify-writes rmac_pause_cfg, setting
 * or clearing the Tx-pause-generation and Rx-pause-honour bits
 * according to ep (the ep->tx_pause / ep->rx_pause tests sit on
 * elided lines between the pairs below).
 */
3352 int s2io_ethtool_setpause_data(struct net_device *dev,
3353 struct ethtool_pauseparam *ep)
3356 nic_t *sp = dev->priv;
3357 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3359 val64 = readq(&bar0->rmac_pause_cfg);
3361 val64 |= RMAC_PAUSE_GEN_ENABLE;
3363 val64 &= ~RMAC_PAUSE_GEN_ENABLE;
3365 val64 |= RMAC_PAUSE_RX_ENABLE;
3367 val64 &= ~RMAC_PAUSE_RX_ENABLE;
3368 writeq(val64, &bar0->rmac_pause_cfg);
3373 * read_eeprom - reads 4 bytes of data from user given offset.
3374 * @sp : private member of the device structure, which is a pointer to the
3375 * s2io_nic structure.
3376 * @off : offset at which the data must be written
3377 * @data : Its an output parameter where the data read at the given
3380 * Will read 4 bytes of data from the user given offset and return the
3382 * NOTE: Will allow to read only part of the EEPROM visible through the
3385 * -1 on failure and 0 on success.
/* I2C device id of the on-board EEPROM as seen by the I2C_CONTROL block. */
3388 #define S2IO_DEV_ID 5
/*
 * Reads 4 bytes from EEPROM offset `off` into *data through the
 * i2c_control register: issue a READ command, then poll up to 5 times
 * (50 ms apart) for CNTL_END. Returns 0 on success, -1 on timeout
 * (loop-increment and return lines elided in this chunk).
 */
3389 static int read_eeprom(nic_t * sp, int off, u32 * data)
3394 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3396 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3397 I2C_CONTROL_BYTE_CNT(0x3) | I2C_CONTROL_READ |
3398 I2C_CONTROL_CNTL_START;
3399 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3401 while (exit_cnt < 5) {
3402 val64 = readq(&bar0->i2c_control);
3403 if (I2C_CONTROL_CNTL_END(val64)) {
3404 *data = I2C_CONTROL_GET_DATA(val64);
3408 set_current_state(TASK_UNINTERRUPTIBLE);
3409 schedule_timeout(HZ / 20);
3417 * write_eeprom - actually writes the relevant part of the data value.
3418 * @sp : private member of the device structure, which is a pointer to the
3419 * s2io_nic structure.
3420 * @off : offset at which the data must be written
3421 * @data : The data that is to be written
3422 * @cnt : Number of bytes of the data that are actually to be written into
3423 * the Eeprom. (max of 3)
3425 * Actually writes the relevant part of the data value into the Eeprom
3426 * through the I2C bus.
3428 * 0 on success, -1 on failure.
/*
 * Writes `cnt` bytes (max 3) of `data` to EEPROM offset `off` through
 * the i2c_control register, then polls up to 5 times (50 ms apart)
 * for CNTL_END with no NACK. Returns 0 on success, -1 on failure
 * (loop-increment and return lines elided in this chunk).
 */
3431 static int write_eeprom(nic_t * sp, int off, u32 data, int cnt)
3433 int exit_cnt = 0, ret = -1;
3435 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3437 val64 = I2C_CONTROL_DEV_ID(S2IO_DEV_ID) | I2C_CONTROL_ADDR(off) |
3438 I2C_CONTROL_BYTE_CNT(cnt) | I2C_CONTROL_SET_DATA(data) |
3439 I2C_CONTROL_CNTL_START;
3440 SPECIAL_REG_WRITE(val64, &bar0->i2c_control, LF);
3442 while (exit_cnt < 5) {
3443 val64 = readq(&bar0->i2c_control);
3444 if (I2C_CONTROL_CNTL_END(val64)) {
/* A NACK from the EEPROM means the write was rejected. */
3445 if (!(val64 & I2C_CONTROL_NACK))
3449 set_current_state(TASK_UNINTERRUPTIBLE);
3450 schedule_timeout(HZ / 20);
3458 * s2io_ethtool_geeprom - reads the value stored in the Eeprom.
3459 * @sp : private member of the device structure, which is a pointer to the * s2io_nic structure.
3460 * @eeprom : pointer to the user level structure provided by ethtool,
3461 * containing all relevant information.
3462 * @data_buf : user defined value to be written into Eeprom.
3463 * Description: Reads the values stored in the Eeprom at given offset
3464 * for a given length. Stores these values int the input argument data
3465 * buffer 'data_buf' and returns these to the caller (ethtool.)
/*
 * ethtool get_eeprom: reads the requested range 4 bytes at a time via
 * read_eeprom() into data_buf, clamping the range to the visible
 * EEPROM space. The magic field encodes vendor/device IDs.
 * NOTE(review): copies from `valid`, which is set from `data` on an
 * elided line — presumably a byte-order fixup; confirm upstream.
 */
3470 int s2io_ethtool_geeprom(struct net_device *dev,
3471 struct ethtool_eeprom *eeprom, u8 * data_buf)
3474 nic_t *sp = dev->priv;
3476 eeprom->magic = sp->pdev->vendor | (sp->pdev->device << 16);
3478 if ((eeprom->offset + eeprom->len) > (XENA_EEPROM_SPACE))
3479 eeprom->len = XENA_EEPROM_SPACE - eeprom->offset;
3481 for (i = 0; i < eeprom->len; i += 4) {
3482 if (read_eeprom(sp, (eeprom->offset + i), &data)) {
3483 DBG_PRINT(ERR_DBG, "Read of EEPROM failed\n");
3487 memcpy((data_buf + i), &valid, 4);
3493 * s2io_ethtool_seeprom - tries to write the user provided value in Eeprom
3494 * @sp : private member of the device structure, which is a pointer to the
3495 * s2io_nic structure.
3496 * @eeprom : pointer to the user level structure provided by ethtool,
3497 * containing all relevant information.
3498 * @data_buf ; user defined value to be written into Eeprom.
3500 * Tries to write the user provided value in the Eeprom, at the offset
3501 * given by the user.
3503 * 0 on success, -EFAULT on failure.
/*
 * ethtool set_eeprom: validates the vendor/device magic, then writes
 * the user buffer into the EEPROM byte by byte via write_eeprom(),
 * shifting each byte into position 24 of the data word. Returns
 * -EFAULT on any failure (loop header and returns elided).
 */
3506 static int s2io_ethtool_seeprom(struct net_device *dev,
3507 struct ethtool_eeprom *eeprom,
3510 int len = eeprom->len, cnt = 0;
3511 u32 valid = 0, data;
3512 nic_t *sp = dev->priv;
/* Reject writes unless the caller echoes back the device's magic. */
3514 if (eeprom->magic != (sp->pdev->vendor | (sp->pdev->device << 16))) {
3516 "ETHTOOL_WRITE_EEPROM Err: Magic value ");
3517 DBG_PRINT(ERR_DBG, "is wrong, Its not 0x%x\n",
3523 data = (u32) data_buf[cnt] & 0x000000FF;
3525 valid = (u32) (data << 24);
3529 if (write_eeprom(sp, (eeprom->offset + cnt), valid, 0)) {
3531 "ETHTOOL_WRITE_EEPROM Err: Cannot ");
3533 "write into the specified offset\n");
3544 * s2io_register_test - reads and writes into all clock domains.
3545 * @sp : private member of the device structure, which is a pointer to the
3546 * s2io_nic structure.
3547 * @data : variable that returns the result of each of the test conducted b
3550 * Read and write into all clock domains. The NIC has 3 clock domains,
3551 * see that registers in all the three regions are accessible.
/*
 * Offline self-test: reads four known-value registers spanning the
 * NIC's clock domains (read tests 1-4), then performs 0x5A.../0xA5...
 * pattern writes to xmsi_data (write tests 1-2). Any mismatch sets
 * a failure flag recorded via *data (fail-flag lines elided).
 */
3556 static int s2io_register_test(nic_t * sp, uint64_t * data)
3558 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3562 val64 = readq(&bar0->pcc_enable);
3563 if (val64 != 0xff00000000000000ULL) {
3565 DBG_PRINT(INFO_DBG, "Read Test level 1 fails\n");
3568 val64 = readq(&bar0->rmac_pause_cfg);
3569 if (val64 != 0xc000ffff00000000ULL) {
3571 DBG_PRINT(INFO_DBG, "Read Test level 2 fails\n");
3574 val64 = readq(&bar0->rx_queue_cfg);
3575 if (val64 != 0x0808080808080808ULL) {
3577 DBG_PRINT(INFO_DBG, "Read Test level 3 fails\n");
3580 val64 = readq(&bar0->xgxs_efifo_cfg);
3581 if (val64 != 0x000000001923141EULL) {
3583 DBG_PRINT(INFO_DBG, "Read Test level 4 fails\n");
/* Write tests: alternating bit patterns must read back exactly. */
3586 val64 = 0x5A5A5A5A5A5A5A5AULL;
3587 writeq(val64, &bar0->xmsi_data);
3588 val64 = readq(&bar0->xmsi_data);
3589 if (val64 != 0x5A5A5A5A5A5A5A5AULL) {
3591 DBG_PRINT(ERR_DBG, "Write Test level 1 fails\n");
3594 val64 = 0xA5A5A5A5A5A5A5A5ULL;
3595 writeq(val64, &bar0->xmsi_data);
3596 val64 = readq(&bar0->xmsi_data);
3597 if (val64 != 0xA5A5A5A5A5A5A5A5ULL) {
3599 DBG_PRINT(ERR_DBG, "Write Test level 2 fails\n");
3607 * s2io_eeprom_test - to verify that EEprom in the xena can be programmed.
3608 * @sp : private member of the device structure, which is a pointer to the
3609 * s2io_nic structure.
3610 * @data:variable that returns the result of each of the test conducted by
3613 * Verify that EEPROM in the xena can be programmed using I2C_CONTROL
/*
 * Offline EEPROM self-test: verifies that protected offsets (0x0,
 * 0x7C, 0x80, 0xFC, 0x100, 0x4EC) reject writes (note the negated
 * write_eeprom tests — failure to write is the PASS condition there),
 * and that writable offsets (0x4F0, 0x7FC) accept a pattern write,
 * read it back, and can be restored to 0xFFFFFFFF. Fail-flag and
 * return lines are elided from this chunk.
 */
3619 static int s2io_eeprom_test(nic_t * sp, uint64_t * data)
3624 /* Test Write Error at offset 0 */
3625 if (!write_eeprom(sp, 0, 0, 3))
3628 /* Test Write at offset 4f0 */
3629 if (write_eeprom(sp, 0x4F0, 0x01234567, 3))
3631 if (read_eeprom(sp, 0x4F0, &ret_data))
3634 if (ret_data != 0x01234567)
3637 /* Reset the EEPROM data go FFFF */
3638 write_eeprom(sp, 0x4F0, 0xFFFFFFFF, 3);
3640 /* Test Write Request Error at offset 0x7c */
3641 if (!write_eeprom(sp, 0x07C, 0, 3))
3644 /* Test Write Request at offset 0x7fc */
3645 if (write_eeprom(sp, 0x7FC, 0x01234567, 3))
3647 if (read_eeprom(sp, 0x7FC, &ret_data))
3650 if (ret_data != 0x01234567)
3653 /* Reset the EEPROM data go FFFF */
3654 write_eeprom(sp, 0x7FC, 0xFFFFFFFF, 3);
3656 /* Test Write Error at offset 0x80 */
3657 if (!write_eeprom(sp, 0x080, 0, 3))
3660 /* Test Write Error at offset 0xfc */
3661 if (!write_eeprom(sp, 0x0FC, 0, 3))
3664 /* Test Write Error at offset 0x100 */
3665 if (!write_eeprom(sp, 0x100, 0, 3))
3668 /* Test Write Error at offset 4ec */
3669 if (!write_eeprom(sp, 0x4EC, 0, 3))
3677 * s2io_bist_test - invokes the MemBist test of the card .
3678 * @sp : private member of the device structure, which is a pointer to the
3679 * s2io_nic structure.
3680 * @data:variable that returns the result of each of the test conducted by
3683 * This invokes the MemBist test of the card. We give around
3684 * 2 secs time for the Test to complete. If it's still not complete
3685 * within this period, we consider that the test failed.
3687 * 0 on success and -1 on failure.
3690 static int s2io_bist_test(nic_t * sp, uint64_t * data)
3693 int cnt = 0, ret = -1;
/* Kick off the PCI Built-In Self Test by setting PCI_BIST_START. */
3695 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3696 bist |= PCI_BIST_START;
/*
 * NOTE(review): 'bist' is a single byte (read via pci_read_config_byte)
 * but is written back with pci_write_config_word, which stores 16 bits
 * and clobbers the config byte following PCI_BIST.  This should almost
 * certainly be pci_write_config_byte() -- confirm and fix.
 */
3697 pci_write_config_word(sp->pdev, PCI_BIST, bist);
/* Poll for completion: hardware clears PCI_BIST_START when done. */
3700 pci_read_config_byte(sp->pdev, PCI_BIST, &bist);
3701 if (!(bist & PCI_BIST_START)) {
/* Report the BIST completion code to the caller. */
3702 *data = (bist & PCI_BIST_CODE_MASK);
/* Sleep ~100ms between polls; ~2s overall budget (see header comment). */
3706 set_current_state(TASK_UNINTERRUPTIBLE);
3707 schedule_timeout(HZ / 10);
3715 * s2io_link_test - verifies the link state of the nic
3716 * @sp : private member of the device structure, which is a pointer to the
3717 * s2io_nic structure.
3718 * @data: variable that returns the result of each of the test conducted by
3721 * The function verifies the link state of the NIC and updates the input
3722 * argument 'data' appropriately.
3727 static int s2io_link_test(nic_t * sp, uint64_t * data)
3729 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* An RMAC local-fault bit in adapter_status indicates the link is down. */
3732 val64 = readq(&bar0->adapter_status);
3733 if (val64 & ADAPTER_STATUS_RMAC_LOCAL_FAULT)
3740 * s2io_rldram_test - offline test for access to the RldRam chip on the NIC
3741 * @sp - private member of the device structure, which is a pointer to the
3742 * s2io_nic structure.
3743 * @data - variable that returns the result of each of the test
3744 * conducted by the driver.
3746 * This is one of the offline test that tests the read and write
3747 * access to the RldRam chip on the NIC.
3752 static int s2io_rldram_test(nic_t * sp, uint64_t * data)
3754 XENA_dev_config_t __iomem *bar0 = sp->bar0;
3756 int cnt, iteration = 0, test_pass = 0;
3758 val64 = readq(&bar0->adapter_control);
3759 val64 &= ~ADAPTER_ECC_EN;
3760 writeq(val64, &bar0->adapter_control);
3762 val64 = readq(&bar0->mc_rldram_test_ctrl);
3763 val64 |= MC_RLDRAM_TEST_MODE;
3764 writeq(val64, &bar0->mc_rldram_test_ctrl);
3766 val64 = readq(&bar0->mc_rldram_mrs);
3767 val64 |= MC_RLDRAM_QUEUE_SIZE_ENABLE;
3768 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3770 val64 |= MC_RLDRAM_MRS_ENABLE;
3771 SPECIAL_REG_WRITE(val64, &bar0->mc_rldram_mrs, UF);
3773 while (iteration < 2) {
3774 val64 = 0x55555555aaaa0000ULL;
3775 if (iteration == 1) {
3776 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3778 writeq(val64, &bar0->mc_rldram_test_d0);
3780 val64 = 0xaaaa5a5555550000ULL;
3781 if (iteration == 1) {
3782 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3784 writeq(val64, &bar0->mc_rldram_test_d1);
3786 val64 = 0x55aaaaaaaa5a0000ULL;
3787 if (iteration == 1) {
3788 val64 ^= 0xFFFFFFFFFFFF0000ULL;
3790 writeq(val64, &bar0->mc_rldram_test_d2);
3792 val64 = (u64) (0x0000003fffff0000ULL);
3793 writeq(val64, &bar0->mc_rldram_test_add);
3796 val64 = MC_RLDRAM_TEST_MODE;
3797 writeq(val64, &bar0->mc_rldram_test_ctrl);
3800 MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_WRITE |
3802 writeq(val64, &bar0->mc_rldram_test_ctrl);
3804 for (cnt = 0; cnt < 5; cnt++) {
3805 val64 = readq(&bar0->mc_rldram_test_ctrl);
3806 if (val64 & MC_RLDRAM_TEST_DONE)
3808 set_current_state(TASK_UNINTERRUPTIBLE);
3809 schedule_timeout(HZ / 5);
3815 val64 = MC_RLDRAM_TEST_MODE;
3816 writeq(val64, &bar0->mc_rldram_test_ctrl);
3818 val64 |= MC_RLDRAM_TEST_MODE | MC_RLDRAM_TEST_GO;
3819 writeq(val64, &bar0->mc_rldram_test_ctrl);
3821 for (cnt = 0; cnt < 5; cnt++) {
3822 val64 = readq(&bar0->mc_rldram_test_ctrl);
3823 if (val64 & MC_RLDRAM_TEST_DONE)
3825 set_current_state(TASK_UNINTERRUPTIBLE);
3826 schedule_timeout(HZ / 2);
3832 val64 = readq(&bar0->mc_rldram_test_ctrl);
3833 if (val64 & MC_RLDRAM_TEST_PASS)
3848 * s2io_ethtool_test - conducts 6 tests to determine the health of card.
3849 * @sp : private member of the device structure, which is a pointer to the
3850 * s2io_nic structure.
3851 * @ethtest : pointer to a ethtool command specific structure that will be
3852 * returned to the user.
3853 * @data : variable that returns the result of each of the test
3854 * conducted by the driver.
3856 * This function conducts 6 tests ( 4 offline and 2 online) to determine
3857 * the health of the card.
3862 static void s2io_ethtool_test(struct net_device *dev,
3863 struct ethtool_test *ethtest,
3866 nic_t *sp = dev->priv;
/* Remember whether the interface was up so it can be restored later. */
3867 int orig_state = netif_running(sp->dev);
3869 if (ethtest->flags == ETH_TEST_FL_OFFLINE) {
3870 /* Offline Tests. */
/* Offline tests need exclusive hardware access: close the device first. */
3872 s2io_close(sp->dev);
/*
 * Swapper settings are re-applied after each test because the tests
 * reset the device -- presumably each reset clears the swapper
 * configuration (TODO confirm against s2io_set_swapper()).
 */
3873 s2io_set_swapper(sp);
3875 s2io_set_swapper(sp);
/*
 * data[] index mapping (from the calls below):
 *   data[0] = register test, data[1] = eeprom test, data[2] = link test,
 *   data[3] = rldram test,   data[4] = bist test.
 */
3877 if (s2io_register_test(sp, &data[0]))
3878 ethtest->flags |= ETH_TEST_FL_FAILED;
3881 s2io_set_swapper(sp);
3883 if (s2io_rldram_test(sp, &data[3]))
3884 ethtest->flags |= ETH_TEST_FL_FAILED;
3887 s2io_set_swapper(sp);
3889 if (s2io_eeprom_test(sp, &data[1]))
3890 ethtest->flags |= ETH_TEST_FL_FAILED;
3892 if (s2io_bist_test(sp, &data[4]))
3893 ethtest->flags |= ETH_TEST_FL_FAILED;
3903 "%s: is not up, cannot run test\n",
/* Online path: only the (non-intrusive) link test is run. */
3912 if (s2io_link_test(sp, &data[2]))
3913 ethtest->flags |= ETH_TEST_FL_FAILED;
3922 static void s2io_get_ethtool_stats(struct net_device *dev,
3923 struct ethtool_stats *estats,
3927 nic_t *sp = dev->priv;
3928 StatInfo_t *stat_info = sp->mac_control.stats_info;
3930 tmp_stats[i++] = stat_info->tmac_frms;
3931 tmp_stats[i++] = stat_info->tmac_data_octets;
3932 tmp_stats[i++] = stat_info->tmac_drop_frms;
3933 tmp_stats[i++] = stat_info->tmac_mcst_frms;
3934 tmp_stats[i++] = stat_info->tmac_bcst_frms;
3935 tmp_stats[i++] = stat_info->tmac_pause_ctrl_frms;
3936 tmp_stats[i++] = stat_info->tmac_any_err_frms;
3937 tmp_stats[i++] = stat_info->tmac_vld_ip_octets;
3938 tmp_stats[i++] = stat_info->tmac_vld_ip;
3939 tmp_stats[i++] = stat_info->tmac_drop_ip;
3940 tmp_stats[i++] = stat_info->tmac_icmp;
3941 tmp_stats[i++] = stat_info->tmac_rst_tcp;
3942 tmp_stats[i++] = stat_info->tmac_tcp;
3943 tmp_stats[i++] = stat_info->tmac_udp;
3944 tmp_stats[i++] = stat_info->rmac_vld_frms;
3945 tmp_stats[i++] = stat_info->rmac_data_octets;
3946 tmp_stats[i++] = stat_info->rmac_fcs_err_frms;
3947 tmp_stats[i++] = stat_info->rmac_drop_frms;
3948 tmp_stats[i++] = stat_info->rmac_vld_mcst_frms;
3949 tmp_stats[i++] = stat_info->rmac_vld_bcst_frms;
3950 tmp_stats[i++] = stat_info->rmac_in_rng_len_err_frms;
3951 tmp_stats[i++] = stat_info->rmac_long_frms;
3952 tmp_stats[i++] = stat_info->rmac_pause_ctrl_frms;
3953 tmp_stats[i++] = stat_info->rmac_discarded_frms;
3954 tmp_stats[i++] = stat_info->rmac_usized_frms;
3955 tmp_stats[i++] = stat_info->rmac_osized_frms;
3956 tmp_stats[i++] = stat_info->rmac_frag_frms;
3957 tmp_stats[i++] = stat_info->rmac_jabber_frms;
3958 tmp_stats[i++] = stat_info->rmac_ip;
3959 tmp_stats[i++] = stat_info->rmac_ip_octets;
3960 tmp_stats[i++] = stat_info->rmac_hdr_err_ip;
3961 tmp_stats[i++] = stat_info->rmac_drop_ip;
3962 tmp_stats[i++] = stat_info->rmac_icmp;
3963 tmp_stats[i++] = stat_info->rmac_tcp;
3964 tmp_stats[i++] = stat_info->rmac_udp;
3965 tmp_stats[i++] = stat_info->rmac_err_drp_udp;
3966 tmp_stats[i++] = stat_info->rmac_pause_cnt;
3967 tmp_stats[i++] = stat_info->rmac_accepted_ip;
3968 tmp_stats[i++] = stat_info->rmac_err_tcp;
/* ethtool get_regs_len hook: size of the register dump is the whole BAR0 space. */
3971 int s2io_ethtool_get_regs_len(struct net_device *dev)
3973 return (XENA_REG_SPACE);
/* ethtool get_rx_csum hook: report whether Rx checksum offload is enabled. */
3977 u32 s2io_ethtool_get_rx_csum(struct net_device * dev)
3979 nic_t *sp = dev->priv;
3981 return (sp->rx_csum);
3983 int s2io_ethtool_set_rx_csum(struct net_device *dev, u32 data)
3985 nic_t *sp = dev->priv;
3994 int s2io_get_eeprom_len(struct net_device *dev)
3996 return (XENA_EEPROM_SPACE);
3999 int s2io_ethtool_self_test_count(struct net_device *dev)
4001 return (S2IO_TEST_LEN);
4003 void s2io_ethtool_get_strings(struct net_device *dev,
4004 u32 stringset, u8 * data)
4006 switch (stringset) {
4008 memcpy(data, s2io_gstrings, S2IO_STRINGS_LEN);
4011 memcpy(data, ðtool_stats_keys,
4012 sizeof(ethtool_stats_keys));
4015 static int s2io_ethtool_get_stats_count(struct net_device *dev)
4017 return (S2IO_STAT_LEN);
4020 int s2io_ethtool_op_set_tx_csum(struct net_device *dev, u32 data)
4023 dev->features |= NETIF_F_IP_CSUM;
4025 dev->features &= ~NETIF_F_IP_CSUM;
4031 static struct ethtool_ops netdev_ethtool_ops = {
4032 .get_settings = s2io_ethtool_gset,
4033 .set_settings = s2io_ethtool_sset,
4034 .get_drvinfo = s2io_ethtool_gdrvinfo,
4035 .get_regs_len = s2io_ethtool_get_regs_len,
4036 .get_regs = s2io_ethtool_gregs,
4037 .get_link = ethtool_op_get_link,
4038 .get_eeprom_len = s2io_get_eeprom_len,
4039 .get_eeprom = s2io_ethtool_geeprom,
4040 .set_eeprom = s2io_ethtool_seeprom,
4041 .get_pauseparam = s2io_ethtool_getpause_data,
4042 .set_pauseparam = s2io_ethtool_setpause_data,
4043 .get_rx_csum = s2io_ethtool_get_rx_csum,
4044 .set_rx_csum = s2io_ethtool_set_rx_csum,
4045 .get_tx_csum = ethtool_op_get_tx_csum,
4046 .set_tx_csum = s2io_ethtool_op_set_tx_csum,
4047 .get_sg = ethtool_op_get_sg,
4048 .set_sg = ethtool_op_set_sg,
4050 .get_tso = ethtool_op_get_tso,
4051 .set_tso = ethtool_op_set_tso,
4053 .self_test_count = s2io_ethtool_self_test_count,
4054 .self_test = s2io_ethtool_test,
4055 .get_strings = s2io_ethtool_get_strings,
4056 .phys_id = s2io_ethtool_idnic,
4057 .get_stats_count = s2io_ethtool_get_stats_count,
4058 .get_ethtool_stats = s2io_get_ethtool_stats
4062 * s2io_ioctl - Entry point for the Ioctl
4063 * @dev : Device pointer.
4064 * @ifr : An IOCTL specific structure, that can contain a pointer to
4065 * a proprietary structure used to pass information to the driver.
4066 * @cmd : This is used to distinguish between the different commands that
4067 * can be passed to the IOCTL functions.
4069 * This function has support for ethtool, adding multiple MAC addresses on
4070 * the NIC and some DBG commands for the util tool.
4072 * Currently the IOCTL supports no operations, hence by default this
4073 * function returns OP NOT SUPPORTED value.
4076 int s2io_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
4082 * s2io_change_mtu - entry point to change MTU size for the device.
4083 * @dev : device pointer.
4084 * @new_mtu : the new MTU size for the device.
4085 * Description: A driver entry point to change MTU size for the device.
4086 * Before changing the MTU the device must be stopped.
4088 * 0 on success and an appropriate (-)ve integer as defined in errno.h
4092 int s2io_change_mtu(struct net_device *dev, int new_mtu)
4094 nic_t *sp = dev->priv;
4095 XENA_dev_config_t __iomem *bar0 = sp->bar0;
/* MTU may only be changed while the interface is down. */
4098 if (netif_running(dev)) {
4099 DBG_PRINT(ERR_DBG, "%s: Must be stopped to ", dev->name);
4100 DBG_PRINT(ERR_DBG, "change its MTU \n");
/* Reject MTUs outside the supported [MIN_MTU, S2IO_JUMBO_SIZE] range. */
4104 if ((new_mtu < MIN_MTU) || (new_mtu > S2IO_JUMBO_SIZE)) {
4105 DBG_PRINT(ERR_DBG, "%s: MTU size is invalid.\n",
4110 /* Set the new MTU into the PYLD register of the NIC */
4112 writeq(vBIT(val64, 2, 14), &bar0->rmac_max_pyld_len);
4120 * s2io_tasklet - Bottom half of the ISR.
4121 * @dev_adr : address of the device structure in dma_addr_t format.
4123 * This is the tasklet or the bottom half of the ISR. This is
4124 * an extension of the ISR which is scheduled by the scheduler to be run
4125 * when the load on the CPU is low. All low priority tasks of the ISR can
4126 * be pushed into the tasklet. For now the tasklet is used only to
4127 * replenish the Rx buffers in the Rx buffer descriptors.
4132 static void s2io_tasklet(unsigned long dev_addr)
4134 struct net_device *dev = (struct net_device *) dev_addr;
4135 nic_t *sp = dev->priv;
4137 mac_info_t *mac_control;
4138 struct config_param *config;
4140 mac_control = &sp->mac_control;
4141 config = &sp->config;
/*
 * TASKLET_IN_USE presumably test-and-sets bit 0 of sp->tasklet_status
 * (it is cleared below), so only one invocation replenishes at a time
 * -- TODO confirm the macro definition.
 */
4143 if (!TASKLET_IN_USE) {
/* Replenish Rx buffer descriptors on every configured ring. */
4144 for (i = 0; i < config->rx_ring_num; i++) {
4145 ret = fill_rx_buffers(sp, i);
4146 if (ret == -ENOMEM) {
4147 DBG_PRINT(ERR_DBG, "%s: Out of ",
4149 DBG_PRINT(ERR_DBG, "memory in tasklet\n");
/* -EFILL: ring already full, nothing to replenish. */
4151 } else if (ret == -EFILL) {
4153 "%s: Rx Ring %d is full\n",
/* Release the in-use flag taken by TASKLET_IN_USE above. */
4158 clear_bit(0, (&sp->tasklet_status));
4163 * s2io_set_link - Set the Link status
4164 * @data: long pointer to device private structure
4165 * Description: Sets the link status for the adapter
4168 static void s2io_set_link(unsigned long data)
4170 nic_t *nic = (nic_t *) data;
4171 struct net_device *dev = nic->dev;
4172 XENA_dev_config_t __iomem *bar0 = nic->bar0;
/* link_state bit 0 doubles as a "reset in progress" lock (see s2io_card_down). */
4176 if (test_and_set_bit(0, &(nic->link_state))) {
4177 /* The card is being reset, no point doing anything */
4181 subid = nic->pdev->subsystem_device;
4183 * Allow a small delay for the NICs self initiated
4184 * cleanup to complete.
4186 set_current_state(TASK_UNINTERRUPTIBLE);
4187 schedule_timeout(HZ / 10);
/* Only act on the link change if the adapter is quiescent. */
4189 val64 = readq(&bar0->adapter_status);
4190 if (verify_xena_quiescence(val64, nic->device_enabled_once)) {
4191 if (LINK_IS_UP(val64)) {
/* Link came up: enable the adapter. */
4192 val64 = readq(&bar0->adapter_control);
4193 val64 |= ADAPTER_CNTL_EN;
4194 writeq(val64, &bar0->adapter_control);
/*
 * Workaround for cards whose link LED is driven via GPIO 0
 * rather than the normal adapter LED control.
 */
4195 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
4196 val64 = readq(&bar0->gpio_control);
4197 val64 |= GPIO_CTRL_GPIO_0;
4198 writeq(val64, &bar0->gpio_control);
/* Read-back presumably flushes the posted write -- TODO confirm. */
4199 val64 = readq(&bar0->gpio_control);
4201 val64 |= ADAPTER_LED_ON;
4202 writeq(val64, &bar0->adapter_control);
/* Re-check: the link may have dropped while the device was enabled. */
4204 val64 = readq(&bar0->adapter_status);
4205 if (!LINK_IS_UP(val64)) {
4206 DBG_PRINT(ERR_DBG, "%s:", dev->name);
4207 DBG_PRINT(ERR_DBG, " Link down");
4208 DBG_PRINT(ERR_DBG, "after ");
4209 DBG_PRINT(ERR_DBG, "enabling ");
4210 DBG_PRINT(ERR_DBG, "device \n");
4212 if (nic->device_enabled_once == FALSE) {
4213 nic->device_enabled_once = TRUE;
/* Propagate the new state to the net stack (starts the Tx queue). */
4215 s2io_link(nic, LINK_UP);
4217 if (CARDS_WITH_FAULTY_LINK_INDICATORS(subid)) {
/* Link went down: turn the GPIO-driven LED off on affected cards. */
4218 val64 = readq(&bar0->gpio_control);
4219 val64 &= ~GPIO_CTRL_GPIO_0;
4220 writeq(val64, &bar0->gpio_control);
4221 val64 = readq(&bar0->gpio_control);
4223 s2io_link(nic, LINK_DOWN);
4225 } else { /* NIC is not Quiescent. */
4226 DBG_PRINT(ERR_DBG, "%s: Error: ", dev->name);
4227 DBG_PRINT(ERR_DBG, "device is not Quiescent\n");
4228 netif_stop_queue(dev);
/* Drop the link_state lock taken at entry. */
4230 clear_bit(0, &(nic->link_state));
/*
 * s2io_card_down - quiesce and tear down the running NIC.
 * Stops traffic, kills the replenish tasklet, waits for the adapter to
 * become quiescent, then frees all outstanding Tx/Rx buffers.
 */
4233 static void s2io_card_down(nic_t * sp)
4236 XENA_dev_config_t __iomem *bar0 = sp->bar0;
4237 unsigned long flags;
4238 register u64 val64 = 0;
4240 /* If s2io_set_link task is executing, wait till it completes. */
4241 while (test_and_set_bit(0, &(sp->link_state))) {
4242 set_current_state(TASK_UNINTERRUPTIBLE);
4243 schedule_timeout(HZ / 20);
4245 atomic_set(&sp->card_state, CARD_DOWN);
4247 /* disable Tx and Rx traffic on the NIC */
/* Ensure the Rx-replenish tasklet cannot run past this point. */
4251 tasklet_kill(&sp->task);
4253 /* Check if the device is Quiescent and then Reset the NIC */
4255 val64 = readq(&bar0->adapter_status);
4256 if (verify_xena_quiescence(val64, sp->device_enabled_once)) {
/* Not quiescent yet: sleep ~50ms and poll again. */
4260 set_current_state(TASK_UNINTERRUPTIBLE);
4261 schedule_timeout(HZ / 20);
4265 "s2io_close:Device not Quiescent ");
4266 DBG_PRINT(ERR_DBG, "adaper status reads 0x%llx\n",
4267 (unsigned long long) val64);
/* Hold tx_lock while the buffer rings are torn down. */
4271 spin_lock_irqsave(&sp->tx_lock, flags);
4274 /* Free all unused Tx and Rx buffers */
4275 free_tx_buffers(sp);
4276 free_rx_buffers(sp);
4278 spin_unlock_irqrestore(&sp->tx_lock, flags);
/* Release the link_state lock taken at entry. */
4279 clear_bit(0, &(sp->link_state));
/*
 * s2io_card_up - bring the NIC to an operational state.
 * Initializes the hardware, fills the Rx rings, sets the receive mode,
 * arms the replenish tasklet and finally starts traffic.  Unwinds what
 * it has done on each failure path.
 */
4282 static int s2io_card_up(nic_t * sp)
4285 mac_info_t *mac_control;
4286 struct config_param *config;
4287 struct net_device *dev = (struct net_device *) sp->dev;
4289 /* Initialize the H/W I/O registers */
4290 if (init_nic(sp) != 0) {
4291 DBG_PRINT(ERR_DBG, "%s: H/W initialization failed\n",
4297 * Initializing the Rx buffers. For now we are considering only 1
4298 * Rx ring and initializing buffers into 30 Rx blocks
4300 mac_control = &sp->mac_control;
4301 config = &sp->config;
4303 for (i = 0; i < config->rx_ring_num; i++) {
4304 if ((ret = fill_rx_buffers(sp, i))) {
4305 DBG_PRINT(ERR_DBG, "%s: Out of memory in Open\n",
/* Undo any rings already filled before failing. */
4308 free_rx_buffers(sp);
4311 DBG_PRINT(INFO_DBG, "Buf in ring:%d is %d:\n", i,
4312 atomic_read(&sp->rx_bufs_left[i]));
4315 /* Setting its receive mode */
4316 s2io_set_multicast(dev);
4318 /* Enable tasklet for the device */
4319 tasklet_init(&sp->task, s2io_tasklet, (unsigned long) dev);
4321 /* Enable Rx Traffic and interrupts on the NIC */
4322 if (start_nic(sp)) {
4323 DBG_PRINT(ERR_DBG, "%s: Starting NIC failed\n", dev->name);
4324 tasklet_kill(&sp->task);
4326 free_irq(dev->irq, dev);
4327 free_rx_buffers(sp);
/* All initialization succeeded: mark the card as up. */
4331 atomic_set(&sp->card_state, CARD_UP);
4336 * s2io_restart_nic - Resets the NIC.
4337 * @data : long pointer to the device private structure
4339 * This function is scheduled to be run by the s2io_tx_watchdog
4340 * function after 0.5 secs to reset the NIC. The idea is to reduce
4341 * the run time of the watch dog routine which is run holding a
4345 static void s2io_restart_nic(unsigned long data)
4347 struct net_device *dev = (struct net_device *) data;
4348 nic_t *sp = dev->priv;
/* Bring the card back up; scheduled from the Tx watchdog (see s2io_tx_watchdog). */
4351 if (s2io_card_up(sp)) {
4352 DBG_PRINT(ERR_DBG, "%s: Device bring up failed\n",
/* Restart the Tx queue now that the device has been reset. */
4355 netif_wake_queue(dev);
4356 DBG_PRINT(ERR_DBG, "%s: was reset by Tx watchdog timer\n",
4361 * s2io_tx_watchdog - Watchdog for transmit side.
4362 * @dev : Pointer to net device structure
4364 * This function is triggered if the Tx Queue is stopped
4365 * for a pre-defined amount of time when the Interface is still up.
4366 * If the Interface is jammed in such a situation, the hardware is
4367 * reset (by s2io_close) and restarted again (by s2io_open) to
4368 * overcome any problem that might have been caused in the hardware.
4373 static void s2io_tx_watchdog(struct net_device *dev)
4375 nic_t *sp = dev->priv;
/*
 * Only schedule a reset when the carrier is up: a stalled queue with
 * the link down is expected, not a hardware jam.  The actual reset
 * runs from a workqueue (s2io_restart_nic) to keep this path short.
 */
4377 if (netif_carrier_ok(dev)) {
4378 schedule_work(&sp->rst_timer_task);
4383 * rx_osm_handler - To perform some OS related operations on SKB.
4384 * @sp: private member of the device structure,pointer to s2io_nic structure.
4385 * @skb : the socket buffer pointer.
4386 * @len : length of the packet
4387 * @cksum : FCS checksum of the frame.
4388 * @ring_no : the ring from which this RxD was extracted.
4390 * This function is called by the Rx interrupt service routine to perform
4391 * some OS related operations on the SKB before passing it to the upper
4392 * layers. It mainly checks if the checksum is OK, if so adds it to the
4393 * SKBs cksum variable, increments the Rx packet count and passes the SKB
4394 * to the upper layer. If the checksum is wrong, it increments the Rx
4395 * packet error count, frees the SKB and returns error.
4397 * SUCCESS on success and -1 on failure.
4399 #ifndef CONFIG_2BUFF_MODE
4400 static int rx_osm_handler(nic_t * sp, u16 len, RxD_t * rxdp, int ring_no)
4402 static int rx_osm_handler(nic_t * sp, RxD_t * rxdp, int ring_no,
4406 struct net_device *dev = (struct net_device *) sp->dev;
4407 struct sk_buff *skb =
4408 (struct sk_buff *) ((unsigned long) rxdp->Host_Control);
4409 u16 l3_csum, l4_csum;
4410 #ifdef CONFIG_2BUFF_MODE
4411 int buf0_len, buf2_len;
4412 unsigned char *buff;
4415 l3_csum = RXD_GET_L3_CKSUM(rxdp->Control_1);
4416 if ((rxdp->Control_1 & TCP_OR_UDP_FRAME) && (sp->rx_csum)) {
4417 l4_csum = RXD_GET_L4_CKSUM(rxdp->Control_1);
4418 if ((l3_csum == L3_CKSUM_OK) && (l4_csum == L4_CKSUM_OK)) {
4420 * NIC verifies if the Checksum of the received
4421 * frame is Ok or not and accordingly returns
4422 * a flag in the RxD.
4424 skb->ip_summed = CHECKSUM_UNNECESSARY;
4427 * Packet with erroneous checksum, let the
4428 * upper layers deal with it.
4430 skb->ip_summed = CHECKSUM_NONE;
4433 skb->ip_summed = CHECKSUM_NONE;
4436 if (rxdp->Control_1 & RXD_T_CODE) {
4437 unsigned long long err = rxdp->Control_1 & RXD_T_CODE;
4438 DBG_PRINT(ERR_DBG, "%s: Rx error Value: 0x%llx\n",
4441 #ifdef CONFIG_2BUFF_MODE
4442 buf0_len = RXD_GET_BUFFER0_SIZE(rxdp->Control_2);
4443 buf2_len = RXD_GET_BUFFER2_SIZE(rxdp->Control_2);
4447 #ifndef CONFIG_2BUFF_MODE
4449 skb->protocol = eth_type_trans(skb, dev);
4451 buff = skb_push(skb, buf0_len);
4452 memcpy(buff, ba->ba_0, buf0_len);
4453 skb_put(skb, buf2_len);
4454 skb->protocol = eth_type_trans(skb, dev);
4457 #ifdef CONFIG_S2IO_NAPI
4458 netif_receive_skb(skb);
4463 dev->last_rx = jiffies;
4465 sp->stats.rx_packets++;
4466 #ifndef CONFIG_2BUFF_MODE
4467 sp->stats.rx_bytes += len;
4469 sp->stats.rx_bytes += buf0_len + buf2_len;
4472 atomic_dec(&sp->rx_bufs_left[ring_no]);
4473 rxdp->Host_Control = 0;
4478 * s2io_link - stops/starts the Tx queue.
4479 * @sp : private member of the device structure, which is a pointer to the
4480 * s2io_nic structure.
4481 * @link : indicates whether link is UP/DOWN.
4483 * This function stops/starts the Tx queue depending on whether the link
4484 * status of the NIC is down or up. This is called by the Alarm
4485 * interrupt handler whenever a link change interrupt comes up.
4490 void s2io_link(nic_t * sp, int link)
4492 struct net_device *dev = (struct net_device *) sp->dev;
/* Act only on actual state transitions to avoid duplicate log/carrier churn. */
4494 if (link != sp->last_link_state) {
4495 if (link == LINK_DOWN) {
4496 DBG_PRINT(ERR_DBG, "%s: Link down\n", dev->name);
4497 netif_carrier_off(dev);
4499 DBG_PRINT(ERR_DBG, "%s: Link Up\n", dev->name);
4500 netif_carrier_on(dev);
/* Remember the state so the next call can detect a transition. */
4503 sp->last_link_state = link;
4507 * get_xena_rev_id - to identify revision ID of xena.
4508 * @pdev : PCI Dev structure
4510 * Function to identify the Revision ID of xena.
4512 * returns the revision ID of the device.
4515 int get_xena_rev_id(struct pci_dev *pdev)
/* Read the revision ID byte from PCI configuration space. */
4519 ret = pci_read_config_byte(pdev, PCI_REVISION_ID, (u8 *) & id);
4524 * s2io_init_pci -Initialization of PCI and PCI-X configuration registers .
4525 * @sp : private member of the device structure, which is a pointer to the
4526 * s2io_nic structure.
4528 * This function initializes a few of the PCI and PCI-X configuration registers
4529 * with recommended values.
4534 static void s2io_init_pci(nic_t * sp)
/*
 * NOTE(review): each config write below is followed by a read of the
 * same register into sp->pcix_cmd -- presumably to refresh the cached
 * value (and flush the write); confirm before restructuring.
 */
4538 /* Enable Data Parity Error Recovery in PCI-X command register. */
4539 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4541 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4542 (sp->pcix_cmd | 1));
4543 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4546 /* Set the PErr Response bit in PCI command register. */
4547 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4548 pci_write_config_word(sp->pdev, PCI_COMMAND,
4549 (pci_cmd | PCI_COMMAND_PARITY));
4550 pci_read_config_word(sp->pdev, PCI_COMMAND, &pci_cmd);
4552 /* Set MMRB count to 1024 in PCI-X Command register. */
/* Clear the MMRBC field (bits 2-3) before setting the 1K encoding. */
4553 sp->pcix_cmd &= 0xFFF3;
4554 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER, (sp->pcix_cmd | (0x1 << 2))); /* MMRBC 1K */
4555 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4558 /* Setting Maximum outstanding splits based on system type. */
/* Clear the outstanding-splits field (bits 4-6) before programming it. */
4559 sp->pcix_cmd &= 0xFF8F;
4561 sp->pcix_cmd |= XENA_MAX_OUTSTANDING_SPLITS(0x1); /* 2 splits. */
4562 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4564 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4566 /* Forcibly disabling relaxed ordering capability of the card. */
4567 sp->pcix_cmd &= 0xfffd;
4568 pci_write_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4570 pci_read_config_word(sp->pdev, PCIX_COMMAND_REGISTER,
4574 MODULE_AUTHOR("Raghavendra Koushik <raghavendra.koushik@s2io.com>");
4575 MODULE_LICENSE("GPL");
4576 module_param(tx_fifo_num, int, 0);
4577 module_param_array(tx_fifo_len, int, NULL, 0);
4578 module_param(rx_ring_num, int, 0);
4579 module_param_array(rx_ring_sz, int, NULL, 0);
4580 module_param(Stats_refresh_time, int, 0);
4581 module_param(rmac_pause_time, int, 0);
4582 module_param(mc_pause_threshold_q0q3, int, 0);
4583 module_param(mc_pause_threshold_q4q7, int, 0);
4584 module_param(shared_splits, int, 0);
4585 module_param(tmac_util_period, int, 0);
4586 module_param(rmac_util_period, int, 0);
4587 #ifndef CONFIG_S2IO_NAPI
4588 module_param(indicate_max_pkts, int, 0);
4591 * s2io_init_nic - Initialization of the adapter .
4592 * @pdev : structure containing the PCI related information of the device.
4593 * @pre: List of PCI devices supported by the driver listed in s2io_tbl.
4595 * The function initializes an adapter identified by the pci_dev structure.
4596 * All OS related initialization including memory and device structure and
4597 * initialization of the device private variable is done. Also the swapper
4598 * control register is initialized to enable read and write into the I/O
4599 * registers of the device.
4601 * returns 0 on success and negative on failure.
4604 static int __devinit
4605 s2io_init_nic(struct pci_dev *pdev, const struct pci_device_id *pre)
4608 struct net_device *dev;
4609 char *dev_name = "S2IO 10GE NIC";
4611 int dma_flag = FALSE;
4612 u32 mac_up, mac_down;
4613 u64 val64 = 0, tmp64 = 0;
4614 XENA_dev_config_t __iomem *bar0 = NULL;
4616 mac_info_t *mac_control;
4617 struct config_param *config;
4620 DBG_PRINT(ERR_DBG, "Loading S2IO driver with %s\n",
4621 s2io_driver_version);
4623 if ((ret = pci_enable_device(pdev))) {
4625 "s2io_init_nic: pci_enable_device failed\n");
4629 if (!pci_set_dma_mask(pdev, 0xffffffffffffffffULL)) {
4630 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 64bit DMA\n");
4633 if (pci_set_consistent_dma_mask
4634 (pdev, 0xffffffffffffffffULL)) {
4636 "Unable to obtain 64bit DMA for \
4637 consistent allocations\n");
4638 pci_disable_device(pdev);
4641 } else if (!pci_set_dma_mask(pdev, 0xffffffffUL)) {
4642 DBG_PRINT(INIT_DBG, "s2io_init_nic: Using 32bit DMA\n");
4644 pci_disable_device(pdev);
4648 if (pci_request_regions(pdev, s2io_driver_name)) {
4649 DBG_PRINT(ERR_DBG, "Request Regions failed\n"),
4650 pci_disable_device(pdev);
4654 dev = alloc_etherdev(sizeof(nic_t));
4656 DBG_PRINT(ERR_DBG, "Device allocation failed\n");
4657 pci_disable_device(pdev);
4658 pci_release_regions(pdev);
4662 pci_set_master(pdev);
4663 pci_set_drvdata(pdev, dev);
4664 SET_MODULE_OWNER(dev);
4665 SET_NETDEV_DEV(dev, &pdev->dev);
4667 /* Private member variable initialized to s2io NIC structure */
4669 memset(sp, 0, sizeof(nic_t));
4672 sp->vendor_id = pdev->vendor;
4673 sp->device_id = pdev->device;
4674 sp->high_dma_flag = dma_flag;
4675 sp->irq = pdev->irq;
4676 sp->device_enabled_once = FALSE;
4677 strcpy(sp->name, dev_name);
4679 /* Initialize some PCI/PCI-X fields of the NIC. */
4683 * Setting the device configuration parameters.
4684 * Most of these parameters can be specified by the user during
4685 * module insertion as they are module loadable parameters. If
4686 * these parameters are not not specified during load time, they
4687 * are initialized with default values.
4689 mac_control = &sp->mac_control;
4690 config = &sp->config;
4692 /* Tx side parameters. */
4693 tx_fifo_len[0] = DEFAULT_FIFO_LEN; /* Default value. */
4694 config->tx_fifo_num = tx_fifo_num;
4695 for (i = 0; i < MAX_TX_FIFOS; i++) {
4696 config->tx_cfg[i].fifo_len = tx_fifo_len[i];
4697 config->tx_cfg[i].fifo_priority = i;
4700 config->tx_intr_type = TXD_INT_TYPE_UTILZ;
4701 for (i = 0; i < config->tx_fifo_num; i++) {
4702 config->tx_cfg[i].f_no_snoop =
4703 (NO_SNOOP_TXD | NO_SNOOP_TXD_BUFFER);
4704 if (config->tx_cfg[i].fifo_len < 65) {
4705 config->tx_intr_type = TXD_INT_TYPE_PER_LIST;
4709 config->max_txds = MAX_SKB_FRAGS;
4711 /* Rx side parameters. */
4712 rx_ring_sz[0] = SMALL_BLK_CNT; /* Default value. */
4713 config->rx_ring_num = rx_ring_num;
4714 for (i = 0; i < MAX_RX_RINGS; i++) {
4715 config->rx_cfg[i].num_rxd = rx_ring_sz[i] *
4716 (MAX_RXDS_PER_BLOCK + 1);
4717 config->rx_cfg[i].ring_priority = i;
4720 for (i = 0; i < rx_ring_num; i++) {
4721 config->rx_cfg[i].ring_org = RING_ORG_BUFF1;
4722 config->rx_cfg[i].f_no_snoop =
4723 (NO_SNOOP_RXD | NO_SNOOP_RXD_BUFFER);
4726 /* Setting Mac Control parameters */
4727 mac_control->rmac_pause_time = rmac_pause_time;
4728 mac_control->mc_pause_threshold_q0q3 = mc_pause_threshold_q0q3;
4729 mac_control->mc_pause_threshold_q4q7 = mc_pause_threshold_q4q7;
4732 /* Initialize Ring buffer parameters. */
4733 for (i = 0; i < config->rx_ring_num; i++)
4734 atomic_set(&sp->rx_bufs_left[i], 0);
4736 /* initialize the shared memory used by the NIC and the host */
4737 if (init_shared_mem(sp)) {
4738 DBG_PRINT(ERR_DBG, "%s: Memory allocation failed\n",
4741 goto mem_alloc_failed;
4744 sp->bar0 = ioremap(pci_resource_start(pdev, 0),
4745 pci_resource_len(pdev, 0));
4747 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem1\n",
4750 goto bar0_remap_failed;
4753 sp->bar1 = ioremap(pci_resource_start(pdev, 2),
4754 pci_resource_len(pdev, 2));
4756 DBG_PRINT(ERR_DBG, "%s: S2IO: cannot remap io mem2\n",
4759 goto bar1_remap_failed;
4762 dev->irq = pdev->irq;
4763 dev->base_addr = (unsigned long) sp->bar0;
4765 /* Initializing the BAR1 address as the start of the FIFO pointer. */
4766 for (j = 0; j < MAX_TX_FIFOS; j++) {
4767 mac_control->tx_FIFO_start[j] = (TxFIFO_element_t __iomem *)
4768 (sp->bar1 + (j * 0x00020000));
4771 /* Driver entry points */
4772 dev->open = &s2io_open;
4773 dev->stop = &s2io_close;
4774 dev->hard_start_xmit = &s2io_xmit;
4775 dev->get_stats = &s2io_get_stats;
4776 dev->set_multicast_list = &s2io_set_multicast;
4777 dev->do_ioctl = &s2io_ioctl;
4778 dev->change_mtu = &s2io_change_mtu;
4779 SET_ETHTOOL_OPS(dev, &netdev_ethtool_ops);
4781 * will use eth_mac_addr() for dev->set_mac_address
4782 * mac address will be set every time dev->open() is called
4784 #ifdef CONFIG_S2IO_NAPI
4785 dev->poll = s2io_poll;
4789 dev->features |= NETIF_F_SG | NETIF_F_IP_CSUM;
4790 if (sp->high_dma_flag == TRUE)
4791 dev->features |= NETIF_F_HIGHDMA;
4793 dev->features |= NETIF_F_TSO;
4796 dev->tx_timeout = &s2io_tx_watchdog;
4797 dev->watchdog_timeo = WATCH_DOG_TIMEOUT;
4798 INIT_WORK(&sp->rst_timer_task,
4799 (void (*)(void *)) s2io_restart_nic, dev);
4800 INIT_WORK(&sp->set_link_task,
4801 (void (*)(void *)) s2io_set_link, sp);
4803 pci_save_state(sp->pdev);
4805 /* Setting swapper control on the NIC, for proper reset operation */
4806 if (s2io_set_swapper(sp)) {
4807 DBG_PRINT(ERR_DBG, "%s:swapper settings are wrong\n",
4810 goto set_swap_failed;
4813 /* Fix for all "FFs" MAC address problems observed on Alpha platforms */
4814 fix_mac_address(sp);
/*
 * NOTE(review): the swapper is programmed a second time below so the
 * RMAC address registers read back in the correct byte order.
 */
4818 * Setting swapper control on the NIC, so the MAC address can be read.
4820 if (s2io_set_swapper(sp)) {
4822 "%s: S2IO: swapper settings are wrong\n",
4825 goto set_swap_failed;
/*
 * Read the factory MAC address out of the RMAC command memory: issue a
 * read strobe for offset 0 and wait for the command to complete before
 * reading the data register.
 */
4829 * MAC address initialization.
4830 * For now only one mac address will be read and used.
4833 val64 = RMAC_ADDR_CMD_MEM_RD | RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4834 RMAC_ADDR_CMD_MEM_OFFSET(0 + MAC_MAC_ADDR_START_OFFSET);
4835 writeq(val64, &bar0->rmac_addr_cmd_mem);
4836 wait_for_cmd_complete(sp);
4838 tmp64 = readq(&bar0->rmac_addr_data0_mem);
/* Split the 64-bit data register into two 32-bit halves. */
4839 mac_down = (u32) tmp64;
4840 mac_up = (u32) (tmp64 >> 32);
/*
 * BUG FIX: the original passed sizeof(ETH_ALEN), which is sizeof(int)
 * (4 on most platforms), not the 6 bytes of a MAC address, so the last
 * two bytes were never cleared. Pass ETH_ALEN itself.
 */
4842 memset(sp->def_mac_addr[0].mac_addr, 0, ETH_ALEN);
/* Unpack the six address bytes; note the reversed byte order within each half. */
4844 sp->def_mac_addr[0].mac_addr[3] = (u8) (mac_up);
4845 sp->def_mac_addr[0].mac_addr[2] = (u8) (mac_up >> 8);
4846 sp->def_mac_addr[0].mac_addr[1] = (u8) (mac_up >> 16);
4847 sp->def_mac_addr[0].mac_addr[0] = (u8) (mac_up >> 24);
4848 sp->def_mac_addr[0].mac_addr[5] = (u8) (mac_down >> 16);
4849 sp->def_mac_addr[0].mac_addr[4] = (u8) (mac_down >> 24);
4852 "DEFAULT MAC ADDR:0x%02x-%02x-%02x-%02x-%02x-%02x\n",
4853 sp->def_mac_addr[0].mac_addr[0],
4854 sp->def_mac_addr[0].mac_addr[1],
4855 sp->def_mac_addr[0].mac_addr[2],
4856 sp->def_mac_addr[0].mac_addr[3],
4857 sp->def_mac_addr[0].mac_addr[4],
4858 sp->def_mac_addr[0].mac_addr[5]);
4860 /* Set the factory defined MAC address initially */
4861 dev->addr_len = ETH_ALEN;
/*
 * NOTE(review): copies from sp->def_mac_addr (the struct array), not
 * from .mac_addr explicitly -- correct only while mac_addr is the first
 * member of that struct. Verify against the struct definition.
 */
4862 memcpy(dev->dev_addr, sp->def_mac_addr, ETH_ALEN);
4865 * Initialize the tasklet status and link state flags
4866 * and the card state parameter
4868 atomic_set(&(sp->card_state), 0);
4869 sp->tasklet_status = 0;
4873 /* Initialize spinlocks */
4874 spin_lock_init(&sp->tx_lock);
4875 #ifndef CONFIG_S2IO_NAPI
/* put_lock is only needed on the non-NAPI receive path. */
4876 spin_lock_init(&sp->put_lock);
4880 * SXE-002: Configure link and activity LED to init state
4883 subid = sp->pdev->subsystem_device;
4884 if ((subid & 0xFF) >= 0x07) {
4885 val64 = readq(&bar0->gpio_control);
4886 val64 |= 0x0000800000000000ULL;
4887 writeq(val64, &bar0->gpio_control);
4888 val64 = 0x0411040400000000ULL;
/* LED programming register lives at BAR0 + 0x2700 (errata SXE-002). */
4889 writeq(val64, (void __iomem *) bar0 + 0x2700);
4890 val64 = readq(&bar0->gpio_control);
4893 sp->rx_csum = 1; /* Rx chksum verify enabled by default */
4895 if (register_netdev(dev)) {
4896 DBG_PRINT(ERR_DBG, "Device registration failed\n");
4898 goto register_failed;
/*
 * Start with the carrier marked off; the link-change interrupt updates
 * the state once the hardware reports link.
 */
4902 * Make Link state as off at this point, when the Link change
4903 * interrupt comes the state will be automatically changed to
4906 netif_carrier_off(dev);
4907 sp->last_link_state = LINK_DOWN;
/* Error unwind: release everything acquired above, newest first. */
4918 free_shared_mem(sp);
4919 pci_disable_device(pdev);
4920 pci_release_regions(pdev);
4921 pci_set_drvdata(pdev, NULL);
4928 * s2io_rem_nic - Free the PCI device
4929 * @pdev: structure containing the PCI related information of the device.
4930 * Description: This function is called by the PCI subsystem to release a
4931 * PCI device and free up all resources held by the device. This could
4932 * be in response to a hot-plug event or when the driver is to be removed.
/*
 * s2io_rem_nic - PCI remove callback: unregister the net device and
 * release the PCI resources acquired at probe time.
 * NOTE(review): this extract elides several lines (the opening brace,
 * the lookup of `sp`, the early return after the NULL check, and the
 * closing brace) -- verify against the full source.
 */
4936 static void __devexit s2io_rem_nic(struct pci_dev *pdev)
/* Retrieve the net_device stored via pci_set_drvdata() during probe. */
4938 struct net_device *dev =
4939 (struct net_device *) pci_get_drvdata(pdev);
/* Probe never completed (drvdata is NULL): nothing to tear down. */
4943 DBG_PRINT(ERR_DBG, "Driver Data is NULL!!\n");
/* Detach from the network stack first so no new I/O can start. */
4948 unregister_netdev(dev);
/* Free the driver's shared descriptor memory. */
4950 free_shared_mem(sp);
/* Release PCI-level resources and clear the driver data pointer. */
4953 pci_disable_device(pdev);
4954 pci_release_regions(pdev);
4955 pci_set_drvdata(pdev, NULL);
4961 * s2io_starter - Entry point for the driver
4962 * Description: This function is the entry point for the driver. It verifies
4963 * the module loadable parameters and initializes PCI configuration space.
4966 int __init s2io_starter(void)
4968 return pci_module_init(&s2io_driver);
4972 * s2io_closer - Cleanup routine for the driver
4973 * Description: This function is the cleanup routine for the driver. It unregisters the driver.
4976 void s2io_closer(void)
4978 pci_unregister_driver(&s2io_driver);
4979 DBG_PRINT(INIT_DBG, "cleanup done\n");
/* Register the module load/unload entry points with the kernel. */
4982 module_init(s2io_starter);
4983 module_exit(s2io_closer);