drivers/net/ethernet/emulex/benet/be_main.c
/*
 * Copyright (C) 2005 - 2011 Emulex
 * All rights reserved.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation.  The full GNU General
 * Public License is included in this distribution in the file called COPYING.
 *
 * Contact Information:
 * linux-drivers@emulex.com
 *
 * Emulex
 * 3333 Susan Street
 * Costa Mesa, CA 92626
 */

#include <linux/prefetch.h>
#include <linux/module.h>
#include "be.h"
#include "be_cmds.h"
#include <asm/div64.h>

MODULE_VERSION(DRV_VER);
MODULE_DESCRIPTION(DRV_DESC " " DRV_VER);
MODULE_AUTHOR("ServerEngines Corporation");
MODULE_LICENSE("GPL");

static ushort rx_frag_size = 2048;
static unsigned int num_vfs;
module_param(rx_frag_size, ushort, S_IRUGO);
module_param(num_vfs, uint, S_IRUGO);
MODULE_PARM_DESC(rx_frag_size, "Size of a fragment that holds rcvd data.");
MODULE_PARM_DESC(num_vfs, "Number of PCI VFs to initialize");

static DEFINE_PCI_DEVICE_TABLE(be_dev_ids) = {
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, BE_DEVICE_ID2) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID1) },
        { PCI_DEVICE(BE_VENDOR_ID, OC_DEVICE_ID2) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID3) },
        { PCI_DEVICE(EMULEX_VENDOR_ID, OC_DEVICE_ID4) },
        { 0 }
};
MODULE_DEVICE_TABLE(pci, be_dev_ids);
/* UE Status Low CSR */
static const char * const ue_status_low_desc[] = {
        "CEV",
        "CTX",
        "DBUF",
        "ERX",
        "Host",
        "MPU",
        "NDMA",
        "PTC ",
        "RDMA ",
        "RXF ",
        "RXIPS ",
        "RXULP0 ",
        "RXULP1 ",
        "RXULP2 ",
        "TIM ",
        "TPOST ",
        "TPRE ",
        "TXIPS ",
        "TXULP0 ",
        "TXULP1 ",
        "UC ",
        "WDMA ",
        "TXULP2 ",
        "HOST1 ",
        "P0_OB_LINK ",
        "P1_OB_LINK ",
        "HOST_GPIO ",
        "MBOX ",
        "AXGMAC0",
        "AXGMAC1",
        "JTAG",
        "MPU_INTPEND"
};
/* UE Status High CSR */
static const char * const ue_status_hi_desc[] = {
        "LPCMEMHOST",
        "MGMT_MAC",
        "PCS0ONLINE",
        "MPU_IRAM",
        "PCS1ONLINE",
        "PCTL0",
        "PCTL1",
        "PMEM",
        "RR",
        "TXPB",
        "RXPP",
        "XAUI",
        "TXP",
        "ARM",
        "IPC",
        "HOST2",
        "HOST3",
        "HOST4",
        "HOST5",
        "HOST6",
        "HOST7",
        "HOST8",
        "HOST9",
        "NETC",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown",
        "Unknown"
};

/* Is BE in a multi-channel mode */
static inline bool be_is_mc(struct be_adapter *adapter)
{
        return (adapter->function_mode & FLEX10_MODE ||
                adapter->function_mode & VNIC_MODE ||
                adapter->function_mode & UMC_ENABLED);
}

static void be_queue_free(struct be_adapter *adapter, struct be_queue_info *q)
{
        struct be_dma_mem *mem = &q->dma_mem;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

static int be_queue_alloc(struct be_adapter *adapter, struct be_queue_info *q,
                u16 len, u16 entry_size)
{
        struct be_dma_mem *mem = &q->dma_mem;

        memset(q, 0, sizeof(*q));
        q->len = len;
        q->entry_size = entry_size;
        mem->size = len * entry_size;
        mem->va = dma_alloc_coherent(&adapter->pdev->dev, mem->size, &mem->dma,
                                     GFP_KERNEL);
        if (!mem->va)
                return -ENOMEM;
        memset(mem->va, 0, mem->size);
        return 0;
}

static void be_intr_set(struct be_adapter *adapter, bool enable)
{
        u32 reg, enabled;

        if (adapter->eeh_err)
                return;

        pci_read_config_dword(adapter->pdev, PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET,
                                &reg);
        enabled = reg & MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;

        if (!enabled && enable)
                reg |= MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else if (enabled && !enable)
                reg &= ~MEMBAR_CTRL_INT_CTRL_HOSTINTR_MASK;
        else
                return;

        pci_write_config_dword(adapter->pdev,
                        PCICFG_MEMBAR_CTRL_INT_CTRL_OFFSET, reg);
}

static void be_rxq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_RQ_RING_ID_MASK;
        val |= posted << DB_RQ_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_RQ_OFFSET);
}
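
/*
 * The wmb() in the rxq/txq doorbell helpers orders the descriptor writes
 * against the doorbell: the posted ring entries must be observable in
 * memory before the MMIO doorbell write lets the adapter start
 * DMA-fetching them.
 */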

static void be_txq_notify(struct be_adapter *adapter, u16 qid, u16 posted)
{
        u32 val = 0;
        val |= qid & DB_TXULP_RING_ID_MASK;
        val |= (posted & DB_TXULP_NUM_POSTED_MASK) << DB_TXULP_NUM_POSTED_SHIFT;

        wmb();
        iowrite32(val, adapter->db + DB_TXULP1_OFFSET);
}

static void be_eq_notify(struct be_adapter *adapter, u16 qid,
                bool arm, bool clear_int, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_EQ_RING_ID_MASK;
        val |= ((qid & DB_EQ_RING_ID_EXT_MASK) <<
                        DB_EQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_EQ_REARM_SHIFT;
        if (clear_int)
                val |= 1 << DB_EQ_CLR_SHIFT;
        val |= 1 << DB_EQ_EVNT_SHIFT;
        val |= num_popped << DB_EQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_EQ_OFFSET);
}

void be_cq_notify(struct be_adapter *adapter, u16 qid, bool arm, u16 num_popped)
{
        u32 val = 0;
        val |= qid & DB_CQ_RING_ID_MASK;
        val |= ((qid & DB_CQ_RING_ID_EXT_MASK) <<
                        DB_CQ_RING_ID_EXT_MASK_SHIFT);

        if (adapter->eeh_err)
                return;

        if (arm)
                val |= 1 << DB_CQ_REARM_SHIFT;
        val |= num_popped << DB_CQ_NUM_POPPED_SHIFT;
        iowrite32(val, adapter->db + DB_CQ_OFFSET);
}

static int be_mac_addr_set(struct net_device *netdev, void *p)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct sockaddr *addr = p;
        int status = 0;
        u8 current_mac[ETH_ALEN];
        u32 pmac_id = adapter->pmac_id;

        if (!is_valid_ether_addr(addr->sa_data))
                return -EADDRNOTAVAIL;

        status = be_cmd_mac_addr_query(adapter, current_mac,
                                MAC_ADDRESS_TYPE_NETWORK, false,
                                adapter->if_handle, 0);
        if (status)
                goto err;

        if (memcmp(addr->sa_data, current_mac, ETH_ALEN)) {
                status = be_cmd_pmac_add(adapter, (u8 *)addr->sa_data,
                                adapter->if_handle, &adapter->pmac_id, 0);
                if (status)
                        goto err;

                be_cmd_pmac_del(adapter, adapter->if_handle, pmac_id, 0);
        }
        memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
        return 0;
err:
        dev_err(&adapter->pdev->dev, "MAC %pM set Failed\n", addr->sa_data);
        return status;
}

static void populate_be2_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v0 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v0 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v0 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rxpp_fifo_overflow_drop = port_stats->rx_fifo_overflow;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_input_fifo_overflow_drop = port_stats->rx_input_fifo_overflow;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;

        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;

        if (adapter->port_num)
                drvs->jabber_events = rxf_stats->port1_jabber_events;
        else
                drvs->jabber_events = rxf_stats->port0_jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_be3_stats(struct be_adapter *adapter)
{
        struct be_hw_stats_v1 *hw_stats = hw_stats_from_cmd(adapter);
        struct be_pmem_stats *pmem_sts = &hw_stats->pmem;
        struct be_rxf_stats_v1 *rxf_stats = &hw_stats->rxf;
        struct be_port_rxf_stats_v1 *port_stats =
                                        &rxf_stats->port[adapter->port_num];
        struct be_drv_stats *drvs = &adapter->drv_stats;

        be_dws_le_to_cpu(hw_stats, sizeof(*hw_stats));
        drvs->pmem_fifo_overflow_drop = port_stats->pmem_fifo_overflow_drop;
        drvs->rx_priority_pause_frames = port_stats->rx_priority_pause_frames;
        drvs->rx_pause_frames = port_stats->rx_pause_frames;
        drvs->rx_crc_errors = port_stats->rx_crc_errors;
        drvs->rx_control_frames = port_stats->rx_control_frames;
        drvs->rx_in_range_errors = port_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = port_stats->rx_frame_too_long;
        drvs->rx_dropped_runt = port_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = port_stats->rx_ip_checksum_errs;
        drvs->rx_tcp_checksum_errs = port_stats->rx_tcp_checksum_errs;
        drvs->rx_udp_checksum_errs = port_stats->rx_udp_checksum_errs;
        drvs->rx_dropped_tcp_length = port_stats->rx_dropped_tcp_length;
        drvs->rx_dropped_too_small = port_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = port_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = port_stats->rx_out_range_errors;
        drvs->rx_dropped_header_too_small =
                port_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop =
                port_stats->rx_input_fifo_overflow_drop;
        drvs->rx_address_match_errors = port_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors =
                port_stats->rx_alignment_symbol_errors;
        drvs->rxpp_fifo_overflow_drop = port_stats->rxpp_fifo_overflow_drop;
        drvs->tx_pauseframes = port_stats->tx_pauseframes;
        drvs->tx_controlframes = port_stats->tx_controlframes;
        drvs->jabber_events = port_stats->jabber_events;
        drvs->rx_drops_no_pbuf = rxf_stats->rx_drops_no_pbuf;
        drvs->rx_drops_no_txpb = rxf_stats->rx_drops_no_txpb;
        drvs->rx_drops_no_erx_descr = rxf_stats->rx_drops_no_erx_descr;
        drvs->rx_drops_invalid_ring = rxf_stats->rx_drops_invalid_ring;
        drvs->forwarded_packets = rxf_stats->forwarded_packets;
        drvs->rx_drops_mtu = rxf_stats->rx_drops_mtu;
        drvs->rx_drops_no_tpre_descr = rxf_stats->rx_drops_no_tpre_descr;
        drvs->rx_drops_too_many_frags = rxf_stats->rx_drops_too_many_frags;
        adapter->drv_stats.eth_red_drops = pmem_sts->eth_red_drops;
}

static void populate_lancer_stats(struct be_adapter *adapter)
{
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct lancer_pport_stats *pport_stats =
                                        pport_stats_from_cmd(adapter);

        be_dws_le_to_cpu(pport_stats, sizeof(*pport_stats));
        drvs->rx_pause_frames = pport_stats->rx_pause_frames_lo;
        drvs->rx_crc_errors = pport_stats->rx_crc_errors_lo;
        drvs->rx_control_frames = pport_stats->rx_control_frames_lo;
        drvs->rx_in_range_errors = pport_stats->rx_in_range_errors;
        drvs->rx_frame_too_long = pport_stats->rx_frames_too_long_lo;
        drvs->rx_dropped_runt = pport_stats->rx_dropped_runt;
        drvs->rx_ip_checksum_errs = pport_stats->rx_ip_checksum_errors;
        drvs->rx_tcp_checksum_errs = pport_stats->rx_tcp_checksum_errors;
        drvs->rx_udp_checksum_errs = pport_stats->rx_udp_checksum_errors;
        drvs->rx_dropped_tcp_length =
                                pport_stats->rx_dropped_invalid_tcp_length;
        drvs->rx_dropped_too_small = pport_stats->rx_dropped_too_small;
        drvs->rx_dropped_too_short = pport_stats->rx_dropped_too_short;
        drvs->rx_out_range_errors = pport_stats->rx_out_of_range_errors;
        drvs->rx_dropped_header_too_small =
                                pport_stats->rx_dropped_header_too_small;
        drvs->rx_input_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->rx_address_match_errors = pport_stats->rx_address_match_errors;
        drvs->rx_alignment_symbol_errors = pport_stats->rx_symbol_errors_lo;
        drvs->rxpp_fifo_overflow_drop = pport_stats->rx_fifo_overflow;
        drvs->tx_pauseframes = pport_stats->tx_pause_frames_lo;
        drvs->tx_controlframes = pport_stats->tx_control_frames_lo;
        drvs->jabber_events = pport_stats->rx_jabbers;
        drvs->rx_drops_invalid_ring = pport_stats->rx_drops_invalid_queue;
        drvs->forwarded_packets = pport_stats->num_forwards_lo;
        drvs->rx_drops_mtu = pport_stats->rx_drops_mtu_lo;
        drvs->rx_drops_too_many_frags =
                                pport_stats->rx_drops_too_many_frags_lo;
}

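/*
 * Fold a free-running 16-bit HW counter into a 32-bit accumulator: the low
 * 16 bits mirror the HW value, the high 16 bits count wraps. Worked example
 * (illustrative values, not from the source): with *acc == 0x0001fffe (one
 * wrap seen, last HW value 0xfffe) and a new reading of 0x0003,
 * val < lo(*acc) detects the wrap, so newacc = 0x00010000 + 0x0003 + 65536
 * = 0x00020003, i.e. two wraps plus the current reading.
 */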
static void accumulate_16bit_val(u32 *acc, u16 val)
{
#define lo(x)                   (x & 0xFFFF)
#define hi(x)                   (x & 0xFFFF0000)
        bool wrapped = val < lo(*acc);
        u32 newacc = hi(*acc) + val;

        if (wrapped)
                newacc += 65536;
        ACCESS_ONCE(*acc) = newacc;
}

void be_parse_stats(struct be_adapter *adapter)
{
        struct be_erx_stats_v1 *erx = be_erx_stats_from_cmd(adapter);
        struct be_rx_obj *rxo;
        int i;

        if (adapter->generation == BE_GEN3) {
                if (lancer_chip(adapter))
                        populate_lancer_stats(adapter);
                else
                        populate_be3_stats(adapter);
        } else {
                populate_be2_stats(adapter);
        }

        /* as erx_v1 is longer than v0, ok to use v1 defn for v0 access */
        for_all_rx_queues(adapter, rxo, i) {
                /* the erx HW counter below wraps around after 65535;
                 * the driver accumulates it into a 32-bit value
                 */
                accumulate_16bit_val(&rx_stats(rxo)->rx_drops_no_frags,
                                (u16)erx->rx_drops_no_fragments[rxo->q.id]);
        }
}

static struct rtnl_link_stats64 *be_get_stats64(struct net_device *netdev,
                                        struct rtnl_link_stats64 *stats)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_drv_stats *drvs = &adapter->drv_stats;
        struct be_rx_obj *rxo;
        struct be_tx_obj *txo;
        u64 pkts, bytes;
        unsigned int start;
        int i;

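        /* On 32-bit hosts the 64-bit per-queue counters are guarded by a
         * u64_stats_sync seqcount; the fetch_begin/fetch_retry loops below
         * re-read pkts/bytes until a snapshot no writer raced with is
         * obtained, without taking a lock in the hot path.
         */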
        for_all_rx_queues(adapter, rxo, i) {
                const struct be_rx_stats *rx_stats = rx_stats(rxo);
                do {
                        start = u64_stats_fetch_begin_bh(&rx_stats->sync);
                        pkts = rx_stats(rxo)->rx_pkts;
                        bytes = rx_stats(rxo)->rx_bytes;
                } while (u64_stats_fetch_retry_bh(&rx_stats->sync, start));
                stats->rx_packets += pkts;
                stats->rx_bytes += bytes;
                stats->multicast += rx_stats(rxo)->rx_mcast_pkts;
                stats->rx_dropped += rx_stats(rxo)->rx_drops_no_skbs +
                                        rx_stats(rxo)->rx_drops_no_frags;
        }

        for_all_tx_queues(adapter, txo, i) {
                const struct be_tx_stats *tx_stats = tx_stats(txo);
                do {
                        start = u64_stats_fetch_begin_bh(&tx_stats->sync);
                        pkts = tx_stats(txo)->tx_pkts;
                        bytes = tx_stats(txo)->tx_bytes;
                } while (u64_stats_fetch_retry_bh(&tx_stats->sync, start));
                stats->tx_packets += pkts;
                stats->tx_bytes += bytes;
        }

        /* bad pkts received */
        stats->rx_errors = drvs->rx_crc_errors +
                drvs->rx_alignment_symbol_errors +
                drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long +
                drvs->rx_dropped_too_small +
                drvs->rx_dropped_too_short +
                drvs->rx_dropped_header_too_small +
                drvs->rx_dropped_tcp_length +
                drvs->rx_dropped_runt;

        /* detailed rx errors */
        stats->rx_length_errors = drvs->rx_in_range_errors +
                drvs->rx_out_range_errors +
                drvs->rx_frame_too_long;

        stats->rx_crc_errors = drvs->rx_crc_errors;

        /* frame alignment errors */
        stats->rx_frame_errors = drvs->rx_alignment_symbol_errors;

        /* receiver fifo overrun */
        /* drops_no_pbuf is not per i/f, it's per BE card */
        stats->rx_fifo_errors = drvs->rxpp_fifo_overflow_drop +
                                drvs->rx_input_fifo_overflow_drop +
                                drvs->rx_drops_no_pbuf;
        return stats;
}

void be_link_status_update(struct be_adapter *adapter, u32 link_status)
{
        struct net_device *netdev = adapter->netdev;

        /* when link status changes, link speed must be re-queried from card */
        adapter->link_speed = -1;
        if ((link_status & LINK_STATUS_MASK) == LINK_UP) {
                netif_carrier_on(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link up\n", netdev->name);
        } else {
                netif_carrier_off(netdev);
                dev_info(&adapter->pdev->dev, "%s: Link down\n", netdev->name);
        }
}

static void be_tx_stats_update(struct be_tx_obj *txo,
                        u32 wrb_cnt, u32 copied, u32 gso_segs, bool stopped)
{
        struct be_tx_stats *stats = tx_stats(txo);

        u64_stats_update_begin(&stats->sync);
        stats->tx_reqs++;
        stats->tx_wrbs += wrb_cnt;
        stats->tx_bytes += copied;
        stats->tx_pkts += (gso_segs ? gso_segs : 1);
        if (stopped)
                stats->tx_stops++;
        u64_stats_update_end(&stats->sync);
}

/* Determine number of WRB entries needed to xmit data in an skb */
static u32 wrb_cnt_for_skb(struct be_adapter *adapter, struct sk_buff *skb,
                                                                bool *dummy)
{
        int cnt = (skb->len > skb->data_len);

        cnt += skb_shinfo(skb)->nr_frags;

        /* to account for hdr wrb */
        cnt++;
        if (lancer_chip(adapter) || !(cnt & 1)) {
                *dummy = false;
        } else {
                /* add a dummy to make it an even num */
                cnt++;
                *dummy = true;
        }
        BUG_ON(cnt > BE_MAX_TX_FRAG_COUNT);
        return cnt;
}
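
/*
 * Worked example (illustrative): an skb with linear data and two paged
 * frags needs 1 + 2 data WRBs plus the header WRB = 4; that is already
 * even, so no dummy is added. With a single paged frag the count is
 * 1 + 1 + 1 = 3, so on BE2/BE3 (not Lancer) a zero-length dummy WRB is
 * appended to keep the per-packet WRB count even.
 */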

static inline void wrb_fill(struct be_eth_wrb *wrb, u64 addr, int len)
{
        wrb->frag_pa_hi = upper_32_bits(addr);
        wrb->frag_pa_lo = addr & 0xFFFFFFFF;
        wrb->frag_len = len & ETH_WRB_FRAG_LEN_MASK;
}

static inline u16 be_get_tx_vlan_tag(struct be_adapter *adapter,
                                        struct sk_buff *skb)
{
        u8 vlan_prio;
        u16 vlan_tag;

        vlan_tag = vlan_tx_tag_get(skb);
        vlan_prio = (vlan_tag & VLAN_PRIO_MASK) >> VLAN_PRIO_SHIFT;
        /* If vlan priority provided by OS is NOT in available bmap */
        if (!(adapter->vlan_prio_bmap & (1 << vlan_prio)))
                vlan_tag = (vlan_tag & ~VLAN_PRIO_MASK) |
                                adapter->recommended_prio;

        return vlan_tag;
}
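
/*
 * Worked example (illustrative): a TCI of 0x6005 carries priority 3 in
 * bits 15:13 and VID 5 in the low bits. If bit 3 is clear in
 * vlan_prio_bmap, the priority field is replaced with
 * adapter->recommended_prio while the VID is preserved, so the frame
 * still lands in VLAN 5.
 */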

static void wrb_fill_hdr(struct be_adapter *adapter, struct be_eth_hdr_wrb *hdr,
                struct sk_buff *skb, u32 wrb_cnt, u32 len)
{
        u16 vlan_tag;

        memset(hdr, 0, sizeof(*hdr));

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, crc, hdr, 1);

        if (skb_is_gso(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso, hdr, 1);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso_mss,
                        hdr, skb_shinfo(skb)->gso_size);
                if (skb_is_gso_v6(skb) && !lancer_chip(adapter))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, lso6, hdr, 1);
                if (lancer_chip(adapter) && adapter->sli_family ==
                                                        LANCER_A0_SLI_FAMILY) {
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, ipcs, hdr, 1);
                        if (is_tcp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                tcpcs, hdr, 1);
                        else if (is_udp_pkt(skb))
                                AMAP_SET_BITS(struct amap_eth_hdr_wrb,
                                                                udpcs, hdr, 1);
                }
        } else if (skb->ip_summed == CHECKSUM_PARTIAL) {
                if (is_tcp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, tcpcs, hdr, 1);
                else if (is_udp_pkt(skb))
                        AMAP_SET_BITS(struct amap_eth_hdr_wrb, udpcs, hdr, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan, hdr, 1);
                vlan_tag = be_get_tx_vlan_tag(adapter, skb);
                AMAP_SET_BITS(struct amap_eth_hdr_wrb, vlan_tag, hdr, vlan_tag);
        }

        AMAP_SET_BITS(struct amap_eth_hdr_wrb, event, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, complete, hdr, 1);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, num_wrb, hdr, wrb_cnt);
        AMAP_SET_BITS(struct amap_eth_hdr_wrb, len, hdr, len);
}

static void unmap_tx_frag(struct device *dev, struct be_eth_wrb *wrb,
                bool unmap_single)
{
        dma_addr_t dma;

        be_dws_le_to_cpu(wrb, sizeof(*wrb));

        dma = (u64)wrb->frag_pa_hi << 32 | (u64)wrb->frag_pa_lo;
        if (wrb->frag_len) {
                if (unmap_single)
                        dma_unmap_single(dev, dma, wrb->frag_len,
                                         DMA_TO_DEVICE);
                else
                        dma_unmap_page(dev, dma, wrb->frag_len, DMA_TO_DEVICE);
        }
}
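
/*
 * Note: dummy WRBs (added to even out the WRB count) are filled with
 * frag_len == 0, so the frag_len check above makes unmap_tx_frag() a
 * no-op for them; only WRBs that carry a real DMA mapping are unmapped.
 */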

static int make_tx_wrbs(struct be_adapter *adapter, struct be_queue_info *txq,
                struct sk_buff *skb, u32 wrb_cnt, bool dummy_wrb)
{
        dma_addr_t busaddr;
        int i, copied = 0;
        struct device *dev = &adapter->pdev->dev;
        struct sk_buff *first_skb = skb;
        struct be_eth_wrb *wrb;
        struct be_eth_hdr_wrb *hdr;
        bool map_single = false;
        u16 map_head;

        hdr = queue_head_node(txq);
        queue_head_inc(txq);
        map_head = txq->head;

        if (skb->len > skb->data_len) {
                int len = skb_headlen(skb);
                busaddr = dma_map_single(dev, skb->data, len, DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                map_single = true;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, len);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += len;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag =
                        &skb_shinfo(skb)->frags[i];
                busaddr = skb_frag_dma_map(dev, frag, 0,
                                           skb_frag_size(frag), DMA_TO_DEVICE);
                if (dma_mapping_error(dev, busaddr))
                        goto dma_err;
                wrb = queue_head_node(txq);
                wrb_fill(wrb, busaddr, skb_frag_size(frag));
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
                copied += skb_frag_size(frag);
        }

        if (dummy_wrb) {
                wrb = queue_head_node(txq);
                wrb_fill(wrb, 0, 0);
                be_dws_cpu_to_le(wrb, sizeof(*wrb));
                queue_head_inc(txq);
        }

        wrb_fill_hdr(adapter, hdr, first_skb, wrb_cnt, copied);
        be_dws_cpu_to_le(hdr, sizeof(*hdr));

        return copied;
dma_err:
        txq->head = map_head;
        while (copied) {
                wrb = queue_head_node(txq);
                unmap_tx_frag(dev, wrb, map_single);
                map_single = false;
                copied -= wrb->frag_len;
                queue_head_inc(txq);
        }
        return 0;
}

static netdev_tx_t be_xmit(struct sk_buff *skb,
                        struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_tx_obj *txo = &adapter->tx_obj[skb_get_queue_mapping(skb)];
        struct be_queue_info *txq = &txo->q;
        u32 wrb_cnt = 0, copied = 0;
        u32 start = txq->head;
        bool dummy_wrb, stopped = false;

        /* For vlan tagged pkts, BE
         * 1) calculates checksum even when CSO is not requested
         * 2) calculates checksum wrongly for padded pkt less than
         * 60 bytes long.
         * As a workaround disable TX vlan offloading in such cases.
         */
        if (unlikely(vlan_tx_tag_present(skb) &&
                     (skb->ip_summed != CHECKSUM_PARTIAL || skb->len <= 60))) {
                skb = skb_share_check(skb, GFP_ATOMIC);
                if (unlikely(!skb))
                        goto tx_drop;

                skb = __vlan_put_tag(skb, be_get_tx_vlan_tag(adapter, skb));
                if (unlikely(!skb))
                        goto tx_drop;

                skb->vlan_tci = 0;
        }

        wrb_cnt = wrb_cnt_for_skb(adapter, skb, &dummy_wrb);

        copied = make_tx_wrbs(adapter, txq, skb, wrb_cnt, dummy_wrb);
        if (copied) {
                /* record the sent skb in the sent_skb table */
                BUG_ON(txo->sent_skb_list[start]);
                txo->sent_skb_list[start] = skb;

                /* Ensure txq has space for the next skb; else stop the queue
                 * *BEFORE* ringing the tx doorbell, so that we serialize the
                 * tx compls of the current transmit which'll wake up the queue
                 */
                atomic_add(wrb_cnt, &txq->used);
                if ((BE_MAX_TX_FRAG_COUNT + atomic_read(&txq->used)) >=
                                                                txq->len) {
                        netif_stop_subqueue(netdev, skb_get_queue_mapping(skb));
                        stopped = true;
                }

                be_txq_notify(adapter, txq->id, wrb_cnt);

                be_tx_stats_update(txo, wrb_cnt, copied,
                                skb_shinfo(skb)->gso_segs, stopped);
        } else {
                txq->head = start;
                dev_kfree_skb_any(skb);
        }
tx_drop:
        return NETDEV_TX_OK;
}

static int be_change_mtu(struct net_device *netdev, int new_mtu)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        if (new_mtu < BE_MIN_MTU ||
                        new_mtu > (BE_MAX_JUMBO_FRAME_SIZE -
                                        (ETH_HLEN + ETH_FCS_LEN))) {
                dev_info(&adapter->pdev->dev,
                        "MTU must be between %d and %d bytes\n",
                        BE_MIN_MTU,
                        (BE_MAX_JUMBO_FRAME_SIZE - (ETH_HLEN + ETH_FCS_LEN)));
                return -EINVAL;
        }
        dev_info(&adapter->pdev->dev, "MTU changed from %d to %d bytes\n",
                        netdev->mtu, new_mtu);
        netdev->mtu = new_mtu;
        return 0;
}

/*
 * A max of 64 (BE_NUM_VLANS_SUPPORTED) vlans can be configured in BE.
 * If the user configures more, place BE in vlan promiscuous mode.
 */
static int be_vid_config(struct be_adapter *adapter, bool vf, u32 vf_num)
{
        u16 vtag[BE_NUM_VLANS_SUPPORTED];
        u16 ntags = 0, i;
        int status = 0;
        u32 if_handle;

        if (vf) {
                if_handle = adapter->vf_cfg[vf_num].vf_if_handle;
                vtag[0] = cpu_to_le16(adapter->vf_cfg[vf_num].vf_vlan_tag);
                status = be_cmd_vlan_config(adapter, if_handle, vtag, 1, 1, 0);
        }

        /* No need to further configure vids if in promiscuous mode */
        if (adapter->promiscuous)
                return 0;

        if (adapter->vlans_added <= adapter->max_vlans) {
                /* Construct VLAN Table to give to HW */
                for (i = 0; i < VLAN_N_VID; i++) {
                        if (adapter->vlan_tag[i]) {
                                vtag[ntags] = cpu_to_le16(i);
                                ntags++;
                        }
                }
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        vtag, ntags, 1, 0);
        } else {
                status = be_cmd_vlan_config(adapter, adapter->if_handle,
                                        NULL, 0, 1, 1);
        }

        return status;
}

static void be_vlan_add_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added++;
        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 1;
        if (adapter->vlans_added <= (adapter->max_vlans + 1))
                be_vid_config(adapter, false, 0);
}

static void be_vlan_rem_vid(struct net_device *netdev, u16 vid)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        adapter->vlans_added--;

        if (!be_physfn(adapter))
                return;

        adapter->vlan_tag[vid] = 0;
        if (adapter->vlans_added <= adapter->max_vlans)
                be_vid_config(adapter, false, 0);
}

static void be_set_rx_mode(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (netdev->flags & IFF_PROMISC) {
                be_cmd_rx_filter(adapter, IFF_PROMISC, ON);
                adapter->promiscuous = true;
                goto done;
        }

        /* BE was previously in promiscuous mode; disable it */
        if (adapter->promiscuous) {
                adapter->promiscuous = false;
                be_cmd_rx_filter(adapter, IFF_PROMISC, OFF);

                if (adapter->vlans_added)
                        be_vid_config(adapter, false, 0);
        }

        /* Enable multicast promisc if num configured exceeds what we support */
        if (netdev->flags & IFF_ALLMULTI ||
                        netdev_mc_count(netdev) > BE_MAX_MC) {
                be_cmd_rx_filter(adapter, IFF_ALLMULTI, ON);
                goto done;
        }

        be_cmd_rx_filter(adapter, IFF_MULTICAST, ON);
done:
        return;
}

static int be_set_vf_mac(struct net_device *netdev, int vf, u8 *mac)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (!is_valid_ether_addr(mac) || (vf >= num_vfs))
                return -EINVAL;

        if (lancer_chip(adapter)) {
                status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
        } else {
                status = be_cmd_pmac_del(adapter,
                                adapter->vf_cfg[vf].vf_if_handle,
                                adapter->vf_cfg[vf].vf_pmac_id, vf + 1);

                status = be_cmd_pmac_add(adapter, mac,
                                adapter->vf_cfg[vf].vf_if_handle,
                                &adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
        }

        if (status)
                dev_err(&adapter->pdev->dev, "MAC %pM set on VF %d Failed\n",
                                mac, vf);
        else
                memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);

        return status;
}

static int be_get_vf_config(struct net_device *netdev, int vf,
                        struct ifla_vf_info *vi)
{
        struct be_adapter *adapter = netdev_priv(netdev);

        if (!adapter->sriov_enabled)
                return -EPERM;

        if (vf >= num_vfs)
                return -EINVAL;

        vi->vf = vf;
        vi->tx_rate = adapter->vf_cfg[vf].vf_tx_rate;
        vi->vlan = adapter->vf_cfg[vf].vf_vlan_tag;
        vi->qos = 0;
        memcpy(&vi->mac, adapter->vf_cfg[vf].vf_mac_addr, ETH_ALEN);

        return 0;
}

static int be_set_vf_vlan(struct net_device *netdev,
                        int vf, u16 vlan, u8 qos)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (vlan > 4095))
                return -EINVAL;

        if (vlan) {
                adapter->vf_cfg[vf].vf_vlan_tag = vlan;
                adapter->vlans_added++;
        } else {
                adapter->vf_cfg[vf].vf_vlan_tag = 0;
                adapter->vlans_added--;
        }

        status = be_vid_config(adapter, true, vf);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "VLAN %d config on VF %d failed\n", vlan, vf);
        return status;
}

static int be_set_vf_tx_rate(struct net_device *netdev,
                        int vf, int rate)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        int status = 0;

        if (!adapter->sriov_enabled)
                return -EPERM;

        if ((vf >= num_vfs) || (rate < 0))
                return -EINVAL;

        if (rate > 10000)
                rate = 10000;

        adapter->vf_cfg[vf].vf_tx_rate = rate;
        status = be_cmd_set_qos(adapter, rate / 10, vf + 1);

        if (status)
                dev_info(&adapter->pdev->dev,
                                "tx rate %d on VF %d failed\n", rate, vf);
        return status;
}

static void be_rx_eqd_update(struct be_adapter *adapter, struct be_rx_obj *rxo)
{
        struct be_eq_obj *rx_eq = &rxo->rx_eq;
        struct be_rx_stats *stats = rx_stats(rxo);
        ulong now = jiffies;
        ulong delta = now - stats->rx_jiffies;
        u64 pkts;
        unsigned int start, eqd;

        if (!rx_eq->enable_aic)
                return;

        /* Wrapped around */
        if (time_before(now, stats->rx_jiffies)) {
                stats->rx_jiffies = now;
                return;
        }

        /* Update once a second */
        if (delta < HZ)
                return;

        do {
                start = u64_stats_fetch_begin_bh(&stats->sync);
                pkts = stats->rx_pkts;
        } while (u64_stats_fetch_retry_bh(&stats->sync, start));

        stats->rx_pps = (unsigned long)(pkts - stats->rx_pkts_prev) / (delta / HZ);
        stats->rx_pkts_prev = pkts;
        stats->rx_jiffies = now;
        eqd = stats->rx_pps / 110000;
        eqd = eqd << 3;
        if (eqd > rx_eq->max_eqd)
                eqd = rx_eq->max_eqd;
        if (eqd < rx_eq->min_eqd)
                eqd = rx_eq->min_eqd;
        if (eqd < 10)
                eqd = 0;
        if (eqd != rx_eq->cur_eqd) {
                be_cmd_modify_eqd(adapter, rx_eq->q.id, eqd);
                rx_eq->cur_eqd = eqd;
        }
}
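
/*
 * Worked example (illustrative numbers): at 2,200,000 pkts/s the adaptive
 * event-queue delay becomes (2200000 / 110000) << 3 = 160, then is clamped
 * to [min_eqd, max_eqd]. Below 220,000 pkts/s the computed value is at
 * most 1 << 3 = 8, which falls under 10 and is forced to 0, i.e. no
 * coalescing at low rates to keep latency down.
 */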

static void be_rx_stats_update(struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_stats *stats = rx_stats(rxo);

        u64_stats_update_begin(&stats->sync);
        stats->rx_compl++;
        stats->rx_bytes += rxcp->pkt_size;
        stats->rx_pkts++;
        if (rxcp->pkt_type == BE_MULTICAST_PACKET)
                stats->rx_mcast_pkts++;
        if (rxcp->err)
                stats->rx_compl_err++;
        u64_stats_update_end(&stats->sync);
}

static inline bool csum_passed(struct be_rx_compl_info *rxcp)
{
        /* L4 checksum is not reliable for non TCP/UDP packets.
         * Also ignore ipcksm for ipv6 pkts */
        return (rxcp->tcpf || rxcp->udpf) && rxcp->l4_csum &&
                                (rxcp->ip_csum || rxcp->ipv6);
}

static struct be_rx_page_info *
get_rx_page_info(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                u16 frag_idx)
{
        struct be_rx_page_info *rx_page_info;
        struct be_queue_info *rxq = &rxo->q;

        rx_page_info = &rxo->page_info_tbl[frag_idx];
        BUG_ON(!rx_page_info->page);

        if (rx_page_info->last_page_user) {
                dma_unmap_page(&adapter->pdev->dev,
                               dma_unmap_addr(rx_page_info, bus),
                               adapter->big_page_size, DMA_FROM_DEVICE);
                rx_page_info->last_page_user = false;
        }

        atomic_dec(&rxq->used);
        return rx_page_info;
}

/* Throw away the data in the Rx completion */
static void be_rx_compl_discard(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, num_rcvd = rxcp->num_rcvd;

        for (i = 0; i < num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                put_page(page_info->page);
                memset(page_info, 0, sizeof(*page_info));
                index_inc(&rxcp->rxq_idx, rxq->len);
        }
}

/*
 * skb_fill_rx_data forms a complete skb for an ether frame
 * indicated by rxcp.
 */
static void skb_fill_rx_data(struct be_adapter *adapter, struct be_rx_obj *rxo,
                        struct sk_buff *skb, struct be_rx_compl_info *rxcp)
{
        struct be_queue_info *rxq = &rxo->q;
        struct be_rx_page_info *page_info;
        u16 i, j;
        u16 hdr_len, curr_frag_len, remaining;
        u8 *start;

        page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
        start = page_address(page_info->page) + page_info->page_offset;
        prefetch(start);

        /* Copy data in the first descriptor of this completion */
        curr_frag_len = min(rxcp->pkt_size, rx_frag_size);

        /* Copy the header portion into skb_data */
        hdr_len = min(BE_HDR_LEN, curr_frag_len);
        memcpy(skb->data, start, hdr_len);
        skb->len = curr_frag_len;
        if (curr_frag_len <= BE_HDR_LEN) { /* tiny packet */
                /* Complete packet has now been moved to data */
                put_page(page_info->page);
                skb->data_len = 0;
                skb->tail += curr_frag_len;
        } else {
                skb_shinfo(skb)->nr_frags = 1;
                skb_frag_set_page(skb, 0, page_info->page);
                skb_shinfo(skb)->frags[0].page_offset =
                                        page_info->page_offset + hdr_len;
                skb_frag_size_set(&skb_shinfo(skb)->frags[0], curr_frag_len - hdr_len);
                skb->data_len = curr_frag_len - hdr_len;
                skb->truesize += rx_frag_size;
                skb->tail += hdr_len;
        }
        page_info->page = NULL;

        if (rxcp->pkt_size <= rx_frag_size) {
                BUG_ON(rxcp->num_rcvd != 1);
                return;
        }

        /* More frags present for this completion */
        index_inc(&rxcp->rxq_idx, rxq->len);
        remaining = rxcp->pkt_size - curr_frag_len;
        for (i = 1, j = 0; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);
                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (page_info->page_offset == 0) {
                        /* Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                        skb_shinfo(skb)->nr_frags++;
                } else {
                        put_page(page_info->page);
                }

                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->len += curr_frag_len;
                skb->data_len += curr_frag_len;
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                page_info->page = NULL;
        }
        BUG_ON(j > MAX_SKB_FRAGS);
}

/* Process the RX completion indicated by rxcp when GRO is disabled */
static void be_rx_compl_process(struct be_adapter *adapter,
                        struct be_rx_obj *rxo,
                        struct be_rx_compl_info *rxcp)
{
        struct net_device *netdev = adapter->netdev;
        struct sk_buff *skb;

        skb = netdev_alloc_skb_ip_align(netdev, BE_HDR_LEN);
        if (unlikely(!skb)) {
                rx_stats(rxo)->rx_drops_no_skbs++;
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        skb_fill_rx_data(adapter, rxo, skb, rxcp);

        if (likely((netdev->features & NETIF_F_RXCSUM) && csum_passed(rxcp)))
                skb->ip_summed = CHECKSUM_UNNECESSARY;
        else
                skb_checksum_none_assert(skb);

        skb->protocol = eth_type_trans(skb, netdev);
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        netif_receive_skb(skb);
}

/* Process the RX completion indicated by rxcp when GRO is enabled */
static void be_rx_compl_process_gro(struct be_adapter *adapter,
                struct be_rx_obj *rxo,
                struct be_rx_compl_info *rxcp)
{
        struct be_rx_page_info *page_info;
        struct sk_buff *skb = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct be_eq_obj *eq_obj = &rxo->rx_eq;
        u16 remaining, curr_frag_len;
        u16 i, j;

        skb = napi_get_frags(&eq_obj->napi);
        if (!skb) {
                be_rx_compl_discard(adapter, rxo, rxcp);
                return;
        }

        remaining = rxcp->pkt_size;
        for (i = 0, j = -1; i < rxcp->num_rcvd; i++) {
                page_info = get_rx_page_info(adapter, rxo, rxcp->rxq_idx);

                curr_frag_len = min(remaining, rx_frag_size);

                /* Coalesce all frags from the same physical page in one slot */
                if (i == 0 || page_info->page_offset == 0) {
                        /* First frag or Fresh page */
                        j++;
                        skb_frag_set_page(skb, j, page_info->page);
                        skb_shinfo(skb)->frags[j].page_offset =
                                                        page_info->page_offset;
                        skb_frag_size_set(&skb_shinfo(skb)->frags[j], 0);
                } else {
                        put_page(page_info->page);
                }
                skb_frag_size_add(&skb_shinfo(skb)->frags[j], curr_frag_len);
                skb->truesize += rx_frag_size;
                remaining -= curr_frag_len;
                index_inc(&rxcp->rxq_idx, rxq->len);
                memset(page_info, 0, sizeof(*page_info));
        }
        BUG_ON(j > MAX_SKB_FRAGS);

        skb_shinfo(skb)->nr_frags = j + 1;
        skb->len = rxcp->pkt_size;
        skb->data_len = rxcp->pkt_size;
        skb->ip_summed = CHECKSUM_UNNECESSARY;
        if (adapter->netdev->features & NETIF_F_RXHASH)
                skb->rxhash = rxcp->rss_hash;

        if (rxcp->vlanf)
                __vlan_hwaccel_put_tag(skb, rxcp->vlan_tag);

        napi_gro_frags(&eq_obj->napi);
}

static void be_parse_rx_compl_v1(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v1, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v1, port, compl);
}

static void be_parse_rx_compl_v0(struct be_adapter *adapter,
                                struct be_eth_rx_compl *compl,
                                struct be_rx_compl_info *rxcp)
{
        rxcp->pkt_size =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, pktsize, compl);
        rxcp->vlanf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtp, compl);
        rxcp->err = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, err, compl);
        rxcp->tcpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, tcpf, compl);
        rxcp->udpf = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, udpf, compl);
        rxcp->ip_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ipcksm, compl);
        rxcp->l4_csum =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, l4_cksm, compl);
        rxcp->ipv6 =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, ip_version, compl);
        rxcp->rxq_idx =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, fragndx, compl);
        rxcp->num_rcvd =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, numfrags, compl);
        rxcp->pkt_type =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, cast_enc, compl);
        rxcp->rss_hash =
                AMAP_GET_BITS(struct amap_eth_rx_compl_v0, rsshash, compl);
        if (rxcp->vlanf) {
                rxcp->vtm = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vtm,
                                          compl);
                rxcp->vlan_tag = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, vlan_tag,
                                               compl);
        }
        rxcp->port = AMAP_GET_BITS(struct amap_eth_rx_compl_v0, port, compl);
}

static struct be_rx_compl_info *be_rx_compl_get(struct be_rx_obj *rxo)
{
        struct be_eth_rx_compl *compl = queue_tail_node(&rxo->cq);
        struct be_rx_compl_info *rxcp = &rxo->rxcp;
        struct be_adapter *adapter = rxo->adapter;

        /* For checking the valid bit it is OK to use either definition as the
         * valid bit is at the same position in both v0 and v1 Rx compl */
        if (compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] == 0)
                return NULL;

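        /* The rmb() pairs with the adapter's DMA write of the completion:
         * the valid bit was checked above, and the read barrier keeps the
         * reads of the completion body below from being speculated ahead
         * of that check.
         */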
        rmb();
        be_dws_le_to_cpu(compl, sizeof(*compl));

        if (adapter->be3_native)
                be_parse_rx_compl_v1(adapter, compl, rxcp);
        else
                be_parse_rx_compl_v0(adapter, compl, rxcp);

        if (rxcp->vlanf) {
                /* vlanf could be wrongly set in some cards.
                 * ignore if vtm is not set */
                if ((adapter->function_mode & FLEX10_MODE) && !rxcp->vtm)
                        rxcp->vlanf = 0;

                if (!lancer_chip(adapter))
                        rxcp->vlan_tag = swab16(rxcp->vlan_tag);

                if (adapter->pvid == (rxcp->vlan_tag & VLAN_VID_MASK) &&
                    !adapter->vlan_tag[rxcp->vlan_tag])
                        rxcp->vlanf = 0;
        }

        /* As the compl has been parsed, reset it; we won't touch it again */
        compl->dw[offsetof(struct amap_eth_rx_compl_v1, valid) / 32] = 0;

        queue_tail_inc(&rxo->cq);
        return rxcp;
}

static inline struct page *be_alloc_pages(u32 size, gfp_t gfp)
{
        u32 order = get_order(size);

        if (order > 0)
                gfp |= __GFP_COMP;
        return alloc_pages(gfp, order);
}
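
/*
 * __GFP_COMP matters above: a multi-order allocation is carved into
 * rx_frag_size pieces that are handed out with get_page()/put_page()
 * against the same head page, and that refcounting only frees the whole
 * high-order block correctly when the pages form a compound page.
 */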

/*
 * Allocate a page, split it to fragments of size rx_frag_size and post as
 * receive buffers to BE
 */
static void be_post_rx_frags(struct be_rx_obj *rxo, gfp_t gfp)
{
        struct be_adapter *adapter = rxo->adapter;
        struct be_rx_page_info *page_info_tbl = rxo->page_info_tbl;
        struct be_rx_page_info *page_info = NULL, *prev_page_info = NULL;
        struct be_queue_info *rxq = &rxo->q;
        struct page *pagep = NULL;
        struct be_eth_rx_d *rxd;
        u64 page_dmaaddr = 0, frag_dmaaddr;
        u32 posted, page_offset = 0;

        page_info = &rxo->page_info_tbl[rxq->head];
        for (posted = 0; posted < MAX_RX_POST && !page_info->page; posted++) {
                if (!pagep) {
                        pagep = be_alloc_pages(adapter->big_page_size, gfp);
                        if (unlikely(!pagep)) {
                                rx_stats(rxo)->rx_post_fail++;
                                break;
                        }
                        page_dmaaddr = dma_map_page(&adapter->pdev->dev, pagep,
                                                    0, adapter->big_page_size,
                                                    DMA_FROM_DEVICE);
                        page_info->page_offset = 0;
                } else {
                        get_page(pagep);
                        page_info->page_offset = page_offset + rx_frag_size;
                }
                page_offset = page_info->page_offset;
                page_info->page = pagep;
                dma_unmap_addr_set(page_info, bus, page_dmaaddr);
                frag_dmaaddr = page_dmaaddr + page_info->page_offset;

                rxd = queue_head_node(rxq);
                rxd->fragpa_lo = cpu_to_le32(frag_dmaaddr & 0xFFFFFFFF);
                rxd->fragpa_hi = cpu_to_le32(upper_32_bits(frag_dmaaddr));

                /* Any space left in the current big page for another frag? */
                if ((page_offset + rx_frag_size + rx_frag_size) >
                                        adapter->big_page_size) {
                        pagep = NULL;
                        page_info->last_page_user = true;
                }

                prev_page_info = page_info;
                queue_head_inc(rxq);
                page_info = &page_info_tbl[rxq->head];
        }
        if (pagep)
                prev_page_info->last_page_user = true;

        if (posted) {
                atomic_add(posted, &rxq->used);
                be_rxq_notify(adapter, rxq->id, posted);
        } else if (atomic_read(&rxq->used) == 0) {
                /* Let be_worker replenish when memory is available */
                rxo->rx_post_starved = true;
        }
}
1427
1428 static struct be_eth_tx_compl *be_tx_compl_get(struct be_queue_info *tx_cq)
1429 {
1430         struct be_eth_tx_compl *txcp = queue_tail_node(tx_cq);
1431
1432         if (txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] == 0)
1433                 return NULL;
1434
1435         rmb();
1436         be_dws_le_to_cpu(txcp, sizeof(*txcp));
1437
1438         txcp->dw[offsetof(struct amap_eth_tx_compl, valid) / 32] = 0;
1439
1440         queue_tail_inc(tx_cq);
1441         return txcp;
1442 }
1443
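     /* Walk the TX ring from its tail up to last_index, unmapping each
      * wrb of the completed skb, then free the skb. Returns the number
      * of wrbs consumed (the header wrb included) so the caller can
      * credit them back to txq->used.
      */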
1444 static u16 be_tx_compl_process(struct be_adapter *adapter,
1445                 struct be_tx_obj *txo, u16 last_index)
1446 {
1447         struct be_queue_info *txq = &txo->q;
1448         struct be_eth_wrb *wrb;
1449         struct sk_buff **sent_skbs = txo->sent_skb_list;
1450         struct sk_buff *sent_skb;
1451         u16 cur_index, num_wrbs = 1; /* account for hdr wrb */
1452         bool unmap_skb_hdr = true;
1453
1454         sent_skb = sent_skbs[txq->tail];
1455         BUG_ON(!sent_skb);
1456         sent_skbs[txq->tail] = NULL;
1457
1458         /* skip header wrb */
1459         queue_tail_inc(txq);
1460
1461         do {
1462                 cur_index = txq->tail;
1463                 wrb = queue_tail_node(txq);
1464                 unmap_tx_frag(&adapter->pdev->dev, wrb,
1465                               (unmap_skb_hdr && skb_headlen(sent_skb)));
1466                 unmap_skb_hdr = false;
1467
1468                 num_wrbs++;
1469                 queue_tail_inc(txq);
1470         } while (cur_index != last_index);
1471
1472         kfree_skb(sent_skb);
1473         return num_wrbs;
1474 }
1475
1476 static inline struct be_eq_entry *event_get(struct be_eq_obj *eq_obj)
1477 {
1478         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1479
1480         if (!eqe->evt)
1481                 return NULL;
1482
1483         rmb();
1484         eqe->evt = le32_to_cpu(eqe->evt);
1485         queue_tail_inc(&eq_obj->q);
1486         return eqe;
1487 }
1488
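     /* Drain all pending entries from the EQ and hand the real work to
      * NAPI. The EQ is rearmed only if the caller asked for it or if
      * the interrupt turned out to be spurious (no events found).
      */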
1489 static int event_handle(struct be_adapter *adapter,
1490                         struct be_eq_obj *eq_obj,
1491                         bool rearm)
1492 {
1493         struct be_eq_entry *eqe;
1494         u16 num = 0;
1495
1496         while ((eqe = event_get(eq_obj)) != NULL) {
1497                 eqe->evt = 0;
1498                 num++;
1499         }
1500
1501         /* Deal with any spurious interrupts that come
1502          * without events
1503          */
1504         if (!num)
1505                 rearm = true;
1506
1507         be_eq_notify(adapter, eq_obj->q.id, rearm, true, num);
1508         if (num)
1509                 napi_schedule(&eq_obj->napi);
1510
1511         return num;
1512 }
1513
1514 /* Just read and notify events without processing them.
1515  * Used at the time of destroying event queues */
1516 static void be_eq_clean(struct be_adapter *adapter,
1517                         struct be_eq_obj *eq_obj)
1518 {
1519         struct be_eq_entry *eqe;
1520         u16 num = 0;
1521
1522         while ((eqe = event_get(eq_obj)) != NULL) {
1523                 eqe->evt = 0;
1524                 num++;
1525         }
1526
1527         if (num)
1528                 be_eq_notify(adapter, eq_obj->q.id, false, true, num);
1529 }
1530
1531 static void be_rx_q_clean(struct be_adapter *adapter, struct be_rx_obj *rxo)
1532 {
1533         struct be_rx_page_info *page_info;
1534         struct be_queue_info *rxq = &rxo->q;
1535         struct be_queue_info *rx_cq = &rxo->cq;
1536         struct be_rx_compl_info *rxcp;
1537         u16 tail;
1538
1539         /* First cleanup pending rx completions */
1540         while ((rxcp = be_rx_compl_get(rxo)) != NULL) {
1541                 be_rx_compl_discard(adapter, rxo, rxcp);
1542                 be_cq_notify(adapter, rx_cq->id, false, 1);
1543         }
1544
1545         /* Then free the posted rx buffers that were not used */
1546         tail = (rxq->head + rxq->len - atomic_read(&rxq->used)) % rxq->len;
1547         for (; atomic_read(&rxq->used) > 0; index_inc(&tail, rxq->len)) {
1548                 page_info = get_rx_page_info(adapter, rxo, tail);
1549                 put_page(page_info->page);
1550                 memset(page_info, 0, sizeof(*page_info));
1551         }
1552         BUG_ON(atomic_read(&rxq->used));
1553         rxq->tail = rxq->head = 0;
1554 }
1555
1556 static void be_tx_compl_clean(struct be_adapter *adapter,
1557                                 struct be_tx_obj *txo)
1558 {
1559         struct be_queue_info *tx_cq = &txo->cq;
1560         struct be_queue_info *txq = &txo->q;
1561         struct be_eth_tx_compl *txcp;
1562         u16 end_idx, cmpl = 0, timeo = 0, num_wrbs = 0;
1563         struct sk_buff **sent_skbs = txo->sent_skb_list;
1564         struct sk_buff *sent_skb;
1565         bool dummy_wrb;
1566
1567         /* Wait for a max of 200ms for all the tx-completions to arrive. */
1568         do {
1569                 while ((txcp = be_tx_compl_get(tx_cq))) {
1570                         end_idx = AMAP_GET_BITS(struct amap_eth_tx_compl,
1571                                         wrb_index, txcp);
1572                         num_wrbs += be_tx_compl_process(adapter, txo, end_idx);
1573                         cmpl++;
1574                 }
1575                 if (cmpl) {
1576                         be_cq_notify(adapter, tx_cq->id, false, cmpl);
1577                         atomic_sub(num_wrbs, &txq->used);
1578                         cmpl = 0;
1579                         num_wrbs = 0;
1580                 }
1581
1582                 if (atomic_read(&txq->used) == 0 || ++timeo > 200)
1583                         break;
1584
1585                 mdelay(1);
1586         } while (true);
1587
1588         if (atomic_read(&txq->used))
1589                 dev_err(&adapter->pdev->dev, "%d pending tx-completions\n",
1590                         atomic_read(&txq->used));
1591
1592         /* free posted tx for which compls will never arrive */
1593         while (atomic_read(&txq->used)) {
1594                 sent_skb = sent_skbs[txq->tail];
1595                 end_idx = txq->tail;
1596                 index_adv(&end_idx,
1597                         wrb_cnt_for_skb(adapter, sent_skb, &dummy_wrb) - 1,
1598                         txq->len);
1599                 num_wrbs = be_tx_compl_process(adapter, txo, end_idx);
1600                 atomic_sub(num_wrbs, &txq->used);
1601         }
1602 }
1603
1604 static void be_mcc_queues_destroy(struct be_adapter *adapter)
1605 {
1606         struct be_queue_info *q;
1607
1608         q = &adapter->mcc_obj.q;
1609         if (q->created)
1610                 be_cmd_q_destroy(adapter, q, QTYPE_MCCQ);
1611         be_queue_free(adapter, q);
1612
1613         q = &adapter->mcc_obj.cq;
1614         if (q->created)
1615                 be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1616         be_queue_free(adapter, q);
1617 }
1618
1619 /* Must be called only after TX qs are created as MCC shares TX EQ */
1620 static int be_mcc_queues_create(struct be_adapter *adapter)
1621 {
1622         struct be_queue_info *q, *cq;
1623
1624         /* Alloc MCC compl queue */
1625         cq = &adapter->mcc_obj.cq;
1626         if (be_queue_alloc(adapter, cq, MCC_CQ_LEN,
1627                         sizeof(struct be_mcc_compl)))
1628                 goto err;
1629
1630         /* Ask BE to create MCC compl queue; share TX's eq */
1631         if (be_cmd_cq_create(adapter, cq, &adapter->tx_eq.q, false, true, 0))
1632                 goto mcc_cq_free;
1633
1634         /* Alloc MCC queue */
1635         q = &adapter->mcc_obj.q;
1636         if (be_queue_alloc(adapter, q, MCC_Q_LEN, sizeof(struct be_mcc_wrb)))
1637                 goto mcc_cq_destroy;
1638
1639         /* Ask BE to create MCC queue */
1640         if (be_cmd_mccq_create(adapter, q, cq))
1641                 goto mcc_q_free;
1642
1643         return 0;
1644
1645 mcc_q_free:
1646         be_queue_free(adapter, q);
1647 mcc_cq_destroy:
1648         be_cmd_q_destroy(adapter, cq, QTYPE_CQ);
1649 mcc_cq_free:
1650         be_queue_free(adapter, cq);
1651 err:
1652         return -1;
1653 }
1654
1655 static void be_tx_queues_destroy(struct be_adapter *adapter)
1656 {
1657         struct be_queue_info *q;
1658         struct be_tx_obj *txo;
1659         u8 i;
1660
1661         for_all_tx_queues(adapter, txo, i) {
1662                 q = &txo->q;
1663                 if (q->created)
1664                         be_cmd_q_destroy(adapter, q, QTYPE_TXQ);
1665                 be_queue_free(adapter, q);
1666
1667                 q = &txo->cq;
1668                 if (q->created)
1669                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1670                 be_queue_free(adapter, q);
1671         }
1672
1673         /* Clear any residual events */
1674         be_eq_clean(adapter, &adapter->tx_eq);
1675
1676         q = &adapter->tx_eq.q;
1677         if (q->created)
1678                 be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1679         be_queue_free(adapter, q);
1680 }
1681
1682 static int be_num_txqs_want(struct be_adapter *adapter)
1683 {
1684         if ((num_vfs && adapter->sriov_enabled) ||
1685                 be_is_mc(adapter) ||
1686                 lancer_chip(adapter) || !be_physfn(adapter) ||
1687                 adapter->generation == BE_GEN2)
1688                 return 1;
1689         else
1690                 return MAX_TX_QS;
1691 }
1692
1693 /* One TX event queue is shared by all TX compl qs */
1694 static int be_tx_queues_create(struct be_adapter *adapter)
1695 {
1696         struct be_queue_info *eq, *q, *cq;
1697         struct be_tx_obj *txo;
1698         u8 i;
1699
1700         adapter->num_tx_qs = be_num_txqs_want(adapter);
1701         if (adapter->num_tx_qs != MAX_TX_QS) {
1702                 rtnl_lock();
1703                 netif_set_real_num_tx_queues(adapter->netdev,
1704                         adapter->num_tx_qs);
1705                 rtnl_unlock();
1706         }
1707
1708         adapter->tx_eq.max_eqd = 0;
1709         adapter->tx_eq.min_eqd = 0;
1710         adapter->tx_eq.cur_eqd = 96;
1711         adapter->tx_eq.enable_aic = false;
1712
1713         eq = &adapter->tx_eq.q;
1714         if (be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1715                 sizeof(struct be_eq_entry)))
1716                 return -1;
1717
1718         if (be_cmd_eq_create(adapter, eq, adapter->tx_eq.cur_eqd))
1719                 goto err;
1720         adapter->tx_eq.eq_idx = adapter->eq_next_idx++;
1721
1722         for_all_tx_queues(adapter, txo, i) {
1723                 cq = &txo->cq;
1724                 if (be_queue_alloc(adapter, cq, TX_CQ_LEN,
1725                         sizeof(struct be_eth_tx_compl)))
1726                         goto err;
1727
1728                 if (be_cmd_cq_create(adapter, cq, eq, false, false, 3))
1729                         goto err;
1730
1731                 q = &txo->q;
1732                 if (be_queue_alloc(adapter, q, TX_Q_LEN,
1733                         sizeof(struct be_eth_wrb)))
1734                         goto err;
1735         }
1736         return 0;
1737
1738 err:
1739         be_tx_queues_destroy(adapter);
1740         return -1;
1741 }
1742
1743 static void be_rx_queues_destroy(struct be_adapter *adapter)
1744 {
1745         struct be_queue_info *q;
1746         struct be_rx_obj *rxo;
1747         int i;
1748
1749         for_all_rx_queues(adapter, rxo, i) {
1750                 be_queue_free(adapter, &rxo->q);
1751
1752                 q = &rxo->cq;
1753                 if (q->created)
1754                         be_cmd_q_destroy(adapter, q, QTYPE_CQ);
1755                 be_queue_free(adapter, q);
1756
1757                 q = &rxo->rx_eq.q;
1758                 if (q->created)
1759                         be_cmd_q_destroy(adapter, q, QTYPE_EQ);
1760                 be_queue_free(adapter, q);
1761         }
1762 }
1763
1764 static u32 be_num_rxqs_want(struct be_adapter *adapter)
1765 {
1766         if ((adapter->function_caps & BE_FUNCTION_CAPS_RSS) &&
1767                 !adapter->sriov_enabled && be_physfn(adapter) &&
1768                 !be_is_mc(adapter)) {
1769                 return 1 + MAX_RSS_QS; /* one default non-RSS queue */
1770         } else {
1771                 dev_warn(&adapter->pdev->dev,
1772                         "No support for multiple RX queues\n");
1773                 return 1;
1774         }
1775 }
1776
1777 static int be_rx_queues_create(struct be_adapter *adapter)
1778 {
1779         struct be_queue_info *eq, *q, *cq;
1780         struct be_rx_obj *rxo;
1781         int rc, i;
1782
1783         adapter->num_rx_qs = min(be_num_rxqs_want(adapter),
1784                                 msix_enabled(adapter) ?
1785                                         adapter->num_msix_vec - 1 : 1);
1786         if (adapter->num_rx_qs != MAX_RX_QS)
1787                 dev_warn(&adapter->pdev->dev,
1788                         "Can create only %d RX queues\n", adapter->num_rx_qs);
1789
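             /* e.g. with 4K pages and the default rx_frag_size of 2048,
              * get_order(2048) == 0 and big_page_size == 4096, so each
              * page posted by be_post_rx_frags() yields two frags.
              */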
1790         adapter->big_page_size = (1 << get_order(rx_frag_size)) * PAGE_SIZE;
1791         for_all_rx_queues(adapter, rxo, i) {
1792                 rxo->adapter = adapter;
1793                 rxo->rx_eq.max_eqd = BE_MAX_EQD;
1794                 rxo->rx_eq.enable_aic = true;
1795
1796                 /* EQ */
1797                 eq = &rxo->rx_eq.q;
1798                 rc = be_queue_alloc(adapter, eq, EVNT_Q_LEN,
1799                                         sizeof(struct be_eq_entry));
1800                 if (rc)
1801                         goto err;
1802
1803                 rc = be_cmd_eq_create(adapter, eq, rxo->rx_eq.cur_eqd);
1804                 if (rc)
1805                         goto err;
1806
1807                 rxo->rx_eq.eq_idx = adapter->eq_next_idx++;
1808
1809                 /* CQ */
1810                 cq = &rxo->cq;
1811                 rc = be_queue_alloc(adapter, cq, RX_CQ_LEN,
1812                                 sizeof(struct be_eth_rx_compl));
1813                 if (rc)
1814                         goto err;
1815
1816                 rc = be_cmd_cq_create(adapter, cq, eq, false, false, 3);
1817                 if (rc)
1818                         goto err;
1819
1820                 /* Rx Q - will be created in be_open() */
1821                 q = &rxo->q;
1822                 rc = be_queue_alloc(adapter, q, RX_Q_LEN,
1823                                 sizeof(struct be_eth_rx_d));
1824                 if (rc)
1825                         goto err;
1826
1827         }
1828
1829         return 0;
1830 err:
1831         be_rx_queues_destroy(adapter);
1832         return -1;
1833 }
1834
1835 static bool event_peek(struct be_eq_obj *eq_obj)
1836 {
1837         struct be_eq_entry *eqe = queue_tail_node(&eq_obj->q);
1838         return eqe->evt != 0;
1842 }
1843
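     /* Legacy/INTx interrupt handler. On Lancer the EQs are peeked
      * directly; on BEx the CEV_ISR register is read and each set bit,
      * indexed by eq_idx, identifies an EQ with pending events.
      */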
1844 static irqreturn_t be_intx(int irq, void *dev)
1845 {
1846         struct be_adapter *adapter = dev;
1847         struct be_rx_obj *rxo;
1848         int isr, i, tx = 0 , rx = 0;
1849
1850         if (lancer_chip(adapter)) {
1851                 if (event_peek(&adapter->tx_eq))
1852                         tx = event_handle(adapter, &adapter->tx_eq, false);
1853                 for_all_rx_queues(adapter, rxo, i) {
1854                         if (event_peek(&rxo->rx_eq))
1855                                 rx |= event_handle(adapter, &rxo->rx_eq, true);
1856                 }
1857
1858                 if (!(tx || rx))
1859                         return IRQ_NONE;
1860
1861         } else {
1862                 isr = ioread32(adapter->csr + CEV_ISR0_OFFSET +
1863                         (adapter->tx_eq.q.id / 8) * CEV_ISR_SIZE);
1864                 if (!isr)
1865                         return IRQ_NONE;
1866
1867                 if ((1 << adapter->tx_eq.eq_idx) & isr)
1868                         event_handle(adapter, &adapter->tx_eq, false);
1869
1870                 for_all_rx_queues(adapter, rxo, i) {
1871                         if ((1 << rxo->rx_eq.eq_idx) & isr)
1872                                 event_handle(adapter, &rxo->rx_eq, true);
1873                 }
1874         }
1875
1876         return IRQ_HANDLED;
1877 }
1878
1879 static irqreturn_t be_msix_rx(int irq, void *dev)
1880 {
1881         struct be_rx_obj *rxo = dev;
1882         struct be_adapter *adapter = rxo->adapter;
1883
1884         event_handle(adapter, &rxo->rx_eq, true);
1885
1886         return IRQ_HANDLED;
1887 }
1888
1889 static irqreturn_t be_msix_tx_mcc(int irq, void *dev)
1890 {
1891         struct be_adapter *adapter = dev;
1892
1893         event_handle(adapter, &adapter->tx_eq, false);
1894
1895         return IRQ_HANDLED;
1896 }
1897
1898 static inline bool do_gro(struct be_rx_compl_info *rxcp)
1899 {
1900         return rxcp->tcpf && !rxcp->err;
1901 }
1902
1903 static int be_poll_rx(struct napi_struct *napi, int budget)
1904 {
1905         struct be_eq_obj *rx_eq = container_of(napi, struct be_eq_obj, napi);
1906         struct be_rx_obj *rxo = container_of(rx_eq, struct be_rx_obj, rx_eq);
1907         struct be_adapter *adapter = rxo->adapter;
1908         struct be_queue_info *rx_cq = &rxo->cq;
1909         struct be_rx_compl_info *rxcp;
1910         u32 work_done;
1911
1912         rx_stats(rxo)->rx_polls++;
1913         for (work_done = 0; work_done < budget; work_done++) {
1914                 rxcp = be_rx_compl_get(rxo);
1915                 if (!rxcp)
1916                         break;
1917
1918                 /* Is it a flush compl that has no data */
1919                 if (unlikely(rxcp->num_rcvd == 0))
1920                         goto loop_continue;
1921
1922                 /* Discard compls with partial DMA (Lancer B0) */
1923                 if (unlikely(!rxcp->pkt_size)) {
1924                         be_rx_compl_discard(adapter, rxo, rxcp);
1925                         goto loop_continue;
1926                 }
1927
1928                 /* On BE, drop pkts that arrive due to imperfect filtering in
1929                  * promiscuous mode on some SKUs
1930                  */
1931                 if (unlikely(rxcp->port != adapter->port_num &&
1932                                 !lancer_chip(adapter))) {
1933                         be_rx_compl_discard(adapter, rxo, rxcp);
1934                         goto loop_continue;
1935                 }
1936
1937                 if (do_gro(rxcp))
1938                         be_rx_compl_process_gro(adapter, rxo, rxcp);
1939                 else
1940                         be_rx_compl_process(adapter, rxo, rxcp);
1941 loop_continue:
1942                 be_rx_stats_update(rxo, rxcp);
1943         }
1944
1945         be_cq_notify(adapter, rx_cq->id, false, work_done);
1946
1947         /* Refill the queue */
1948         if (work_done && atomic_read(&rxo->q.used) < RX_FRAGS_REFILL_WM)
1949                 be_post_rx_frags(rxo, GFP_ATOMIC);
1950
1951         /* All consumed */
1952         if (work_done < budget) {
1953                 napi_complete(napi);
1954                 /* Arm CQ */
1955                 be_cq_notify(adapter, rx_cq->id, true, 0);
1956         }
1957         return work_done;
1958 }
1959
1960 /* As TX and MCC share the same EQ, check for both TX and MCC completions.
1961  * For TX/MCC we don't honour the budget; consume everything.
1962  */
1963 static int be_poll_tx_mcc(struct napi_struct *napi, int budget)
1964 {
1965         struct be_eq_obj *tx_eq = container_of(napi, struct be_eq_obj, napi);
1966         struct be_adapter *adapter =
1967                 container_of(tx_eq, struct be_adapter, tx_eq);
1968         struct be_tx_obj *txo;
1969         struct be_eth_tx_compl *txcp;
1970         int tx_compl, mcc_compl, status = 0;
1971         u8 i;
1972         u16 num_wrbs;
1973
1974         for_all_tx_queues(adapter, txo, i) {
1975                 tx_compl = 0;
1976                 num_wrbs = 0;
1977                 while ((txcp = be_tx_compl_get(&txo->cq))) {
1978                         num_wrbs += be_tx_compl_process(adapter, txo,
1979                                 AMAP_GET_BITS(struct amap_eth_tx_compl,
1980                                         wrb_index, txcp));
1981                         tx_compl++;
1982                 }
1983                 if (tx_compl) {
1984                         be_cq_notify(adapter, txo->cq.id, true, tx_compl);
1985
1986                         atomic_sub(num_wrbs, &txo->q.used);
1987
1988                         /* As Tx wrbs have been freed up, wake up netdev queue
1989                          * if it was stopped due to lack of tx wrbs.  */
1990                         if (__netif_subqueue_stopped(adapter->netdev, i) &&
1991                                 atomic_read(&txo->q.used) < txo->q.len / 2) {
1992                                 netif_wake_subqueue(adapter->netdev, i);
1993                         }
1994
1995                         u64_stats_update_begin(&tx_stats(txo)->sync_compl);
1996                         tx_stats(txo)->tx_compl += tx_compl;
1997                         u64_stats_update_end(&tx_stats(txo)->sync_compl);
1998                 }
1999         }
2000
2001         mcc_compl = be_process_mcc(adapter, &status);
2002
2003         if (mcc_compl) {
2004                 struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
2005                 be_cq_notify(adapter, mcc_obj->cq.id, true, mcc_compl);
2006         }
2007
2008         napi_complete(napi);
2009
2010         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2011         adapter->drv_stats.tx_events++;
2012         return 1;
2013 }
2014
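     /* Probe the card for unrecoverable errors: the SLIPORT status
      * registers on Lancer, or the unmasked UE status bits elsewhere.
      * On error the adapter is flagged (ue_detected/eeh_err) and the
      * offending bits/registers are logged.
      */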
2015 void be_detect_dump_ue(struct be_adapter *adapter)
2016 {
2017         u32 ue_lo = 0, ue_hi = 0, ue_lo_mask = 0, ue_hi_mask = 0;
2018         u32 sliport_status = 0, sliport_err1 = 0, sliport_err2 = 0;
2019         u32 i;
2020
2021         if (adapter->eeh_err || adapter->ue_detected)
2022                 return;
2023
2024         if (lancer_chip(adapter)) {
2025                 sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
2026                 if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2027                         sliport_err1 = ioread32(adapter->db +
2028                                         SLIPORT_ERROR1_OFFSET);
2029                         sliport_err2 = ioread32(adapter->db +
2030                                         SLIPORT_ERROR2_OFFSET);
2031                 }
2032         } else {
2033                 pci_read_config_dword(adapter->pdev,
2034                                 PCICFG_UE_STATUS_LOW, &ue_lo);
2035                 pci_read_config_dword(adapter->pdev,
2036                                 PCICFG_UE_STATUS_HIGH, &ue_hi);
2037                 pci_read_config_dword(adapter->pdev,
2038                                 PCICFG_UE_STATUS_LOW_MASK, &ue_lo_mask);
2039                 pci_read_config_dword(adapter->pdev,
2040                                 PCICFG_UE_STATUS_HI_MASK, &ue_hi_mask);
2041
2042                 ue_lo = (ue_lo & (~ue_lo_mask));
2043                 ue_hi = (ue_hi & (~ue_hi_mask));
2044         }
2045
2046         if (ue_lo || ue_hi ||
2047                 sliport_status & SLIPORT_STATUS_ERR_MASK) {
2048                 adapter->ue_detected = true;
2049                 adapter->eeh_err = true;
2050                 dev_err(&adapter->pdev->dev,
2051                         "Unrecoverable error in the card\n");
2052         }
2053
2054         if (ue_lo) {
2055                 for (i = 0; ue_lo; ue_lo >>= 1, i++) {
2056                         if (ue_lo & 1)
2057                                 dev_err(&adapter->pdev->dev,
2058                                 "UE: %s bit set\n", ue_status_low_desc[i]);
2059                 }
2060         }
2061         if (ue_hi) {
2062                 for (i = 0; ue_hi; ue_hi >>= 1, i++) {
2063                         if (ue_hi & 1)
2064                                 dev_err(&adapter->pdev->dev,
2065                                 "UE: %s bit set\n", ue_status_hi_desc[i]);
2066                 }
2067         }
2068
2069         if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
2070                 dev_err(&adapter->pdev->dev,
2071                         "sliport status 0x%x\n", sliport_status);
2072                 dev_err(&adapter->pdev->dev,
2073                         "sliport error1 0x%x\n", sliport_err1);
2074                 dev_err(&adapter->pdev->dev,
2075                         "sliport error2 0x%x\n", sliport_err2);
2076         }
2077 }
2078
2079 static void be_msix_disable(struct be_adapter *adapter)
2080 {
2081         if (msix_enabled(adapter)) {
2082                 pci_disable_msix(adapter->pdev);
2083                 adapter->num_msix_vec = 0;
2084         }
2085 }
2086
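     /* pci_enable_msix() returns 0 on success, a negative errno on
      * failure, or a positive count of the vectors actually available;
      * in the last case retry once with the reduced count, provided it
      * still covers the Rx + Tx minimum.
      */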
2087 static void be_msix_enable(struct be_adapter *adapter)
2088 {
2089 #define BE_MIN_MSIX_VECTORS     (1 + 1) /* Rx + Tx */
2090         int i, status, num_vec;
2091
2092         num_vec = be_num_rxqs_want(adapter) + 1;
2093
2094         for (i = 0; i < num_vec; i++)
2095                 adapter->msix_entries[i].entry = i;
2096
2097         status = pci_enable_msix(adapter->pdev, adapter->msix_entries, num_vec);
2098         if (status == 0) {
2099                 goto done;
2100         } else if (status >= BE_MIN_MSIX_VECTORS) {
2101                 num_vec = status;
2102                 if (pci_enable_msix(adapter->pdev, adapter->msix_entries,
2103                                 num_vec) == 0)
2104                         goto done;
2105         }
2106         return;
2107 done:
2108         adapter->num_msix_vec = num_vec;
2109         return;
2110 }
2111
2112 static int be_sriov_enable(struct be_adapter *adapter)
2113 {
2114         be_check_sriov_fn_type(adapter);
2115 #ifdef CONFIG_PCI_IOV
2116         if (be_physfn(adapter) && num_vfs) {
2117                 int status, pos;
2118                 u16 nvfs;
2119
2120                 pos = pci_find_ext_capability(adapter->pdev,
2121                                                 PCI_EXT_CAP_ID_SRIOV);
2122                 pci_read_config_word(adapter->pdev,
2123                                         pos + PCI_SRIOV_TOTAL_VF, &nvfs);
2124
2125                 if (num_vfs > nvfs) {
2126                         dev_info(&adapter->pdev->dev,
2127                                         "Device supports %d VFs and not %d\n",
2128                                         nvfs, num_vfs);
2129                         num_vfs = nvfs;
2130                 }
2131
2132                 status = pci_enable_sriov(adapter->pdev, num_vfs);
2133         adapter->sriov_enabled = !status;
2134
2135                 if (adapter->sriov_enabled) {
2136                         adapter->vf_cfg = kcalloc(num_vfs,
2137                                                 sizeof(struct be_vf_cfg),
2138                                                 GFP_KERNEL);
2139                         if (!adapter->vf_cfg)
2140                                 return -ENOMEM;
2141                 }
2142         }
2143 #endif
2144         return 0;
2145 }
2146
2147 static void be_sriov_disable(struct be_adapter *adapter)
2148 {
2149 #ifdef CONFIG_PCI_IOV
2150         if (adapter->sriov_enabled) {
2151                 pci_disable_sriov(adapter->pdev);
2152                 kfree(adapter->vf_cfg);
2153                 adapter->sriov_enabled = false;
2154         }
2155 #endif
2156 }
2157
2158 static inline int be_msix_vec_get(struct be_adapter *adapter,
2159                                         struct be_eq_obj *eq_obj)
2160 {
2161         return adapter->msix_entries[eq_obj->eq_idx].vector;
2162 }
2163
2164 static int be_request_irq(struct be_adapter *adapter,
2165                 struct be_eq_obj *eq_obj,
2166                 void *handler, char *desc, void *context)
2167 {
2168         struct net_device *netdev = adapter->netdev;
2169         int vec;
2170
2171         sprintf(eq_obj->desc, "%s-%s", netdev->name, desc);
2172         vec = be_msix_vec_get(adapter, eq_obj);
2173         return request_irq(vec, handler, 0, eq_obj->desc, context);
2174 }
2175
2176 static void be_free_irq(struct be_adapter *adapter, struct be_eq_obj *eq_obj,
2177                         void *context)
2178 {
2179         int vec = be_msix_vec_get(adapter, eq_obj);
2180         free_irq(vec, context);
2181 }
2182
2183 static int be_msix_register(struct be_adapter *adapter)
2184 {
2185         struct be_rx_obj *rxo;
2186         int status, i;
2187         char qname[10];
2188
2189         status = be_request_irq(adapter, &adapter->tx_eq, be_msix_tx_mcc, "tx",
2190                                 adapter);
2191         if (status)
2192                 goto err;
2193
2194         for_all_rx_queues(adapter, rxo, i) {
2195                 sprintf(qname, "rxq%d", i);
2196                 status = be_request_irq(adapter, &rxo->rx_eq, be_msix_rx,
2197                                 qname, rxo);
2198                 if (status)
2199                         goto err_msix;
2200         }
2201
2202         return 0;
2203
2204 err_msix:
2205         be_free_irq(adapter, &adapter->tx_eq, adapter);
2206
2207         for (i--, rxo = &adapter->rx_obj[i]; i >= 0; i--, rxo--)
2208                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2209
2210 err:
2211         dev_warn(&adapter->pdev->dev,
2212                 "MSIX Request IRQ failed - err %d\n", status);
2213         be_msix_disable(adapter);
2214         return status;
2215 }
2216
2217 static int be_irq_register(struct be_adapter *adapter)
2218 {
2219         struct net_device *netdev = adapter->netdev;
2220         int status;
2221
2222         if (msix_enabled(adapter)) {
2223                 status = be_msix_register(adapter);
2224                 if (status == 0)
2225                         goto done;
2226                 /* INTx is not supported for VF */
2227                 if (!be_physfn(adapter))
2228                         return status;
2229         }
2230
2231         /* INTx */
2232         netdev->irq = adapter->pdev->irq;
2233         status = request_irq(netdev->irq, be_intx, IRQF_SHARED, netdev->name,
2234                         adapter);
2235         if (status) {
2236                 dev_err(&adapter->pdev->dev,
2237                         "INTx request IRQ failed - err %d\n", status);
2238                 return status;
2239         }
2240 done:
2241         adapter->isr_registered = true;
2242         return 0;
2243 }
2244
2245 static void be_irq_unregister(struct be_adapter *adapter)
2246 {
2247         struct net_device *netdev = adapter->netdev;
2248         struct be_rx_obj *rxo;
2249         int i;
2250
2251         if (!adapter->isr_registered)
2252                 return;
2253
2254         /* INTx */
2255         if (!msix_enabled(adapter)) {
2256                 free_irq(netdev->irq, adapter);
2257                 goto done;
2258         }
2259
2260         /* MSIx */
2261         be_free_irq(adapter, &adapter->tx_eq, adapter);
2262
2263         for_all_rx_queues(adapter, rxo, i)
2264                 be_free_irq(adapter, &rxo->rx_eq, rxo);
2265
2266 done:
2267         adapter->isr_registered = false;
2268 }
2269
2270 static void be_rx_queues_clear(struct be_adapter *adapter)
2271 {
2272         struct be_queue_info *q;
2273         struct be_rx_obj *rxo;
2274         int i;
2275
2276         for_all_rx_queues(adapter, rxo, i) {
2277                 q = &rxo->q;
2278                 if (q->created) {
2279                         be_cmd_rxq_destroy(adapter, q);
2280                         /* After the rxq is invalidated, wait for a grace time
2281                          * of 1ms for all dma to end and the flush compl to
2282                          * arrive
2283                          */
2284                         mdelay(1);
2285                         be_rx_q_clean(adapter, rxo);
2286                 }
2287
2288                 /* Clear any residual events */
2289                 q = &rxo->rx_eq.q;
2290                 if (q->created)
2291                         be_eq_clean(adapter, &rxo->rx_eq);
2292         }
2293 }
2294
2295 static int be_close(struct net_device *netdev)
2296 {
2297         struct be_adapter *adapter = netdev_priv(netdev);
2298         struct be_rx_obj *rxo;
2299         struct be_tx_obj *txo;
2300         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2301         int vec, i;
2302
2303         be_async_mcc_disable(adapter);
2304
2305         if (!lancer_chip(adapter))
2306                 be_intr_set(adapter, false);
2307
2308         for_all_rx_queues(adapter, rxo, i)
2309                 napi_disable(&rxo->rx_eq.napi);
2310
2311         napi_disable(&tx_eq->napi);
2312
2313         if (lancer_chip(adapter)) {
2314                 be_cq_notify(adapter, adapter->mcc_obj.cq.id, false, 0);
2315                 for_all_rx_queues(adapter, rxo, i)
2316                         be_cq_notify(adapter, rxo->cq.id, false, 0);
2317                 for_all_tx_queues(adapter, txo, i)
2318                         be_cq_notify(adapter, txo->cq.id, false, 0);
2319         }
2320
2321         if (msix_enabled(adapter)) {
2322                 vec = be_msix_vec_get(adapter, tx_eq);
2323                 synchronize_irq(vec);
2324
2325                 for_all_rx_queues(adapter, rxo, i) {
2326                         vec = be_msix_vec_get(adapter, &rxo->rx_eq);
2327                         synchronize_irq(vec);
2328                 }
2329         } else {
2330                 synchronize_irq(netdev->irq);
2331         }
2332         be_irq_unregister(adapter);
2333
2334         /* Wait for all pending tx completions to arrive so that
2335          * all tx skbs are freed.
2336          */
2337         for_all_tx_queues(adapter, txo, i)
2338                 be_tx_compl_clean(adapter, txo);
2339
2340         be_rx_queues_clear(adapter);
2341         return 0;
2342 }
2343
2344 static int be_rx_queues_setup(struct be_adapter *adapter)
2345 {
2346         struct be_rx_obj *rxo;
2347         int rc, i, j;
2348         u8 rsstable[128];
2349
2350         for_all_rx_queues(adapter, rxo, i) {
2351                 rc = be_cmd_rxq_create(adapter, &rxo->q, rxo->cq.id,
2352                         rx_frag_size, BE_MAX_JUMBO_FRAME_SIZE,
2353                         adapter->if_handle,
2354                         (i > 0) ? 1 : 0/* rss enable */, &rxo->rss_id);
2355                 if (rc)
2356                         return rc;
2357         }
2358
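             /* Fill the 128-entry RSS indirection table round-robin with
              * the rss_ids of the RSS rings; e.g. with four RSS rings the
              * table cycles through their four ids 32 times.
              */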
2359         if (be_multi_rxq(adapter)) {
2360                 for (j = 0; j < 128; j += adapter->num_rx_qs - 1) {
2361                         for_all_rss_queues(adapter, rxo, i) {
2362                                 if ((j + i) >= 128)
2363                                         break;
2364                                 rsstable[j + i] = rxo->rss_id;
2365                         }
2366                 }
2367                 rc = be_cmd_rss_config(adapter, rsstable, 128);
2368
2369                 if (rc)
2370                         return rc;
2371         }
2372
2373         /* First time posting */
2374         for_all_rx_queues(adapter, rxo, i) {
2375                 be_post_rx_frags(rxo, GFP_KERNEL);
2376                 napi_enable(&rxo->rx_eq.napi);
2377         }
2378         return 0;
2379 }
2380
2381 static int be_open(struct net_device *netdev)
2382 {
2383         struct be_adapter *adapter = netdev_priv(netdev);
2384         struct be_eq_obj *tx_eq = &adapter->tx_eq;
2385         struct be_rx_obj *rxo;
2386         int status, i;
2387
2388         status = be_rx_queues_setup(adapter);
2389         if (status)
2390                 goto err;
2391
2392         napi_enable(&tx_eq->napi);
2393
2394         be_irq_register(adapter);
2395
2396         if (!lancer_chip(adapter))
2397                 be_intr_set(adapter, true);
2398
2399         /* The evt queues are created in unarmed state; arm them */
2400         for_all_rx_queues(adapter, rxo, i) {
2401                 be_eq_notify(adapter, rxo->rx_eq.q.id, true, false, 0);
2402                 be_cq_notify(adapter, rxo->cq.id, true, 0);
2403         }
2404         be_eq_notify(adapter, tx_eq->q.id, true, false, 0);
2405
2406         /* Now that interrupts are on we can process async mcc */
2407         be_async_mcc_enable(adapter);
2408
2409         return 0;
2410 err:
2411         be_close(adapter->netdev);
2412         return -EIO;
2413 }
2414
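     /* Enable or disable magic-packet wake-up. On enable, set the PM
      * config bits and program the netdev MAC as the magic-WoL address;
      * on disable, program a zeroed MAC. PCI wake for D3hot/D3cold is
      * toggled to match.
      */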
2415 static int be_setup_wol(struct be_adapter *adapter, bool enable)
2416 {
2417         struct be_dma_mem cmd;
2418         int status = 0;
2419         u8 mac[ETH_ALEN];
2420
2421         memset(mac, 0, ETH_ALEN);
2422
2423         cmd.size = sizeof(struct be_cmd_req_acpi_wol_magic_config);
2424         cmd.va = dma_alloc_coherent(&adapter->pdev->dev, cmd.size, &cmd.dma,
2425                                     GFP_KERNEL);
2426         if (cmd.va == NULL)
2427                 return -1;
2428         memset(cmd.va, 0, cmd.size);
2429
2430         if (enable) {
2431                 status = pci_write_config_dword(adapter->pdev,
2432                         PCICFG_PM_CONTROL_OFFSET, PCICFG_PM_CONTROL_MASK);
2433                 if (status) {
2434                         dev_err(&adapter->pdev->dev,
2435                                 "Could not enable Wake-on-lan\n");
2436                         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va,
2437                                           cmd.dma);
2438                         return status;
2439                 }
2440                 status = be_cmd_enable_magic_wol(adapter,
2441                                 adapter->netdev->dev_addr, &cmd);
2442                 pci_enable_wake(adapter->pdev, PCI_D3hot, 1);
2443                 pci_enable_wake(adapter->pdev, PCI_D3cold, 1);
2444         } else {
2445                 status = be_cmd_enable_magic_wol(adapter, mac, &cmd);
2446                 pci_enable_wake(adapter->pdev, PCI_D3hot, 0);
2447                 pci_enable_wake(adapter->pdev, PCI_D3cold, 0);
2448         }
2449
2450         dma_free_coherent(&adapter->pdev->dev, cmd.size, cmd.va, cmd.dma);
2451         return status;
2452 }
2453
2454 /*
2455  * Generate a seed MAC address from the PF MAC Address using jhash.
2456  * MAC addresses for VFs are assigned incrementally starting from the seed.
2457  * These addresses are programmed in the ASIC by the PF and the VF driver
2458  * queries for the MAC address during its probe.
2459  */
2460 static inline int be_vf_eth_addr_config(struct be_adapter *adapter)
2461 {
2462         u32 vf;
2463         int status = 0;
2464         u8 mac[ETH_ALEN];
2465
2466         be_vf_eth_addr_generate(adapter, mac);
2467
2468         for (vf = 0; vf < num_vfs; vf++) {
2469                 if (lancer_chip(adapter)) {
2470                         status = be_cmd_set_mac_list(adapter, mac, 1, vf + 1);
2471                 } else {
2472                         status = be_cmd_pmac_add(adapter, mac,
2473                                         adapter->vf_cfg[vf].vf_if_handle,
2474                                         &adapter->vf_cfg[vf].vf_pmac_id,
2475                                         vf + 1);
2476                 }
2477
2478                 if (status)
2479                         dev_err(&adapter->pdev->dev,
2480                         "Mac address assignment failed for VF %d\n", vf);
2481                 else
2482                         memcpy(adapter->vf_cfg[vf].vf_mac_addr, mac, ETH_ALEN);
2483
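                     /* Only the last octet is bumped; this assumes num_vfs
                      * is small enough that the seed's final byte will not
                      * wrap around.
                      */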
2484                 mac[5] += 1;
2485         }
2486         return status;
2487 }
2488
2489 static void be_vf_clear(struct be_adapter *adapter)
2490 {
2491         u32 vf;
2492
2493         for (vf = 0; vf < num_vfs; vf++) {
2494                 if (lancer_chip(adapter))
2495                         be_cmd_set_mac_list(adapter, NULL, 0, vf + 1);
2496                 else
2497                         be_cmd_pmac_del(adapter,
2498                                         adapter->vf_cfg[vf].vf_if_handle,
2499                                         adapter->vf_cfg[vf].vf_pmac_id, vf + 1);
2500         }
2501
2502         for (vf = 0; vf < num_vfs; vf++)
2503                 be_cmd_if_destroy(adapter, adapter->vf_cfg[vf].vf_if_handle,
2504                                 vf + 1);
2505 }
2506
2507 static int be_clear(struct be_adapter *adapter)
2508 {
2509         if (be_physfn(adapter) && adapter->sriov_enabled)
2510                 be_vf_clear(adapter);
2511
2512         be_cmd_if_destroy(adapter, adapter->if_handle, 0);
2513
2514         be_mcc_queues_destroy(adapter);
2515         be_rx_queues_destroy(adapter);
2516         be_tx_queues_destroy(adapter);
2517
2518         /* tell fw we're done with firing cmds */
2519         be_cmd_fw_clean(adapter);
2520         return 0;
2521 }
2522
2523 static void be_vf_setup_init(struct be_adapter *adapter)
2524 {
2525         int vf;
2526
2527         for (vf = 0; vf < num_vfs; vf++) {
2528                 adapter->vf_cfg[vf].vf_if_handle = -1;
2529                 adapter->vf_cfg[vf].vf_pmac_id = -1;
2530         }
2531 }
2532
2533 static int be_vf_setup(struct be_adapter *adapter)
2534 {
2535         u32 cap_flags, en_flags, vf;
2536         u16 lnk_speed;
2537         int status;
2538
2539         be_vf_setup_init(adapter);
2540
2541         cap_flags = en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2542                                 BE_IF_FLAGS_MULTICAST;
2543
2544         for (vf = 0; vf < num_vfs; vf++) {
2545                 status = be_cmd_if_create(adapter, cap_flags, en_flags, NULL,
2546                                         &adapter->vf_cfg[vf].vf_if_handle,
2547                                         NULL, vf + 1);
2548                 if (status)
2549                         goto err;
2550         }
2551
2552         status = be_vf_eth_addr_config(adapter);
2553         if (status)
2554                 goto err;
2555
2556         for (vf = 0; vf < num_vfs; vf++) {
2557                 status = be_cmd_link_status_query(adapter, NULL, &lnk_speed,
2558                                 vf + 1);
2559                 if (status)
2560                         goto err;
2561                 adapter->vf_cfg[vf].vf_tx_rate = lnk_speed * 10;
2562         }
2563         return 0;
2564 err:
2565         return status;
2566 }
2567
2568 static void be_setup_init(struct be_adapter *adapter)
2569 {
2570         adapter->vlan_prio_bmap = 0xff;
2571         adapter->link_speed = -1;
2572         adapter->if_handle = -1;
2573         adapter->be3_native = false;
2574         adapter->promiscuous = false;
2575         adapter->eq_next_idx = 0;
2576 }
2577
2578 static int be_configure_mac_from_list(struct be_adapter *adapter, u8 *mac)
2579 {
2580         u32 pmac_id;
2581         int status = be_cmd_get_mac_from_list(adapter, 0, &pmac_id);
2582         if (status != 0)
2583                 goto do_none;
2584         status = be_cmd_mac_addr_query(adapter, mac,
2585                         MAC_ADDRESS_TYPE_NETWORK,
2586                         false, adapter->if_handle, pmac_id);
2587         if (status != 0)
2588                 goto do_none;
2589         status = be_cmd_pmac_add(adapter, mac, adapter->if_handle,
2590                         &adapter->pmac_id, 0);
2591 do_none:
2592         return status;
2593 }
2594
2595 static int be_setup(struct be_adapter *adapter)
2596 {
2597         struct net_device *netdev = adapter->netdev;
2598         u32 cap_flags, en_flags;
2599         u32 tx_fc, rx_fc;
2600         int status, i;
2601         u8 mac[ETH_ALEN];
2602         struct be_tx_obj *txo;
2603
2604         be_setup_init(adapter);
2605
2606         be_cmd_req_native_mode(adapter);
2607
2608         status = be_tx_queues_create(adapter);
2609         if (status != 0)
2610                 goto err;
2611
2612         status = be_rx_queues_create(adapter);
2613         if (status != 0)
2614                 goto err;
2615
2616         status = be_mcc_queues_create(adapter);
2617         if (status != 0)
2618                 goto err;
2619
2620         memset(mac, 0, ETH_ALEN);
2621         status = be_cmd_mac_addr_query(adapter, mac, MAC_ADDRESS_TYPE_NETWORK,
2622                         true /*permanent */, 0, 0);
2623         if (status)
2624                 return status;
2625         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2626         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2627
2628         en_flags = BE_IF_FLAGS_UNTAGGED | BE_IF_FLAGS_BROADCAST |
2629                         BE_IF_FLAGS_MULTICAST | BE_IF_FLAGS_PASS_L3L4_ERRORS;
2630         cap_flags = en_flags | BE_IF_FLAGS_MCAST_PROMISCUOUS |
2631                         BE_IF_FLAGS_VLAN_PROMISCUOUS | BE_IF_FLAGS_PROMISCUOUS;
2632
2633         if (adapter->function_caps & BE_FUNCTION_CAPS_RSS) {
2634                 cap_flags |= BE_IF_FLAGS_RSS;
2635                 en_flags |= BE_IF_FLAGS_RSS;
2636         }
2637         status = be_cmd_if_create(adapter, cap_flags, en_flags,
2638                         netdev->dev_addr, &adapter->if_handle,
2639                         &adapter->pmac_id, 0);
2640         if (status != 0)
2641                 goto err;
2642
2643         for_all_tx_queues(adapter, txo, i) {
2644                 status = be_cmd_txq_create(adapter, &txo->q, &txo->cq);
2645                 if (status)
2646                         goto err;
2647         }
2648
2649         /* The VF's permanent mac queried from card is incorrect.
2650          * For BEx: Query the mac configured by the PF using if_handle
2651          * For Lancer: Get and use mac_list to obtain mac address.
2652          */
2653         if (!be_physfn(adapter)) {
2654                 if (lancer_chip(adapter))
2655                         status = be_configure_mac_from_list(adapter, mac);
2656                 else
2657                         status = be_cmd_mac_addr_query(adapter, mac,
2658                                         MAC_ADDRESS_TYPE_NETWORK, false,
2659                                         adapter->if_handle, 0);
2660                 if (!status) {
2661                         memcpy(adapter->netdev->dev_addr, mac, ETH_ALEN);
2662                         memcpy(adapter->netdev->perm_addr, mac, ETH_ALEN);
2663                 }
2664         }
2665
2666         be_cmd_get_fw_ver(adapter, adapter->fw_ver, NULL);
2667
2668         status = be_vid_config(adapter, false, 0);
2669         if (status)
2670                 goto err;
2671
2672         be_set_rx_mode(adapter->netdev);
2673
2674         status = be_cmd_get_flow_control(adapter, &tx_fc, &rx_fc);
2675         /* For Lancer: It is legal for this cmd to fail on VF */
2676         if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2677                 goto err;
2678
2679         if (rx_fc != adapter->rx_fc || tx_fc != adapter->tx_fc) {
2680                 status = be_cmd_set_flow_control(adapter, adapter->tx_fc,
2681                                         adapter->rx_fc);
2682                 /* For Lancer: It is legal for this cmd to fail on VF */
2683                 if (status && (be_physfn(adapter) || !lancer_chip(adapter)))
2684                         goto err;
2685         }
2686
2687         pcie_set_readrq(adapter->pdev, 4096);
2688
2689         if (be_physfn(adapter) && adapter->sriov_enabled) {
2690                 status = be_vf_setup(adapter);
2691                 if (status)
2692                         goto err;
2693         }
2694
2695         return 0;
2696 err:
2697         be_clear(adapter);
2698         return status;
2699 }
2700
2701 #ifdef CONFIG_NET_POLL_CONTROLLER
2702 static void be_netpoll(struct net_device *netdev)
2703 {
2704         struct be_adapter *adapter = netdev_priv(netdev);
2705         struct be_rx_obj *rxo;
2706         int i;
2707
2708         event_handle(adapter, &adapter->tx_eq, false);
2709         for_all_rx_queues(adapter, rxo, i)
2710                 event_handle(adapter, &rxo->rx_eq, true);
2711 }
2712 #endif
2713
2714 #define FW_FILE_HDR_SIGN        "ServerEngines Corp. "
2715 static bool be_flash_redboot(struct be_adapter *adapter,
2716                         const u8 *p, u32 img_start, int image_size,
2717                         int hdr_size)
2718 {
2719         u32 crc_offset;
2720         u8 flashed_crc[4];
2721         int status;
2722
2723         crc_offset = hdr_size + img_start + image_size - 4;
2724
2725         p += crc_offset;
2726
2727         status = be_cmd_get_flash_crc(adapter, flashed_crc,
2728                         (image_size - 4));
2729         if (status) {
2730                 dev_err(&adapter->pdev->dev,
2731                 "could not get crc from flash, not flashing redboot\n");
2732                 return false;
2733         }
2734
2735         /* Update redboot only if the crc does not match */
2736         return memcmp(flashed_crc, p, 4) != 0;
2740 }
2741
2742 static bool phy_flashing_required(struct be_adapter *adapter)
2743 {
2744         int status = 0;
2745         struct be_phy_info phy_info;
2746
2747         status = be_cmd_get_phy_info(adapter, &phy_info);
2748         if (status)
2749                 return false;
2750         if ((phy_info.phy_type == TN_8022) &&
2751                 (phy_info.interface_type == PHY_TYPE_BASET_10GB)) {
2752                 return true;
2753         }
2754         return false;
2755 }
2756
2757 static int be_flash_data(struct be_adapter *adapter,
2758                         const struct firmware *fw,
2759                         struct be_dma_mem *flash_cmd, int num_of_images)
2760
2761 {
2762         int status = 0, i, filehdr_size = 0;
2763         u32 total_bytes = 0, flash_op;
2764         int num_bytes;
2765         const u8 *p = fw->data;
2766         struct be_cmd_write_flashrom *req = flash_cmd->va;
2767         const struct flash_comp *pflashcomp;
2768         int num_comp;
2769
2770         static const struct flash_comp gen3_flash_types[10] = {
2771                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g3, IMG_TYPE_ISCSI_ACTIVE,
2772                         FLASH_IMAGE_MAX_SIZE_g3},
2773                 { FLASH_REDBOOT_START_g3, IMG_TYPE_REDBOOT,
2774                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g3},
2775                 { FLASH_iSCSI_BIOS_START_g3, IMG_TYPE_BIOS,
2776                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2777                 { FLASH_PXE_BIOS_START_g3, IMG_TYPE_PXE_BIOS,
2778                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2779                 { FLASH_FCoE_BIOS_START_g3, IMG_TYPE_FCOE_BIOS,
2780                         FLASH_BIOS_IMAGE_MAX_SIZE_g3},
2781                 { FLASH_iSCSI_BACKUP_IMAGE_START_g3, IMG_TYPE_ISCSI_BACKUP,
2782                         FLASH_IMAGE_MAX_SIZE_g3},
2783                 { FLASH_FCoE_PRIMARY_IMAGE_START_g3, IMG_TYPE_FCOE_FW_ACTIVE,
2784                         FLASH_IMAGE_MAX_SIZE_g3},
2785                 { FLASH_FCoE_BACKUP_IMAGE_START_g3, IMG_TYPE_FCOE_FW_BACKUP,
2786                         FLASH_IMAGE_MAX_SIZE_g3},
2787                 { FLASH_NCSI_START_g3, IMG_TYPE_NCSI_FW,
2788                         FLASH_NCSI_IMAGE_MAX_SIZE_g3},
2789                 { FLASH_PHY_FW_START_g3, IMG_TYPE_PHY_FW,
2790                         FLASH_PHY_FW_IMAGE_MAX_SIZE_g3}
2791         };
2792         static const struct flash_comp gen2_flash_types[8] = {
2793                 { FLASH_iSCSI_PRIMARY_IMAGE_START_g2, IMG_TYPE_ISCSI_ACTIVE,
2794                         FLASH_IMAGE_MAX_SIZE_g2},
2795                 { FLASH_REDBOOT_START_g2, IMG_TYPE_REDBOOT,
2796                         FLASH_REDBOOT_IMAGE_MAX_SIZE_g2},
2797                 { FLASH_iSCSI_BIOS_START_g2, IMG_TYPE_BIOS,
2798                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2799                 { FLASH_PXE_BIOS_START_g2, IMG_TYPE_PXE_BIOS,
2800                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2801                 { FLASH_FCoE_BIOS_START_g2, IMG_TYPE_FCOE_BIOS,
2802                         FLASH_BIOS_IMAGE_MAX_SIZE_g2},
2803                 { FLASH_iSCSI_BACKUP_IMAGE_START_g2, IMG_TYPE_ISCSI_BACKUP,
2804                         FLASH_IMAGE_MAX_SIZE_g2},
2805                 { FLASH_FCoE_PRIMARY_IMAGE_START_g2, IMG_TYPE_FCOE_FW_ACTIVE,
2806                         FLASH_IMAGE_MAX_SIZE_g2},
2807                 { FLASH_FCoE_BACKUP_IMAGE_START_g2, IMG_TYPE_FCOE_FW_BACKUP,
2808                          FLASH_IMAGE_MAX_SIZE_g2}
2809         };
2810
2811         if (adapter->generation == BE_GEN3) {
2812                 pflashcomp = gen3_flash_types;
2813                 filehdr_size = sizeof(struct flash_file_hdr_g3);
2814                 num_comp = ARRAY_SIZE(gen3_flash_types);
2815         } else {
2816                 pflashcomp = gen2_flash_types;
2817                 filehdr_size = sizeof(struct flash_file_hdr_g2);
2818                 num_comp = ARRAY_SIZE(gen2_flash_types);
2819         }
2820         for (i = 0; i < num_comp; i++) {
2821                 if ((pflashcomp[i].optype == IMG_TYPE_NCSI_FW) &&
2822                                 memcmp(adapter->fw_ver, "3.102.148.0", 11) < 0)
2823                         continue;
2824                 if (pflashcomp[i].optype == IMG_TYPE_PHY_FW) {
2825                         if (!phy_flashing_required(adapter))
2826                                 continue;
2827                 }
2828                 if ((pflashcomp[i].optype == IMG_TYPE_REDBOOT) &&
2829                         (!be_flash_redboot(adapter, fw->data,
2830                         pflashcomp[i].offset, pflashcomp[i].size, filehdr_size +
2831                         (num_of_images * sizeof(struct image_hdr)))))
2832                         continue;
2833                 p = fw->data;
2834                 p += filehdr_size + pflashcomp[i].offset
2835                         + (num_of_images * sizeof(struct image_hdr));
2836                 if (p + pflashcomp[i].size > fw->data + fw->size)
2837                         return -1;
2838                 total_bytes = pflashcomp[i].size;
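                     /* Stream the component in 32KB chunks: intermediate
                      * chunks are written with the SAVE op and only the
                      * final chunk switches to the FLASH op that commits
                      * the transfer to flashrom.
                      */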
                while (total_bytes) {
                        if (total_bytes > 32*1024)
                                num_bytes = 32*1024;
                        else
                                num_bytes = total_bytes;
                        total_bytes -= num_bytes;
                        if (!total_bytes) {
                                if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
                                        flash_op = FLASHROM_OPER_PHY_FLASH;
                                else
                                        flash_op = FLASHROM_OPER_FLASH;
                        } else {
                                if (pflashcomp[i].optype == IMG_TYPE_PHY_FW)
                                        flash_op = FLASHROM_OPER_PHY_SAVE;
                                else
                                        flash_op = FLASHROM_OPER_SAVE;
                        }
                        memcpy(req->params.data_buf, p, num_bytes);
                        p += num_bytes;
                        status = be_cmd_write_flashrom(adapter, flash_cmd,
                                pflashcomp[i].optype, flash_op, num_bytes);
                        if (status) {
                                if ((status == ILLEGAL_IOCTL_REQ) &&
                                        (pflashcomp[i].optype ==
                                                IMG_TYPE_PHY_FW))
                                        break;
                                dev_err(&adapter->pdev->dev,
                                        "cmd to write to flash rom failed.\n");
                                return -1;
                        }
                }
        }
        return 0;
}

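/* Infer the UFI image generation from the flash file header's build string. */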
static int get_ufigen_type(struct flash_file_hdr_g2 *fhdr)
{
        if (fhdr == NULL)
                return 0;
        if (fhdr->build[0] == '3')
                return BE_GEN3;
        else if (fhdr->build[0] == '2')
                return BE_GEN2;
        else
                return 0;
}

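/*
 * Download firmware to a Lancer chip: stream the image to the "/prg" object
 * in 32KB chunks via WRITE_OBJECT commands, then issue a zero-length write
 * to commit the downloaded image.
 */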
static int lancer_fw_download(struct be_adapter *adapter,
                                const struct firmware *fw)
{
#define LANCER_FW_DOWNLOAD_CHUNK      (32 * 1024)
#define LANCER_FW_DOWNLOAD_LOCATION   "/prg"
        struct be_dma_mem flash_cmd;
        const u8 *data_ptr = NULL;
        u8 *dest_image_ptr = NULL;
        size_t image_size = 0;
        u32 chunk_size = 0;
        u32 data_written = 0;
        u32 offset = 0;
        int status = 0;
        u8 add_status = 0;

        if (!IS_ALIGNED(fw->size, sizeof(u32))) {
                dev_err(&adapter->pdev->dev,
                        "FW image not properly aligned. "
                        "Length must be 4-byte aligned.\n");
                status = -EINVAL;
                goto lancer_fw_exit;
        }

        flash_cmd.size = sizeof(struct lancer_cmd_req_write_object)
                                + LANCER_FW_DOWNLOAD_CHUNK;
        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
                                                &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
                dev_err(&adapter->pdev->dev,
                        "Memory allocation failure while flashing\n");
                goto lancer_fw_exit;
        }

        dest_image_ptr = flash_cmd.va +
                                sizeof(struct lancer_cmd_req_write_object);
        image_size = fw->size;
        data_ptr = fw->data;

        while (image_size) {
                chunk_size = min_t(u32, image_size, LANCER_FW_DOWNLOAD_CHUNK);

                /* Copy the image chunk content. */
                memcpy(dest_image_ptr, data_ptr, chunk_size);

                status = lancer_cmd_write_object(adapter, &flash_cmd,
                                chunk_size, offset, LANCER_FW_DOWNLOAD_LOCATION,
                                &data_written, &add_status);

                if (status)
                        break;

                offset += data_written;
                data_ptr += data_written;
                image_size -= data_written;
        }

        if (!status) {
                /* Commit the FW written */
                status = lancer_cmd_write_object(adapter, &flash_cmd,
                                        0, offset, LANCER_FW_DOWNLOAD_LOCATION,
                                        &data_written, &add_status);
        }

        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
                                flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load error. "
                        "Status code: 0x%x Additional Status: 0x%x\n",
                        status, add_status);
                goto lancer_fw_exit;
        }

        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");
lancer_fw_exit:
        return status;
}

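/*
 * Download firmware on BE2/BE3 chips: check that the UFI file generation
 * matches the adapter generation, then flash each component image it carries.
 */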
static int be_fw_download(struct be_adapter *adapter, const struct firmware *fw)
{
        struct flash_file_hdr_g2 *fhdr;
        struct flash_file_hdr_g3 *fhdr3;
        struct image_hdr *img_hdr_ptr = NULL;
        struct be_dma_mem flash_cmd;
        const u8 *p;
        int status = 0, i = 0, num_imgs = 0;

        p = fw->data;
        fhdr = (struct flash_file_hdr_g2 *) p;

        flash_cmd.size = sizeof(struct be_cmd_write_flashrom) + 32*1024;
        flash_cmd.va = dma_alloc_coherent(&adapter->pdev->dev, flash_cmd.size,
                                          &flash_cmd.dma, GFP_KERNEL);
        if (!flash_cmd.va) {
                status = -ENOMEM;
                dev_err(&adapter->pdev->dev,
                        "Memory allocation failure while flashing\n");
                goto be_fw_exit;
        }

        if ((adapter->generation == BE_GEN3) &&
                        (get_ufigen_type(fhdr) == BE_GEN3)) {
                fhdr3 = (struct flash_file_hdr_g3 *) fw->data;
                num_imgs = le32_to_cpu(fhdr3->num_imgs);
                for (i = 0; i < num_imgs; i++) {
                        img_hdr_ptr = (struct image_hdr *) (fw->data +
                                        (sizeof(struct flash_file_hdr_g3) +
                                         i * sizeof(struct image_hdr)));
                        if (le32_to_cpu(img_hdr_ptr->imageid) == 1)
                                status = be_flash_data(adapter, fw, &flash_cmd,
                                                        num_imgs);
                }
        } else if ((adapter->generation == BE_GEN2) &&
                        (get_ufigen_type(fhdr) == BE_GEN2)) {
                status = be_flash_data(adapter, fw, &flash_cmd, 0);
        } else {
                dev_err(&adapter->pdev->dev,
                        "UFI and Interface are not compatible for flashing\n");
                status = -1;
        }

        dma_free_coherent(&adapter->pdev->dev, flash_cmd.size, flash_cmd.va,
                          flash_cmd.dma);
        if (status) {
                dev_err(&adapter->pdev->dev, "Firmware load error\n");
                goto be_fw_exit;
        }

        dev_info(&adapter->pdev->dev, "Firmware flashed successfully\n");

be_fw_exit:
        return status;
}

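/*
 * Firmware-flash entry point (reached via ethtool's flash-device operation):
 * fetch the firmware file from user space and dispatch to the Lancer or
 * BE2/BE3 download path.
 */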
int be_load_fw(struct be_adapter *adapter, u8 *fw_file)
{
        const struct firmware *fw;
        int status;

        if (!netif_running(adapter->netdev)) {
                dev_err(&adapter->pdev->dev,
                        "Firmware load not allowed (interface is down)\n");
                return -ENETDOWN;
        }

        status = request_firmware(&fw, fw_file, &adapter->pdev->dev);
        if (status)
                goto fw_exit;

        dev_info(&adapter->pdev->dev, "Flashing firmware file %s\n", fw_file);

        if (lancer_chip(adapter))
                status = lancer_fw_download(adapter, fw);
        else
                status = be_fw_download(adapter, fw);

fw_exit:
        release_firmware(fw);
        return status;
}

static struct net_device_ops be_netdev_ops = {
        .ndo_open               = be_open,
        .ndo_stop               = be_close,
        .ndo_start_xmit         = be_xmit,
        .ndo_set_rx_mode        = be_set_rx_mode,
        .ndo_set_mac_address    = be_mac_addr_set,
        .ndo_change_mtu         = be_change_mtu,
        .ndo_get_stats64        = be_get_stats64,
        .ndo_validate_addr      = eth_validate_addr,
        .ndo_vlan_rx_add_vid    = be_vlan_add_vid,
        .ndo_vlan_rx_kill_vid   = be_vlan_rem_vid,
        .ndo_set_vf_mac         = be_set_vf_mac,
        .ndo_set_vf_vlan        = be_set_vf_vlan,
        .ndo_set_vf_tx_rate     = be_set_vf_tx_rate,
        .ndo_get_vf_config      = be_get_vf_config,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller    = be_netpoll,
#endif
};

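/*
 * Set up netdev feature flags, ops and NAPI contexts: one NAPI context per
 * RX queue, plus one shared by TX and the MCC queue.
 */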
static void be_netdev_init(struct net_device *netdev)
{
        struct be_adapter *adapter = netdev_priv(netdev);
        struct be_rx_obj *rxo;
        int i;

        netdev->hw_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
                NETIF_F_HW_VLAN_TX;
        if (be_multi_rxq(adapter))
                netdev->hw_features |= NETIF_F_RXHASH;

        netdev->features |= netdev->hw_features |
                NETIF_F_HW_VLAN_RX | NETIF_F_HW_VLAN_FILTER;

        netdev->vlan_features |= NETIF_F_SG | NETIF_F_TSO | NETIF_F_TSO6 |
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM;

        netdev->flags |= IFF_MULTICAST;

        netif_set_gso_max_size(netdev, 65535);

        BE_SET_NETDEV_OPS(netdev, &be_netdev_ops);

        SET_ETHTOOL_OPS(netdev, &be_ethtool_ops);

        for_all_rx_queues(adapter, rxo, i)
                netif_napi_add(netdev, &rxo->rx_eq.napi, be_poll_rx,
                                BE_NAPI_WEIGHT);

        netif_napi_add(netdev, &adapter->tx_eq.napi, be_poll_tx_mcc,
                BE_NAPI_WEIGHT);
}

static void be_unmap_pci_bars(struct be_adapter *adapter)
{
        if (adapter->csr)
                iounmap(adapter->csr);
        if (adapter->db)
                iounmap(adapter->db);
}

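/*
 * Map the PCI BARs this function needs: on Lancer only BAR 0 (doorbells);
 * on BE2/BE3 the CSR BAR (2) for the PF, and the doorbell BAR (4 on GEN2
 * and GEN3 PFs, 0 on GEN3 VFs).
 */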
static int be_map_pci_bars(struct be_adapter *adapter)
{
        u8 __iomem *addr;
        int db_reg;

        if (lancer_chip(adapter)) {
                addr = ioremap_nocache(pci_resource_start(adapter->pdev, 0),
                        pci_resource_len(adapter->pdev, 0));
                if (addr == NULL)
                        return -ENOMEM;
                adapter->db = addr;
                return 0;
        }

        if (be_physfn(adapter)) {
                addr = ioremap_nocache(pci_resource_start(adapter->pdev, 2),
                                pci_resource_len(adapter->pdev, 2));
                if (addr == NULL)
                        return -ENOMEM;
                adapter->csr = addr;
        }

        if (adapter->generation == BE_GEN2) {
                db_reg = 4;
        } else {
                if (be_physfn(adapter))
                        db_reg = 4;
                else
                        db_reg = 0;
        }
        addr = ioremap_nocache(pci_resource_start(adapter->pdev, db_reg),
                                pci_resource_len(adapter->pdev, db_reg));
        if (addr == NULL)
                goto pci_map_err;
        adapter->db = addr;

        return 0;
pci_map_err:
        be_unmap_pci_bars(adapter);
        return -ENOMEM;
}

static void be_ctrl_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *mem = &adapter->mbox_mem_alloced;

        be_unmap_pci_bars(adapter);

        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);

        mem = &adapter->rx_filter;
        if (mem->va)
                dma_free_coherent(&adapter->pdev->dev, mem->size, mem->va,
                                  mem->dma);
}

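/*
 * Map BARs and allocate the DMA memory used to talk to the controller: the
 * bootstrap mailbox (over-allocated by 16 bytes so the command can be placed
 * at a 16-byte-aligned address) and the RX filter command buffer.
 */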
static int be_ctrl_init(struct be_adapter *adapter)
{
        struct be_dma_mem *mbox_mem_alloc = &adapter->mbox_mem_alloced;
        struct be_dma_mem *mbox_mem_align = &adapter->mbox_mem;
        struct be_dma_mem *rx_filter = &adapter->rx_filter;
        int status;

        status = be_map_pci_bars(adapter);
        if (status)
                goto done;

        mbox_mem_alloc->size = sizeof(struct be_mcc_mailbox) + 16;
        mbox_mem_alloc->va = dma_alloc_coherent(&adapter->pdev->dev,
                                                mbox_mem_alloc->size,
                                                &mbox_mem_alloc->dma,
                                                GFP_KERNEL);
        if (!mbox_mem_alloc->va) {
                status = -ENOMEM;
                goto unmap_pci_bars;
        }
        mbox_mem_align->size = sizeof(struct be_mcc_mailbox);
        mbox_mem_align->va = PTR_ALIGN(mbox_mem_alloc->va, 16);
        mbox_mem_align->dma = PTR_ALIGN(mbox_mem_alloc->dma, 16);
        memset(mbox_mem_align->va, 0, sizeof(struct be_mcc_mailbox));

        rx_filter->size = sizeof(struct be_cmd_req_rx_filter);
        rx_filter->va = dma_alloc_coherent(&adapter->pdev->dev, rx_filter->size,
                                        &rx_filter->dma, GFP_KERNEL);
        if (rx_filter->va == NULL) {
                status = -ENOMEM;
                goto free_mbox;
        }
        memset(rx_filter->va, 0, rx_filter->size);

        mutex_init(&adapter->mbox_lock);
        spin_lock_init(&adapter->mcc_lock);
        spin_lock_init(&adapter->mcc_cq_lock);

        init_completion(&adapter->flash_compl);
        pci_save_state(adapter->pdev);
        return 0;

free_mbox:
        dma_free_coherent(&adapter->pdev->dev, mbox_mem_alloc->size,
                          mbox_mem_alloc->va, mbox_mem_alloc->dma);

unmap_pci_bars:
        be_unmap_pci_bars(adapter);

done:
        return status;
}

static void be_stats_cleanup(struct be_adapter *adapter)
{
        struct be_dma_mem *cmd = &adapter->stats_cmd;

        if (cmd->va)
                dma_free_coherent(&adapter->pdev->dev, cmd->size,
                                  cmd->va, cmd->dma);
}

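/*
 * Allocate the DMA buffer for the stats command; its size depends on the
 * stats request format the chip uses (v0 for BE2, pport stats for Lancer,
 * v1 otherwise).
 */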
static int be_stats_init(struct be_adapter *adapter)
{
        struct be_dma_mem *cmd = &adapter->stats_cmd;

        if (adapter->generation == BE_GEN2) {
                cmd->size = sizeof(struct be_cmd_req_get_stats_v0);
        } else {
                if (lancer_chip(adapter))
                        cmd->size = sizeof(struct lancer_cmd_req_pport_stats);
                else
                        cmd->size = sizeof(struct be_cmd_req_get_stats_v1);
        }
        cmd->va = dma_alloc_coherent(&adapter->pdev->dev, cmd->size, &cmd->dma,
                                     GFP_KERNEL);
        if (cmd->va == NULL)
                return -ENOMEM;
        memset(cmd->va, 0, cmd->size);
        return 0;
}

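/* Tear the device down in the reverse order of be_probe(). */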
static void __devexit be_remove(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);

        unregister_netdev(adapter->netdev);

        be_clear(adapter);

        be_stats_cleanup(adapter);

        be_ctrl_cleanup(adapter);

        be_sriov_disable(adapter);

        be_msix_disable(adapter);

        pci_set_drvdata(pdev, NULL);
        pci_release_regions(pdev);
        pci_disable_device(pdev);

        free_netdev(adapter->netdev);
}

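/*
 * Query the firmware configuration and controller attributes. In FLEX10
 * mode only a quarter of the supported VLANs is available to this function.
 */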
static int be_get_config(struct be_adapter *adapter)
{
        int status;

        status = be_cmd_query_fw_cfg(adapter, &adapter->port_num,
                        &adapter->function_mode, &adapter->function_caps);
        if (status)
                return status;

        if (adapter->function_mode & FLEX10_MODE)
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED/4;
        else
                adapter->max_vlans = BE_NUM_VLANS_SUPPORTED;

        status = be_cmd_get_cntl_attributes(adapter);
        if (status)
                return status;

        return 0;
}

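/*
 * Determine the chip generation from the PCI device ID. For the SLI-based
 * device IDs, validate the SLI_INTF register and record the SLI family.
 */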
static int be_dev_family_check(struct be_adapter *adapter)
{
        struct pci_dev *pdev = adapter->pdev;
        u32 sli_intf = 0, if_type;

        switch (pdev->device) {
        case BE_DEVICE_ID1:
        case OC_DEVICE_ID1:
                adapter->generation = BE_GEN2;
                break;
        case BE_DEVICE_ID2:
        case OC_DEVICE_ID2:
                adapter->generation = BE_GEN3;
                break;
        case OC_DEVICE_ID3:
        case OC_DEVICE_ID4:
                pci_read_config_dword(pdev, SLI_INTF_REG_OFFSET, &sli_intf);
                if_type = (sli_intf & SLI_INTF_IF_TYPE_MASK) >>
                                                SLI_INTF_IF_TYPE_SHIFT;

                if (((sli_intf & SLI_INTF_VALID_MASK) != SLI_INTF_VALID) ||
                        if_type != 0x02) {
                        dev_err(&pdev->dev, "SLI_INTF reg val is not valid\n");
                        return -EINVAL;
                }
                adapter->sli_family = ((sli_intf & SLI_INTF_FAMILY_MASK) >>
                                         SLI_INTF_FAMILY_SHIFT);
                adapter->generation = BE_GEN3;
                break;
        default:
                adapter->generation = 0;
        }
        return 0;
}

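/* Poll the SLIPORT status register until the port reports ready (up to 30s). */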
static int lancer_wait_ready(struct be_adapter *adapter)
{
#define SLIPORT_READY_TIMEOUT 30
        u32 sliport_status;
        int status = 0, i;

        for (i = 0; i < SLIPORT_READY_TIMEOUT; i++) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                if (sliport_status & SLIPORT_STATUS_RDY_MASK)
                        break;

                msleep(1000);
        }

        if (i == SLIPORT_READY_TIMEOUT)
                status = -1;

        return status;
}

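/*
 * If the port is in an error state and asks for a reset, initiate one via
 * the SLIPORT control register, then wait for the port to come back ready
 * with the error cleared.
 */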
static int lancer_test_and_set_rdy_state(struct be_adapter *adapter)
{
        int status;
        u32 sliport_status, err, reset_needed;

        status = lancer_wait_ready(adapter);
        if (!status) {
                sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);
                err = sliport_status & SLIPORT_STATUS_ERR_MASK;
                reset_needed = sliport_status & SLIPORT_STATUS_RN_MASK;
                if (err && reset_needed) {
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                        adapter->db + SLIPORT_CONTROL_OFFSET);

                        /* check if the adapter has corrected the error */
                        status = lancer_wait_ready(adapter);
                        sliport_status = ioread32(adapter->db +
                                                        SLIPORT_STATUS_OFFSET);
                        sliport_status &= (SLIPORT_STATUS_ERR_MASK |
                                                SLIPORT_STATUS_RN_MASK);
                        if (status || sliport_status)
                                status = -1;
                } else if (err || reset_needed) {
                        status = -1;
                }
        }
        return status;
}

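/*
 * Periodic error check for Lancer: if the SLIPORT reports an error, try to
 * reset the port and then rebuild the whole function (queues, interrupts,
 * filters) while the netdev is detached.
 */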
static void lancer_test_and_recover_fn_err(struct be_adapter *adapter)
{
        int status;
        u32 sliport_status;

        if (adapter->eeh_err || adapter->ue_detected)
                return;

        sliport_status = ioread32(adapter->db + SLIPORT_STATUS_OFFSET);

        if (sliport_status & SLIPORT_STATUS_ERR_MASK) {
                dev_err(&adapter->pdev->dev,
                                "Adapter in error state. "
                                "Trying to recover.\n");

                status = lancer_test_and_set_rdy_state(adapter);
                if (status)
                        goto err;

                netif_device_detach(adapter->netdev);

                if (netif_running(adapter->netdev))
                        be_close(adapter->netdev);

                be_clear(adapter);

                adapter->fw_timeout = false;

                status = be_setup(adapter);
                if (status)
                        goto err;

                if (netif_running(adapter->netdev)) {
                        status = be_open(adapter->netdev);
                        if (status)
                                goto err;
                }

                netif_device_attach(adapter->netdev);

                dev_info(&adapter->pdev->dev,
                                "Adapter error recovery succeeded\n");
        }
        return;
err:
        dev_err(&adapter->pdev->dev,
                        "Adapter error recovery failed\n");
}

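/*
 * Periodic (1s) housekeeping: run Lancer error recovery, check for
 * unrecoverable errors, refresh stats, adapt RX EQ delays and replenish
 * any RX rings that ran dry.
 */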
static void be_worker(struct work_struct *work)
{
        struct be_adapter *adapter =
                container_of(work, struct be_adapter, work.work);
        struct be_rx_obj *rxo;
        int i;

        if (lancer_chip(adapter))
                lancer_test_and_recover_fn_err(adapter);

        be_detect_dump_ue(adapter);

        /* when interrupts are not yet enabled, just reap any pending
         * mcc completions */
        if (!netif_running(adapter->netdev)) {
                int mcc_compl, status = 0;

                mcc_compl = be_process_mcc(adapter, &status);

                if (mcc_compl) {
                        struct be_mcc_obj *mcc_obj = &adapter->mcc_obj;
                        be_cq_notify(adapter, mcc_obj->cq.id, false, mcc_compl);
                }

                goto reschedule;
        }

        if (!adapter->stats_cmd_sent) {
                if (lancer_chip(adapter))
                        lancer_cmd_get_pport_stats(adapter,
                                                &adapter->stats_cmd);
                else
                        be_cmd_get_stats(adapter, &adapter->stats_cmd);
        }

        for_all_rx_queues(adapter, rxo, i) {
                be_rx_eqd_update(adapter, rxo);

                if (rxo->rx_post_starved) {
                        rxo->rx_post_starved = false;
                        be_post_rx_frags(rxo, GFP_KERNEL);
                }
        }

reschedule:
        adapter->work_counter++;
        schedule_delayed_work(&adapter->work, msecs_to_jiffies(1000));
}

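/*
 * One-time PCI probe: enable the device, map BARs, bring the firmware to a
 * ready state, allocate control/stats DMA memory, create the queues via
 * be_setup() and finally register the netdev.
 */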
static int __devinit be_probe(struct pci_dev *pdev,
                        const struct pci_device_id *pdev_id)
{
        int status = 0;
        struct be_adapter *adapter;
        struct net_device *netdev;

        status = pci_enable_device(pdev);
        if (status)
                goto do_none;

        status = pci_request_regions(pdev, DRV_NAME);
        if (status)
                goto disable_dev;
        pci_set_master(pdev);

        netdev = alloc_etherdev_mq(sizeof(struct be_adapter), MAX_TX_QS);
        if (netdev == NULL) {
                status = -ENOMEM;
                goto rel_reg;
        }
        adapter = netdev_priv(netdev);
        adapter->pdev = pdev;
        pci_set_drvdata(pdev, adapter);

        status = be_dev_family_check(adapter);
        if (status)
                goto free_netdev;

        adapter->netdev = netdev;
        SET_NETDEV_DEV(netdev, &pdev->dev);

        status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
        if (!status) {
                netdev->features |= NETIF_F_HIGHDMA;
        } else {
                status = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
                if (status) {
                        dev_err(&pdev->dev, "Could not set PCI DMA Mask\n");
                        goto free_netdev;
                }
        }

        status = be_sriov_enable(adapter);
        if (status)
                goto free_netdev;

        status = be_ctrl_init(adapter);
        if (status)
                goto disable_sriov;

        if (lancer_chip(adapter)) {
                status = lancer_wait_ready(adapter);
                if (!status) {
                        iowrite32(SLI_PORT_CONTROL_IP_MASK,
                                        adapter->db + SLIPORT_CONTROL_OFFSET);
                        status = lancer_test_and_set_rdy_state(adapter);
                }
                if (status) {
                        dev_err(&pdev->dev, "Adapter in non-recoverable error\n");
                        goto ctrl_clean;
                }
        }

        /* sync up with fw's ready state */
        if (be_physfn(adapter)) {
                status = be_cmd_POST(adapter);
                if (status)
                        goto ctrl_clean;
        }

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_cmd_reset_function(adapter);
        if (status)
                goto ctrl_clean;

        status = be_stats_init(adapter);
        if (status)
                goto ctrl_clean;

        status = be_get_config(adapter);
        if (status)
                goto stats_clean;

        /* The INTR bit may be set in the card when probed by a kdump kernel
         * after a crash.
         */
        if (!lancer_chip(adapter))
                be_intr_set(adapter, false);

        be_msix_enable(adapter);

        INIT_DELAYED_WORK(&adapter->work, be_worker);
        adapter->rx_fc = adapter->tx_fc = true;

        status = be_setup(adapter);
        if (status)
                goto msix_disable;

        be_netdev_init(netdev);
        status = register_netdev(netdev);
        if (status != 0)
                goto unsetup;

        dev_info(&pdev->dev, "%s port %d\n", nic_name(pdev), adapter->port_num);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;

unsetup:
        be_clear(adapter);
msix_disable:
        be_msix_disable(adapter);
stats_clean:
        be_stats_cleanup(adapter);
ctrl_clean:
        be_ctrl_cleanup(adapter);
disable_sriov:
        be_sriov_disable(adapter);
free_netdev:
        free_netdev(netdev);
        pci_set_drvdata(pdev, NULL);
rel_reg:
        pci_release_regions(pdev);
disable_dev:
        pci_disable_device(pdev);
do_none:
        dev_err(&pdev->dev, "%s initialization failed\n", nic_name(pdev));
        return status;
}

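/* Legacy PM suspend: quiesce the interface and, if WoL is enabled, arm it. */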
static int be_suspend(struct pci_dev *pdev, pm_message_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        cancel_delayed_work_sync(&adapter->work);
        if (adapter->wol)
                be_setup_wol(adapter, true);

        netif_device_detach(netdev);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        be_msix_disable(adapter);
        pci_save_state(pdev);
        pci_disable_device(pdev);
        pci_set_power_state(pdev, pci_choose_state(pdev, state));
        return 0;
}

static int be_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        netif_device_detach(netdev);

        status = pci_enable_device(pdev);
        if (status)
                return status;

        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        be_msix_enable(adapter);
        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                return status;

        be_setup(adapter);
        if (netif_running(netdev)) {
                rtnl_lock();
                be_open(netdev);
                rtnl_unlock();
        }
        netif_device_attach(netdev);

        if (adapter->wol)
                be_setup_wol(adapter, false);

        schedule_delayed_work(&adapter->work, msecs_to_jiffies(100));
        return 0;
}

/*
 * Reset the function on shutdown; an FLR stops BE from DMAing any
 * further data.
 */
static void be_shutdown(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);

        if (!adapter)
                return;

        cancel_delayed_work_sync(&adapter->work);

        netif_device_detach(adapter->netdev);

        if (adapter->wol)
                be_setup_wol(adapter, true);

        be_cmd_reset_function(adapter);

        pci_disable_device(pdev);
}

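/*
 * PCI error (EEH/AER) handlers: tear the function down when an error is
 * detected, re-enable and POST the card on slot reset, and rebuild the
 * function on resume.
 */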
static pci_ers_result_t be_eeh_err_detected(struct pci_dev *pdev,
                                pci_channel_state_t state)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_err(&adapter->pdev->dev, "EEH error detected\n");

        adapter->eeh_err = true;

        netif_device_detach(netdev);

        if (netif_running(netdev)) {
                rtnl_lock();
                be_close(netdev);
                rtnl_unlock();
        }
        be_clear(adapter);

        if (state == pci_channel_io_perm_failure)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_disable_device(pdev);

        return PCI_ERS_RESULT_NEED_RESET;
}

static pci_ers_result_t be_eeh_reset(struct pci_dev *pdev)
{
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        int status;

        dev_info(&adapter->pdev->dev, "EEH reset\n");
        adapter->eeh_err = false;
        adapter->ue_detected = false;
        adapter->fw_timeout = false;

        status = pci_enable_device(pdev);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        pci_set_master(pdev);
        pci_set_power_state(pdev, PCI_D0);
        pci_restore_state(pdev);

        /* Check if card is ok and fw is ready */
        status = be_cmd_POST(adapter);
        if (status)
                return PCI_ERS_RESULT_DISCONNECT;

        return PCI_ERS_RESULT_RECOVERED;
}

static void be_eeh_resume(struct pci_dev *pdev)
{
        int status = 0;
        struct be_adapter *adapter = pci_get_drvdata(pdev);
        struct net_device *netdev = adapter->netdev;

        dev_info(&adapter->pdev->dev, "EEH resume\n");

        pci_save_state(pdev);

        /* tell fw we're ready to fire cmds */
        status = be_cmd_fw_init(adapter);
        if (status)
                goto err;

        status = be_setup(adapter);
        if (status)
                goto err;

        if (netif_running(netdev)) {
                status = be_open(netdev);
                if (status)
                        goto err;
        }
        netif_device_attach(netdev);
        return;
err:
        dev_err(&adapter->pdev->dev, "EEH resume failed\n");
}

static struct pci_error_handlers be_eeh_handlers = {
        .error_detected = be_eeh_err_detected,
        .slot_reset = be_eeh_reset,
        .resume = be_eeh_resume,
};

static struct pci_driver be_driver = {
        .name = DRV_NAME,
        .id_table = be_dev_ids,
        .probe = be_probe,
        .remove = be_remove,
        .suspend = be_suspend,
        .resume = be_resume,
        .shutdown = be_shutdown,
        .err_handler = &be_eeh_handlers
};

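/* Validate module parameters before registering the PCI driver. */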
static int __init be_init_module(void)
{
        if (rx_frag_size != 8192 && rx_frag_size != 4096 &&
            rx_frag_size != 2048) {
                printk(KERN_WARNING DRV_NAME
                        " : Module param rx_frag_size must be 2048/4096/8192."
                        " Using 2048\n");
                rx_frag_size = 2048;
        }

        return pci_register_driver(&be_driver);
}
module_init(be_init_module);

static void __exit be_exit_module(void)
{
        pci_unregister_driver(&be_driver);
}
module_exit(be_exit_module);