drivers/net: Add module.h to drivers who were implicitly using it
/*
 * Linux driver for VMware's vmxnet3 ethernet NIC.
 *
 * Copyright (C) 2008-2009, VMware, Inc. All Rights Reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation; version 2 of the License and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE, GOOD TITLE or
 * NON INFRINGEMENT. See the GNU General Public License for more
 * details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Maintained by: Shreyas Bhatewara <pv-drivers@vmware.com>
 *
 */

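/*
 * <linux/module.h> is included explicitly: this file uses module
 * infrastructure such as MODULE_DEVICE_TABLE() and, per the commit this
 * snapshot carries, can no longer rely on picking the header up
 * implicitly through another include.
 */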
#include <linux/module.h>
#include <net/ip6_checksum.h>

#include "vmxnet3_int.h"

char vmxnet3_driver_name[] = "vmxnet3";
#define VMXNET3_DRIVER_DESC "VMware vmxnet3 virtual NIC driver"

/*
 * PCI Device ID Table
 * Last entry must be all 0s
 */
static DEFINE_PCI_DEVICE_TABLE(vmxnet3_pciid_table) = {
        {PCI_VDEVICE(VMWARE, PCI_DEVICE_ID_VMWARE_VMXNET3)},
        {0}
};

MODULE_DEVICE_TABLE(pci, vmxnet3_pciid_table);

static atomic_t devices_found;

#define VMXNET3_MAX_DEVICES 10
static int enable_mq = 1;
static int irq_share_mode;

static void
vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac);

/*
 *    Enable/Disable the given intr
 */
static void
vmxnet3_enable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
        VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 0);
}


static void
vmxnet3_disable_intr(struct vmxnet3_adapter *adapter, unsigned intr_idx)
{
        VMXNET3_WRITE_BAR0_REG(adapter, VMXNET3_REG_IMR + intr_idx * 8, 1);
}


/*
 *    Enable/Disable all intrs used by the device
 */
static void
vmxnet3_enable_all_intrs(struct vmxnet3_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->intr.num_intrs; i++)
                vmxnet3_enable_intr(adapter, i);
        adapter->shared->devRead.intrConf.intrCtrl &=
                                        cpu_to_le32(~VMXNET3_IC_DISABLE_ALL);
}


static void
vmxnet3_disable_all_intrs(struct vmxnet3_adapter *adapter)
{
        int i;

        adapter->shared->devRead.intrConf.intrCtrl |=
                                        cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
        for (i = 0; i < adapter->intr.num_intrs; i++)
                vmxnet3_disable_intr(adapter, i);
}


static void
vmxnet3_ack_events(struct vmxnet3_adapter *adapter, u32 events)
{
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_ECR, events);
}


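/*
 * Tx queue start/wake/stop helpers. tq->stopped mirrors the state of the
 * matching netdev subqueue; tq - adapter->tx_queue is the index of this
 * queue within the adapter and hence the subqueue number.
 */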
static bool
vmxnet3_tq_stopped(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        return tq->stopped;
}


static void
vmxnet3_tq_start(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        tq->stopped = false;
        netif_start_subqueue(adapter->netdev, tq - adapter->tx_queue);
}


static void
vmxnet3_tq_wake(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        tq->stopped = false;
        netif_wake_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


static void
vmxnet3_tq_stop(struct vmxnet3_tx_queue *tq, struct vmxnet3_adapter *adapter)
{
        tq->stopped = true;
        tq->num_stop++;
        netif_stop_subqueue(adapter->netdev, (tq - adapter->tx_queue));
}


/*
 * Check the link state. This may start or stop the tx queue.
 */
static void
vmxnet3_check_link(struct vmxnet3_adapter *adapter, bool affectTxQueue)
{
        u32 ret;
        int i;
        unsigned long flags;

        spin_lock_irqsave(&adapter->cmd_lock, flags);
        VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_GET_LINK);
        ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
        spin_unlock_irqrestore(&adapter->cmd_lock, flags);

        adapter->link_speed = ret >> 16;
        if (ret & 1) { /* Link is up. */
                printk(KERN_INFO "%s: NIC Link is Up %d Mbps\n",
                       adapter->netdev->name, adapter->link_speed);
                if (!netif_carrier_ok(adapter->netdev))
                        netif_carrier_on(adapter->netdev);

                if (affectTxQueue) {
                        for (i = 0; i < adapter->num_tx_queues; i++)
                                vmxnet3_tq_start(&adapter->tx_queue[i],
                                                 adapter);
                }
        } else {
                printk(KERN_INFO "%s: NIC Link is Down\n",
                       adapter->netdev->name);
                if (netif_carrier_ok(adapter->netdev))
                        netif_carrier_off(adapter->netdev);

                if (affectTxQueue) {
                        for (i = 0; i < adapter->num_tx_queues; i++)
                                vmxnet3_tq_stop(&adapter->tx_queue[i], adapter);
                }
        }
}

static void
vmxnet3_process_events(struct vmxnet3_adapter *adapter)
{
        int i;
        unsigned long flags;
        u32 events = le32_to_cpu(adapter->shared->ecr);
        if (!events)
                return;

        vmxnet3_ack_events(adapter, events);

        /* Check if link state has changed */
        if (events & VMXNET3_ECR_LINK)
                vmxnet3_check_link(adapter, true);

        /* Check if there is an error on xmit/recv queues */
        if (events & (VMXNET3_ECR_TQERR | VMXNET3_ECR_RQERR)) {
                spin_lock_irqsave(&adapter->cmd_lock, flags);
                VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
                                       VMXNET3_CMD_GET_QUEUE_STATUS);
                spin_unlock_irqrestore(&adapter->cmd_lock, flags);

                for (i = 0; i < adapter->num_tx_queues; i++)
                        if (adapter->tqd_start[i].status.stopped)
                                dev_err(&adapter->netdev->dev,
                                        "%s: tq[%d] error 0x%x\n",
                                        adapter->netdev->name, i, le32_to_cpu(
                                        adapter->tqd_start[i].status.error));
                for (i = 0; i < adapter->num_rx_queues; i++)
                        if (adapter->rqd_start[i].status.stopped)
                                dev_err(&adapter->netdev->dev,
                                        "%s: rq[%d] error 0x%x\n",
                                        adapter->netdev->name, i,
                                        adapter->rqd_start[i].status.error);

                schedule_work(&adapter->work);
        }
}

#ifdef __BIG_ENDIAN_BITFIELD
/*
 * The device expects the bitfields in shared structures to be written in
 * little endian. When the CPU is big endian, the following routines are used
 * to correctly read from and write to the ABI.
 * The general technique used here is: double word bitfields are defined in
 * the opposite order for big endian architectures. Before the driver reads
 * them, the complete double word is translated using le32_to_cpu. Similarly,
 * after the driver writes into bitfields, cpu_to_le32 is used to translate
 * the double words into the required format.
 * To avoid touching bits in the shared structure more than once, temporary
 * descriptors are used. These are passed as srcDesc to the following
 * functions.
 */
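/*
 * For example, a one-bit generation flag laid out on little endian as
 *      u32 gen:1, rest:31;
 * is declared for big endian builds as
 *      u32 rest:31, gen:1;
 * (field names illustrative; the real layouts live in the descriptor
 * definitions), so once the containing double word has been converted
 * with le32_to_cpu()/cpu_to_le32(), the bitfield access itself needs no
 * further changes.
 */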
static void vmxnet3_RxDescToCPU(const struct Vmxnet3_RxDesc *srcDesc,
                                struct Vmxnet3_RxDesc *dstDesc)
{
        u32 *src = (u32 *)srcDesc + 2;
        u32 *dst = (u32 *)dstDesc + 2;
        dstDesc->addr = le64_to_cpu(srcDesc->addr);
        *dst = le32_to_cpu(*src);
        dstDesc->ext1 = le32_to_cpu(srcDesc->ext1);
}

static void vmxnet3_TxDescToLe(const struct Vmxnet3_TxDesc *srcDesc,
                               struct Vmxnet3_TxDesc *dstDesc)
{
        int i;
        u32 *src = (u32 *)(srcDesc + 1);
        u32 *dst = (u32 *)(dstDesc + 1);

        /* Working backwards so that the gen bit is set at the end. */
        for (i = 2; i > 0; i--) {
                src--;
                dst--;
                *dst = cpu_to_le32(*src);
        }
}


static void vmxnet3_RxCompToCPU(const struct Vmxnet3_RxCompDesc *srcDesc,
                                struct Vmxnet3_RxCompDesc *dstDesc)
{
        int i = 0;
        u32 *src = (u32 *)srcDesc;
        u32 *dst = (u32 *)dstDesc;
        for (i = 0; i < sizeof(struct Vmxnet3_RxCompDesc) / sizeof(u32); i++) {
                *dst = le32_to_cpu(*src);
                src++;
                dst++;
        }
}


/* Used to read bitfield values from double words. */
static u32 get_bitfield32(const __le32 *bitfield, u32 pos, u32 size)
{
        u32 temp = le32_to_cpu(*bitfield);
        u32 mask = ((1 << size) - 1) << pos;
        temp &= mask;
        temp >>= pos;
        return temp;
}
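
/*
 * E.g. for the one-bit gen field of a Tx descriptor, pos is
 * VMXNET3_TXD_GEN_SHIFT and size is VMXNET3_TXD_GEN_SIZE, so the call
 * reduces to (le32_to_cpu(*bitfield) >> pos) & 1; see the accessor macros
 * below.
 */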


#endif  /* __BIG_ENDIAN_BITFIELD */

#ifdef __BIG_ENDIAN_BITFIELD

#   define VMXNET3_TXDESC_GET_GEN(txdesc) get_bitfield32(((const __le32 *) \
                        txdesc) + VMXNET3_TXD_GEN_DWORD_SHIFT, \
                        VMXNET3_TXD_GEN_SHIFT, VMXNET3_TXD_GEN_SIZE)
#   define VMXNET3_TXDESC_GET_EOP(txdesc) get_bitfield32(((const __le32 *) \
                        txdesc) + VMXNET3_TXD_EOP_DWORD_SHIFT, \
                        VMXNET3_TXD_EOP_SHIFT, VMXNET3_TXD_EOP_SIZE)
#   define VMXNET3_TCD_GET_GEN(tcd) get_bitfield32(((const __le32 *)tcd) + \
                        VMXNET3_TCD_GEN_DWORD_SHIFT, VMXNET3_TCD_GEN_SHIFT, \
                        VMXNET3_TCD_GEN_SIZE)
#   define VMXNET3_TCD_GET_TXIDX(tcd) get_bitfield32((const __le32 *)tcd, \
                        VMXNET3_TCD_TXIDX_SHIFT, VMXNET3_TCD_TXIDX_SIZE)
#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) do { \
                        (dstrcd) = (tmp); \
                        vmxnet3_RxCompToCPU((rcd), (tmp)); \
                } while (0)
#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) do { \
                        (dstrxd) = (tmp); \
                        vmxnet3_RxDescToCPU((rxd), (tmp)); \
                } while (0)

#else

#   define VMXNET3_TXDESC_GET_GEN(txdesc) ((txdesc)->gen)
#   define VMXNET3_TXDESC_GET_EOP(txdesc) ((txdesc)->eop)
#   define VMXNET3_TCD_GET_GEN(tcd) ((tcd)->gen)
#   define VMXNET3_TCD_GET_TXIDX(tcd) ((tcd)->txdIdx)
#   define vmxnet3_getRxComp(dstrcd, rcd, tmp) (dstrcd) = (rcd)
#   define vmxnet3_getRxDesc(dstrxd, rxd, tmp) (dstrxd) = (rxd)

#endif /* __BIG_ENDIAN_BITFIELD  */


static void
vmxnet3_unmap_tx_buf(struct vmxnet3_tx_buf_info *tbi,
                     struct pci_dev *pdev)
{
        if (tbi->map_type == VMXNET3_MAP_SINGLE)
                pci_unmap_single(pdev, tbi->dma_addr, tbi->len,
                                 PCI_DMA_TODEVICE);
        else if (tbi->map_type == VMXNET3_MAP_PAGE)
                pci_unmap_page(pdev, tbi->dma_addr, tbi->len,
                               PCI_DMA_TODEVICE);
        else
                BUG_ON(tbi->map_type != VMXNET3_MAP_NONE);

        tbi->map_type = VMXNET3_MAP_NONE; /* to help debugging */
}


static int
vmxnet3_unmap_pkt(u32 eop_idx, struct vmxnet3_tx_queue *tq,
                  struct pci_dev *pdev, struct vmxnet3_adapter *adapter)
{
        struct sk_buff *skb;
        int entries = 0;

        /* no out of order completion */
        BUG_ON(tq->buf_info[eop_idx].sop_idx != tq->tx_ring.next2comp);
        BUG_ON(VMXNET3_TXDESC_GET_EOP(&(tq->tx_ring.base[eop_idx].txd)) != 1);

        skb = tq->buf_info[eop_idx].skb;
        BUG_ON(skb == NULL);
        tq->buf_info[eop_idx].skb = NULL;

        VMXNET3_INC_RING_IDX_ONLY(eop_idx, tq->tx_ring.size);

        while (tq->tx_ring.next2comp != eop_idx) {
                vmxnet3_unmap_tx_buf(tq->buf_info + tq->tx_ring.next2comp,
                                     pdev);

                /* update next2comp w/o tx_lock. Since we are marking more,
                 * instead of less, tx ring entries avail, the worst case is
                 * that the tx routine incorrectly re-queues a pkt due to
                 * insufficient tx ring entries.
                 */
                vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
                entries++;
        }

        dev_kfree_skb_any(skb);
        return entries;
}


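/*
 * Reclaim completed tx descriptors: walk the completion ring while the
 * generation bit matches, unmap and free each completed packet, and wake
 * the queue if it was stopped and enough ring entries have been freed.
 */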
static int
vmxnet3_tq_tx_complete(struct vmxnet3_tx_queue *tq,
                        struct vmxnet3_adapter *adapter)
{
        int completed = 0;
        union Vmxnet3_GenericDesc *gdesc;

        gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
        while (VMXNET3_TCD_GET_GEN(&gdesc->tcd) == tq->comp_ring.gen) {
                completed += vmxnet3_unmap_pkt(VMXNET3_TCD_GET_TXIDX(
                                               &gdesc->tcd), tq, adapter->pdev,
                                               adapter);

                vmxnet3_comp_ring_adv_next2proc(&tq->comp_ring);
                gdesc = tq->comp_ring.base + tq->comp_ring.next2proc;
        }

        if (completed) {
                spin_lock(&tq->tx_lock);
                if (unlikely(vmxnet3_tq_stopped(tq, adapter) &&
                             vmxnet3_cmd_ring_desc_avail(&tq->tx_ring) >
                             VMXNET3_WAKE_QUEUE_THRESHOLD(tq) &&
                             netif_carrier_ok(adapter->netdev))) {
                        vmxnet3_tq_wake(tq, adapter);
                }
                spin_unlock(&tq->tx_lock);
        }
        return completed;
}


static void
vmxnet3_tq_cleanup(struct vmxnet3_tx_queue *tq,
                   struct vmxnet3_adapter *adapter)
{
        int i;

        while (tq->tx_ring.next2comp != tq->tx_ring.next2fill) {
                struct vmxnet3_tx_buf_info *tbi;

                tbi = tq->buf_info + tq->tx_ring.next2comp;

                vmxnet3_unmap_tx_buf(tbi, adapter->pdev);
                if (tbi->skb) {
                        dev_kfree_skb_any(tbi->skb);
                        tbi->skb = NULL;
                }
                vmxnet3_cmd_ring_adv_next2comp(&tq->tx_ring);
        }

        /* sanity check, verify all buffers are indeed unmapped and freed */
        for (i = 0; i < tq->tx_ring.size; i++) {
                BUG_ON(tq->buf_info[i].skb != NULL ||
                       tq->buf_info[i].map_type != VMXNET3_MAP_NONE);
        }

        tq->tx_ring.gen = VMXNET3_INIT_GEN;
        tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;

        tq->comp_ring.gen = VMXNET3_INIT_GEN;
        tq->comp_ring.next2proc = 0;
}


static void
vmxnet3_tq_destroy(struct vmxnet3_tx_queue *tq,
                   struct vmxnet3_adapter *adapter)
{
        if (tq->tx_ring.base) {
                pci_free_consistent(adapter->pdev, tq->tx_ring.size *
                                    sizeof(struct Vmxnet3_TxDesc),
                                    tq->tx_ring.base, tq->tx_ring.basePA);
                tq->tx_ring.base = NULL;
        }
        if (tq->data_ring.base) {
                pci_free_consistent(adapter->pdev, tq->data_ring.size *
                                    sizeof(struct Vmxnet3_TxDataDesc),
                                    tq->data_ring.base, tq->data_ring.basePA);
                tq->data_ring.base = NULL;
        }
        if (tq->comp_ring.base) {
                pci_free_consistent(adapter->pdev, tq->comp_ring.size *
                                    sizeof(struct Vmxnet3_TxCompDesc),
                                    tq->comp_ring.base, tq->comp_ring.basePA);
                tq->comp_ring.base = NULL;
        }
        kfree(tq->buf_info);
        tq->buf_info = NULL;
}


/* Destroy all tx queues */
void
vmxnet3_tq_destroy_all(struct vmxnet3_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++)
                vmxnet3_tq_destroy(&adapter->tx_queue[i], adapter);
}


static void
vmxnet3_tq_init(struct vmxnet3_tx_queue *tq,
                struct vmxnet3_adapter *adapter)
{
        int i;

        /* reset the tx ring contents to 0 and reset the tx ring states */
        memset(tq->tx_ring.base, 0, tq->tx_ring.size *
               sizeof(struct Vmxnet3_TxDesc));
        tq->tx_ring.next2fill = tq->tx_ring.next2comp = 0;
        tq->tx_ring.gen = VMXNET3_INIT_GEN;

        memset(tq->data_ring.base, 0, tq->data_ring.size *
               sizeof(struct Vmxnet3_TxDataDesc));

        /* reset the tx comp ring contents to 0 and reset comp ring states */
        memset(tq->comp_ring.base, 0, tq->comp_ring.size *
               sizeof(struct Vmxnet3_TxCompDesc));
        tq->comp_ring.next2proc = 0;
        tq->comp_ring.gen = VMXNET3_INIT_GEN;

        /* reset the bookkeeping data */
        memset(tq->buf_info, 0, sizeof(tq->buf_info[0]) * tq->tx_ring.size);
        for (i = 0; i < tq->tx_ring.size; i++)
                tq->buf_info[i].map_type = VMXNET3_MAP_NONE;

        /* stats are not reset */
}


static int
vmxnet3_tq_create(struct vmxnet3_tx_queue *tq,
                  struct vmxnet3_adapter *adapter)
{
        BUG_ON(tq->tx_ring.base || tq->data_ring.base ||
               tq->comp_ring.base || tq->buf_info);

        tq->tx_ring.base = pci_alloc_consistent(adapter->pdev, tq->tx_ring.size
                           * sizeof(struct Vmxnet3_TxDesc),
                           &tq->tx_ring.basePA);
        if (!tq->tx_ring.base) {
                printk(KERN_ERR "%s: failed to allocate tx ring\n",
                       adapter->netdev->name);
                goto err;
        }

        tq->data_ring.base = pci_alloc_consistent(adapter->pdev,
                             tq->data_ring.size *
                             sizeof(struct Vmxnet3_TxDataDesc),
                             &tq->data_ring.basePA);
        if (!tq->data_ring.base) {
                printk(KERN_ERR "%s: failed to allocate data ring\n",
                       adapter->netdev->name);
                goto err;
        }

        tq->comp_ring.base = pci_alloc_consistent(adapter->pdev,
                             tq->comp_ring.size *
                             sizeof(struct Vmxnet3_TxCompDesc),
                             &tq->comp_ring.basePA);
        if (!tq->comp_ring.base) {
                printk(KERN_ERR "%s: failed to allocate tx comp ring\n",
                       adapter->netdev->name);
                goto err;
        }

        tq->buf_info = kcalloc(tq->tx_ring.size, sizeof(tq->buf_info[0]),
                               GFP_KERNEL);
        if (!tq->buf_info) {
                printk(KERN_ERR "%s: failed to allocate tx bufinfo\n",
                       adapter->netdev->name);
                goto err;
        }

        return 0;

err:
        vmxnet3_tq_destroy(tq, adapter);
        return -ENOMEM;
}

static void
vmxnet3_tq_cleanup_all(struct vmxnet3_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++)
                vmxnet3_tq_cleanup(&adapter->tx_queue[i], adapter);
}

/*
 *    Starting from ring->next2fill, allocate rx buffers for the given ring
 *    of the rx queue and update the rx desc. Stop after @num_to_alloc
 *    buffers are allocated or allocation fails.
 */

static int
vmxnet3_rq_alloc_rx_buf(struct vmxnet3_rx_queue *rq, u32 ring_idx,
                        int num_to_alloc, struct vmxnet3_adapter *adapter)
{
        int num_allocated = 0;
        struct vmxnet3_rx_buf_info *rbi_base = rq->buf_info[ring_idx];
        struct vmxnet3_cmd_ring *ring = &rq->rx_ring[ring_idx];
        u32 val;

        while (num_allocated <= num_to_alloc) {
                struct vmxnet3_rx_buf_info *rbi;
                union Vmxnet3_GenericDesc *gd;

                rbi = rbi_base + ring->next2fill;
                gd = ring->base + ring->next2fill;

                if (rbi->buf_type == VMXNET3_RX_BUF_SKB) {
                        if (rbi->skb == NULL) {
                                rbi->skb = dev_alloc_skb(rbi->len +
                                                         NET_IP_ALIGN);
                                if (unlikely(rbi->skb == NULL)) {
                                        rq->stats.rx_buf_alloc_failure++;
                                        break;
                                }
                                rbi->skb->dev = adapter->netdev;

                                skb_reserve(rbi->skb, NET_IP_ALIGN);
                                rbi->dma_addr = pci_map_single(adapter->pdev,
                                                rbi->skb->data, rbi->len,
                                                PCI_DMA_FROMDEVICE);
                        } else {
                                /* rx buffer skipped by the device */
                        }
                        val = VMXNET3_RXD_BTYPE_HEAD << VMXNET3_RXD_BTYPE_SHIFT;
                } else {
                        BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE ||
                               rbi->len  != PAGE_SIZE);

                        if (rbi->page == NULL) {
                                rbi->page = alloc_page(GFP_ATOMIC);
                                if (unlikely(rbi->page == NULL)) {
                                        rq->stats.rx_buf_alloc_failure++;
                                        break;
                                }
                                rbi->dma_addr = pci_map_page(adapter->pdev,
                                                rbi->page, 0, PAGE_SIZE,
                                                PCI_DMA_FROMDEVICE);
                        } else {
                                /* rx buffers skipped by the device */
                        }
                        val = VMXNET3_RXD_BTYPE_BODY << VMXNET3_RXD_BTYPE_SHIFT;
                }

                BUG_ON(rbi->dma_addr == 0);
                gd->rxd.addr = cpu_to_le64(rbi->dma_addr);
                gd->dword[2] = cpu_to_le32((!ring->gen << VMXNET3_RXD_GEN_SHIFT)
                                           | val | rbi->len);

                /* Fill the last buffer but don't mark it ready, or else the
                 * device will think that the queue is full */
                if (num_allocated == num_to_alloc)
                        break;

                gd->dword[2] |= cpu_to_le32(ring->gen << VMXNET3_RXD_GEN_SHIFT);
                num_allocated++;
                vmxnet3_cmd_ring_adv_next2fill(ring);
        }
        rq->uncommitted[ring_idx] += num_allocated;

        dev_dbg(&adapter->netdev->dev,
                "alloc_rx_buf: %d allocated, next2fill %u, next2comp "
                "%u, uncommitted %u\n", num_allocated, ring->next2fill,
                ring->next2comp, rq->uncommitted[ring_idx]);

        /* so that the device can distinguish a full ring and an empty ring */
        BUG_ON(num_allocated != 0 && ring->next2fill == ring->next2comp);

        return num_allocated;
}
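
/*
 * Note that the ring never becomes completely full: the last buffer filled
 * above is left with the old generation bit, so next2fill == next2comp can
 * only mean an empty ring, which is what the BUG_ON above asserts.
 */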


static void
vmxnet3_append_frag(struct sk_buff *skb, struct Vmxnet3_RxCompDesc *rcd,
                    struct vmxnet3_rx_buf_info *rbi)
{
        struct skb_frag_struct *frag = skb_shinfo(skb)->frags +
                skb_shinfo(skb)->nr_frags;

        BUG_ON(skb_shinfo(skb)->nr_frags >= MAX_SKB_FRAGS);

        __skb_frag_set_page(frag, rbi->page);
        frag->page_offset = 0;
        skb_frag_size_set(frag, rcd->len);
        skb->data_len += rcd->len;
        skb->truesize += PAGE_SIZE;
        skb_shinfo(skb)->nr_frags++;
}


static void
vmxnet3_map_pkt(struct sk_buff *skb, struct vmxnet3_tx_ctx *ctx,
                struct vmxnet3_tx_queue *tq, struct pci_dev *pdev,
                struct vmxnet3_adapter *adapter)
{
        u32 dw2, len;
        unsigned long buf_offset;
        int i;
        union Vmxnet3_GenericDesc *gdesc;
        struct vmxnet3_tx_buf_info *tbi = NULL;

        BUG_ON(ctx->copy_size > skb_headlen(skb));

        /* use the previous gen bit for the SOP desc */
        dw2 = (tq->tx_ring.gen ^ 0x1) << VMXNET3_TXD_GEN_SHIFT;

        ctx->sop_txd = tq->tx_ring.base + tq->tx_ring.next2fill;
        gdesc = ctx->sop_txd; /* both loops below can be skipped */

        /* no need to map the buffer if headers are copied */
        if (ctx->copy_size) {
                ctx->sop_txd->txd.addr = cpu_to_le64(tq->data_ring.basePA +
                                        tq->tx_ring.next2fill *
                                        sizeof(struct Vmxnet3_TxDataDesc));
                ctx->sop_txd->dword[2] = cpu_to_le32(dw2 | ctx->copy_size);
                ctx->sop_txd->dword[3] = 0;

                tbi = tq->buf_info + tq->tx_ring.next2fill;
                tbi->map_type = VMXNET3_MAP_NONE;

                dev_dbg(&adapter->netdev->dev,
                        "txd[%u]: 0x%Lx 0x%x 0x%x\n",
                        tq->tx_ring.next2fill,
                        le64_to_cpu(ctx->sop_txd->txd.addr),
                        ctx->sop_txd->dword[2], ctx->sop_txd->dword[3]);
                vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);

                /* use the right gen for non-SOP desc */
                dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
        }

        /* linear part can use multiple tx desc if it's big */
        len = skb_headlen(skb) - ctx->copy_size;
        buf_offset = ctx->copy_size;
        while (len) {
                u32 buf_size;

                if (len < VMXNET3_MAX_TX_BUF_SIZE) {
                        buf_size = len;
                        dw2 |= len;
                } else {
                        buf_size = VMXNET3_MAX_TX_BUF_SIZE;
                        /* spec says that for TxDesc.len, 0 == 2^14 */
                }

                tbi = tq->buf_info + tq->tx_ring.next2fill;
                tbi->map_type = VMXNET3_MAP_SINGLE;
                tbi->dma_addr = pci_map_single(adapter->pdev,
                                skb->data + buf_offset, buf_size,
                                PCI_DMA_TODEVICE);

                tbi->len = buf_size;

                gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
                BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

                gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
                gdesc->dword[2] = cpu_to_le32(dw2);
                gdesc->dword[3] = 0;

                dev_dbg(&adapter->netdev->dev,
                        "txd[%u]: 0x%Lx 0x%x 0x%x\n",
                        tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
                        le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
                vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
                dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;

                len -= buf_size;
                buf_offset += buf_size;
        }

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[i];

                tbi = tq->buf_info + tq->tx_ring.next2fill;
                tbi->map_type = VMXNET3_MAP_PAGE;
                tbi->dma_addr = skb_frag_dma_map(&adapter->pdev->dev, frag,
                                                 0, skb_frag_size(frag),
                                                 DMA_TO_DEVICE);

                tbi->len = skb_frag_size(frag);

                gdesc = tq->tx_ring.base + tq->tx_ring.next2fill;
                BUG_ON(gdesc->txd.gen == tq->tx_ring.gen);

                gdesc->txd.addr = cpu_to_le64(tbi->dma_addr);
                gdesc->dword[2] = cpu_to_le32(dw2 | skb_frag_size(frag));
                gdesc->dword[3] = 0;

                dev_dbg(&adapter->netdev->dev,
                        "txd[%u]: 0x%llx %u %u\n",
                        tq->tx_ring.next2fill, le64_to_cpu(gdesc->txd.addr),
                        le32_to_cpu(gdesc->dword[2]), gdesc->dword[3]);
                vmxnet3_cmd_ring_adv_next2fill(&tq->tx_ring);
                dw2 = tq->tx_ring.gen << VMXNET3_TXD_GEN_SHIFT;
        }

        ctx->eop_txd = gdesc;

        /* set the last buf_info for the pkt */
        tbi->skb = skb;
        tbi->sop_idx = ctx->sop_txd - tq->tx_ring.base;
}
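
/*
 * Note that vmxnet3_map_pkt() leaves the SOP descriptor with the previous
 * generation bit, so the device ignores the packet until the caller
 * (vmxnet3_tq_xmit()) flips it, and that the buf_info entry of the last
 * descriptor holds the skb, which is where vmxnet3_unmap_pkt() later
 * reclaims it.
 */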


/* Init all tx queues */
static void
vmxnet3_tq_init_all(struct vmxnet3_adapter *adapter)
{
        int i;

        for (i = 0; i < adapter->num_tx_queues; i++)
                vmxnet3_tq_init(&adapter->tx_queue[i], adapter);
}


/*
 *    Parse and copy relevant protocol headers:
 *      For a tso pkt, relevant headers are L2/3/4 including options
 *      For a pkt requesting csum offloading, they are L2/3 and may include L4
 *      if it's a TCP/UDP pkt
 *
 * Returns:
 *    -1:  an error occurred during parsing
 *     0:  protocol headers parsed, but too big to be copied
 *     1:  protocol headers parsed and copied
 *
 * Other effects:
 *    1. related *ctx fields are updated.
 *    2. ctx->copy_size is # of bytes copied
 *    3. the portion copied is guaranteed to be in the linear part
 *
 */
static int
vmxnet3_parse_and_copy_hdr(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                           struct vmxnet3_tx_ctx *ctx,
                           struct vmxnet3_adapter *adapter)
{
        struct Vmxnet3_TxDataDesc *tdd;

        if (ctx->mss) { /* TSO */
                ctx->eth_ip_hdr_size = skb_transport_offset(skb);
                ctx->l4_hdr_size = ((struct tcphdr *)
                                   skb_transport_header(skb))->doff * 4;
                ctx->copy_size = ctx->eth_ip_hdr_size + ctx->l4_hdr_size;
        } else {
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        ctx->eth_ip_hdr_size = skb_checksum_start_offset(skb);

                        if (ctx->ipv4) {
                                struct iphdr *iph = (struct iphdr *)
                                                    skb_network_header(skb);
                                if (iph->protocol == IPPROTO_TCP)
                                        ctx->l4_hdr_size = ((struct tcphdr *)
                                           skb_transport_header(skb))->doff * 4;
                                else if (iph->protocol == IPPROTO_UDP)
                                        /*
                                         * Use the tcp header size so that the
                                         * bytes to be copied are more than
                                         * required by the device.
                                         */
                                        ctx->l4_hdr_size =
                                                        sizeof(struct tcphdr);
                                else
                                        ctx->l4_hdr_size = 0;
                        } else {
                                /* for simplicity, don't copy L4 headers */
                                ctx->l4_hdr_size = 0;
                        }
                        ctx->copy_size = ctx->eth_ip_hdr_size +
                                         ctx->l4_hdr_size;
                } else {
                        ctx->eth_ip_hdr_size = 0;
                        ctx->l4_hdr_size = 0;
                        /* copy as much as allowed */
                        ctx->copy_size = min((unsigned int)VMXNET3_HDR_COPY_SIZE,
                                             skb_headlen(skb));
                }

                /* make sure headers are accessible directly */
                if (unlikely(!pskb_may_pull(skb, ctx->copy_size)))
                        goto err;
        }

        if (unlikely(ctx->copy_size > VMXNET3_HDR_COPY_SIZE)) {
                tq->stats.oversized_hdr++;
                ctx->copy_size = 0;
                return 0;
        }

        tdd = tq->data_ring.base + tq->tx_ring.next2fill;

        memcpy(tdd->data, skb->data, ctx->copy_size);
        dev_dbg(&adapter->netdev->dev,
                "copy %u bytes to dataRing[%u]\n",
                ctx->copy_size, tq->tx_ring.next2fill);
        return 1;

err:
        return -1;
}


static void
vmxnet3_prepare_tso(struct sk_buff *skb,
                    struct vmxnet3_tx_ctx *ctx)
{
        struct tcphdr *tcph = (struct tcphdr *)skb_transport_header(skb);
        if (ctx->ipv4) {
                struct iphdr *iph = (struct iphdr *)skb_network_header(skb);
                iph->check = 0;
                tcph->check = ~csum_tcpudp_magic(iph->saddr, iph->daddr, 0,
                                                 IPPROTO_TCP, 0);
        } else {
                struct ipv6hdr *iph = (struct ipv6hdr *)skb_network_header(skb);
                tcph->check = ~csum_ipv6_magic(&iph->saddr, &iph->daddr, 0,
                                               IPPROTO_TCP, 0);
        }
}
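
/*
 * For TSO the device computes the final TCP checksum of every segment, so
 * vmxnet3_prepare_tso() seeds tcph->check with the pseudo-header checksum
 * (taken over a zero payload length) that the hardware expects.
 */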


/*
 * Transmits a pkt through a given tq
 * Returns:
 *    NETDEV_TX_OK:      descriptors are set up successfully
 *    NETDEV_TX_OK:      error occurred, the pkt is dropped
 *    NETDEV_TX_BUSY:    tx ring is full, queue is stopped
 *
 * Side-effects:
 *    1. tx ring may be changed
 *    2. tq stats may be updated accordingly
 *    3. shared->txNumDeferred may be updated
 */

static int
vmxnet3_tq_xmit(struct sk_buff *skb, struct vmxnet3_tx_queue *tq,
                struct vmxnet3_adapter *adapter, struct net_device *netdev)
{
        int ret;
        u32 count;
        unsigned long flags;
        struct vmxnet3_tx_ctx ctx;
        union Vmxnet3_GenericDesc *gdesc;
#ifdef __BIG_ENDIAN_BITFIELD
        /* Use a temporary descriptor to avoid touching bits multiple times */
        union Vmxnet3_GenericDesc tempTxDesc;
#endif

        /* conservatively estimate # of descriptors to use */
        count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) +
                skb_shinfo(skb)->nr_frags + 1;

        ctx.ipv4 = (vlan_get_protocol(skb) == cpu_to_be16(ETH_P_IP));

        ctx.mss = skb_shinfo(skb)->gso_size;
        if (ctx.mss) {
                if (skb_header_cloned(skb)) {
                        if (unlikely(pskb_expand_head(skb, 0, 0,
                                                      GFP_ATOMIC) != 0)) {
                                tq->stats.drop_tso++;
                                goto drop_pkt;
                        }
                        tq->stats.copy_skb_header++;
                }
                vmxnet3_prepare_tso(skb, &ctx);
        } else {
                if (unlikely(count > VMXNET3_MAX_TXD_PER_PKT)) {
                        /* non-tso pkts must not use more than
                         * VMXNET3_MAX_TXD_PER_PKT entries
                         */
                        if (skb_linearize(skb) != 0) {
                                tq->stats.drop_too_many_frags++;
                                goto drop_pkt;
                        }
                        tq->stats.linearized++;

                        /* recalculate the # of descriptors to use */
                        count = VMXNET3_TXD_NEEDED(skb_headlen(skb)) + 1;
                }
        }

        spin_lock_irqsave(&tq->tx_lock, flags);

        if (count > vmxnet3_cmd_ring_desc_avail(&tq->tx_ring)) {
                tq->stats.tx_ring_full++;
                dev_dbg(&adapter->netdev->dev,
                        "tx queue stopped on %s, next2comp %u"
                        " next2fill %u\n", adapter->netdev->name,
                        tq->tx_ring.next2comp, tq->tx_ring.next2fill);

                vmxnet3_tq_stop(tq, adapter);
                spin_unlock_irqrestore(&tq->tx_lock, flags);
                return NETDEV_TX_BUSY;
        }

        ret = vmxnet3_parse_and_copy_hdr(skb, tq, &ctx, adapter);
        if (ret >= 0) {
                BUG_ON(ret <= 0 && ctx.copy_size != 0);
                /* hdrs parsed, check against other limits */
                if (ctx.mss) {
                        if (unlikely(ctx.eth_ip_hdr_size + ctx.l4_hdr_size >
                                     VMXNET3_MAX_TX_BUF_SIZE)) {
                                goto hdr_too_big;
                        }
                } else {
                        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                                if (unlikely(ctx.eth_ip_hdr_size +
                                             skb->csum_offset >
                                             VMXNET3_MAX_CSUM_OFFSET)) {
                                        goto hdr_too_big;
                                }
                        }
                }
        } else {
                tq->stats.drop_hdr_inspect_err++;
                goto unlock_drop_pkt;
        }

        /* fill tx descs related to addr & len */
        vmxnet3_map_pkt(skb, &ctx, tq, adapter->pdev, adapter);

        /* setup the EOP desc */
        ctx.eop_txd->dword[3] = cpu_to_le32(VMXNET3_TXD_CQ | VMXNET3_TXD_EOP);

        /* setup the SOP desc */
#ifdef __BIG_ENDIAN_BITFIELD
        gdesc = &tempTxDesc;
        gdesc->dword[2] = ctx.sop_txd->dword[2];
        gdesc->dword[3] = ctx.sop_txd->dword[3];
#else
        gdesc = ctx.sop_txd;
#endif
        if (ctx.mss) {
                gdesc->txd.hlen = ctx.eth_ip_hdr_size + ctx.l4_hdr_size;
                gdesc->txd.om = VMXNET3_OM_TSO;
                gdesc->txd.msscof = ctx.mss;
                le32_add_cpu(&tq->shared->txNumDeferred, (skb->len -
                             gdesc->txd.hlen + ctx.mss - 1) / ctx.mss);
        } else {
                if (skb->ip_summed == CHECKSUM_PARTIAL) {
                        gdesc->txd.hlen = ctx.eth_ip_hdr_size;
                        gdesc->txd.om = VMXNET3_OM_CSUM;
                        gdesc->txd.msscof = ctx.eth_ip_hdr_size +
                                            skb->csum_offset;
                } else {
                        gdesc->txd.om = 0;
                        gdesc->txd.msscof = 0;
                }
                le32_add_cpu(&tq->shared->txNumDeferred, 1);
        }

        if (vlan_tx_tag_present(skb)) {
                gdesc->txd.ti = 1;
                gdesc->txd.tci = vlan_tx_tag_get(skb);
        }

        /* finally flips the GEN bit of the SOP desc. */
        gdesc->dword[2] = cpu_to_le32(le32_to_cpu(gdesc->dword[2]) ^
                                                  VMXNET3_TXD_GEN);
#ifdef __BIG_ENDIAN_BITFIELD
        /* Finished updating in bitfields of Tx Desc, so write them in original
         * place.
         */
        vmxnet3_TxDescToLe((struct Vmxnet3_TxDesc *)gdesc,
                           (struct Vmxnet3_TxDesc *)ctx.sop_txd);
        gdesc = ctx.sop_txd;
#endif
        dev_dbg(&adapter->netdev->dev,
                "txd[%u]: SOP 0x%Lx 0x%x 0x%x\n",
                (u32)((union Vmxnet3_GenericDesc *)ctx.sop_txd -
                tq->tx_ring.base), le64_to_cpu(gdesc->txd.addr),
                le32_to_cpu(gdesc->dword[2]), le32_to_cpu(gdesc->dword[3]));

        spin_unlock_irqrestore(&tq->tx_lock, flags);

        if (le32_to_cpu(tq->shared->txNumDeferred) >=
                                        le32_to_cpu(tq->shared->txThreshold)) {
                tq->shared->txNumDeferred = 0;
                VMXNET3_WRITE_BAR0_REG(adapter,
                                       VMXNET3_REG_TXPROD + tq->qid * 8,
                                       tq->tx_ring.next2fill);
        }

        return NETDEV_TX_OK;

hdr_too_big:
        tq->stats.drop_oversized_hdr++;
unlock_drop_pkt:
        spin_unlock_irqrestore(&tq->tx_lock, flags);
drop_pkt:
        tq->stats.drop_total++;
        dev_kfree_skb(skb);
        return NETDEV_TX_OK;
}


static netdev_tx_t
vmxnet3_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
{
        struct vmxnet3_adapter *adapter = netdev_priv(netdev);

        BUG_ON(skb->queue_mapping > adapter->num_tx_queues);
        return vmxnet3_tq_xmit(skb,
                               &adapter->tx_queue[skb->queue_mapping],
                               adapter, netdev);
}


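/*
 * Propagate the device's checksum verdict from the rx completion
 * descriptor to the skb: CHECKSUM_UNNECESSARY when the device verified
 * the TCP/UDP checksum; otherwise any checksum value supplied by the
 * device is stored in skb->csum, or the skb is left unchecksummed.
 */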
static void
vmxnet3_rx_csum(struct vmxnet3_adapter *adapter,
                struct sk_buff *skb,
                union Vmxnet3_GenericDesc *gdesc)
{
        if (!gdesc->rcd.cnc && adapter->netdev->features & NETIF_F_RXCSUM) {
                /* typical case: TCP/UDP over IP and both csums are correct */
                if ((le32_to_cpu(gdesc->dword[3]) & VMXNET3_RCD_CSUM_OK) ==
                                                        VMXNET3_RCD_CSUM_OK) {
                        skb->ip_summed = CHECKSUM_UNNECESSARY;
                        BUG_ON(!(gdesc->rcd.tcp || gdesc->rcd.udp));
                        BUG_ON(!(gdesc->rcd.v4  || gdesc->rcd.v6));
                        BUG_ON(gdesc->rcd.frg);
                } else {
                        if (gdesc->rcd.csum) {
                                skb->csum = htons(gdesc->rcd.csum);
                                skb->ip_summed = CHECKSUM_PARTIAL;
                        } else {
                                skb_checksum_none_assert(skb);
                        }
                }
        } else {
                skb_checksum_none_assert(skb);
        }
}


static void
vmxnet3_rx_error(struct vmxnet3_rx_queue *rq, struct Vmxnet3_RxCompDesc *rcd,
                 struct vmxnet3_rx_ctx *ctx, struct vmxnet3_adapter *adapter)
{
        rq->stats.drop_err++;
        if (!rcd->fcs)
                rq->stats.drop_fcs++;

        rq->stats.drop_total++;

        /*
         * We do not unmap and chain the rx buffer to the skb.
         * We basically pretend this buffer is not used and will be recycled
         * by vmxnet3_rq_alloc_rx_buf()
         */

        /*
         * ctx->skb may be NULL if this is the first and the only one
         * desc for the pkt
         */
        if (ctx->skb)
                dev_kfree_skb_irq(ctx->skb);

        ctx->skb = NULL;
}


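/*
 * Process up to @quota descriptors from the rx completion ring: hand
 * completed packets to the stack, immediately refill the rx buffers that
 * were consumed, and update the RXPROD register when the device requests
 * it. Returns the number of completion descriptors processed.
 */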
static int
vmxnet3_rq_rx_complete(struct vmxnet3_rx_queue *rq,
                       struct vmxnet3_adapter *adapter, int quota)
{
        static const u32 rxprod_reg[2] = {
                VMXNET3_REG_RXPROD, VMXNET3_REG_RXPROD2
        };
        u32 num_rxd = 0;
        bool skip_page_frags = false;
        struct Vmxnet3_RxCompDesc *rcd;
        struct vmxnet3_rx_ctx *ctx = &rq->rx_ctx;
#ifdef __BIG_ENDIAN_BITFIELD
        struct Vmxnet3_RxDesc rxCmdDesc;
        struct Vmxnet3_RxCompDesc rxComp;
#endif
        vmxnet3_getRxComp(rcd, &rq->comp_ring.base[rq->comp_ring.next2proc].rcd,
                          &rxComp);
        while (rcd->gen == rq->comp_ring.gen) {
                struct vmxnet3_rx_buf_info *rbi;
                struct sk_buff *skb, *new_skb = NULL;
                struct page *new_page = NULL;
                int num_to_alloc;
                struct Vmxnet3_RxDesc *rxd;
                u32 idx, ring_idx;
                struct vmxnet3_cmd_ring *ring = NULL;
                if (num_rxd >= quota) {
                        /* we may stop even before we see the EOP desc of
                         * the current pkt
                         */
                        break;
                }
                num_rxd++;
                BUG_ON(rcd->rqID != rq->qid && rcd->rqID != rq->qid2);
                idx = rcd->rxdIdx;
                ring_idx = rcd->rqID < adapter->num_rx_queues ? 0 : 1;
                ring = rq->rx_ring + ring_idx;
                vmxnet3_getRxDesc(rxd, &rq->rx_ring[ring_idx].base[idx].rxd,
                                  &rxCmdDesc);
                rbi = rq->buf_info[ring_idx] + idx;

                BUG_ON(rxd->addr != rbi->dma_addr ||
                       rxd->len != rbi->len);

                if (unlikely(rcd->eop && rcd->err)) {
                        vmxnet3_rx_error(rq, rcd, ctx, adapter);
                        goto rcd_done;
                }

                if (rcd->sop) { /* first buf of the pkt */
                        BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_HEAD ||
                               rcd->rqID != rq->qid);

                        BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_SKB);
                        BUG_ON(ctx->skb != NULL || rbi->skb == NULL);

                        if (unlikely(rcd->len == 0)) {
                                /* Pretend the rx buffer is skipped. */
                                BUG_ON(!(rcd->sop && rcd->eop));
                                dev_dbg(&adapter->netdev->dev,
                                        "rxRing[%u][%u] 0 length\n",
                                        ring_idx, idx);
                                goto rcd_done;
                        }

                        skip_page_frags = false;
                        ctx->skb = rbi->skb;
                        new_skb = dev_alloc_skb(rbi->len + NET_IP_ALIGN);
                        if (new_skb == NULL) {
                                /* Skb allocation failed, do not hand this
                                 * skb to the stack. Reuse it. Drop the
                                 * existing pkt.
                                 */
                                rq->stats.rx_buf_alloc_failure++;
                                ctx->skb = NULL;
                                rq->stats.drop_total++;
                                skip_page_frags = true;
                                goto rcd_done;
                        }

                        pci_unmap_single(adapter->pdev, rbi->dma_addr, rbi->len,
                                         PCI_DMA_FROMDEVICE);

                        skb_put(ctx->skb, rcd->len);

                        /* Immediate refill */
                        new_skb->dev = adapter->netdev;
                        skb_reserve(new_skb, NET_IP_ALIGN);
                        rbi->skb = new_skb;
                        rbi->dma_addr = pci_map_single(adapter->pdev,
                                        rbi->skb->data, rbi->len,
                                        PCI_DMA_FROMDEVICE);
                        rxd->addr = cpu_to_le64(rbi->dma_addr);
                        rxd->len = rbi->len;

                } else {
                        BUG_ON(ctx->skb == NULL && !skip_page_frags);

                        /* non SOP buffer must be type 1 in most cases */
                        BUG_ON(rbi->buf_type != VMXNET3_RX_BUF_PAGE);
                        BUG_ON(rxd->btype != VMXNET3_RXD_BTYPE_BODY);

                        /* If an sop buffer was dropped, skip all
                         * following non-sop fragments. They will be reused.
                         */
                        if (skip_page_frags)
                                goto rcd_done;

                        new_page = alloc_page(GFP_ATOMIC);
                        if (unlikely(new_page == NULL)) {
                                /* Replacement page frag could not be allocated.
                                 * Reuse this page. Drop the pkt and free the
                                 * skb which contained this page as a frag. Skip
                                 * processing all the following non-sop frags.
                                 */
                                rq->stats.rx_buf_alloc_failure++;
                                dev_kfree_skb(ctx->skb);
                                ctx->skb = NULL;
                                skip_page_frags = true;
                                goto rcd_done;
                        }

                        if (rcd->len) {
                                pci_unmap_page(adapter->pdev,
                                               rbi->dma_addr, rbi->len,
                                               PCI_DMA_FROMDEVICE);

                                vmxnet3_append_frag(ctx->skb, rcd, rbi);
                        }

                        /* Immediate refill */
                        rbi->page = new_page;
                        rbi->dma_addr = pci_map_page(adapter->pdev, rbi->page,
                                                     0, PAGE_SIZE,
                                                     PCI_DMA_FROMDEVICE);
                        rxd->addr = cpu_to_le64(rbi->dma_addr);
                        rxd->len = rbi->len;
                }

                skb = ctx->skb;
                if (rcd->eop) {
                        skb->len += skb->data_len;

                        vmxnet3_rx_csum(adapter, skb,
                                        (union Vmxnet3_GenericDesc *)rcd);
                        skb->protocol = eth_type_trans(skb, adapter->netdev);

                        if (unlikely(rcd->ts))
                                __vlan_hwaccel_put_tag(skb, rcd->tci);

                        if (adapter->netdev->features & NETIF_F_LRO)
                                netif_receive_skb(skb);
                        else
                                napi_gro_receive(&rq->napi, skb);

                        ctx->skb = NULL;
                }

rcd_done:
                /* device may have skipped some rx descs */
                ring->next2comp = idx;
                num_to_alloc = vmxnet3_cmd_ring_desc_avail(ring);
                ring = rq->rx_ring + ring_idx;
                while (num_to_alloc) {
                        vmxnet3_getRxDesc(rxd, &ring->base[ring->next2fill].rxd,
                                          &rxCmdDesc);
                        BUG_ON(!rxd->addr);

                        /* Recv desc is ready to be used by the device */
                        rxd->gen = ring->gen;
                        vmxnet3_cmd_ring_adv_next2fill(ring);
                        num_to_alloc--;
                }

                /* if needed, update the register */
                if (unlikely(rq->shared->updateRxProd)) {
                        VMXNET3_WRITE_BAR0_REG(adapter,
                                rxprod_reg[ring_idx] + rq->qid * 8,
                                ring->next2fill);
                        rq->uncommitted[ring_idx] = 0;
                }

                vmxnet3_comp_ring_adv_next2proc(&rq->comp_ring);
                vmxnet3_getRxComp(rcd,
                     &rq->comp_ring.base[rq->comp_ring.next2proc].rcd, &rxComp);
        }

        return num_rxd;
}
1329
1330
1331 static void
1332 vmxnet3_rq_cleanup(struct vmxnet3_rx_queue *rq,
1333                    struct vmxnet3_adapter *adapter)
1334 {
1335         u32 i, ring_idx;
1336         struct Vmxnet3_RxDesc *rxd;
1337
1338         for (ring_idx = 0; ring_idx < 2; ring_idx++) {
1339                 for (i = 0; i < rq->rx_ring[ring_idx].size; i++) {
1340 #ifdef __BIG_ENDIAN_BITFIELD
1341                         struct Vmxnet3_RxDesc rxDesc;
1342 #endif
1343                         vmxnet3_getRxDesc(rxd,
1344                                 &rq->rx_ring[ring_idx].base[i].rxd, &rxDesc);
1345
1346                         if (rxd->btype == VMXNET3_RXD_BTYPE_HEAD &&
1347                                         rq->buf_info[ring_idx][i].skb) {
1348                                 pci_unmap_single(adapter->pdev, rxd->addr,
1349                                                  rxd->len, PCI_DMA_FROMDEVICE);
1350                                 dev_kfree_skb(rq->buf_info[ring_idx][i].skb);
1351                                 rq->buf_info[ring_idx][i].skb = NULL;
1352                         } else if (rxd->btype == VMXNET3_RXD_BTYPE_BODY &&
1353                                         rq->buf_info[ring_idx][i].page) {
1354                                 pci_unmap_page(adapter->pdev, rxd->addr,
1355                                                rxd->len, PCI_DMA_FROMDEVICE);
1356                                 put_page(rq->buf_info[ring_idx][i].page);
1357                                 rq->buf_info[ring_idx][i].page = NULL;
1358                         }
1359                 }
1360
1361                 rq->rx_ring[ring_idx].gen = VMXNET3_INIT_GEN;
1362                 rq->rx_ring[ring_idx].next2fill =
1363                                         rq->rx_ring[ring_idx].next2comp = 0;
1364                 rq->uncommitted[ring_idx] = 0;
1365         }
1366
1367         rq->comp_ring.gen = VMXNET3_INIT_GEN;
1368         rq->comp_ring.next2proc = 0;
1369 }
1370
1371
1372 static void
1373 vmxnet3_rq_cleanup_all(struct vmxnet3_adapter *adapter)
1374 {
1375         int i;
1376
1377         for (i = 0; i < adapter->num_rx_queues; i++)
1378                 vmxnet3_rq_cleanup(&adapter->rx_queue[i], adapter);
1379 }
1380
1381
1382 void vmxnet3_rq_destroy(struct vmxnet3_rx_queue *rq,
1383                         struct vmxnet3_adapter *adapter)
1384 {
1385         int i;
1386         int j;
1387
1388         /* all rx buffers must have already been freed */
1389         for (i = 0; i < 2; i++) {
1390                 if (rq->buf_info[i]) {
1391                         for (j = 0; j < rq->rx_ring[i].size; j++)
1392                                 BUG_ON(rq->buf_info[i][j].page != NULL);
1393                 }
1394         }
1395
1396
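             /* buf_info[0] and buf_info[1] were carved out of a single
              * allocation in vmxnet3_rq_create(), so freeing buf_info[0]
              * releases both arrays */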
1397         kfree(rq->buf_info[0]);
1398
1399         for (i = 0; i < 2; i++) {
1400                 if (rq->rx_ring[i].base) {
1401                         pci_free_consistent(adapter->pdev, rq->rx_ring[i].size
1402                                             * sizeof(struct Vmxnet3_RxDesc),
1403                                             rq->rx_ring[i].base,
1404                                             rq->rx_ring[i].basePA);
1405                         rq->rx_ring[i].base = NULL;
1406                 }
1407                 rq->buf_info[i] = NULL;
1408         }
1409
1410         if (rq->comp_ring.base) {
1411                 pci_free_consistent(adapter->pdev, rq->comp_ring.size *
1412                                     sizeof(struct Vmxnet3_RxCompDesc),
1413                                     rq->comp_ring.base, rq->comp_ring.basePA);
1414                 rq->comp_ring.base = NULL;
1415         }
1416 }
1417
1418
1419 static int
1420 vmxnet3_rq_init(struct vmxnet3_rx_queue *rq,
1421                 struct vmxnet3_adapter  *adapter)
1422 {
1423         int i;
1424
1425         /* initialize buf_info */
1426         for (i = 0; i < rq->rx_ring[0].size; i++) {
1427
1428                 /* 1st buf for a pkt is skbuff */
1429                 if (i % adapter->rx_buf_per_pkt == 0) {
1430                         rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_SKB;
1431                         rq->buf_info[0][i].len = adapter->skb_buf_size;
1432                 } else { /* subsequent bufs for a pkt are frags */
1433                         rq->buf_info[0][i].buf_type = VMXNET3_RX_BUF_PAGE;
1434                         rq->buf_info[0][i].len = PAGE_SIZE;
1435                 }
1436         }
1437         for (i = 0; i < rq->rx_ring[1].size; i++) {
1438                 rq->buf_info[1][i].buf_type = VMXNET3_RX_BUF_PAGE;
1439                 rq->buf_info[1][i].len = PAGE_SIZE;
1440         }
1441
1442         /* reset internal state and allocate buffers for both rings */
1443         for (i = 0; i < 2; i++) {
1444                 rq->rx_ring[i].next2fill = rq->rx_ring[i].next2comp = 0;
1445                 rq->uncommitted[i] = 0;
1446
1447                 memset(rq->rx_ring[i].base, 0, rq->rx_ring[i].size *
1448                        sizeof(struct Vmxnet3_RxDesc));
1449                 rq->rx_ring[i].gen = VMXNET3_INIT_GEN;
1450         }
1451         if (vmxnet3_rq_alloc_rx_buf(rq, 0, rq->rx_ring[0].size - 1,
1452                                     adapter) == 0) {
1453                 /* the 1st ring must have at least one rx buffer */
1454                 return -ENOMEM;
1455         }
1456         vmxnet3_rq_alloc_rx_buf(rq, 1, rq->rx_ring[1].size - 1, adapter);
1457
1458         /* reset the comp ring */
1459         rq->comp_ring.next2proc = 0;
1460         memset(rq->comp_ring.base, 0, rq->comp_ring.size *
1461                sizeof(struct Vmxnet3_RxCompDesc));
1462         rq->comp_ring.gen = VMXNET3_INIT_GEN;
1463
1464         /* reset rxctx */
1465         rq->rx_ctx.skb = NULL;
1466
1467         /* stats are not reset */
1468         return 0;
1469 }
1470
1471
1472 static int
1473 vmxnet3_rq_init_all(struct vmxnet3_adapter *adapter)
1474 {
1475         int i, err = 0;
1476
1477         for (i = 0; i < adapter->num_rx_queues; i++) {
1478                 err = vmxnet3_rq_init(&adapter->rx_queue[i], adapter);
1479                 if (unlikely(err)) {
1480                         dev_err(&adapter->netdev->dev, "%s: failed to "
1481                                 "initialize rx queue %i\n",
1482                                 adapter->netdev->name, i);
1483                         break;
1484                 }
1485         }
1486         return err;
1487
1488 }
1489
1490
1491 static int
1492 vmxnet3_rq_create(struct vmxnet3_rx_queue *rq, struct vmxnet3_adapter *adapter)
1493 {
1494         int i;
1495         size_t sz;
1496         struct vmxnet3_rx_buf_info *bi;
1497
1498         for (i = 0; i < 2; i++) {
1499
1500                 sz = rq->rx_ring[i].size * sizeof(struct Vmxnet3_RxDesc);
1501                 rq->rx_ring[i].base = pci_alloc_consistent(adapter->pdev, sz,
1502                                                         &rq->rx_ring[i].basePA);
1503                 if (!rq->rx_ring[i].base) {
1504                         printk(KERN_ERR "%s: failed to allocate rx ring %d\n",
1505                                adapter->netdev->name, i);
1506                         goto err;
1507                 }
1508         }
1509
1510         sz = rq->comp_ring.size * sizeof(struct Vmxnet3_RxCompDesc);
1511         rq->comp_ring.base = pci_alloc_consistent(adapter->pdev, sz,
1512                                                   &rq->comp_ring.basePA);
1513         if (!rq->comp_ring.base) {
1514                 printk(KERN_ERR "%s: failed to allocate rx comp ring\n",
1515                        adapter->netdev->name);
1516                 goto err;
1517         }
1518
1519         sz = sizeof(struct vmxnet3_rx_buf_info) * (rq->rx_ring[0].size +
1520                                                    rq->rx_ring[1].size);
1521         bi = kzalloc(sz, GFP_KERNEL);
1522         if (!bi) {
1523                 printk(KERN_ERR "%s: failed to allocate rx bufinfo\n",
1524                        adapter->netdev->name);
1525                 goto err;
1526         }
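             /* split the single allocation between the two rings; the
              * matching kfree(rq->buf_info[0]) in vmxnet3_rq_destroy()
              * frees both */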
1527         rq->buf_info[0] = bi;
1528         rq->buf_info[1] = bi + rq->rx_ring[0].size;
1529
1530         return 0;
1531
1532 err:
1533         vmxnet3_rq_destroy(rq, adapter);
1534         return -ENOMEM;
1535 }
1536
1537
1538 static int
1539 vmxnet3_rq_create_all(struct vmxnet3_adapter *adapter)
1540 {
1541         int i, err = 0;
1542
1543         for (i = 0; i < adapter->num_rx_queues; i++) {
1544                 err = vmxnet3_rq_create(&adapter->rx_queue[i], adapter);
1545                 if (unlikely(err)) {
1546                         dev_err(&adapter->netdev->dev,
1547                                 "%s: failed to create rx queue %i\n",
1548                                 adapter->netdev->name, i);
1549                         goto err_out;
1550                 }
1551         }
1552         return err;
1553 err_out:
1554         vmxnet3_rq_destroy_all(adapter);
1555         return err;
1556
1557 }
1558
1559 /* Multiple queue aware polling function for tx and rx */
1560
1561 static int
1562 vmxnet3_do_poll(struct vmxnet3_adapter *adapter, int budget)
1563 {
1564         int rcd_done = 0, i;
1565         if (unlikely(adapter->shared->ecr))
1566                 vmxnet3_process_events(adapter);
1567         for (i = 0; i < adapter->num_tx_queues; i++)
1568                 vmxnet3_tq_tx_complete(&adapter->tx_queue[i], adapter);
1569
1570         for (i = 0; i < adapter->num_rx_queues; i++)
1571                 rcd_done += vmxnet3_rq_rx_complete(&adapter->rx_queue[i],
1572                                                    adapter, budget);
1573         return rcd_done;
1574 }
1575
1576
1577 static int
1578 vmxnet3_poll(struct napi_struct *napi, int budget)
1579 {
1580         struct vmxnet3_rx_queue *rx_queue = container_of(napi,
1581                                           struct vmxnet3_rx_queue, napi);
1582         int rxd_done;
1583
1584         rxd_done = vmxnet3_do_poll(rx_queue->adapter, budget);
1585
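             /* per the NAPI contract, re-enable interrupts only when less
              * than the full budget was consumed */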
1586         if (rxd_done < budget) {
1587                 napi_complete(napi);
1588                 vmxnet3_enable_all_intrs(rx_queue->adapter);
1589         }
1590         return rxd_done;
1591 }
1592
1593 /*
1594  * NAPI polling function for MSI-X mode with multiple Rx queues
1595  * Returns the number of NAPI credits consumed (# of rx descriptors processed)
1596  */
1597
1598 static int
1599 vmxnet3_poll_rx_only(struct napi_struct *napi, int budget)
1600 {
1601         struct vmxnet3_rx_queue *rq = container_of(napi,
1602                                                 struct vmxnet3_rx_queue, napi);
1603         struct vmxnet3_adapter *adapter = rq->adapter;
1604         int rxd_done;
1605
1606         /* When sharing interrupt with corresponding tx queue, process
1607          * tx completions in that queue as well
1608          */
1609         if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE) {
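                     /* rq - adapter->rx_queue is this rx queue's index;
                      * tx queue i is the buddy of rx queue i */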
1610                 struct vmxnet3_tx_queue *tq =
1611                                 &adapter->tx_queue[rq - adapter->rx_queue];
1612                 vmxnet3_tq_tx_complete(tq, adapter);
1613         }
1614
1615         rxd_done = vmxnet3_rq_rx_complete(rq, adapter, budget);
1616
1617         if (rxd_done < budget) {
1618                 napi_complete(napi);
1619                 vmxnet3_enable_intr(adapter, rq->comp_ring.intr_idx);
1620         }
1621         return rxd_done;
1622 }
1623
1624
1625 #ifdef CONFIG_PCI_MSI
1626
1627 /*
1628  * Handle completion interrupts on tx queues
1629  * Returns whether or not the intr is handled
1630  */
1631
1632 static irqreturn_t
1633 vmxnet3_msix_tx(int irq, void *data)
1634 {
1635         struct vmxnet3_tx_queue *tq = data;
1636         struct vmxnet3_adapter *adapter = tq->adapter;
1637
1638         if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1639                 vmxnet3_disable_intr(adapter, tq->comp_ring.intr_idx);
1640
1641         /* Handle the case where only one irq is allocated for all tx queues */
1642         if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1643                 int i;
1644                 for (i = 0; i < adapter->num_tx_queues; i++) {
1645                         struct vmxnet3_tx_queue *txq = &adapter->tx_queue[i];
1646                         vmxnet3_tq_tx_complete(txq, adapter);
1647                 }
1648         } else {
1649                 vmxnet3_tq_tx_complete(tq, adapter);
1650         }
1651         vmxnet3_enable_intr(adapter, tq->comp_ring.intr_idx);
1652
1653         return IRQ_HANDLED;
1654 }
1655
1656
1657 /*
1658  * Handle completion interrupts on rx queues. Returns whether or not the
1659  * intr is handled
1660  */
1661
1662 static irqreturn_t
1663 vmxnet3_msix_rx(int irq, void *data)
1664 {
1665         struct vmxnet3_rx_queue *rq = data;
1666         struct vmxnet3_adapter *adapter = rq->adapter;
1667
1668         /* disable intr if needed */
1669         if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1670                 vmxnet3_disable_intr(adapter, rq->comp_ring.intr_idx);
1671         napi_schedule(&rq->napi);
1672
1673         return IRQ_HANDLED;
1674 }
1675
1676 /*
1677  * Handle the event interrupt. Returns whether or not the
1678  * intr is handled
1679  */
1688
1689 static irqreturn_t
1690 vmxnet3_msix_event(int irq, void *data)
1691 {
1692         struct net_device *dev = data;
1693         struct vmxnet3_adapter *adapter = netdev_priv(dev);
1694
1695         /* disable intr if needed */
1696         if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1697                 vmxnet3_disable_intr(adapter, adapter->intr.event_intr_idx);
1698
1699         if (adapter->shared->ecr)
1700                 vmxnet3_process_events(adapter);
1701
1702         vmxnet3_enable_intr(adapter, adapter->intr.event_intr_idx);
1703
1704         return IRQ_HANDLED;
1705 }
1706
1707 #endif /* CONFIG_PCI_MSI  */
1708
1709
1710 /* Interrupt handler for vmxnet3  */
1711 static irqreturn_t
1712 vmxnet3_intr(int irq, void *dev_id)
1713 {
1714         struct net_device *dev = dev_id;
1715         struct vmxnet3_adapter *adapter = netdev_priv(dev);
1716
1717         if (adapter->intr.type == VMXNET3_IT_INTX) {
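                     /* the INTx line may be shared (IRQF_SHARED); ICR tells
                      * us whether this device raised the interrupt */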
1718                 u32 icr = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_ICR);
1719                 if (unlikely(icr == 0))
1720                         /* not ours */
1721                         return IRQ_NONE;
1722         }
1723
1724
1725         /* disable intr if needed */
1726         if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1727                 vmxnet3_disable_all_intrs(adapter);
1728
1729         napi_schedule(&adapter->rx_queue[0].napi);
1730
1731         return IRQ_HANDLED;
1732 }
1733
1734 #ifdef CONFIG_NET_POLL_CONTROLLER
1735
1736 /* netpoll callback. */
1737 static void
1738 vmxnet3_netpoll(struct net_device *netdev)
1739 {
1740         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1741
1742         if (adapter->intr.mask_mode == VMXNET3_IMM_ACTIVE)
1743                 vmxnet3_disable_all_intrs(adapter);
1744
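             /* poll with a budget equal to the first rx ring's size */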
1745         vmxnet3_do_poll(adapter, adapter->rx_queue[0].rx_ring[0].size);
1746         vmxnet3_enable_all_intrs(adapter);
1747
1748 }
1749 #endif  /* CONFIG_NET_POLL_CONTROLLER */
1750
1751 static int
1752 vmxnet3_request_irqs(struct vmxnet3_adapter *adapter)
1753 {
1754         struct vmxnet3_intr *intr = &adapter->intr;
1755         int err = 0, i;
1756         int vector = 0;
1757
1758 #ifdef CONFIG_PCI_MSI
1759         if (adapter->intr.type == VMXNET3_IT_MSIX) {
1760                 for (i = 0; i < adapter->num_tx_queues; i++) {
1761                         if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
1762                                 sprintf(adapter->tx_queue[i].name, "%s-tx-%d",
1763                                         adapter->netdev->name, vector);
1764                                 err = request_irq(
1765                                               intr->msix_entries[vector].vector,
1766                                               vmxnet3_msix_tx, 0,
1767                                               adapter->tx_queue[i].name,
1768                                               &adapter->tx_queue[i]);
1769                         } else {
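                                     /* with BUDDYSHARE the actual
                                      * request_irq() happens in the rx loop
                                      * below, where the vector is shared
                                      * with the buddy rx queue; only set
                                      * the name here */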
1770                                 sprintf(adapter->tx_queue[i].name, "%s-rxtx-%d",
1771                                         adapter->netdev->name, vector);
1772                         }
1773                         if (err) {
1774                                 dev_err(&adapter->netdev->dev,
1775                                         "Failed to request irq for MSIX, %s, "
1776                                         "error %d\n",
1777                                         adapter->tx_queue[i].name, err);
1778                                 return err;
1779                         }
1780
1781                         /* Handle the case where only 1 MSI-X vector was
1782                          * allocated for all tx queues */
1783                         if (adapter->share_intr == VMXNET3_INTR_TXSHARE) {
1784                                 for (; i < adapter->num_tx_queues; i++)
1785                                         adapter->tx_queue[i].comp_ring.intr_idx
1786                                                                 = vector;
1787                                 vector++;
1788                                 break;
1789                         } else {
1790                                 adapter->tx_queue[i].comp_ring.intr_idx
1791                                                                 = vector++;
1792                         }
1793                 }
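                     /* with BUDDYSHARE, rx queues reuse their buddy tx
                      * queues' vectors, so vector numbering restarts at 0 */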
1794                 if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE)
1795                         vector = 0;
1796
1797                 for (i = 0; i < adapter->num_rx_queues; i++) {
1798                         if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE)
1799                                 sprintf(adapter->rx_queue[i].name, "%s-rx-%d",
1800                                         adapter->netdev->name, vector);
1801                         else
1802                                 sprintf(adapter->rx_queue[i].name, "%s-rxtx-%d",
1803                                         adapter->netdev->name, vector);
1804                         err = request_irq(intr->msix_entries[vector].vector,
1805                                           vmxnet3_msix_rx, 0,
1806                                           adapter->rx_queue[i].name,
1807                                           &(adapter->rx_queue[i]));
1808                         if (err) {
1809                                 printk(KERN_ERR "Failed to request irq for MSIX"
1810                                        ", %s, error %d\n",
1811                                        adapter->rx_queue[i].name, err);
1812                                 return err;
1813                         }
1814
1815                         adapter->rx_queue[i].comp_ring.intr_idx = vector++;
1816                 }
1817
1818                 sprintf(intr->event_msi_vector_name, "%s-event-%d",
1819                         adapter->netdev->name, vector);
1820                 err = request_irq(intr->msix_entries[vector].vector,
1821                                   vmxnet3_msix_event, 0,
1822                                   intr->event_msi_vector_name, adapter->netdev);
1823                 intr->event_intr_idx = vector;
1824
1825         } else if (intr->type == VMXNET3_IT_MSI) {
1826                 adapter->num_rx_queues = 1;
1827                 err = request_irq(adapter->pdev->irq, vmxnet3_intr, 0,
1828                                   adapter->netdev->name, adapter->netdev);
1829         } else {
1830 #endif
1831                 adapter->num_rx_queues = 1;
1832                 err = request_irq(adapter->pdev->irq, vmxnet3_intr,
1833                                   IRQF_SHARED, adapter->netdev->name,
1834                                   adapter->netdev);
1835 #ifdef CONFIG_PCI_MSI
1836         }
1837 #endif
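             /* in the MSI-X case `vector` now indexes the event interrupt,
              * so the total is vector + 1; for MSI/INTx, vector is still 0
              * and this yields 1 */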
1838         intr->num_intrs = vector + 1;
1839         if (err) {
1840                 printk(KERN_ERR "Failed to request irq %s (intr type:%d), "
1841                        "error %d\n", adapter->netdev->name, intr->type, err);
1842         } else {
1843                 /* Number of rx queues will not change after this */
1844                 for (i = 0; i < adapter->num_rx_queues; i++) {
1845                         struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
1846                         rq->qid = i;
1847                         rq->qid2 = i + adapter->num_rx_queues;
1848                 }
1849
1850
1851
1852                 /* init our intr settings */
1853                 for (i = 0; i < intr->num_intrs; i++)
1854                         intr->mod_levels[i] = UPT1_IML_ADAPTIVE;
1855                 if (adapter->intr.type != VMXNET3_IT_MSIX) {
1856                         adapter->intr.event_intr_idx = 0;
1857                         for (i = 0; i < adapter->num_tx_queues; i++)
1858                                 adapter->tx_queue[i].comp_ring.intr_idx = 0;
1859                         adapter->rx_queue[0].comp_ring.intr_idx = 0;
1860                 }
1861
1862                 printk(KERN_INFO "%s: intr type %u, mode %u, %u vectors "
1863                        "allocated\n", adapter->netdev->name, intr->type,
1864                        intr->mask_mode, intr->num_intrs);
1865         }
1866
1867         return err;
1868 }
1869
1870
1871 static void
1872 vmxnet3_free_irqs(struct vmxnet3_adapter *adapter)
1873 {
1874         struct vmxnet3_intr *intr = &adapter->intr;
1875         BUG_ON(intr->type == VMXNET3_IT_AUTO || intr->num_intrs <= 0);
1876
1877         switch (intr->type) {
1878 #ifdef CONFIG_PCI_MSI
1879         case VMXNET3_IT_MSIX:
1880         {
1881                 int i, vector = 0;
1882
1883                 if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE) {
1884                         for (i = 0; i < adapter->num_tx_queues; i++) {
1885                                 free_irq(intr->msix_entries[vector++].vector,
1886                                          &(adapter->tx_queue[i]));
1887                                 if (adapter->share_intr == VMXNET3_INTR_TXSHARE)
1888                                         break;
1889                         }
1890                 }
1891
1892                 for (i = 0; i < adapter->num_rx_queues; i++) {
1893                         free_irq(intr->msix_entries[vector++].vector,
1894                                  &(adapter->rx_queue[i]));
1895                 }
1896
1897                 free_irq(intr->msix_entries[vector].vector,
1898                          adapter->netdev);
1899                 BUG_ON(vector >= intr->num_intrs);
1900                 break;
1901         }
1902 #endif
1903         case VMXNET3_IT_MSI:
1904                 free_irq(adapter->pdev->irq, adapter->netdev);
1905                 break;
1906         case VMXNET3_IT_INTX:
1907                 free_irq(adapter->pdev->irq, adapter->netdev);
1908                 break;
1909         default:
1910                 BUG_ON(true);
1911         }
1912 }
1913
1914
1915 static void
1916 vmxnet3_restore_vlan(struct vmxnet3_adapter *adapter)
1917 {
1918         u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1919         u16 vid;
1920
1921         /* allow untagged pkts */
1922         VMXNET3_SET_VFTABLE_ENTRY(vfTable, 0);
1923
1924         for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
1925                 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1926 }
1927
1928
1929 static void
1930 vmxnet3_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
1931 {
1932         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1933
1934         if (!(netdev->flags & IFF_PROMISC)) {
1935                 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1936                 unsigned long flags;
1937
1938                 VMXNET3_SET_VFTABLE_ENTRY(vfTable, vid);
1939                 spin_lock_irqsave(&adapter->cmd_lock, flags);
1940                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1941                                        VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1942                 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1943         }
1944
1945         set_bit(vid, adapter->active_vlans);
1946 }
1947
1948
1949 static void
1950 vmxnet3_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
1951 {
1952         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1953
1954         if (!(netdev->flags & IFF_PROMISC)) {
1955                 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
1956                 unsigned long flags;
1957
1958                 VMXNET3_CLEAR_VFTABLE_ENTRY(vfTable, vid);
1959                 spin_lock_irqsave(&adapter->cmd_lock, flags);
1960                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
1961                                        VMXNET3_CMD_UPDATE_VLAN_FILTERS);
1962                 spin_unlock_irqrestore(&adapter->cmd_lock, flags);
1963         }
1964
1965         clear_bit(vid, adapter->active_vlans);
1966 }
1967
1968
1969 static u8 *
1970 vmxnet3_copy_mc(struct net_device *netdev)
1971 {
1972         u8 *buf = NULL;
1973         u32 sz = netdev_mc_count(netdev) * ETH_ALEN;
1974
1975         /* struct Vmxnet3_RxFilterConf.mfTableLen is u16. */
1976         if (sz <= 0xffff) {
1977                 /* We may be called with BH disabled */
1978                 buf = kmalloc(sz, GFP_ATOMIC);
1979                 if (buf) {
1980                         struct netdev_hw_addr *ha;
1981                         int i = 0;
1982
1983                         netdev_for_each_mc_addr(ha, netdev)
1984                                 memcpy(buf + i++ * ETH_ALEN, ha->addr,
1985                                        ETH_ALEN);
1986                 }
1987         }
1988         return buf;
1989 }
1990
1991
1992 static void
1993 vmxnet3_set_mc(struct net_device *netdev)
1994 {
1995         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
1996         unsigned long flags;
1997         struct Vmxnet3_RxFilterConf *rxConf =
1998                                         &adapter->shared->devRead.rxFilterConf;
1999         u8 *new_table = NULL;
2000         u32 new_mode = VMXNET3_RXM_UCAST;
2001
2002         if (netdev->flags & IFF_PROMISC) {
2003                 u32 *vfTable = adapter->shared->devRead.rxFilterConf.vfTable;
2004                 memset(vfTable, 0, VMXNET3_VFT_SIZE * sizeof(*vfTable));
2005
2006                 new_mode |= VMXNET3_RXM_PROMISC;
2007         } else {
2008                 vmxnet3_restore_vlan(adapter);
2009         }
2010
2011         if (netdev->flags & IFF_BROADCAST)
2012                 new_mode |= VMXNET3_RXM_BCAST;
2013
2014         if (netdev->flags & IFF_ALLMULTI)
2015                 new_mode |= VMXNET3_RXM_ALL_MULTI;
2016         else
2017                 if (!netdev_mc_empty(netdev)) {
2018                         new_table = vmxnet3_copy_mc(netdev);
2019                         if (new_table) {
2020                                 new_mode |= VMXNET3_RXM_MCAST;
2021                                 rxConf->mfTableLen = cpu_to_le16(
2022                                         netdev_mc_count(netdev) * ETH_ALEN);
2023                                 rxConf->mfTablePA = cpu_to_le64(virt_to_phys(
2024                                                     new_table));
2025                         } else {
2026                                 printk(KERN_INFO "%s: failed to copy mcast list"
2027                                        ", setting ALL_MULTI\n", netdev->name);
2028                                 new_mode |= VMXNET3_RXM_ALL_MULTI;
2029                         }
2030                 }
2031
2032
2033         if (!(new_mode & VMXNET3_RXM_MCAST)) {
2034                 rxConf->mfTableLen = 0;
2035                 rxConf->mfTablePA = 0;
2036         }
2037
2038         spin_lock_irqsave(&adapter->cmd_lock, flags);
2039         if (new_mode != rxConf->rxMode) {
2040                 rxConf->rxMode = cpu_to_le32(new_mode);
2041                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2042                                        VMXNET3_CMD_UPDATE_RX_MODE);
2043                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2044                                        VMXNET3_CMD_UPDATE_VLAN_FILTERS);
2045         }
2046
2047         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2048                                VMXNET3_CMD_UPDATE_MAC_FILTERS);
2049         spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2050
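             /* the UPDATE_MAC_FILTERS command above makes the device read
              * the mc table (if any), so it is safe to free it now */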
2051         kfree(new_table);
2052 }
2053
2054 void
2055 vmxnet3_rq_destroy_all(struct vmxnet3_adapter *adapter)
2056 {
2057         int i;
2058
2059         for (i = 0; i < adapter->num_rx_queues; i++)
2060                 vmxnet3_rq_destroy(&adapter->rx_queue[i], adapter);
2061 }
2062
2063
2064 /*
2065  *   Set up driver_shared based on settings in adapter.
2066  */
2067
2068 static void
2069 vmxnet3_setup_driver_shared(struct vmxnet3_adapter *adapter)
2070 {
2071         struct Vmxnet3_DriverShared *shared = adapter->shared;
2072         struct Vmxnet3_DSDevRead *devRead = &shared->devRead;
2073         struct Vmxnet3_TxQueueConf *tqc;
2074         struct Vmxnet3_RxQueueConf *rqc;
2075         int i;
2076
2077         memset(shared, 0, sizeof(*shared));
2078
2079         /* driver settings */
2080         shared->magic = cpu_to_le32(VMXNET3_REV1_MAGIC);
2081         devRead->misc.driverInfo.version = cpu_to_le32(
2082                                                 VMXNET3_DRIVER_VERSION_NUM);
2083         devRead->misc.driverInfo.gos.gosBits = (sizeof(void *) == 4 ?
2084                                 VMXNET3_GOS_BITS_32 : VMXNET3_GOS_BITS_64);
2085         devRead->misc.driverInfo.gos.gosType = VMXNET3_GOS_TYPE_LINUX;
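             /* gos is a packed bitfield; byte-swap it to little endian as
              * a single 32-bit word rather than per field */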
2086         *((u32 *)&devRead->misc.driverInfo.gos) = cpu_to_le32(
2087                                 *((u32 *)&devRead->misc.driverInfo.gos));
2088         devRead->misc.driverInfo.vmxnet3RevSpt = cpu_to_le32(1);
2089         devRead->misc.driverInfo.uptVerSpt = cpu_to_le32(1);
2090
2091         devRead->misc.ddPA = cpu_to_le64(virt_to_phys(adapter));
2092         devRead->misc.ddLen = cpu_to_le32(sizeof(struct vmxnet3_adapter));
2093
2094         /* set up feature flags */
2095         if (adapter->netdev->features & NETIF_F_RXCSUM)
2096                 devRead->misc.uptFeatures |= UPT1_F_RXCSUM;
2097
2098         if (adapter->netdev->features & NETIF_F_LRO) {
2099                 devRead->misc.uptFeatures |= UPT1_F_LRO;
2100                 devRead->misc.maxNumRxSG = cpu_to_le16(1 + MAX_SKB_FRAGS);
2101         }
2102         if (adapter->netdev->features & NETIF_F_HW_VLAN_RX)
2103                 devRead->misc.uptFeatures |= UPT1_F_RXVLAN;
2104
2105         devRead->misc.mtu = cpu_to_le32(adapter->netdev->mtu);
2106         devRead->misc.queueDescPA = cpu_to_le64(adapter->queue_desc_pa);
2107         devRead->misc.queueDescLen = cpu_to_le32(
2108                 adapter->num_tx_queues * sizeof(struct Vmxnet3_TxQueueDesc) +
2109                 adapter->num_rx_queues * sizeof(struct Vmxnet3_RxQueueDesc));
2110
2111         /* tx queue settings */
2112         devRead->misc.numTxQueues =  adapter->num_tx_queues;
2113         for (i = 0; i < adapter->num_tx_queues; i++) {
2114                 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2115                 BUG_ON(adapter->tx_queue[i].tx_ring.base == NULL);
2116                 tqc = &adapter->tqd_start[i].conf;
2117                 tqc->txRingBasePA   = cpu_to_le64(tq->tx_ring.basePA);
2118                 tqc->dataRingBasePA = cpu_to_le64(tq->data_ring.basePA);
2119                 tqc->compRingBasePA = cpu_to_le64(tq->comp_ring.basePA);
2120                 tqc->ddPA           = cpu_to_le64(virt_to_phys(tq->buf_info));
2121                 tqc->txRingSize     = cpu_to_le32(tq->tx_ring.size);
2122                 tqc->dataRingSize   = cpu_to_le32(tq->data_ring.size);
2123                 tqc->compRingSize   = cpu_to_le32(tq->comp_ring.size);
2124                 tqc->ddLen          = cpu_to_le32(
2125                                         sizeof(struct vmxnet3_tx_buf_info) *
2126                                         tqc->txRingSize);
2127                 tqc->intrIdx        = tq->comp_ring.intr_idx;
2128         }
2129
2130         /* rx queue settings */
2131         devRead->misc.numRxQueues = adapter->num_rx_queues;
2132         for (i = 0; i < adapter->num_rx_queues; i++) {
2133                 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2134                 rqc = &adapter->rqd_start[i].conf;
2135                 rqc->rxRingBasePA[0] = cpu_to_le64(rq->rx_ring[0].basePA);
2136                 rqc->rxRingBasePA[1] = cpu_to_le64(rq->rx_ring[1].basePA);
2137                 rqc->compRingBasePA  = cpu_to_le64(rq->comp_ring.basePA);
2138                 rqc->ddPA            = cpu_to_le64(virt_to_phys(
2139                                                         rq->buf_info));
2140                 rqc->rxRingSize[0]   = cpu_to_le32(rq->rx_ring[0].size);
2141                 rqc->rxRingSize[1]   = cpu_to_le32(rq->rx_ring[1].size);
2142                 rqc->compRingSize    = cpu_to_le32(rq->comp_ring.size);
2143                 rqc->ddLen           = cpu_to_le32(
2144                                         sizeof(struct vmxnet3_rx_buf_info) *
2145                                         (rqc->rxRingSize[0] +
2146                                          rqc->rxRingSize[1]));
2147                 rqc->intrIdx         = rq->comp_ring.intr_idx;
2148         }
2149
2150 #ifdef VMXNET3_RSS
2151         memset(adapter->rss_conf, 0, sizeof(*adapter->rss_conf));
2152
2153         if (adapter->rss) {
2154                 struct UPT1_RSSConf *rssConf = adapter->rss_conf;
2155                 devRead->misc.uptFeatures |= UPT1_F_RSS;
2156                 devRead->misc.numRxQueues = adapter->num_rx_queues;
2157                 rssConf->hashType = UPT1_RSS_HASH_TYPE_TCP_IPV4 |
2158                                     UPT1_RSS_HASH_TYPE_IPV4 |
2159                                     UPT1_RSS_HASH_TYPE_TCP_IPV6 |
2160                                     UPT1_RSS_HASH_TYPE_IPV6;
2161                 rssConf->hashFunc = UPT1_RSS_HASH_FUNC_TOEPLITZ;
2162                 rssConf->hashKeySize = UPT1_RSS_MAX_KEY_SIZE;
2163                 rssConf->indTableSize = VMXNET3_RSS_IND_TABLE_SIZE;
2164                 get_random_bytes(&rssConf->hashKey[0], rssConf->hashKeySize);
2165                 for (i = 0; i < rssConf->indTableSize; i++)
2166                         rssConf->indTable[i] = i % adapter->num_rx_queues;
2167
2168                 devRead->rssConfDesc.confVer = 1;
2169                 devRead->rssConfDesc.confLen = sizeof(*rssConf);
2170                 devRead->rssConfDesc.confPA  = virt_to_phys(rssConf);
2171         }
2172
2173 #endif /* VMXNET3_RSS */
2174
2175         /* intr settings */
2176         devRead->intrConf.autoMask = adapter->intr.mask_mode ==
2177                                      VMXNET3_IMM_AUTO;
2178         devRead->intrConf.numIntrs = adapter->intr.num_intrs;
2179         for (i = 0; i < adapter->intr.num_intrs; i++)
2180                 devRead->intrConf.modLevels[i] = adapter->intr.mod_levels[i];
2181
2182         devRead->intrConf.eventIntrIdx = adapter->intr.event_intr_idx;
2183         devRead->intrConf.intrCtrl |= cpu_to_le32(VMXNET3_IC_DISABLE_ALL);
2184
2185         /* rx filter settings */
2186         devRead->rxFilterConf.rxMode = 0;
2187         vmxnet3_restore_vlan(adapter);
2188         vmxnet3_write_mac_addr(adapter, adapter->netdev->dev_addr);
2189
2190         /* the rest are already zeroed */
2191 }
2192
2193
2194 int
2195 vmxnet3_activate_dev(struct vmxnet3_adapter *adapter)
2196 {
2197         int err, i;
2198         u32 ret;
2199         unsigned long flags;
2200
2201         dev_dbg(&adapter->netdev->dev, "%s: skb_buf_size %d, rx_buf_per_pkt %d,"
2202                 " ring sizes %u %u %u\n", adapter->netdev->name,
2203                 adapter->skb_buf_size, adapter->rx_buf_per_pkt,
2204                 adapter->tx_queue[0].tx_ring.size,
2205                 adapter->rx_queue[0].rx_ring[0].size,
2206                 adapter->rx_queue[0].rx_ring[1].size);
2207
2208         vmxnet3_tq_init_all(adapter);
2209         err = vmxnet3_rq_init_all(adapter);
2210         if (err) {
2211                 printk(KERN_ERR "Failed to init rx queue for %s: error %d\n",
2212                        adapter->netdev->name, err);
2213                 goto rq_err;
2214         }
2215
2216         err = vmxnet3_request_irqs(adapter);
2217         if (err) {
2218                 printk(KERN_ERR "Failed to setup irq for %s: error %d\n",
2219                        adapter->netdev->name, err);
2220                 goto irq_err;
2221         }
2222
2223         vmxnet3_setup_driver_shared(adapter);
2224
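             /* hand the shared memory area to the device: low and high
              * 32 bits of its physical address */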
2225         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, VMXNET3_GET_ADDR_LO(
2226                                adapter->shared_pa));
2227         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, VMXNET3_GET_ADDR_HI(
2228                                adapter->shared_pa));
2229         spin_lock_irqsave(&adapter->cmd_lock, flags);
2230         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2231                                VMXNET3_CMD_ACTIVATE_DEV);
2232         ret = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2233         spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2234
2235         if (ret != 0) {
2236                 printk(KERN_ERR "Failed to activate dev %s: error %u\n",
2237                        adapter->netdev->name, ret);
2238                 err = -EINVAL;
2239                 goto activate_err;
2240         }
2241
2242         for (i = 0; i < adapter->num_rx_queues; i++) {
2243                 VMXNET3_WRITE_BAR0_REG(adapter,
2244                                 VMXNET3_REG_RXPROD + i * VMXNET3_REG_ALIGN,
2245                                 adapter->rx_queue[i].rx_ring[0].next2fill);
2246                 VMXNET3_WRITE_BAR0_REG(adapter, (VMXNET3_REG_RXPROD2 +
2247                                 (i * VMXNET3_REG_ALIGN)),
2248                                 adapter->rx_queue[i].rx_ring[1].next2fill);
2249         }
2250
2251         /* Apply the rx filter settings last. */
2252         vmxnet3_set_mc(adapter->netdev);
2253
2254         /*
2255          * Check link state when first activating device. It will start the
2256          * tx queue if the link is up.
2257          */
2258         vmxnet3_check_link(adapter, true);
2259         for (i = 0; i < adapter->num_rx_queues; i++)
2260                 napi_enable(&adapter->rx_queue[i].napi);
2261         vmxnet3_enable_all_intrs(adapter);
2262         clear_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
2263         return 0;
2264
2265 activate_err:
2266         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAL, 0);
2267         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_DSAH, 0);
2268         vmxnet3_free_irqs(adapter);
2269 irq_err:
2270 rq_err:
2271         /* free up buffers we allocated */
2272         vmxnet3_rq_cleanup_all(adapter);
2273         return err;
2274 }
2275
2276
2277 void
2278 vmxnet3_reset_dev(struct vmxnet3_adapter *adapter)
2279 {
2280         unsigned long flags;
2281         spin_lock_irqsave(&adapter->cmd_lock, flags);
2282         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD, VMXNET3_CMD_RESET_DEV);
2283         spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2284 }
2285
2286
2287 int
2288 vmxnet3_quiesce_dev(struct vmxnet3_adapter *adapter)
2289 {
2290         int i;
2291         unsigned long flags;
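             /* nothing to do if the device is already quiesced */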
2292         if (test_and_set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state))
2293                 return 0;
2294
2295
2296         spin_lock_irqsave(&adapter->cmd_lock, flags);
2297         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2298                                VMXNET3_CMD_QUIESCE_DEV);
2299         spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2300         vmxnet3_disable_all_intrs(adapter);
2301
2302         for (i = 0; i < adapter->num_rx_queues; i++)
2303                 napi_disable(&adapter->rx_queue[i].napi);
2304         netif_tx_disable(adapter->netdev);
2305         adapter->link_speed = 0;
2306         netif_carrier_off(adapter->netdev);
2307
2308         vmxnet3_tq_cleanup_all(adapter);
2309         vmxnet3_rq_cleanup_all(adapter);
2310         vmxnet3_free_irqs(adapter);
2311         return 0;
2312 }
2313
2314
2315 static void
2316 vmxnet3_write_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2317 {
2318         u32 tmp;
2319
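             /* MACL holds the first four bytes of the MAC address, MACH the
              * remaining two */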
2320         tmp = *(u32 *)mac;
2321         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACL, tmp);
2322
2323         tmp = (mac[5] << 8) | mac[4];
2324         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_MACH, tmp);
2325 }
2326
2327
2328 static int
2329 vmxnet3_set_mac_addr(struct net_device *netdev, void *p)
2330 {
2331         struct sockaddr *addr = p;
2332         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2333
2334         memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
2335         vmxnet3_write_mac_addr(adapter, addr->sa_data);
2336
2337         return 0;
2338 }
2339
2340
2341 /* ==================== initialization and cleanup routines ============ */
2342
2343 static int
2344 vmxnet3_alloc_pci_resources(struct vmxnet3_adapter *adapter, bool *dma64)
2345 {
2346         int err;
2347         unsigned long mmio_start, mmio_len;
2348         struct pci_dev *pdev = adapter->pdev;
2349
2350         err = pci_enable_device(pdev);
2351         if (err) {
2352                 printk(KERN_ERR "Failed to enable adapter %s: error %d\n",
2353                        pci_name(pdev), err);
2354                 return err;
2355         }
2356
2357         if (pci_set_dma_mask(pdev, DMA_BIT_MASK(64)) == 0) {
2358                 if (pci_set_consistent_dma_mask(pdev, DMA_BIT_MASK(64)) != 0) {
2359                         printk(KERN_ERR "pci_set_consistent_dma_mask failed "
2360                                "for adapter %s\n", pci_name(pdev));
2361                         err = -EIO;
2362                         goto err_set_mask;
2363                 }
2364                 *dma64 = true;
2365         } else {
2366                 if (pci_set_dma_mask(pdev, DMA_BIT_MASK(32)) != 0) {
2367                         printk(KERN_ERR "pci_set_dma_mask failed for adapter "
2368                                "%s\n",  pci_name(pdev));
2369                         err = -EIO;
2370                         goto err_set_mask;
2371                 }
2372                 *dma64 = false;
2373         }
2374
2375         err = pci_request_selected_regions(pdev, (1 << 2) - 1,
2376                                            vmxnet3_driver_name);
2377         if (err) {
2378                 printk(KERN_ERR "Failed to request region for adapter %s: "
2379                        "error %d\n", pci_name(pdev), err);
2380                 goto err_set_mask;
2381         }
2382
2383         pci_set_master(pdev);
2384
2385         mmio_start = pci_resource_start(pdev, 0);
2386         mmio_len = pci_resource_len(pdev, 0);
2387         adapter->hw_addr0 = ioremap(mmio_start, mmio_len);
2388         if (!adapter->hw_addr0) {
2389                 printk(KERN_ERR "Failed to map bar0 for adapter %s\n",
2390                        pci_name(pdev));
2391                 err = -EIO;
2392                 goto err_ioremap;
2393         }
2394
2395         mmio_start = pci_resource_start(pdev, 1);
2396         mmio_len = pci_resource_len(pdev, 1);
2397         adapter->hw_addr1 = ioremap(mmio_start, mmio_len);
2398         if (!adapter->hw_addr1) {
2399                 printk(KERN_ERR "Failed to map bar1 for adapter %s\n",
2400                        pci_name(pdev));
2401                 err = -EIO;
2402                 goto err_bar1;
2403         }
2404         return 0;
2405
2406 err_bar1:
2407         iounmap(adapter->hw_addr0);
2408 err_ioremap:
2409         pci_release_selected_regions(pdev, (1 << 2) - 1);
2410 err_set_mask:
2411         pci_disable_device(pdev);
2412         return err;
2413 }
2414
2415
2416 static void
2417 vmxnet3_free_pci_resources(struct vmxnet3_adapter *adapter)
2418 {
2419         BUG_ON(!adapter->pdev);
2420
2421         iounmap(adapter->hw_addr0);
2422         iounmap(adapter->hw_addr1);
2423         pci_release_selected_regions(adapter->pdev, (1 << 2) - 1);
2424         pci_disable_device(adapter->pdev);
2425 }
2426
2427
2428 static void
2429 vmxnet3_adjust_rx_ring_size(struct vmxnet3_adapter *adapter)
2430 {
2431         size_t sz, i, ring0_size, ring1_size, comp_size;
2432         struct vmxnet3_rx_queue *rq = &adapter->rx_queue[0];
2433
2434
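             /* choose the head (skb) buffer size: if a whole packet fits in
              * a single skb buffer, only one buffer per packet is needed;
              * larger MTUs spill the remainder into page-sized body
              * buffers */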
2435         if (adapter->netdev->mtu <= VMXNET3_MAX_SKB_BUF_SIZE -
2436                                     VMXNET3_MAX_ETH_HDR_SIZE) {
2437                 adapter->skb_buf_size = adapter->netdev->mtu +
2438                                         VMXNET3_MAX_ETH_HDR_SIZE;
2439                 if (adapter->skb_buf_size < VMXNET3_MIN_T0_BUF_SIZE)
2440                         adapter->skb_buf_size = VMXNET3_MIN_T0_BUF_SIZE;
2441
2442                 adapter->rx_buf_per_pkt = 1;
2443         } else {
2444                 adapter->skb_buf_size = VMXNET3_MAX_SKB_BUF_SIZE;
2445                 sz = adapter->netdev->mtu - VMXNET3_MAX_SKB_BUF_SIZE +
2446                                             VMXNET3_MAX_ETH_HDR_SIZE;
2447                 adapter->rx_buf_per_pkt = 1 + (sz + PAGE_SIZE - 1) / PAGE_SIZE;
2448         }
2449
2450         /*
2451          * for simplicity, force the ring0 size to be a multiple of
2452          * rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN
2453          */
2454         sz = adapter->rx_buf_per_pkt * VMXNET3_RING_SIZE_ALIGN;
2455         ring0_size = adapter->rx_queue[0].rx_ring[0].size;
2456         ring0_size = (ring0_size + sz - 1) / sz * sz;
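             /* cap ring0 at the largest multiple of sz that fits within
              * VMXNET3_RX_RING_MAX_SIZE */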
2457         ring0_size = min_t(u32, ring0_size, VMXNET3_RX_RING_MAX_SIZE /
2458                            sz * sz);
2459         ring1_size = adapter->rx_queue[0].rx_ring[1].size;
2460         comp_size = ring0_size + ring1_size;
2461
2462         for (i = 0; i < adapter->num_rx_queues; i++) {
2463                 rq = &adapter->rx_queue[i];
2464                 rq->rx_ring[0].size = ring0_size;
2465                 rq->rx_ring[1].size = ring1_size;
2466                 rq->comp_ring.size = comp_size;
2467         }
2468 }
2469
2470
2471 int
2472 vmxnet3_create_queues(struct vmxnet3_adapter *adapter, u32 tx_ring_size,
2473                       u32 rx_ring_size, u32 rx_ring2_size)
2474 {
2475         int err = 0, i;
2476
2477         for (i = 0; i < adapter->num_tx_queues; i++) {
2478                 struct vmxnet3_tx_queue *tq = &adapter->tx_queue[i];
2479                 tq->tx_ring.size   = tx_ring_size;
2480                 tq->data_ring.size = tx_ring_size;
2481                 tq->comp_ring.size = tx_ring_size;
2482                 tq->shared = &adapter->tqd_start[i].ctrl;
2483                 tq->stopped = true;
2484                 tq->adapter = adapter;
2485                 tq->qid = i;
2486                 err = vmxnet3_tq_create(tq, adapter);
2487                 /*
2488                  * Too late to change num_tx_queues. Unlike rx queues, we
2489                  * cannot fall back to fewer tx queues than requested.
2490                  */
2491                 if (err)
2492                         goto queue_err;
2493         }
2494
2495         adapter->rx_queue[0].rx_ring[0].size = rx_ring_size;
2496         adapter->rx_queue[0].rx_ring[1].size = rx_ring2_size;
2497         vmxnet3_adjust_rx_ring_size(adapter);
2498         for (i = 0; i < adapter->num_rx_queues; i++) {
2499                 struct vmxnet3_rx_queue *rq = &adapter->rx_queue[i];
2500                 /* qid and qid2 for rx queues will be assigned later, once the
2501                  * number of rx queues is finalized after allocating intrs */
2502                 rq->shared = &adapter->rqd_start[i].ctrl;
2503                 rq->adapter = adapter;
2504                 err = vmxnet3_rq_create(rq, adapter);
2505                 if (err) {
2506                         if (i == 0) {
2507                                 printk(KERN_ERR "Could not allocate any rx "
2508                                        "queues. Aborting.\n");
2509                                 goto queue_err;
2510                         } else {
2511                                 printk(KERN_INFO "Number of rx queues changed "
2512                                        "to %d.\n", i);
2513                                 adapter->num_rx_queues = i;
2514                                 err = 0;
2515                                 break;
2516                         }
2517                 }
2518         }
2519         return err;
2520 queue_err:
2521         vmxnet3_tq_destroy_all(adapter);
2522         return err;
2523 }
2524
2525 static int
2526 vmxnet3_open(struct net_device *netdev)
2527 {
2528         struct vmxnet3_adapter *adapter;
2529         int err, i;
2530
2531         adapter = netdev_priv(netdev);
2532
2533         for (i = 0; i < adapter->num_tx_queues; i++)
2534                 spin_lock_init(&adapter->tx_queue[i].tx_lock);
2535
2536         err = vmxnet3_create_queues(adapter, VMXNET3_DEF_TX_RING_SIZE,
2537                                     VMXNET3_DEF_RX_RING_SIZE,
2538                                     VMXNET3_DEF_RX_RING_SIZE);
2539         if (err)
2540                 goto queue_err;
2541
2542         err = vmxnet3_activate_dev(adapter);
2543         if (err)
2544                 goto activate_err;
2545
2546         return 0;
2547
2548 activate_err:
2549         vmxnet3_rq_destroy_all(adapter);
2550         vmxnet3_tq_destroy_all(adapter);
2551 queue_err:
2552         return err;
2553 }
2554
2555
2556 static int
2557 vmxnet3_close(struct net_device *netdev)
2558 {
2559         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2560
2561         /*
2562          * Reset_work may be in the middle of resetting the device, wait for its
2563          * completion.
2564          */
2565         while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2566                 msleep(1);
2567
2568         vmxnet3_quiesce_dev(adapter);
2569
2570         vmxnet3_rq_destroy_all(adapter);
2571         vmxnet3_tq_destroy_all(adapter);
2572
2573         clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2574
2575
2576         return 0;
2577 }
2578
2579
2580 void
2581 vmxnet3_force_close(struct vmxnet3_adapter *adapter)
2582 {
2583         int i;
2584
2585         /*
2586          * we must clear VMXNET3_STATE_BIT_RESETTING, otherwise
2587          * vmxnet3_close() will deadlock.
2588          */
2589         BUG_ON(test_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state));
2590
2591         /* we need to enable NAPI, otherwise dev_close will deadlock */
2592         for (i = 0; i < adapter->num_rx_queues; i++)
2593                 napi_enable(&adapter->rx_queue[i].napi);
2594         dev_close(adapter->netdev);
2595 }
2596
2597
2598 static int
2599 vmxnet3_change_mtu(struct net_device *netdev, int new_mtu)
2600 {
2601         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2602         int err = 0;
2603
2604         if (new_mtu < VMXNET3_MIN_MTU || new_mtu > VMXNET3_MAX_MTU)
2605                 return -EINVAL;
2606
2607         netdev->mtu = new_mtu;
2608
2609         /*
2610          * Reset_work may be in the middle of resetting the device, wait for its
2611          * completion.
2612          */
2613         while (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2614                 msleep(1);
2615
2616         if (netif_running(netdev)) {
2617                 vmxnet3_quiesce_dev(adapter);
2618                 vmxnet3_reset_dev(adapter);
2619
2620                 /* we need to re-create the rx queue based on the new mtu */
2621                 vmxnet3_rq_destroy_all(adapter);
2622                 vmxnet3_adjust_rx_ring_size(adapter);
2623                 err = vmxnet3_rq_create_all(adapter);
2624                 if (err) {
2625                         printk(KERN_ERR "%s: failed to re-create rx queues,"
2626                                 " error %d. Closing it.\n", netdev->name, err);
2627                         goto out;
2628                 }
2629
2630                 err = vmxnet3_activate_dev(adapter);
2631                 if (err) {
2632                         printk(KERN_ERR "%s: failed to re-activate, error %d. "
2633                                 "Closing it.\n", netdev->name, err);
2634                         goto out;
2635                 }
2636         }
2637
2638 out:
2639         clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2640         if (err)
2641                 vmxnet3_force_close(adapter);
2642
2643         return err;
2644 }
2645
2646
2647 static void
2648 vmxnet3_declare_features(struct vmxnet3_adapter *adapter, bool dma64)
2649 {
2650         struct net_device *netdev = adapter->netdev;
2651
2652         netdev->hw_features = NETIF_F_SG | NETIF_F_RXCSUM |
2653                 NETIF_F_HW_CSUM | NETIF_F_HW_VLAN_TX |
2654                 NETIF_F_HW_VLAN_RX | NETIF_F_TSO | NETIF_F_TSO6 |
2655                 NETIF_F_LRO;
2656         if (dma64)
2657                 netdev->hw_features |= NETIF_F_HIGHDMA;
2658         netdev->vlan_features = netdev->hw_features &
2659                                 ~(NETIF_F_HW_VLAN_TX | NETIF_F_HW_VLAN_RX);
2660         netdev->features = netdev->hw_features | NETIF_F_HW_VLAN_FILTER;
2661
2662         netdev_info(adapter->netdev,
2663                 "features: sg csum vlan jf tso tsoIPv6 lro%s\n",
2664                 dma64 ? " highDMA" : "");
2665 }
2666
2667
2668 static void
2669 vmxnet3_read_mac_addr(struct vmxnet3_adapter *adapter, u8 *mac)
2670 {
2671         u32 tmp;
2672
2673         tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACL);
2674         *(u32 *)mac = tmp;
2675
2676         tmp = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_MACH);
2677         mac[4] = tmp & 0xff;
2678         mac[5] = (tmp >> 8) & 0xff;
2679 }
2680
2681 #ifdef CONFIG_PCI_MSI
2682
2683 /*
2684  * Enable MSI-X vectors.
2685  * Returns:
2686  *      0 on successful enabling of the required vectors,
2687  *      VMXNET3_LINUX_MIN_MSIX_VECT when only the minimum number of vectors
2688  *       required could be enabled,
2689  *      the number of vectors which could be enabled otherwise (this number
2690  *       is smaller than VMXNET3_LINUX_MIN_MSIX_VECT)
2691  */
2692
2693 static int
2694 vmxnet3_acquire_msix_vectors(struct vmxnet3_adapter *adapter,
2695                              int vectors)
2696 {
2697         int err = 0, vector_threshold;
2698         vector_threshold = VMXNET3_LINUX_MIN_MSIX_VECT;
2699
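             /* pci_enable_msix() returns 0 on success, a negative errno on
              * failure, or a positive count of the vectors available when
              * fewer than requested could be allocated */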
2700         while (vectors >= vector_threshold) {
2701                 err = pci_enable_msix(adapter->pdev, adapter->intr.msix_entries,
2702                                       vectors);
2703                 if (!err) {
2704                         adapter->intr.num_intrs = vectors;
2705                         return 0;
2706                 } else if (err < 0) {
2707                         printk(KERN_ERR "Failed to enable MSI-X for %s, error"
2708                                " %d\n", adapter->netdev->name, err);
2709                         vectors = 0;
2710                 } else if (err < vector_threshold) {
2711                         break;
2712                 } else {
2713                         /* If we fail to enable the required number of MSI-X
2714                          * vectors, try enabling the minimum number required.
2715                          */
2716                         printk(KERN_ERR "Failed to enable %d MSI-X for %s, try"
2717                                " %d instead\n", vectors, adapter->netdev->name,
2718                                vector_threshold);
2719                         vectors = vector_threshold;
2720                 }
2721         }
2722
2723         printk(KERN_INFO "Number of MSI-X interrupts which can be allocated"
2724                " is lower than the min threshold required.\n");
2725         return err;
2726 }
2727
2728
2729 #endif /* CONFIG_PCI_MSI */
2730
2731 static void
2732 vmxnet3_alloc_intr_resources(struct vmxnet3_adapter *adapter)
2733 {
2734         u32 cfg;
2735         unsigned long flags;
2736
2737         /* intr settings */
2738         spin_lock_irqsave(&adapter->cmd_lock, flags);
2739         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
2740                                VMXNET3_CMD_GET_CONF_INTR);
2741         cfg = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_CMD);
2742         spin_unlock_irqrestore(&adapter->cmd_lock, flags);
2743         adapter->intr.type = cfg & 0x3;
2744         adapter->intr.mask_mode = (cfg >> 2) & 0x3;
2745
2746         if (adapter->intr.type == VMXNET3_IT_AUTO) {
2747                 adapter->intr.type = VMXNET3_IT_MSIX;
2748         }
2749
2750 #ifdef CONFIG_PCI_MSI
2751         if (adapter->intr.type == VMXNET3_IT_MSIX) {
2752                 int vector, err = 0;
2753
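                     /* vector budget: one per tx queue (a single shared one
                      * with TXSHARE), one per rx queue (none extra with
                      * BUDDYSHARE, where each rx queue shares its buddy tx
                      * queue's vector), plus one for events */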
2754                 adapter->intr.num_intrs = (adapter->share_intr ==
2755                                            VMXNET3_INTR_TXSHARE) ? 1 :
2756                                            adapter->num_tx_queues;
2757                 adapter->intr.num_intrs += (adapter->share_intr ==
2758                                            VMXNET3_INTR_BUDDYSHARE) ? 0 :
2759                                            adapter->num_rx_queues;
2760                 adapter->intr.num_intrs += 1;           /* for link event */
2761
2762                 adapter->intr.num_intrs = (adapter->intr.num_intrs >
2763                                            VMXNET3_LINUX_MIN_MSIX_VECT
2764                                            ? adapter->intr.num_intrs :
2765                                            VMXNET3_LINUX_MIN_MSIX_VECT);
2766
2767                 for (vector = 0; vector < adapter->intr.num_intrs; vector++)
2768                         adapter->intr.msix_entries[vector].entry = vector;
2769
		req_vectors = adapter->intr.num_intrs;
		err = vmxnet3_acquire_msix_vectors(adapter, req_vectors);
		/* If we could not allocate one MSI-X vector per queue and
		 * had to fall back to the minimum, limit the number of rx
		 * queues to 1.
		 */
		if (!err && adapter->intr.num_intrs < req_vectors) {
			if (adapter->share_intr != VMXNET3_INTR_BUDDYSHARE
			    || adapter->num_rx_queues != 1) {
				adapter->share_intr = VMXNET3_INTR_TXSHARE;
				printk(KERN_ERR "Number of rx queues: 1\n");
				adapter->num_rx_queues = 1;
			}
			return;
		}
2786                 if (!err)
2787                         return;
2788
2789                 /* If we cannot allocate MSIx vectors use only one rx queue */
		printk(KERN_INFO "Failed to enable MSI-X for %s, error %d. "
		       "#rx queues : 1, trying MSI\n", adapter->netdev->name, err);
2792
2793                 adapter->intr.type = VMXNET3_IT_MSI;
2794         }
2795
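	/*
	 * MSI fallback: a single MSI vector cannot be spread across
	 * queues, so the rx side is collapsed to one queue here, with INTx
	 * remaining as the last resort below.
	 */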
2796         if (adapter->intr.type == VMXNET3_IT_MSI) {
2797                 int err;
2798                 err = pci_enable_msi(adapter->pdev);
2799                 if (!err) {
2800                         adapter->num_rx_queues = 1;
2801                         adapter->intr.num_intrs = 1;
2802                         return;
2803                 }
2804         }
2805 #endif /* CONFIG_PCI_MSI */
2806
2807         adapter->num_rx_queues = 1;
2808         printk(KERN_INFO "Using INTx interrupt, #Rx queues: 1.\n");
2809         adapter->intr.type = VMXNET3_IT_INTX;
2810
2811         /* INT-X related setting */
2812         adapter->intr.num_intrs = 1;
2813 }
2814
2815
2816 static void
2817 vmxnet3_free_intr_resources(struct vmxnet3_adapter *adapter)
2818 {
2819         if (adapter->intr.type == VMXNET3_IT_MSIX)
2820                 pci_disable_msix(adapter->pdev);
2821         else if (adapter->intr.type == VMXNET3_IT_MSI)
2822                 pci_disable_msi(adapter->pdev);
2823         else
2824                 BUG_ON(adapter->intr.type != VMXNET3_IT_INTX);
2825 }
2826
2827
2828 static void
2829 vmxnet3_tx_timeout(struct net_device *netdev)
2830 {
2831         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
2832         adapter->tx_timeout_count++;
2833
2834         printk(KERN_ERR "%s: tx hang\n", adapter->netdev->name);
2835         schedule_work(&adapter->work);
2836         netif_wake_queue(adapter->netdev);
2837 }
2838
2839
2840 static void
2841 vmxnet3_reset_work(struct work_struct *data)
2842 {
2843         struct vmxnet3_adapter *adapter;
2844
2845         adapter = container_of(data, struct vmxnet3_adapter, work);
2846
2847         /* if another thread is resetting the device, no need to proceed */
2848         if (test_and_set_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state))
2849                 return;
2850
2851         /* if the device is closed, we must leave it alone */
2852         rtnl_lock();
2853         if (netif_running(adapter->netdev)) {
2854                 printk(KERN_INFO "%s: resetting\n", adapter->netdev->name);
2855                 vmxnet3_quiesce_dev(adapter);
2856                 vmxnet3_reset_dev(adapter);
2857                 vmxnet3_activate_dev(adapter);
2858         } else {
2859                 printk(KERN_INFO "%s: already closed\n", adapter->netdev->name);
2860         }
2861         rtnl_unlock();
2862
2863         clear_bit(VMXNET3_STATE_BIT_RESETTING, &adapter->state);
2864 }
2865
2866
2867 static int __devinit
2868 vmxnet3_probe_device(struct pci_dev *pdev,
2869                      const struct pci_device_id *id)
2870 {
2871         static const struct net_device_ops vmxnet3_netdev_ops = {
2872                 .ndo_open = vmxnet3_open,
2873                 .ndo_stop = vmxnet3_close,
2874                 .ndo_start_xmit = vmxnet3_xmit_frame,
2875                 .ndo_set_mac_address = vmxnet3_set_mac_addr,
2876                 .ndo_change_mtu = vmxnet3_change_mtu,
2877                 .ndo_set_features = vmxnet3_set_features,
2878                 .ndo_get_stats64 = vmxnet3_get_stats64,
2879                 .ndo_tx_timeout = vmxnet3_tx_timeout,
2880                 .ndo_set_rx_mode = vmxnet3_set_mc,
2881                 .ndo_vlan_rx_add_vid = vmxnet3_vlan_rx_add_vid,
2882                 .ndo_vlan_rx_kill_vid = vmxnet3_vlan_rx_kill_vid,
2883 #ifdef CONFIG_NET_POLL_CONTROLLER
2884                 .ndo_poll_controller = vmxnet3_netpoll,
2885 #endif
2886         };
2887         int err;
	bool dma64 = false; /* silence a spurious gcc "uninitialized" warning */
2889         u32 ver;
2890         struct net_device *netdev;
2891         struct vmxnet3_adapter *adapter;
2892         u8 mac[ETH_ALEN];
2893         int size;
2894         int num_tx_queues;
2895         int num_rx_queues;
2896
2897         if (!pci_msi_enabled())
2898                 enable_mq = 0;
2899
2900 #ifdef VMXNET3_RSS
2901         if (enable_mq)
2902                 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
2903                                     (int)num_online_cpus());
2904         else
2905 #endif
2906                 num_rx_queues = 1;
2907         num_rx_queues = rounddown_pow_of_two(num_rx_queues);
2908
2909         if (enable_mq)
2910                 num_tx_queues = min(VMXNET3_DEVICE_MAX_TX_QUEUES,
2911                                     (int)num_online_cpus());
2912         else
2913                 num_tx_queues = 1;
2914
2915         num_tx_queues = rounddown_pow_of_two(num_tx_queues);
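	/*
	 * Both queue counts are rounded down to a power of two;
	 * presumably this lets the device map packets (e.g. RSS hash
	 * values) to queues with a simple mask rather than a divide.
	 */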
	netdev = alloc_etherdev_mq(sizeof(struct vmxnet3_adapter),
				   max(num_tx_queues, num_rx_queues));
	if (!netdev) {
		printk(KERN_ERR "Failed to alloc ethernet device for adapter "
			"%s\n", pci_name(pdev));
		return -ENOMEM;
	}

	printk(KERN_INFO "# of Tx queues : %d, # of Rx queues : %d\n",
	       num_tx_queues, num_rx_queues);
2926
2927         pci_set_drvdata(pdev, netdev);
2928         adapter = netdev_priv(netdev);
2929         adapter->netdev = netdev;
2930         adapter->pdev = pdev;
2931
2932         spin_lock_init(&adapter->cmd_lock);
2933         adapter->shared = pci_alloc_consistent(adapter->pdev,
2934                           sizeof(struct Vmxnet3_DriverShared),
2935                           &adapter->shared_pa);
2936         if (!adapter->shared) {
2937                 printk(KERN_ERR "Failed to allocate memory for %s\n",
2938                         pci_name(pdev));
2939                 err = -ENOMEM;
2940                 goto err_alloc_shared;
2941         }
2942
2943         adapter->num_rx_queues = num_rx_queues;
2944         adapter->num_tx_queues = num_tx_queues;
2945
2946         size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
2947         size += sizeof(struct Vmxnet3_RxQueueDesc) * adapter->num_rx_queues;
2948         adapter->tqd_start = pci_alloc_consistent(adapter->pdev, size,
2949                              &adapter->queue_desc_pa);
2950
2951         if (!adapter->tqd_start) {
2952                 printk(KERN_ERR "Failed to allocate memory for %s\n",
2953                         pci_name(pdev));
2954                 err = -ENOMEM;
2955                 goto err_alloc_queue_desc;
2956         }
2957         adapter->rqd_start = (struct Vmxnet3_RxQueueDesc *)(adapter->tqd_start +
2958                                                         adapter->num_tx_queues);
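	/*
	 * Tx and rx queue descriptors share one contiguous DMA
	 * allocation: all Vmxnet3_TxQueueDesc entries come first,
	 * immediately followed by the Vmxnet3_RxQueueDesc entries, so
	 * rqd_start is simply the address just past the last tx
	 * descriptor.
	 */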
2959
2960         adapter->pm_conf = kmalloc(sizeof(struct Vmxnet3_PMConf), GFP_KERNEL);
2961         if (adapter->pm_conf == NULL) {
2962                 printk(KERN_ERR "Failed to allocate memory for %s\n",
2963                         pci_name(pdev));
2964                 err = -ENOMEM;
2965                 goto err_alloc_pm;
2966         }
2967
2968 #ifdef VMXNET3_RSS
2969
2970         adapter->rss_conf = kmalloc(sizeof(struct UPT1_RSSConf), GFP_KERNEL);
2971         if (adapter->rss_conf == NULL) {
2972                 printk(KERN_ERR "Failed to allocate memory for %s\n",
2973                        pci_name(pdev));
2974                 err = -ENOMEM;
2975                 goto err_alloc_rss;
2976         }
2977 #endif /* VMXNET3_RSS */
2978
2979         err = vmxnet3_alloc_pci_resources(adapter, &dma64);
2980         if (err < 0)
2981                 goto err_alloc_pci;
2982
2983         ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_VRRS);
2984         if (ver & 1) {
2985                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_VRRS, 1);
2986         } else {
2987                 printk(KERN_ERR "Incompatible h/w version (0x%x) for adapter"
2988                        " %s\n", ver, pci_name(pdev));
2989                 err = -EBUSY;
2990                 goto err_ver;
2991         }
2992
2993         ver = VMXNET3_READ_BAR1_REG(adapter, VMXNET3_REG_UVRS);
2994         if (ver & 1) {
2995                 VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_UVRS, 1);
2996         } else {
2997                 printk(KERN_ERR "Incompatible upt version (0x%x) for "
2998                        "adapter %s\n", ver, pci_name(pdev));
2999                 err = -EBUSY;
3000                 goto err_ver;
3001         }
3002
3003         SET_NETDEV_DEV(netdev, &pdev->dev);
3004         vmxnet3_declare_features(adapter, dma64);
3005
3006         adapter->dev_number = atomic_read(&devices_found);
3007
	adapter->share_intr = irq_share_mode;
3009         if (adapter->share_intr == VMXNET3_INTR_BUDDYSHARE &&
3010             adapter->num_tx_queues != adapter->num_rx_queues)
3011                 adapter->share_intr = VMXNET3_INTR_DONTSHARE;
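	/*
	 * Buddy-sharing pairs tx queue i with rx queue i on a single
	 * vector, which only makes sense when the two queue counts match;
	 * otherwise fall back to a dedicated vector per queue.
	 */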
3012
3013         vmxnet3_alloc_intr_resources(adapter);
3014
3015 #ifdef VMXNET3_RSS
3016         if (adapter->num_rx_queues > 1 &&
3017             adapter->intr.type == VMXNET3_IT_MSIX) {
3018                 adapter->rss = true;
3019                 printk(KERN_INFO "RSS is enabled.\n");
3020         } else {
3021                 adapter->rss = false;
3022         }
3023 #endif
3024
3025         vmxnet3_read_mac_addr(adapter, mac);
	memcpy(netdev->dev_addr, mac, netdev->addr_len);
3027
3028         netdev->netdev_ops = &vmxnet3_netdev_ops;
3029         vmxnet3_set_ethtool_ops(netdev);
3030         netdev->watchdog_timeo = 5 * HZ;
3031
3032         INIT_WORK(&adapter->work, vmxnet3_reset_work);
3033
3034         if (adapter->intr.type == VMXNET3_IT_MSIX) {
3035                 int i;
3036                 for (i = 0; i < adapter->num_rx_queues; i++) {
3037                         netif_napi_add(adapter->netdev,
3038                                        &adapter->rx_queue[i].napi,
3039                                        vmxnet3_poll_rx_only, 64);
3040                 }
3041         } else {
3042                 netif_napi_add(adapter->netdev, &adapter->rx_queue[0].napi,
3043                                vmxnet3_poll, 64);
3044         }
3045
3046         netif_set_real_num_tx_queues(adapter->netdev, adapter->num_tx_queues);
3047         netif_set_real_num_rx_queues(adapter->netdev, adapter->num_rx_queues);
3048
3049         err = register_netdev(netdev);
3050
3051         if (err) {
3052                 printk(KERN_ERR "Failed to register adapter %s\n",
3053                         pci_name(pdev));
3054                 goto err_register;
3055         }
3056
3057         set_bit(VMXNET3_STATE_BIT_QUIESCED, &adapter->state);
3058         vmxnet3_check_link(adapter, false);
3059         atomic_inc(&devices_found);
3060         return 0;
3061
3062 err_register:
3063         vmxnet3_free_intr_resources(adapter);
3064 err_ver:
3065         vmxnet3_free_pci_resources(adapter);
3066 err_alloc_pci:
3067 #ifdef VMXNET3_RSS
3068         kfree(adapter->rss_conf);
3069 err_alloc_rss:
3070 #endif
3071         kfree(adapter->pm_conf);
3072 err_alloc_pm:
3073         pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
3074                             adapter->queue_desc_pa);
3075 err_alloc_queue_desc:
3076         pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
3077                             adapter->shared, adapter->shared_pa);
3078 err_alloc_shared:
3079         pci_set_drvdata(pdev, NULL);
3080         free_netdev(netdev);
3081         return err;
3082 }
3083
3084
3085 static void __devexit
3086 vmxnet3_remove_device(struct pci_dev *pdev)
3087 {
3088         struct net_device *netdev = pci_get_drvdata(pdev);
3089         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3090         int size = 0;
3091         int num_rx_queues;
3092
3093 #ifdef VMXNET3_RSS
3094         if (enable_mq)
3095                 num_rx_queues = min(VMXNET3_DEVICE_MAX_RX_QUEUES,
3096                                     (int)num_online_cpus());
3097         else
3098 #endif
3099                 num_rx_queues = 1;
3100         num_rx_queues = rounddown_pow_of_two(num_rx_queues);
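	/*
	 * Recompute the rx queue count the same way probe did: the queue
	 * descriptor area was sized with that value, while
	 * adapter->num_rx_queues may have been reduced since (e.g. when
	 * MSI-X was unavailable).
	 */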
3101
3102         cancel_work_sync(&adapter->work);
3103
3104         unregister_netdev(netdev);
3105
3106         vmxnet3_free_intr_resources(adapter);
3107         vmxnet3_free_pci_resources(adapter);
3108 #ifdef VMXNET3_RSS
3109         kfree(adapter->rss_conf);
3110 #endif
3111         kfree(adapter->pm_conf);
3112
3113         size = sizeof(struct Vmxnet3_TxQueueDesc) * adapter->num_tx_queues;
3114         size += sizeof(struct Vmxnet3_RxQueueDesc) * num_rx_queues;
3115         pci_free_consistent(adapter->pdev, size, adapter->tqd_start,
3116                             adapter->queue_desc_pa);
3117         pci_free_consistent(adapter->pdev, sizeof(struct Vmxnet3_DriverShared),
3118                             adapter->shared, adapter->shared_pa);
3119         free_netdev(netdev);
3120 }
3121
3122
3123 #ifdef CONFIG_PM
3124
3125 static int
3126 vmxnet3_suspend(struct device *device)
3127 {
3128         struct pci_dev *pdev = to_pci_dev(device);
3129         struct net_device *netdev = pci_get_drvdata(pdev);
3130         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3131         struct Vmxnet3_PMConf *pmConf;
3132         struct ethhdr *ehdr;
3133         struct arphdr *ahdr;
3134         u8 *arpreq;
3135         struct in_device *in_dev;
3136         struct in_ifaddr *ifa;
3137         unsigned long flags;
3138         int i = 0;
3139
3140         if (!netif_running(netdev))
3141                 return 0;
3142
3143         for (i = 0; i < adapter->num_rx_queues; i++)
3144                 napi_disable(&adapter->rx_queue[i].napi);
3145
3146         vmxnet3_disable_all_intrs(adapter);
3147         vmxnet3_free_irqs(adapter);
3148         vmxnet3_free_intr_resources(adapter);
3149
3150         netif_device_detach(netdev);
3151         netif_tx_stop_all_queues(netdev);
3152
3153         /* Create wake-up filters. */
3154         pmConf = adapter->pm_conf;
3155         memset(pmConf, 0, sizeof(*pmConf));
3156
3157         if (adapter->wol & WAKE_UCAST) {
3158                 pmConf->filters[i].patternSize = ETH_ALEN;
3159                 pmConf->filters[i].maskSize = 1;
3160                 memcpy(pmConf->filters[i].pattern, netdev->dev_addr, ETH_ALEN);
3161                 pmConf->filters[i].mask[0] = 0x3F; /* LSB ETH_ALEN bits */
3162
3163                 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3164                 i++;
3165         }
3166
3167         if (adapter->wol & WAKE_ARP) {
3168                 in_dev = in_dev_get(netdev);
3169                 if (!in_dev)
3170                         goto skip_arp;
3171
3172                 ifa = (struct in_ifaddr *)in_dev->ifa_list;
3173                 if (!ifa)
3174                         goto skip_arp;
3175
		pmConf->filters[i].patternSize = ETH_HLEN + /* Ethernet header */
			sizeof(struct arphdr) +         /* ARP header */
			2 * ETH_ALEN +          /* 2 Ethernet addresses */
			2 * sizeof(u32);        /* 2 IPv4 addresses */
3180                 pmConf->filters[i].maskSize =
3181                         (pmConf->filters[i].patternSize - 1) / 8 + 1;
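		/*
		 * The mask is a bitmap over the pattern: bit (n % 8) of
		 * mask[n / 8] must be set for pattern byte n to be
		 * compared, hence one mask byte covers eight pattern
		 * bytes.
		 */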
3182
3183                 /* ETH_P_ARP in Ethernet header. */
3184                 ehdr = (struct ethhdr *)pmConf->filters[i].pattern;
3185                 ehdr->h_proto = htons(ETH_P_ARP);
3186
3187                 /* ARPOP_REQUEST in ARP header. */
3188                 ahdr = (struct arphdr *)&pmConf->filters[i].pattern[ETH_HLEN];
3189                 ahdr->ar_op = htons(ARPOP_REQUEST);
3190                 arpreq = (u8 *)(ahdr + 1);
3191
3192                 /* The Unicast IPv4 address in 'tip' field. */
3193                 arpreq += 2 * ETH_ALEN + sizeof(u32);
3194                 *(u32 *)arpreq = ifa->ifa_address;
3195
3196                 /* The mask for the relevant bits. */
3197                 pmConf->filters[i].mask[0] = 0x00;
3198                 pmConf->filters[i].mask[1] = 0x30; /* ETH_P_ARP */
3199                 pmConf->filters[i].mask[2] = 0x30; /* ARPOP_REQUEST */
3200                 pmConf->filters[i].mask[3] = 0x00;
3201                 pmConf->filters[i].mask[4] = 0xC0; /* IPv4 TIP */
3202                 pmConf->filters[i].mask[5] = 0x03; /* IPv4 TIP */
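		/*
		 * Worked out against the layout above: mask[1] = 0x30
		 * selects bytes 12-13 (the EtherType), mask[2] = 0x30
		 * bytes 20-21 (the ARP opcode), and mask[4] = 0xC0 with
		 * mask[5] = 0x03 bytes 38-41 (the target IPv4 address).
		 */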
3203                 in_dev_put(in_dev);
3204
3205                 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_FILTER;
3206                 i++;
3207         }
3208
3209 skip_arp:
3210         if (adapter->wol & WAKE_MAGIC)
3211                 pmConf->wakeUpEvents |= VMXNET3_PM_WAKEUP_MAGIC;
3212
3213         pmConf->numFilters = i;
3214
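	/*
	 * Publish the filter table to the device: the shared area carries
	 * only a version, a length and the physical address of pmConf;
	 * the device reads the table when UPDATE_PMCFG is issued below.
	 */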
3215         adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3216         adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3217                                                                   *pmConf));
3218         adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
3219                                                                  pmConf));
3220
3221         spin_lock_irqsave(&adapter->cmd_lock, flags);
3222         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3223                                VMXNET3_CMD_UPDATE_PMCFG);
3224         spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3225
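	/*
	 * Standard PCI suspend sequence: save config space, arm wake-up
	 * according to the configured WoL flags, disable the device, then
	 * drop into the chosen low-power state.
	 */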
3226         pci_save_state(pdev);
3227         pci_enable_wake(pdev, pci_choose_state(pdev, PMSG_SUSPEND),
3228                         adapter->wol);
3229         pci_disable_device(pdev);
3230         pci_set_power_state(pdev, pci_choose_state(pdev, PMSG_SUSPEND));
3231
3232         return 0;
3233 }
3234
3235
3236 static int
3237 vmxnet3_resume(struct device *device)
3238 {
3239         int err, i = 0;
3240         unsigned long flags;
3241         struct pci_dev *pdev = to_pci_dev(device);
3242         struct net_device *netdev = pci_get_drvdata(pdev);
3243         struct vmxnet3_adapter *adapter = netdev_priv(netdev);
3244         struct Vmxnet3_PMConf *pmConf;
3245
3246         if (!netif_running(netdev))
3247                 return 0;
3248
3249         /* Destroy wake-up filters. */
3250         pmConf = adapter->pm_conf;
3251         memset(pmConf, 0, sizeof(*pmConf));
3252
3253         adapter->shared->devRead.pmConfDesc.confVer = cpu_to_le32(1);
3254         adapter->shared->devRead.pmConfDesc.confLen = cpu_to_le32(sizeof(
3255                                                                   *pmConf));
3256         adapter->shared->devRead.pmConfDesc.confPA = cpu_to_le64(virt_to_phys(
3257                                                                  pmConf));
3258
3259         netif_device_attach(netdev);
3260         pci_set_power_state(pdev, PCI_D0);
3261         pci_restore_state(pdev);
3262         err = pci_enable_device_mem(pdev);
3263         if (err != 0)
3264                 return err;
3265
3266         pci_enable_wake(pdev, PCI_D0, 0);
3267
3268         spin_lock_irqsave(&adapter->cmd_lock, flags);
3269         VMXNET3_WRITE_BAR1_REG(adapter, VMXNET3_REG_CMD,
3270                                VMXNET3_CMD_UPDATE_PMCFG);
3271         spin_unlock_irqrestore(&adapter->cmd_lock, flags);
3272         vmxnet3_alloc_intr_resources(adapter);
3273         vmxnet3_request_irqs(adapter);
3274         for (i = 0; i < adapter->num_rx_queues; i++)
3275                 napi_enable(&adapter->rx_queue[i].napi);
3276         vmxnet3_enable_all_intrs(adapter);
3277
3278         return 0;
3279 }
3280
3281 static const struct dev_pm_ops vmxnet3_pm_ops = {
3282         .suspend = vmxnet3_suspend,
3283         .resume = vmxnet3_resume,
3284 };
3285 #endif
3286
3287 static struct pci_driver vmxnet3_driver = {
3288         .name           = vmxnet3_driver_name,
3289         .id_table       = vmxnet3_pciid_table,
3290         .probe          = vmxnet3_probe_device,
3291         .remove         = __devexit_p(vmxnet3_remove_device),
3292 #ifdef CONFIG_PM
3293         .driver.pm      = &vmxnet3_pm_ops,
3294 #endif
3295 };
3296
3297
3298 static int __init
3299 vmxnet3_init_module(void)
3300 {
3301         printk(KERN_INFO "%s - version %s\n", VMXNET3_DRIVER_DESC,
3302                 VMXNET3_DRIVER_VERSION_REPORT);
3303         return pci_register_driver(&vmxnet3_driver);
3304 }
3305
3306 module_init(vmxnet3_init_module);
3307
3308
3309 static void
3310 vmxnet3_exit_module(void)
3311 {
3312         pci_unregister_driver(&vmxnet3_driver);
3313 }
3314
3315 module_exit(vmxnet3_exit_module);
3316
3317 MODULE_AUTHOR("VMware, Inc.");
3318 MODULE_DESCRIPTION(VMXNET3_DRIVER_DESC);
3319 MODULE_LICENSE("GPL v2");
3320 MODULE_VERSION(VMXNET3_DRIVER_VERSION_STRING);