virtio_net: invoke softirqs after __napi_schedule
diff --git a/drivers/net/virtio_net.c b/drivers/net/virtio_net.c
index 74636c5..cbefe67 100644
 #include <linux/virtio_net.h>
 #include <linux/scatterlist.h>
 #include <linux/if_vlan.h>
+#include <linux/slab.h>
 
 static int napi_weight = 128;
 module_param(napi_weight, int, 0444);
 
-static int csum = 1, gso = 1;
+static bool csum = true, gso = true;
 module_param(csum, bool, 0444);
 module_param(gso, bool, 0444);
 
@@ -38,9 +39,18 @@ module_param(gso, bool, 0444);
 #define GOOD_COPY_LEN  128
 
 #define VIRTNET_SEND_COMMAND_SG_MAX    2
+#define VIRTNET_DRIVER_VERSION "1.0.0"
 
-struct virtnet_info
-{
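+/*
+ * Per-cpu counters.  u64_stats_sync lets readers fetch consistent 64-bit
+ * values without tearing on 32-bit hosts; see virtnet_stats() below.
+ */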
+struct virtnet_stats {
+       struct u64_stats_sync syncp;
+       u64 tx_bytes;
+       u64 tx_packets;
+
+       u64 rx_bytes;
+       u64 rx_packets;
+};
+
+struct virtnet_info {
        struct virtio_device *vdev;
        struct virtqueue *rvq, *svq, *cvq;
        struct net_device *dev;
@@ -56,15 +66,18 @@ struct virtnet_info
        /* Host will merge rx buffers for big packets (shake it! shake it!) */
        bool mergeable_rx_bufs;
 
-       /* Receive & send queues. */
-       struct sk_buff_head recv;
-       struct sk_buff_head send;
+       /* Active statistics */
+       struct virtnet_stats __percpu *stats;
 
        /* Work struct for refilling if we run low on memory. */
        struct delayed_work refill;
 
        /* Chain pages by the private ptr. */
        struct page *pages;
+
+       /* fragments + linear part + virtio header */
+       struct scatterlist rx_sg[MAX_SKB_FRAGS + 2];
+       struct scatterlist tx_sg[MAX_SKB_FRAGS + 2];
 };
 
 struct skb_vnet_hdr {
@@ -75,34 +88,44 @@ struct skb_vnet_hdr {
        unsigned int num_sg;
 };
 
+struct padded_vnet_hdr {
+       struct virtio_net_hdr hdr;
+       /*
+        * virtio_net_hdr should be in a separate sg buffer, because of a
+        * QEMU bug, and the data sg buffer shares a page with this header sg.
+        * This padding makes the next sg 16-byte aligned after the 10-byte
+        * virtio_net_hdr.
+        */
+       char padding[6];
+};
+
 static inline struct skb_vnet_hdr *skb_vnet_hdr(struct sk_buff *skb)
 {
        return (struct skb_vnet_hdr *)skb->cb;
 }
 
-static void give_a_page(struct virtnet_info *vi, struct page *page)
-{
-       page->private = (unsigned long)vi->pages;
-       vi->pages = page;
-}
-
-static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
+/*
+ * page->private is used to chain pages for big packets; put the whole
+ * most recently used list at the front of vi->pages for reuse.
+ */
+static void give_pages(struct virtnet_info *vi, struct page *page)
 {
-       unsigned int i;
+       struct page *end;
 
-       for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
-               give_a_page(vi, skb_shinfo(skb)->frags[i].page);
-       skb_shinfo(skb)->nr_frags = 0;
-       skb->data_len = 0;
+       /* Find end of list, sew whole thing into vi->pages. */
+       for (end = page; end->private; end = (struct page *)end->private);
+       end->private = (unsigned long)vi->pages;
+       vi->pages = page;
 }
 
 static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
 {
        struct page *p = vi->pages;
 
-       if (p)
+       if (p) {
                vi->pages = (struct page *)p->private;
-       else
+               /* clear private here; it is used to chain pages */
+               p->private = 0;
+       } else
                p = alloc_page(gfp_mask);
        return p;
 }
@@ -112,108 +135,166 @@ static void skb_xmit_done(struct virtqueue *svq)
        struct virtnet_info *vi = svq->vdev->priv;
 
        /* Suppress further interrupts. */
-       svq->vq_ops->disable_cb(svq);
+       virtqueue_disable_cb(svq);
 
        /* We were probably waiting for more output buffers. */
        netif_wake_queue(vi->dev);
 }
 
-static void receive_skb(struct net_device *dev, struct sk_buff *skb,
-                       unsigned len)
+static void set_skb_frag(struct sk_buff *skb, struct page *page,
+                        unsigned int offset, unsigned int *len)
 {
-       struct virtnet_info *vi = netdev_priv(dev);
-       struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
-       int err;
-       int i;
+       int size = min((unsigned)PAGE_SIZE - offset, *len);
+       int i = skb_shinfo(skb)->nr_frags;
 
-       if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
-               pr_debug("%s: short packet %i\n", dev->name, len);
-               dev->stats.rx_length_errors++;
-               goto drop;
-       }
+       __skb_fill_page_desc(skb, i, page, offset, size);
 
-       if (vi->mergeable_rx_bufs) {
-               unsigned int copy;
-               char *p = page_address(skb_shinfo(skb)->frags[0].page);
+       skb->data_len += size;
+       skb->len += size;
+       skb->truesize += PAGE_SIZE;
+       skb_shinfo(skb)->nr_frags++;
+       *len -= size;
+}
 
-               if (len > PAGE_SIZE)
-                       len = PAGE_SIZE;
-               len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);
+/* Called from bottom half context */
+static struct sk_buff *page_to_skb(struct virtnet_info *vi,
+                                  struct page *page, unsigned int len)
+{
+       struct sk_buff *skb;
+       struct skb_vnet_hdr *hdr;
+       unsigned int copy, hdr_len, offset;
+       char *p;
 
-               memcpy(&hdr->mhdr, p, sizeof(hdr->mhdr));
-               p += sizeof(hdr->mhdr);
+       p = page_address(page);
 
-               copy = len;
-               if (copy > skb_tailroom(skb))
-                       copy = skb_tailroom(skb);
+       /* copy small packet so we can reuse these pages for small data */
+       skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
+       if (unlikely(!skb))
+               return NULL;
 
-               memcpy(skb_put(skb, copy), p, copy);
+       hdr = skb_vnet_hdr(skb);
 
-               len -= copy;
+       if (vi->mergeable_rx_bufs) {
+               hdr_len = sizeof hdr->mhdr;
+               offset = hdr_len;
+       } else {
+               hdr_len = sizeof hdr->hdr;
+               offset = sizeof(struct padded_vnet_hdr);
+       }
 
-               if (!len) {
-                       give_a_page(vi, skb_shinfo(skb)->frags[0].page);
-                       skb_shinfo(skb)->nr_frags--;
-               } else {
-                       skb_shinfo(skb)->frags[0].page_offset +=
-                               sizeof(hdr->mhdr) + copy;
-                       skb_shinfo(skb)->frags[0].size = len;
-                       skb->data_len += len;
-                       skb->len += len;
-               }
+       memcpy(hdr, p, hdr_len);
 
-               while (--hdr->mhdr.num_buffers) {
-                       struct sk_buff *nskb;
+       len -= hdr_len;
+       p += offset;
 
-                       i = skb_shinfo(skb)->nr_frags;
-                       if (i >= MAX_SKB_FRAGS) {
-                               pr_debug("%s: packet too long %d\n", dev->name,
-                                        len);
-                               dev->stats.rx_length_errors++;
-                               goto drop;
-                       }
+       copy = len;
+       if (copy > skb_tailroom(skb))
+               copy = skb_tailroom(skb);
+       memcpy(skb_put(skb, copy), p, copy);
 
-                       nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
-                       if (!nskb) {
-                               pr_debug("%s: rx error: %d buffers missing\n",
-                                        dev->name, hdr->mhdr.num_buffers);
-                               dev->stats.rx_length_errors++;
-                               goto drop;
-                       }
+       len -= copy;
+       offset += copy;
 
-                       __skb_unlink(nskb, &vi->recv);
-                       vi->num--;
+       /*
+        * Verify that we can indeed put this data into a skb.
+        * This is here to handle cases when the device erroneously
+        * tries to receive more than is possible. This usually
+        * indicates a broken device.
+        */
+       if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
+               if (net_ratelimit())
+                       pr_debug("%s: too much data\n", skb->dev->name);
+               dev_kfree_skb(skb);
+               return NULL;
+       }
+
+       while (len) {
+               set_skb_frag(skb, page, offset, &len);
+               page = (struct page *)page->private;
+               offset = 0;
+       }
 
-                       skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
-                       skb_shinfo(nskb)->nr_frags = 0;
-                       kfree_skb(nskb);
+       if (page)
+               give_pages(vi, page);
 
-                       if (len > PAGE_SIZE)
-                               len = PAGE_SIZE;
+       return skb;
+}
 
-                       skb_shinfo(skb)->frags[i].size = len;
-                       skb_shinfo(skb)->nr_frags++;
-                       skb->data_len += len;
-                       skb->len += len;
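+/*
+ * With mergeable rx buffers the host may split one packet across several
+ * page-sized buffers; hdr->mhdr.num_buffers says how many.  Pull the
+ * remaining pages off the virtqueue and attach them to skb as fragments.
+ */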
+static int receive_mergeable(struct virtnet_info *vi, struct sk_buff *skb)
+{
+       struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
+       struct page *page;
+       int num_buf, i, len;
+
+       num_buf = hdr->mhdr.num_buffers;
+       while (--num_buf) {
+               i = skb_shinfo(skb)->nr_frags;
+               if (i >= MAX_SKB_FRAGS) {
+                       pr_debug("%s: packet too long\n", skb->dev->name);
+                       skb->dev->stats.rx_length_errors++;
+                       return -EINVAL;
                }
-       } else {
-               len -= sizeof(hdr->hdr);
+               page = virtqueue_get_buf(vi->rvq, &len);
+               if (!page) {
+                       pr_debug("%s: rx error: %d buffers missing\n",
+                                skb->dev->name, hdr->mhdr.num_buffers);
+                       skb->dev->stats.rx_length_errors++;
+                       return -EINVAL;
+               }
+
+               if (len > PAGE_SIZE)
+                       len = PAGE_SIZE;
+
+               set_skb_frag(skb, page, 0, &len);
+
+               --vi->num;
+       }
+       return 0;
+}
 
-               if (len <= MAX_PACKET_LEN)
-                       trim_pages(vi, skb);
+static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
+       struct sk_buff *skb;
+       struct page *page;
+       struct skb_vnet_hdr *hdr;
+
+       if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
+               pr_debug("%s: short packet %i\n", dev->name, len);
+               dev->stats.rx_length_errors++;
+               if (vi->mergeable_rx_bufs || vi->big_packets)
+                       give_pages(vi, buf);
+               else
+                       dev_kfree_skb(buf);
+               return;
+       }
 
-               err = pskb_trim(skb, len);
-               if (err) {
-                       pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
-                                len, err);
+       if (!vi->mergeable_rx_bufs && !vi->big_packets) {
+               skb = buf;
+               len -= sizeof(struct virtio_net_hdr);
+               skb_trim(skb, len);
+       } else {
+               page = buf;
+               skb = page_to_skb(vi, page, len);
+               if (unlikely(!skb)) {
                        dev->stats.rx_dropped++;
-                       goto drop;
+                       give_pages(vi, page);
+                       return;
                }
+               if (vi->mergeable_rx_bufs)
+                       if (receive_mergeable(vi, skb)) {
+                               dev_kfree_skb(skb);
+                               return;
+                       }
        }
 
-       skb->truesize += skb->data_len;
-       dev->stats.rx_bytes += skb->len;
-       dev->stats.rx_packets++;
+       hdr = skb_vnet_hdr(skb);
+
+       u64_stats_update_begin(&stats->syncp);
+       stats->rx_bytes += skb->len;
+       stats->rx_packets++;
+       u64_stats_update_end(&stats->syncp);
 
        if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
                pr_debug("Needs csum!\n");
@@ -221,6 +302,8 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
                                          hdr->hdr.csum_start,
                                          hdr->hdr.csum_offset))
                        goto frame_err;
+       } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
+               skb->ip_summed = CHECKSUM_UNNECESSARY;
        }
 
        skb->protocol = eth_type_trans(skb, dev);
@@ -267,114 +350,125 @@ static void receive_skb(struct net_device *dev, struct sk_buff *skb,
 
 frame_err:
        dev->stats.rx_frame_errors++;
-drop:
        dev_kfree_skb(skb);
 }
 
-static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_small(struct virtnet_info *vi, gfp_t gfp)
 {
        struct sk_buff *skb;
-       struct scatterlist sg[2+MAX_SKB_FRAGS];
-       int num, err, i;
-       bool oom = false;
-
-       sg_init_table(sg, 2+MAX_SKB_FRAGS);
-       do {
-               struct skb_vnet_hdr *hdr;
+       struct skb_vnet_hdr *hdr;
+       int err;
 
-               skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
-               if (unlikely(!skb)) {
-                       oom = true;
-                       break;
-               }
+       skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
+       if (unlikely(!skb))
+               return -ENOMEM;
 
-               skb_put(skb, MAX_PACKET_LEN);
+       skb_put(skb, MAX_PACKET_LEN);
 
-               hdr = skb_vnet_hdr(skb);
-               sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
+       hdr = skb_vnet_hdr(skb);
+       sg_set_buf(vi->rx_sg, &hdr->hdr, sizeof hdr->hdr);
 
-               if (vi->big_packets) {
-                       for (i = 0; i < MAX_SKB_FRAGS; i++) {
-                               skb_frag_t *f = &skb_shinfo(skb)->frags[i];
-                               f->page = get_a_page(vi, gfp);
-                               if (!f->page)
-                                       break;
+       skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
 
-                               f->page_offset = 0;
-                               f->size = PAGE_SIZE;
+       err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
+       if (err < 0)
+               dev_kfree_skb(skb);
 
-                               skb->data_len += PAGE_SIZE;
-                               skb->len += PAGE_SIZE;
+       return err;
+}
 
-                               skb_shinfo(skb)->nr_frags++;
-                       }
+static int add_recvbuf_big(struct virtnet_info *vi, gfp_t gfp)
+{
+       struct page *first, *list = NULL;
+       char *p;
+       int i, err, offset;
+
+       /* page in vi->rx_sg[MAX_SKB_FRAGS + 1] is list tail */
+       for (i = MAX_SKB_FRAGS + 1; i > 1; --i) {
+               first = get_a_page(vi, gfp);
+               if (!first) {
+                       if (list)
+                               give_pages(vi, list);
+                       return -ENOMEM;
                }
+               sg_set_buf(&vi->rx_sg[i], page_address(first), PAGE_SIZE);
 
-               num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
-               skb_queue_head(&vi->recv, skb);
+               /* chain the new page at the list head to match the sg order */
+               first->private = (unsigned long)list;
+               list = first;
+       }
 
-               err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
-               if (err < 0) {
-                       skb_unlink(skb, &vi->recv);
-                       trim_pages(vi, skb);
-                       kfree_skb(skb);
-                       break;
-               }
-               vi->num++;
-       } while (err >= num);
-       if (unlikely(vi->num > vi->max))
-               vi->max = vi->num;
-       vi->rvq->vq_ops->kick(vi->rvq);
-       return !oom;
+       first = get_a_page(vi, gfp);
+       if (!first) {
+               give_pages(vi, list);
+               return -ENOMEM;
+       }
+       p = page_address(first);
+
+       /* vi->rx_sg[0], vi->rx_sg[1] share the same page */
+       /* keep vi->rx_sg[0] separate, for the virtio_net_hdr only, due to the QEMU bug */
+       sg_set_buf(&vi->rx_sg[0], p, sizeof(struct virtio_net_hdr));
+
+       /* vi->rx_sg[1] for data packet, from offset */
+       offset = sizeof(struct padded_vnet_hdr);
+       sg_set_buf(&vi->rx_sg[1], p + offset, PAGE_SIZE - offset);
+
+       /* chain first in list head */
+       first->private = (unsigned long)list;
+       err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
+                               first, gfp);
+       if (err < 0)
+               give_pages(vi, first);
+
+       return err;
 }
 
-/* Returns false if we couldn't fill entirely (OOM). */
-static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
+static int add_recvbuf_mergeable(struct virtnet_info *vi, gfp_t gfp)
 {
-       struct sk_buff *skb;
-       struct scatterlist sg[1];
+       struct page *page;
        int err;
-       bool oom = false;
 
-       if (!vi->mergeable_rx_bufs)
-               return try_fill_recv_maxbufs(vi, gfp);
-
-       do {
-               skb_frag_t *f;
+       page = get_a_page(vi, gfp);
+       if (!page)
+               return -ENOMEM;
 
-               skb = netdev_alloc_skb_ip_align(vi->dev, GOOD_COPY_LEN);
-               if (unlikely(!skb)) {
-                       oom = true;
-                       break;
-               }
+       sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
 
-               f = &skb_shinfo(skb)->frags[0];
-               f->page = get_a_page(vi, gfp);
-               if (!f->page) {
-                       oom = true;
-                       kfree_skb(skb);
-                       break;
-               }
+       err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
+       if (err < 0)
+               give_pages(vi, page);
 
-               f->page_offset = 0;
-               f->size = PAGE_SIZE;
+       return err;
+}
 
-               skb_shinfo(skb)->nr_frags++;
+/*
+ * Returns false if we couldn't fill entirely (OOM).
+ *
+ * Normally run in the receive path, but can also be run from ndo_open
+ * before we're receiving packets, or from refill_work which is
+ * careful to disable receiving (using napi_disable).
+ */
+static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
+{
+       int err;
+       bool oom;
 
-               sg_init_one(sg, page_address(f->page), PAGE_SIZE);
-               skb_queue_head(&vi->recv, skb);
+       do {
+               if (vi->mergeable_rx_bufs)
+                       err = add_recvbuf_mergeable(vi, gfp);
+               else if (vi->big_packets)
+                       err = add_recvbuf_big(vi, gfp);
+               else
+                       err = add_recvbuf_small(vi, gfp);
 
-               err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
-               if (err < 0) {
-                       skb_unlink(skb, &vi->recv);
-                       kfree_skb(skb);
+               oom = err == -ENOMEM;
+               if (err < 0)
                        break;
-               }
-               vi->num++;
+               ++vi->num;
        } while (err > 0);
        if (unlikely(vi->num > vi->max))
                vi->max = vi->num;
-       vi->rvq->vq_ops->kick(vi->rvq);
+       virtqueue_kick(vi->rvq);
        return !oom;
 }
 
@@ -383,11 +477,27 @@ static void skb_recv_done(struct virtqueue *rvq)
        struct virtnet_info *vi = rvq->vdev->priv;
        /* Schedule NAPI, Suppress further interrupts if successful. */
        if (napi_schedule_prep(&vi->napi)) {
-               rvq->vq_ops->disable_cb(rvq);
+               virtqueue_disable_cb(rvq);
                __napi_schedule(&vi->napi);
        }
 }
 
+static void virtnet_napi_enable(struct virtnet_info *vi)
+{
+       napi_enable(&vi->napi);
+
+       /* If all buffers were filled by the other side before we napi_enabled,
+        * we won't get another interrupt, so process any outstanding packets
+        * now.  virtnet_poll wants to re-enable the queue, so we disable it
+        * here.  We synchronize against interrupts via NAPI_STATE_SCHED. */
+       if (napi_schedule_prep(&vi->napi)) {
+               virtqueue_disable_cb(vi->rvq);
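+               /* __napi_schedule() only raises NET_RX_SOFTIRQ; here we may
+                * be in process context (ndo_open or refill_work), where
+                * nothing would run the softirq, so bracket the call with
+                * local_bh_disable()/local_bh_enable() -- the latter invokes
+                * any pending softirqs. */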
+               local_bh_disable();
+               __napi_schedule(&vi->napi);
+               local_bh_enable();
+       }
+}
+
 static void refill_work(struct work_struct *work)
 {
        struct virtnet_info *vi;
@@ -395,42 +505,40 @@ static void refill_work(struct work_struct *work)
 
        vi = container_of(work, struct virtnet_info, refill.work);
        napi_disable(&vi->napi);
-       try_fill_recv(vi, GFP_KERNEL);
-       still_empty = (vi->num == 0);
-       napi_enable(&vi->napi);
+       still_empty = !try_fill_recv(vi, GFP_KERNEL);
+       virtnet_napi_enable(vi);
 
        /* In theory, this can happen: if we don't get any buffers in
         * we will *never* try to fill again. */
        if (still_empty)
-               schedule_delayed_work(&vi->refill, HZ/2);
+               queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
 }
 
 static int virtnet_poll(struct napi_struct *napi, int budget)
 {
        struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
-       struct sk_buff *skb = NULL;
+       void *buf;
        unsigned int len, received = 0;
 
 again:
        while (received < budget &&
-              (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
-               __skb_unlink(skb, &vi->recv);
-               receive_skb(vi->dev, skb, len);
-               vi->num--;
+              (buf = virtqueue_get_buf(vi->rvq, &len)) != NULL) {
+               receive_buf(vi->dev, buf, len);
+               --vi->num;
                received++;
        }
 
        if (vi->num < vi->max / 2) {
                if (!try_fill_recv(vi, GFP_ATOMIC))
-                       schedule_delayed_work(&vi->refill, 0);
+                       queue_delayed_work(system_nrt_wq, &vi->refill, 0);
        }
 
        /* Out of packets? */
        if (received < budget) {
                napi_complete(napi);
-               if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
-                   && napi_schedule_prep(napi)) {
-                       vi->rvq->vq_ops->disable_cb(vi->rvq);
+               if (unlikely(!virtqueue_enable_cb(vi->rvq)) &&
+                   napi_schedule_prep(napi)) {
+                       virtqueue_disable_cb(vi->rvq);
                        __napi_schedule(napi);
                        goto again;
                }
@@ -443,12 +551,16 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
 {
        struct sk_buff *skb;
        unsigned int len, tot_sgs = 0;
+       struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
 
-       while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
+       while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
                pr_debug("Sent skb %p\n", skb);
-               __skb_unlink(skb, &vi->send);
-               vi->dev->stats.tx_bytes += skb->len;
-               vi->dev->stats.tx_packets++;
+
+               u64_stats_update_begin(&stats->syncp);
+               stats->tx_bytes += skb->len;
+               stats->tx_packets++;
+               u64_stats_update_end(&stats->syncp);
+
                tot_sgs += skb_vnet_hdr(skb)->num_sg;
                dev_kfree_skb_any(skb);
        }
@@ -457,17 +569,14 @@ static unsigned int free_old_xmit_skbs(struct virtnet_info *vi)
 
 static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 {
-       struct scatterlist sg[2+MAX_SKB_FRAGS];
        struct skb_vnet_hdr *hdr = skb_vnet_hdr(skb);
        const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;
 
-       sg_init_table(sg, 2+MAX_SKB_FRAGS);
-
        pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);
 
        if (skb->ip_summed == CHECKSUM_PARTIAL) {
                hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
-               hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
+               hdr->hdr.csum_start = skb_checksum_start_offset(skb);
                hdr->hdr.csum_offset = skb->csum_offset;
        } else {
                hdr->hdr.flags = 0;
@@ -496,12 +605,13 @@ static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
 
        /* Encode metadata header at front. */
        if (vi->mergeable_rx_bufs)
-               sg_set_buf(sg, &hdr->mhdr, sizeof(hdr->mhdr));
+               sg_set_buf(vi->tx_sg, &hdr->mhdr, sizeof hdr->mhdr);
        else
-               sg_set_buf(sg, &hdr->hdr, sizeof(hdr->hdr));
+               sg_set_buf(vi->tx_sg, &hdr->hdr, sizeof hdr->hdr);
 
-       hdr->num_sg = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
-       return vi->svq->vq_ops->add_buf(vi->svq, sg, hdr->num_sg, 0, skb);
+       hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
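+       /* GFP_ATOMIC: we are called from start_xmit in bh context with the
+        * tx queue lock held. */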
+       return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
+                                0, skb, GFP_ATOMIC);
 }
 
 static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
@@ -509,7 +619,6 @@ static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
        struct virtnet_info *vi = netdev_priv(dev);
        int capacity;
 
-again:
        /* Free up any pending old buffers before queueing new ones. */
        free_old_xmit_skbs(vi);
 
@@ -518,25 +627,22 @@ again:
 
        /* This can happen with OOM and indirect buffers. */
        if (unlikely(capacity < 0)) {
-               netif_stop_queue(dev);
-               dev_warn(&dev->dev, "Unexpected full queue\n");
-               if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
-                       vi->svq->vq_ops->disable_cb(vi->svq);
-                       netif_start_queue(dev);
-                       goto again;
+               if (likely(capacity == -ENOMEM)) {
+                       if (net_ratelimit())
+                               dev_warn(&dev->dev,
+                                        "TX queue failure: out of memory\n");
+               } else {
+                       dev->stats.tx_fifo_errors++;
+                       if (net_ratelimit())
+                               dev_warn(&dev->dev,
+                                        "Unexpected TX queue failure: %d\n",
+                                        capacity);
                }
-               return NETDEV_TX_BUSY;
+               dev->stats.tx_dropped++;
+               kfree_skb(skb);
+               return NETDEV_TX_OK;
        }
-       vi->svq->vq_ops->kick(vi->svq);
-
-       /*
-        * Put new one in send queue.  You'd expect we'd need this before
-        * xmit_skb calls add_buf(), since the callback can be triggered
-        * immediately after that.  But since the callback just triggers
-        * another call back here, normal network xmit locking prevents the
-        * race.
-        */
-       __skb_queue_head(&vi->send, skb);
+       virtqueue_kick(vi->svq);
 
        /* Don't wait up for transmitted skbs to be freed. */
        skb_orphan(skb);
@@ -546,12 +652,12 @@ again:
         * before it gets out of hand.  Naturally, this wastes entries. */
        if (capacity < 2+MAX_SKB_FRAGS) {
                netif_stop_queue(dev);
-               if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
+               if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
                        /* More just got used, free them then recheck. */
                        capacity += free_old_xmit_skbs(vi);
                        if (capacity >= 2+MAX_SKB_FRAGS) {
                                netif_start_queue(dev);
-                               vi->svq->vq_ops->disable_cb(vi->svq);
+                               virtqueue_disable_cb(vi->svq);
                        }
                }
        }
@@ -576,6 +682,40 @@ static int virtnet_set_mac_address(struct net_device *dev, void *p)
        return 0;
 }
 
+static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
+                                              struct rtnl_link_stats64 *tot)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       int cpu;
+       unsigned int start;
+
+       for_each_possible_cpu(cpu) {
+               struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
+               u64 tpackets, tbytes, rpackets, rbytes;
+
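+               /* Retry the snapshot if a writer updated the counters
+                * mid-read (only needed on 32-bit SMP hosts). */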
+               do {
+                       start = u64_stats_fetch_begin(&stats->syncp);
+                       tpackets = stats->tx_packets;
+                       tbytes   = stats->tx_bytes;
+                       rpackets = stats->rx_packets;
+                       rbytes   = stats->rx_bytes;
+               } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+               tot->rx_packets += rpackets;
+               tot->tx_packets += tpackets;
+               tot->rx_bytes   += rbytes;
+               tot->tx_bytes   += tbytes;
+       }
+
+       tot->tx_dropped = dev->stats.tx_dropped;
+       tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+       tot->rx_dropped = dev->stats.rx_dropped;
+       tot->rx_length_errors = dev->stats.rx_length_errors;
+       tot->rx_frame_errors = dev->stats.rx_frame_errors;
+
+       return tot;
+}
+
 #ifdef CONFIG_NET_POLL_CONTROLLER
 static void virtnet_netpoll(struct net_device *dev)
 {
@@ -589,16 +729,11 @@ static int virtnet_open(struct net_device *dev)
 {
        struct virtnet_info *vi = netdev_priv(dev);
 
-       napi_enable(&vi->napi);
+       /* Make sure we have some buffers: if OOM, retry from the workqueue. */
+       if (!try_fill_recv(vi, GFP_KERNEL))
+               queue_delayed_work(system_nrt_wq, &vi->refill, 0);
 
-       /* If all buffers were filled by other side before we napi_enabled, we
-        * won't get another interrupt, so process any outstanding packets
-        * now.  virtnet_poll wants re-enable the queue, so we disable here.
-        * We synchronize against interrupts via NAPI_STATE_SCHED */
-       if (napi_schedule_prep(&vi->napi)) {
-               vi->rvq->vq_ops->disable_cb(vi->rvq);
-               __napi_schedule(&vi->napi);
-       }
+       virtnet_napi_enable(vi);
        return 0;
 }
 
@@ -633,15 +768,15 @@ static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
                sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
        sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
 
-       BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);
+       BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
 
-       vi->cvq->vq_ops->kick(vi->cvq);
+       virtqueue_kick(vi->cvq);
 
        /*
         * Spin for a response, the kick causes an ioport write, trapping
         * into the hypervisor, so the request should be handled immediately.
         */
-       while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
+       while (!virtqueue_get_buf(vi->cvq, &tmp))
                cpu_relax();
 
        return status == VIRTIO_NET_OK;
@@ -651,30 +786,22 @@ static int virtnet_close(struct net_device *dev)
 {
        struct virtnet_info *vi = netdev_priv(dev);
 
+       /* Make sure refill_work doesn't re-enable napi! */
+       cancel_delayed_work_sync(&vi->refill);
        napi_disable(&vi->napi);
 
        return 0;
 }
 
-static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
-{
-       struct virtnet_info *vi = netdev_priv(dev);
-       struct virtio_device *vdev = vi->vdev;
-
-       if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
-               return -ENOSYS;
-
-       return ethtool_op_set_tx_hw_csum(dev, data);
-}
-
 static void virtnet_set_rx_mode(struct net_device *dev)
 {
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg[2];
        u8 promisc, allmulti;
        struct virtio_net_ctrl_mac *mac_data;
-       struct dev_addr_list *addr;
        struct netdev_hw_addr *ha;
+       int uc_count;
+       int mc_count;
        void *buf;
        int i;
 
@@ -701,9 +828,12 @@ static void virtnet_set_rx_mode(struct net_device *dev)
                dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
                         allmulti ? "en" : "dis");
 
+       uc_count = netdev_uc_count(dev);
+       mc_count = netdev_mc_count(dev);
        /* MAC filter - use one buffer for both lists */
-       mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
-                                (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
+       buf = kzalloc(((uc_count + mc_count) * ETH_ALEN) +
+                     (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
+       mac_data = buf;
        if (!buf) {
                dev_warn(&dev->dev, "No memory for MAC address buffer\n");
                return;
@@ -712,24 +842,24 @@ static void virtnet_set_rx_mode(struct net_device *dev)
        sg_init_table(sg, 2);
 
        /* Store the unicast list and count in the front of the buffer */
-       mac_data->entries = dev->uc.count;
+       mac_data->entries = uc_count;
        i = 0;
-       list_for_each_entry(ha, &dev->uc.list, list)
+       netdev_for_each_uc_addr(ha, dev)
                memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
 
        sg_set_buf(&sg[0], mac_data,
-                  sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));
+                  sizeof(mac_data->entries) + (uc_count * ETH_ALEN));
 
        /* multicast list and count fill the end */
-       mac_data = (void *)&mac_data->macs[dev->uc.count][0];
+       mac_data = (void *)&mac_data->macs[uc_count][0];
 
-       mac_data->entries = dev->mc_count;
-       addr = dev->mc_list;
-       for (i = 0; i < dev->mc_count; i++, addr = addr->next)
-               memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);
+       mac_data->entries = mc_count;
+       i = 0;
+       netdev_for_each_mc_addr(ha, dev)
+               memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);
 
        sg_set_buf(&sg[1], mac_data,
-                  sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));
+                  sizeof(mac_data->entries) + (mc_count * ETH_ALEN));
 
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
                                  VIRTIO_NET_CTRL_MAC_TABLE_SET,
@@ -739,7 +869,7 @@ static void virtnet_set_rx_mode(struct net_device *dev)
        kfree(buf);
 }
 
-static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
 {
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;
@@ -749,9 +879,10 @@ static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
+       return 0;
 }
 
-static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
 {
        struct virtnet_info *vi = netdev_priv(dev);
        struct scatterlist sg;
@@ -761,14 +892,38 @@ static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
        if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
                                  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
                dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
+       return 0;
+}
+
+static void virtnet_get_ringparam(struct net_device *dev,
+                               struct ethtool_ringparam *ring)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+
+       ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq);
+       ring->tx_max_pending = virtqueue_get_vring_size(vi->svq);
+       ring->rx_pending = ring->rx_max_pending;
+       ring->tx_pending = ring->tx_max_pending;
+}
+
+static void virtnet_get_drvinfo(struct net_device *dev,
+                               struct ethtool_drvinfo *info)
+{
+       struct virtnet_info *vi = netdev_priv(dev);
+       struct virtio_device *vdev = vi->vdev;
+
+       strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+       strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
+       strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
 }
 
 static const struct ethtool_ops virtnet_ethtool_ops = {
-       .set_tx_csum = virtnet_set_tx_csum,
-       .set_sg = ethtool_op_set_sg,
-       .set_tso = ethtool_op_set_tso,
-       .set_ufo = ethtool_op_set_ufo,
+       .get_drvinfo = virtnet_get_drvinfo,
        .get_link = ethtool_op_get_link,
+       .get_ringparam = virtnet_get_ringparam,
 };
 
 #define MIN_MTU 68
@@ -790,6 +945,7 @@ static const struct net_device_ops virtnet_netdev = {
        .ndo_set_mac_address = virtnet_set_mac_address,
        .ndo_set_rx_mode     = virtnet_set_rx_mode,
        .ndo_change_mtu      = virtnet_change_mtu,
+       .ndo_get_stats64     = virtnet_stats,
        .ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
        .ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
 #ifdef CONFIG_NET_POLL_CONTROLLER
@@ -801,12 +957,10 @@ static void virtnet_update_status(struct virtnet_info *vi)
 {
        u16 v;
 
-       if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
-               return;
-
-       vi->vdev->config->get(vi->vdev,
+       if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
                              offsetof(struct virtio_net_config, status),
-                             &v, sizeof(v));
+                             &v) < 0)
+               return;
 
        /* Ignore unknown (future) status bits */
        v &= VIRTIO_NET_S_LINK_UP;
@@ -832,15 +986,38 @@ static void virtnet_config_changed(struct virtio_device *vdev)
        virtnet_update_status(vi);
 }
 
+static int init_vqs(struct virtnet_info *vi)
+{
+       struct virtqueue *vqs[3];
+       vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
+       const char *names[] = { "input", "output", "control" };
+       int nvqs, err;
+
+       /* We expect two virtqueues, receive then send,
+        * and optionally control. */
+       nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
+
+       err = vi->vdev->config->find_vqs(vi->vdev, nvqs, vqs, callbacks, names);
+       if (err)
+               return err;
+
+       vi->rvq = vqs[0];
+       vi->svq = vqs[1];
+
+       if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
+               vi->cvq = vqs[2];
+
+               if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
+                       vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
+       }
+       return 0;
+}
+
 static int virtnet_probe(struct virtio_device *vdev)
 {
        int err;
        struct net_device *dev;
        struct virtnet_info *vi;
-       struct virtqueue *vqs[3];
-       vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
-       const char *names[] = { "input", "output", "control" };
-       int nvqs;
 
        /* Allocate ourselves a network device with room for our info */
        dev = alloc_etherdev(sizeof(struct virtnet_info));
@@ -848,37 +1025,44 @@ static int virtnet_probe(struct virtio_device *vdev)
                return -ENOMEM;
 
        /* Set up network device as normal. */
+       dev->priv_flags |= IFF_UNICAST_FLT;
        dev->netdev_ops = &virtnet_netdev;
        dev->features = NETIF_F_HIGHDMA;
+
        SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
        SET_NETDEV_DEV(dev, &vdev->dev);
 
        /* Do we support "hardware" checksums? */
-       if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
                /* This opens up the world of extra features. */
-               dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
-               if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
-                       dev->features |= NETIF_F_TSO | NETIF_F_UFO
+               dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+               if (csum)
+                       dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+
+               if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
+                       dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
                                | NETIF_F_TSO_ECN | NETIF_F_TSO6;
                }
                /* Individual feature bits: what can host handle? */
-               if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
-                       dev->features |= NETIF_F_TSO;
-               if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
-                       dev->features |= NETIF_F_TSO6;
-               if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
-                       dev->features |= NETIF_F_TSO_ECN;
-               if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
-                       dev->features |= NETIF_F_UFO;
+               if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
+                       dev->hw_features |= NETIF_F_TSO;
+               if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
+                       dev->hw_features |= NETIF_F_TSO6;
+               if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
+                       dev->hw_features |= NETIF_F_TSO_ECN;
+               if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
+                       dev->hw_features |= NETIF_F_UFO;
+
+               if (gso)
+                       dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
+               /* (!csum && gso) case will be fixed by register_netdev() */
        }
 
        /* Configuration may specify what MAC to use.  Otherwise random. */
-       if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
-               vdev->config->get(vdev,
+       if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
                                  offsetof(struct virtio_net_config, mac),
-                                 dev->dev_addr, dev->addr_len);
-       } else
-               random_ether_addr(dev->dev_addr);
+                                 dev->dev_addr, dev->addr_len) < 0)
+               eth_hw_addr_random(dev);
 
        /* Set up our device-specific information */
        vi = netdev_priv(dev);
@@ -887,38 +1071,27 @@ static int virtnet_probe(struct virtio_device *vdev)
        vi->vdev = vdev;
        vdev->priv = vi;
        vi->pages = NULL;
+       vi->stats = alloc_percpu(struct virtnet_stats);
+       err = -ENOMEM;
+       if (vi->stats == NULL)
+               goto free;
+
        INIT_DELAYED_WORK(&vi->refill, refill_work);
+       sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
+       sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
 
        /* If we can receive ANY GSO packets, we must allocate large ones. */
-       if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
-           || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
-           || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
+       if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4) ||
+           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6) ||
+           virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
                vi->big_packets = true;
 
        if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
                vi->mergeable_rx_bufs = true;
 
-       /* We expect two virtqueues, receive then send,
-        * and optionally control. */
-       nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
-
-       err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
+       err = init_vqs(vi);
        if (err)
-               goto free;
-
-       vi->rvq = vqs[0];
-       vi->svq = vqs[1];
-
-       if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
-               vi->cvq = vqs[2];
-
-               if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
-                       dev->features |= NETIF_F_HW_VLAN_FILTER;
-       }
-
-       /* Initialize our empty receive and send queues. */
-       skb_queue_head_init(&vi->recv);
-       skb_queue_head_init(&vi->send);
+               goto free_stats;
 
        err = register_netdev(dev);
        if (err) {
@@ -935,51 +1108,119 @@ static int virtnet_probe(struct virtio_device *vdev)
                goto unregister;
        }
 
-       vi->status = VIRTIO_NET_S_LINK_UP;
-       virtnet_update_status(vi);
-       netif_carrier_on(dev);
+       /* Assume link up if device can't report link status,
+        * otherwise get link status from config. */
+       if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+               netif_carrier_off(dev);
+               virtnet_update_status(vi);
+       } else {
+               vi->status = VIRTIO_NET_S_LINK_UP;
+               netif_carrier_on(dev);
+       }
 
        pr_debug("virtnet: registered device %s\n", dev->name);
        return 0;
 
 unregister:
        unregister_netdev(dev);
-       cancel_delayed_work_sync(&vi->refill);
 free_vqs:
        vdev->config->del_vqs(vdev);
+free_stats:
+       free_percpu(vi->stats);
 free:
        free_netdev(dev);
        return err;
 }
 
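+/*
+ * Called once the device has been reset: detach and free any buffers it
+ * still held, so skbs (small mode) and page chains (big/mergeable modes)
+ * are not leaked.
+ */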
+static void free_unused_bufs(struct virtnet_info *vi)
+{
+       void *buf;
+       while (1) {
+               buf = virtqueue_detach_unused_buf(vi->svq);
+               if (!buf)
+                       break;
+               dev_kfree_skb(buf);
+       }
+       while (1) {
+               buf = virtqueue_detach_unused_buf(vi->rvq);
+               if (!buf)
+                       break;
+               if (vi->mergeable_rx_bufs || vi->big_packets)
+                       give_pages(vi, buf);
+               else
+                       dev_kfree_skb(buf);
+               --vi->num;
+       }
+       BUG_ON(vi->num != 0);
+}
+
+static void remove_vq_common(struct virtnet_info *vi)
+{
+       vi->vdev->config->reset(vi->vdev);
+
+       /* Free unused buffers in both send and recv, if any. */
+       free_unused_bufs(vi);
+
+       vi->vdev->config->del_vqs(vi->vdev);
+
+       while (vi->pages)
+               __free_pages(get_a_page(vi, GFP_KERNEL), 0);
+}
+
 static void __devexit virtnet_remove(struct virtio_device *vdev)
 {
        struct virtnet_info *vi = vdev->priv;
-       struct sk_buff *skb;
 
-       /* Stop all the virtqueues. */
-       vdev->config->reset(vdev);
+       unregister_netdev(vi->dev);
 
-       /* Free our skbs in send and recv queues, if any. */
-       while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
-               kfree_skb(skb);
-               vi->num--;
-       }
-       __skb_queue_purge(&vi->send);
+       remove_vq_common(vi);
 
-       BUG_ON(vi->num != 0);
+       free_percpu(vi->stats);
+       free_netdev(vi->dev);
+}
 
-       unregister_netdev(vi->dev);
+#ifdef CONFIG_PM
+static int virtnet_freeze(struct virtio_device *vdev)
+{
+       struct virtnet_info *vi = vdev->priv;
+
+       virtqueue_disable_cb(vi->rvq);
+       virtqueue_disable_cb(vi->svq);
+       if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
+               virtqueue_disable_cb(vi->cvq);
+
+       netif_device_detach(vi->dev);
        cancel_delayed_work_sync(&vi->refill);
 
-       vdev->config->del_vqs(vi->vdev);
+       if (netif_running(vi->dev))
+               napi_disable(&vi->napi);
 
-       while (vi->pages)
-               __free_pages(get_a_page(vi, GFP_KERNEL), 0);
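+       /* This resets the device; virtnet_restore() rebuilds the
+        * virtqueues from scratch on resume. */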
+       remove_vq_common(vi);
 
-       free_netdev(vi->dev);
+       return 0;
 }
 
+static int virtnet_restore(struct virtio_device *vdev)
+{
+       struct virtnet_info *vi = vdev->priv;
+       int err;
+
+       err = init_vqs(vi);
+       if (err)
+               return err;
+
+       if (netif_running(vi->dev))
+               virtnet_napi_enable(vi);
+
+       netif_device_attach(vi->dev);
+
+       if (!try_fill_recv(vi, GFP_KERNEL))
+               queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+
+       return 0;
+}
+#endif
+
 static struct virtio_device_id id_table[] = {
        { VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
        { 0 },
@@ -1004,6 +1245,10 @@ static struct virtio_driver virtio_net_driver = {
        .probe =        virtnet_probe,
        .remove =       __devexit_p(virtnet_remove),
        .config_changed = virtnet_config_changed,
+#ifdef CONFIG_PM
+       .freeze =       virtnet_freeze,
+       .restore =      virtnet_restore,
+#endif
 };
 
 static int __init init(void)