static int napi_weight = 128;
module_param(napi_weight, int, 0444);
-static int csum = 1, gso = 1;
+static bool csum = true, gso = true;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);
#define GOOD_COPY_LEN 128
#define VIRTNET_SEND_COMMAND_SG_MAX 2
+#define VIRTNET_DRIVER_VERSION "1.0.0"
+
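+/* Per-cpu tx/rx packet and byte counters. The syncp seqcount lets
+ * readers on 32-bit machines fetch a consistent 64-bit snapshot
+ * (see virtnet_stats() below). */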
+struct virtnet_stats {
+ struct u64_stats_sync syncp;
+ u64 tx_bytes;
+ u64 tx_packets;
+
+ u64 rx_bytes;
+ u64 rx_packets;
+};
struct virtnet_info {
struct virtio_device *vdev;
/* Host will merge rx buffers for big packets (shake it! shake it!) */
bool mergeable_rx_bufs;
+ /* Active statistics */
+ struct virtnet_stats __percpu *stats;
+
/* Work struct for refilling if we run low on memory. */
struct delayed_work refill;
static void set_skb_frag(struct sk_buff *skb, struct page *page,
unsigned int offset, unsigned int *len)
{
+ int size = min((unsigned)PAGE_SIZE - offset, *len);
int i = skb_shinfo(skb)->nr_frags;
- skb_frag_t *f;
- f = &skb_shinfo(skb)->frags[i];
- f->size = min((unsigned)PAGE_SIZE - offset, *len);
- f->page_offset = offset;
- f->page = page;
+ __skb_fill_page_desc(skb, i, page, offset, size);
- skb->data_len += f->size;
- skb->len += f->size;
+ skb->data_len += size;
+ skb->len += size;
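+ /* Each fragment references a whole page, so charge a full page to
+ * truesize regardless of how much of it we actually use. */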
+ skb->truesize += PAGE_SIZE;
skb_shinfo(skb)->nr_frags++;
- *len -= f->size;
+ *len -= size;
}
+/* Called from bottom half context */
static struct sk_buff *page_to_skb(struct virtnet_info *vi,
struct page *page, unsigned int len)
{
len -= copy;
offset += copy;
+ /*
+ * Verify that we can indeed put this data into a skb.
+ * This is here to handle cases where the device erroneously
+ * tries to receive more data than is possible, which usually
+ * indicates a broken device.
+ */
+ if (unlikely(len > MAX_SKB_FRAGS * PAGE_SIZE)) {
+ if (net_ratelimit())
+ pr_debug("%s: too much data\n", skb->dev->name);
+ dev_kfree_skb(skb);
+ return NULL;
+ }
+
while (len) {
set_skb_frag(skb, page, offset, &len);
page = (struct page *)page->private;
skb->dev->stats.rx_length_errors++;
return -EINVAL;
}
-
page = virtqueue_get_buf(vi->rvq, &len);
if (!page) {
pr_debug("%s: rx error: %d buffers missing\n",
skb->dev->stats.rx_length_errors++;
return -EINVAL;
}
+
if (len > PAGE_SIZE)
len = PAGE_SIZE;
static void receive_buf(struct net_device *dev, void *buf, unsigned int len)
{
struct virtnet_info *vi = netdev_priv(dev);
+ struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
struct sk_buff *skb;
struct page *page;
struct skb_vnet_hdr *hdr;
}
hdr = skb_vnet_hdr(skb);
- skb->truesize += skb->data_len;
- dev->stats.rx_bytes += skb->len;
- dev->stats.rx_packets++;
+
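+ /* Writer side of the per-cpu counters: begin/end mark the update so
+ * a 32-bit reader in virtnet_stats() can detect the race and retry. */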
+ u64_stats_update_begin(&stats->syncp);
+ stats->rx_bytes += skb->len;
+ stats->rx_packets++;
+ u64_stats_update_end(&stats->syncp);
if (hdr->hdr.flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
pr_debug("Needs csum!\n");
hdr->hdr.csum_start,
hdr->hdr.csum_offset))
goto frame_err;
+ } else if (hdr->hdr.flags & VIRTIO_NET_HDR_F_DATA_VALID) {
+ skb->ip_summed = CHECKSUM_UNNECESSARY;
}
skb->protocol = eth_type_trans(skb, dev);
struct skb_vnet_hdr *hdr;
int err;
- skb = netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN);
+ skb = __netdev_alloc_skb_ip_align(vi->dev, MAX_PACKET_LEN, gfp);
if (unlikely(!skb))
return -ENOMEM;
skb_to_sgvec(skb, vi->rx_sg + 1, 0, skb->len);
- err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb);
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 2, skb, gfp);
if (err < 0)
dev_kfree_skb(skb);
/* chain first in list head */
first->private = (unsigned long)list;
err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, MAX_SKB_FRAGS + 2,
- first);
+ first, gfp);
if (err < 0)
give_pages(vi, first);
sg_init_one(vi->rx_sg, page_address(page), PAGE_SIZE);
- err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page);
+ err = virtqueue_add_buf(vi->rvq, vi->rx_sg, 0, 1, page, gfp);
if (err < 0)
give_pages(vi, page);
return err;
}
-/* Returns false if we couldn't fill entirely (OOM). */
+/*
+ * Returns false if we couldn't fill entirely (OOM).
+ *
+ * Normally run in the receive path, but can also be run from ndo_open
+ * before we're receiving packets, or from refill_work which is
+ * careful to disable receiving (using napi_disable).
+ */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
int err;
- bool oom = false;
+ bool oom;
do {
if (vi->mergeable_rx_bufs)
else
err = add_recvbuf_small(vi, gfp);
- if (err < 0) {
- oom = true;
+ oom = err == -ENOMEM;
+ if (err < 0)
break;
- }
++vi->num;
} while (err > 0);
if (unlikely(vi->num > vi->max))
}
}
+static void virtnet_napi_enable(struct virtnet_info *vi)
+{
+ napi_enable(&vi->napi);
+
+ /* If all buffers were filled by the other side before we napi_enabled,
+ * we won't get another interrupt, so process any outstanding packets
+ * now. virtnet_poll wants to re-enable the queue, so we disable here.
+ * We synchronize against interrupts via NAPI_STATE_SCHED. */
+ if (napi_schedule_prep(&vi->napi)) {
+ virtqueue_disable_cb(vi->rvq);
+ local_bh_disable();
+ __napi_schedule(&vi->napi);
+ local_bh_enable();
+ }
+}
+
static void refill_work(struct work_struct *work)
{
struct virtnet_info *vi;
vi = container_of(work, struct virtnet_info, refill.work);
napi_disable(&vi->napi);
still_empty = !try_fill_recv(vi, GFP_KERNEL);
- napi_enable(&vi->napi);
+ virtnet_napi_enable(vi);
/* In theory, this can happen: if we don't get any buffers in
* we will *never* try to fill again. */
if (still_empty)
- schedule_delayed_work(&vi->refill, HZ/2);
+ queue_delayed_work(system_nrt_wq, &vi->refill, HZ/2);
}
static int virtnet_poll(struct napi_struct *napi, int budget)
if (vi->num < vi->max / 2) {
if (!try_fill_recv(vi, GFP_ATOMIC))
- schedule_delayed_work(&vi->refill, 0);
+ queue_delayed_work(system_nrt_wq, &vi->refill, 0);
}
/* Out of packets? */
{
struct sk_buff *skb;
unsigned int len, tot_sgs = 0;
+ struct virtnet_stats *stats = this_cpu_ptr(vi->stats);
while ((skb = virtqueue_get_buf(vi->svq, &len)) != NULL) {
pr_debug("Sent skb %p\n", skb);
- vi->dev->stats.tx_bytes += skb->len;
- vi->dev->stats.tx_packets++;
+
+ u64_stats_update_begin(&stats->syncp);
+ stats->tx_bytes += skb->len;
+ stats->tx_packets++;
+ u64_stats_update_end(&stats->syncp);
+
tot_sgs += skb_vnet_hdr(skb)->num_sg;
dev_kfree_skb_any(skb);
}
if (skb->ip_summed == CHECKSUM_PARTIAL) {
hdr->hdr.flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
- hdr->hdr.csum_start = skb->csum_start - skb_headroom(skb);
+ hdr->hdr.csum_start = skb_checksum_start_offset(skb);
hdr->hdr.csum_offset = skb->csum_offset;
} else {
hdr->hdr.flags = 0;
hdr->num_sg = skb_to_sgvec(skb, vi->tx_sg + 1, 0, skb->len) + 1;
return virtqueue_add_buf(vi->svq, vi->tx_sg, hdr->num_sg,
- 0, skb);
+ 0, skb, GFP_ATOMIC);
}
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
struct virtnet_info *vi = netdev_priv(dev);
int capacity;
-again:
/* Free up any pending old buffers before queueing new ones. */
free_old_xmit_skbs(vi);
/* This can happen with OOM and indirect buffers. */
if (unlikely(capacity < 0)) {
- netif_stop_queue(dev);
- dev_warn(&dev->dev, "Unexpected full queue\n");
- if (unlikely(!virtqueue_enable_cb(vi->svq))) {
- virtqueue_disable_cb(vi->svq);
- netif_start_queue(dev);
- goto again;
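+ /* Queueing failed even after reclaiming completed buffers. Drop
+ * the packet and return NETDEV_TX_OK rather than NETDEV_TX_BUSY,
+ * which would just make the stack requeue and retry it. */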
+ if (likely(capacity == -ENOMEM)) {
+ if (net_ratelimit())
+ dev_warn(&dev->dev,
+ "TX queue failure: out of memory\n");
+ } else {
+ dev->stats.tx_fifo_errors++;
+ if (net_ratelimit())
+ dev_warn(&dev->dev,
+ "Unexpected TX queue failure: %d\n",
+ capacity);
}
- return NETDEV_TX_BUSY;
+ dev->stats.tx_dropped++;
+ kfree_skb(skb);
+ return NETDEV_TX_OK;
}
virtqueue_kick(vi->svq);
* before it gets out of hand. Naturally, this wastes entries. */
if (capacity < 2+MAX_SKB_FRAGS) {
netif_stop_queue(dev);
- if (unlikely(!virtqueue_enable_cb(vi->svq))) {
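+ /* Ask for a callback only once most of the ring has been consumed,
+ * so we are not interrupted for every completed packet while stopped. */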
+ if (unlikely(!virtqueue_enable_cb_delayed(vi->svq))) {
/* More just got used, free them then recheck. */
capacity += free_old_xmit_skbs(vi);
if (capacity >= 2+MAX_SKB_FRAGS) {
return 0;
}
+static struct rtnl_link_stats64 *virtnet_stats(struct net_device *dev,
+ struct rtnl_link_stats64 *tot)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ int cpu;
+ unsigned int start;
+
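+ /* Sum the per-cpu counters; fetch_begin/fetch_retry reread a CPU's
+ * counters if an update raced with us (this only spins on 32-bit). */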
+ for_each_possible_cpu(cpu) {
+ struct virtnet_stats *stats = per_cpu_ptr(vi->stats, cpu);
+ u64 tpackets, tbytes, rpackets, rbytes;
+
+ do {
+ start = u64_stats_fetch_begin(&stats->syncp);
+ tpackets = stats->tx_packets;
+ tbytes = stats->tx_bytes;
+ rpackets = stats->rx_packets;
+ rbytes = stats->rx_bytes;
+ } while (u64_stats_fetch_retry(&stats->syncp, start));
+
+ tot->rx_packets += rpackets;
+ tot->tx_packets += tpackets;
+ tot->rx_bytes += rbytes;
+ tot->tx_bytes += tbytes;
+ }
+
+ tot->tx_dropped = dev->stats.tx_dropped;
+ tot->tx_fifo_errors = dev->stats.tx_fifo_errors;
+ tot->rx_dropped = dev->stats.rx_dropped;
+ tot->rx_length_errors = dev->stats.rx_length_errors;
+ tot->rx_frame_errors = dev->stats.rx_frame_errors;
+
+ return tot;
+}
+
#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
{
struct virtnet_info *vi = netdev_priv(dev);
- napi_enable(&vi->napi);
+ /* Make sure we have some buffers: if we run out of memory, retry from the refill work. */
+ if (!try_fill_recv(vi, GFP_KERNEL))
+ queue_delayed_work(system_nrt_wq, &vi->refill, 0);
- /* If all buffers were filled by other side before we napi_enabled, we
- * won't get another interrupt, so process any outstanding packets
- * now. virtnet_poll wants re-enable the queue, so we disable here.
- * We synchronize against interrupts via NAPI_STATE_SCHED */
- if (napi_schedule_prep(&vi->napi)) {
- virtqueue_disable_cb(vi->rvq);
- __napi_schedule(&vi->napi);
- }
+ virtnet_napi_enable(vi);
return 0;
}
sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
sg_set_buf(&sg[out + in - 1], &status, sizeof(status));
- BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi) < 0);
+ BUG_ON(virtqueue_add_buf(vi->cvq, sg, out, in, vi, GFP_ATOMIC) < 0);
virtqueue_kick(vi->cvq);
{
struct virtnet_info *vi = netdev_priv(dev);
+ /* Make sure refill_work doesn't re-enable napi! */
+ cancel_delayed_work_sync(&vi->refill);
napi_disable(&vi->napi);
return 0;
}
-static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
-{
- struct virtnet_info *vi = netdev_priv(dev);
- struct virtio_device *vdev = vi->vdev;
-
- if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
- return -ENOSYS;
-
- return ethtool_op_set_tx_hw_csum(dev, data);
-}
-
static void virtnet_set_rx_mode(struct net_device *dev)
{
struct virtnet_info *vi = netdev_priv(dev);
kfree(buf);
}
-static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
+ return 0;
}
-static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
+static int virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
struct virtnet_info *vi = netdev_priv(dev);
struct scatterlist sg;
if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
+ return 0;
+}
+
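+/* The vring sizes are fixed when the virtqueues are created, so report
+ * them as both the current and the maximum ring parameters. */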
+static void virtnet_get_ringparam(struct net_device *dev,
+ struct ethtool_ringparam *ring)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+
+ ring->rx_max_pending = virtqueue_get_vring_size(vi->rvq);
+ ring->tx_max_pending = virtqueue_get_vring_size(vi->svq);
+ ring->rx_pending = ring->rx_max_pending;
+ ring->tx_pending = ring->tx_max_pending;
+}
+
+static void virtnet_get_drvinfo(struct net_device *dev,
+ struct ethtool_drvinfo *info)
+{
+ struct virtnet_info *vi = netdev_priv(dev);
+ struct virtio_device *vdev = vi->vdev;
+
+ strlcpy(info->driver, KBUILD_MODNAME, sizeof(info->driver));
+ strlcpy(info->version, VIRTNET_DRIVER_VERSION, sizeof(info->version));
+ strlcpy(info->bus_info, virtio_bus_name(vdev), sizeof(info->bus_info));
}
static const struct ethtool_ops virtnet_ethtool_ops = {
- .set_tx_csum = virtnet_set_tx_csum,
- .set_sg = ethtool_op_set_sg,
- .set_tso = ethtool_op_set_tso,
- .set_ufo = ethtool_op_set_ufo,
+ .get_drvinfo = virtnet_get_drvinfo,
.get_link = ethtool_op_get_link,
+ .get_ringparam = virtnet_get_ringparam,
};
#define MIN_MTU 68
.ndo_set_mac_address = virtnet_set_mac_address,
.ndo_set_rx_mode = virtnet_set_rx_mode,
.ndo_change_mtu = virtnet_change_mtu,
+ .ndo_get_stats64 = virtnet_stats,
.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
{
u16 v;
- if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
- return;
-
- vi->vdev->config->get(vi->vdev,
+ if (virtio_config_val(vi->vdev, VIRTIO_NET_F_STATUS,
offsetof(struct virtio_net_config, status),
- &v, sizeof(v));
+ &v) < 0)
+ return;
/* Ignore unknown (future) status bits */
v &= VIRTIO_NET_S_LINK_UP;
virtnet_update_status(vi);
}
+static int init_vqs(struct virtnet_info *vi)
+{
+ struct virtqueue *vqs[3];
+ vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
+ const char *names[] = { "input", "output", "control" };
+ int nvqs, err;
+
+ /* We expect two virtqueues, receive then send,
+ * and optionally control. */
+ nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
+
+ err = vi->vdev->config->find_vqs(vi->vdev, nvqs, vqs, callbacks, names);
+ if (err)
+ return err;
+
+ vi->rvq = vqs[0];
+ vi->svq = vqs[1];
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
+ vi->cvq = vqs[2];
+
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
+ vi->dev->features |= NETIF_F_HW_VLAN_FILTER;
+ }
+ return 0;
+}
+
static int virtnet_probe(struct virtio_device *vdev)
{
int err;
struct net_device *dev;
struct virtnet_info *vi;
- struct virtqueue *vqs[3];
- vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
- const char *names[] = { "input", "output", "control" };
- int nvqs;
/* Allocate ourselves a network device with room for our info */
dev = alloc_etherdev(sizeof(struct virtnet_info));
return -ENOMEM;
/* Set up network device as normal. */
+ dev->priv_flags |= IFF_UNICAST_FLT;
dev->netdev_ops = &virtnet_netdev;
dev->features = NETIF_F_HIGHDMA;
+
SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
SET_NETDEV_DEV(dev, &vdev->dev);
/* Do we support "hardware" checksums? */
- if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
/* This opens up the world of extra features. */
- dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
- if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
- dev->features |= NETIF_F_TSO | NETIF_F_UFO
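+ /* hw_features is what ethtool may toggle; features is what is
+ * enabled right now, gated by the csum/gso module parameters. */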
+ dev->hw_features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+ if (csum)
+ dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
+
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
+ dev->hw_features |= NETIF_F_TSO | NETIF_F_UFO
| NETIF_F_TSO_ECN | NETIF_F_TSO6;
}
/* Individual feature bits: what can host handle? */
- if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
- dev->features |= NETIF_F_TSO;
- if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
- dev->features |= NETIF_F_TSO6;
- if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
- dev->features |= NETIF_F_TSO_ECN;
- if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
- dev->features |= NETIF_F_UFO;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
+ dev->hw_features |= NETIF_F_TSO;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
+ dev->hw_features |= NETIF_F_TSO6;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
+ dev->hw_features |= NETIF_F_TSO_ECN;
+ if (virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
+ dev->hw_features |= NETIF_F_UFO;
+
+ if (gso)
+ dev->features |= dev->hw_features & (NETIF_F_ALL_TSO|NETIF_F_UFO);
+ /* (!csum && gso) case will be fixed by register_netdev() */
}
/* Configuration may specify what MAC to use. Otherwise random. */
- if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
- vdev->config->get(vdev,
+ if (virtio_config_val_len(vdev, VIRTIO_NET_F_MAC,
offsetof(struct virtio_net_config, mac),
- dev->dev_addr, dev->addr_len);
- } else
- random_ether_addr(dev->dev_addr);
+ dev->dev_addr, dev->addr_len) < 0)
+ eth_hw_addr_random(dev);
/* Set up our device-specific information */
vi = netdev_priv(dev);
vi->vdev = vdev;
vdev->priv = vi;
vi->pages = NULL;
+ vi->stats = alloc_percpu(struct virtnet_stats);
+ err = -ENOMEM;
+ if (vi->stats == NULL)
+ goto free;
+
INIT_DELAYED_WORK(&vi->refill, refill_work);
sg_init_table(vi->rx_sg, ARRAY_SIZE(vi->rx_sg));
sg_init_table(vi->tx_sg, ARRAY_SIZE(vi->tx_sg));
if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
vi->mergeable_rx_bufs = true;
- /* We expect two virtqueues, receive then send,
- * and optionally control. */
- nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;
-
- err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
+ err = init_vqs(vi);
if (err)
- goto free;
-
- vi->rvq = vqs[0];
- vi->svq = vqs[1];
-
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
- vi->cvq = vqs[2];
-
- if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
- dev->features |= NETIF_F_HW_VLAN_FILTER;
- }
+ goto free_stats;
err = register_netdev(dev);
if (err) {
goto unregister;
}
- vi->status = VIRTIO_NET_S_LINK_UP;
- virtnet_update_status(vi);
- netif_carrier_on(dev);
+ /* Assume link up if device can't report link status,
+ otherwise get link status from config. */
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS)) {
+ netif_carrier_off(dev);
+ virtnet_update_status(vi);
+ } else {
+ vi->status = VIRTIO_NET_S_LINK_UP;
+ netif_carrier_on(dev);
+ }
pr_debug("virtnet: registered device %s\n", dev->name);
return 0;
unregister:
unregister_netdev(dev);
- cancel_delayed_work_sync(&vi->refill);
free_vqs:
vdev->config->del_vqs(vdev);
+free_stats:
+ free_percpu(vi->stats);
free:
free_netdev(dev);
return err;
BUG_ON(vi->num != 0);
}
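+/* Teardown shared by virtnet_remove() and virtnet_freeze(): reset the
+ * device so it stops touching our buffers, then free them along with
+ * the vqs and any leftover pages. */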
+static void remove_vq_common(struct virtnet_info *vi)
+{
+ vi->vdev->config->reset(vi->vdev);
+
+ /* Free unused buffers in both send and recv, if any. */
+ free_unused_bufs(vi);
+
+ vi->vdev->config->del_vqs(vi->vdev);
+
+ while (vi->pages)
+ __free_pages(get_a_page(vi, GFP_KERNEL), 0);
+}
+
static void __devexit virtnet_remove(struct virtio_device *vdev)
{
struct virtnet_info *vi = vdev->priv;
- /* Stop all the virtqueues. */
- vdev->config->reset(vdev);
+ unregister_netdev(vi->dev);
+ remove_vq_common(vi);
- unregister_netdev(vi->dev);
+ free_percpu(vi->stats);
+ free_netdev(vi->dev);
+}
+
+#ifdef CONFIG_PM
+static int virtnet_freeze(struct virtio_device *vdev)
+{
+ struct virtnet_info *vi = vdev->priv;
+
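+ /* Quiesce for suspend: silence vq callbacks, detach the netdev,
+ * stop the refill work and napi, then tear the vqs down. */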
+ virtqueue_disable_cb(vi->rvq);
+ virtqueue_disable_cb(vi->svq);
+ if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ))
+ virtqueue_disable_cb(vi->cvq);
+
+ netif_device_detach(vi->dev);
cancel_delayed_work_sync(&vi->refill);
- /* Free unused buffers in both send and recv, if any. */
- free_unused_bufs(vi);
+ if (netif_running(vi->dev))
+ napi_disable(&vi->napi);
- vdev->config->del_vqs(vi->vdev);
+ remove_vq_common(vi);
- while (vi->pages)
- __free_pages(get_a_page(vi, GFP_KERNEL), 0);
+ return 0;
+}
- free_netdev(vi->dev);
+static int virtnet_restore(struct virtio_device *vdev)
+{
+ struct virtnet_info *vi = vdev->priv;
+ int err;
+
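+ /* Mirror of freeze: recreate the vqs, restart napi if the interface
+ * was running, then reattach and refill the rx ring. */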
+ err = init_vqs(vi);
+ if (err)
+ return err;
+
+ if (netif_running(vi->dev))
+ virtnet_napi_enable(vi);
+
+ netif_device_attach(vi->dev);
+
+ if (!try_fill_recv(vi, GFP_KERNEL))
+ queue_delayed_work(system_nrt_wq, &vi->refill, 0);
+
+ return 0;
}
+#endif
static struct virtio_device_id id_table[] = {
{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
.probe = virtnet_probe,
.remove = __devexit_p(virtnet_remove),
.config_changed = virtnet_config_changed,
+#ifdef CONFIG_PM
+ .freeze = virtnet_freeze,
+ .restore = virtnet_restore,
+#endif
};
static int __init init(void)