1 /******************************************************************************
2 * Virtual network driver for conversing with remote driver backends.
4 * Copyright (c) 2002-2005, K A Fraser
5 * Copyright (c) 2005, XenSource Ltd
6 * Copyright (C) 2007 Solarflare Communications, Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
33 #include <linux/module.h>
34 #include <linux/version.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/string.h>
39 #include <linux/errno.h>
40 #include <linux/netdevice.h>
41 #include <linux/inetdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/skbuff.h>
44 #include <linux/init.h>
45 #include <linux/bitops.h>
46 #include <linux/ethtool.h>
48 #include <linux/if_ether.h>
50 #include <linux/moduleparam.h>
52 #include <net/pkt_sched.h>
53 #include <net/route.h>
54 #include <asm/uaccess.h>
55 #include <xen/evtchn.h>
56 #include <xen/xenbus.h>
57 #include <xen/interface/io/netif.h>
58 #include <xen/interface/memory.h>
59 #include <xen/balloon.h>
61 #include <asm/maddr.h>
63 #include <xen/interface/grant_table.h>
64 #include <xen/gnttab.h>
65 #include <xen/net-util.h>
72 #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
77 * Mutually-exclusive module options to select receive data path:
78 * rx_copy : Packets are copied by network backend into local memory
79 * rx_flip : Page containing packet data is transferred to our ownership
80 * For fully-virtualised guests there is no option - copying must be used.
81 * For paravirtualised guests, flipping is the default.
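*
* Example usage (the module name "netfront" is an assumption here):
*   modprobe netfront rx_copy=1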
84 static bool MODPARM_rx_copy;
85 module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
86 MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
87 static bool MODPARM_rx_flip;
88 module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
89 MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
91 # define MODPARM_rx_copy true
92 # define MODPARM_rx_flip false
95 #define RX_COPY_THRESHOLD 256
97 /* If we don't have GSO, fake things up so that we never try to use it. */
98 #if defined(NETIF_F_GSO)
100 #define HAVE_TSO 1 /* TSO is a subset of GSO */
101 #define HAVE_CSUM_OFFLOAD 1
102 static inline void dev_disable_gso_features(struct net_device *dev)
104 /* Turn off all GSO bits except ROBUST. */
105 dev->features &= ~NETIF_F_GSO_MASK;
106 dev->features |= NETIF_F_GSO_ROBUST;
108 #elif defined(NETIF_F_TSO)
112 /* Some older kernels cannot cope with incorrect checksums,
113 * particularly in netfilter. I'm not sure there is 100% correlation
114 * with the presence of NETIF_F_TSO but it appears to be a good first approximation.
117 #define HAVE_CSUM_OFFLOAD 0
119 #define gso_size tso_size
120 #define gso_segs tso_segs
121 static inline void dev_disable_gso_features(struct net_device *dev)
123 /* Turn off all TSO bits. */
124 dev->features &= ~NETIF_F_TSO;
126 static inline int skb_is_gso(const struct sk_buff *skb)
128 return skb_shinfo(skb)->tso_size;
130 static inline int skb_gso_ok(struct sk_buff *skb, int features)
132 return (features & NETIF_F_TSO);
135 #define netif_skb_features(skb) ((skb)->dev->features)
136 static inline int netif_needs_gso(struct sk_buff *skb, int features)
138 return skb_is_gso(skb) &&
139 (!skb_gso_ok(skb, features) ||
140 unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
145 #define HAVE_CSUM_OFFLOAD 0
146 #define netif_needs_gso(skb, feat) 0
147 #define dev_disable_gso_features(dev) ((void)0)
148 #define ethtool_op_set_tso(dev, data) (-ENOSYS)
151 struct netfront_rx_info {
152 struct netif_rx_response rx;
153 struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
157 * Implement our own carrier flag: the network stack's version causes delays
158 * when the carrier is re-enabled (in particular, dev_activate() may not
159 * immediately be called, which can cause packet loss).
161 #define netfront_carrier_on(netif) ((netif)->carrier = 1)
162 #define netfront_carrier_off(netif) ((netif)->carrier = 0)
163 #define netfront_carrier_ok(netif) ((netif)->carrier)
166 * Access macros for acquiring/freeing slots in tx_skbs[].
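* A free slot stores the index of the next free slot, cast to a pointer,
* while an in-use slot holds the skb pointer itself. Indices are always
* below PAGE_OFFSET and real pointers are at or above it, which is how the
* two cases are told apart (see the recovery comment in network_connect()).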
169 static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
172 list[0] = (void *)(unsigned long)id;
175 static inline unsigned short get_id_from_freelist(struct sk_buff **list)
177 unsigned int id = (unsigned int)(unsigned long)list[0];
182 static inline int xennet_rxidx(RING_IDX idx)
184 return idx & (NET_RX_RING_SIZE - 1);
187 static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
190 int i = xennet_rxidx(ri);
191 struct sk_buff *skb = np->rx_skbs[i];
192 np->rx_skbs[i] = NULL;
196 static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
199 int i = xennet_rxidx(ri);
200 grant_ref_t ref = np->grant_rx_ref[i];
201 np->grant_rx_ref[i] = GRANT_INVALID_REF;
205 #define DPRINTK(fmt, args...) \
206 pr_debug("netfront (%s:%d) " fmt, \
207 __FUNCTION__, __LINE__, ##args)
208 #define IPRINTK(fmt, args...) pr_info("netfront: " fmt, ##args)
209 #define WPRINTK(fmt, args...) pr_warning("netfront: " fmt, ##args)
211 static int setup_device(struct xenbus_device *, struct netfront_info *);
212 static struct net_device *create_netdev(struct xenbus_device *);
214 static void end_access(int, void *);
215 static void netif_release_rings(struct netfront_info *);
216 static void netif_disconnect_backend(struct netfront_info *);
218 static int network_connect(struct net_device *);
219 static void network_tx_buf_gc(struct net_device *);
220 static void network_alloc_rx_buffers(struct net_device *);
222 static irqreturn_t netif_int(int irq, void *dev_id);
225 static int xennet_sysfs_addif(struct net_device *netdev);
226 static void xennet_sysfs_delif(struct net_device *netdev);
227 #else /* !CONFIG_SYSFS */
228 #define xennet_sysfs_addif(dev) (0)
229 #define xennet_sysfs_delif(dev) do { } while(0)
232 static inline bool xennet_can_sg(struct net_device *dev)
234 return dev->features & NETIF_F_SG;
238 * Work around net.ipv4.conf.*.arp_notify not being enabled by default.
240 static void __devinit netfront_enable_arp_notify(struct netfront_info *info)
243 struct in_device *in_dev;
246 in_dev = __in_dev_get_rtnl(info->netdev);
247 if (in_dev && !IN_DEV_CONF_GET(in_dev, ARP_NOTIFY))
248 IN_DEV_CONF_SET(in_dev, ARP_NOTIFY, 1);
251 pr_warn("Cannot enable ARP notification on %s\n",
252 info->xbdev->nodename);
257 * Entry point to this code when a new device is created. Allocate the basic
258 * structures and the ring buffers for communication with the backend, and
259 * inform the backend of the appropriate details for those.
261 static int __devinit netfront_probe(struct xenbus_device *dev,
262 const struct xenbus_device_id *id)
265 struct net_device *netdev;
266 struct netfront_info *info;
268 netdev = create_netdev(dev);
269 if (IS_ERR(netdev)) {
270 err = PTR_ERR(netdev);
271 xenbus_dev_fatal(dev, err, "creating netdev");
275 info = netdev_priv(netdev);
276 dev_set_drvdata(&dev->dev, info);
278 err = register_netdev(info->netdev);
280 pr_warning("%s: register_netdev err=%d\n",
285 netfront_enable_arp_notify(info);
287 err = xennet_sysfs_addif(info->netdev);
289 unregister_netdev(info->netdev);
290 pr_warning("%s: add sysfs failed err=%d\n",
299 dev_set_drvdata(&dev->dev, NULL);
303 static int __devexit netfront_remove(struct xenbus_device *dev)
305 struct netfront_info *info = dev_get_drvdata(&dev->dev);
307 DPRINTK("%s\n", dev->nodename);
309 netfront_accelerator_call_remove(info, dev);
311 netif_disconnect_backend(info);
313 del_timer_sync(&info->rx_refill_timer);
315 xennet_sysfs_delif(info->netdev);
317 unregister_netdev(info->netdev);
319 free_percpu(info->stats);
321 free_netdev(info->netdev);
327 static int netfront_suspend(struct xenbus_device *dev)
329 struct netfront_info *info = dev_get_drvdata(&dev->dev);
330 return netfront_accelerator_suspend(info, dev);
334 static int netfront_suspend_cancel(struct xenbus_device *dev)
336 struct netfront_info *info = dev_get_drvdata(&dev->dev);
337 return netfront_accelerator_suspend_cancel(info, dev);
342 * We are reconnecting to the backend, due to a suspend/resume, or a backend
343 * driver restart. We tear down our netif structure and recreate it, but
344 * leave the device-layer structures intact so that this is transparent to the
345 * rest of the kernel.
347 static int netfront_resume(struct xenbus_device *dev)
349 struct netfront_info *info = dev_get_drvdata(&dev->dev);
351 DPRINTK("%s\n", dev->nodename);
353 netfront_accelerator_resume(info, dev);
355 netif_disconnect_backend(info);
359 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
361 char *s, *e, *macstr;
364 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
366 return PTR_ERR(macstr);
368 for (i = 0; i < ETH_ALEN; i++) {
369 mac[i] = simple_strtoul(s, &e, 16);
370 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
381 /* Common code used when first setting up, and when resuming. */
382 static int talk_to_backend(struct xenbus_device *dev,
383 struct netfront_info *info)
386 struct xenbus_transaction xbt;
389 /* Read mac only in the first setup. */
390 if (!is_valid_ether_addr(info->mac)) {
391 err = xen_net_read_mac(dev, info->mac);
393 xenbus_dev_fatal(dev, err, "parsing %s/mac",
399 /* Create shared ring, alloc event channel. */
400 err = setup_device(dev, info);
404 /* This will load an accelerator if one is configured when the watch fires. */
406 netfront_accelerator_add_watch(info);
409 err = xenbus_transaction_start(&xbt);
411 xenbus_dev_fatal(dev, err, "starting transaction");
415 err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
418 message = "writing tx ring-ref";
419 goto abort_transaction;
421 err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
424 message = "writing rx ring-ref";
425 goto abort_transaction;
427 err = xenbus_printf(xbt, dev->nodename,
428 "event-channel", "%u",
429 irq_to_evtchn_port(info->irq));
431 message = "writing event-channel";
432 goto abort_transaction;
435 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
436 info->copying_receiver);
438 message = "writing request-rx-copy";
439 goto abort_transaction;
442 err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
444 message = "writing feature-rx-notify";
445 goto abort_transaction;
448 err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload",
449 "%d", !HAVE_CSUM_OFFLOAD);
451 message = "writing feature-no-csum-offload";
452 goto abort_transaction;
455 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
457 message = "writing feature-sg";
458 goto abort_transaction;
461 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d",
464 message = "writing feature-gso-tcpv4";
465 goto abort_transaction;
468 err = xenbus_transaction_end(xbt, 0);
472 xenbus_dev_fatal(dev, err, "completing transaction");
479 xenbus_transaction_end(xbt, 1);
480 xenbus_dev_fatal(dev, err, "%s", message);
482 netfront_accelerator_call_remove(info, dev);
483 netif_disconnect_backend(info);
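/*
 * Allocate the shared tx/rx ring pages, grant the backend access to them,
 * copy the MAC address into the net_device, and bind an event-channel
 * interrupt (netif_int) to the backend domain.
 */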
488 static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
490 struct netif_tx_sring *txs;
491 struct netif_rx_sring *rxs;
493 struct net_device *netdev = info->netdev;
495 info->tx_ring_ref = GRANT_INVALID_REF;
496 info->rx_ring_ref = GRANT_INVALID_REF;
497 info->rx.sring = NULL;
498 info->tx.sring = NULL;
501 txs = (struct netif_tx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
504 xenbus_dev_fatal(dev, err, "allocating tx ring page");
507 SHARED_RING_INIT(txs);
508 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
510 err = xenbus_grant_ring(dev, virt_to_mfn(txs));
512 free_page((unsigned long)txs);
515 info->tx_ring_ref = err;
517 rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_NOIO | __GFP_HIGH);
520 xenbus_dev_fatal(dev, err, "allocating rx ring page");
523 SHARED_RING_INIT(rxs);
524 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
526 err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
528 free_page((unsigned long)rxs);
531 info->rx_ring_ref = err;
533 memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
535 err = bind_listening_port_to_irqhandler(
536 dev->otherend_id, netif_int, 0, netdev->name, netdev);
544 netif_release_rings(info);
549 * Callback received when the backend's state changes.
551 static void backend_changed(struct xenbus_device *dev,
552 enum xenbus_state backend_state)
554 struct netfront_info *np = dev_get_drvdata(&dev->dev);
555 struct net_device *netdev = np->netdev;
557 DPRINTK("%s\n", xenbus_strstate(backend_state));
559 switch (backend_state) {
560 case XenbusStateInitialising:
561 case XenbusStateInitialised:
562 case XenbusStateReconfiguring:
563 case XenbusStateReconfigured:
564 case XenbusStateUnknown:
565 case XenbusStateClosed:
568 case XenbusStateInitWait:
569 if (dev->state != XenbusStateInitialising)
571 if (network_connect(netdev) != 0)
573 xenbus_switch_state(dev, XenbusStateConnected);
576 case XenbusStateConnected:
577 netif_notify_peers(netdev);
580 case XenbusStateClosing:
581 xenbus_frontend_closed(dev);
586 static inline int netfront_tx_slot_available(struct netfront_info *np)
588 return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
589 (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
593 static inline void network_maybe_wake_tx(struct net_device *dev)
595 struct netfront_info *np = netdev_priv(dev);
597 if (unlikely(netif_queue_stopped(dev)) &&
598 netfront_tx_slot_available(np) &&
599 likely(netif_running(dev)) &&
600 netfront_check_accelerator_queue_ready(dev, np))
601 netif_wake_queue(dev);
605 int netfront_check_queue_ready(struct net_device *dev)
607 struct netfront_info *np = netdev_priv(dev);
609 return unlikely(netif_queue_stopped(dev)) &&
610 netfront_tx_slot_available(np) &&
611 likely(netif_running(dev));
613 EXPORT_SYMBOL(netfront_check_queue_ready);
615 static int network_open(struct net_device *dev)
617 struct netfront_info *np = netdev_priv(dev);
619 napi_enable(&np->napi);
621 spin_lock_bh(&np->rx_lock);
622 if (netfront_carrier_ok(np)) {
623 network_alloc_rx_buffers(dev);
624 np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
625 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){
626 netfront_accelerator_call_stop_napi_irq(np, dev);
628 napi_schedule(&np->napi);
631 spin_unlock_bh(&np->rx_lock);
633 netif_start_queue(dev);
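/*
 * Reclaim transmit slots that the backend has responded to: end foreign
 * access on each grant, return it to the tx grant-reference pool, free the
 * skb and put the slot id back on the free list, then re-arm rsp_event and
 * wake the queue if slots became available.
 */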
638 static void network_tx_buf_gc(struct net_device *dev)
642 struct netfront_info *np = netdev_priv(dev);
645 BUG_ON(!netfront_carrier_ok(np));
648 prod = np->tx.sring->rsp_prod;
649 rmb(); /* Ensure we see responses up to 'rp'. */
651 for (cons = np->tx.rsp_cons; cons != prod; cons++) {
652 struct netif_tx_response *txrsp;
654 txrsp = RING_GET_RESPONSE(&np->tx, cons);
655 if (txrsp->status == XEN_NETIF_RSP_NULL)
659 skb = np->tx_skbs[id];
660 if (unlikely(gnttab_query_foreign_access(
661 np->grant_tx_ref[id]) != 0)) {
662 pr_alert("network_tx_buf_gc: grant still"
663 " in use by backend domain\n");
666 gnttab_end_foreign_access_ref(np->grant_tx_ref[id]);
667 gnttab_release_grant_reference(
668 &np->gref_tx_head, np->grant_tx_ref[id]);
669 np->grant_tx_ref[id] = GRANT_INVALID_REF;
670 add_id_to_freelist(np->tx_skbs, id);
671 dev_kfree_skb_irq(skb);
674 np->tx.rsp_cons = prod;
677 * Set a new event, then check for race with update of tx_cons.
678 * Note that it is essential to schedule a callback, no matter
679 * how few buffers are pending. Even if there is space in the
680 * transmit ring, higher layers may be blocked because too much
681 * data is outstanding: in such cases notification from Xen is
682 * likely to be the only kick that we'll get.
684 np->tx.sring->rsp_event =
685 prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
687 } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
689 network_maybe_wake_tx(dev);
692 static void rx_refill_timeout(unsigned long data)
694 struct net_device *dev = (struct net_device *)data;
695 struct netfront_info *np = netdev_priv(dev);
697 netfront_accelerator_call_stop_napi_irq(np, dev);
699 napi_schedule(&np->napi);
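/*
 * Refill the receive ring: allocate skb+page pairs in a batch, then post an
 * rx request for each, granting the page to the backend either for copying
 * (foreign access) or, on the flip path, by transferring page ownership
 * together with the necessary P2M and decrease-reservation updates.
 */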
702 static void network_alloc_rx_buffers(struct net_device *dev)
705 struct netfront_info *np = netdev_priv(dev);
708 int i, batch_target, notify;
709 RING_IDX req_prod = np->rx.req_prod_pvt;
710 struct xen_memory_reservation reservation;
715 netif_rx_request_t *req;
717 if (unlikely(!netfront_carrier_ok(np)))
721 * Allocate skbuffs greedily, even though we batch updates to the
722 * receive ring. This creates a less bursty demand on the memory
723 * allocator, so should reduce the chance of failed allocation requests
724 * both for ourselves and for other kernel subsystems.
726 batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
727 for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
729 * Allocate an skb and a page. Do not use __dev_alloc_skb as
730 * that will allocate page-sized buffers, which is not appropriate here.
732 * 16 bytes added as necessary headroom for netif_receive_skb.
734 skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN,
735 GFP_ATOMIC | __GFP_NOWARN);
739 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
743 /* Any skbuffs queued for refill? Force them out. */
746 /* Could not allocate any skbuffs. Try again later. */
747 mod_timer(&np->rx_refill_timer,
752 skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */
753 __skb_fill_page_desc(skb, 0, page, 0, 0);
754 skb_shinfo(skb)->nr_frags = 1;
755 __skb_queue_tail(&np->rx_batch, skb);
758 /* Is the batch large enough to be worthwhile? */
759 if (i < (np->rx_target/2)) {
760 if (req_prod > np->rx.sring->req_prod)
765 /* Adjust our fill target if we risked running out of buffers. */
766 if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
767 ((np->rx_target *= 2) > np->rx_max_target))
768 np->rx_target = np->rx_max_target;
771 for (nr_flips = i = 0; ; i++) {
772 if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
777 id = xennet_rxidx(req_prod + i);
779 BUG_ON(np->rx_skbs[id]);
780 np->rx_skbs[id] = skb;
782 ref = gnttab_claim_grant_reference(&np->gref_rx_head);
783 BUG_ON((signed short)ref < 0);
784 np->grant_rx_ref[id] = ref;
786 page = skb_frag_page(skb_shinfo(skb)->frags);
787 pfn = page_to_pfn(page);
788 vaddr = page_address(page);
790 req = RING_GET_REQUEST(&np->rx, req_prod + i);
791 if (!np->copying_receiver) {
792 gnttab_grant_foreign_transfer_ref(ref,
793 np->xbdev->otherend_id,
795 np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn);
796 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
797 /* Remove this page before passing back to Xen. */
799 set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
800 MULTI_update_va_mapping(np->rx_mcl+i,
801 (unsigned long)vaddr,
806 gnttab_grant_foreign_access_ref(ref,
807 np->xbdev->otherend_id,
816 if (nr_flips != 0) {
817 /* Tell the balloon driver what is going on. */
818 balloon_update_driver_allowance(i);
820 set_xen_guest_handle(reservation.extent_start,
822 reservation.nr_extents = nr_flips;
823 reservation.extent_order = 0;
824 reservation.address_bits = 0;
825 reservation.domid = DOMID_SELF;
827 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
828 /* After all PTEs have been zapped, flush the TLB. */
829 np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
830 UVMF_TLB_FLUSH|UVMF_ALL;
832 /* Give away a batch of pages. */
833 np->rx_mcl[i].op = __HYPERVISOR_memory_op;
834 np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
835 np->rx_mcl[i].args[1] = (unsigned long)&reservation;
837 /* Zap PTEs and give away pages in one big multicall. */
839 if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1)))
842 /* Check return status of HYPERVISOR_memory_op(). */
843 if (unlikely(np->rx_mcl[i].result != i))
844 panic("Unable to reduce memory reservation\n");
846 BUG_ON(np->rx_mcl[nr_flips].result);
848 if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
850 panic("Unable to reduce memory reservation\n");
856 /* Above is a suitable barrier to ensure backend will see requests. */
857 np->rx.req_prod_pvt = req_prod + i;
859 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
861 notify_remote_via_irq(np->irq);
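/*
 * Grant additional tx slots for the remainder of a packet: first any part of
 * the linear data area that crosses page boundaries, then one request per
 * page fragment, each chained with XEN_NETTXF_more_data and carrying a
 * read-only grant for the backend.
 */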
864 static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
865 struct netif_tx_request *tx)
867 struct netfront_info *np = netdev_priv(dev);
868 char *data = skb->data;
870 RING_IDX prod = np->tx.req_prod_pvt;
871 int frags = skb_shinfo(skb)->nr_frags;
872 unsigned int offset = offset_in_page(data);
873 unsigned int len = skb_headlen(skb);
878 while (len > PAGE_SIZE - offset) {
879 tx->size = PAGE_SIZE - offset;
880 tx->flags |= XEN_NETTXF_more_data;
885 id = get_id_from_freelist(np->tx_skbs);
886 np->tx_skbs[id] = skb_get(skb);
887 tx = RING_GET_REQUEST(&np->tx, prod++);
889 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
890 BUG_ON((signed short)ref < 0);
892 mfn = virt_to_mfn(data);
893 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
896 tx->gref = np->grant_tx_ref[id] = ref;
902 for (i = 0; i < frags; i++) {
903 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
905 tx->flags |= XEN_NETTXF_more_data;
907 id = get_id_from_freelist(np->tx_skbs);
908 np->tx_skbs[id] = skb_get(skb);
909 tx = RING_GET_REQUEST(&np->tx, prod++);
911 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
912 BUG_ON((signed short)ref < 0);
914 mfn = pfn_to_mfn(page_to_pfn(skb_frag_page(frag)));
915 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
918 tx->gref = np->grant_tx_ref[id] = ref;
919 tx->offset = frag->page_offset;
920 tx->size = skb_frag_size(frag);
924 np->tx.req_prod_pvt = prod;
927 static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
930 struct netfront_info *np = netdev_priv(dev);
931 struct netfront_stats *stats = this_cpu_ptr(np->stats);
932 struct netif_tx_request *tx;
933 struct netif_extra_info *extra;
934 char *data = skb->data;
937 unsigned long mfn, flags;
939 int frags = skb_shinfo(skb)->nr_frags;
940 unsigned int offset = offset_in_page(data);
941 unsigned int len = skb_headlen(skb);
943 /* Check the fast path, if hooks are available */
944 if (np->accel_vif_state.hooks &&
945 np->accel_vif_state.hooks->start_xmit(skb, dev)) {
946 /* Fast path has sent this packet */
950 frags += DIV_ROUND_UP(offset + len, PAGE_SIZE);
951 if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
952 pr_alert("xennet: skb rides the rocket: %d frags\n", frags);
957 spin_lock_irqsave(&np->tx_lock, flags);
959 if (unlikely(!netfront_carrier_ok(np) ||
960 (frags > 1 && !xennet_can_sg(dev)) ||
961 netif_needs_gso(skb, netif_skb_features(skb)))) {
962 spin_unlock_irqrestore(&np->tx_lock, flags);
966 i = np->tx.req_prod_pvt;
968 id = get_id_from_freelist(np->tx_skbs);
969 np->tx_skbs[id] = skb;
971 tx = RING_GET_REQUEST(&np->tx, i);
974 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
975 BUG_ON((signed short)ref < 0);
976 mfn = virt_to_mfn(data);
977 gnttab_grant_foreign_access_ref(
978 ref, np->xbdev->otherend_id, mfn, GTF_readonly);
979 tx->gref = np->grant_tx_ref[id] = ref;
986 if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
987 tx->flags |= XEN_NETTXF_csum_blank | XEN_NETTXF_data_validated;
988 else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
989 tx->flags |= XEN_NETTXF_data_validated;
992 if (skb_shinfo(skb)->gso_size) {
993 struct netif_extra_info *gso = (struct netif_extra_info *)
994 RING_GET_REQUEST(&np->tx, ++i);
997 extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
999 tx->flags |= XEN_NETTXF_extra_info;
1001 gso->u.gso.size = skb_shinfo(skb)->gso_size;
1002 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
1004 gso->u.gso.features = 0;
1006 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
1012 np->tx.req_prod_pvt = i + 1;
1014 xennet_make_frags(skb, dev, tx);
1015 tx->size = skb->len;
1017 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
1019 notify_remote_via_irq(np->irq);
1021 u64_stats_update_begin(&stats->syncp);
1022 stats->tx_bytes += skb->len;
1023 stats->tx_packets++;
1024 u64_stats_update_end(&stats->syncp);
1025 dev->trans_start = jiffies;
1027 /* Note: It is not safe to access skb after network_tx_buf_gc()! */
1028 network_tx_buf_gc(dev);
1030 if (!netfront_tx_slot_available(np))
1031 netif_stop_queue(dev);
1033 spin_unlock_irqrestore(&np->tx_lock, flags);
1035 return NETDEV_TX_OK;
1038 dev->stats.tx_dropped++;
1040 return NETDEV_TX_OK;
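/*
 * Event-channel interrupt handler: under tx_lock, garbage-collect completed
 * transmits and, if there are unconsumed rx responses, schedule NAPI.
 */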
1043 static irqreturn_t netif_int(int irq, void *dev_id)
1045 struct net_device *dev = dev_id;
1046 struct netfront_info *np = netdev_priv(dev);
1047 unsigned long flags;
1049 spin_lock_irqsave(&np->tx_lock, flags);
1051 if (likely(netfront_carrier_ok(np))) {
1052 network_tx_buf_gc(dev);
1053 /* Under tx_lock: protects access to rx shared-ring indexes. */
1054 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) {
1055 netfront_accelerator_call_stop_napi_irq(np, dev);
1057 napi_schedule(&np->napi);
1061 spin_unlock_irqrestore(&np->tx_lock, flags);
1066 static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
1069 int new = xennet_rxidx(np->rx.req_prod_pvt);
1071 BUG_ON(np->rx_skbs[new]);
1072 np->rx_skbs[new] = skb;
1073 np->grant_rx_ref[new] = ref;
1074 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
1075 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
1076 np->rx.req_prod_pvt++;
1079 int xennet_get_extras(struct netfront_info *np,
1080 struct netif_extra_info *extras, RING_IDX rp)
1083 struct netif_extra_info *extra;
1084 RING_IDX cons = np->rx.rsp_cons;
1088 struct sk_buff *skb;
1091 if (unlikely(cons + 1 == rp)) {
1092 if (net_ratelimit())
1093 WPRINTK("Missing extra info\n");
1098 extra = (struct netif_extra_info *)
1099 RING_GET_RESPONSE(&np->rx, ++cons);
1101 if (unlikely(!extra->type ||
1102 extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1103 if (net_ratelimit())
1104 WPRINTK("Invalid extra type: %d\n",
1108 memcpy(&extras[extra->type - 1], extra,
1112 skb = xennet_get_rx_skb(np, cons);
1113 ref = xennet_get_rx_ref(np, cons);
1114 xennet_move_rx_slot(np, skb, ref);
1115 } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
1117 np->rx.rsp_cons = cons;
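/*
 * Gather all rx responses (and any extra-info slots) belonging to one
 * packet: validate each response, finish its grant (ending foreign access on
 * the copy path, or completing the page transfer and remapping on the flip
 * path), and queue the per-slot skbs on 'list' for xennet_fill_frags().
 */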
1121 static int xennet_get_responses(struct netfront_info *np,
1122 struct netfront_rx_info *rinfo, RING_IDX rp,
1123 struct sk_buff_head *list,
1124 int *pages_flipped_p)
1126 int pages_flipped = *pages_flipped_p;
1127 struct mmu_update *mmu;
1128 struct multicall_entry *mcl;
1129 struct netif_rx_response *rx = &rinfo->rx;
1130 struct netif_extra_info *extras = rinfo->extras;
1131 RING_IDX cons = np->rx.rsp_cons;
1132 struct sk_buff *skb = xennet_get_rx_skb(np, cons);
1133 grant_ref_t ref = xennet_get_rx_ref(np, cons);
1134 int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
1139 if (rx->flags & XEN_NETRXF_extra_info) {
1140 err = xennet_get_extras(np, extras, rp);
1141 cons = np->rx.rsp_cons;
1147 if (unlikely(rx->status < 0 ||
1148 rx->offset + rx->status > PAGE_SIZE)) {
1149 if (net_ratelimit())
1150 WPRINTK("rx->offset: %x, size: %u\n",
1151 rx->offset, rx->status);
1152 xennet_move_rx_slot(np, skb, ref);
1158 * This definitely indicates a bug, either in this driver or in
1159 * the backend driver. In future this should flag the bad
1160 * situation to the system controller to reboot the backend.
1162 if (ref == GRANT_INVALID_REF) {
1163 if (net_ratelimit())
1164 WPRINTK("Bad rx response id %d.\n", rx->id);
1169 if (!np->copying_receiver) {
1170 /* Memory pressure, insufficient buffer
1172 if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
1173 if (net_ratelimit())
1174 WPRINTK("Unfulfilled rx req "
1175 "(id=%d, st=%d).\n",
1176 rx->id, rx->status);
1177 xennet_move_rx_slot(np, skb, ref);
1182 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1183 /* Remap the page. */
1184 const struct page *page =
1185 skb_frag_page(skb_shinfo(skb)->frags);
1186 unsigned long pfn = page_to_pfn(page);
1187 void *vaddr = page_address(page);
1189 mcl = np->rx_mcl + pages_flipped;
1190 mmu = np->rx_mmu + pages_flipped;
1192 MULTI_update_va_mapping(mcl,
1193 (unsigned long)vaddr,
1197 mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
1198 | MMU_MACHPHYS_UPDATE;
1201 set_phys_to_machine(pfn, mfn);
1205 ret = gnttab_end_foreign_access_ref(ref);
1209 gnttab_release_grant_reference(&np->gref_rx_head, ref);
1211 __skb_queue_tail(list, skb);
1214 if (!(rx->flags & XEN_NETRXF_more_data))
1217 if (cons + frags == rp) {
1218 if (net_ratelimit())
1219 WPRINTK("Need more frags\n");
1224 rx = RING_GET_RESPONSE(&np->rx, cons + frags);
1225 skb = xennet_get_rx_skb(np, cons + frags);
1226 ref = xennet_get_rx_ref(np, cons + frags);
1230 if (unlikely(frags > max)) {
1231 if (net_ratelimit())
1232 WPRINTK("Too many frags\n");
1237 np->rx.rsp_cons = cons + frags;
1239 *pages_flipped_p = pages_flipped;
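/*
 * Attach the pages of the queued per-slot skbs to the head skb as page
 * fragments, consuming one rx response per fragment, and return the updated
 * ring consumer index.
 */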
1244 static RING_IDX xennet_fill_frags(struct netfront_info *np,
1245 struct sk_buff *skb,
1246 struct sk_buff_head *list)
1248 struct skb_shared_info *shinfo = skb_shinfo(skb);
1249 int nr_frags = shinfo->nr_frags;
1250 RING_IDX cons = np->rx.rsp_cons;
1251 struct sk_buff *nskb;
1253 while ((nskb = __skb_dequeue(list))) {
1254 struct netif_rx_response *rx =
1255 RING_GET_RESPONSE(&np->rx, ++cons);
1257 __skb_fill_page_desc(skb, nr_frags,
1258 skb_frag_page(skb_shinfo(nskb)->frags),
1259 rx->offset, rx->status);
1261 skb->data_len += rx->status;
1263 skb_shinfo(nskb)->nr_frags = 0;
1269 shinfo->nr_frags = nr_frags;
1273 static int xennet_set_skb_gso(struct sk_buff *skb,
1274 struct netif_extra_info *gso)
1276 if (!gso->u.gso.size) {
1277 if (net_ratelimit())
1278 WPRINTK("GSO size must not be zero.\n");
1282 /* Currently only TCPv4 S.O. is supported. */
1283 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
1284 if (net_ratelimit())
1285 WPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
1290 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1292 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1294 /* Header must be checked, and gso_segs computed. */
1295 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1297 skb_shinfo(skb)->gso_segs = 0;
1301 if (net_ratelimit())
1302 WPRINTK("GSO unsupported by this kernel.\n");
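/*
 * NAPI poll handler: drain up to 'budget' rx responses, reassemble
 * fragmented packets, copy the first RX_COPY_THRESHOLD bytes into the linear
 * area, set up checksum/GSO state and pass the skbs to netif_receive_skb();
 * finally refill the ring and complete NAPI if nothing more is pending.
 */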
1307 static int netif_poll(struct napi_struct *napi, int budget)
1309 struct netfront_info *np = container_of(napi, struct netfront_info, napi);
1310 struct netfront_stats *stats = this_cpu_ptr(np->stats);
1311 struct net_device *dev = np->netdev;
1312 struct sk_buff *skb;
1313 struct netfront_rx_info rinfo;
1314 struct netif_rx_response *rx = &rinfo.rx;
1315 struct netif_extra_info *extras = rinfo.extras;
1317 struct multicall_entry *mcl;
1318 int work_done, more_to_do = 1, accel_more_to_do = 1;
1319 struct sk_buff_head rxq;
1320 struct sk_buff_head errq;
1321 struct sk_buff_head tmpq;
1322 unsigned long flags;
1324 int pages_flipped = 0;
1327 spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */
1329 if (unlikely(!netfront_carrier_ok(np))) {
1330 spin_unlock(&np->rx_lock);
1334 skb_queue_head_init(&rxq);
1335 skb_queue_head_init(&errq);
1336 skb_queue_head_init(&tmpq);
1338 rp = np->rx.sring->rsp_prod;
1339 rmb(); /* Ensure we see queued responses up to 'rp'. */
1341 i = np->rx.rsp_cons;
1343 while ((i != rp) && (work_done < budget)) {
1344 memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
1345 memset(extras, 0, sizeof(rinfo.extras));
1347 err = xennet_get_responses(np, &rinfo, rp, &tmpq,
1350 if (unlikely(err)) {
1352 while ((skb = __skb_dequeue(&tmpq)))
1353 __skb_queue_tail(&errq, skb);
1354 dev->stats.rx_errors++;
1355 i = np->rx.rsp_cons;
1359 skb = __skb_dequeue(&tmpq);
1361 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1362 struct netif_extra_info *gso;
1363 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1365 if (unlikely(xennet_set_skb_gso(skb, gso))) {
1366 __skb_queue_head(&tmpq, skb);
1367 np->rx.rsp_cons += skb_queue_len(&tmpq);
1372 NETFRONT_SKB_CB(skb)->page =
1373 skb_frag_page(skb_shinfo(skb)->frags);
1374 NETFRONT_SKB_CB(skb)->offset = rx->offset;
1377 if (len > RX_COPY_THRESHOLD)
1378 len = RX_COPY_THRESHOLD;
1381 if (rx->status > len) {
1382 skb_shinfo(skb)->frags[0].page_offset =
1384 skb_frag_size_set(skb_shinfo(skb)->frags,
1386 skb->data_len = rx->status - len;
1388 __skb_fill_page_desc(skb, 0, NULL, 0, 0);
1389 skb_shinfo(skb)->nr_frags = 0;
1392 i = xennet_fill_frags(np, skb, &tmpq);
1395 * Truesize must approximate the size of true data plus
1396 * any supervisor overheads. Adding hypervisor overheads
1397 * has been shown to significantly reduce achievable
1398 * bandwidth with the default receive buffer size. It is
1399 * therefore not wise to account for it here.
1401 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to
1402 * RX_COPY_THRESHOLD + the supervisor overheads. Here, we
1403 * add the size of the data pulled in xennet_fill_frags().
1405 * We also adjust for any unused space in the main data
1406 * area by subtracting (RX_COPY_THRESHOLD - len). This is
1407 * especially important with drivers which split incoming
1408 * packets into header and data, using only 66 bytes of
1409 * the main data area (see the e1000 driver for example.)
1410 * On such systems, without this last adjustment, our
1411 * achievable receive throughput using the standard receive
1412 * buffer size was cut by 25%(!!!).
1414 skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
1415 skb->len += skb->data_len;
1417 if (rx->flags & XEN_NETRXF_csum_blank)
1418 skb->ip_summed = CHECKSUM_PARTIAL;
1419 else if (rx->flags & XEN_NETRXF_data_validated)
1420 skb->ip_summed = CHECKSUM_UNNECESSARY;
1422 skb->ip_summed = CHECKSUM_NONE;
1424 u64_stats_update_begin(&stats->syncp);
1425 stats->rx_packets++;
1426 stats->rx_bytes += skb->len;
1427 u64_stats_update_end(&stats->syncp);
1429 __skb_queue_tail(&rxq, skb);
1431 np->rx.rsp_cons = ++i;
1435 if (pages_flipped) {
1436 /* Some pages are no longer absent... */
1437 balloon_update_driver_allowance(-pages_flipped);
1439 /* Do all the remapping work and M2P updates. */
1440 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1441 mcl = np->rx_mcl + pages_flipped;
1442 mcl->op = __HYPERVISOR_mmu_update;
1443 mcl->args[0] = (unsigned long)np->rx_mmu;
1444 mcl->args[1] = pages_flipped;
1446 mcl->args[3] = DOMID_SELF;
1447 err = HYPERVISOR_multicall_check(np->rx_mcl,
1454 __skb_queue_purge(&errq);
1456 while ((skb = __skb_dequeue(&rxq)) != NULL) {
1457 struct page *page = NETFRONT_SKB_CB(skb)->page;
1458 void *vaddr = page_address(page);
1459 unsigned offset = NETFRONT_SKB_CB(skb)->offset;
1461 memcpy(skb->data, vaddr + offset, skb_headlen(skb));
1463 if (page != skb_frag_page(skb_shinfo(skb)->frags))
1466 /* Ethernet work: Delayed to here as it peeks the header. */
1467 skb->protocol = eth_type_trans(skb, dev);
1469 if (skb_checksum_setup(skb, &np->rx_gso_csum_fixups)) {
1475 netif_receive_skb(skb);
1478 /* If we get a callback with very few responses, reduce fill target. */
1479 /* NB. Note exponential increase, linear decrease. */
1480 if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
1481 ((3*np->rx_target) / 4)) &&
1482 (--np->rx_target < np->rx_min_target))
1483 np->rx_target = np->rx_min_target;
1485 network_alloc_rx_buffers(dev);
1487 if (work_done < budget) {
1488 /* there's some spare capacity, try the accelerated path */
1489 int accel_budget = budget - work_done;
1490 int accel_budget_start = accel_budget;
1492 if (np->accel_vif_state.hooks) {
1494 np->accel_vif_state.hooks->netdev_poll
1495 (dev, &accel_budget);
1496 work_done += (accel_budget_start - accel_budget);
1498 accel_more_to_do = 0;
1501 if (work_done < budget) {
1502 local_irq_save(flags);
1504 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
1506 if (!more_to_do && !accel_more_to_do &&
1507 np->accel_vif_state.hooks) {
1509 * Slow path has nothing more to do, see if
1510 * fast path is likewise
1513 np->accel_vif_state.hooks->start_napi_irq(dev);
1516 if (!more_to_do && !accel_more_to_do)
1517 __napi_complete(napi);
1519 local_irq_restore(flags);
1522 spin_unlock(&np->rx_lock);
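/*
 * Release every transmit buffer still outstanding: skip free-list entries
 * (values below PAGE_OFFSET), end foreign access on the grant, return the
 * reference and the slot id, and free the skb.
 */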
1527 static void netif_release_tx_bufs(struct netfront_info *np)
1529 struct sk_buff *skb;
1532 for (i = 1; i <= NET_TX_RING_SIZE; i++) {
1533 if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET)
1536 skb = np->tx_skbs[i];
1537 gnttab_end_foreign_access_ref(np->grant_tx_ref[i]);
1538 gnttab_release_grant_reference(
1539 &np->gref_tx_head, np->grant_tx_ref[i]);
1540 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1541 add_id_to_freelist(np->tx_skbs, i);
1542 dev_kfree_skb_irq(skb);
1546 static void netif_release_rx_bufs_flip(struct netfront_info *np)
1548 struct mmu_update *mmu = np->rx_mmu;
1549 struct multicall_entry *mcl = np->rx_mcl;
1550 struct sk_buff_head free_list;
1551 struct sk_buff *skb;
1553 int xfer = 0, noxfer = 0, unused = 0;
1556 skb_queue_head_init(&free_list);
1558 spin_lock_bh(&np->rx_lock);
1560 for (id = 0; id < NET_RX_RING_SIZE; id++) {
1563 if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) {
1568 skb = np->rx_skbs[id];
1569 mfn = gnttab_end_foreign_transfer_ref(ref);
1570 gnttab_release_grant_reference(&np->gref_rx_head, ref);
1571 np->grant_rx_ref[id] = GRANT_INVALID_REF;
1572 add_id_to_freelist(np->rx_skbs, id);
1574 page = skb_frag_page(skb_shinfo(skb)->frags);
1577 balloon_release_driver_page(page);
1578 skb_shinfo(skb)->nr_frags = 0;
1584 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1585 /* Remap the page. */
1586 unsigned long pfn = page_to_pfn(page);
1587 void *vaddr = page_address(page);
1589 MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
1590 pfn_pte_ma(mfn, PAGE_KERNEL),
1593 mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
1594 | MMU_MACHPHYS_UPDATE;
1598 set_phys_to_machine(pfn, mfn);
1600 __skb_queue_tail(&free_list, skb);
1604 DPRINTK("%s: %d xfer, %d noxfer, %d unused\n",
1605 __FUNCTION__, xfer, noxfer, unused);
1608 /* Some pages are no longer absent... */
1609 balloon_update_driver_allowance(-xfer);
1611 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1612 /* Do all the remapping work and M2P updates. */
1613 mcl->op = __HYPERVISOR_mmu_update;
1614 mcl->args[0] = (unsigned long)np->rx_mmu;
1615 mcl->args[1] = mmu - np->rx_mmu;
1617 mcl->args[3] = DOMID_SELF;
1619 rc = HYPERVISOR_multicall_check(
1620 np->rx_mcl, mcl - np->rx_mcl, NULL);
1625 __skb_queue_purge(&free_list);
1627 spin_unlock_bh(&np->rx_lock);
1630 static void netif_release_rx_bufs_copy(struct netfront_info *np)
1632 struct sk_buff *skb;
1634 int busy = 0, inuse = 0;
1636 spin_lock_bh(&np->rx_lock);
1638 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1639 ref = np->grant_rx_ref[i];
1641 if (ref == GRANT_INVALID_REF)
1646 skb = np->rx_skbs[i];
1648 if (!gnttab_end_foreign_access_ref(ref))
1654 gnttab_release_grant_reference(&np->gref_rx_head, ref);
1655 np->grant_rx_ref[i] = GRANT_INVALID_REF;
1656 add_id_to_freelist(np->rx_skbs, i);
1662 DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n",
1663 __FUNCTION__, busy, inuse, NET_RX_RING_SIZE);
1665 spin_unlock_bh(&np->rx_lock);
1668 static int network_close(struct net_device *dev)
1670 struct netfront_info *np = netdev_priv(dev);
1671 netif_stop_queue(np->netdev);
1672 napi_disable(&np->napi);
1677 static int xennet_set_mac_address(struct net_device *dev, void *p)
1679 struct netfront_info *np = netdev_priv(dev);
1680 struct sockaddr *addr = p;
1682 if (netif_running(dev))
1685 if (!is_valid_ether_addr(addr->sa_data))
1686 return -EADDRNOTAVAIL;
1688 memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
1689 memcpy(np->mac, addr->sa_data, ETH_ALEN);
1694 static int xennet_change_mtu(struct net_device *dev, int mtu)
1696 int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
1704 static struct rtnl_link_stats64 *xennet_get_stats64(struct net_device *dev,
1705 struct rtnl_link_stats64 *tot)
1707 struct netfront_info *np = netdev_priv(dev);
1710 netfront_accelerator_call_get_stats(np, dev);
1712 for_each_possible_cpu(cpu) {
1713 struct netfront_stats *stats = per_cpu_ptr(np->stats, cpu);
1714 u64 rx_packets, rx_bytes, tx_packets, tx_bytes;
1718 start = u64_stats_fetch_begin_bh(&stats->syncp);
1720 rx_packets = stats->rx_packets;
1721 tx_packets = stats->tx_packets;
1722 rx_bytes = stats->rx_bytes;
1723 tx_bytes = stats->tx_bytes;
1724 } while (u64_stats_fetch_retry_bh(&stats->syncp, start));
1726 tot->rx_packets += rx_packets;
1727 tot->tx_packets += tx_packets;
1728 tot->rx_bytes += rx_bytes;
1729 tot->tx_bytes += tx_bytes;
1732 tot->rx_errors = dev->stats.rx_errors;
1733 tot->tx_dropped = dev->stats.tx_dropped;
1738 static const struct xennet_stat {
1739 char name[ETH_GSTRING_LEN];
1741 } xennet_stats[] = {
1743 "rx_gso_csum_fixups",
1744 offsetof(struct netfront_info, rx_gso_csum_fixups) / sizeof(long)
1748 static int xennet_get_sset_count(struct net_device *dev, int sset)
1752 return ARRAY_SIZE(xennet_stats);
1757 static void xennet_get_ethtool_stats(struct net_device *dev,
1758 struct ethtool_stats *stats, u64 *data)
1760 unsigned long *np = netdev_priv(dev);
1763 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1764 data[i] = np[xennet_stats[i].offset];
1767 static void xennet_get_strings(struct net_device *dev, u32 stringset, u8 *data)
1771 switch (stringset) {
1773 for (i = 0; i < ARRAY_SIZE(xennet_stats); i++)
1774 memcpy(data + i * ETH_GSTRING_LEN,
1775 xennet_stats[i].name, ETH_GSTRING_LEN);
1780 static void netfront_get_drvinfo(struct net_device *dev,
1781 struct ethtool_drvinfo *info)
1783 strcpy(info->driver, "netfront");
1784 strlcpy(info->bus_info, dev_name(dev->dev.parent),
1785 ARRAY_SIZE(info->bus_info));
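/*
 * (Re)connect to the backend: probe its feature-rx-copy/feature-rx-flip
 * support to choose the receive path, re-establish the rings and event
 * channel via talk_to_backend(), discard pending tx fragments, requeue any
 * outstanding rx buffers, and kick the backend.
 */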
1788 static int network_connect(struct net_device *dev)
1790 struct netfront_info *np = netdev_priv(dev);
1791 int i, requeue_idx, err;
1792 struct sk_buff *skb;
1794 netif_rx_request_t *req;
1795 unsigned int feature_rx_copy, feature_rx_flip;
1797 err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1798 "feature-rx-copy", "%u", &feature_rx_copy);
1800 feature_rx_copy = 0;
1801 err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1802 "feature-rx-flip", "%u", &feature_rx_flip);
1804 feature_rx_flip = 1;
1807 * Copy packets on receive path if:
1808 * (a) This was requested by user, and the backend supports it; or
1809 * (b) Flipping was requested, but this is unsupported by the backend.
1811 np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
1812 (MODPARM_rx_flip && !feature_rx_flip));
1814 err = talk_to_backend(np->xbdev, np);
1819 netdev_update_features(dev);
1822 DPRINTK("device %s has %sing receive path.\n",
1823 dev->name, np->copying_receiver ? "copy" : "flipp");
1825 spin_lock_bh(&np->rx_lock);
1826 spin_lock_irq(&np->tx_lock);
1829 * Recovery procedure:
1830 * NB. Freelist index entries are always going to be less than
1831 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
1832 * greater than PAGE_OFFSET: we use this property to distinguish them.
1836 /* Step 1: Discard all pending TX packet fragments. */
1837 netif_release_tx_bufs(np);
1839 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1840 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1843 if (!np->rx_skbs[i])
1846 skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
1847 ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1848 req = RING_GET_REQUEST(&np->rx, requeue_idx);
1849 pfn = page_to_pfn(skb_frag_page(skb_shinfo(skb)->frags));
1851 if (!np->copying_receiver) {
1852 gnttab_grant_foreign_transfer_ref(
1853 ref, np->xbdev->otherend_id, pfn);
1855 gnttab_grant_foreign_access_ref(
1856 ref, np->xbdev->otherend_id,
1857 pfn_to_mfn(pfn), 0);
1860 req->id = requeue_idx;
1865 np->rx.req_prod_pvt = requeue_idx;
1868 * Step 3: All public and private state should now be sane. Get
1869 * ready to start sending and receiving packets and give the driver
1870 * domain a kick because we've probably just requeued some packets.
1873 netfront_carrier_on(np);
1874 notify_remote_via_irq(np->irq);
1875 network_tx_buf_gc(dev);
1876 network_alloc_rx_buffers(dev);
1878 spin_unlock_irq(&np->tx_lock);
1879 spin_unlock_bh(&np->rx_lock);
1884 static void netif_uninit(struct net_device *dev)
1886 struct netfront_info *np = netdev_priv(dev);
1887 netif_release_tx_bufs(np);
1888 if (np->copying_receiver)
1889 netif_release_rx_bufs_copy(np);
1891 netif_release_rx_bufs_flip(np);
1892 gnttab_free_grant_references(np->gref_tx_head);
1893 gnttab_free_grant_references(np->gref_rx_head);
1896 static const struct ethtool_ops network_ethtool_ops =
1898 .get_drvinfo = netfront_get_drvinfo,
1899 .get_link = ethtool_op_get_link,
1901 .get_sset_count = xennet_get_sset_count,
1902 .get_ethtool_stats = xennet_get_ethtool_stats,
1903 .get_strings = xennet_get_strings,
1907 static ssize_t show_rxbuf_min(struct device *dev,
1908 struct device_attribute *attr, char *buf)
1910 struct netfront_info *info = netdev_priv(to_net_dev(dev));
1912 return sprintf(buf, "%u\n", info->rx_min_target);
1915 static ssize_t store_rxbuf_min(struct device *dev,
1916 struct device_attribute *attr,
1917 const char *buf, size_t len)
1919 struct net_device *netdev = to_net_dev(dev);
1920 struct netfront_info *np = netdev_priv(netdev);
1922 unsigned long target;
1924 if (!capable(CAP_NET_ADMIN))
1927 target = simple_strtoul(buf, &endp, 0);
1931 if (target < RX_MIN_TARGET)
1932 target = RX_MIN_TARGET;
1933 if (target > RX_MAX_TARGET)
1934 target = RX_MAX_TARGET;
1936 spin_lock_bh(&np->rx_lock);
1937 if (target > np->rx_max_target)
1938 np->rx_max_target = target;
1939 np->rx_min_target = target;
1940 if (target > np->rx_target)
1941 np->rx_target = target;
1943 network_alloc_rx_buffers(netdev);
1945 spin_unlock_bh(&np->rx_lock);
1949 static ssize_t show_rxbuf_max(struct device *dev,
1950 struct device_attribute *attr, char *buf)
1952 struct netfront_info *info = netdev_priv(to_net_dev(dev));
1954 return sprintf(buf, "%u\n", info->rx_max_target);
1957 static ssize_t store_rxbuf_max(struct device *dev,
1958 struct device_attribute *attr,
1959 const char *buf, size_t len)
1961 struct net_device *netdev = to_net_dev(dev);
1962 struct netfront_info *np = netdev_priv(netdev);
1964 unsigned long target;
1966 if (!capable(CAP_NET_ADMIN))
1969 target = simple_strtoul(buf, &endp, 0);
1973 if (target < RX_MIN_TARGET)
1974 target = RX_MIN_TARGET;
1975 if (target > RX_MAX_TARGET)
1976 target = RX_MAX_TARGET;
1978 spin_lock_bh(&np->rx_lock);
1979 if (target < np->rx_min_target)
1980 np->rx_min_target = target;
1981 np->rx_max_target = target;
1982 if (target < np->rx_target)
1983 np->rx_target = target;
1985 network_alloc_rx_buffers(netdev);
1987 spin_unlock_bh(&np->rx_lock);
1991 static ssize_t show_rxbuf_cur(struct device *dev,
1992 struct device_attribute *attr, char *buf)
1994 struct netfront_info *info = netdev_priv(to_net_dev(dev));
1996 return sprintf(buf, "%u\n", info->rx_target);
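/*
 * These attributes are created on the net_device's device object, so they
 * should show up in the interface's sysfs directory, e.g. (path assumed,
 * for an interface named eth0): echo 512 > /sys/class/net/eth0/rxbuf_min.
 * Written values are clamped to [RX_MIN_TARGET, RX_MAX_TARGET].
 */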
1999 static struct device_attribute xennet_attrs[] = {
2000 __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
2001 __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
2002 __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
2005 static int xennet_sysfs_addif(struct net_device *netdev)
2010 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
2011 error = device_create_file(&netdev->dev,
2020 device_remove_file(&netdev->dev, &xennet_attrs[i]);
2024 static void xennet_sysfs_delif(struct net_device *netdev)
2028 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
2029 device_remove_file(&netdev->dev, &xennet_attrs[i]);
2032 #endif /* CONFIG_SYSFS */
2036 * Nothing to do here. Virtual interface is point-to-point and the
2037 * physical interface is probably promiscuous anyway.
2039 static void network_set_multicast_list(struct net_device *dev)
2043 static netdev_features_t xennet_fix_features(struct net_device *dev,
2044 netdev_features_t features)
2046 struct netfront_info *np = netdev_priv(dev);
2049 if (features & NETIF_F_SG) {
2050 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
2055 features &= ~NETIF_F_SG;
2058 if (features & NETIF_F_TSO) {
2059 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
2060 "feature-gso-tcpv4", "%d", &val) < 0)
2064 features &= ~NETIF_F_TSO;
2070 static int xennet_set_features(struct net_device *dev,
2071 netdev_features_t features)
2073 if (!(features & NETIF_F_SG) && dev->mtu > ETH_DATA_LEN) {
2074 netdev_info(dev, "Reducing MTU because no SG offload\n");
2075 dev->mtu = ETH_DATA_LEN;
2081 #ifdef CONFIG_NET_POLL_CONTROLLER
2082 static void xennet_poll_controller(struct net_device *dev)
2088 static const struct net_device_ops xennet_netdev_ops = {
2089 .ndo_uninit = netif_uninit,
2090 .ndo_open = network_open,
2091 .ndo_stop = network_close,
2092 .ndo_start_xmit = network_start_xmit,
2093 .ndo_set_rx_mode = network_set_multicast_list,
2094 .ndo_set_mac_address = xennet_set_mac_address,
2095 .ndo_validate_addr = eth_validate_addr,
2096 .ndo_fix_features = xennet_fix_features,
2097 .ndo_set_features = xennet_set_features,
2098 #ifdef CONFIG_NET_POLL_CONTROLLER
2099 .ndo_poll_controller = xennet_poll_controller,
2101 .ndo_change_mtu = xennet_change_mtu,
2102 .ndo_get_stats64 = xennet_get_stats64,
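/*
 * Allocate the net_device and per-CPU stats, initialise locks, the rx refill
 * timer, the tx/rx free lists and grant-reference pools, and set up netdev
 * ops, ethtool ops and NAPI. The device is returned with the carrier off;
 * registration is left to the caller (netfront_probe()).
 */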
2105 static struct net_device * __devinit create_netdev(struct xenbus_device *dev)
2108 struct net_device *netdev = NULL;
2109 struct netfront_info *np = NULL;
2111 netdev = alloc_etherdev(sizeof(struct netfront_info));
2113 return ERR_PTR(-ENOMEM);
2115 np = netdev_priv(netdev);
2118 spin_lock_init(&np->tx_lock);
2119 spin_lock_init(&np->rx_lock);
2121 init_accelerator_vif(np, dev);
2123 skb_queue_head_init(&np->rx_batch);
2124 np->rx_target = RX_DFL_MIN_TARGET;
2125 np->rx_min_target = RX_DFL_MIN_TARGET;
2126 np->rx_max_target = RX_MAX_TARGET;
2128 init_timer(&np->rx_refill_timer);
2129 np->rx_refill_timer.data = (unsigned long)netdev;
2130 np->rx_refill_timer.function = rx_refill_timeout;
2133 np->stats = alloc_percpu(struct netfront_stats);
2134 if (np->stats == NULL)
2137 /* Initialise {tx,rx}_skbs as a free chain containing every entry. */
2138 for (i = 0; i <= NET_TX_RING_SIZE; i++) {
2139 np->tx_skbs[i] = (void *)((unsigned long) i+1);
2140 np->grant_tx_ref[i] = GRANT_INVALID_REF;
2143 for (i = 0; i < NET_RX_RING_SIZE; i++) {
2144 np->rx_skbs[i] = NULL;
2145 np->grant_rx_ref[i] = GRANT_INVALID_REF;
2148 /* A grant for every tx ring slot */
2149 if (gnttab_alloc_grant_references(TX_MAX_TARGET,
2150 &np->gref_tx_head) < 0) {
2151 pr_alert("#### netfront can't alloc tx grant refs\n");
2153 goto exit_free_stats;
2155 /* A grant for every rx ring slot */
2156 if (gnttab_alloc_grant_references(RX_MAX_TARGET,
2157 &np->gref_rx_head) < 0) {
2158 pr_alert("#### netfront can't alloc rx grant refs\n");
2163 netdev->netdev_ops = &xennet_netdev_ops;
2164 netif_napi_add(netdev, &np->napi, netif_poll, 64);
2165 netdev->features = NETIF_F_IP_CSUM | NETIF_F_RXCSUM |
2167 netdev->hw_features = NETIF_F_IP_CSUM | NETIF_F_SG | NETIF_F_TSO;
2170 * Assume that all hw features are available for now. This set
2171 * will be adjusted by the call to netdev_update_features() in
2172 * network_connect(), which is the earliest point where we can
2173 * negotiate with the backend regarding supported features.
2175 netdev->features |= netdev->hw_features;
2177 SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
2178 SET_NETDEV_DEV(netdev, &dev->dev);
2180 np->netdev = netdev;
2182 netfront_carrier_off(np);
2187 gnttab_free_grant_references(np->gref_tx_head);
2189 free_percpu(np->stats);
2191 free_netdev(netdev);
2192 return ERR_PTR(err);
2195 static void netif_release_rings(struct netfront_info *info)
2197 end_access(info->tx_ring_ref, info->tx.sring);
2198 end_access(info->rx_ring_ref, info->rx.sring);
2199 info->tx_ring_ref = GRANT_INVALID_REF;
2200 info->rx_ring_ref = GRANT_INVALID_REF;
2201 info->tx.sring = NULL;
2202 info->rx.sring = NULL;
2205 static void netif_disconnect_backend(struct netfront_info *info)
2207 /* Stop old i/f to prevent errors whilst we rebuild the state. */
2208 spin_lock_bh(&info->rx_lock);
2209 spin_lock_irq(&info->tx_lock);
2210 netfront_carrier_off(info);
2211 spin_unlock_irq(&info->tx_lock);
2212 spin_unlock_bh(&info->rx_lock);
2215 unbind_from_irqhandler(info->irq, info->netdev);
2218 netif_release_rings(info);
2222 static void end_access(int ref, void *page)
2224 if (ref != GRANT_INVALID_REF)
2225 gnttab_end_foreign_access(ref, (unsigned long)page);
2229 /* ** Driver registration ** */
2232 static const struct xenbus_device_id netfront_ids[] = {
2236 MODULE_ALIAS("xen:vif");
2238 static DEFINE_XENBUS_DRIVER(netfront, ,
2239 .probe = netfront_probe,
2240 .remove = __devexit_p(netfront_remove),
2241 .suspend = netfront_suspend,
2242 .suspend_cancel = netfront_suspend_cancel,
2243 .resume = netfront_resume,
2244 .otherend_changed = backend_changed,
2248 static int __init netif_init(void)
2250 if (!is_running_on_xen())
2254 if (MODPARM_rx_flip && MODPARM_rx_copy) {
2255 WPRINTK("Cannot specify both rx_copy and rx_flip.\n");
2259 if (!MODPARM_rx_flip && !MODPARM_rx_copy)
2260 MODPARM_rx_copy = true; /* Default is to copy. */
2265 IPRINTK("Initialising virtual ethernet driver.\n");
2267 return xenbus_register_frontend(&netfront_driver);
2269 module_init(netif_init);
2272 static void __exit netif_exit(void)
2274 xenbus_unregister_driver(&netfront_driver);
2278 module_exit(netif_exit);
2280 MODULE_LICENSE("Dual BSD/GPL");