1 /******************************************************************************
2 * Virtual network driver for conversing with remote driver backends.
4 * Copyright (c) 2002-2005, K A Fraser
5 * Copyright (c) 2005, XenSource Ltd
6 * Copyright (C) 2007 Solarflare Communications, Inc.
8 * This program is free software; you can redistribute it and/or
9 * modify it under the terms of the GNU General Public License version 2
10 * as published by the Free Software Foundation; or, when distributed
11 * separately from the Linux kernel or incorporated into other
12 * software packages, subject to the following license:
14 * Permission is hereby granted, free of charge, to any person obtaining a copy
15 * of this source file (the "Software"), to deal in the Software without
16 * restriction, including without limitation the rights to use, copy, modify,
17 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
18 * and to permit persons to whom the Software is furnished to do so, subject to
19 * the following conditions:
21 * The above copyright notice and this permission notice shall be included in
22 * all copies or substantial portions of the Software.
24 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
25 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
26 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
27 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
28 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
29 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
33 #include <linux/module.h>
34 #include <linux/version.h>
35 #include <linux/kernel.h>
36 #include <linux/sched.h>
37 #include <linux/slab.h>
38 #include <linux/string.h>
39 #include <linux/errno.h>
40 #include <linux/netdevice.h>
41 #include <linux/inetdevice.h>
42 #include <linux/etherdevice.h>
43 #include <linux/skbuff.h>
44 #include <linux/init.h>
45 #include <linux/bitops.h>
46 #include <linux/ethtool.h>
48 #include <linux/if_ether.h>
50 #include <linux/moduleparam.h>
52 #include <net/pkt_sched.h>
54 #include <net/route.h>
55 #include <asm/uaccess.h>
56 #include <xen/evtchn.h>
57 #include <xen/xenbus.h>
58 #include <xen/interface/io/netif.h>
59 #include <xen/interface/memory.h>
60 #include <xen/balloon.h>
62 #include <asm/maddr.h>
63 #include <asm/uaccess.h>
64 #include <xen/interface/grant_table.h>
65 #include <xen/gnttab.h>
66 #include <xen/hypercall.h>
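/* Control data stashed in skb->cb for a receive buffer: the backing page and
 * the offset of the packet data within it, used until the head is copied into
 * the skb's linear area in netif_poll(). */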
73 #define NETFRONT_SKB_CB(skb) ((struct netfront_cb *)((skb)->cb))
78 * Mutually-exclusive module options to select receive data path:
79 * rx_copy : Packets are copied by network backend into local memory
80 * rx_flip : Page containing packet data is transferred to our ownership
81 * For fully-virtualised guests there is no option - copying must be used.
82 * For paravirtualised guests, flipping is the default.
85 static int MODPARM_rx_copy = 0;
86 module_param_named(rx_copy, MODPARM_rx_copy, bool, 0);
87 MODULE_PARM_DESC(rx_copy, "Copy packets from network card (rather than flip)");
88 static int MODPARM_rx_flip = 0;
89 module_param_named(rx_flip, MODPARM_rx_flip, bool, 0);
90 MODULE_PARM_DESC(rx_flip, "Flip packets from network card (rather than copy)");
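/* Without module options (the fully-virtualised case noted above, where
 * copying is mandatory), the choice is fixed at compile time. */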
92 static const int MODPARM_rx_copy = 1;
93 static const int MODPARM_rx_flip = 0;
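/* Received data up to this many bytes is copied into the skb's linear area;
 * anything beyond it is left in the granted page and attached as a fragment. */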
96 #define RX_COPY_THRESHOLD 256
98 /* If we don't have GSO, fake things up so that we never try to use it. */
99 #if defined(NETIF_F_GSO)
101 #define HAVE_TSO 1 /* TSO is a subset of GSO */
102 #define HAVE_CSUM_OFFLOAD 1
103 static inline void dev_disable_gso_features(struct net_device *dev)
105 /* Turn off all GSO bits except ROBUST. */
106 dev->features &= (1 << NETIF_F_GSO_SHIFT) - 1;
107 dev->features |= NETIF_F_GSO_ROBUST;
109 #elif defined(NETIF_F_TSO)
113 /* Some older kernels cannot cope with incorrect checksums,
114 * particularly in netfilter. I'm not sure there is 100% correlation
115 * with the presence of NETIF_F_TSO but it appears to be a good first approximation.
118 #define HAVE_CSUM_OFFLOAD 0
120 #define gso_size tso_size
121 #define gso_segs tso_segs
122 static inline void dev_disable_gso_features(struct net_device *dev)
124 /* Turn off all TSO bits. */
125 dev->features &= ~NETIF_F_TSO;
127 static inline int skb_is_gso(const struct sk_buff *skb)
129 return skb_shinfo(skb)->tso_size;
131 static inline int skb_gso_ok(struct sk_buff *skb, int features)
133 return (features & NETIF_F_TSO);
136 static inline int netif_needs_gso(struct net_device *dev, struct sk_buff *skb)
138 return skb_is_gso(skb) &&
139 (!skb_gso_ok(skb, dev->features) ||
140 unlikely(skb->ip_summed != CHECKSUM_PARTIAL));
145 #define HAVE_CSUM_OFFLOAD 0
146 #define netif_needs_gso(dev, skb) 0
147 #define dev_disable_gso_features(dev) ((void)0)
148 #define ethtool_op_set_tso(dev, data) (-ENOSYS)
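/* Marks a ring reference or tx/rx slot that currently has no grant attached. */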
151 #define GRANT_INVALID_REF 0
153 struct netfront_rx_info {
154 struct netif_rx_response rx;
155 struct netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
159 * Implement our own carrier flag: the network stack's version causes delays
160 * when the carrier is re-enabled (in particular, dev_activate() may not
161 * immediately be called, which can cause packet loss).
163 #define netfront_carrier_on(netif) ((netif)->carrier = 1)
164 #define netfront_carrier_off(netif) ((netif)->carrier = 0)
165 #define netfront_carrier_ok(netif) ((netif)->carrier)
168 * Access macros for acquiring and freeing slots in tx_skbs[].
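 * A free slot holds the index of the next free slot, cast to a pointer (and
 * therefore below PAGE_OFFSET); a slot in use holds the skb pointer itself.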
171 static inline void add_id_to_freelist(struct sk_buff **list, unsigned short id)
174 list[0] = (void *)(unsigned long)id;
177 static inline unsigned short get_id_from_freelist(struct sk_buff **list)
179 unsigned int id = (unsigned int)(unsigned long)list[0];
184 static inline int xennet_rxidx(RING_IDX idx)
186 return idx & (NET_RX_RING_SIZE - 1);
189 static inline struct sk_buff *xennet_get_rx_skb(struct netfront_info *np,
192 int i = xennet_rxidx(ri);
193 struct sk_buff *skb = np->rx_skbs[i];
194 np->rx_skbs[i] = NULL;
198 static inline grant_ref_t xennet_get_rx_ref(struct netfront_info *np,
201 int i = xennet_rxidx(ri);
202 grant_ref_t ref = np->grant_rx_ref[i];
203 np->grant_rx_ref[i] = GRANT_INVALID_REF;
207 #define DPRINTK(fmt, args...) \
208 pr_debug("netfront (%s:%d) " fmt, \
209 __FUNCTION__, __LINE__, ##args)
210 #define IPRINTK(fmt, args...) \
211 printk(KERN_INFO "netfront: " fmt, ##args)
212 #define WPRINTK(fmt, args...) \
213 printk(KERN_WARNING "netfront: " fmt, ##args)
215 static int setup_device(struct xenbus_device *, struct netfront_info *);
216 static struct net_device *create_netdev(struct xenbus_device *);
218 static void end_access(int, void *);
219 static void netif_disconnect_backend(struct netfront_info *);
221 static int network_connect(struct net_device *);
222 static void network_tx_buf_gc(struct net_device *);
223 static void network_alloc_rx_buffers(struct net_device *);
224 static void send_fake_arp(struct net_device *);
226 static irqreturn_t netif_int(int irq, void *dev_id);
229 static int xennet_sysfs_addif(struct net_device *netdev);
230 static void xennet_sysfs_delif(struct net_device *netdev);
231 #else /* !CONFIG_SYSFS */
232 #define xennet_sysfs_addif(dev) (0)
233 #define xennet_sysfs_delif(dev) do { } while(0)
236 static inline int xennet_can_sg(struct net_device *dev)
238 return dev->features & NETIF_F_SG;
242 * Entry point to this code when a new device is created. Allocate the basic
243 * structures and the ring buffers for communication with the backend, and
244 * inform the backend of the appropriate details for those.
246 static int __devinit netfront_probe(struct xenbus_device *dev,
247 const struct xenbus_device_id *id)
250 struct net_device *netdev;
251 struct netfront_info *info;
253 netdev = create_netdev(dev);
254 if (IS_ERR(netdev)) {
255 err = PTR_ERR(netdev);
256 xenbus_dev_fatal(dev, err, "creating netdev");
260 info = netdev_priv(netdev);
261 dev->dev.driver_data = info;
263 err = register_netdev(info->netdev);
265 printk(KERN_WARNING "%s: register_netdev err=%d\n",
270 err = xennet_sysfs_addif(info->netdev);
272 unregister_netdev(info->netdev);
273 printk(KERN_WARNING "%s: add sysfs failed err=%d\n",
282 dev->dev.driver_data = NULL;
286 static int __devexit netfront_remove(struct xenbus_device *dev)
288 struct netfront_info *info = dev->dev.driver_data;
290 DPRINTK("%s\n", dev->nodename);
292 netfront_accelerator_call_remove(info, dev);
294 netif_disconnect_backend(info);
296 del_timer_sync(&info->rx_refill_timer);
298 xennet_sysfs_delif(info->netdev);
300 unregister_netdev(info->netdev);
302 free_netdev(info->netdev);
308 static int netfront_suspend(struct xenbus_device *dev)
310 struct netfront_info *info = dev->dev.driver_data;
311 return netfront_accelerator_suspend(info, dev);
315 static int netfront_suspend_cancel(struct xenbus_device *dev)
317 struct netfront_info *info = dev->dev.driver_data;
318 return netfront_accelerator_suspend_cancel(info, dev);
323 * We are reconnecting to the backend, due to a suspend/resume, or a backend
324 * driver restart. We tear down our netif structure and recreate it, but
325 * leave the device-layer structures intact so that this is transparent to the
326 * rest of the kernel.
328 static int netfront_resume(struct xenbus_device *dev)
330 struct netfront_info *info = dev->dev.driver_data;
332 DPRINTK("%s\n", dev->nodename);
334 netfront_accelerator_resume(info, dev);
336 netif_disconnect_backend(info);
340 static int xen_net_read_mac(struct xenbus_device *dev, u8 mac[])
342 char *s, *e, *macstr;
345 macstr = s = xenbus_read(XBT_NIL, dev->nodename, "mac", NULL);
347 return PTR_ERR(macstr);
349 for (i = 0; i < ETH_ALEN; i++) {
350 mac[i] = simple_strtoul(s, &e, 16);
351 if ((s == e) || (*e != ((i == ETH_ALEN-1) ? '\0' : ':'))) {
362 /* Common code used when first setting up, and when resuming. */
363 static int talk_to_backend(struct xenbus_device *dev,
364 struct netfront_info *info)
367 struct xenbus_transaction xbt;
370 err = xen_net_read_mac(dev, info->mac);
372 xenbus_dev_fatal(dev, err, "parsing %s/mac", dev->nodename);
376 /* Create shared ring, alloc event channel. */
377 err = setup_device(dev, info);
381 /* This will load an accelerator if one is configured when the accelerator watch fires. */
383 netfront_accelerator_add_watch(info);
386 err = xenbus_transaction_start(&xbt);
388 xenbus_dev_fatal(dev, err, "starting transaction");
392 err = xenbus_printf(xbt, dev->nodename, "tx-ring-ref","%u",
395 message = "writing tx ring-ref";
396 goto abort_transaction;
398 err = xenbus_printf(xbt, dev->nodename, "rx-ring-ref","%u",
401 message = "writing rx ring-ref";
402 goto abort_transaction;
404 err = xenbus_printf(xbt, dev->nodename,
405 "event-channel", "%u",
406 irq_to_evtchn_port(info->irq));
408 message = "writing event-channel";
409 goto abort_transaction;
412 err = xenbus_printf(xbt, dev->nodename, "request-rx-copy", "%u",
413 info->copying_receiver);
415 message = "writing request-rx-copy";
416 goto abort_transaction;
419 err = xenbus_printf(xbt, dev->nodename, "feature-rx-notify", "%d", 1);
421 message = "writing feature-rx-notify";
422 goto abort_transaction;
425 err = xenbus_printf(xbt, dev->nodename, "feature-no-csum-offload",
426 "%d", !HAVE_CSUM_OFFLOAD);
428 message = "writing feature-no-csum-offload";
429 goto abort_transaction;
432 err = xenbus_printf(xbt, dev->nodename, "feature-sg", "%d", 1);
434 message = "writing feature-sg";
435 goto abort_transaction;
438 err = xenbus_printf(xbt, dev->nodename, "feature-gso-tcpv4", "%d",
441 message = "writing feature-gso-tcpv4";
442 goto abort_transaction;
445 err = xenbus_transaction_end(xbt, 0);
449 xenbus_dev_fatal(dev, err, "completing transaction");
456 xenbus_transaction_end(xbt, 1);
457 xenbus_dev_fatal(dev, err, "%s", message);
459 netfront_accelerator_call_remove(info, dev);
460 netif_disconnect_backend(info);
465 static int setup_device(struct xenbus_device *dev, struct netfront_info *info)
467 struct netif_tx_sring *txs;
468 struct netif_rx_sring *rxs;
470 struct net_device *netdev = info->netdev;
472 info->tx_ring_ref = GRANT_INVALID_REF;
473 info->rx_ring_ref = GRANT_INVALID_REF;
474 info->rx.sring = NULL;
475 info->tx.sring = NULL;
478 txs = (struct netif_tx_sring *)get_zeroed_page(GFP_KERNEL|__GFP_HIGH);
481 xenbus_dev_fatal(dev, err, "allocating tx ring page");
484 SHARED_RING_INIT(txs);
485 FRONT_RING_INIT(&info->tx, txs, PAGE_SIZE);
487 err = xenbus_grant_ring(dev, virt_to_mfn(txs));
489 free_page((unsigned long)txs);
492 info->tx_ring_ref = err;
494 rxs = (struct netif_rx_sring *)get_zeroed_page(GFP_KERNEL|__GFP_HIGH);
497 xenbus_dev_fatal(dev, err, "allocating rx ring page");
500 SHARED_RING_INIT(rxs);
501 FRONT_RING_INIT(&info->rx, rxs, PAGE_SIZE);
503 err = xenbus_grant_ring(dev, virt_to_mfn(rxs));
505 free_page((unsigned long)rxs);
508 info->rx_ring_ref = err;
510 memcpy(netdev->dev_addr, info->mac, ETH_ALEN);
512 err = bind_listening_port_to_irqhandler(
513 dev->otherend_id, netif_int, IRQF_SAMPLE_RANDOM, netdev->name,
526 * Callback received when the backend's state changes.
528 static void backend_changed(struct xenbus_device *dev,
529 enum xenbus_state backend_state)
531 struct netfront_info *np = dev->dev.driver_data;
532 struct net_device *netdev = np->netdev;
534 DPRINTK("%s\n", xenbus_strstate(backend_state));
536 switch (backend_state) {
537 case XenbusStateInitialising:
538 case XenbusStateInitialised:
539 case XenbusStateConnected:
540 case XenbusStateUnknown:
541 case XenbusStateClosed:
544 case XenbusStateInitWait:
545 if (dev->state != XenbusStateInitialising)
547 if (network_connect(netdev) != 0)
549 xenbus_switch_state(dev, XenbusStateConnected);
550 send_fake_arp(netdev);
553 case XenbusStateClosing:
554 xenbus_frontend_closed(dev);
559 /** Send a packet on a net device to encourage switches to learn the
560 * MAC. We broadcast a fake (gratuitous) ARP reply.
565 static void send_fake_arp(struct net_device *dev)
571 dst_ip = INADDR_BROADCAST;
572 src_ip = inet_select_addr(dev, dst_ip, RT_SCOPE_LINK);
574 /* No IP? Then nothing to do. */
578 skb = arp_create(ARPOP_REPLY, ETH_P_ARP,
580 /*dst_hw*/ NULL, /*src_hw*/ NULL,
581 /*target_hw*/ dev->dev_addr);
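/* Report space only while at least MAX_SKB_FRAGS + 2 request slots remain
 * free, so a maximally fragmented packet plus any extra-info slot still fits. */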
589 static inline int netfront_tx_slot_available(struct netfront_info *np)
591 return ((np->tx.req_prod_pvt - np->tx.rsp_cons) <
592 (TX_MAX_TARGET - MAX_SKB_FRAGS - 2));
596 static inline void network_maybe_wake_tx(struct net_device *dev)
598 struct netfront_info *np = netdev_priv(dev);
600 if (unlikely(netif_queue_stopped(dev)) &&
601 netfront_tx_slot_available(np) &&
602 likely(netif_running(dev)) &&
603 netfront_check_accelerator_queue_ready(dev, np))
604 netif_wake_queue(dev);
608 int netfront_check_queue_ready(struct net_device *dev)
610 struct netfront_info *np = netdev_priv(dev);
612 return unlikely(netif_queue_stopped(dev)) &&
613 netfront_tx_slot_available(np) &&
614 likely(netif_running(dev));
616 EXPORT_SYMBOL(netfront_check_queue_ready);
619 static int network_open(struct net_device *dev)
621 struct netfront_info *np = netdev_priv(dev);
623 memset(&np->stats, 0, sizeof(np->stats));
625 spin_lock_bh(&np->rx_lock);
626 if (netfront_carrier_ok(np)) {
627 network_alloc_rx_buffers(dev);
628 np->rx.sring->rsp_event = np->rx.rsp_cons + 1;
629 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)){
630 netfront_accelerator_call_stop_napi_irq(np, dev);
632 netif_rx_schedule(dev, &np->napi);
635 spin_unlock_bh(&np->rx_lock);
637 network_maybe_wake_tx(dev);
642 static void network_tx_buf_gc(struct net_device *dev)
646 struct netfront_info *np = netdev_priv(dev);
649 BUG_ON(!netfront_carrier_ok(np));
652 prod = np->tx.sring->rsp_prod;
653 rmb(); /* Ensure we see responses up to 'prod'. */
655 for (cons = np->tx.rsp_cons; cons != prod; cons++) {
656 struct netif_tx_response *txrsp;
658 txrsp = RING_GET_RESPONSE(&np->tx, cons);
659 if (txrsp->status == NETIF_RSP_NULL)
663 skb = np->tx_skbs[id];
664 if (unlikely(gnttab_query_foreign_access(
665 np->grant_tx_ref[id]) != 0)) {
666 printk(KERN_ALERT "network_tx_buf_gc: warning "
667 "-- grant still in use by backend "
671 gnttab_end_foreign_access_ref(np->grant_tx_ref[id]);
672 gnttab_release_grant_reference(
673 &np->gref_tx_head, np->grant_tx_ref[id]);
674 np->grant_tx_ref[id] = GRANT_INVALID_REF;
675 add_id_to_freelist(np->tx_skbs, id);
676 dev_kfree_skb_irq(skb);
679 np->tx.rsp_cons = prod;
682 * Set a new event, then check for race with update of tx_cons.
683 * Note that it is essential to schedule a callback, no matter
684 * how few buffers are pending. Even if there is space in the
685 * transmit ring, higher layers may be blocked because too much
686 * data is outstanding: in such cases notification from Xen is
687 * likely to be the only kick that we'll get.
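 * The event point below is set roughly halfway through the requests that
 * are still in flight.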
689 np->tx.sring->rsp_event =
690 prod + ((np->tx.sring->req_prod - prod) >> 1) + 1;
692 } while ((cons == prod) && (prod != np->tx.sring->rsp_prod));
694 network_maybe_wake_tx(dev);
697 static void rx_refill_timeout(unsigned long data)
699 struct net_device *dev = (struct net_device *)data;
700 struct netfront_info *np = netdev_priv(dev);
702 netfront_accelerator_call_stop_napi_irq(np, dev);
704 netif_rx_schedule(dev, &np->napi);
707 static void network_alloc_rx_buffers(struct net_device *dev)
710 struct netfront_info *np = netdev_priv(dev);
713 int i, batch_target, notify;
714 RING_IDX req_prod = np->rx.req_prod_pvt;
715 struct xen_memory_reservation reservation;
720 netif_rx_request_t *req;
722 if (unlikely(!netfront_carrier_ok(np)))
726 * Allocate skbuffs greedily, even though we batch updates to the
727 * receive ring. This creates a less bursty demand on the memory
728 * allocator, so should reduce the chance of failed allocation requests
729 * both for ourselves and for other kernel subsystems.
731 batch_target = np->rx_target - (req_prod - np->rx.rsp_cons);
732 for (i = skb_queue_len(&np->rx_batch); i < batch_target; i++) {
734 * Allocate an skb and a page. Do not use __dev_alloc_skb as
735 * that will allocate page-sized buffers which is not
737 * 16 bytes added as necessary headroom for netif_receive_skb.
739 skb = alloc_skb(RX_COPY_THRESHOLD + 16 + NET_IP_ALIGN,
740 GFP_ATOMIC | __GFP_NOWARN);
744 page = alloc_page(GFP_ATOMIC | __GFP_NOWARN);
748 /* Any skbuffs queued for refill? Force them out. */
751 /* Could not allocate any skbuffs. Try again later. */
752 mod_timer(&np->rx_refill_timer,
757 skb_reserve(skb, 16 + NET_IP_ALIGN); /* mimic dev_alloc_skb() */
758 skb_shinfo(skb)->frags[0].page = page;
759 skb_shinfo(skb)->nr_frags = 1;
760 __skb_queue_tail(&np->rx_batch, skb);
763 /* Is the batch large enough to be worthwhile? */
764 if (i < (np->rx_target/2)) {
765 if (req_prod > np->rx.sring->req_prod)
770 /* Adjust our fill target if we risked running out of buffers. */
771 if (((req_prod - np->rx.sring->rsp_prod) < (np->rx_target / 4)) &&
772 ((np->rx_target *= 2) > np->rx_max_target))
773 np->rx_target = np->rx_max_target;
776 for (nr_flips = i = 0; ; i++) {
777 if ((skb = __skb_dequeue(&np->rx_batch)) == NULL)
782 id = xennet_rxidx(req_prod + i);
784 BUG_ON(np->rx_skbs[id]);
785 np->rx_skbs[id] = skb;
787 ref = gnttab_claim_grant_reference(&np->gref_rx_head);
788 BUG_ON((signed short)ref < 0);
789 np->grant_rx_ref[id] = ref;
791 pfn = page_to_pfn(skb_shinfo(skb)->frags[0].page);
792 vaddr = page_address(skb_shinfo(skb)->frags[0].page);
794 req = RING_GET_REQUEST(&np->rx, req_prod + i);
795 if (!np->copying_receiver) {
796 gnttab_grant_foreign_transfer_ref(ref,
797 np->xbdev->otherend_id,
799 np->rx_pfn_array[nr_flips] = pfn_to_mfn(pfn);
800 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
801 /* Remove this page before passing back to Xen. */
803 set_phys_to_machine(pfn, INVALID_P2M_ENTRY);
804 MULTI_update_va_mapping(np->rx_mcl+i,
805 (unsigned long)vaddr,
810 gnttab_grant_foreign_access_ref(ref,
811 np->xbdev->otherend_id,
820 if ( nr_flips != 0 ) {
821 /* Tell the balloon driver what is going on. */
822 balloon_update_driver_allowance(i);
824 set_xen_guest_handle(reservation.extent_start,
826 reservation.nr_extents = nr_flips;
827 reservation.extent_order = 0;
828 reservation.address_bits = 0;
829 reservation.domid = DOMID_SELF;
831 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
832 /* After all PTEs have been zapped, flush the TLB. */
833 np->rx_mcl[i-1].args[MULTI_UVMFLAGS_INDEX] =
834 UVMF_TLB_FLUSH|UVMF_ALL;
836 /* Give away a batch of pages. */
837 np->rx_mcl[i].op = __HYPERVISOR_memory_op;
838 np->rx_mcl[i].args[0] = XENMEM_decrease_reservation;
839 np->rx_mcl[i].args[1] = (unsigned long)&reservation;
841 /* Zap PTEs and give away pages in one big multicall. */
843 if (unlikely(HYPERVISOR_multicall(np->rx_mcl, i+1)))
846 /* Check return status of HYPERVISOR_memory_op(). */
847 if (unlikely(np->rx_mcl[i].result != i))
848 panic("Unable to reduce memory reservation\n");
850 BUG_ON(np->rx_mcl[i].result);
852 if (HYPERVISOR_memory_op(XENMEM_decrease_reservation,
854 panic("Unable to reduce memory reservation\n");
860 /* Above is a suitable barrier to ensure backend will see requests. */
861 np->rx.req_prod_pvt = req_prod + i;
863 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->rx, notify);
865 notify_remote_via_irq(np->irq);
868 static void xennet_make_frags(struct sk_buff *skb, struct net_device *dev,
869 struct netif_tx_request *tx)
871 struct netfront_info *np = netdev_priv(dev);
872 char *data = skb->data;
874 RING_IDX prod = np->tx.req_prod_pvt;
875 int frags = skb_shinfo(skb)->nr_frags;
876 unsigned int offset = offset_in_page(data);
877 unsigned int len = skb_headlen(skb);
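/* Grant any further pages of the linear area, then each fragment, to the
 * backend as additional tx requests chained with NETTXF_more_data. */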
882 while (len > PAGE_SIZE - offset) {
883 tx->size = PAGE_SIZE - offset;
884 tx->flags |= NETTXF_more_data;
889 id = get_id_from_freelist(np->tx_skbs);
890 np->tx_skbs[id] = skb_get(skb);
891 tx = RING_GET_REQUEST(&np->tx, prod++);
893 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
894 BUG_ON((signed short)ref < 0);
896 mfn = virt_to_mfn(data);
897 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
900 tx->gref = np->grant_tx_ref[id] = ref;
906 for (i = 0; i < frags; i++) {
907 skb_frag_t *frag = skb_shinfo(skb)->frags + i;
909 tx->flags |= NETTXF_more_data;
911 id = get_id_from_freelist(np->tx_skbs);
912 np->tx_skbs[id] = skb_get(skb);
913 tx = RING_GET_REQUEST(&np->tx, prod++);
915 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
916 BUG_ON((signed short)ref < 0);
918 mfn = pfn_to_mfn(page_to_pfn(frag->page));
919 gnttab_grant_foreign_access_ref(ref, np->xbdev->otherend_id,
922 tx->gref = np->grant_tx_ref[id] = ref;
923 tx->offset = frag->page_offset;
924 tx->size = frag->size;
928 np->tx.req_prod_pvt = prod;
931 static int network_start_xmit(struct sk_buff *skb, struct net_device *dev)
934 struct netfront_info *np = netdev_priv(dev);
935 struct netif_tx_request *tx;
936 struct netif_extra_info *extra;
937 char *data = skb->data;
942 int frags = skb_shinfo(skb)->nr_frags;
943 unsigned int offset = offset_in_page(data);
944 unsigned int len = skb_headlen(skb);
946 /* Check the fast path, if hooks are available */
947 if (np->accel_vif_state.hooks &&
948 np->accel_vif_state.hooks->start_xmit(skb, dev)) {
949 /* Fast path has sent this packet */
953 frags += (offset + len + PAGE_SIZE - 1) / PAGE_SIZE;
954 if (unlikely(frags > MAX_SKB_FRAGS + 1)) {
955 printk(KERN_ALERT "xennet: skb rides the rocket: %d frags\n",
961 spin_lock_irq(&np->tx_lock);
963 if (unlikely(!netfront_carrier_ok(np) ||
964 (frags > 1 && !xennet_can_sg(dev)) ||
965 netif_needs_gso(dev, skb))) {
966 spin_unlock_irq(&np->tx_lock);
970 i = np->tx.req_prod_pvt;
972 id = get_id_from_freelist(np->tx_skbs);
973 np->tx_skbs[id] = skb;
975 tx = RING_GET_REQUEST(&np->tx, i);
978 ref = gnttab_claim_grant_reference(&np->gref_tx_head);
979 BUG_ON((signed short)ref < 0);
980 mfn = virt_to_mfn(data);
981 gnttab_grant_foreign_access_ref(
982 ref, np->xbdev->otherend_id, mfn, GTF_readonly);
983 tx->gref = np->grant_tx_ref[id] = ref;
990 if (skb->ip_summed == CHECKSUM_PARTIAL) /* local packet? */
991 tx->flags |= NETTXF_csum_blank | NETTXF_data_validated;
993 if (skb->proto_data_valid) /* remote but checksummed? */
994 tx->flags |= NETTXF_data_validated;
998 if (skb_shinfo(skb)->gso_size) {
999 struct netif_extra_info *gso = (struct netif_extra_info *)
1000 RING_GET_REQUEST(&np->tx, ++i);
1003 extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;
1005 tx->flags |= NETTXF_extra_info;
1007 gso->u.gso.size = skb_shinfo(skb)->gso_size;
1008 gso->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
1010 gso->u.gso.features = 0;
1012 gso->type = XEN_NETIF_EXTRA_TYPE_GSO;
1018 np->tx.req_prod_pvt = i + 1;
1020 xennet_make_frags(skb, dev, tx);
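/* tx still points at the first request; its size field carries the total
 * packet length. */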
1021 tx->size = skb->len;
1023 RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(&np->tx, notify);
1025 notify_remote_via_irq(np->irq);
1027 np->stats.tx_bytes += skb->len;
1028 np->stats.tx_packets++;
1030 /* Note: It is not safe to access skb after network_tx_buf_gc()! */
1031 network_tx_buf_gc(dev);
1033 if (!netfront_tx_slot_available(np))
1034 netif_stop_queue(dev);
1036 spin_unlock_irq(&np->tx_lock);
1041 np->stats.tx_dropped++;
1046 static irqreturn_t netif_int(int irq, void *dev_id)
1048 struct net_device *dev = dev_id;
1049 struct netfront_info *np = netdev_priv(dev);
1050 unsigned long flags;
1052 spin_lock_irqsave(&np->tx_lock, flags);
1054 if (likely(netfront_carrier_ok(np))) {
1055 network_tx_buf_gc(dev);
1056 /* Under tx_lock: protects access to rx shared-ring indexes. */
1057 if (RING_HAS_UNCONSUMED_RESPONSES(&np->rx)) {
1058 netfront_accelerator_call_stop_napi_irq(np, dev);
1060 netif_rx_schedule(dev, &np->napi);
1064 spin_unlock_irqrestore(&np->tx_lock, flags);
1069 static void xennet_move_rx_slot(struct netfront_info *np, struct sk_buff *skb,
1072 int new = xennet_rxidx(np->rx.req_prod_pvt);
1074 BUG_ON(np->rx_skbs[new]);
1075 np->rx_skbs[new] = skb;
1076 np->grant_rx_ref[new] = ref;
1077 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->id = new;
1078 RING_GET_REQUEST(&np->rx, np->rx.req_prod_pvt)->gref = ref;
1079 np->rx.req_prod_pvt++;
1082 int xennet_get_extras(struct netfront_info *np,
1083 struct netif_extra_info *extras, RING_IDX rp)
1086 struct netif_extra_info *extra;
1087 RING_IDX cons = np->rx.rsp_cons;
1091 struct sk_buff *skb;
1094 if (unlikely(cons + 1 == rp)) {
1095 if (net_ratelimit())
1096 WPRINTK("Missing extra info\n");
1101 extra = (struct netif_extra_info *)
1102 RING_GET_RESPONSE(&np->rx, ++cons);
1104 if (unlikely(!extra->type ||
1105 extra->type >= XEN_NETIF_EXTRA_TYPE_MAX)) {
1106 if (net_ratelimit())
1107 WPRINTK("Invalid extra type: %d\n",
1111 memcpy(&extras[extra->type - 1], extra,
1115 skb = xennet_get_rx_skb(np, cons);
1116 ref = xennet_get_rx_ref(np, cons);
1117 xennet_move_rx_slot(np, skb, ref);
1118 } while (extra->flags & XEN_NETIF_EXTRA_FLAG_MORE);
1120 np->rx.rsp_cons = cons;
1124 static int xennet_get_responses(struct netfront_info *np,
1125 struct netfront_rx_info *rinfo, RING_IDX rp,
1126 struct sk_buff_head *list,
1127 int *pages_flipped_p)
1129 int pages_flipped = *pages_flipped_p;
1130 struct mmu_update *mmu;
1131 struct multicall_entry *mcl;
1132 struct netif_rx_response *rx = &rinfo->rx;
1133 struct netif_extra_info *extras = rinfo->extras;
1134 RING_IDX cons = np->rx.rsp_cons;
1135 struct sk_buff *skb = xennet_get_rx_skb(np, cons);
1136 grant_ref_t ref = xennet_get_rx_ref(np, cons);
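/* One extra response is acceptable when the first buffer is small enough to
 * be copied entirely into the skb head, since it then occupies no frag slot. */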
1137 int max = MAX_SKB_FRAGS + (rx->status <= RX_COPY_THRESHOLD);
1142 if (rx->flags & NETRXF_extra_info) {
1143 err = xennet_get_extras(np, extras, rp);
1144 cons = np->rx.rsp_cons;
1150 if (unlikely(rx->status < 0 ||
1151 rx->offset + rx->status > PAGE_SIZE)) {
1152 if (net_ratelimit())
1153 WPRINTK("rx->offset: %x, size: %u\n",
1154 rx->offset, rx->status);
1155 xennet_move_rx_slot(np, skb, ref);
1161 * This definitely indicates a bug, either in this driver or in
1162 * the backend driver. In future this should flag the bad
1163 * situation to the system controller to reboot the backend.
1165 if (ref == GRANT_INVALID_REF) {
1166 if (net_ratelimit())
1167 WPRINTK("Bad rx response id %d.\n", rx->id);
1172 if (!np->copying_receiver) {
1173 /* Memory pressure, insufficient buffer
1175 if (!(mfn = gnttab_end_foreign_transfer_ref(ref))) {
1176 if (net_ratelimit())
1177 WPRINTK("Unfulfilled rx req "
1178 "(id=%d, st=%d).\n",
1179 rx->id, rx->status);
1180 xennet_move_rx_slot(np, skb, ref);
1185 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1186 /* Remap the page. */
1188 skb_shinfo(skb)->frags[0].page;
1189 unsigned long pfn = page_to_pfn(page);
1190 void *vaddr = page_address(page);
1192 mcl = np->rx_mcl + pages_flipped;
1193 mmu = np->rx_mmu + pages_flipped;
1195 MULTI_update_va_mapping(mcl,
1196 (unsigned long)vaddr,
1200 mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
1201 | MMU_MACHPHYS_UPDATE;
1204 set_phys_to_machine(pfn, mfn);
1208 ret = gnttab_end_foreign_access_ref(ref);
1212 gnttab_release_grant_reference(&np->gref_rx_head, ref);
1214 __skb_queue_tail(list, skb);
1217 if (!(rx->flags & NETRXF_more_data))
1220 if (cons + frags == rp) {
1221 if (net_ratelimit())
1222 WPRINTK("Need more frags\n");
1227 rx = RING_GET_RESPONSE(&np->rx, cons + frags);
1228 skb = xennet_get_rx_skb(np, cons + frags);
1229 ref = xennet_get_rx_ref(np, cons + frags);
1233 if (unlikely(frags > max)) {
1234 if (net_ratelimit())
1235 WPRINTK("Too many frags\n");
1240 np->rx.rsp_cons = cons + frags;
1242 *pages_flipped_p = pages_flipped;
1247 static RING_IDX xennet_fill_frags(struct netfront_info *np,
1248 struct sk_buff *skb,
1249 struct sk_buff_head *list)
1251 struct skb_shared_info *shinfo = skb_shinfo(skb);
1252 int nr_frags = shinfo->nr_frags;
1253 RING_IDX cons = np->rx.rsp_cons;
1254 skb_frag_t *frag = shinfo->frags + nr_frags;
1255 struct sk_buff *nskb;
1257 while ((nskb = __skb_dequeue(list))) {
1258 struct netif_rx_response *rx =
1259 RING_GET_RESPONSE(&np->rx, ++cons);
1261 frag->page = skb_shinfo(nskb)->frags[0].page;
1262 frag->page_offset = rx->offset;
1263 frag->size = rx->status;
1265 skb->data_len += rx->status;
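/* The page now belongs to the parent skb's frag list; clear nskb's frags so
 * that freeing nskb does not also release the page. */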
1267 skb_shinfo(nskb)->nr_frags = 0;
1274 shinfo->nr_frags = nr_frags;
1278 static int xennet_set_skb_gso(struct sk_buff *skb,
1279 struct netif_extra_info *gso)
1281 if (!gso->u.gso.size) {
1282 if (net_ratelimit())
1283 WPRINTK("GSO size must not be zero.\n");
1287 /* Currently only TCPv4 segmentation offload (S.O.) is supported. */
1288 if (gso->u.gso.type != XEN_NETIF_GSO_TYPE_TCPV4) {
1289 if (net_ratelimit())
1290 WPRINTK("Bad GSO type %d.\n", gso->u.gso.type);
1295 skb_shinfo(skb)->gso_size = gso->u.gso.size;
1297 skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
1299 /* Header must be checked, and gso_segs computed. */
1300 skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
1302 skb_shinfo(skb)->gso_segs = 0;
1306 if (net_ratelimit())
1307 WPRINTK("GSO unsupported by this kernel.\n");
1312 static int netif_poll(struct napi_struct *napi, int budget)
1314 struct netfront_info *np = container_of(napi, struct netfront_info, napi);
1315 struct net_device *dev = np->netdev;
1316 struct sk_buff *skb;
1317 struct netfront_rx_info rinfo;
1318 struct netif_rx_response *rx = &rinfo.rx;
1319 struct netif_extra_info *extras = rinfo.extras;
1321 struct multicall_entry *mcl;
1322 int work_done, more_to_do = 1, accel_more_to_do = 1;
1323 struct sk_buff_head rxq;
1324 struct sk_buff_head errq;
1325 struct sk_buff_head tmpq;
1326 unsigned long flags;
1328 int pages_flipped = 0;
1331 spin_lock(&np->rx_lock); /* no need for spin_lock_bh() in ->poll() */
1333 if (unlikely(!netfront_carrier_ok(np))) {
1334 spin_unlock(&np->rx_lock);
1338 skb_queue_head_init(&rxq);
1339 skb_queue_head_init(&errq);
1340 skb_queue_head_init(&tmpq);
1342 rp = np->rx.sring->rsp_prod;
1343 rmb(); /* Ensure we see queued responses up to 'rp'. */
1345 i = np->rx.rsp_cons;
1347 while ((i != rp) && (work_done < budget)) {
1348 memcpy(rx, RING_GET_RESPONSE(&np->rx, i), sizeof(*rx));
1349 memset(extras, 0, sizeof(rinfo.extras));
1351 err = xennet_get_responses(np, &rinfo, rp, &tmpq,
1354 if (unlikely(err)) {
1356 while ((skb = __skb_dequeue(&tmpq)))
1357 __skb_queue_tail(&errq, skb);
1358 np->stats.rx_errors++;
1359 i = np->rx.rsp_cons;
1363 skb = __skb_dequeue(&tmpq);
1365 if (extras[XEN_NETIF_EXTRA_TYPE_GSO - 1].type) {
1366 struct netif_extra_info *gso;
1367 gso = &extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];
1369 if (unlikely(xennet_set_skb_gso(skb, gso))) {
1370 __skb_queue_head(&tmpq, skb);
1371 np->rx.rsp_cons += skb_queue_len(&tmpq);
1376 NETFRONT_SKB_CB(skb)->page = skb_shinfo(skb)->frags[0].page;
1377 NETFRONT_SKB_CB(skb)->offset = rx->offset;
1380 if (len > RX_COPY_THRESHOLD)
1381 len = RX_COPY_THRESHOLD;
1384 if (rx->status > len) {
1385 skb_shinfo(skb)->frags[0].page_offset =
1387 skb_shinfo(skb)->frags[0].size = rx->status - len;
1388 skb->data_len = rx->status - len;
1390 skb_shinfo(skb)->frags[0].page = NULL;
1391 skb_shinfo(skb)->nr_frags = 0;
1394 i = xennet_fill_frags(np, skb, &tmpq);
1397 * Truesize must approximate the size of true data plus
1398 * any supervisor overheads. Adding hypervisor overheads
1399 * has been shown to significantly reduce achievable
1400 * bandwidth with the default receive buffer size. It is
1401 * therefore not wise to account for it here.
1403 * After alloc_skb(RX_COPY_THRESHOLD), truesize is set to
1404 * RX_COPY_THRESHOLD + the supervisor overheads. Here, we
1405 * add the size of the data pulled in xennet_fill_frags().
1407 * We also adjust for any unused space in the main data
1408 * area by subtracting (RX_COPY_THRESHOLD - len). This is
1409 * especially important with drivers which split incoming
1410 * packets into header and data, using only 66 bytes of
1411 * the main data area (see the e1000 driver for example.)
1412 * On such systems, without this last adjustment, our
1413 * achievable receive throughput using the standard receive
1414 * buffer size was cut by 25%(!!!).
1416 skb->truesize += skb->data_len - (RX_COPY_THRESHOLD - len);
1417 skb->len += skb->data_len;
1420 * Old backends do not assert data_validated but we
1421 * can infer it from csum_blank so test both flags.
1423 if (rx->flags & (NETRXF_data_validated|NETRXF_csum_blank))
1424 skb->ip_summed = CHECKSUM_UNNECESSARY;
1426 skb->ip_summed = CHECKSUM_NONE;
1428 skb->proto_data_valid = (skb->ip_summed != CHECKSUM_NONE);
1429 skb->proto_csum_blank = !!(rx->flags & NETRXF_csum_blank);
1431 np->stats.rx_packets++;
1432 np->stats.rx_bytes += skb->len;
1434 __skb_queue_tail(&rxq, skb);
1436 np->rx.rsp_cons = ++i;
1440 if (pages_flipped) {
1441 /* Some pages are no longer absent... */
1442 balloon_update_driver_allowance(-pages_flipped);
1444 /* Do all the remapping work and M2P updates. */
1445 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1446 mcl = np->rx_mcl + pages_flipped;
1447 mcl->op = __HYPERVISOR_mmu_update;
1448 mcl->args[0] = (unsigned long)np->rx_mmu;
1449 mcl->args[1] = pages_flipped;
1451 mcl->args[3] = DOMID_SELF;
1452 err = HYPERVISOR_multicall_check(np->rx_mcl,
1459 while ((skb = __skb_dequeue(&errq)))
1462 while ((skb = __skb_dequeue(&rxq)) != NULL) {
1463 struct page *page = NETFRONT_SKB_CB(skb)->page;
1464 void *vaddr = page_address(page);
1465 unsigned offset = NETFRONT_SKB_CB(skb)->offset;
1467 memcpy(skb->data, vaddr + offset, skb_headlen(skb));
1469 if (page != skb_shinfo(skb)->frags[0].page)
1472 /* Ethernet work: Delayed to here as it peeks the header. */
1473 skb->protocol = eth_type_trans(skb, dev);
1476 netif_receive_skb(skb);
1477 dev->last_rx = jiffies;
1480 /* If we get a callback with very few responses, reduce fill target. */
1481 /* NB. Note exponential increase, linear decrease. */
1482 if (((np->rx.req_prod_pvt - np->rx.sring->rsp_prod) >
1483 ((3*np->rx_target) / 4)) &&
1484 (--np->rx_target < np->rx_min_target))
1485 np->rx_target = np->rx_min_target;
1487 network_alloc_rx_buffers(dev);
1489 if (work_done < budget) {
1490 /* there's some spare capacity, try the accelerated path */
1491 int accel_budget = budget - work_done;
1492 int accel_budget_start = accel_budget;
1494 if (np->accel_vif_state.hooks) {
1496 np->accel_vif_state.hooks->netdev_poll
1497 (dev, &accel_budget);
1498 work_done += (accel_budget_start - accel_budget);
1500 accel_more_to_do = 0;
1503 if (work_done < budget) {
1504 local_irq_save(flags);
1506 RING_FINAL_CHECK_FOR_RESPONSES(&np->rx, more_to_do);
1508 if (!more_to_do && !accel_more_to_do &&
1509 np->accel_vif_state.hooks) {
1511 * Slow path has nothing more to do, see if
1512 * fast path is likewise
1515 np->accel_vif_state.hooks->start_napi_irq(dev);
1518 if (!more_to_do && !accel_more_to_do)
1519 __netif_rx_complete(dev, napi);
1521 local_irq_restore(flags);
1524 spin_unlock(&np->rx_lock);
1529 static void netif_release_tx_bufs(struct netfront_info *np)
1531 struct sk_buff *skb;
1534 for (i = 1; i <= NET_TX_RING_SIZE; i++) {
1535 if ((unsigned long)np->tx_skbs[i] < PAGE_OFFSET)
1538 skb = np->tx_skbs[i];
1539 gnttab_end_foreign_access_ref(np->grant_tx_ref[i]);
1540 gnttab_release_grant_reference(
1541 &np->gref_tx_head, np->grant_tx_ref[i]);
1542 np->grant_tx_ref[i] = GRANT_INVALID_REF;
1543 add_id_to_freelist(np->tx_skbs, i);
1544 dev_kfree_skb_irq(skb);
1548 static void netif_release_rx_bufs_flip(struct netfront_info *np)
1550 struct mmu_update *mmu = np->rx_mmu;
1551 struct multicall_entry *mcl = np->rx_mcl;
1552 struct sk_buff_head free_list;
1553 struct sk_buff *skb;
1555 int xfer = 0, noxfer = 0, unused = 0;
1558 skb_queue_head_init(&free_list);
1560 spin_lock_bh(&np->rx_lock);
1562 for (id = 0; id < NET_RX_RING_SIZE; id++) {
1563 if ((ref = np->grant_rx_ref[id]) == GRANT_INVALID_REF) {
1568 skb = np->rx_skbs[id];
1569 mfn = gnttab_end_foreign_transfer_ref(ref);
1570 gnttab_release_grant_reference(&np->gref_rx_head, ref);
1571 np->grant_rx_ref[id] = GRANT_INVALID_REF;
1572 add_id_to_freelist(np->rx_skbs, id);
1575 struct page *page = skb_shinfo(skb)->frags[0].page;
1576 balloon_release_driver_page(page);
1577 skb_shinfo(skb)->nr_frags = 0;
1583 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1584 /* Remap the page. */
1585 struct page *page = skb_shinfo(skb)->frags[0].page;
1586 unsigned long pfn = page_to_pfn(page);
1587 void *vaddr = page_address(page);
1589 MULTI_update_va_mapping(mcl, (unsigned long)vaddr,
1590 pfn_pte_ma(mfn, PAGE_KERNEL),
1593 mmu->ptr = ((maddr_t)mfn << PAGE_SHIFT)
1594 | MMU_MACHPHYS_UPDATE;
1598 set_phys_to_machine(pfn, mfn);
1600 __skb_queue_tail(&free_list, skb);
1604 DPRINTK("%s: %d xfer, %d noxfer, %d unused\n",
1605 __FUNCTION__, xfer, noxfer, unused);
1608 /* Some pages are no longer absent... */
1609 balloon_update_driver_allowance(-xfer);
1611 if (!xen_feature(XENFEAT_auto_translated_physmap)) {
1612 /* Do all the remapping work and M2P updates. */
1613 mcl->op = __HYPERVISOR_mmu_update;
1614 mcl->args[0] = (unsigned long)np->rx_mmu;
1615 mcl->args[1] = mmu - np->rx_mmu;
1617 mcl->args[3] = DOMID_SELF;
1619 rc = HYPERVISOR_multicall_check(
1620 np->rx_mcl, mcl - np->rx_mcl, NULL);
1625 while ((skb = __skb_dequeue(&free_list)) != NULL)
1628 spin_unlock_bh(&np->rx_lock);
1631 static void netif_release_rx_bufs_copy(struct netfront_info *np)
1633 struct sk_buff *skb;
1635 int busy = 0, inuse = 0;
1637 spin_lock_bh(&np->rx_lock);
1639 for (i = 0; i < NET_RX_RING_SIZE; i++) {
1640 ref = np->grant_rx_ref[i];
1642 if (ref == GRANT_INVALID_REF)
1647 skb = np->rx_skbs[i];
1649 if (!gnttab_end_foreign_access_ref(ref))
1655 gnttab_release_grant_reference(&np->gref_rx_head, ref);
1656 np->grant_rx_ref[i] = GRANT_INVALID_REF;
1657 add_id_to_freelist(np->rx_skbs, i);
1659 skb_shinfo(skb)->nr_frags = 0;
1664 DPRINTK("%s: Unable to release %d of %d inuse grant references out of %ld total.\n",
1665 __FUNCTION__, busy, inuse, NET_RX_RING_SIZE);
1667 spin_unlock_bh(&np->rx_lock);
1670 static int network_close(struct net_device *dev)
1672 struct netfront_info *np = netdev_priv(dev);
1673 netif_stop_queue(np->netdev);
1678 static struct net_device_stats *network_get_stats(struct net_device *dev)
1680 struct netfront_info *np = netdev_priv(dev);
1682 netfront_accelerator_call_get_stats(np, dev);
1686 static int xennet_change_mtu(struct net_device *dev, int mtu)
1688 int max = xennet_can_sg(dev) ? 65535 - ETH_HLEN : ETH_DATA_LEN;
1696 static int xennet_set_sg(struct net_device *dev, u32 data)
1699 struct netfront_info *np = netdev_priv(dev);
1702 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend, "feature-sg",
1707 } else if (dev->mtu > ETH_DATA_LEN)
1708 dev->mtu = ETH_DATA_LEN;
1710 return ethtool_op_set_sg(dev, data);
1713 static int xennet_set_tso(struct net_device *dev, u32 data)
1716 struct netfront_info *np = netdev_priv(dev);
1719 if (xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1720 "feature-gso-tcpv4", "%d", &val) < 0)
1726 return ethtool_op_set_tso(dev, data);
1729 static void xennet_set_features(struct net_device *dev)
1731 dev_disable_gso_features(dev);
1732 xennet_set_sg(dev, 0);
1734 /* We need checksum offload to enable scatter/gather and TSO. */
1735 if (!(dev->features & NETIF_F_IP_CSUM))
1738 if (xennet_set_sg(dev, 1))
1741 /* Before 2.6.9 TSO seems to be unreliable so do not enable it
1744 if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,9))
1745 xennet_set_tso(dev, 1);
1748 static int network_connect(struct net_device *dev)
1750 struct netfront_info *np = netdev_priv(dev);
1751 int i, requeue_idx, err;
1752 struct sk_buff *skb;
1754 netif_rx_request_t *req;
1755 unsigned int feature_rx_copy, feature_rx_flip;
1757 err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1758 "feature-rx-copy", "%u", &feature_rx_copy);
1760 feature_rx_copy = 0;
1761 err = xenbus_scanf(XBT_NIL, np->xbdev->otherend,
1762 "feature-rx-flip", "%u", &feature_rx_flip);
1764 feature_rx_flip = 1;
1767 * Copy packets on receive path if:
1768 * (a) This was requested by user, and the backend supports it; or
1769 * (b) Flipping was requested, but this is unsupported by the backend.
1771 np->copying_receiver = ((MODPARM_rx_copy && feature_rx_copy) ||
1772 (MODPARM_rx_flip && !feature_rx_flip));
1774 err = talk_to_backend(np->xbdev, np);
1778 xennet_set_features(dev);
1780 DPRINTK("device %s has %sing receive path.\n",
1781 dev->name, np->copying_receiver ? "copy" : "flipp");
1783 spin_lock_bh(&np->rx_lock);
1784 spin_lock_irq(&np->tx_lock);
1787 * Recovery procedure:
1788 * NB. Freelist index entries are always going to be less than
1789 * PAGE_OFFSET, whereas pointers to skbs will always be equal or
1790 * greater than PAGE_OFFSET: we use this property to distinguish
1794 /* Step 1: Discard all pending TX packet fragments. */
1795 netif_release_tx_bufs(np);
1797 /* Step 2: Rebuild the RX buffer freelist and the RX ring itself. */
1798 for (requeue_idx = 0, i = 0; i < NET_RX_RING_SIZE; i++) {
1799 if (!np->rx_skbs[i])
1802 skb = np->rx_skbs[requeue_idx] = xennet_get_rx_skb(np, i);
1803 ref = np->grant_rx_ref[requeue_idx] = xennet_get_rx_ref(np, i);
1804 req = RING_GET_REQUEST(&np->rx, requeue_idx);
1806 if (!np->copying_receiver) {
1807 gnttab_grant_foreign_transfer_ref(
1808 ref, np->xbdev->otherend_id,
1809 page_to_pfn(skb_shinfo(skb)->frags->page));
1811 gnttab_grant_foreign_access_ref(
1812 ref, np->xbdev->otherend_id,
1813 pfn_to_mfn(page_to_pfn(skb_shinfo(skb)->
1818 req->id = requeue_idx;
1823 np->rx.req_prod_pvt = requeue_idx;
1826 * Step 3: All public and private state should now be sane. Get
1827 * ready to start sending and receiving packets and give the driver
1828 * domain a kick because we've probably just requeued some packets.
1831 netfront_carrier_on(np);
1832 notify_remote_via_irq(np->irq);
1833 network_tx_buf_gc(dev);
1834 network_alloc_rx_buffers(dev);
1836 spin_unlock_irq(&np->tx_lock);
1837 spin_unlock_bh(&np->rx_lock);
1842 static void netif_uninit(struct net_device *dev)
1844 struct netfront_info *np = netdev_priv(dev);
1845 netif_release_tx_bufs(np);
1846 if (np->copying_receiver)
1847 netif_release_rx_bufs_copy(np);
1849 netif_release_rx_bufs_flip(np);
1850 gnttab_free_grant_references(np->gref_tx_head);
1851 gnttab_free_grant_references(np->gref_rx_head);
1854 static struct ethtool_ops network_ethtool_ops =
1856 .get_tx_csum = ethtool_op_get_tx_csum,
1857 .set_tx_csum = ethtool_op_set_tx_csum,
1858 .get_sg = ethtool_op_get_sg,
1859 .set_sg = xennet_set_sg,
1861 .get_tso = ethtool_op_get_tso,
1862 .set_tso = xennet_set_tso,
1864 .get_link = ethtool_op_get_link,
1868 static ssize_t show_rxbuf_min(struct device *dev,
1869 struct device_attribute *attr, char *buf)
1871 struct netfront_info *info = netdev_priv(to_net_dev(dev));
1873 return sprintf(buf, "%u\n", info->rx_min_target);
1876 static ssize_t store_rxbuf_min(struct device *dev,
1877 struct device_attribute *attr,
1878 const char *buf, size_t len)
1880 struct net_device *netdev = to_net_dev(dev);
1881 struct netfront_info *np = netdev_priv(netdev);
1883 unsigned long target;
1885 if (!capable(CAP_NET_ADMIN))
1888 target = simple_strtoul(buf, &endp, 0);
1892 if (target < RX_MIN_TARGET)
1893 target = RX_MIN_TARGET;
1894 if (target > RX_MAX_TARGET)
1895 target = RX_MAX_TARGET;
1897 spin_lock_bh(&np->rx_lock);
1898 if (target > np->rx_max_target)
1899 np->rx_max_target = target;
1900 np->rx_min_target = target;
1901 if (target > np->rx_target)
1902 np->rx_target = target;
1904 network_alloc_rx_buffers(netdev);
1906 spin_unlock_bh(&np->rx_lock);
1910 static ssize_t show_rxbuf_max(struct device *dev,
1911 struct device_attribute *attr, char *buf)
1913 struct netfront_info *info = netdev_priv(to_net_dev(dev));
1915 return sprintf(buf, "%u\n", info->rx_max_target);
1918 static ssize_t store_rxbuf_max(struct device *dev,
1919 struct device_attribute *attr,
1920 const char *buf, size_t len)
1922 struct net_device *netdev = to_net_dev(dev);
1923 struct netfront_info *np = netdev_priv(netdev);
1925 unsigned long target;
1927 if (!capable(CAP_NET_ADMIN))
1930 target = simple_strtoul(buf, &endp, 0);
1934 if (target < RX_MIN_TARGET)
1935 target = RX_MIN_TARGET;
1936 if (target > RX_MAX_TARGET)
1937 target = RX_MAX_TARGET;
1939 spin_lock_bh(&np->rx_lock);
1940 if (target < np->rx_min_target)
1941 np->rx_min_target = target;
1942 np->rx_max_target = target;
1943 if (target < np->rx_target)
1944 np->rx_target = target;
1946 network_alloc_rx_buffers(netdev);
1948 spin_unlock_bh(&np->rx_lock);
1952 static ssize_t show_rxbuf_cur(struct device *dev,
1953 struct device_attribute *attr, char *buf)
1955 struct netfront_info *info = netdev_priv(to_net_dev(dev));
1957 return sprintf(buf, "%u\n", info->rx_target);
1960 static struct device_attribute xennet_attrs[] = {
1961 __ATTR(rxbuf_min, S_IRUGO|S_IWUSR, show_rxbuf_min, store_rxbuf_min),
1962 __ATTR(rxbuf_max, S_IRUGO|S_IWUSR, show_rxbuf_max, store_rxbuf_max),
1963 __ATTR(rxbuf_cur, S_IRUGO, show_rxbuf_cur, NULL),
1966 static int xennet_sysfs_addif(struct net_device *netdev)
1971 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++) {
1972 error = device_create_file(&netdev->dev,
1981 device_remove_file(&netdev->dev, &xennet_attrs[i]);
1985 static void xennet_sysfs_delif(struct net_device *netdev)
1989 for (i = 0; i < ARRAY_SIZE(xennet_attrs); i++)
1990 device_remove_file(&netdev->dev, &xennet_attrs[i]);
1993 #endif /* CONFIG_SYSFS */
1997 * Nothing to do here. Virtual interface is point-to-point and the
1998 * physical interface is probably promiscuous anyway.
2000 static void network_set_multicast_list(struct net_device *dev)
2004 static struct net_device * __devinit create_netdev(struct xenbus_device *dev)
2007 struct net_device *netdev = NULL;
2008 struct netfront_info *np = NULL;
2010 netdev = alloc_etherdev(sizeof(struct netfront_info));
2012 printk(KERN_WARNING "%s> alloc_etherdev failed.\n",
2014 return ERR_PTR(-ENOMEM);
2017 np = netdev_priv(netdev);
2020 spin_lock_init(&np->tx_lock);
2021 spin_lock_init(&np->rx_lock);
2023 init_accelerator_vif(np, dev);
2025 skb_queue_head_init(&np->rx_batch);
2026 np->rx_target = RX_DFL_MIN_TARGET;
2027 np->rx_min_target = RX_DFL_MIN_TARGET;
2028 np->rx_max_target = RX_MAX_TARGET;
2030 init_timer(&np->rx_refill_timer);
2031 np->rx_refill_timer.data = (unsigned long)netdev;
2032 np->rx_refill_timer.function = rx_refill_timeout;
2034 /* Initialise {tx,rx}_skbs as a free chain containing every entry. */
2035 for (i = 0; i <= NET_TX_RING_SIZE; i++) {
2036 np->tx_skbs[i] = (void *)((unsigned long) i+1);
2037 np->grant_tx_ref[i] = GRANT_INVALID_REF;
2040 for (i = 0; i < NET_RX_RING_SIZE; i++) {
2041 np->rx_skbs[i] = NULL;
2042 np->grant_rx_ref[i] = GRANT_INVALID_REF;
2045 /* A grant for every tx ring slot */
2046 if (gnttab_alloc_grant_references(TX_MAX_TARGET,
2047 &np->gref_tx_head) < 0) {
2048 printk(KERN_ALERT "#### netfront can't alloc tx grant refs\n");
2052 /* A grant for every rx ring slot */
2053 if (gnttab_alloc_grant_references(RX_MAX_TARGET,
2054 &np->gref_rx_head) < 0) {
2055 printk(KERN_ALERT "#### netfront can't alloc rx grant refs\n");
2060 netdev->open = network_open;
2061 netdev->hard_start_xmit = network_start_xmit;
2062 netdev->stop = network_close;
2063 netdev->get_stats = network_get_stats;
2064 netif_napi_add(netdev, &np->napi, netif_poll, 64);
2065 netdev->set_multicast_list = network_set_multicast_list;
2066 netdev->uninit = netif_uninit;
2067 netdev->change_mtu = xennet_change_mtu;
2068 netdev->features = NETIF_F_IP_CSUM;
2070 SET_ETHTOOL_OPS(netdev, &network_ethtool_ops);
2071 SET_NETDEV_DEV(netdev, &dev->dev);
2073 np->netdev = netdev;
2075 netfront_carrier_off(np);
2080 gnttab_free_grant_references(np->gref_tx_head);
2082 free_netdev(netdev);
2083 return ERR_PTR(err);
2088 * We use this notifier to send out a fake ARP reply to reset switches and
2089 * router ARP caches when an IP interface is brought up on a VIF.
2092 inetdev_notify(struct notifier_block *this, unsigned long event, void *ptr)
2094 struct in_ifaddr *ifa = (struct in_ifaddr *)ptr;
2095 struct net_device *dev = ifa->ifa_dev->dev;
2097 /* UP event and is it one of our devices? */
2098 if (event == NETDEV_UP && dev->open == network_open)
2104 static struct notifier_block notifier_inetdev = {
2105 .notifier_call = inetdev_notify,
2112 static void netif_disconnect_backend(struct netfront_info *info)
2114 /* Stop old i/f to prevent errors whilst we rebuild the state. */
2115 spin_lock_bh(&info->rx_lock);
2116 spin_lock_irq(&info->tx_lock);
2117 netfront_carrier_off(info);
2118 spin_unlock_irq(&info->tx_lock);
2119 spin_unlock_bh(&info->rx_lock);
2122 unbind_from_irqhandler(info->irq, info->netdev);
2125 end_access(info->tx_ring_ref, info->tx.sring);
2126 end_access(info->rx_ring_ref, info->rx.sring);
2127 info->tx_ring_ref = GRANT_INVALID_REF;
2128 info->rx_ring_ref = GRANT_INVALID_REF;
2129 info->tx.sring = NULL;
2130 info->rx.sring = NULL;
2134 static void end_access(int ref, void *page)
2136 if (ref != GRANT_INVALID_REF)
2137 gnttab_end_foreign_access(ref, (unsigned long)page);
2141 /* ** Driver registration ** */
2144 static struct xenbus_device_id netfront_ids[] = {
2148 MODULE_ALIAS("xen:vif");
2151 static struct xenbus_driver netfront = {
2153 .ids = netfront_ids,
2154 .probe = netfront_probe,
2155 .remove = __devexit_p(netfront_remove),
2156 .suspend = netfront_suspend,
2157 .suspend_cancel = netfront_suspend_cancel,
2158 .resume = netfront_resume,
2159 .otherend_changed = backend_changed,
2163 static int __init netif_init(void)
2165 if (!is_running_on_xen())
2169 if (MODPARM_rx_flip && MODPARM_rx_copy) {
2170 WPRINTK("Cannot specify both rx_copy and rx_flip.\n");
2174 if (!MODPARM_rx_flip && !MODPARM_rx_copy)
2175 MODPARM_rx_flip = 1; /* Default is to flip. */
2178 if (is_initial_xendomain())
2183 IPRINTK("Initialising virtual ethernet driver.\n");
2186 (void)register_inetaddr_notifier(¬ifier_inetdev);
2189 return xenbus_register_frontend(&netfront);
2191 module_init(netif_init);
2194 static void __exit netif_exit(void)
2196 if (is_initial_xendomain())
2200 unregister_inetaddr_notifier(¬ifier_inetdev);
2205 return xenbus_unregister_driver(&netfront);
2207 module_exit(netif_exit);
2209 MODULE_LICENSE("Dual BSD/GPL");