1 /****************************************************************************
2 * Solarflare driver for Xen network acceleration
4 * Copyright 2006-2008: Solarflare Communications Inc,
5 * 9501 Jeronimo Road, Suite 250,
6 * Irvine, CA 92618, USA
8 * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published
12 * by the Free Software Foundation, incorporated herein by reference.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 ****************************************************************************/
25 #include <linux/module.h>
26 #include <linux/skbuff.h>
27 #include <linux/netdevice.h>
29 /* drivers/xen/netfront/netfront.h */
33 #include "accel_bufs.h"
34 #include "accel_util.h"
35 #include "accel_msg_iface.h"
36 #include "accel_ssr.h"
/*
 * Map a struct net_device to its acceleration state: netfront stores
 * our per-vnic private pointer in netfront_info->accel_priv.
 *
 * Fix: the macro parameter is _nd, but the body previously expanded the
 * caller-scope identifier net_dev.  That only worked because every call
 * site happened to pass a local named net_dev; any other argument name
 * would fail to compile (or silently capture the wrong variable).  The
 * body now uses the parameter, parenthesized per macro-hygiene rules.
 */
#define NETFRONT_ACCEL_VNIC_FROM_NETDEV(_nd)				\
	((netfront_accel_vnic *)					\
	 ((struct netfront_info *)netdev_priv(_nd))->accel_priv)
/*
 * Fast-path transmit hook installed into netfront.  Posts the skb on
 * the accelerated virtual interface (VI); if the VI is BUSY the netdev
 * queue is stopped (under both netfront's and the vnic's tx locks)
 * until the VI drains and the queue is woken elsewhere.
 *
 * NOTE(review): this extract is truncated -- the opening brace, the
 * body of the !tx_enabled branch, the expected "vnic->tx_skb = skb;"
 * store, the CANT-status handling and the return statements are not
 * visible.  Confirm the control flow against the pristine source.
 */
45 static int netfront_accel_netdev_start_xmit(struct sk_buff *skb,
46 struct net_device *net_dev)
48 netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
49 struct netfront_info *np =
50 (struct netfront_info *)netdev_priv(net_dev);
52 unsigned long flags1, flags2;
56 /* Take our tx lock and hold for the duration */
57 spin_lock_irqsave(&vnic->tx_lock, flags1);
/* Fast path disabled (e.g. mid-teardown): presumably falls back to
 * netfront's slow path -- branch body not visible in this extract. */
59 if (!vnic->tx_enabled) {
64 handled = netfront_accel_vi_tx_post(vnic, skb);
65 if (handled == NETFRONT_ACCEL_STATUS_BUSY) {
66 BUG_ON(vnic->net_dev != net_dev);
67 DPRINTK("%s stopping queue\n", __FUNCTION__);
69 /* Need netfront's tx_lock and vnic tx_lock to write tx_skb */
70 spin_lock_irqsave(&np->tx_lock, flags2);
71 BUG_ON(vnic->tx_skb != NULL);
/* NOTE(review): the store parking the skb (tx_skb = skb) appears to
 * be missing between these lines -- verify. */
73 netif_stop_queue(net_dev);
74 spin_unlock_irqrestore(&np->tx_lock, flags2);
76 NETFRONT_ACCEL_STATS_OP(vnic->stats.queue_stops++);
/* CANT: the VI cannot take this skb at all; handling not visible. */
79 if (handled == NETFRONT_ACCEL_STATUS_CANT)
85 spin_unlock_irqrestore(&vnic->tx_lock, flags1);
/*
 * Old-style NAPI poll hook: process up to *budget received packets on
 * the accelerated VI, then flush the soft segment-reassembly (SSR)
 * state at the end of the burst.  Returns 1 while more work remains.
 *
 * NOTE(review): truncated extract -- the !poll_enabled early exit, the
 * *budget/quota accounting and the final return are not visible here.
 */
91 static int netfront_accel_netdev_poll(struct net_device *net_dev, int *budget)
93 netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
94 int rx_allowed = *budget, rx_done;
98 /* Can check this without lock as modifier excludes polls */
99 if (!vnic->poll_enabled)
102 rx_done = netfront_accel_vi_poll(vnic, rx_allowed);
105 NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_call_count++);
107 VPRINTK("%s: done %d allowed %d\n",
108 __FUNCTION__, rx_done, rx_allowed);
/* End of RX burst: push any packets held back for reassembly */
110 netfront_accel_ssr_end_of_burst(vnic, &vnic->ssr_state);
/* Did not use the whole budget -- presumably completes the poll and
 * returns 0 in the missing lines. */
112 if (rx_done < rx_allowed) {
116 NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_reschedule_count++);
118 return 1; /* More to do. */
123 * Process request from netfront to start napi interrupt
124 * mode. (i.e. enable interrupts as it's finished polling)
/*
 * NOTE(review): truncated extract -- the local "flags" declaration,
 * the !poll_enabled early return, and the "more work found" return
 * after a failed enable are not visible here.
 */
126 static int netfront_accel_start_napi_interrupts(struct net_device *net_dev)
128 netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
131 BUG_ON(vnic == NULL);
134 * Can check this without lock as writer excludes poll before
137 if (!vnic->poll_enabled)
/* Events were already pending: caller must poll again before
 * interrupts can safely be enabled (branch body not visible). */
140 if (!netfront_accel_vi_enable_interrupts(vnic)) {
142 * There was something there, tell caller we had
/* Flag + enable under irq_enabled_lock so this cannot race with the
 * stop path below. */
148 spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
149 vnic->irq_enabled = 1;
150 netfront_accel_enable_net_interrupts(vnic);
151 spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
158 * Process request from netfront to stop napi interrupt
159 * mode. (i.e. disable interrupts as it's starting to poll
/*
 * NOTE(review): truncated extract -- the local "flags" declaration and
 * the early-return body are not visible here.
 */
161 static void netfront_accel_stop_napi_interrupts(struct net_device *net_dev)
163 netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
166 BUG_ON(vnic == NULL);
168 spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
/* Fast path not active: nothing to mask, drop the lock and leave */
170 if (!vnic->poll_enabled) {
171 spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
/* Mask hardware events and record the state, all under the lock */
175 netfront_accel_disable_net_interrupts(vnic);
176 vnic->irq_enabled = 0;
177 spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
/*
 * Report whether the accelerated path can accept another transmit:
 * ready (non-zero) when no skb is parked waiting for the VI to drain.
 * Called by netfront with its tx_lock held.
 */
181 static int netfront_accel_check_ready(struct net_device *net_dev)
183 netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
185 BUG_ON(vnic == NULL);
187 /* Read of tx_skb is protected by netfront's tx_lock */
188 return vnic->tx_skb == NULL;
/*
 * Fold the fast-path packet/byte/error counters accumulated since the
 * previous call into netfront's stats structures.  Delta scheme:
 * snapshot the live per-vnic counters into "now", add the difference
 * (now - stats_last_read) to the caller's running totals, then save
 * the snapshot as the new baseline.  Packets/bytes go into the link
 * stats (lnkst), errors into the net_device stats (devst).
 *
 * NOTE(review): the snapshot is taken without a lock -- presumably
 * callers serialize stats reads; confirm against the caller.
 */
192 static int netfront_accel_get_stats(struct net_device *net_dev,
193 struct net_device_stats *devst,
194 struct netfront_stats *lnkst)
196 netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
197 struct netfront_accel_netdev_stats now;
199 BUG_ON(vnic == NULL);
/* Snapshot the live counters */
201 now.fastpath_rx_pkts = vnic->netdev_stats.fastpath_rx_pkts;
202 now.fastpath_rx_bytes = vnic->netdev_stats.fastpath_rx_bytes;
203 now.fastpath_rx_errors = vnic->netdev_stats.fastpath_rx_errors;
204 now.fastpath_tx_pkts = vnic->netdev_stats.fastpath_tx_pkts;
205 now.fastpath_tx_bytes = vnic->netdev_stats.fastpath_tx_bytes;
206 now.fastpath_tx_errors = vnic->netdev_stats.fastpath_tx_errors;
/* Accumulate only what changed since the last read */
208 lnkst->rx_packets += (now.fastpath_rx_pkts -
209 vnic->stats_last_read.fastpath_rx_pkts);
210 lnkst->rx_bytes += (now.fastpath_rx_bytes -
211 vnic->stats_last_read.fastpath_rx_bytes);
212 devst->rx_errors += (now.fastpath_rx_errors -
213 vnic->stats_last_read.fastpath_rx_errors);
214 lnkst->tx_packets += (now.fastpath_tx_pkts -
215 vnic->stats_last_read.fastpath_tx_pkts);
216 lnkst->tx_bytes += (now.fastpath_tx_bytes -
217 vnic->stats_last_read.fastpath_tx_bytes);
218 devst->tx_errors += (now.fastpath_tx_errors -
219 vnic->stats_last_read.fastpath_tx_errors);
/* New baseline for the next delta */
221 vnic->stats_last_read = now;
/*
 * Hook table registered with netfront (see netfront_accelerator_loaded
 * in init below); these entry points implement the accelerated fast
 * path for device probe/remove, poll, xmit, NAPI irq control, tx
 * readiness and stats.  NOTE(review): the closing "};" is not visible
 * in this extract.
 */
227 struct netfront_accel_hooks accel_hooks = {
228 .new_device = &netfront_accel_probe,
229 .remove = &netfront_accel_remove,
230 .netdev_poll = &netfront_accel_netdev_poll,
231 .start_xmit = &netfront_accel_netdev_start_xmit,
232 .start_napi_irq = &netfront_accel_start_napi_interrupts,
233 .stop_napi_irq = &netfront_accel_stop_napi_interrupts,
234 .check_ready = &netfront_accel_check_ready,
235 .get_stats = &netfront_accel_get_stats
/* Number of buffer pages to request from the backend (tunable) */
239 unsigned sfc_netfront_max_pages = NETFRONT_ACCEL_DEFAULT_BUF_PAGES;
240 module_param_named (max_pages, sfc_netfront_max_pages, uint, 0644);
241 MODULE_PARM_DESC(max_pages, "Number of buffer pages to request");
/* TX/RX buffer split ratio; per the description below, presumably
 * 1/buffer_split of the buffers go to TX -- confirm in accel_bufs */
243 unsigned sfc_netfront_buffer_split = 2;
244 module_param_named (buffer_split, sfc_netfront_buffer_split, uint, 0644);
245 MODULE_PARM_DESC(buffer_split,
246 "Fraction of buffers to use for TX, rest for RX");
/* Name under which this accelerator registers with netfront; also
 * used as the workqueue name */
249 const char *frontend_name = "sfc_netfront";
/* Driver-wide workqueue, created in init and destroyed in exit */
251 struct workqueue_struct *netfront_accel_workqueue;
/*
 * Module init: create the workqueue and debugfs entries, then register
 * the accel_hooks table with netfront so it offers new devices to this
 * accelerator.
 *
 * NOTE(review): truncated extract -- the "rc" declaration, the early
 * returns (dom0 bail-out, success return) and the error-path labels
 * are not visible.  The visible tail (debugfs fini, workqueue flush/
 * destroy) appears to be the failure cleanup path -- confirm.
 */
253 static int __init netfront_accel_init(void)
257 gcov_provider_init(THIS_MODULE);
261 * If we're running on dom0, netfront hasn't initialised
262 * itself, so we need to keep away
264 if (is_initial_xendomain())
/* Message struct must be a power-of-two size for the shared ring */
267 if (!is_pow2(sizeof(struct net_accel_msg)))
268 EPRINTK("%s: bad structure size\n", __FUNCTION__);
270 netfront_accel_workqueue = create_workqueue(frontend_name);
272 netfront_accel_debugfs_init();
/* Hand our hook table to netfront; fails on version mismatch */
274 rc = netfront_accelerator_loaded(NETFRONT_ACCEL_VERSION,
275 frontend_name, &accel_hooks);
278 EPRINTK("Xen netfront accelerator version mismatch\n");
284 * In future may want to add backwards compatibility
285 * and accept certain subsets of previous versions
287 EPRINTK("Xen netfront accelerator version mismatch\n");
/* Failure cleanup: undo debugfs and workqueue setup */
294 netfront_accel_debugfs_fini();
295 flush_workqueue(netfront_accel_workqueue);
296 destroy_workqueue(netfront_accel_workqueue);
298 gcov_provider_fini(THIS_MODULE);
302 module_init(netfront_accel_init);
/*
 * Module exit: unhook from netfront, then tear down debugfs and the
 * workqueue (flush before destroy so queued work completes).
 * NOTE(review): the early-return body after the dom0 check is not
 * visible in this extract.
 */
304 static void __exit netfront_accel_exit(void)
/* Init bailed out on dom0, so there is nothing to undo there */
306 if (is_initial_xendomain())
309 DPRINTK("%s: unhooking\n", __FUNCTION__);
311 /* Unhook from normal netfront */
312 netfront_accelerator_stop(frontend_name);
314 DPRINTK("%s: done\n", __FUNCTION__);
316 netfront_accel_debugfs_fini();
318 flush_workqueue(netfront_accel_workqueue);
320 destroy_workqueue(netfront_accel_workqueue);
323 gcov_provider_fini(THIS_MODULE);
327 module_exit(netfront_accel_exit);
329 MODULE_LICENSE("GPL");