- Update Xen patches to 3.3-rc5 and c/s 1157.
[linux-flexiantxendom0-3.2.10.git] / drivers / xen / sfc_netfront / accel_netfront.c
1 /****************************************************************************
2  * Solarflare driver for Xen network acceleration
3  *
4  * Copyright 2006-2008: Solarflare Communications Inc,
5  *                      9501 Jeronimo Road, Suite 250,
6  *                      Irvine, CA 92618, USA
7  *
8  * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
9  *
10  * This program is free software; you can redistribute it and/or modify it
11  * under the terms of the GNU General Public License version 2 as published
12  * by the Free Software Foundation, incorporated herein by reference.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
22  ****************************************************************************
23  */
24
25 #include <linux/module.h>
26 #include <linux/skbuff.h>
27 #include <linux/netdevice.h>
28
29 /* drivers/xen/netfront/netfront.h */
30 #include "netfront.h"
31
32 #include "accel.h"
33 #include "accel_bufs.h"
34 #include "accel_util.h"
35 #include "accel_msg_iface.h"
36 #include "accel_ssr.h"
37  
38 #ifdef EFX_GCOV
39 #include "gcov.h"
40 #endif
41
/*
 * Map a struct net_device to the accelerator-private vnic state stashed
 * in netfront's private data.  Fix: the macro previously ignored its
 * parameter and referenced `net_dev` directly, working only because all
 * call sites happened to use that variable name.  Evaluates _nd once.
 */
#define NETFRONT_ACCEL_VNIC_FROM_NETDEV(_nd)                            \
        ((netfront_accel_vnic *)((struct netfront_info *)netdev_priv(_nd))->accel_priv)
44
45 static int netfront_accel_netdev_start_xmit(struct sk_buff *skb,
46                                             struct net_device *net_dev)
47 {
48         netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
49         struct netfront_info *np = 
50                 (struct netfront_info *)netdev_priv(net_dev);
51         int handled, rc;
52         unsigned long flags1, flags2;
53
54         BUG_ON(vnic == NULL);
55
56         /* Take our tx lock and hold for the duration */
57         spin_lock_irqsave(&vnic->tx_lock, flags1);
58
59         if (!vnic->tx_enabled) {
60                 rc = 0;
61                 goto unlock_out;
62         }
63
64         handled = netfront_accel_vi_tx_post(vnic, skb);
65         if (handled == NETFRONT_ACCEL_STATUS_BUSY) {
66                 BUG_ON(vnic->net_dev != net_dev);
67                 DPRINTK("%s stopping queue\n", __FUNCTION__);
68
69                 /* Need netfront's tx_lock and vnic tx_lock to write tx_skb */
70                 spin_lock_irqsave(&np->tx_lock, flags2);
71                 BUG_ON(vnic->tx_skb != NULL);
72                 vnic->tx_skb = skb;
73                 netif_stop_queue(net_dev);
74                 spin_unlock_irqrestore(&np->tx_lock, flags2);
75
76                 NETFRONT_ACCEL_STATS_OP(vnic->stats.queue_stops++);
77         }
78
79         if (handled == NETFRONT_ACCEL_STATUS_CANT)
80                 rc = 0;
81         else
82                 rc = 1;
83
84 unlock_out:
85         spin_unlock_irqrestore(&vnic->tx_lock, flags1);
86
87         return rc;
88 }
89
90
91 static int netfront_accel_netdev_poll(struct net_device *net_dev, int *budget)
92 {
93         netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
94         int rx_allowed = *budget, rx_done;
95         
96         BUG_ON(vnic == NULL);
97
98         /* Can check this without lock as modifier excludes polls */ 
99         if (!vnic->poll_enabled)
100                 return 0;
101
102         rx_done = netfront_accel_vi_poll(vnic, rx_allowed);
103         *budget -= rx_done;
104         
105         NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_call_count++);
106
107         VPRINTK("%s: done %d allowed %d\n",
108                 __FUNCTION__, rx_done, rx_allowed);
109
110         netfront_accel_ssr_end_of_burst(vnic, &vnic->ssr_state);
111
112         if (rx_done < rx_allowed) {
113                  return 0; /* Done */
114         }
115         
116         NETFRONT_ACCEL_STATS_OP(vnic->stats.poll_reschedule_count++);
117
118         return 1; /* More to do. */
119 }
120
121
122 /*
123  * Process request from netfront to start napi interrupt
124  * mode. (i.e. enable interrupts as it's finished polling)
125  */
126 static int netfront_accel_start_napi_interrupts(struct net_device *net_dev) 
127 {
128         netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
129         unsigned long flags;
130
131         BUG_ON(vnic == NULL);
132         
133         /*
134          * Can check this without lock as writer excludes poll before
135          * modifying
136          */
137         if (!vnic->poll_enabled)
138                 return 0;
139
140         if (!netfront_accel_vi_enable_interrupts(vnic)) {
141                 /* 
142                  * There was something there, tell caller we had
143                  * something to do.
144                  */
145                 return 1;
146         }
147
148         spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
149         vnic->irq_enabled = 1;
150         netfront_accel_enable_net_interrupts(vnic);
151         spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
152
153         return 0;
154 }
155
156
157 /*
158  * Process request from netfront to stop napi interrupt
159  * mode. (i.e. disable interrupts as it's starting to poll 
160  */
161 static void netfront_accel_stop_napi_interrupts(struct net_device *net_dev) 
162 {
163         netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
164         unsigned long flags;
165
166         BUG_ON(vnic == NULL);
167
168         spin_lock_irqsave(&vnic->irq_enabled_lock, flags);
169
170         if (!vnic->poll_enabled) {
171                 spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
172                 return;
173         }
174
175         netfront_accel_disable_net_interrupts(vnic);
176         vnic->irq_enabled = 0;
177         spin_unlock_irqrestore(&vnic->irq_enabled_lock, flags);
178 }
179
180
181 static int netfront_accel_check_ready(struct net_device *net_dev)
182 {
183         netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
184
185         BUG_ON(vnic == NULL);
186
187         /* Read of tx_skb is protected by netfront's tx_lock */ 
188         return vnic->tx_skb == NULL;
189 }
190
191
192 static int netfront_accel_get_stats(struct net_device *net_dev,
193                                     struct net_device_stats *devst,
194                                     struct netfront_stats *lnkst)
195 {
196         netfront_accel_vnic *vnic = NETFRONT_ACCEL_VNIC_FROM_NETDEV(net_dev);
197         struct netfront_accel_netdev_stats now;
198
199         BUG_ON(vnic == NULL);
200
201         now.fastpath_rx_pkts   = vnic->netdev_stats.fastpath_rx_pkts;
202         now.fastpath_rx_bytes  = vnic->netdev_stats.fastpath_rx_bytes;
203         now.fastpath_rx_errors = vnic->netdev_stats.fastpath_rx_errors;
204         now.fastpath_tx_pkts   = vnic->netdev_stats.fastpath_tx_pkts;
205         now.fastpath_tx_bytes  = vnic->netdev_stats.fastpath_tx_bytes;
206         now.fastpath_tx_errors = vnic->netdev_stats.fastpath_tx_errors;
207         
208         lnkst->rx_packets += (now.fastpath_rx_pkts -
209                               vnic->stats_last_read.fastpath_rx_pkts);
210         lnkst->rx_bytes   += (now.fastpath_rx_bytes -
211                               vnic->stats_last_read.fastpath_rx_bytes);
212         devst->rx_errors  += (now.fastpath_rx_errors -
213                               vnic->stats_last_read.fastpath_rx_errors);
214         lnkst->tx_packets += (now.fastpath_tx_pkts -
215                               vnic->stats_last_read.fastpath_tx_pkts);
216         lnkst->tx_bytes   += (now.fastpath_tx_bytes -
217                               vnic->stats_last_read.fastpath_tx_bytes);
218         devst->tx_errors  += (now.fastpath_tx_errors -
219                               vnic->stats_last_read.fastpath_tx_errors);
220         
221         vnic->stats_last_read = now;
222
223         return 0;
224 }
225
226
227 struct netfront_accel_hooks accel_hooks = {
228         .new_device         = &netfront_accel_probe,
229         .remove         = &netfront_accel_remove,
230         .netdev_poll       = &netfront_accel_netdev_poll,
231         .start_xmit         = &netfront_accel_netdev_start_xmit,
232         .start_napi_irq = &netfront_accel_start_napi_interrupts,
233         .stop_napi_irq   = &netfront_accel_stop_napi_interrupts,
234         .check_ready       = &netfront_accel_check_ready,
235         .get_stats           = &netfront_accel_get_stats
236 };
237
238
/* Number of buffer pages to request from the backend (module parameter
 * "max_pages", writable at runtime via sysfs). */
unsigned sfc_netfront_max_pages = NETFRONT_ACCEL_DEFAULT_BUF_PAGES;
module_param_named (max_pages, sfc_netfront_max_pages, uint, 0644);
MODULE_PARM_DESC(max_pages, "Number of buffer pages to request");

/* Divisor controlling the TX/RX split of the buffer pool: 1/buffer_split
 * of the buffers go to TX, the remainder to RX (module parameter
 * "buffer_split"). */
unsigned sfc_netfront_buffer_split = 2;
module_param_named (buffer_split, sfc_netfront_buffer_split, uint, 0644);
MODULE_PARM_DESC(buffer_split, 
                 "Fraction of buffers to use for TX, rest for RX");


/* Name under which this accelerator registers with netfront; also used
 * as the workqueue name. */
const char *frontend_name = "sfc_netfront";

/* Workqueue for deferred accelerator work; created in module init. */
struct workqueue_struct *netfront_accel_workqueue;
252
253 static int __init netfront_accel_init(void)
254 {
255         int rc;
256 #ifdef EFX_GCOV 
257         gcov_provider_init(THIS_MODULE);
258 #endif
259
260         /*
261          * If we're running on dom0, netfront hasn't initialised
262          * itself, so we need to keep away
263          */
264         if (is_initial_xendomain())
265                 return 0;
266
267         if (!is_pow2(sizeof(struct net_accel_msg)))
268                 EPRINTK("%s: bad structure size\n", __FUNCTION__);
269
270         netfront_accel_workqueue = create_workqueue(frontend_name);
271
272         netfront_accel_debugfs_init();
273
274         rc = netfront_accelerator_loaded(NETFRONT_ACCEL_VERSION,
275                                          frontend_name, &accel_hooks);
276
277         if (rc < 0) {
278                 EPRINTK("Xen netfront accelerator version mismatch\n");
279                 goto fail;
280         }
281
282         if (rc > 0) {
283                 /* 
284                  * In future may want to add backwards compatibility
285                  * and accept certain subsets of previous versions
286                  */
287                 EPRINTK("Xen netfront accelerator version mismatch\n");
288                 goto fail;
289         }
290
291         return 0;
292
293  fail:
294         netfront_accel_debugfs_fini();
295         flush_workqueue(netfront_accel_workqueue);
296         destroy_workqueue(netfront_accel_workqueue);
297 #ifdef EFX_GCOV
298         gcov_provider_fini(THIS_MODULE);
299 #endif
300         return -EINVAL;
301 }
302 module_init(netfront_accel_init);
303
304 static void __exit netfront_accel_exit(void)
305 {
306         if (is_initial_xendomain())
307                 return;
308
309         DPRINTK("%s: unhooking\n", __FUNCTION__);
310
311         /* Unhook from normal netfront */
312         netfront_accelerator_stop(frontend_name);
313
314         DPRINTK("%s: done\n", __FUNCTION__);
315
316         netfront_accel_debugfs_fini();
317
318         flush_workqueue(netfront_accel_workqueue);
319
320         destroy_workqueue(netfront_accel_workqueue);
321
322 #ifdef EFX_GCOV
323         gcov_provider_fini(THIS_MODULE);
324 #endif
325         return;
326 }
327 module_exit(netfront_accel_exit);
328
329 MODULE_LICENSE("GPL");
330