/* drivers/xen/sfc_netback/accel_solarflare.c (linux-flexiantxendom0-3.2.10.git) */
1 /****************************************************************************
2  * Solarflare driver for Xen network acceleration
3  *
4  * Copyright 2006-2008: Solarflare Communications Inc,
5  *                      9501 Jeronimo Road, Suite 250,
6  *                      Irvine, CA 92618, USA
7  *
8  * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
9  *
10  * This program is free software; you can redistribute it and/or modify it
11  * under the terms of the GNU General Public License version 2 as published
12  * by the Free Software Foundation, incorporated herein by reference.
13  *
14  * This program is distributed in the hope that it will be useful,
15  * but WITHOUT ANY WARRANTY; without even the implied warranty of
16  * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
17  * GNU General Public License for more details.
18  *
19  * You should have received a copy of the GNU General Public License
20  * along with this program; if not, write to the Free Software
21  * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
22  ****************************************************************************
23  */
24
25 #include "common.h"
26
27 #include "accel.h"
28 #include "accel_solarflare.h"
29 #include "accel_msg_iface.h"
30 #include "accel_util.h"
31
32 #include "accel_cuckoo_hash.h"
33
34 #include "ci/driver/resource/efx_vi.h"
35
36 #include "ci/efrm/nic_table.h" 
37 #include "ci/efhw/public.h"
38
39 #include <xen/evtchn.h>
40 #include <xen/driver_util.h>
41 #include <linux/list.h>
42 #include <linux/mutex.h>
43
44 #include "driverlink_api.h"
45
46 #define SF_XEN_RX_USR_BUF_SIZE 2048
47
/*
 * Per-backend hardware state for Falcon/Siena NICs; allocated by
 * alloc_page_state() and hung off struct netback_accel::accel_hw_priv.
 */
struct falcon_bend_accel_priv {
        /*! Virtual interface handle obtained from efx_vi_alloc() */
        struct efx_vi_state *efx_vih;

        /*! Array of pointers to dma_map state, used so VNIC can
         *  request their removal in a single message
         */
        struct efx_vi_dma_map_state **dma_maps;
        /*! Index into dma_maps (next unused slot) */
        int dma_maps_index;

        /*! Serialises access to filters */
        spinlock_t filter_lock;
        /*! Bitmap of which filters are free */
        unsigned long free_filters;
        /*! Used for index normalisation */
        u32 filter_idx_mask;
        /*! Filter specifications, one per filter slot */
        struct netback_accel_filter_spec *fspecs;
        /*! Hash table over the installed filters */
        cuckoo_hash_table filter_hash_table;

        /* Grant references handed to the frontend domain for the TX/RX
         * DMA queues, the doorbell page, the event-queue read pointer
         * (Falcon A only) and the event-queue memory pages.  Created in
         * ef_bend_hwinfo_falcon_common()/..._a(), revoked in
         * netback_accel_release_hwinfo().
         */
        u32 txdmaq_gnt;
        u32 rxdmaq_gnt;
        u32 doorbell_gnt;
        u32 evq_rptr_gnt;
        u32 evq_mem_gnts[EF_HW_FALCON_EVQ_PAGES];
        /*! Number of event-queue pages granted (1 << evq_order) */
        u32 evq_npages;
};
74
75 /* Forward declaration */
76 static int netback_accel_filter_init(struct netback_accel *);
77 static void netback_accel_filter_shutdown(struct netback_accel *);
78
79 /**************************************************************************
80  * 
81  * Driverlink stuff
82  *
83  **************************************************************************/
84
/* One entry per NIC port probed via driverlink; linked on dl_ports. */
struct driverlink_port {
        struct list_head link;            /* Entry in the global dl_ports list */
        enum net_accel_hw_type type;      /* HW type derived from silicon_rev */
        struct net_device *net_dev;       /* The port's network device */
        struct efx_dl_device *efx_dl_dev; /* Driverlink handle for this port */
        void *fwd_priv;                   /* State from netback_accel_init_fwd_port() */
};
92
/* All driverlink ports we have probed; protected by accel_mutex */
static struct list_head dl_ports;

/* This mutex protects global state, such as the dl_ports list */
DEFINE_MUTEX(accel_mutex);

/* Set by netback_accel_sf_init() iff driverlink registration succeeded */
static int init_done = 0;
99
100 /* The DL callbacks */
101
102
103 #if defined(EFX_USE_FASTCALL)
104 static enum efx_veto fastcall
105 #else
106 static enum efx_veto
107 #endif
108 bend_dl_tx_packet(struct efx_dl_device *efx_dl_dev,
109                   struct sk_buff *skb)
110 {
111         struct driverlink_port *port = efx_dl_dev->priv;
112
113         BUG_ON(port == NULL);
114
115         NETBACK_ACCEL_STATS_OP(global_stats.dl_tx_packets++);
116         if (skb_mac_header_was_set(skb))
117                 netback_accel_tx_packet(skb, port->fwd_priv);
118         else {
119                 DPRINTK("Ignoring packet with missing mac address\n");
120                 NETBACK_ACCEL_STATS_OP(global_stats.dl_tx_bad_packets++);
121         }
122         return EFX_ALLOW_PACKET;
123 }
124
125 /* EFX_USE_FASTCALL */
126 #if defined(EFX_USE_FASTCALL)
127 static enum efx_veto fastcall
128 #else
129 static enum efx_veto
130 #endif
131 bend_dl_rx_packet(struct efx_dl_device *efx_dl_dev,
132                   const char *pkt_buf, int pkt_len)
133 {
134         struct driverlink_port *port = efx_dl_dev->priv;
135         struct netback_pkt_buf pkt;
136         struct ethhdr *eh;
137
138         BUG_ON(port == NULL);
139
140         pkt.mac.raw = (char *)pkt_buf;
141         pkt.nh.raw = (char *)pkt_buf + ETH_HLEN;
142         eh = (struct ethhdr *)pkt_buf;
143         pkt.protocol = eh->h_proto;
144
145         NETBACK_ACCEL_STATS_OP(global_stats.dl_rx_packets++);
146         netback_accel_rx_packet(&pkt, port->fwd_priv);
147         return EFX_ALLOW_PACKET;
148 }
149
150
151 /* Callbacks we'd like to get from the netdriver through driverlink */
152 struct efx_dl_callbacks bend_dl_callbacks =
153         {
154                 .tx_packet = bend_dl_tx_packet,
155                 .rx_packet = bend_dl_rx_packet,
156         };
157
158
/* Probe/remove entry points handed to the generic netback accelerator
 * core via netback_connect_accelerator(). */
static struct netback_accel_hooks accel_hooks = {
        THIS_MODULE,
        &netback_accel_probe,
        &netback_accel_remove
};
164
165
/* Driver link probe - register our callbacks.
 *
 * Identifies the silicon, allocates a driverlink_port, sets up packet
 * forwarding, registers rx/tx callbacks and finally announces the port
 * to the generic netback accelerator core.  Returns 0 on success or a
 * negative errno; on failure everything is unwound via the fail labels
 * in reverse order of construction.
 */
static int bend_dl_probe(struct efx_dl_device *efx_dl_dev,
                         const struct net_device *net_dev,
                         const struct efx_dl_device_info *dev_info,
                         const char* silicon_rev)
{
        int rc;
        enum net_accel_hw_type type;
        struct driverlink_port *port;

        DPRINTK("%s: %s\n", __FUNCTION__, silicon_rev);

        /* Map the silicon revision string to a message-protocol HW type;
         * unknown silicon cannot be accelerated. */
        if (strcmp(silicon_rev, "falcon/a1") == 0)
                type = NET_ACCEL_MSG_HWTYPE_FALCON_A;
        else if (strcmp(silicon_rev, "falcon/b0") == 0)
                type = NET_ACCEL_MSG_HWTYPE_FALCON_B;
        else if (strcmp(silicon_rev, "siena/a0") == 0)
                type = NET_ACCEL_MSG_HWTYPE_SIENA_A;
        else {
                EPRINTK("%s: unsupported silicon %s\n", __FUNCTION__,
                        silicon_rev);
                rc = -EINVAL;
                goto fail1;
        }

        port = kmalloc(sizeof(struct driverlink_port), GFP_KERNEL);
        if (port == NULL) {
                EPRINTK("%s: no memory for dl probe\n", __FUNCTION__);
                rc = -ENOMEM;
                goto fail1;
        }

        port->efx_dl_dev = efx_dl_dev;
        efx_dl_dev->priv = port;

        /* Per-port forwarding state consumed by the rx/tx callbacks */
        port->fwd_priv = netback_accel_init_fwd_port();
        if (port->fwd_priv == NULL) {
                EPRINTK("%s: failed to set up forwarding for port\n",
                        __FUNCTION__);
                rc = -ENOMEM;
                goto fail2;
        }

        rc = efx_dl_register_callbacks(efx_dl_dev, &bend_dl_callbacks);
        if (rc != 0) {
                EPRINTK("%s: register_callbacks failed\n", __FUNCTION__);
                goto fail3;
        }

        port->type = type;
        /* NOTE(review): cast drops const from the driverlink-supplied
         * net_device pointer; we store a mutable reference. */
        port->net_dev = (struct net_device *)net_dev;

        mutex_lock(&accel_mutex);
        list_add(&port->link, &dl_ports);
        mutex_unlock(&accel_mutex);

        /* Announce the port to the generic netback accelerator core */
        rc = netback_connect_accelerator(NETBACK_ACCEL_VERSION, 0,
                                         port->net_dev->name, &accel_hooks);

        if (rc < 0) {
                EPRINTK("Xen netback accelerator version mismatch\n");
                goto fail4;
        } else if (rc > 0) {
                /*
                 * In future may want to add backwards compatibility
                 * and accept certain subsets of previous versions
                 */
                EPRINTK("Xen netback accelerator version mismatch\n");
                goto fail4;
        }

        return 0;

        /* Unwind in reverse order of construction */
 fail4:
        mutex_lock(&accel_mutex);
        list_del(&port->link);
        mutex_unlock(&accel_mutex);

        efx_dl_unregister_callbacks(efx_dl_dev, &bend_dl_callbacks);
 fail3:
        netback_accel_shutdown_fwd_port(port->fwd_priv);
 fail2:
        efx_dl_dev->priv = NULL;
        kfree(port);
 fail1:
        return rc;
}
253
254
255 static void bend_dl_remove(struct efx_dl_device *efx_dl_dev)
256 {
257         struct driverlink_port *port;
258
259         DPRINTK("Unregistering driverlink callbacks.\n");
260
261         mutex_lock(&accel_mutex);
262
263         port = (struct driverlink_port *)efx_dl_dev->priv;
264
265         BUG_ON(list_empty(&dl_ports));
266         BUG_ON(port == NULL);
267         BUG_ON(port->efx_dl_dev != efx_dl_dev);
268
269         netback_disconnect_accelerator(0, port->net_dev->name);
270
271         list_del(&port->link);
272
273         mutex_unlock(&accel_mutex);
274
275         efx_dl_unregister_callbacks(efx_dl_dev, &bend_dl_callbacks);
276         netback_accel_shutdown_fwd_port(port->fwd_priv);
277
278         efx_dl_dev->priv = NULL;
279         kfree(port);
280
281         return;
282 }
283
284
285 static void bend_dl_reset_suspend(struct efx_dl_device *efx_dl_dev)
286 {
287         struct driverlink_port *port;
288
289         DPRINTK("Driverlink reset suspend.\n");
290
291         mutex_lock(&accel_mutex);
292
293         port = (struct driverlink_port *)efx_dl_dev->priv;
294         BUG_ON(list_empty(&dl_ports));
295         BUG_ON(port == NULL);
296         BUG_ON(port->efx_dl_dev != efx_dl_dev);
297
298         netback_disconnect_accelerator(0, port->net_dev->name);
299         mutex_unlock(&accel_mutex);
300 }
301
302
303 static void bend_dl_reset_resume(struct efx_dl_device *efx_dl_dev, int ok)
304 {
305         int rc;
306         struct driverlink_port *port;
307
308         DPRINTK("Driverlink reset resume.\n");
309         
310         if (!ok)
311                 return;
312
313         port = (struct driverlink_port *)efx_dl_dev->priv;
314         BUG_ON(list_empty(&dl_ports));
315         BUG_ON(port == NULL);
316         BUG_ON(port->efx_dl_dev != efx_dl_dev);
317
318         rc = netback_connect_accelerator(NETBACK_ACCEL_VERSION, 0,
319                                          port->net_dev->name, &accel_hooks);
320         if (rc != 0) {
321                 EPRINTK("Xen netback accelerator version mismatch\n");
322
323                 mutex_lock(&accel_mutex);
324                 list_del(&port->link);
325                 mutex_unlock(&accel_mutex);
326
327                 efx_dl_unregister_callbacks(efx_dl_dev, &bend_dl_callbacks);
328
329                 netback_accel_shutdown_fwd_port(port->fwd_priv);
330
331                 efx_dl_dev->priv = NULL;
332                 kfree(port);
333         }
334 }
335
336
337 static struct efx_dl_driver bend_dl_driver = 
338         {
339                 .name = "SFC Xen backend",
340                 .probe = bend_dl_probe,
341                 .remove = bend_dl_remove,
342                 .reset_suspend = bend_dl_reset_suspend,
343                 .reset_resume = bend_dl_reset_resume
344         };
345
346
/*
 * Register this backend with the sfc driverlink layer and, on success,
 * set every NIC's RX user buffer size to SF_XEN_RX_USR_BUF_SIZE.
 * Returns the driverlink registration result.
 */
int netback_accel_sf_init(void)
{
        int rc, nic_i;
        struct efhw_nic *nic;

        INIT_LIST_HEAD(&dl_ports);

        rc = efx_dl_register_driver(&bend_dl_driver);
        /* If we couldn't find the NET driver, give up */
        if (rc == -ENOENT)
                return rc;
        
        if (rc == 0) {
                /* Configure each NIC for our fixed RX buffer size */
                EFRM_FOR_EACH_NIC(nic_i, nic)
                        falcon_nic_set_rx_usr_buf_size(nic, 
                                                       SF_XEN_RX_USR_BUF_SIZE);
        }

        /* Remember whether registration succeeded so that
         * netback_accel_sf_shutdown() knows if there is work to undo. */
        init_done = (rc == 0);
        return rc;
}
368
369
370 void netback_accel_sf_shutdown(void)
371 {
372         if (!init_done)
373                 return;
374         DPRINTK("Unregistering driverlink driver\n");
375
376         /*
377          * This will trigger removal callbacks for all the devices, which
378          * will unregister their callbacks, disconnect from netfront, etc.
379          */
380         efx_dl_unregister_driver(&bend_dl_driver);
381 }
382
383
384 int netback_accel_sf_hwtype(struct netback_accel *bend)
385 {
386         struct driverlink_port *port;
387
388         mutex_lock(&accel_mutex);
389
390         list_for_each_entry(port, &dl_ports, link) {
391                 if (strcmp(bend->nicname, port->net_dev->name) == 0) {
392                         bend->hw_type = port->type;
393                         bend->accel_setup = netback_accel_setup_vnic_hw;
394                         bend->accel_shutdown = netback_accel_shutdown_vnic_hw;
395                         bend->fwd_priv = port->fwd_priv;
396                         bend->net_dev = port->net_dev;
397                         mutex_unlock(&accel_mutex);
398                         return 0;
399                 }
400         }
401
402         mutex_unlock(&accel_mutex);
403
404         EPRINTK("Failed to identify backend device '%s' with a NIC\n",
405                 bend->nicname);
406
407         return -ENOENT;
408 }
409
410
411 /****************************************************************************
412  * Resource management code
413  ***************************************************************************/
414
415 static int alloc_page_state(struct netback_accel *bend, int max_pages)
416 {
417         struct falcon_bend_accel_priv *accel_hw_priv;
418
419         if (max_pages < 0 || max_pages > bend->quotas.max_buf_pages) {
420                 EPRINTK("%s: invalid max_pages: %d\n", __FUNCTION__, max_pages);
421                 return -EINVAL;
422         }
423
424         accel_hw_priv = kzalloc(sizeof(struct falcon_bend_accel_priv),
425                                 GFP_KERNEL);
426         if (accel_hw_priv == NULL) {
427                 EPRINTK("%s: no memory for accel_hw_priv\n", __FUNCTION__);
428                 return -ENOMEM;
429         }
430
431         accel_hw_priv->dma_maps = kzalloc
432                 (sizeof(struct efx_vi_dma_map_state **) * 
433                  (max_pages / NET_ACCEL_MSG_MAX_PAGE_REQ), GFP_KERNEL);
434         if (accel_hw_priv->dma_maps == NULL) {
435                 EPRINTK("%s: no memory for dma_maps\n", __FUNCTION__);
436                 kfree(accel_hw_priv);
437                 return -ENOMEM;
438         }
439
440         bend->buffer_maps = kzalloc(sizeof(struct vm_struct *) * max_pages, 
441                                     GFP_KERNEL);
442         if (bend->buffer_maps == NULL) {
443                 EPRINTK("%s: no memory for buffer_maps\n", __FUNCTION__);
444                 kfree(accel_hw_priv->dma_maps);
445                 kfree(accel_hw_priv);
446                 return -ENOMEM;
447         }
448
449         bend->buffer_addrs = kzalloc(sizeof(u64) * max_pages, GFP_KERNEL);
450         if (bend->buffer_addrs == NULL) {
451                 kfree(bend->buffer_maps);
452                 kfree(accel_hw_priv->dma_maps);
453                 kfree(accel_hw_priv);
454                 return -ENOMEM;
455         }
456
457         bend->accel_hw_priv = accel_hw_priv;
458
459         return 0;
460 }
461
462
463 static int free_page_state(struct netback_accel *bend)
464 {
465         struct falcon_bend_accel_priv *accel_hw_priv;
466
467         DPRINTK("%s: %p\n", __FUNCTION__, bend);
468
469         accel_hw_priv = bend->accel_hw_priv;
470
471         if (accel_hw_priv) {
472                 kfree(accel_hw_priv->dma_maps);
473                 kfree(bend->buffer_maps);
474                 kfree(bend->buffer_addrs);
475                 kfree(accel_hw_priv);
476                 bend->accel_hw_priv = NULL;
477                 bend->max_pages = 0;
478         }
479
480         return 0;
481 }
482
483
484 /* The timeout event callback for the event q */
485 static void bend_evq_timeout(void *context, int is_timeout)
486 {
487         struct netback_accel *bend = (struct netback_accel *)context;
488         if (is_timeout) {
489                 /* Pass event to vnic front end driver */
490                 VPRINTK("timeout event to %d\n", bend->net_channel);
491                 NETBACK_ACCEL_STATS_OP(bend->stats.evq_timeouts++);
492                 notify_remote_via_irq(bend->net_channel_irq);
493         } else {
494                 /* It's a wakeup event, used by Falcon */
495                 VPRINTK("wakeup to %d\n", bend->net_channel);
496                 NETBACK_ACCEL_STATS_OP(bend->stats.evq_wakeups++);
497                 notify_remote_via_irq(bend->net_channel_irq);
498         }
499 }
500
501
502 /*
503  * Create the eventq and associated gubbins for communication with the
504  * front end vnic driver
505  */
506 static int ef_get_vnic(struct netback_accel *bend)
507 {
508         struct falcon_bend_accel_priv *accel_hw_priv;
509         int rc = 0;
510
511         BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_NONE);
512
513         /* Allocate page related state and accel_hw_priv */
514         rc = alloc_page_state(bend, bend->max_pages);
515         if (rc != 0) {
516                 EPRINTK("Failed to allocate page state: %d\n", rc);
517                 return rc;
518         }
519
520         accel_hw_priv = bend->accel_hw_priv;
521
522         rc = efx_vi_alloc(&accel_hw_priv->efx_vih, bend->net_dev->ifindex);
523         if (rc != 0) {
524                 EPRINTK("%s: efx_vi_alloc failed %d\n", __FUNCTION__, rc);
525                 free_page_state(bend);
526                 return rc;
527         }
528
529         rc = efx_vi_eventq_register_callback(accel_hw_priv->efx_vih,
530                                              bend_evq_timeout,
531                                              bend);
532         if (rc != 0) {
533                 EPRINTK("%s: register_callback failed %d\n", __FUNCTION__, rc);
534                 efx_vi_free(accel_hw_priv->efx_vih);
535                 free_page_state(bend);
536                 return rc;
537         }
538
539         bend->hw_state = NETBACK_ACCEL_RES_ALLOC;
540         
541         return 0;
542 }
543
544
545 static void ef_free_vnic(struct netback_accel *bend)
546 {
547         struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
548
549         BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_ALLOC);
550
551         efx_vi_eventq_kill_callback(accel_hw_priv->efx_vih);
552
553         DPRINTK("Hardware is freeable. Will proceed.\n");
554
555         efx_vi_free(accel_hw_priv->efx_vih);
556         accel_hw_priv->efx_vih = NULL;
557
558         VPRINTK("Free page state...\n");
559         free_page_state(bend);
560
561         bend->hw_state = NETBACK_ACCEL_RES_NONE;
562 }
563
564
565 static inline void ungrant_or_crash(grant_ref_t gntref, int domain) {
566         if (net_accel_ungrant_page(gntref) == -EBUSY)
567                 net_accel_shutdown_remote(domain);
568 }
569
570
571 static void netback_accel_release_hwinfo(struct netback_accel *bend)
572 {
573         struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
574         int i;
575
576         DPRINTK("Remove dma q grants %d %d\n", accel_hw_priv->txdmaq_gnt,
577                 accel_hw_priv->rxdmaq_gnt);
578         ungrant_or_crash(accel_hw_priv->txdmaq_gnt, bend->far_end);
579         ungrant_or_crash(accel_hw_priv->rxdmaq_gnt, bend->far_end);
580
581         DPRINTK("Remove doorbell grant %d\n", accel_hw_priv->doorbell_gnt);
582         ungrant_or_crash(accel_hw_priv->doorbell_gnt, bend->far_end);
583
584         if (bend->hw_type == NET_ACCEL_MSG_HWTYPE_FALCON_A) {
585                 DPRINTK("Remove rptr grant %d\n", accel_hw_priv->evq_rptr_gnt);
586                 ungrant_or_crash(accel_hw_priv->evq_rptr_gnt, bend->far_end);
587         }
588
589         for (i = 0; i < accel_hw_priv->evq_npages; i++) {
590                 DPRINTK("Remove evq grant %d\n", accel_hw_priv->evq_mem_gnts[i]);
591                 ungrant_or_crash(accel_hw_priv->evq_mem_gnts[i], bend->far_end);
592         }
593
594         bend->hw_state = NETBACK_ACCEL_RES_FILTER;
595
596         return;
597 }
598
599
/*
 * Shared hwinfo setup for Falcon A/B and Siena: query the physical
 * resources backing the VI, copy the NIC/queue geometry into the SETHW
 * message, and grant the frontend domain access to the TX/RX DMA queue
 * pages, the doorbell page and the event queue pages.  On failure all
 * grants made so far are revoked (fail labels, reverse order).
 */
static int ef_bend_hwinfo_falcon_common(struct netback_accel *bend, 
                                        struct net_accel_hw_falcon_b *hwinfo)
{
        struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
        struct efx_vi_hw_resource_metadata res_mdata;
        struct efx_vi_hw_resource res_array[EFX_VI_HW_RESOURCE_MAXSIZE];
        int rc, len = EFX_VI_HW_RESOURCE_MAXSIZE, i, pfn = 0;
        unsigned long txdmaq_pfn = 0, rxdmaq_pfn = 0;

        /* len is in/out: capacity on entry, entries returned on exit */
        rc = efx_vi_hw_resource_get_phys(accel_hw_priv->efx_vih, &res_mdata,
                                         res_array, &len);
        if (rc != 0) {
                DPRINTK("%s: resource_get_phys returned %d\n",
                        __FUNCTION__, rc);
                return rc;
        }

        /* Copy the hardware description into the frontend message */
        hwinfo->nic_arch = res_mdata.nic_arch;
        hwinfo->nic_variant = res_mdata.nic_variant;
        hwinfo->nic_revision = res_mdata.nic_revision;

        hwinfo->evq_order = res_mdata.evq_order;
        hwinfo->evq_offs = res_mdata.evq_offs;
        hwinfo->evq_capacity = res_mdata.evq_capacity;
        hwinfo->instance = res_mdata.instance;
        hwinfo->rx_capacity = res_mdata.rx_capacity;
        hwinfo->tx_capacity = res_mdata.tx_capacity;

        VPRINTK("evq_order %d evq_offs %d evq_cap %d inst %d rx_cap %d tx_cap %d\n",
                hwinfo->evq_order, hwinfo->evq_offs, hwinfo->evq_capacity,
                hwinfo->instance, hwinfo->rx_capacity, hwinfo->tx_capacity);

        /* Pick out the page frames that need granting below */
        for (i = 0; i < len; i++) {
                struct efx_vi_hw_resource *res = &(res_array[i]);
                switch (res->type) {
                case EFX_VI_HW_RESOURCE_TXDMAQ:
                        txdmaq_pfn = page_to_pfn(virt_to_page(res->address));
                        break;
                case EFX_VI_HW_RESOURCE_RXDMAQ: 
                        rxdmaq_pfn = page_to_pfn(virt_to_page(res->address));
                        break;
                case EFX_VI_HW_RESOURCE_EVQTIMER:
                        /* Not passed to the frontend */
                        break;
                case EFX_VI_HW_RESOURCE_EVQRPTR:
                case EFX_VI_HW_RESOURCE_EVQRPTR_OFFSET:
                        hwinfo->evq_rptr = res->address;
                        break;
                case EFX_VI_HW_RESOURCE_EVQMEMKVA: 
                        /* First pfn of the contiguous evq pages */
                        accel_hw_priv->evq_npages =  1 << res_mdata.evq_order;
                        pfn = page_to_pfn(virt_to_page(res->address));
                        break;
                case EFX_VI_HW_RESOURCE_BELLPAGE:
                        /* NB: address here is already an mfn */
                        hwinfo->doorbell_mfn  = res->address;
                        break;
                default:
                        EPRINTK("%s: Unknown hardware resource type %d\n",
                                __FUNCTION__, res->type);
                        break;
                }
        }

        /* Grant each page to the frontend; each success returns the
         * grant reference which we record both locally (for revocation)
         * and in the message (for the frontend to map). */
        VPRINTK("Passing txdmaq page pfn %lx\n", txdmaq_pfn);
        rc = net_accel_grant_page(bend->hdev_data, pfn_to_mfn(txdmaq_pfn), 0);
        if (rc < 0)
                goto fail0;
        accel_hw_priv->txdmaq_gnt = hwinfo->txdmaq_gnt = rc;

        VPRINTK("Passing rxdmaq page pfn %lx\n", rxdmaq_pfn);
        rc = net_accel_grant_page(bend->hdev_data, pfn_to_mfn(rxdmaq_pfn), 0);
        if (rc < 0)
                goto fail1;
        accel_hw_priv->rxdmaq_gnt = hwinfo->rxdmaq_gnt = rc;

        VPRINTK("Passing doorbell page mfn %x\n", hwinfo->doorbell_mfn);
        /* Make the relevant H/W pages mappable by the far end */
        rc = net_accel_grant_page(bend->hdev_data, hwinfo->doorbell_mfn, 1);
        if (rc < 0)
                goto fail2;
        accel_hw_priv->doorbell_gnt = hwinfo->doorbell_gnt = rc;
        
        /* Now do the same for the memory pages */
        /* Convert the page + length we got back for the evq to grants. */
        for (i = 0; i < accel_hw_priv->evq_npages; i++) {
                rc = net_accel_grant_page(bend->hdev_data, pfn_to_mfn(pfn), 0);
                if (rc < 0)
                        goto fail3;
                accel_hw_priv->evq_mem_gnts[i] = hwinfo->evq_mem_gnts[i] = rc;

                VPRINTK("Got grant %u for evq pfn %x\n", hwinfo->evq_mem_gnts[i], 
                        pfn);
                pfn++;
        }

        return 0;

        /* Revoke grants in reverse order of creation */
 fail3:
        for (i = i - 1; i >= 0; i--) {
                ungrant_or_crash(accel_hw_priv->evq_mem_gnts[i], bend->far_end);
        }
        ungrant_or_crash(accel_hw_priv->doorbell_gnt, bend->far_end);
 fail2:
        ungrant_or_crash(accel_hw_priv->rxdmaq_gnt, bend->far_end);
 fail1:
        ungrant_or_crash(accel_hw_priv->txdmaq_gnt, bend->far_end);
 fail0:
        return rc;
}
707
708
/*
 * Falcon A hwinfo setup: do the common setup, then additionally grant
 * the page containing the event-queue read pointer.  On grant failure
 * every grant made by ef_bend_hwinfo_falcon_common() is revoked.
 */
static int ef_bend_hwinfo_falcon_a(struct netback_accel *bend, 
                                   struct net_accel_hw_falcon_a *hwinfo)
{
        int rc, i;
        struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;

        if ((rc = ef_bend_hwinfo_falcon_common(bend, &hwinfo->common)) != 0)
                return rc;

        /*
         * Note that unlike the above, where the message field is the
         * page number, here evq_rptr is the entire address because
         * it is currently a pointer into the densely mapped timer page.
         */
        VPRINTK("Passing evq_rptr pfn %x for rptr %x\n", 
                hwinfo->common.evq_rptr >> PAGE_SHIFT,
                hwinfo->common.evq_rptr);
        rc = net_accel_grant_page(bend->hdev_data, 
                                  hwinfo->common.evq_rptr >> PAGE_SHIFT, 0);
        if (rc < 0) {
                /* Undo ef_bend_hwinfo_falcon_common() */
                ungrant_or_crash(accel_hw_priv->txdmaq_gnt, bend->far_end);
                ungrant_or_crash(accel_hw_priv->rxdmaq_gnt, bend->far_end);
                ungrant_or_crash(accel_hw_priv->doorbell_gnt, bend->far_end);
                for (i = 0; i < accel_hw_priv->evq_npages; i++) {
                        ungrant_or_crash(accel_hw_priv->evq_mem_gnts[i],
                                         bend->far_end);
                }
                return rc;
        }

        /* Record the extra grant for later revocation */
        accel_hw_priv->evq_rptr_gnt = hwinfo->evq_rptr_gnt = rc;
        VPRINTK("evq_rptr_gnt got %d\n", hwinfo->evq_rptr_gnt);
        
        return 0;
}
745
746
/* Falcon B / Siena hwinfo setup is exactly the common setup: no extra
 * read-pointer grant is needed. */
static int ef_bend_hwinfo_falcon_b(struct netback_accel *bend, 
                                   struct net_accel_hw_falcon_b *hwinfo)
{
        return ef_bend_hwinfo_falcon_common(bend, hwinfo);
}
752
753
754 /*
755  * Fill in the message with a description of the hardware resources, based on
756  * the H/W type
757  */
758 static int netback_accel_hwinfo(struct netback_accel *bend, 
759                                 struct net_accel_msg_hw *msgvi)
760 {
761         int rc = 0;
762         
763         BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_FILTER);
764
765         msgvi->type = bend->hw_type;
766         switch (bend->hw_type) {
767         case NET_ACCEL_MSG_HWTYPE_FALCON_A:
768                 rc = ef_bend_hwinfo_falcon_a(bend, &msgvi->resources.falcon_a);
769                 break;
770         case NET_ACCEL_MSG_HWTYPE_FALCON_B:
771         case NET_ACCEL_MSG_HWTYPE_SIENA_A:
772                 rc = ef_bend_hwinfo_falcon_b(bend, &msgvi->resources.falcon_b);
773                 break;
774         case NET_ACCEL_MSG_HWTYPE_NONE:
775                 /* Nothing to do. The slow path should just work. */
776                 break;
777         }
778
779         if (rc == 0)
780                 bend->hw_state = NETBACK_ACCEL_RES_HWINFO;
781                 
782         return rc;
783 }
784
785
/* Allocate hardware resources and make them available to the client domain.
 *
 * Staged setup: vnic (eventq/VI/pages) -> filters -> hwinfo/grants ->
 * SETHW message to the frontend.  Each failure unwinds exactly the
 * stages completed so far, in reverse order.
 */
int netback_accel_setup_vnic_hw(struct netback_accel *bend)
{
        struct net_accel_msg msg;
        int err;

        /* Allocate the event queue, VI and so on. */
        err = ef_get_vnic(bend);
        if (err) {
                EPRINTK("Failed to allocate hardware resource for bend:"
                        "error %d\n", err);
                return err;
        }

        /* Set up the filter management */
        err = netback_accel_filter_init(bend);
        if (err) {
                EPRINTK("Filter setup failed, error %d", err);
                ef_free_vnic(bend);
                return err;
        }

        net_accel_msg_init(&msg, NET_ACCEL_MSG_SETHW);

        /*
         * Extract the low-level hardware info we will actually pass to the
         * other end, and set up the grants/ioremap permissions needed
         */
        err = netback_accel_hwinfo(bend, &msg.u.hw);

        if (err != 0) {
                netback_accel_filter_shutdown(bend);
                ef_free_vnic(bend);
                return err;
        }

        /* Send the message, this is a reply to a hello-reply */
        err = net_accel_msg_reply_notify(bend->shared_page, 
                                         bend->msg_channel_irq, 
                                         &bend->to_domU, &msg);

        /*
         * The message should succeed as it's logically a reply and we
         * guarantee space for replies, but a misbehaving frontend
         * could result in that behaviour, so be tolerant
         */
        if (err != 0) {
                /* Full unwind: grants, filters, then the vnic itself */
                netback_accel_release_hwinfo(bend);
                netback_accel_filter_shutdown(bend);
                ef_free_vnic(bend);
        }

        return err;
}
840
841
/* Free hardware resources.
 *
 * The switch deliberately cascades: starting from the current state,
 * each case releases its own resources and falls through to the
 * cleanup for the next-lower state, so a backend in any state is torn
 * down completely.
 */
void netback_accel_shutdown_vnic_hw(struct netback_accel *bend)
{
        /*
         * Only try and release resources if accel_hw_priv was setup,
         * otherwise there is nothing to do as we're on "null-op"
         * acceleration
         */
        switch (bend->hw_state) {
        case NETBACK_ACCEL_RES_HWINFO:
                VPRINTK("Release hardware resources\n");
                netback_accel_release_hwinfo(bend);
                /* deliberate drop through */
        case NETBACK_ACCEL_RES_FILTER:
                VPRINTK("Free filters...\n");
                netback_accel_filter_shutdown(bend);
                /* deliberate drop through */
        case NETBACK_ACCEL_RES_ALLOC:
                VPRINTK("Free vnic...\n");
                ef_free_vnic(bend);
                /* deliberate drop through */
        case NETBACK_ACCEL_RES_NONE:
                break;
        default:
                BUG();
        }
}
869
870 /**************************************************************************
871  * 
872  * Buffer table stuff
873  *
874  **************************************************************************/
875
876 /*
877  * Undo any allocation that netback_accel_msg_rx_buffer_map() has made
878  * if it fails half way through
879  */
880 static inline void buffer_map_cleanup(struct netback_accel *bend, int i)
881 {
882         while (i > 0) {
883                 i--;
884                 bend->buffer_maps_index--;
885                 net_accel_unmap_device_page(bend->hdev_data, 
886                                             bend->buffer_maps[bend->buffer_maps_index],
887                                             bend->buffer_addrs[bend->buffer_maps_index]);
888         }
889 }
890
891
892 int netback_accel_add_buffers(struct netback_accel *bend, int pages, int log2_pages,
893                               u32 *grants, u32 *buf_addr_out)
894 {
895         struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
896         unsigned long long addr_array[NET_ACCEL_MSG_MAX_PAGE_REQ];
897         int rc, i, index;
898         u64 dev_bus_addr;
899
900         /* Make sure we can't overflow the dma_maps array */
901         if (accel_hw_priv->dma_maps_index >= 
902             bend->max_pages / NET_ACCEL_MSG_MAX_PAGE_REQ) {
903                 EPRINTK("%s: too many buffer table allocations: %d %d\n",
904                         __FUNCTION__, accel_hw_priv->dma_maps_index, 
905                         bend->max_pages / NET_ACCEL_MSG_MAX_PAGE_REQ);
906                 return -EINVAL;
907         }
908
909         /* Make sure we can't overflow the buffer_maps array */
910         if (bend->buffer_maps_index + pages > bend->max_pages) {
911                 EPRINTK("%s: too many pages mapped: %d + %d > %d\n", 
912                         __FUNCTION__, bend->buffer_maps_index,
913                         pages, bend->max_pages);
914                 return -EINVAL;
915         }
916
917         for (i = 0; i < pages; i++) {
918                 VPRINTK("%s: mapping page %d\n", __FUNCTION__, i);
919                 rc = net_accel_map_device_page
920                         (bend->hdev_data, grants[i],
921                          &bend->buffer_maps[bend->buffer_maps_index],
922                          &dev_bus_addr);
923     
924                 if (rc != 0) {
925                         EPRINTK("error in net_accel_map_device_page\n");
926                         buffer_map_cleanup(bend, i);
927                         return rc;
928                 }
929                 
930                 bend->buffer_addrs[bend->buffer_maps_index] = dev_bus_addr;
931
932                 bend->buffer_maps_index++;
933
934                 addr_array[i] = dev_bus_addr;
935         }
936
937         VPRINTK("%s: mapping dma addresses to vih %p\n", __FUNCTION__, 
938                 accel_hw_priv->efx_vih);
939
940         index = accel_hw_priv->dma_maps_index;
941         if ((rc = efx_vi_dma_map_addrs(accel_hw_priv->efx_vih, addr_array, pages,
942                                        &(accel_hw_priv->dma_maps[index]))) < 0) {
943                 EPRINTK("error in dma_map_pages\n");
944                 buffer_map_cleanup(bend, i);
945                 return rc;
946         }
947
948         accel_hw_priv->dma_maps_index++;
949         NETBACK_ACCEL_STATS_OP(bend->stats.num_buffer_pages += pages);
950
951         //DPRINTK("%s: getting map address\n", __FUNCTION__);
952
953         *buf_addr_out = efx_vi_dma_get_map_addr(accel_hw_priv->efx_vih, 
954                                                 accel_hw_priv->dma_maps[index]);
955
956         //DPRINTK("%s: done\n", __FUNCTION__);
957
958         return 0;
959 }
960
961
962 int netback_accel_remove_buffers(struct netback_accel *bend)
963 {
964         /* Only try to free buffers if accel_hw_priv was setup */
965         if (bend->hw_state != NETBACK_ACCEL_RES_NONE) {
966                 struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
967                 int i;
968
969                 efx_vi_reset(accel_hw_priv->efx_vih);
970
971                 while (accel_hw_priv->dma_maps_index > 0) {
972                         accel_hw_priv->dma_maps_index--;
973                         i = accel_hw_priv->dma_maps_index;
974                         efx_vi_dma_unmap_addrs(accel_hw_priv->efx_vih, 
975                                                accel_hw_priv->dma_maps[i]);
976                 }
977                 
978                 while (bend->buffer_maps_index > 0) {
979                         VPRINTK("Unmapping granted buffer %d\n", 
980                                 bend->buffer_maps_index);
981                         bend->buffer_maps_index--;
982                         i = bend->buffer_maps_index;
983                         net_accel_unmap_device_page(bend->hdev_data, 
984                                                     bend->buffer_maps[i],
985                                                     bend->buffer_addrs[i]);
986                 }
987
988                 NETBACK_ACCEL_STATS_OP(bend->stats.num_buffer_pages = 0);
989         }
990
991         return 0;
992 }
993
994 /**************************************************************************
995  * 
996  * Filter stuff
997  *
998  **************************************************************************/
999
1000 static int netback_accel_filter_init(struct netback_accel *bend)
1001 {
1002         struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
1003         int i, rc;
1004
1005         BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_ALLOC);
1006
1007         spin_lock_init(&accel_hw_priv->filter_lock);
1008
1009         if ((rc = cuckoo_hash_init(&accel_hw_priv->filter_hash_table, 
1010                                    5 /* space for 32 filters */, 8)) != 0) {
1011                 EPRINTK("Failed to initialise filter hash table\n");
1012                 return rc;
1013         }
1014
1015         accel_hw_priv->fspecs = kzalloc(sizeof(struct netback_accel_filter_spec) *
1016                                         bend->quotas.max_filters,
1017                                         GFP_KERNEL);
1018
1019         if (accel_hw_priv->fspecs == NULL) {
1020                 EPRINTK("No memory for filter specs.\n");
1021                 cuckoo_hash_destroy(&accel_hw_priv->filter_hash_table);
1022                 return -ENOMEM;
1023         }
1024
1025         for (i = 0; i < bend->quotas.max_filters; i++) {
1026                 accel_hw_priv->free_filters |= (1 << i);
1027         }
1028
1029         /* Base mask on highest set bit in max_filters  */
1030         accel_hw_priv->filter_idx_mask = (1 << fls(bend->quotas.max_filters)) - 1;
1031         VPRINTK("filter setup: max is %x mask is %x\n",
1032                 bend->quotas.max_filters, accel_hw_priv->filter_idx_mask);
1033
1034         bend->hw_state = NETBACK_ACCEL_RES_FILTER;
1035
1036         return 0;
1037 }
1038
1039
1040 static inline void make_filter_key(cuckoo_hash_ip_key *key,  
1041                                    struct netback_accel_filter_spec *filt)
1042
1043 {
1044         key->local_ip = filt->destip_be;
1045         key->local_port = filt->destport_be;
1046         key->proto = filt->proto;
1047 }
1048
1049
1050 static inline 
1051 void netback_accel_free_filter(struct falcon_bend_accel_priv *accel_hw_priv,
1052                                int filter)
1053 {
1054         cuckoo_hash_ip_key filter_key;
1055
1056         if (!(accel_hw_priv->free_filters & (1 << filter))) {
1057                 efx_vi_filter_stop(accel_hw_priv->efx_vih, 
1058                                    accel_hw_priv->fspecs[filter].filter_handle);
1059                 make_filter_key(&filter_key, &(accel_hw_priv->fspecs[filter]));
1060                 if (cuckoo_hash_remove(&accel_hw_priv->filter_hash_table,
1061                                        (cuckoo_hash_key *)&filter_key)) {
1062                         EPRINTK("%s: Couldn't find filter to remove from table\n",
1063                                 __FUNCTION__);
1064                         BUG();
1065                 }
1066         }
1067 }
1068
1069
1070 static void netback_accel_filter_shutdown(struct netback_accel *bend)
1071 {
1072         struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
1073         int i;
1074         unsigned long flags;
1075
1076         BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_FILTER);
1077
1078         spin_lock_irqsave(&accel_hw_priv->filter_lock, flags);
1079
1080         BUG_ON(accel_hw_priv->fspecs == NULL);
1081
1082         for (i = 0; i < bend->quotas.max_filters; i++) {
1083                 netback_accel_free_filter(accel_hw_priv, i);
1084         }
1085         
1086         kfree(accel_hw_priv->fspecs);
1087         accel_hw_priv->fspecs = NULL;
1088         accel_hw_priv->free_filters = 0;
1089         
1090         cuckoo_hash_destroy(&accel_hw_priv->filter_hash_table);
1091
1092         spin_unlock_irqrestore(&accel_hw_priv->filter_lock, flags);
1093
1094         bend->hw_state = NETBACK_ACCEL_RES_ALLOC;
1095 }
1096
1097
1098 /*! Suggest a filter to replace when we want to insert a new one and have
1099  *  none free.
1100  */
1101 static unsigned get_victim_filter(struct netback_accel *bend)
1102 {
1103         /*
1104          * We could attempt to get really clever, and may do at some
1105          * point, but random replacement is v. cheap and low on
1106          * pathological worst cases.
1107          */
1108         unsigned index, cycles;
1109
1110         rdtscl(cycles);
1111
1112         /*
1113          * Some doubt about the quality of the bottom few bits, so
1114          * throw 'em * away
1115          */
1116         index = (cycles >> 4) & ((struct falcon_bend_accel_priv *)
1117                                  bend->accel_hw_priv)->filter_idx_mask;
1118         /*
1119          * We don't enforce that the number of filters is a power of
1120          * two, but the masking gets us to within one subtraction of a
1121          * valid index
1122          */
1123         if (index >= bend->quotas.max_filters)
1124                 index -= bend->quotas.max_filters;
1125         DPRINTK("backend %s->%d has no free filters. Filter %d will be evicted\n",
1126                 bend->nicname, bend->far_end, index);
1127         return index;
1128 }
1129
1130
/*
 * Add a filter for the specified IP/port to the backend.
 *
 * Returns the index of the slot the filter landed in (>= 0) on
 * success; -1 if an identical filter is already present or the
 * hardware insert fails; or the non-zero error from cuckoo_hash_add()
 * if the hash insert fails.  When no slot is free an existing filter
 * is evicted ("recycled") to make room.
 */
int 
netback_accel_filter_check_add(struct netback_accel *bend, 
                               struct netback_accel_filter_spec *filt)
{
	struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
	struct netback_accel_filter_spec *fs;
	unsigned filter_index;
	unsigned long flags;
	int rc, recycling = 0;
	cuckoo_hash_ip_key filter_key, evict_key;

	BUG_ON(filt->proto != IPPROTO_TCP && filt->proto != IPPROTO_UDP);

	DPRINTK("Will add %s filter for dst ip %08x and dst port %d\n", 
		(filt->proto == IPPROTO_TCP) ? "TCP" : "UDP",
		be32_to_cpu(filt->destip_be), be16_to_cpu(filt->destport_be));

	spin_lock_irqsave(&accel_hw_priv->filter_lock, flags);
	/*
	 * Check to see if we're already filtering this IP address and
	 * port. Happens if you insert a filter mid-stream as there
	 * are many packets backed up to be delivered to dom0 already
	 */
	make_filter_key(&filter_key, filt);
	if (cuckoo_hash_lookup(&accel_hw_priv->filter_hash_table, 
			       (cuckoo_hash_key *)(&filter_key), 
			       &filter_index)) {
		DPRINTK("Found matching filter %d already in table\n", 
			filter_index);
		rc = -1;
		goto out;
	}

	/* Pick a slot: the lowest free one, or evict a victim if full */
	if (accel_hw_priv->free_filters == 0) {
		filter_index = get_victim_filter(bend);
		recycling = 1;
	} else {
		filter_index = __ffs(accel_hw_priv->free_filters);
		clear_bit(filter_index, &accel_hw_priv->free_filters);
	}

	fs = &accel_hw_priv->fspecs[filter_index];

	/*
	 * Evict the victim's old identity: stop its hardware filter
	 * and remove its hash entry before the slot is reused
	 */
	if (recycling) {
		DPRINTK("Removing filter index %d handle %p\n", filter_index,
			fs->filter_handle);

		if ((rc = efx_vi_filter_stop(accel_hw_priv->efx_vih, 
					     fs->filter_handle)) != 0) {
			EPRINTK("Couldn't clear NIC filter table entry %d\n", rc);
		}

		make_filter_key(&evict_key, fs);
		if (cuckoo_hash_remove(&accel_hw_priv->filter_hash_table,
				       (cuckoo_hash_key *)&evict_key)) {
			EPRINTK("Couldn't find filter to remove from table\n");
			BUG();
		}
		NETBACK_ACCEL_STATS_OP(bend->stats.num_filters--);
	}

	/* Update the filter spec with new details */
	*fs = *filt;

	/* On failure, hand the (now stopped) slot back to the free pool */
	if ((rc = cuckoo_hash_add(&accel_hw_priv->filter_hash_table, 
				  (cuckoo_hash_key *)&filter_key, filter_index,
				  1)) != 0) {
		EPRINTK("Error (%d) adding filter to table\n", rc);
		accel_hw_priv->free_filters |= (1 << filter_index);
		goto out;
	}

	/* Finally insert the filter into the NIC itself */
	rc = efx_vi_filter(accel_hw_priv->efx_vih, filt->proto, filt->destip_be,
			   filt->destport_be, 
			   (struct filter_resource_t **)&fs->filter_handle);

	if (rc != 0) {
		EPRINTK("Hardware filter insertion failed. Error %d\n", rc);
		/* Undo the hash insert and free the slot again */
		accel_hw_priv->free_filters |= (1 << filter_index);
		cuckoo_hash_remove(&accel_hw_priv->filter_hash_table, 
				   (cuckoo_hash_key *)&filter_key);
		rc = -1;
		goto out;
	}

	NETBACK_ACCEL_STATS_OP(bend->stats.num_filters++);

	VPRINTK("%s: success index %d handle %p\n", __FUNCTION__, filter_index, 
		fs->filter_handle);

	rc = filter_index;
 out:
	spin_unlock_irqrestore(&accel_hw_priv->filter_lock, flags);
	return rc;
}
1227
1228
1229 /* Remove a filter entry for the specific device and IP/port */
1230 static void netback_accel_filter_remove(struct netback_accel *bend, 
1231                                         int filter_index)
1232 {
1233         struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
1234
1235         BUG_ON(accel_hw_priv->free_filters & (1 << filter_index));
1236         netback_accel_free_filter(accel_hw_priv, filter_index);
1237         accel_hw_priv->free_filters |= (1 << filter_index);
1238 }
1239
1240
1241 /* Remove a filter entry for the specific device and IP/port */
1242 void netback_accel_filter_remove_spec(struct netback_accel *bend, 
1243                                       struct netback_accel_filter_spec *filt)
1244 {
1245         struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
1246         unsigned filter_found;
1247         unsigned long flags;
1248         cuckoo_hash_ip_key filter_key;
1249         struct netback_accel_filter_spec *fs;
1250
1251         if (filt->proto == IPPROTO_TCP) {
1252                 DPRINTK("Remove TCP filter for dst ip %08x and dst port %d\n",
1253                         be32_to_cpu(filt->destip_be),
1254                         be16_to_cpu(filt->destport_be));
1255         } else if (filt->proto == IPPROTO_UDP) {
1256                 DPRINTK("Remove UDP filter for dst ip %08x and dst port %d\n",
1257                         be32_to_cpu(filt->destip_be),
1258                         be16_to_cpu(filt->destport_be));
1259         } else {
1260                 /*
1261                  * This could be provoked by an evil frontend, so can't
1262                  * BUG(), but harmless as it should fail tests below 
1263                  */
1264                 DPRINTK("Non-TCP/UDP filter dst ip %08x and dst port %d\n",
1265                         be32_to_cpu(filt->destip_be),
1266                         be16_to_cpu(filt->destport_be));
1267         }
1268
1269         spin_lock_irqsave(&accel_hw_priv->filter_lock, flags);
1270
1271         make_filter_key(&filter_key, filt);
1272         if (!cuckoo_hash_lookup(&accel_hw_priv->filter_hash_table, 
1273                                (cuckoo_hash_key *)(&filter_key), 
1274                                &filter_found)) {
1275                 EPRINTK("Couldn't find matching filter already in table\n");
1276                 goto out;
1277         }
1278         
1279         /* Do a full check to make sure we've not had a hash collision */
1280         fs = &accel_hw_priv->fspecs[filter_found];
1281         if (fs->destip_be == filt->destip_be &&
1282             fs->destport_be == filt->destport_be &&
1283             fs->proto == filt->proto &&
1284             !memcmp(fs->mac, filt->mac, ETH_ALEN)) {
1285                 netback_accel_filter_remove(bend, filter_found);
1286         } else {
1287                 EPRINTK("Entry in hash table does not match filter spec\n");
1288                 goto out;
1289         }
1290
1291  out:
1292         spin_unlock_irqrestore(&accel_hw_priv->filter_lock, flags);
1293 }