drivers/xen/sfc_netback/accel_solarflare.c
/****************************************************************************
 * Solarflare driver for Xen network acceleration
 *
 * Copyright 2006-2008: Solarflare Communications Inc,
 *                      9501 Jeronimo Road, Suite 250,
 *                      Irvine, CA 92618, USA
 *
 * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 ****************************************************************************
 */

#include "common.h"

#include "accel.h"
#include "accel_solarflare.h"
#include "accel_msg_iface.h"
#include "accel_util.h"

#include "accel_cuckoo_hash.h"

#include "ci/driver/resource/efx_vi.h"

#include "ci/efrm/nic_table.h"
#include "ci/efhw/public.h"

#include <xen/evtchn.h>
#include <linux/list.h>
#include <linux/mutex.h>

#include "driverlink_api.h"

#define SF_XEN_RX_USR_BUF_SIZE 2048

struct falcon_bend_accel_priv {
        struct efx_vi_state *efx_vih;

        /*! Array of pointers to dma_map state, used so VNIC can
         *  request their removal in a single message
         */
        struct efx_vi_dma_map_state **dma_maps;
        /*! Index into dma_maps */
        int dma_maps_index;

        /*! Serialises access to filters */
        spinlock_t filter_lock;
        /*! Bitmap of which filters are free */
        unsigned long free_filters;
        /*! Used for index normalisation */
        u32 filter_idx_mask;
        struct netback_accel_filter_spec *fspecs;
        cuckoo_hash_table filter_hash_table;

        u32 txdmaq_gnt;
        u32 rxdmaq_gnt;
        u32 doorbell_gnt;
        u32 evq_rptr_gnt;
        u32 evq_mem_gnts[EF_HW_FALCON_EVQ_PAGES];
        u32 evq_npages;
};

/* Forward declarations */
static int netback_accel_filter_init(struct netback_accel *);
static void netback_accel_filter_shutdown(struct netback_accel *);

/**************************************************************************
 *
 * Driverlink stuff
 *
 **************************************************************************/

struct driverlink_port {
        struct list_head link;
        enum net_accel_hw_type type;
        struct net_device *net_dev;
        struct efx_dl_device *efx_dl_dev;
        void *fwd_priv;
};

static struct list_head dl_ports;

/* This mutex protects global state, such as the dl_ports list */
DEFINE_MUTEX(accel_mutex);

static int init_done = 0;

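/*
 * Locking summary: accel_mutex (above) serialises the module-global
 * dl_ports list and connect/disconnect of ports, while per-port filter
 * state is serialised separately by the filter_lock spinlock in
 * struct falcon_bend_accel_priv.
 */
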
/* The DL callbacks */

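/*
 * Note that both callbacks below only observe traffic: each packet is
 * handed to the port's forwarding state (fwd_priv) for inspection and
 * EFX_ALLOW_PACKET is always returned, so the NET driver's datapath
 * is never vetoed.
 */
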
#if defined(EFX_USE_FASTCALL)
static enum efx_veto fastcall
#else
static enum efx_veto
#endif
bend_dl_tx_packet(struct efx_dl_device *efx_dl_dev,
                  struct sk_buff *skb)
{
        struct driverlink_port *port = efx_dl_dev->priv;

        BUG_ON(port == NULL);

        NETBACK_ACCEL_STATS_OP(global_stats.dl_tx_packets++);
        if (skb_mac_header_was_set(skb)) {
                netback_accel_tx_packet(skb, port->fwd_priv);
        } else {
                DPRINTK("Ignoring packet with missing mac address\n");
                NETBACK_ACCEL_STATS_OP(global_stats.dl_tx_bad_packets++);
        }
        return EFX_ALLOW_PACKET;
}

/* EFX_USE_FASTCALL */
#if defined(EFX_USE_FASTCALL)
static enum efx_veto fastcall
#else
static enum efx_veto
#endif
bend_dl_rx_packet(struct efx_dl_device *efx_dl_dev,
                  const char *pkt_buf, int pkt_len)
{
        struct driverlink_port *port = efx_dl_dev->priv;
        struct netback_pkt_buf pkt;
        struct ethhdr *eh;

        BUG_ON(port == NULL);

        pkt.mac.raw = (char *)pkt_buf;
        pkt.nh.raw = (char *)pkt_buf + ETH_HLEN;
        eh = (struct ethhdr *)pkt_buf;
        pkt.protocol = eh->h_proto;

        NETBACK_ACCEL_STATS_OP(global_stats.dl_rx_packets++);
        netback_accel_rx_packet(&pkt, port->fwd_priv);
        return EFX_ALLOW_PACKET;
}

/* Callbacks we'd like to get from the netdriver through driverlink */
struct efx_dl_callbacks bend_dl_callbacks = {
        .tx_packet = bend_dl_tx_packet,
        .rx_packet = bend_dl_rx_packet,
};

static struct netback_accel_hooks accel_hooks = {
        THIS_MODULE,
        &netback_accel_probe,
        &netback_accel_remove
};

/* Driverlink probe - register our callbacks */
static int bend_dl_probe(struct efx_dl_device *efx_dl_dev,
                         const struct net_device *net_dev,
                         const struct efx_dl_device_info *dev_info,
                         const char *silicon_rev)
{
        int rc;
        enum net_accel_hw_type type;
        struct driverlink_port *port;

        DPRINTK("%s: %s\n", __FUNCTION__, silicon_rev);

        if (strcmp(silicon_rev, "falcon/a1") == 0)
                type = NET_ACCEL_MSG_HWTYPE_FALCON_A;
        else if (strcmp(silicon_rev, "falcon/b0") == 0)
                type = NET_ACCEL_MSG_HWTYPE_FALCON_B;
        else if (strcmp(silicon_rev, "siena/a0") == 0)
                type = NET_ACCEL_MSG_HWTYPE_SIENA_A;
        else {
                EPRINTK("%s: unsupported silicon %s\n", __FUNCTION__,
                        silicon_rev);
                rc = -EINVAL;
                goto fail1;
        }

        port = kmalloc(sizeof(struct driverlink_port), GFP_KERNEL);
        if (port == NULL) {
                EPRINTK("%s: no memory for dl probe\n", __FUNCTION__);
                rc = -ENOMEM;
                goto fail1;
        }

        port->efx_dl_dev = efx_dl_dev;
        efx_dl_dev->priv = port;

        port->fwd_priv = netback_accel_init_fwd_port();
        if (port->fwd_priv == NULL) {
                EPRINTK("%s: failed to set up forwarding for port\n",
                        __FUNCTION__);
                rc = -ENOMEM;
                goto fail2;
        }

        rc = efx_dl_register_callbacks(efx_dl_dev, &bend_dl_callbacks);
        if (rc != 0) {
                EPRINTK("%s: register_callbacks failed\n", __FUNCTION__);
                goto fail3;
        }

        port->type = type;
        port->net_dev = (struct net_device *)net_dev;

        mutex_lock(&accel_mutex);
        list_add(&port->link, &dl_ports);
        mutex_unlock(&accel_mutex);

        rc = netback_connect_accelerator(NETBACK_ACCEL_VERSION, 0,
                                         port->net_dev->name, &accel_hooks);

        if (rc < 0) {
                EPRINTK("Xen netback accelerator version mismatch\n");
                goto fail4;
        } else if (rc > 0) {
                /*
                 * In future we may want to add backwards compatibility
                 * and accept certain subsets of previous versions
                 */
                EPRINTK("Xen netback accelerator version mismatch\n");
                goto fail4;
        }

        return 0;

 fail4:
        mutex_lock(&accel_mutex);
        list_del(&port->link);
        mutex_unlock(&accel_mutex);

        efx_dl_unregister_callbacks(efx_dl_dev, &bend_dl_callbacks);
 fail3:
        netback_accel_shutdown_fwd_port(port->fwd_priv);
 fail2:
        efx_dl_dev->priv = NULL;
        kfree(port);
 fail1:
        return rc;
}


static void bend_dl_remove(struct efx_dl_device *efx_dl_dev)
{
        struct driverlink_port *port;

        DPRINTK("Unregistering driverlink callbacks.\n");

        mutex_lock(&accel_mutex);

        port = (struct driverlink_port *)efx_dl_dev->priv;

        BUG_ON(list_empty(&dl_ports));
        BUG_ON(port == NULL);
        BUG_ON(port->efx_dl_dev != efx_dl_dev);

        netback_disconnect_accelerator(0, port->net_dev->name);

        list_del(&port->link);

        mutex_unlock(&accel_mutex);

        efx_dl_unregister_callbacks(efx_dl_dev, &bend_dl_callbacks);
        netback_accel_shutdown_fwd_port(port->fwd_priv);

        efx_dl_dev->priv = NULL;
        kfree(port);
}


static void bend_dl_reset_suspend(struct efx_dl_device *efx_dl_dev)
{
        struct driverlink_port *port;

        DPRINTK("Driverlink reset suspend.\n");

        mutex_lock(&accel_mutex);

        port = (struct driverlink_port *)efx_dl_dev->priv;
        BUG_ON(list_empty(&dl_ports));
        BUG_ON(port == NULL);
        BUG_ON(port->efx_dl_dev != efx_dl_dev);

        netback_disconnect_accelerator(0, port->net_dev->name);
        mutex_unlock(&accel_mutex);
}


static void bend_dl_reset_resume(struct efx_dl_device *efx_dl_dev, int ok)
{
        int rc;
        struct driverlink_port *port;

        DPRINTK("Driverlink reset resume.\n");

        if (!ok)
                return;

        port = (struct driverlink_port *)efx_dl_dev->priv;
        BUG_ON(list_empty(&dl_ports));
        BUG_ON(port == NULL);
        BUG_ON(port->efx_dl_dev != efx_dl_dev);

        rc = netback_connect_accelerator(NETBACK_ACCEL_VERSION, 0,
                                         port->net_dev->name, &accel_hooks);
        if (rc != 0) {
                EPRINTK("Xen netback accelerator version mismatch\n");

                mutex_lock(&accel_mutex);
                list_del(&port->link);
                mutex_unlock(&accel_mutex);

                efx_dl_unregister_callbacks(efx_dl_dev, &bend_dl_callbacks);

                netback_accel_shutdown_fwd_port(port->fwd_priv);

                efx_dl_dev->priv = NULL;
                kfree(port);
        }
}


static struct efx_dl_driver bend_dl_driver = {
        .name = "SFC Xen backend",
        .probe = bend_dl_probe,
        .remove = bend_dl_remove,
        .reset_suspend = bend_dl_reset_suspend,
        .reset_resume = bend_dl_reset_resume
};


int netback_accel_sf_init(void)
{
        int rc, nic_i;
        struct efhw_nic *nic;

        INIT_LIST_HEAD(&dl_ports);

        rc = efx_dl_register_driver(&bend_dl_driver);
        /* If we couldn't find the NET driver, give up */
        if (rc == -ENOENT)
                return rc;

        if (rc == 0) {
                EFRM_FOR_EACH_NIC(nic_i, nic)
                        falcon_nic_set_rx_usr_buf_size(nic,
                                                       SF_XEN_RX_USR_BUF_SIZE);
        }

        init_done = (rc == 0);
        return rc;
}


void netback_accel_sf_shutdown(void)
{
        if (!init_done)
                return;
        DPRINTK("Unregistering driverlink driver\n");

        /*
         * This will trigger removal callbacks for all the devices, which
         * will unregister their callbacks, disconnect from netfront, etc.
         */
        efx_dl_unregister_driver(&bend_dl_driver);
}


int netback_accel_sf_hwtype(struct netback_accel *bend)
{
        struct driverlink_port *port;

        mutex_lock(&accel_mutex);

        list_for_each_entry(port, &dl_ports, link) {
                if (strcmp(bend->nicname, port->net_dev->name) == 0) {
                        bend->hw_type = port->type;
                        bend->accel_setup = netback_accel_setup_vnic_hw;
                        bend->accel_shutdown = netback_accel_shutdown_vnic_hw;
                        bend->fwd_priv = port->fwd_priv;
                        bend->net_dev = port->net_dev;
                        mutex_unlock(&accel_mutex);
                        return 0;
                }
        }

        mutex_unlock(&accel_mutex);

        EPRINTK("Failed to identify backend device '%s' with a NIC\n",
                bend->nicname);

        return -ENOENT;
}


/****************************************************************************
 * Resource management code
 ***************************************************************************/

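/*
 * Sizing note, derived from netback_accel_add_buffers() below: buffer
 * pages arrive in batches of at most NET_ACCEL_MSG_MAX_PAGE_REQ pages
 * and each batch consumes a single dma_maps slot, so dma_maps needs
 * only max_pages / NET_ACCEL_MSG_MAX_PAGE_REQ entries, whereas
 * buffer_maps and buffer_addrs need one entry per page.
 */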
static int alloc_page_state(struct netback_accel *bend, int max_pages)
{
        struct falcon_bend_accel_priv *accel_hw_priv;

        if (max_pages < 0 || max_pages > bend->quotas.max_buf_pages) {
                EPRINTK("%s: invalid max_pages: %d\n", __FUNCTION__, max_pages);
                return -EINVAL;
        }

        accel_hw_priv = kzalloc(sizeof(struct falcon_bend_accel_priv),
                                GFP_KERNEL);
        if (accel_hw_priv == NULL) {
                EPRINTK("%s: no memory for accel_hw_priv\n", __FUNCTION__);
                return -ENOMEM;
        }

        accel_hw_priv->dma_maps = kzalloc
                (sizeof(struct efx_vi_dma_map_state *) *
                 (max_pages / NET_ACCEL_MSG_MAX_PAGE_REQ), GFP_KERNEL);
        if (accel_hw_priv->dma_maps == NULL) {
                EPRINTK("%s: no memory for dma_maps\n", __FUNCTION__);
                kfree(accel_hw_priv);
                return -ENOMEM;
        }

        bend->buffer_maps = kzalloc(sizeof(struct vm_struct *) * max_pages,
                                    GFP_KERNEL);
        if (bend->buffer_maps == NULL) {
                EPRINTK("%s: no memory for buffer_maps\n", __FUNCTION__);
                kfree(accel_hw_priv->dma_maps);
                kfree(accel_hw_priv);
                return -ENOMEM;
        }

        bend->buffer_addrs = kzalloc(sizeof(u64) * max_pages, GFP_KERNEL);
        if (bend->buffer_addrs == NULL) {
                kfree(bend->buffer_maps);
                kfree(accel_hw_priv->dma_maps);
                kfree(accel_hw_priv);
                return -ENOMEM;
        }

        bend->accel_hw_priv = accel_hw_priv;

        return 0;
}


static int free_page_state(struct netback_accel *bend)
{
        struct falcon_bend_accel_priv *accel_hw_priv;

        DPRINTK("%s: %p\n", __FUNCTION__, bend);

        accel_hw_priv = bend->accel_hw_priv;

        if (accel_hw_priv) {
                kfree(accel_hw_priv->dma_maps);
                kfree(bend->buffer_maps);
                kfree(bend->buffer_addrs);
                kfree(accel_hw_priv);
                bend->accel_hw_priv = NULL;
                bend->max_pages = 0;
        }

        return 0;
}


/* The timeout event callback for the event q */
static void bend_evq_timeout(void *context, int is_timeout)
{
        struct netback_accel *bend = (struct netback_accel *)context;
        if (is_timeout) {
                /* Pass event to vnic front end driver */
                VPRINTK("timeout event to %d\n", bend->net_channel);
                NETBACK_ACCEL_STATS_OP(bend->stats.evq_timeouts++);
                notify_remote_via_irq(bend->net_channel_irq);
        } else {
                /* It's a wakeup event, used by Falcon */
                VPRINTK("wakeup to %d\n", bend->net_channel);
                NETBACK_ACCEL_STATS_OP(bend->stats.evq_wakeups++);
                notify_remote_via_irq(bend->net_channel_irq);
        }
}


/*
 * Create the eventq and associated gubbins for communication with the
 * front end vnic driver
 */
static int ef_get_vnic(struct netback_accel *bend)
{
        struct falcon_bend_accel_priv *accel_hw_priv;
        int rc = 0;

        BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_NONE);

        /* Allocate page related state and accel_hw_priv */
        rc = alloc_page_state(bend, bend->max_pages);
        if (rc != 0) {
                EPRINTK("Failed to allocate page state: %d\n", rc);
                return rc;
        }

        accel_hw_priv = bend->accel_hw_priv;

        rc = efx_vi_alloc(&accel_hw_priv->efx_vih, bend->net_dev->ifindex);
        if (rc != 0) {
                EPRINTK("%s: efx_vi_alloc failed %d\n", __FUNCTION__, rc);
                free_page_state(bend);
                return rc;
        }

        rc = efx_vi_eventq_register_callback(accel_hw_priv->efx_vih,
                                             bend_evq_timeout,
                                             bend);
        if (rc != 0) {
                EPRINTK("%s: register_callback failed %d\n", __FUNCTION__, rc);
                efx_vi_free(accel_hw_priv->efx_vih);
                free_page_state(bend);
                return rc;
        }

        bend->hw_state = NETBACK_ACCEL_RES_ALLOC;

        return 0;
}


static void ef_free_vnic(struct netback_accel *bend)
{
        struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;

        BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_ALLOC);

        efx_vi_eventq_kill_callback(accel_hw_priv->efx_vih);

        DPRINTK("Hardware is freeable. Will proceed.\n");

        efx_vi_free(accel_hw_priv->efx_vih);
        accel_hw_priv->efx_vih = NULL;

        VPRINTK("Free page state...\n");
        free_page_state(bend);

        bend->hw_state = NETBACK_ACCEL_RES_NONE;
}


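/*
 * Revoke a grant; if the far end still has the page mapped (-EBUSY)
 * the page cannot safely be reclaimed and reused by dom0, so the only
 * safe option left is to shut the remote domain down.
 */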
static inline void ungrant_or_crash(grant_ref_t gntref, int domain)
{
        if (net_accel_ungrant_page(gntref) == -EBUSY)
                net_accel_shutdown_remote(domain);
}


static void netback_accel_release_hwinfo(struct netback_accel *bend)
{
        struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
        int i;

        DPRINTK("Remove dma q grants %d %d\n", accel_hw_priv->txdmaq_gnt,
                accel_hw_priv->rxdmaq_gnt);
        ungrant_or_crash(accel_hw_priv->txdmaq_gnt, bend->far_end);
        ungrant_or_crash(accel_hw_priv->rxdmaq_gnt, bend->far_end);

        DPRINTK("Remove doorbell grant %d\n", accel_hw_priv->doorbell_gnt);
        ungrant_or_crash(accel_hw_priv->doorbell_gnt, bend->far_end);

        if (bend->hw_type == NET_ACCEL_MSG_HWTYPE_FALCON_A) {
                DPRINTK("Remove rptr grant %d\n", accel_hw_priv->evq_rptr_gnt);
                ungrant_or_crash(accel_hw_priv->evq_rptr_gnt, bend->far_end);
        }

        for (i = 0; i < accel_hw_priv->evq_npages; i++) {
                DPRINTK("Remove evq grant %d\n", accel_hw_priv->evq_mem_gnts[i]);
                ungrant_or_crash(accel_hw_priv->evq_mem_gnts[i], bend->far_end);
        }

        bend->hw_state = NETBACK_ACCEL_RES_FILTER;
}


static int ef_bend_hwinfo_falcon_common(struct netback_accel *bend,
                                        struct net_accel_hw_falcon_b *hwinfo)
{
        struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
        struct efx_vi_hw_resource_metadata res_mdata;
        struct efx_vi_hw_resource res_array[EFX_VI_HW_RESOURCE_MAXSIZE];
        int rc, len = EFX_VI_HW_RESOURCE_MAXSIZE, i, pfn = 0;
        unsigned long txdmaq_pfn = 0, rxdmaq_pfn = 0;

        rc = efx_vi_hw_resource_get_phys(accel_hw_priv->efx_vih, &res_mdata,
                                         res_array, &len);
        if (rc != 0) {
                DPRINTK("%s: resource_get_phys returned %d\n",
                        __FUNCTION__, rc);
                return rc;
        }

        hwinfo->nic_arch = res_mdata.nic_arch;
        hwinfo->nic_variant = res_mdata.nic_variant;
        hwinfo->nic_revision = res_mdata.nic_revision;

        hwinfo->evq_order = res_mdata.evq_order;
        hwinfo->evq_offs = res_mdata.evq_offs;
        hwinfo->evq_capacity = res_mdata.evq_capacity;
        hwinfo->instance = res_mdata.instance;
        hwinfo->rx_capacity = res_mdata.rx_capacity;
        hwinfo->tx_capacity = res_mdata.tx_capacity;

        VPRINTK("evq_order %d evq_offs %d evq_cap %d inst %d rx_cap %d tx_cap %d\n",
                hwinfo->evq_order, hwinfo->evq_offs, hwinfo->evq_capacity,
                hwinfo->instance, hwinfo->rx_capacity, hwinfo->tx_capacity);

        for (i = 0; i < len; i++) {
                struct efx_vi_hw_resource *res = &(res_array[i]);
                switch (res->type) {
                case EFX_VI_HW_RESOURCE_TXDMAQ:
                        txdmaq_pfn = page_to_pfn(virt_to_page(res->address));
                        break;
                case EFX_VI_HW_RESOURCE_RXDMAQ:
                        rxdmaq_pfn = page_to_pfn(virt_to_page(res->address));
                        break;
                case EFX_VI_HW_RESOURCE_EVQTIMER:
                        break;
                case EFX_VI_HW_RESOURCE_EVQRPTR:
                case EFX_VI_HW_RESOURCE_EVQRPTR_OFFSET:
                        hwinfo->evq_rptr = res->address;
                        break;
                case EFX_VI_HW_RESOURCE_EVQMEMKVA:
                        accel_hw_priv->evq_npages = 1 << res_mdata.evq_order;
                        pfn = page_to_pfn(virt_to_page(res->address));
                        break;
                case EFX_VI_HW_RESOURCE_BELLPAGE:
                        hwinfo->doorbell_mfn = res->address;
                        break;
                default:
                        EPRINTK("%s: Unknown hardware resource type %d\n",
                                __FUNCTION__, res->type);
                        break;
                }
        }

        VPRINTK("Passing txdmaq page pfn %lx\n", txdmaq_pfn);
        rc = net_accel_grant_page(bend->hdev_data, pfn_to_mfn(txdmaq_pfn), 0);
        if (rc < 0)
                goto fail0;
        accel_hw_priv->txdmaq_gnt = hwinfo->txdmaq_gnt = rc;

        VPRINTK("Passing rxdmaq page pfn %lx\n", rxdmaq_pfn);
        rc = net_accel_grant_page(bend->hdev_data, pfn_to_mfn(rxdmaq_pfn), 0);
        if (rc < 0)
                goto fail1;
        accel_hw_priv->rxdmaq_gnt = hwinfo->rxdmaq_gnt = rc;

        VPRINTK("Passing doorbell page mfn %x\n", hwinfo->doorbell_mfn);
        /* Make the relevant H/W pages mappable by the far end */
        rc = net_accel_grant_page(bend->hdev_data, hwinfo->doorbell_mfn, 1);
        if (rc < 0)
                goto fail2;
        accel_hw_priv->doorbell_gnt = hwinfo->doorbell_gnt = rc;

        /* Now do the same for the memory pages */
        /* Convert the page + length we got back for the evq to grants. */
        for (i = 0; i < accel_hw_priv->evq_npages; i++) {
                rc = net_accel_grant_page(bend->hdev_data, pfn_to_mfn(pfn), 0);
                if (rc < 0)
                        goto fail3;
                accel_hw_priv->evq_mem_gnts[i] = hwinfo->evq_mem_gnts[i] = rc;

                VPRINTK("Got grant %u for evq pfn %x\n", hwinfo->evq_mem_gnts[i],
                        pfn);
                pfn++;
        }

        return 0;

 fail3:
        for (i = i - 1; i >= 0; i--) {
                ungrant_or_crash(accel_hw_priv->evq_mem_gnts[i], bend->far_end);
        }
        ungrant_or_crash(accel_hw_priv->doorbell_gnt, bend->far_end);
 fail2:
        ungrant_or_crash(accel_hw_priv->rxdmaq_gnt, bend->far_end);
 fail1:
        ungrant_or_crash(accel_hw_priv->txdmaq_gnt, bend->far_end);
 fail0:
        return rc;
}


static int ef_bend_hwinfo_falcon_a(struct netback_accel *bend,
                                   struct net_accel_hw_falcon_a *hwinfo)
{
        int rc, i;
        struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;

        if ((rc = ef_bend_hwinfo_falcon_common(bend, &hwinfo->common)) != 0)
                return rc;

        /*
         * Note that unlike the above, where the message field is the
         * page number, here evq_rptr is the entire address because
         * it is currently a pointer into the densely mapped timer page.
         */
        VPRINTK("Passing evq_rptr pfn %x for rptr %x\n",
                hwinfo->common.evq_rptr >> PAGE_SHIFT,
                hwinfo->common.evq_rptr);
        rc = net_accel_grant_page(bend->hdev_data,
                                  hwinfo->common.evq_rptr >> PAGE_SHIFT, 0);
        if (rc < 0) {
                /* Undo ef_bend_hwinfo_falcon_common() */
                ungrant_or_crash(accel_hw_priv->txdmaq_gnt, bend->far_end);
                ungrant_or_crash(accel_hw_priv->rxdmaq_gnt, bend->far_end);
                ungrant_or_crash(accel_hw_priv->doorbell_gnt, bend->far_end);
                for (i = 0; i < accel_hw_priv->evq_npages; i++) {
                        ungrant_or_crash(accel_hw_priv->evq_mem_gnts[i],
                                         bend->far_end);
                }
                return rc;
        }

        accel_hw_priv->evq_rptr_gnt = hwinfo->evq_rptr_gnt = rc;
        VPRINTK("evq_rptr_gnt got %d\n", hwinfo->evq_rptr_gnt);

        return 0;
}


static int ef_bend_hwinfo_falcon_b(struct netback_accel *bend,
                                   struct net_accel_hw_falcon_b *hwinfo)
{
        return ef_bend_hwinfo_falcon_common(bend, hwinfo);
}


/*
 * Fill in the message with a description of the hardware resources, based on
 * the H/W type
 */
static int netback_accel_hwinfo(struct netback_accel *bend,
                                struct net_accel_msg_hw *msgvi)
{
        int rc = 0;

        BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_FILTER);

        msgvi->type = bend->hw_type;
        switch (bend->hw_type) {
        case NET_ACCEL_MSG_HWTYPE_FALCON_A:
                rc = ef_bend_hwinfo_falcon_a(bend, &msgvi->resources.falcon_a);
                break;
        case NET_ACCEL_MSG_HWTYPE_FALCON_B:
        case NET_ACCEL_MSG_HWTYPE_SIENA_A:
                rc = ef_bend_hwinfo_falcon_b(bend, &msgvi->resources.falcon_b);
                break;
        case NET_ACCEL_MSG_HWTYPE_NONE:
                /* Nothing to do. The slow path should just work. */
                break;
        }

        if (rc == 0)
                bend->hw_state = NETBACK_ACCEL_RES_HWINFO;

        return rc;
}


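/*
 * Setup below walks bend->hw_state up a simple ladder, and
 * netback_accel_shutdown_vnic_hw() walks it back down:
 *
 *   RES_NONE   --ef_get_vnic()--------------> RES_ALLOC
 *   RES_ALLOC  --netback_accel_filter_init()-> RES_FILTER
 *   RES_FILTER --netback_accel_hwinfo()------> RES_HWINFO
 */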
/* Allocate hardware resources and make them available to the client domain */
int netback_accel_setup_vnic_hw(struct netback_accel *bend)
{
        struct net_accel_msg msg;
        int err;

        /* Allocate the event queue, VI and so on. */
        err = ef_get_vnic(bend);
        if (err) {
                EPRINTK("Failed to allocate hardware resource for bend: "
                        "error %d\n", err);
                return err;
        }

        /* Set up the filter management */
        err = netback_accel_filter_init(bend);
        if (err) {
                EPRINTK("Filter setup failed, error %d\n", err);
                ef_free_vnic(bend);
                return err;
        }

        net_accel_msg_init(&msg, NET_ACCEL_MSG_SETHW);

        /*
         * Extract the low-level hardware info we will actually pass to the
         * other end, and set up the grants/ioremap permissions needed
         */
        err = netback_accel_hwinfo(bend, &msg.u.hw);

        if (err != 0) {
                netback_accel_filter_shutdown(bend);
                ef_free_vnic(bend);
                return err;
        }

        /* Send the message, this is a reply to a hello-reply */
        err = net_accel_msg_reply_notify(bend->shared_page,
                                         bend->msg_channel_irq,
                                         &bend->to_domU, &msg);

        /*
         * The message should succeed as it's logically a reply and we
         * guarantee space for replies, but a misbehaving frontend
         * could still make it fail, so be tolerant
         */
        if (err != 0) {
                netback_accel_release_hwinfo(bend);
                netback_accel_filter_shutdown(bend);
                ef_free_vnic(bend);
        }

        return err;
}


/* Free hardware resources */
void netback_accel_shutdown_vnic_hw(struct netback_accel *bend)
{
        /*
         * Only try and release resources if accel_hw_priv was set up,
         * otherwise there is nothing to do as we're on "null-op"
         * acceleration
         */
        switch (bend->hw_state) {
        case NETBACK_ACCEL_RES_HWINFO:
                VPRINTK("Release hardware resources\n");
                netback_accel_release_hwinfo(bend);
                /* deliberate drop through */
        case NETBACK_ACCEL_RES_FILTER:
                VPRINTK("Free filters...\n");
                netback_accel_filter_shutdown(bend);
                /* deliberate drop through */
        case NETBACK_ACCEL_RES_ALLOC:
                VPRINTK("Free vnic...\n");
                ef_free_vnic(bend);
                /* deliberate drop through */
        case NETBACK_ACCEL_RES_NONE:
                break;
        default:
                BUG();
        }
}

/**************************************************************************
 *
 * Buffer table stuff
 *
 **************************************************************************/

/*
 * Undo any allocation that netback_accel_msg_rx_buffer_map() has made
 * if it fails half way through
 */
static inline void buffer_map_cleanup(struct netback_accel *bend, int i)
{
        while (i > 0) {
                i--;
                bend->buffer_maps_index--;
                net_accel_unmap_device_page(bend->hdev_data,
                                            bend->buffer_maps[bend->buffer_maps_index],
                                            bend->buffer_addrs[bend->buffer_maps_index]);
        }
}


int netback_accel_add_buffers(struct netback_accel *bend, int pages,
                              int log2_pages, u32 *grants, u32 *buf_addr_out)
{
        struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
        unsigned long long addr_array[NET_ACCEL_MSG_MAX_PAGE_REQ];
        int rc, i, index;
        u64 dev_bus_addr;

        /* Make sure we can't overflow the dma_maps array */
        if (accel_hw_priv->dma_maps_index >=
            bend->max_pages / NET_ACCEL_MSG_MAX_PAGE_REQ) {
                EPRINTK("%s: too many buffer table allocations: %d %d\n",
                        __FUNCTION__, accel_hw_priv->dma_maps_index,
                        bend->max_pages / NET_ACCEL_MSG_MAX_PAGE_REQ);
                return -EINVAL;
        }

        /* Make sure we can't overflow the buffer_maps array */
        if (bend->buffer_maps_index + pages > bend->max_pages) {
                EPRINTK("%s: too many pages mapped: %d + %d > %d\n",
                        __FUNCTION__, bend->buffer_maps_index,
                        pages, bend->max_pages);
                return -EINVAL;
        }

        for (i = 0; i < pages; i++) {
                VPRINTK("%s: mapping page %d\n", __FUNCTION__, i);
                rc = net_accel_map_device_page
                        (bend->hdev_data, grants[i],
                         &bend->buffer_maps[bend->buffer_maps_index],
                         &dev_bus_addr);

                if (rc != 0) {
                        EPRINTK("error in net_accel_map_device_page\n");
                        buffer_map_cleanup(bend, i);
                        return rc;
                }

                bend->buffer_addrs[bend->buffer_maps_index] = dev_bus_addr;

                bend->buffer_maps_index++;

                addr_array[i] = dev_bus_addr;
        }

        VPRINTK("%s: mapping dma addresses to vih %p\n", __FUNCTION__,
                accel_hw_priv->efx_vih);

        index = accel_hw_priv->dma_maps_index;
        if ((rc = efx_vi_dma_map_addrs(accel_hw_priv->efx_vih, addr_array, pages,
                                       &(accel_hw_priv->dma_maps[index]))) < 0) {
                EPRINTK("error in dma_map_pages\n");
                buffer_map_cleanup(bend, i);
                return rc;
        }

        accel_hw_priv->dma_maps_index++;
        NETBACK_ACCEL_STATS_OP(bend->stats.num_buffer_pages += pages);

        *buf_addr_out = efx_vi_dma_get_map_addr(accel_hw_priv->efx_vih,
                                                accel_hw_priv->dma_maps[index]);

        return 0;
}


int netback_accel_remove_buffers(struct netback_accel *bend)
{
        /* Only try to free buffers if accel_hw_priv was set up */
        if (bend->hw_state != NETBACK_ACCEL_RES_NONE) {
                struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
                int i;

                efx_vi_reset(accel_hw_priv->efx_vih);

                while (accel_hw_priv->dma_maps_index > 0) {
                        accel_hw_priv->dma_maps_index--;
                        i = accel_hw_priv->dma_maps_index;
                        efx_vi_dma_unmap_addrs(accel_hw_priv->efx_vih,
                                               accel_hw_priv->dma_maps[i]);
                }

                while (bend->buffer_maps_index > 0) {
                        VPRINTK("Unmapping granted buffer %d\n",
                                bend->buffer_maps_index);
                        bend->buffer_maps_index--;
                        i = bend->buffer_maps_index;
                        net_accel_unmap_device_page(bend->hdev_data,
                                                    bend->buffer_maps[i],
                                                    bend->buffer_addrs[i]);
                }

                NETBACK_ACCEL_STATS_OP(bend->stats.num_buffer_pages = 0);
        }

        return 0;
}

/**************************************************************************
 *
 * Filter stuff
 *
 **************************************************************************/

static int netback_accel_filter_init(struct netback_accel *bend)
{
        struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
        int i, rc;

        BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_ALLOC);

        spin_lock_init(&accel_hw_priv->filter_lock);

        if ((rc = cuckoo_hash_init(&accel_hw_priv->filter_hash_table,
                                   5 /* space for 32 filters */, 8)) != 0) {
                EPRINTK("Failed to initialise filter hash table\n");
                return rc;
        }

        accel_hw_priv->fspecs = kzalloc(sizeof(struct netback_accel_filter_spec) *
                                        bend->quotas.max_filters,
                                        GFP_KERNEL);

        if (accel_hw_priv->fspecs == NULL) {
                EPRINTK("No memory for filter specs.\n");
                cuckoo_hash_destroy(&accel_hw_priv->filter_hash_table);
                return -ENOMEM;
        }

        for (i = 0; i < bend->quotas.max_filters; i++) {
                accel_hw_priv->free_filters |= (1 << i);
        }

        /* Base mask on highest set bit in max_filters */
        accel_hw_priv->filter_idx_mask = (1 << fls(bend->quotas.max_filters)) - 1;
        VPRINTK("filter setup: max is %x mask is %x\n",
                bend->quotas.max_filters, accel_hw_priv->filter_idx_mask);

        bend->hw_state = NETBACK_ACCEL_RES_FILTER;

        return 0;
}


static inline void make_filter_key(cuckoo_hash_ip_key *key,
                                   struct netback_accel_filter_spec *filt)
{
        key->local_ip = filt->destip_be;
        key->local_port = filt->destport_be;
        key->proto = filt->proto;
}
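
/*
 * The hash key built above is only (dst IP, dst port, proto); the MAC
 * is not part of it, so a lookup may alias across MACs.  This is why
 * netback_accel_filter_remove_spec() re-checks the full spec,
 * including the MAC, before trusting a hash hit.
 */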


static inline
void netback_accel_free_filter(struct falcon_bend_accel_priv *accel_hw_priv,
                               int filter)
{
        cuckoo_hash_ip_key filter_key;

        if (!(accel_hw_priv->free_filters & (1 << filter))) {
                efx_vi_filter_stop(accel_hw_priv->efx_vih,
                                   accel_hw_priv->fspecs[filter].filter_handle);
                make_filter_key(&filter_key, &(accel_hw_priv->fspecs[filter]));
                if (cuckoo_hash_remove(&accel_hw_priv->filter_hash_table,
                                       (cuckoo_hash_key *)&filter_key)) {
                        EPRINTK("%s: Couldn't find filter to remove from table\n",
                                __FUNCTION__);
                        BUG();
                }
        }
}


static void netback_accel_filter_shutdown(struct netback_accel *bend)
{
        struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
        int i;
        unsigned long flags;

        BUG_ON(bend->hw_state != NETBACK_ACCEL_RES_FILTER);

        spin_lock_irqsave(&accel_hw_priv->filter_lock, flags);

        BUG_ON(accel_hw_priv->fspecs == NULL);

        for (i = 0; i < bend->quotas.max_filters; i++) {
                netback_accel_free_filter(accel_hw_priv, i);
        }

        kfree(accel_hw_priv->fspecs);
        accel_hw_priv->fspecs = NULL;
        accel_hw_priv->free_filters = 0;

        cuckoo_hash_destroy(&accel_hw_priv->filter_hash_table);

        spin_unlock_irqrestore(&accel_hw_priv->filter_lock, flags);

        bend->hw_state = NETBACK_ACCEL_RES_ALLOC;
}


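/*
 * Worked example of the index normalisation used below, for a
 * hypothetical quota of max_filters == 20: fls(20) == 5, so
 * filter_idx_mask == 0x1f and the mask yields an index in [0, 31];
 * values in [20, 31] then have max_filters subtracted once, landing
 * back inside the valid range.
 */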
/*! Suggest a filter to replace when we want to insert a new one and have
 *  none free.
 */
static unsigned get_victim_filter(struct netback_accel *bend)
{
        /*
         * We could attempt to get really clever, and may do at some
         * point, but random replacement is v. cheap and low on
         * pathological worst cases.
         */
        unsigned index, cycles;

        rdtscl(cycles);

        /*
         * Some doubt about the quality of the bottom few bits, so
         * throw 'em away
         */
        index = (cycles >> 4) & ((struct falcon_bend_accel_priv *)
                                 bend->accel_hw_priv)->filter_idx_mask;
        /*
         * We don't enforce that the number of filters is a power of
         * two, but the masking gets us to within one subtraction of a
         * valid index
         */
        if (index >= bend->quotas.max_filters)
                index -= bend->quotas.max_filters;
        DPRINTK("backend %s->%d has no free filters. Filter %d will be evicted\n",
                bend->nicname, bend->far_end, index);
        return index;
}


/* Add a filter for the specified IP/port to the backend */
int
netback_accel_filter_check_add(struct netback_accel *bend,
                               struct netback_accel_filter_spec *filt)
{
        struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
        struct netback_accel_filter_spec *fs;
        unsigned filter_index;
        unsigned long flags;
        int rc, recycling = 0;
        cuckoo_hash_ip_key filter_key, evict_key;

        BUG_ON(filt->proto != IPPROTO_TCP && filt->proto != IPPROTO_UDP);

        DPRINTK("Will add %s filter for dst ip %08x and dst port %d\n",
                (filt->proto == IPPROTO_TCP) ? "TCP" : "UDP",
                be32_to_cpu(filt->destip_be), be16_to_cpu(filt->destport_be));

        spin_lock_irqsave(&accel_hw_priv->filter_lock, flags);
        /*
         * Check to see if we're already filtering this IP address and
         * port. Happens if you insert a filter mid-stream as there
         * are many packets backed up to be delivered to dom0 already
         */
        make_filter_key(&filter_key, filt);
        if (cuckoo_hash_lookup(&accel_hw_priv->filter_hash_table,
                               (cuckoo_hash_key *)(&filter_key),
                               &filter_index)) {
                DPRINTK("Found matching filter %d already in table\n",
                        filter_index);
                rc = -1;
                goto out;
        }

        if (accel_hw_priv->free_filters == 0) {
                filter_index = get_victim_filter(bend);
                recycling = 1;
        } else {
                filter_index = __ffs(accel_hw_priv->free_filters);
                clear_bit(filter_index, &accel_hw_priv->free_filters);
        }

        fs = &accel_hw_priv->fspecs[filter_index];

        if (recycling) {
                DPRINTK("Removing filter index %d handle %p\n", filter_index,
                        fs->filter_handle);

                if ((rc = efx_vi_filter_stop(accel_hw_priv->efx_vih,
                                             fs->filter_handle)) != 0) {
                        EPRINTK("Couldn't clear NIC filter table entry %d\n", rc);
                }

                make_filter_key(&evict_key, fs);
                if (cuckoo_hash_remove(&accel_hw_priv->filter_hash_table,
                                       (cuckoo_hash_key *)&evict_key)) {
                        EPRINTK("Couldn't find filter to remove from table\n");
                        BUG();
                }
                NETBACK_ACCEL_STATS_OP(bend->stats.num_filters--);
        }

        /* Update the filter spec with new details */
        *fs = *filt;

        if ((rc = cuckoo_hash_add(&accel_hw_priv->filter_hash_table,
                                  (cuckoo_hash_key *)&filter_key, filter_index,
                                  1)) != 0) {
                EPRINTK("Error (%d) adding filter to table\n", rc);
                accel_hw_priv->free_filters |= (1 << filter_index);
                goto out;
        }

        rc = efx_vi_filter(accel_hw_priv->efx_vih, filt->proto, filt->destip_be,
                           filt->destport_be,
                           (struct filter_resource_t **)&fs->filter_handle);

        if (rc != 0) {
                EPRINTK("Hardware filter insertion failed. Error %d\n", rc);
                accel_hw_priv->free_filters |= (1 << filter_index);
                cuckoo_hash_remove(&accel_hw_priv->filter_hash_table,
                                   (cuckoo_hash_key *)&filter_key);
                rc = -1;
                goto out;
        }

        NETBACK_ACCEL_STATS_OP(bend->stats.num_filters++);

        VPRINTK("%s: success index %d handle %p\n", __FUNCTION__, filter_index,
                fs->filter_handle);

        rc = filter_index;
 out:
        spin_unlock_irqrestore(&accel_hw_priv->filter_lock, flags);
        return rc;
}


/* Release a filter entry by index, returning it to the free pool */
static void netback_accel_filter_remove(struct netback_accel *bend,
                                        int filter_index)
{
        struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;

        BUG_ON(accel_hw_priv->free_filters & (1 << filter_index));
        netback_accel_free_filter(accel_hw_priv, filter_index);
        accel_hw_priv->free_filters |= (1 << filter_index);
}


/* Remove a filter entry for the specific device and IP/port */
void netback_accel_filter_remove_spec(struct netback_accel *bend,
                                      struct netback_accel_filter_spec *filt)
{
        struct falcon_bend_accel_priv *accel_hw_priv = bend->accel_hw_priv;
        unsigned filter_found;
        unsigned long flags;
        cuckoo_hash_ip_key filter_key;
        struct netback_accel_filter_spec *fs;

        if (filt->proto == IPPROTO_TCP) {
                DPRINTK("Remove TCP filter for dst ip %08x and dst port %d\n",
                        be32_to_cpu(filt->destip_be),
                        be16_to_cpu(filt->destport_be));
        } else if (filt->proto == IPPROTO_UDP) {
                DPRINTK("Remove UDP filter for dst ip %08x and dst port %d\n",
                        be32_to_cpu(filt->destip_be),
                        be16_to_cpu(filt->destport_be));
        } else {
                /*
                 * This could be provoked by an evil frontend, so can't
                 * BUG(), but harmless as it should fail the tests below
                 */
                DPRINTK("Non-TCP/UDP filter dst ip %08x and dst port %d\n",
                        be32_to_cpu(filt->destip_be),
                        be16_to_cpu(filt->destport_be));
        }

        spin_lock_irqsave(&accel_hw_priv->filter_lock, flags);

        make_filter_key(&filter_key, filt);
        if (!cuckoo_hash_lookup(&accel_hw_priv->filter_hash_table,
                                (cuckoo_hash_key *)(&filter_key),
                                &filter_found)) {
                EPRINTK("Couldn't find matching filter already in table\n");
                goto out;
        }

        /* Do a full check to make sure we've not had a hash collision */
        fs = &accel_hw_priv->fspecs[filter_found];
        if (fs->destip_be == filt->destip_be &&
            fs->destport_be == filt->destport_be &&
            fs->proto == filt->proto &&
            !memcmp(fs->mac, filt->mac, ETH_ALEN)) {
                netback_accel_filter_remove(bend, filter_found);
        } else {
                EPRINTK("Entry in hash table does not match filter spec\n");
                goto out;
        }

 out:
        spin_unlock_irqrestore(&accel_hw_priv->filter_lock, flags);
}