1 /******************************************************************************
2 * arch/xen/drivers/netif/backend/common.h
4 * This program is free software; you can redistribute it and/or
5 * modify it under the terms of the GNU General Public License version 2
6 * as published by the Free Software Foundation; or, when distributed
7 * separately from the Linux kernel or incorporated into other
8 * software packages, subject to the following license:
10 * Permission is hereby granted, free of charge, to any person obtaining a copy
11 * of this source file (the "Software"), to deal in the Software without
12 * restriction, including without limitation the rights to use, copy, modify,
13 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
14 * and to permit persons to whom the Software is furnished to do so, subject to
15 * the following conditions:
17 * The above copyright notice and this permission notice shall be included in
18 * all copies or substantial portions of the Software.
20 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
21 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
22 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
23 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
24 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
25 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
29 #ifndef __NETIF__BACKEND__COMMON_H__
30 #define __NETIF__BACKEND__COMMON_H__
32 #include <linux/module.h>
33 #include <linux/interrupt.h>
34 #include <linux/slab.h>
37 #include <linux/netdevice.h>
38 #include <linux/etherdevice.h>
39 #include <linux/wait.h>
40 #include <xen/interface/io/netif.h>
41 #include <xen/xenbus.h>
42 #include <xen/interface/event_channel.h>
/*
 * Debug print helper: prefixes the message with the file name and line
 * number so the origin of a pr_debug() message can be located quickly.
 */
#define DPRINTK(_f, _a...) \
	pr_debug("(file=%s, line=%d) " _f, \
		 __FILE__ , __LINE__ , ## _a )
/* Informational message, tagged with the "xen_net:" subsystem prefix. */
#define IPRINTK(fmt, args...) pr_info("xen_net: " fmt, ##args)
/* Warning message, tagged with the "xen_net:" subsystem prefix. */
#define WPRINTK(fmt, args...) pr_warning("xen_net: " fmt, ##args)
/*
 * Per-virtual-interface backend state.
 * NOTE(review): several fields of this struct (e.g. the identifier
 * fields announced by the first comment, event-channel/IRQ state, and
 * the closing "} netif_t;") are not visible in this chunk.
 */
typedef struct netif_st {
	/* Unique identifier for this interface. */
	/* The shared rings and indexes. */
	netif_tx_back_ring_t tx;
	netif_rx_back_ring_t rx;
	/* vmalloc areas for the mapped ring pages — presumably grant-mapped
	 * from the frontend; confirm in the connect/disconnect code. */
	struct vm_struct *tx_comms_area;
	struct vm_struct *rx_comms_area;
	/* Flags that must not be set in dev->features */
	int features_disabled;
	/* Frontend feature information. */
	/* Internal feature information. */
	u8 can_queue:1; /* can queue packets for receiver? */
	u8 copying_receiver:1; /* copy packets to receiver? */
	/* Allow netif_be_start_xmit() to peek ahead in the rx request ring. */
	RING_IDX rx_req_cons_peek;
	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
	unsigned long credit_bytes;
	unsigned long credit_usec;
	unsigned long remaining_credit;
	struct timer_list credit_timeout;
	/* Enforce draining of the transmit queue. */
	struct timer_list tx_queue_timeout;
	/* Counters; names suggest diagnostic statistics — confirm at use sites. */
	unsigned long nr_copied_skbs;
	unsigned long rx_gso_csum_fixups;
	/* Miscellaneous private stuff. */
	struct list_head list; /* scheduling list */
	struct net_device *dev;
	/* Woken by netif_put() when the reference count drops to zero. */
	wait_queue_head_t waiting_to_free;
105 * Implement our own carrier flag: the network stack's version causes delays
106 * when the carrier is re-enabled (in particular, dev_activate() may not
107 * immediately be called, which can cause packet loss; also the etherbridge
108 * can be rather lazy in activating its port).
/*
 * Private carrier flag accessors: set, clear and test (netif)->carrier
 * directly, bypassing the network stack's carrier handling (see the
 * rationale comment above these macros in the original file).
 */
#define netback_carrier_on(netif)	((netif)->carrier = 1)
#define netback_carrier_off(netif)	((netif)->carrier = 0)
#define netback_carrier_ok(netif)	((netif)->carrier)
	/* NOTE(review): the opening of this enum (and any preceding
	 * enumerators) is not visible in this chunk. */
	NETBK_DELAYED_COPY_SKB,
	NETBK_ALWAYS_COPY_SKB,
/* Current skb-copy policy — presumably one of the NETBK_*_COPY_SKB values. */
extern int netbk_copy_skb_mode;
/* Function pointers into netback accelerator plugin modules */
/* NOTE(review): the closing brace of this struct is not visible here. */
struct netback_accel_hooks {
	struct module *owner;	/* module providing these hooks */
	int (*probe)(struct xenbus_device *dev);	/* attach plugin to a device */
	int (*remove)(struct xenbus_device *dev);	/* detach plugin from a device */
/* Structure to track the state of a netback accelerator plugin */
/* NOTE(review): several fields (between 'link' and 'hooks') and the
 * closing brace of this struct are not visible in this chunk. */
struct netback_accelerator {
	struct list_head link;	/* presumably links into a global accelerator list — confirm */
	struct netback_accel_hooks *hooks;	/* callbacks into the plugin module */
138 struct backend_info {
139 struct xenbus_device *dev;
141 enum xenbus_state frontend_state;
142 struct xenbus_watch hotplug_status_watch;
143 int have_hotplug_status_watch:1;
145 /* State relating to the netback accelerator */
146 void *netback_accel_priv;
147 /* The accelerator that this backend is currently using */
148 struct netback_accelerator *accelerator;
/* Version of the accelerator plugin interface (major 1, minor 1). */
#define NETBACK_ACCEL_VERSION 0x00010001
/*
 * Connect an accelerator plugin module to netback. Returns zero on
 * success, < 0 on error, > 0 (with highest version number supported)
 * if version mismatch.
 */
extern int netback_connect_accelerator(unsigned version,
				       int id, const char *eth_name,
				       struct netback_accel_hooks *hooks);
/* Disconnect a previously connected accelerator plugin module */
extern void netback_disconnect_accelerator(int id, const char *eth_name);
/* Probe/remove accelerator plugins for one backend device. */
void netback_probe_accelerators(struct backend_info *be,
				struct xenbus_device *dev);
void netback_remove_accelerators(struct backend_info *be,
				 struct xenbus_device *dev);
/* One-time initialisation of the accelerator infrastructure. */
void netif_accel_init(void);
/* Number of ring entries when the shared ring occupies a single page. */
#define NET_TX_RING_SIZE __CONST_RING_SIZE(netif_tx, PAGE_SIZE)
#define NET_RX_RING_SIZE __CONST_RING_SIZE(netif_rx, PAGE_SIZE)
/* Tear down a backend's connection to its frontend. */
void netif_disconnect(struct backend_info *be);
/* Allocate a new interface for (domid, handle) under 'parent'. */
netif_t *netif_alloc(struct device *parent, domid_t domid, unsigned int handle);
/* Map the frontend's TX/RX ring grants and bind the event channel. */
int netif_map(struct backend_info *be, grant_ref_t tx_ring_ref,
	      grant_ref_t rx_ring_ref, evtchn_port_t evtchn);
/* Take a reference on a netif. */
#define netif_get(_b) (atomic_inc(&(_b)->refcnt))
/*
 * Drop a reference; the final put wakes sleepers on waiting_to_free so
 * the interface can be freed.
 * NOTE(review): the do/while wrapper lines of this macro are not
 * visible in this chunk.
 */
#define netif_put(_b) \
	if ( atomic_dec_and_test(&(_b)->refcnt) ) \
		wake_up(&(_b)->waiting_to_free); \
void netif_xenbus_init(void);
/* True when the net device is running and our private carrier flag is set. */
#define netif_schedulable(netif) \
	(netif_running((netif)->dev) && netback_carrier_ok(netif))
/* Add/remove the interface on the backend scheduling list. */
void netif_schedule_work(netif_t *netif);
void netif_deschedule_work(netif_t *netif);
/* ndo_start_xmit hook and event-channel interrupt handler. */
int netif_be_start_xmit(struct sk_buff *skb, struct net_device *dev);
irqreturn_t netif_be_int(int irq, void *dev_id);
/*
 * Report whether this backend may queue packets for the receiver
 * (reads the can_queue feature bit from the per-device netif state).
 * NOTE(review): the function's brace lines are not visible in this chunk.
 */
static inline int netbk_can_queue(struct net_device *dev)
	netif_t *netif = netdev_priv(dev);
	return netif->can_queue;
/*
 * Report whether the frontend supports scatter-gather.
 * NOTE(review): the can_sg bit is declared in a part of struct netif_st
 * not visible in this chunk; the function's brace lines are also missing.
 */
static inline int netbk_can_sg(struct net_device *dev)
	netif_t *netif = netdev_priv(dev);
	return netif->can_sg;
/* Per-slot bookkeeping for an in-flight TX request.
 * NOTE(review): closing braces of the structs below are not visible. */
struct pending_tx_info {
	netif_tx_request_t req;	/* copy of the frontend's TX request */
/* Index type for the pending/dealloc rings. */
typedef unsigned int pending_ring_idx_t;
/* Per-RX-slot metadata; fields not visible in this chunk. */
struct netbk_rx_meta {
/* Tracks a pending TX slot that is still in use past its normal lifetime. */
struct netbk_tx_pending_inuse {
	struct list_head list;
	unsigned long alloc_time;	/* allocation timestamp — presumably jiffies; confirm */
/* Maximum simultaneously pending TX requests (power of two). */
#define MAX_PENDING_REQS (1U << CONFIG_XEN_NETDEV_TX_SHIFT)
/* Size of the cached MFN list below. */
#define MAX_MFN_ALLOC 64
	/* NOTE(review): the opening 'struct xen_netbk {' of this aggregate,
	 * its closing brace, and some interleaved lines (including the
	 * #else/#endif of the NR_DYNIRQS block) are not visible here. */
	struct tasklet_struct net_tx_tasklet;	/* deferred TX processing */
	struct tasklet_struct net_rx_tasklet;	/* deferred RX processing */
	wait_queue_head_t netbk_action_wq;
	struct task_struct *task;	/* presumably a kthread alternative to the tasklets — confirm */
	struct sk_buff_head rx_queue;	/* skbs awaiting RX processing */
	struct sk_buff_head tx_queue;	/* skbs awaiting TX processing */
	struct timer_list net_timer;
	struct timer_list tx_pending_timer;
	/* Producer/consumer indices for the pending and dealloc rings below. */
	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
	pending_ring_idx_t dealloc_prod;
	pending_ring_idx_t dealloc_cons;
	struct list_head pending_inuse_head;
	struct list_head schedule_list;
	spinlock_t schedule_list_lock;	/* presumably protects schedule_list — confirm */
	spinlock_t release_lock;
	struct page **mmap_pages;
	unsigned int alloc_index;
	/* Fixed-size state, one slot per in-flight TX request. */
	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
	struct netbk_tx_pending_inuse pending_inuse[MAX_PENDING_REQS];
	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];
	u16 pending_ring[MAX_PENDING_REQS];
	u16 dealloc_ring[MAX_PENDING_REQS];
	/* RX-side scratch arrays sized to the RX ring. */
	struct multicall_entry rx_mcl[NET_RX_RING_SIZE+3];
	struct mmu_update rx_mmu[NET_RX_RING_SIZE];
	struct gnttab_transfer grant_trans_op[NET_RX_RING_SIZE];
	struct gnttab_copy grant_copy_op[NET_RX_RING_SIZE];
	DECLARE_BITMAP(rx_notify, NR_DYNIRQS);
	/* Pick the narrowest element type that can hold an IRQ number. */
#if !defined(NR_DYNIRQS)
#elif NR_DYNIRQS <= 0x10000
	u16 notify_list[NET_RX_RING_SIZE];
	int notify_list[NET_RX_RING_SIZE];
	struct netbk_rx_meta meta[NET_RX_RING_SIZE];
	unsigned long mfn_list[MAX_MFN_ALLOC];
/* Netback group state; netbk_nr_groups suggests xen_netbk is an array — confirm. */
extern struct xen_netbk *xen_netbk;
extern unsigned int netbk_nr_groups;
297 #endif /* __NETIF__BACKEND__COMMON_H__ */