1 /****************************************************************************
2 * Solarflare driver for Xen network acceleration
4 * Copyright 2006-2008: Solarflare Communications Inc,
5 * 9501 Jeronimo Road, Suite 250,
6 * Irvine, CA 92618, USA
8 * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published
12 * by the Free Software Foundation, incorporated herein by reference.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 ****************************************************************************
25 #include <xen/evtchn.h>
28 #include "accel_msg_iface.h"
29 #include "accel_util.h"
30 #include "accel_solarflare.h"
32 /* Send a HELLO to front end to start things off */
/*
 * NOTE(review): the embedded numbering in this listing has gaps (e.g. 34,
 * 41-42, 48-49), so braces and some call arguments are elided from this
 * excerpt; the visible statements are the function's message-send body.
 */
33 void netback_accel_msg_tx_hello(struct netback_accel *bend, unsigned version)
/* Reserve a slot on the dom0 -> domU message queue; lock_state is the
 * opaque lock cookie handed back by net_accel_msg_start_send(). */
35 unsigned long lock_state;
36 struct net_accel_msg *msg =
37 net_accel_msg_start_send(bend->shared_page,
38 &bend->to_domU, &lock_state);
39 /* The queue _cannot_ be full, we're the first users. */
40 EPRINTK_ON(msg == NULL);
/* Fill in the HELLO payload: our protocol version and the buffer-page
 * quota the frontend may request (from bend->quotas). */
43 net_accel_msg_init(msg, NET_ACCEL_MSG_HELLO);
44 msg->u.hello.version = version;
45 msg->u.hello.max_pages = bend->quotas.max_buf_pages;
46 VPRINTK("Sending hello to channel %d\n", bend->msg_channel);
/* Publish the message and kick the frontend over the event-channel IRQ.
 * (Intermediate arguments on elided lines 48-49 are not visible here.) */
47 net_accel_msg_complete_send_notify(bend->shared_page,
50 bend->msg_channel_irq);
54 /* Send a local mac message to vnic */
/*
 * Sends a LOCALMAC message (add/remove, per 'type') carrying 'mac' to the
 * frontend.  NOTE(review): numbering gaps (57, 60, 62, 64, 66-68, 73-74,
 * 76-77, 82-83, 85-88) show braces and the msg==NULL/full-queue handling
 * are partially elided from this excerpt.
 */
55 static void netback_accel_msg_tx_localmac(struct netback_accel *bend,
56 int type, const void *mac)
58 unsigned long lock_state;
59 struct net_accel_msg *msg;
/* Both arguments come from our own callers, so NULL here is a driver bug. */
61 BUG_ON(bend == NULL || mac == NULL);
63 VPRINTK("Sending local mac message: %pM\n", mac);
/* Reserve a slot on the dom0 -> domU queue (trailing args elided). */
65 msg = net_accel_msg_start_send(bend->shared_page, &bend->to_domU,
/* Fill in the LOCALMAC payload: flags carry the add/remove 'type'. */
69 net_accel_msg_init(msg, NET_ACCEL_MSG_LOCALMAC);
70 msg->u.localmac.flags = type;
71 memcpy(msg->u.localmac.mac, mac, ETH_ALEN);
/* Publish and notify the frontend via the event-channel IRQ. */
72 net_accel_msg_complete_send_notify(bend->shared_page,
75 bend->msg_channel_irq);
/* Below: the (elided) full-queue branch — the message was dropped. */
78 * TODO if this happens we may leave a domU
79 * fastpathing packets when they should be delivered
80 * locally. Solution is get domU to timeout entries
81 * in its fastpath lookup table when it receives no RX
84 EPRINTK("%s: saw full queue, may need ARP timer to recover\n",
89 /* Send an add local mac message to vnic */
/*
 * Thin wrapper: forwards 'mac' with the NET_ACCEL_MSG_ADD flag.
 * NOTE(review): the second parameter line (original line 91, presumably
 * "const void *mac") and the braces are elided from this excerpt.
 */
90 void netback_accel_msg_tx_new_localmac(struct netback_accel *bend,
93 netback_accel_msg_tx_localmac(bend, NET_ACCEL_MSG_ADD, mac);
/*
 * Handle a MAPBUF request from the frontend: validate the page count and
 * map the granted pages into our buffer table, mutating *msg in place into
 * the reply.  NOTE(review): numbering gaps show the local declarations,
 * return statements and the err_out label are elided from this excerpt.
 */
97 static int netback_accel_msg_rx_buffer_map(struct netback_accel *bend,
98 struct net_accel_msg *msg)
102 /* Can only allocate in power of two */
103 log2_pages = log2_ge(msg->u.mapbufs.pages, 0);
/* Reject a request whose page count is not an exact power of two. */
104 if (msg->u.mapbufs.pages != pow2(log2_pages)) {
105 EPRINTK("%s: Can only alloc bufs in power of 2 sizes (%d)\n",
106 __FUNCTION__, msg->u.mapbufs.pages);
112 * Sanity. Assumes NET_ACCEL_MSG_MAX_PAGE_REQ is same for
113 * both directions/domains
/* Untrusted input from the frontend — clamp before mapping grants. */
115 if (msg->u.mapbufs.pages > NET_ACCEL_MSG_MAX_PAGE_REQ) {
116 EPRINTK("%s: too many pages in a single message: %d %d\n",
117 __FUNCTION__, msg->u.mapbufs.pages,
118 NET_ACCEL_MSG_MAX_PAGE_REQ);
/* Map the granted pages; on success the buffer id is written back into
 * msg->u.mapbufs.buf for the reply. */
123 if ((rc = netback_accel_add_buffers(bend, msg->u.mapbufs.pages,
124 log2_pages, msg->u.mapbufs.grants,
125 &msg->u.mapbufs.buf)) < 0) {
/* Success: turn the request into a reply in place. */
129 msg->id |= NET_ACCEL_MSG_REPLY;
/* Error path (label elided): reply with the error flag set. */
134 EPRINTK("%s: err_out\n", __FUNCTION__);
135 msg->id |= NET_ACCEL_MSG_ERROR | NET_ACCEL_MSG_REPLY;
140 /* Hint from frontend that one of our filters is out of date */
/*
 * Handle a FASTPATH message: on the REMOVE flag, rebuild the filter spec
 * from the message fields and drop the matching hardware filter.
 * NOTE(review): braces, the return and any ADD handling are elided from
 * this excerpt (numbering gaps at 143, 145, 147, 150, 152, 157, 159+).
 */
141 static int netback_accel_process_fastpath(struct netback_accel *bend,
142 struct net_accel_msg *msg)
144 struct netback_accel_filter_spec spec;
146 if (msg->u.fastpath.flags & NET_ACCEL_MSG_REMOVE) {
148 * Would be nice to BUG() this but would leave us
149 * vulnerable to naughty frontend
/* ADD and REMOVE together is a frontend protocol violation; log only. */
151 EPRINTK_ON(msg->u.fastpath.flags & NET_ACCEL_MSG_ADD);
/* Copy the match fields; port/ip are kept big-endian (_be suffix). */
153 memcpy(spec.mac, msg->u.fastpath.mac, ETH_ALEN);
154 spec.destport_be = msg->u.fastpath.port;
155 spec.destip_be = msg->u.fastpath.ip;
156 spec.proto = msg->u.fastpath.proto;
158 netback_accel_filter_remove_spec(bend, &spec);
165 /* Flow control for message queues */
/*
 * Atomically set the "dom0->domU queue no longer full" flag in the shared
 * page and, only when the flag was previously clear, interrupt the
 * frontend.  test_and_set_bit() makes the notify edge-triggered so the
 * IRQ is not re-raised while the frontend has yet to observe the flag.
 * NOTE(review): braces and the 'else' before the VPRINTK (original line
 * 171) are elided from this excerpt.
 */
166 inline void set_queue_not_full(struct netback_accel *bend)
168 if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL_B,
169 (unsigned long *)&bend->shared_page->aflags))
170 notify_remote_via_irq(bend->msg_channel_irq);
172 VPRINTK("queue not full bit already set, not signalling\n");
176 /* Flow control for message queues */
/*
 * Counterpart to set_queue_not_full(): atomically mark queue 0 (domU->dom0)
 * as full and notify the frontend only on the clear->set transition.
 * NOTE(review): braces and the 'else' before the VPRINTK (original line
 * 182) are elided from this excerpt.
 */
177 inline void set_queue_full(struct netback_accel *bend)
179 if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0FULL_B,
180 (unsigned long *)&bend->shared_page->aflags))
181 notify_remote_via_irq(bend->msg_channel_irq);
183 VPRINTK("queue full bit already set, not signalling\n");
/*
 * Publish the backend NIC's up/down state ('up' is a boolean-style int)
 * to the frontend via the shared page, then raise the NETUPDOWN flag and
 * notify — again edge-triggered via test_and_set_bit() so a still-pending
 * flag suppresses a redundant IRQ.  NOTE(review): braces and the 'else'
 * before the VPRINTK (original line 193) are elided from this excerpt.
 */
187 void netback_accel_set_interface_state(struct netback_accel *bend, int up)
189 bend->shared_page->net_dev_up = up;
190 if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_NETUPDOWN_B,
191 (unsigned long *)&bend->shared_page->aflags))
192 notify_remote_via_irq(bend->msg_channel_irq);
194 VPRINTK("interface up/down bit already set, not signalling\n");
/*
 * Decide whether a frontend-proposed protocol version is acceptable.
 * Only called on a mismatch (hence the BUG_ON on equality).  Both the
 * newer-than-us and older-than-us branches end in elided return
 * statements; from the comments, both currently reject (presumably with
 * an error code — TODO confirm against the full file).
 */
198 static int check_rx_hello_version(unsigned version)
200 /* Should only happen if there's been a version mismatch */
201 BUG_ON(version == NET_ACCEL_MSG_VERSION);
203 if (version > NET_ACCEL_MSG_VERSION) {
204 /* Newer protocol, we must refuse */
208 if (version < NET_ACCEL_MSG_VERSION) {
210 * We are newer, so have discretion to accept if we
211 * wish. For now however, just reject
/*
 * Dispatch one message received from the frontend (domU).  The switch on
 * the message id (scaffolding partially elided — numbering gaps at
 * 222-225, 230-231, 236, 238-239, 247, 249, 252-253, 256-257, 260,
 * 262-264, 268, 270-271, 275-276, 279, 282-283, 286, 288-290, 294,
 * 296-298, 300-301, 306, 310, 312-313, 316, 318-319, 321-322, 324+)
 * handles: HELLO reply, HELLO error reply, MAPBUF, and FASTPATH.
 */
220 static int process_rx_msg(struct netback_accel *bend,
221 struct net_accel_msg *msg)
/* Frontend accepted our HELLO: bring up the acceleration hardware. */
226 case NET_ACCEL_MSG_REPLY | NET_ACCEL_MSG_HELLO:
227 /* Reply to a HELLO; mark ourselves as connected */
228 DPRINTK("got Hello reply, version %.8x\n",
229 msg->u.hello.version);
232 * Check that we've not successfully done this
233 * already. NB no check at the moment that this reply
234 * comes after we've actually sent a HELLO as that's
235 * not possible with the current code structure
/* A second HELLO reply while resources exist is a protocol error. */
237 if (bend->hw_state != NETBACK_ACCEL_RES_NONE)
240 /* Store max_pages for accel_setup */
/* Frontend-supplied value is untrusted: clamp to our configured quota. */
241 if (msg->u.hello.max_pages > bend->quotas.max_buf_pages) {
242 EPRINTK("More pages than quota allows (%d > %d)\n",
243 msg->u.hello.max_pages,
244 bend->quotas.max_buf_pages);
245 /* Force it down to the quota */
246 msg->u.hello.max_pages = bend->quotas.max_buf_pages;
248 bend->max_pages = msg->u.hello.max_pages;
250 /* Set up the hardware visible to the other end */
251 err = bend->accel_setup(bend);
/* On setup failure, tear down the connection state machine. */
254 DPRINTK("Hello gave accel_setup error %d\n", err);
255 netback_accel_set_closing(bend);
258 * Now add the context so that packet
259 * forwarding will commence
261 netback_accel_fwd_set_context(bend->mac, bend,
/* Frontend rejected our HELLO — usually a version mismatch. */
265 case NET_ACCEL_MSG_REPLY | NET_ACCEL_MSG_HELLO | NET_ACCEL_MSG_ERROR:
266 EPRINTK("got Hello error, versions us:%.8x them:%.8x\n",
267 NET_ACCEL_MSG_VERSION, msg->u.hello.version);
/* Ignore the error if the handshake already completed. */
269 if (bend->hw_state != NETBACK_ACCEL_RES_NONE)
272 if (msg->u.hello.version != NET_ACCEL_MSG_VERSION) {
273 /* Error is due to version mismatch */
274 err = check_rx_hello_version(msg->u.hello.version);
277 * It's OK to be compatible, send
278 * another hello with compatible version
280 netback_accel_msg_tx_hello
281 (bend, msg->u.hello.version);
284 * Tell frontend that we're not going to
285 * send another HELLO by going to Closing.
287 netback_accel_set_closing(bend);
/* Frontend asks us to map its granted buffer pages. */
291 case NET_ACCEL_MSG_MAPBUF:
292 VPRINTK("Got mapped buffers request %d\n",
293 msg->u.mapbufs.reqid);
/* Not valid before the HELLO handshake has set up hardware. */
295 if (bend->hw_state == NETBACK_ACCEL_RES_NONE)
299 * Frontend wants a buffer table entry for the
/* Map the buffers, then send *msg (now mutated into a reply) back. */
302 err = netback_accel_msg_rx_buffer_map(bend, msg);
303 if (net_accel_msg_reply_notify(bend->shared_page,
304 bend->msg_channel_irq,
305 &bend->to_domU, msg)) {
307 * This is fatal as we can't tell the frontend
308 * about the problem through the message
309 * queue, and so would otherwise stalemate
311 netback_accel_set_closing(bend);
/* Frontend hints that one of our fastpath filters is stale. */
314 case NET_ACCEL_MSG_FASTPATH:
315 DPRINTK("Got fastpath request\n");
317 if (bend->hw_state == NETBACK_ACCEL_RES_NONE)
320 err = netback_accel_process_fastpath(bend, msg);
/* Unknown message id: log and fall through to (elided) default return. */
323 EPRINTK("Huh? Message code is %x\n", msg->id);
331 /* Demultiplex an IRQ from the frontend driver. */
/*
 * Workqueue handler: drain flag updates and queued messages from the
 * frontend under bend_mutex.  Two prototypes are selected by kernel
 * version because the workqueue API changed in 2.6.20 (work_struct *
 * argument vs. opaque void *).  NOTE(review): the '#else'/'#endif' lines
 * of both version blocks, several braces, and the exit label before the
 * unlock are elided from this excerpt (numbering gaps at 334, 336-337,
 * 343, 345-346, 348-349, 352, 354-355, 363-364, 369-372, 374, 376-377,
 * 379-383, 385, 387).
 */
332 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
333 void netback_accel_msg_rx_handler(struct work_struct *arg)
335 void netback_accel_msg_rx_handler(void *bend_void)
338 struct net_accel_msg msg;
339 int err, queue_was_full = 0;
/* Recover the backend from the work item (>=2.6.20) or the raw cookie. */
340 #if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
341 struct netback_accel *bend =
342 container_of(arg, struct netback_accel, handle_msg);
344 struct netback_accel *bend = (struct netback_accel *)bend_void;
/* Serialise against teardown and other backend state changes. */
347 mutex_lock(&bend->bend_mutex);
350 * This happens when the shared pages have been unmapped, but
351 * the workqueue not flushed yet
353 if (bend->shared_page == NULL)
/* Acknowledge any to-dom0 flag bits the frontend has raised. */
356 if ((bend->shared_page->aflags &
357 NET_ACCEL_MSG_AFLAGS_TO_DOM0_MASK) != 0) {
358 if (bend->shared_page->aflags &
359 NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL) {
360 /* We've been told there may now be space. */
361 clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL_B,
362 (unsigned long *)&bend->shared_page->aflags);
/* Frontend reports its queue is full; remember to signal space later
 * (the elided lines presumably set queue_was_full — TODO confirm). */
365 if (bend->shared_page->aflags &
366 NET_ACCEL_MSG_AFLAGS_QUEUEUFULL) {
367 clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUFULL_B,
368 (unsigned long *)&bend->shared_page->aflags);
/* Drain and dispatch every queued message from the frontend. */
373 while ((err = net_accel_msg_recv(bend->shared_page, &bend->from_domU,
375 err = process_rx_msg(bend, &msg);
378 EPRINTK("%s: Error %d\n", __FUNCTION__, err);
384 /* There will be space now if we can make any. */
386 set_queue_not_full(bend);
388 mutex_unlock(&bend->bend_mutex);