/****************************************************************************
 * Solarflare driver for Xen network acceleration
 *
 * Copyright 2006-2008: Solarflare Communications Inc,
 *                      9501 Jeronimo Road, Suite 250,
 *                      Irvine, CA 92618, USA
 *
 * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 ****************************************************************************
 */

#include <xen/evtchn.h>

#include "accel.h"
#include "accel_msg_iface.h"
#include "accel_util.h"
#include "accel_solarflare.h"

/* Send a HELLO to front end to start things off */
void netback_accel_msg_tx_hello(struct netback_accel *bend, unsigned version)
{
        unsigned long lock_state;
        struct net_accel_msg *msg =
                net_accel_msg_start_send(bend->shared_page,
                                         &bend->to_domU, &lock_state);
        /* The queue _cannot_ be full, we're the first users. */
        EPRINTK_ON(msg == NULL);

        if (msg != NULL) {
                net_accel_msg_init(msg, NET_ACCEL_MSG_HELLO);
                msg->u.hello.version = version;
                msg->u.hello.max_pages = bend->quotas.max_buf_pages;
                VPRINTK("Sending hello to channel %d\n", bend->msg_channel);
                net_accel_msg_complete_send_notify(bend->shared_page,
                                                   &bend->to_domU,
                                                   &lock_state,
                                                   bend->msg_channel_irq);
        }
}

/* Send a local mac message to vnic */
static void netback_accel_msg_tx_localmac(struct netback_accel *bend,
                                          int type, const void *mac)
{
        unsigned long lock_state;
        struct net_accel_msg *msg;

        BUG_ON(bend == NULL || mac == NULL);

        VPRINTK("Sending local mac message: %pM\n", mac);

        msg = net_accel_msg_start_send(bend->shared_page, &bend->to_domU,
                                       &lock_state);

        if (msg != NULL) {
                net_accel_msg_init(msg, NET_ACCEL_MSG_LOCALMAC);
                msg->u.localmac.flags = type;
                memcpy(msg->u.localmac.mac, mac, ETH_ALEN);
                net_accel_msg_complete_send_notify(bend->shared_page,
                                                   &bend->to_domU,
                                                   &lock_state,
                                                   bend->msg_channel_irq);
        } else {
                /*
                 * TODO: if this happens we may leave a domU
                 * fastpathing packets when they should be delivered
                 * locally.  The solution is to have the domU time
                 * out entries in its fastpath lookup table when it
                 * receives no RX traffic.
                 */
                EPRINTK("%s: saw full queue, may need ARP timer to recover\n",
                        __FUNCTION__);
        }
}

/* Send an add local mac message to vnic */
void netback_accel_msg_tx_new_localmac(struct netback_accel *bend,
                                       const void *mac)
{
        netback_accel_msg_tx_localmac(bend, NET_ACCEL_MSG_ADD, mac);
}


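/*
 * Handle a MAPBUF request from the frontend: check that the page
 * count is a power of two and within NET_ACCEL_MSG_MAX_PAGE_REQ, map
 * the granted pages, and convert the message into a reply (setting
 * the error flag on failure).
 */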
static int netback_accel_msg_rx_buffer_map(struct netback_accel *bend,
                                           struct net_accel_msg *msg)
{
        int log2_pages, rc;

        /* Can only allocate in power of two */
        log2_pages = log2_ge(msg->u.mapbufs.pages, 0);
        if (msg->u.mapbufs.pages != pow2(log2_pages)) {
                EPRINTK("%s: Can only alloc bufs in power of 2 sizes (%d)\n",
                        __FUNCTION__, msg->u.mapbufs.pages);
                rc = -EINVAL;
                goto err_out;
        }

        /*
         * Sanity check: assumes NET_ACCEL_MSG_MAX_PAGE_REQ is the
         * same for both directions/domains.
         */
        if (msg->u.mapbufs.pages > NET_ACCEL_MSG_MAX_PAGE_REQ) {
                EPRINTK("%s: too many pages in a single message: %d %d\n",
                        __FUNCTION__, msg->u.mapbufs.pages,
                        NET_ACCEL_MSG_MAX_PAGE_REQ);
                rc = -EINVAL;
                goto err_out;
        }

        if ((rc = netback_accel_add_buffers(bend, msg->u.mapbufs.pages,
                                            log2_pages, msg->u.mapbufs.grants,
                                            &msg->u.mapbufs.buf)) < 0) {
                goto err_out;
        }

        msg->id |= NET_ACCEL_MSG_REPLY;

        return 0;

 err_out:
        EPRINTK("%s: err_out\n", __FUNCTION__);
        msg->id |= NET_ACCEL_MSG_ERROR | NET_ACCEL_MSG_REPLY;
        return rc;
}


/* Hint from frontend that one of our filters is out of date */
static int netback_accel_process_fastpath(struct netback_accel *bend,
                                          struct net_accel_msg *msg)
{
        struct netback_accel_filter_spec spec;

        if (msg->u.fastpath.flags & NET_ACCEL_MSG_REMOVE) {
                /*
                 * Would be nice to BUG() this, but that would leave
                 * us vulnerable to a naughty frontend
                 */
                EPRINTK_ON(msg->u.fastpath.flags & NET_ACCEL_MSG_ADD);

                memcpy(spec.mac, msg->u.fastpath.mac, ETH_ALEN);
                spec.destport_be = msg->u.fastpath.port;
                spec.destip_be = msg->u.fastpath.ip;
                spec.proto = msg->u.fastpath.proto;

                netback_accel_filter_remove_spec(bend, &spec);
        }

        return 0;
}


/* Flow control for message queues */
inline void set_queue_not_full(struct netback_accel *bend)
{
        if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUNOTFULL_B,
                              (unsigned long *)&bend->shared_page->aflags))
                notify_remote_via_irq(bend->msg_channel_irq);
        else
                VPRINTK("queue not full bit already set, not signalling\n");
}


/* Flow control for message queues */
inline void set_queue_full(struct netback_accel *bend)
{
        if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0FULL_B,
                              (unsigned long *)&bend->shared_page->aflags))
                notify_remote_via_irq(bend->msg_channel_irq);
        else
                VPRINTK("queue full bit already set, not signalling\n");
}


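/*
 * Report a change in the net device's up/down state to the frontend
 * via the shared page, notifying it over the message channel IRQ
 * unless the up/down flag was already pending.
 */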
void netback_accel_set_interface_state(struct netback_accel *bend, int up)
{
        bend->shared_page->net_dev_up = up;
        if (!test_and_set_bit(NET_ACCEL_MSG_AFLAGS_NETUPDOWN_B,
                              (unsigned long *)&bend->shared_page->aflags))
                notify_remote_via_irq(bend->msg_channel_irq);
        else
                VPRINTK("interface up/down bit already set, not signalling\n");
}


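/*
 * Decide whether a HELLO carrying a mismatched protocol version can
 * be accepted: a return of 0 would mean the version is acceptable,
 * but at present every mismatch is rejected with -EPROTO.
 */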
static int check_rx_hello_version(unsigned version)
{
        /* Should only happen if there's been a version mismatch */
        BUG_ON(version == NET_ACCEL_MSG_VERSION);

        if (version > NET_ACCEL_MSG_VERSION) {
                /* Newer protocol, we must refuse */
                return -EPROTO;
        }

        if (version < NET_ACCEL_MSG_VERSION) {
                /*
                 * We are newer, so have discretion to accept if we
                 * wish.  For now however, just reject
                 */
                return -EPROTO;
        }

        return -EINVAL;
}


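/*
 * Dispatch a single message received from the frontend: HELLO
 * replies drive hardware setup, MAPBUF requests map frontend
 * buffers, and FASTPATH messages update our filters.
 */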
static int process_rx_msg(struct netback_accel *bend,
                          struct net_accel_msg *msg)
{
        int err = 0;

        switch (msg->id) {
        case NET_ACCEL_MSG_REPLY | NET_ACCEL_MSG_HELLO:
                /* Reply to a HELLO; mark ourselves as connected */
                DPRINTK("got Hello reply, version %.8x\n",
                        msg->u.hello.version);

                /*
                 * Check that we've not successfully done this
                 * already.  NB no check at the moment that this reply
                 * comes after we've actually sent a HELLO as that's
                 * not possible with the current code structure
                 */
                if (bend->hw_state != NETBACK_ACCEL_RES_NONE)
                        return -EPROTO;

                /* Store max_pages for accel_setup */
                if (msg->u.hello.max_pages > bend->quotas.max_buf_pages) {
                        EPRINTK("More pages than quota allows (%d > %d)\n",
                                msg->u.hello.max_pages,
                                bend->quotas.max_buf_pages);
                        /* Force it down to the quota */
                        msg->u.hello.max_pages = bend->quotas.max_buf_pages;
                }
                bend->max_pages = msg->u.hello.max_pages;

                /* Set up the hardware visible to the other end */
                err = bend->accel_setup(bend);
                if (err) {
                        /* This is fatal */
                        DPRINTK("Hello gave accel_setup error %d\n", err);
                        netback_accel_set_closing(bend);
                } else {
                        /*
                         * Now add the context so that packet
                         * forwarding will commence
                         */
                        netback_accel_fwd_set_context(bend->mac, bend,
                                                      bend->fwd_priv);
                }
                break;
        case NET_ACCEL_MSG_REPLY | NET_ACCEL_MSG_HELLO | NET_ACCEL_MSG_ERROR:
                EPRINTK("got Hello error, versions us:%.8x them:%.8x\n",
                        NET_ACCEL_MSG_VERSION, msg->u.hello.version);

                if (bend->hw_state != NETBACK_ACCEL_RES_NONE)
                        return -EPROTO;

                if (msg->u.hello.version != NET_ACCEL_MSG_VERSION) {
                        /* Error is due to version mismatch */
                        err = check_rx_hello_version(msg->u.hello.version);
                        if (err == 0) {
                                /*
                                 * It's OK to be compatible, send
                                 * another hello with compatible version
                                 */
                                netback_accel_msg_tx_hello
                                        (bend, msg->u.hello.version);
                        } else {
                                /*
                                 * Tell frontend that we're not going to
                                 * send another HELLO by going to Closing.
                                 */
                                netback_accel_set_closing(bend);
                        }
                }
                break;
        case NET_ACCEL_MSG_MAPBUF:
                VPRINTK("Got mapped buffers request %d\n",
                        msg->u.mapbufs.reqid);

                if (bend->hw_state == NETBACK_ACCEL_RES_NONE)
                        return -EPROTO;

                /*
                 * Frontend wants a buffer table entry for the
                 * supplied pages
                 */
                err = netback_accel_msg_rx_buffer_map(bend, msg);
                if (net_accel_msg_reply_notify(bend->shared_page,
                                               bend->msg_channel_irq,
                                               &bend->to_domU, msg)) {
                        /*
                         * This is fatal as we can't tell the frontend
                         * about the problem through the message
                         * queue, and so would otherwise stalemate
                         */
                        netback_accel_set_closing(bend);
                }
                break;
        case NET_ACCEL_MSG_FASTPATH:
                DPRINTK("Got fastpath request\n");

                if (bend->hw_state == NETBACK_ACCEL_RES_NONE)
                        return -EPROTO;

                err = netback_accel_process_fastpath(bend, msg);
                break;
        default:
                EPRINTK("Huh? Message code is %x\n", msg->id);
                err = -EPROTO;
                break;
        }
        return err;
}


/*  Demultiplex an IRQ from the frontend driver.  */
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
void netback_accel_msg_rx_handler(struct work_struct *arg)
#else
void netback_accel_msg_rx_handler(void *bend_void)
#endif
{
        struct net_accel_msg msg;
        int err, queue_was_full = 0;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,20)
        struct netback_accel *bend =
                container_of(arg, struct netback_accel, handle_msg);
#else
        struct netback_accel *bend = (struct netback_accel *)bend_void;
#endif

        mutex_lock(&bend->bend_mutex);

        /*
         * This happens when the shared pages have been unmapped, but
         * the workqueue not flushed yet
         */
        if (bend->shared_page == NULL)
                goto done;

        if ((bend->shared_page->aflags &
             NET_ACCEL_MSG_AFLAGS_TO_DOM0_MASK) != 0) {
                if (bend->shared_page->aflags &
                    NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL) {
                        /* We've been told there may now be space. */
                        clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUE0NOTFULL_B,
                                  (unsigned long *)&bend->shared_page->aflags);
                }

                if (bend->shared_page->aflags &
                    NET_ACCEL_MSG_AFLAGS_QUEUEUFULL) {
                        clear_bit(NET_ACCEL_MSG_AFLAGS_QUEUEUFULL_B,
                                  (unsigned long *)&bend->shared_page->aflags);
                        queue_was_full = 1;
                }
        }

        while ((err = net_accel_msg_recv(bend->shared_page, &bend->from_domU,
                                         &msg)) == 0) {
                err = process_rx_msg(bend, &msg);

                if (err != 0) {
                        EPRINTK("%s: Error %d\n", __FUNCTION__, err);
                        goto err;
                }
        }

 err:
        /* There will be space now if we can make any. */
        if (queue_was_full)
                set_queue_not_full(bend);
 done:
        mutex_unlock(&bend->bend_mutex);

        return;
}