/****************************************************************************
 * Solarflare driver for Xen network acceleration
 *
 * Copyright 2006-2008: Solarflare Communications Inc,
 *                      9501 Jeronimo Road, Suite 250,
 *                      Irvine, CA 92618, USA
 *
 * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 ****************************************************************************
 */

#include <xen/gnttab.h>

#include "accel_bufs.h"
#include "accel_util.h"

#include "accel.h"


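/*
 * Allocate the array of pointers to descriptor blocks for a buffer
 * manager.  The blocks themselves are allocated later, on demand, as
 * buffer pages arrive from the backend.
 */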
static int
netfront_accel_alloc_buf_desc_blocks(struct netfront_accel_bufinfo *manager,
				     int pages)
{
	manager->desc_blocks =
		kzalloc(sizeof(struct netfront_accel_pkt_desc *) *
			NETFRONT_ACCEL_BUF_NUM_BLOCKS(pages), GFP_KERNEL);
	if (manager->desc_blocks == NULL) {
		return -ENOMEM;
	}

	return 0;
}

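/*
 * Allocate the per-page bookkeeping arrays: one holding the kernel
 * address of each buffer page and one holding its grant reference.
 */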
static int
netfront_accel_alloc_buf_lists(struct netfront_accel_bufpages *bufpages,
			       int pages)
{
	bufpages->page_list = kmalloc(pages * sizeof(void *), GFP_KERNEL);
	if (bufpages->page_list == NULL) {
		return -ENOMEM;
	}

	bufpages->grant_list = kzalloc(pages * sizeof(grant_ref_t), GFP_KERNEL);
	if (bufpages->grant_list == NULL) {
		kfree(bufpages->page_list);
		bufpages->page_list = NULL;
		return -ENOMEM;
	}

	return 0;
}


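/*
 * Allocate all the buffer memory: descriptor block arrays for the RX
 * and TX managers (the pages are divided between them according to
 * sfc_netfront_buffer_split), the page and grant bookkeeping lists,
 * and the buffer pages themselves.
 */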
int netfront_accel_alloc_buffer_mem(struct netfront_accel_bufpages *bufpages,
				    struct netfront_accel_bufinfo *rx_manager,
				    struct netfront_accel_bufinfo *tx_manager,
				    int pages)
{
	int n, rc;

	if ((rc = netfront_accel_alloc_buf_desc_blocks
	     (rx_manager, pages - (pages / sfc_netfront_buffer_split))) < 0) {
		goto rx_fail;
	}

	if ((rc = netfront_accel_alloc_buf_desc_blocks
	     (tx_manager, pages / sfc_netfront_buffer_split)) < 0) {
		goto tx_fail;
	}

	if ((rc = netfront_accel_alloc_buf_lists(bufpages, pages)) < 0) {
		goto lists_fail;
	}

	for (n = 0; n < pages; n++) {
		void *tmp = (void *)__get_free_page(GFP_KERNEL);
		if (tmp == NULL)
			break;

		bufpages->page_list[n] = tmp;
	}

	if (n != pages) {
		EPRINTK("%s: not enough pages: %d != %d\n", __FUNCTION__, n,
			pages);
		/* page_list[n] was never assigned, so free only the n
		 * pages that were successfully allocated */
		while (--n >= 0)
			free_page((unsigned long)(bufpages->page_list[n]));
		rc = -ENOMEM;
		goto pages_fail;
	}

	bufpages->max_pages = pages;
	bufpages->page_reqs = 0;

	return 0;

 pages_fail:
	kfree(bufpages->page_list);
	kfree(bufpages->grant_list);

	bufpages->page_list = NULL;
	bufpages->grant_list = NULL;
 lists_fail:
	kfree(tx_manager->desc_blocks);
	tx_manager->desc_blocks = NULL;

 tx_fail:
	kfree(rx_manager->desc_blocks);
	rx_manager->desc_blocks = NULL;
 rx_fail:
	return rc;
}


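/*
 * Release everything allocated by netfront_accel_alloc_buffer_mem:
 * ungrant and free each buffer page, then free the bookkeeping lists
 * and descriptor block arrays.
 */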
void netfront_accel_free_buffer_mem(struct netfront_accel_bufpages *bufpages,
				    struct netfront_accel_bufinfo *rx_manager,
				    struct netfront_accel_bufinfo *tx_manager)
{
	int i;

	for (i = 0; i < bufpages->max_pages; i++) {
		if (bufpages->grant_list[i] != 0)
			net_accel_ungrant_page(bufpages->grant_list[i]);
		free_page((unsigned long)(bufpages->page_list[i]));
	}

	if (bufpages->max_pages) {
		kfree(bufpages->page_list);
		kfree(bufpages->grant_list);
		kfree(rx_manager->desc_blocks);
		kfree(tx_manager->desc_blocks);
	}
}


/*
 * Allocate memory for a buffer manager and set up its lock.  If no
 * lock is supplied, one is allocated internally.
 */
struct netfront_accel_bufinfo *netfront_accel_init_bufs(spinlock_t *lock)
{
	struct netfront_accel_bufinfo *res = kmalloc(sizeof(*res), GFP_KERNEL);
	if (res != NULL) {
		res->npages = res->nused = 0;
		res->first_free = -1;

		if (lock == NULL) {
			res->lock = kmalloc(sizeof(*res->lock), GFP_KERNEL);
			if (res->lock == NULL) {
				kfree(res);
				return NULL;
			}
			spin_lock_init(res->lock);
			res->internally_locked = 1;
		} else {
			res->lock = lock;
			res->internally_locked = 0;
		}

		res->desc_blocks = NULL;
	}

	return res;
}


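/*
 * Tear down a buffer manager created by netfront_accel_init_bufs,
 * freeing the lock as well if it was allocated internally.
 */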
void netfront_accel_fini_bufs(struct netfront_accel_bufinfo *bufs)
{
	if (bufs->internally_locked)
		kfree(bufs->lock);
	kfree(bufs);
}


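/*
 * Build a NET_ACCEL_MSG_MAPBUF message granting the backend access to
 * a run of buffer pages.  Pages that already hold a grant (from an
 * earlier attempt that could not be sent) are reused rather than
 * granted again.
 */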
int netfront_accel_buf_map_request(struct xenbus_device *dev,
				   struct netfront_accel_bufpages *bufpages,
				   struct net_accel_msg *msg,
				   int pages, int offset)
{
	int i, mfn;
	int err;

	net_accel_msg_init(msg, NET_ACCEL_MSG_MAPBUF);

	BUG_ON(pages > NET_ACCEL_MSG_MAX_PAGE_REQ);

	msg->u.mapbufs.pages = pages;

	for (i = 0; i < msg->u.mapbufs.pages; i++) {
		/*
		 * This can happen if we tried to send this message
		 * earlier but the queue was full.
		 */
		if (bufpages->grant_list[offset+i] != 0) {
			msg->u.mapbufs.grants[i] =
				bufpages->grant_list[offset+i];
			continue;
		}

		mfn = virt_to_mfn(bufpages->page_list[offset+i]);
		VPRINTK("%s: Granting page %d, mfn %08x\n",
			__FUNCTION__, i, mfn);

		bufpages->grant_list[offset+i] =
			net_accel_grant_page(dev, mfn, 0);
		msg->u.mapbufs.grants[i] = bufpages->grant_list[offset+i];

		if (msg->u.mapbufs.grants[i] < 0) {
			EPRINTK("%s: Failed to grant buffer: %d\n",
				__FUNCTION__, msg->u.mapbufs.grants[i]);
			err = -EIO;
			goto error;
		}
	}

	/* This is interpreted on return as the offset into the page_list */
	msg->u.mapbufs.reqid = offset;

	return 0;

error:
	/* Ungrant all the pages we've successfully granted. */
	for (i--; i >= 0; i--) {
		net_accel_ungrant_page(bufpages->grant_list[offset+i]);
		bufpages->grant_list[offset+i] = 0;
	}
	return err;
}


/* Process a response to a buffer request. */
int netfront_accel_add_bufs(struct netfront_accel_bufpages *bufpages,
			    struct netfront_accel_bufinfo *manager,
			    struct net_accel_msg *msg)
{
	int msg_pages, page_offset, i, newtot;
	int old_block_count, new_block_count;
	u32 msg_buf;
	unsigned long flags;

	VPRINTK("%s: manager %p msg %p\n", __FUNCTION__, manager, msg);

	BUG_ON(msg->id != (NET_ACCEL_MSG_MAPBUF | NET_ACCEL_MSG_REPLY));

	msg_pages = msg->u.mapbufs.pages;
	msg_buf = msg->u.mapbufs.buf;
	page_offset = msg->u.mapbufs.reqid;

	spin_lock_irqsave(manager->lock, flags);
	newtot = manager->npages + msg_pages;
	/* Round up to find how many descriptor blocks are in use now
	 * and how many will be needed once the new pages are added */
	old_block_count =
		(manager->npages + NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK - 1) >>
		NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT;
	new_block_count =
		(newtot + NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK - 1) >>
		NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT;

	/* Allocate any descriptor blocks not already present */
	for (i = old_block_count; i < new_block_count; i++) {
		struct netfront_accel_pkt_desc *block;
		if (manager->desc_blocks[i] != NULL) {
			VPRINTK("Not needed\n");
			continue;
		}
		block = kzalloc(NETFRONT_ACCEL_BUFS_PER_BLOCK *
				sizeof(netfront_accel_pkt_desc), GFP_ATOMIC);
		if (block == NULL) {
			spin_unlock_irqrestore(manager->lock, flags);
			return -ENOMEM;
		}
		manager->desc_blocks[i] = block;
	}
	/* Carve each new page into NETFRONT_ACCEL_BUFS_PER_PAGE buffers
	 * and thread the resulting descriptors onto the free list */
	for (i = manager->npages; i < newtot; i++) {
		int k, j = i - manager->npages;
		int block_num;
		int block_idx;
		struct netfront_accel_pkt_desc *pkt;

		block_num = i >> NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT;
		block_idx = (NETFRONT_ACCEL_BUFS_PER_PAGE*i)
			& (NETFRONT_ACCEL_BUFS_PER_BLOCK-1);

		pkt = manager->desc_blocks[block_num] + block_idx;

		for (k = 0; k < NETFRONT_ACCEL_BUFS_PER_PAGE; k++) {
			BUG_ON(page_offset + j >= bufpages->max_pages);

			pkt[k].buf_id = NETFRONT_ACCEL_BUFS_PER_PAGE * i + k;
			pkt[k].pkt_kva = bufpages->page_list[page_offset + j] +
				(PAGE_SIZE/NETFRONT_ACCEL_BUFS_PER_PAGE) * k;
			pkt[k].pkt_buff_addr = msg_buf +
				(PAGE_SIZE/NETFRONT_ACCEL_BUFS_PER_PAGE) *
				(NETFRONT_ACCEL_BUFS_PER_PAGE * j + k);
			pkt[k].next_free = manager->first_free;
			manager->first_free = pkt[k].buf_id;
			/* Stash the buffer id at the start of the buffer */
			*(int *)(pkt[k].pkt_kva) = pkt[k].buf_id;

			VPRINTK("buf %d desc %p kva %p buffaddr %x\n",
				pkt[k].buf_id, &(pkt[k]), pkt[k].pkt_kva,
				pkt[k].pkt_buff_addr);
		}
	}
	manager->npages = newtot;
	spin_unlock_irqrestore(manager->lock, flags);
	VPRINTK("Added %d pages. Total is now %d\n", msg_pages,
		manager->npages);
	return 0;
}


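/*
 * Look up a buffer descriptor by id.  The id encodes the descriptor
 * block in its high bits and the index within the block in its low
 * bits.
 */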
netfront_accel_pkt_desc *
netfront_accel_buf_find(struct netfront_accel_bufinfo *manager, u16 id)
{
	netfront_accel_pkt_desc *pkt;
	int block_num = id >> NETFRONT_ACCEL_BUFS_PER_BLOCK_SHIFT;
	int block_idx = id & (NETFRONT_ACCEL_BUFS_PER_BLOCK - 1);
	BUG_ON(id >= manager->npages * NETFRONT_ACCEL_BUFS_PER_PAGE);
	BUG_ON(block_idx >= NETFRONT_ACCEL_BUFS_PER_BLOCK);
	pkt = manager->desc_blocks[block_num] + block_idx;
	return pkt;
}


/* Allocate a buffer from the buffer manager */
netfront_accel_pkt_desc *
netfront_accel_buf_get(struct netfront_accel_bufinfo *manager)
{
	int bufno = -1;
	netfront_accel_pkt_desc *buf = NULL;
	unsigned long flags = 0;

	/* Any spare? */
	if (manager->first_free == -1)
		return NULL;
	/* Take lock */
	if (manager->internally_locked)
		spin_lock_irqsave(manager->lock, flags);
	bufno = manager->first_free;
	if (bufno != -1) {
		buf = netfront_accel_buf_find(manager, bufno);
		manager->first_free = buf->next_free;
		manager->nused++;
	}
	/* Release lock */
	if (manager->internally_locked)
		spin_unlock_irqrestore(manager->lock, flags);

	/* Tell the world; buf can still be NULL if the free list
	 * emptied before the lock was taken */
	if (buf != NULL)
		VPRINTK("Allocated buffer %i, buffaddr %x\n", bufno,
			buf->pkt_buff_addr);

	return buf;
}


/* Release a buffer back to the buffer manager pool */
int netfront_accel_buf_put(struct netfront_accel_bufinfo *manager, u16 id)
{
	netfront_accel_pkt_desc *buf = netfront_accel_buf_find(manager, id);
	unsigned long flags = 0;
	unsigned was_empty = 0;
	int bufno = id;

	VPRINTK("Freeing buffer %i\n", id);
	BUG_ON(id == (u16)-1);

	if (manager->internally_locked)
		spin_lock_irqsave(manager->lock, flags);

	if (manager->first_free == -1)
		was_empty = 1;

	buf->next_free = manager->first_free;
	manager->first_free = bufno;
	manager->nused--;

	if (manager->internally_locked)
		spin_unlock_irqrestore(manager->lock, flags);

	return was_empty;
}