1 /****************************************************************************
2 * Solarflare driver for Xen network acceleration
4 * Copyright 2006-2008: Solarflare Communications Inc,
5 * 9501 Jeronimo Road, Suite 250,
6 * Irvine, CA 92618, USA
8 * Maintained by Solarflare Communications <linux-xen-drivers@solarflare.com>
10 * This program is free software; you can redistribute it and/or modify it
11 * under the terms of the GNU General Public License version 2 as published
12 * by the Free Software Foundation, incorporated herein by reference.
14 * This program is distributed in the hope that it will be useful,
15 * but WITHOUT ANY WARRANTY; without even the implied warranty of
16 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
17 * GNU General Public License for more details.
19 * You should have received a copy of the GNU General Public License
20 * along with this program; if not, write to the Free Software
21 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
22 ****************************************************************************
25 #include <xen/gnttab.h>
27 #include "accel_bufs.h"
28 #include "accel_util.h"
/*
 * Allocate the top-level array of packet-descriptor block pointers for
 * @manager, sized to hold NETFRONT_ACCEL_BUF_NUM_BLOCKS(pages) entries.
 * kzalloc is used so every block pointer starts out NULL (blocks are
 * populated lazily later, see netfront_accel_add_bufs()).
 * NOTE(review): return type and the NULL-failure/return path are elided
 * in this view — presumably returns 0 on success, negative errno on
 * allocation failure; confirm against the full source.
 */
34 netfront_accel_alloc_buf_desc_blocks(struct netfront_accel_bufinfo *manager,
37 manager->desc_blocks =
38 kzalloc(sizeof(struct netfront_accel_pkt_desc *) *
39 NETFRONT_ACCEL_BUF_NUM_BLOCKS(pages), GFP_KERNEL);
/* Out-of-memory: elided lines presumably return -ENOMEM here. */
40 if (manager->desc_blocks == NULL) {
/*
 * Allocate the per-page bookkeeping arrays for @bufpages: page_list
 * (one void* per backing page) and grant_list (one grant_ref_t per
 * page).  grant_list uses kzalloc so unused slots read as 0, which the
 * rest of this file treats as "no grant issued" (see buf_map_request).
 * On partial failure the page_list allocation is rolled back so the
 * caller sees a clean state.
 */
48 netfront_accel_alloc_buf_lists(struct netfront_accel_bufpages *bufpages,
51 bufpages->page_list = kmalloc(pages * sizeof(void *), GFP_KERNEL);
52 if (bufpages->page_list == NULL) {
56 bufpages->grant_list = kzalloc(pages * sizeof(grant_ref_t), GFP_KERNEL);
/* grant_list failed: free page_list and NULL it so no dangling pointer. */
57 if (bufpages->grant_list == NULL) {
58 kfree(bufpages->page_list);
59 bufpages->page_list = NULL;
/*
 * Allocate all buffer memory shared between front and back ends: the RX
 * and TX descriptor-block arrays, the page/grant bookkeeping lists, and
 * @pages actual pages from the page allocator.  The page budget is split
 * between RX and TX by the sfc_netfront_buffer_split module parameter
 * (TX gets pages/split, RX gets the remainder).
 * Returns 0 on success, negative errno on failure; on failure everything
 * allocated so far is unwound via the goto-style error tail below.
 */
67 int netfront_accel_alloc_buffer_mem(struct netfront_accel_bufpages *bufpages,
68 struct netfront_accel_bufinfo *rx_manager,
69 struct netfront_accel_bufinfo *tx_manager,
/* RX gets the larger share of the page budget. */
74 if ((rc = netfront_accel_alloc_buf_desc_blocks
75 (rx_manager, pages - (pages / sfc_netfront_buffer_split))) < 0) {
/* TX gets pages / sfc_netfront_buffer_split. */
79 if ((rc = netfront_accel_alloc_buf_desc_blocks
80 (tx_manager, pages / sfc_netfront_buffer_split)) < 0) {
84 if ((rc = netfront_accel_alloc_buf_lists(bufpages, pages)) < 0) {
/* Grab the actual backing pages one at a time. */
88 for (n = 0; n < pages; n++) {
89 void *tmp = (void*)__get_free_page(GFP_KERNEL);
93 bufpages->page_list[n] = tmp;
/*
 * Partial page allocation: report how far we got and (in elided
 * lines) free the n pages already obtained before unwinding.
 */
97 EPRINTK("%s: not enough pages: %d != %d\n", __FUNCTION__, n,
100 free_page((unsigned long)(bufpages->page_list[n]));
/* Success: record capacity; no map requests issued yet. */
105 bufpages->max_pages = pages;
106 bufpages->page_reqs = 0;
/*
 * Error-unwind tail (labels elided in this view): free the lists and
 * both managers' descriptor-block arrays, NULLing each pointer so a
 * later free path cannot double-free.
 */
111 kfree(bufpages->page_list);
112 kfree(bufpages->grant_list);
114 bufpages->page_list = NULL;
115 bufpages->grant_list = NULL;
117 kfree(tx_manager->desc_blocks);
118 tx_manager->desc_blocks = NULL;
121 kfree(rx_manager->desc_blocks);
122 rx_manager->desc_blocks = NULL;
/*
 * Release everything netfront_accel_alloc_buffer_mem() obtained: revoke
 * any outstanding grants, return the backing pages to the page
 * allocator, then free the bookkeeping arrays.  A grant_list entry of 0
 * means "never granted" (grant_list was kzalloc'd), so those are
 * skipped.  The max_pages guard makes this safe to call when the
 * allocation never happened.
 */
128 void netfront_accel_free_buffer_mem(struct netfront_accel_bufpages *bufpages,
129 struct netfront_accel_bufinfo *rx_manager,
130 struct netfront_accel_bufinfo *tx_manager)
134 for (i = 0; i < bufpages->max_pages; i++) {
/* 0 == grant never issued for this page; don't ungrant it. */
135 if (bufpages->grant_list[i] != 0)
136 net_accel_ungrant_page(bufpages->grant_list[i]);
137 free_page((unsigned long)(bufpages->page_list[i]));
140 if (bufpages->max_pages) {
141 kfree(bufpages->page_list);
142 kfree(bufpages->grant_list);
143 kfree(rx_manager->desc_blocks);
144 kfree(tx_manager->desc_blocks);
150  * Allocate memory for the buffer manager and create a lock. If no
151  * lock is supplied its own is allocated.
/*
 * Returns the new manager, with an empty buffer pool (npages/nused 0,
 * free list empty: first_free == -1) and no descriptor blocks yet.
 * internally_locked records whether the lock is owned by this manager
 * (and so must be freed in netfront_accel_fini_bufs) or borrowed from
 * the caller.  NOTE(review): the res==NULL check, the lock!=NULL branch
 * and the return statement are elided in this view.
 */
153 struct netfront_accel_bufinfo *netfront_accel_init_bufs(spinlock_t *lock)
155 struct netfront_accel_bufinfo *res = kmalloc(sizeof(*res), GFP_KERNEL);
157 res->npages = res->nused = 0;
/* -1 is the free-list sentinel used throughout this file. */
158 res->first_free = -1;
/* Caller supplied no lock: allocate and own one ourselves. */
161 res->lock = kmalloc(sizeof(*res->lock), GFP_KERNEL);
162 if (res->lock == NULL) {
166 spin_lock_init(res->lock);
167 res->internally_locked = 1;
/* Borrowed lock: caller handles locking around buf_get/buf_put. */
170 res->internally_locked = 0;
173 res->desc_blocks = NULL;
/*
 * Tear down a manager created by netfront_accel_init_bufs().  Only an
 * internally allocated lock is freed here (elided lines presumably
 * kfree(bufs->lock) and then kfree(bufs)); a caller-supplied lock is
 * left alone.
 */
180 void netfront_accel_fini_bufs(struct netfront_accel_bufinfo *bufs)
182 if (bufs->internally_locked)
/*
 * Build a MAPBUF message asking the backend to map @pages of our buffer
 * pages starting at @offset in bufpages->page_list.  Each page is
 * granted to the backend (net_accel_grant_page) and the grant ref is
 * cached in grant_list so a retried request reuses the existing grant
 * instead of issuing a second one.  On grant failure all grants issued
 * by this call are revoked before returning.
 * NOTE(review): return statements are elided; presumably 0 on success,
 * negative errno on failure — confirm against the full source.
 */
188 int netfront_accel_buf_map_request(struct xenbus_device *dev,
189 struct netfront_accel_bufpages *bufpages,
190 struct net_accel_msg *msg,
191 int pages, int offset)
196 net_accel_msg_init(msg, NET_ACCEL_MSG_MAPBUF);
/* The message format caps how many grants fit in one request. */
198 BUG_ON(pages > NET_ACCEL_MSG_MAX_PAGE_REQ);
200 msg->u.mapbufs.pages = pages;
202 for (i = 0; i < msg->u.mapbufs.pages; i++) {
204  * This can happen if we tried to send this message
205  * earlier but the queue was full.
/* Already granted on a previous attempt: reuse the cached ref. */
207 if (bufpages->grant_list[offset+i] != 0) {
208 msg->u.mapbufs.grants[i] =
209 bufpages->grant_list[offset+i];
213 mfn = virt_to_mfn(bufpages->page_list[offset+i]);
214 VPRINTK("%s: Granting page %d, mfn %08x\n",
215 __FUNCTION__, i, mfn);
217 bufpages->grant_list[offset+i] =
218 net_accel_grant_page(dev, mfn, 0);
219 msg->u.mapbufs.grants[i] = bufpages->grant_list[offset+i];
/* Negative grant ref signals grant failure: unwind below. */
221 if (msg->u.mapbufs.grants[i] < 0) {
222 EPRINTK("%s: Failed to grant buffer: %d\n",
223 __FUNCTION__, msg->u.mapbufs.grants[i]);
229 /* This is interpreted on return as the offset in the the page_list */
230 msg->u.mapbufs.reqid = offset;
235 /* Ungrant all the pages we've successfully granted. */
/* i currently indexes the failed page; walk back over the good ones. */
236 for (i--; i >= 0; i--) {
237 net_accel_ungrant_page(bufpages->grant_list[offset+i]);
/* Reset to 0 ("no grant") so a retry re-grants from scratch. */
238 bufpages->grant_list[offset+i] = 0;
244 /* Process a response to a buffer request. */
/*
 * The backend has mapped the pages we granted in buf_map_request();
 * grow @manager's descriptor pool to cover them.  Steps, under
 * manager->lock with interrupts off:
 *   1. Compute how many descriptor blocks the new page total needs and
 *      kzalloc (GFP_ATOMIC — we hold a spinlock) any missing ones.
 *   2. For each new page, initialise its NETFRONT_ACCEL_BUFS_PER_PAGE
 *      packet descriptors: buf_id, kernel VA within our page, backend
 *      buffer address within msg_buf, and push each onto the free list
 *      (LIFO via first_free/next_free).  The buf_id is also written
 *      into the first word of the buffer itself.
 * NOTE(review): several lines (declarations, some closing braces,
 * return statements) are elided in this view.
 */
245 int netfront_accel_add_bufs(struct netfront_accel_bufpages *bufpages,
246 struct netfront_accel_bufinfo *manager,
247 struct net_accel_msg *msg)
249 int msg_pages, page_offset, i, newtot;
250 int old_block_count, new_block_count;
254 VPRINTK("%s: manager %p msg %p\n", __FUNCTION__, manager, msg);
/* This must be the reply to our MAPBUF request. */
256 BUG_ON(msg->id != (NET_ACCEL_MSG_MAPBUF | NET_ACCEL_MSG_REPLY));
258 msg_pages = msg->u.mapbufs.pages;
259 msg_buf = msg->u.mapbufs.buf;
/* reqid round-trips the page_list offset we set in buf_map_request. */
260 page_offset = msg->u.mapbufs.reqid;
262 spin_lock_irqsave(manager->lock, flags);
263 newtot = manager->npages + msg_pages;
/* ceil(npages / PAGES_PER_BLOCK) before and after the addition. */
265 (manager->npages + NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK - 1) >>
266 NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT;
268 (newtot + NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK - 1) >>
269 NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT;
/* Allocate any descriptor blocks the enlarged pool now needs. */
271 for (i = old_block_count; i < new_block_count; i++) {
272 struct netfront_accel_pkt_desc *block;
/* Block survives from an earlier grow/shrink cycle: keep it. */
273 if (manager->desc_blocks[i] != NULL) {
274 VPRINTK("Not needed\n");
/* GFP_ATOMIC: we are under spin_lock_irqsave. */
277 block = kzalloc(NETFRONT_ACCEL_BUFS_PER_BLOCK *
278 sizeof(netfront_accel_pkt_desc), GFP_ATOMIC);
280 spin_unlock_irqrestore(manager->lock, flags);
283 manager->desc_blocks[i] = block;
/* i = pool-wide page index, j = index within this message's pages. */
285 for (i = manager->npages; i < newtot; i++) {
286 int k, j = i - manager->npages;
289 struct netfront_accel_pkt_desc *pkt;
291 block_num = i >> NETFRONT_ACCEL_BUF_PAGES_PER_BLOCK_SHIFT;
/* First descriptor slot for page i within its block. */
292 block_idx = (NETFRONT_ACCEL_BUFS_PER_PAGE*i)
293 & (NETFRONT_ACCEL_BUFS_PER_BLOCK-1);
295 pkt = manager->desc_blocks[block_num] + block_idx;
/* Each page is carved into BUFS_PER_PAGE equal buffers. */
297 for (k = 0; k < NETFRONT_ACCEL_BUFS_PER_PAGE; k++) {
298 BUG_ON(page_offset + j >= bufpages->max_pages);
300 pkt[k].buf_id = NETFRONT_ACCEL_BUFS_PER_PAGE * i + k;
/* Our kernel VA for this buffer... */
301 pkt[k].pkt_kva = bufpages->page_list[page_offset + j] +
302 (PAGE_SIZE/NETFRONT_ACCEL_BUFS_PER_PAGE) * k;
/* ...and the backend's address for the same bytes. */
303 pkt[k].pkt_buff_addr = msg_buf +
304 (PAGE_SIZE/NETFRONT_ACCEL_BUFS_PER_PAGE) *
305 (NETFRONT_ACCEL_BUFS_PER_PAGE * j + k);
/* Push onto the LIFO free list. */
306 pkt[k].next_free = manager->first_free;
307 manager->first_free = pkt[k].buf_id;
/* Stamp the buffer's first word with its own id. */
308 *(int*)(pkt[k].pkt_kva) = pkt[k].buf_id;
310 VPRINTK("buf %d desc %p kva %p buffaddr %x\n",
311 pkt[k].buf_id, &(pkt[k]), pkt[k].pkt_kva,
312 pkt[k].pkt_buff_addr);
315 manager->npages = newtot;
316 spin_unlock_irqrestore(manager->lock, flags);
317 VPRINTK("Added %d pages. Total is now %d\n", msg_pages,
/*
 * Translate a buffer id into its packet descriptor: the high bits of
 * the id select the descriptor block, the low bits the slot within it
 * (descriptor blocks hold a power-of-two number of buffers, hence the
 * shift/mask pair).  BUG()s on an out-of-range id.  No locking — the
 * descriptor pool only grows and callers hold any lock they need.
 */
323 netfront_accel_pkt_desc *
324 netfront_accel_buf_find(struct netfront_accel_bufinfo *manager, u16 id)
326 netfront_accel_pkt_desc *pkt;
327 int block_num = id >> NETFRONT_ACCEL_BUFS_PER_BLOCK_SHIFT;
328 int block_idx = id & (NETFRONT_ACCEL_BUFS_PER_BLOCK - 1);
329 BUG_ON(id >= manager->npages * NETFRONT_ACCEL_BUFS_PER_PAGE);
330 BUG_ON(block_idx >= NETFRONT_ACCEL_BUFS_PER_BLOCK);
331 pkt = manager->desc_blocks[block_num] + block_idx;
336 /* Allocate a buffer from the buffer manager */
/*
 * Pop the head of the free list and return its descriptor, or NULL
 * (see the elided early-exit) when the pool is exhausted.  The lock is
 * taken only when this manager owns it (internally_locked); otherwise
 * the caller is responsible for serialisation.  NOTE(review): the
 * first_free == -1 check outside the lock looks like an unlocked
 * fast-path peek — confirm the elided lines re-check under the lock.
 */
337 netfront_accel_pkt_desc *
338 netfront_accel_buf_get(struct netfront_accel_bufinfo *manager)
341 netfront_accel_pkt_desc *buf = NULL;
342 unsigned long flags = 0;
/* Pool empty: bail out early. */
345 if (manager->first_free == -1)
348 if (manager->internally_locked)
349 spin_lock_irqsave(manager->lock, flags);
/* Unlink the head buffer from the LIFO free list. */
350 bufno = manager->first_free;
352 buf = netfront_accel_buf_find(manager, bufno);
353 manager->first_free = buf->next_free;
357 if (manager->internally_locked)
358 spin_unlock_irqrestore(manager->lock, flags);
361 VPRINTK("Allocated buffer %i, buffaddr %x\n", bufno,
368 /* Release a buffer back to the buffer manager pool */
/*
 * Push buffer @id back onto the head of the free list, taking the lock
 * only if this manager owns it.  was_empty records whether the pool was
 * empty before the put (elided lines presumably use it, e.g. to notify
 * a waiter that buffers are available again — confirm against the full
 * source).  BUG()s on the reserved id (u16)-1.
 */
369 int netfront_accel_buf_put(struct netfront_accel_bufinfo *manager, u16 id)
371 netfront_accel_pkt_desc *buf = netfront_accel_buf_find(manager, id);
372 unsigned long flags = 0;
373 unsigned was_empty = 0;
376 VPRINTK("Freeing buffer %i\n", id);
/* (u16)-1 is never a valid buffer id. */
377 BUG_ON(id == (u16)-1);
379 if (manager->internally_locked)
380 spin_lock_irqsave(manager->lock, flags);
/* Note the empty->non-empty transition (used in elided lines). */
382 if (manager->first_free == -1)
/* Link onto the head of the LIFO free list. */
385 buf->next_free = manager->first_free;
386 manager->first_free = bufno;
389 if (manager->internally_locked)
390 spin_unlock_irqrestore(manager->lock, flags);