drivers/net/sfc/sfc_resource/iobufset_resource.c
/****************************************************************************
 * Driver for Solarflare network controllers -
 *          resource management for Xen backend, OpenOnload, etc
 *           (including support for SFE4001 10GBT NIC)
 *
 * This file contains non-contiguous I/O buffers support.
 *
 * Copyright 2005-2007: Solarflare Communications Inc,
 *                      9501 Jeronimo Road, Suite 250,
 *                      Irvine, CA 92618, USA
 *
 * Developed and maintained by Solarflare Communications:
 *                      <linux-xen-drivers@solarflare.com>
 *                      <onload-dev@solarflare.com>
 *
 * Certain parts of the driver were implemented by
 *          Alexandra Kossovsky <Alexandra.Kossovsky@oktetlabs.ru>
 *          OKTET Labs Ltd, Russia,
 *          http://oktetlabs.ru, <info@oktetlabs.ru>
 *          by request of Solarflare Communications
 *
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA  02110-1301  USA
 ****************************************************************************
 */

#include <ci/efrm/nic_table.h>
#include <ci/efhw/iopage.h>
#include <ci/driver/efab/hardware.h>
#include <ci/efrm/private.h>
#include <ci/efrm/iobufset.h>
#include <ci/efrm/vi_resource_manager.h>
#include <ci/efrm/buffer_table.h>
#include <ci/efrm/efrm_client.h>
#include "efrm_internal.h"


#define EFRM_IOBUFSET_MAX_NUM_INSTANCES 0x00010000

struct iobufset_resource_manager {
        struct efrm_resource_manager rm;
        struct kfifo *free_ids;
};
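
/* Note: free_ids is a kfifo pre-filled (by efrm_kfifo_id_ctor() in
 * efrm_create_iobufset_resource_manager() below) with the valid instance
 * numbers.  Allocation pops an id and free pushes it back, so no table has
 * to grow while allocating from atomic (tasklet) context. */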

struct iobufset_resource_manager *efrm_iobufset_manager;

#define iobsrs(rs1)  iobufset_resource(rs1)

/* Returns size of iobufset resource data structure. */
static inline size_t iobsrs_size(int n_pages)
{
        return offsetof(struct iobufset_resource, bufs) +
            n_pages * sizeof(struct efhw_iopage);
}
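
/* iobsrs_size() is compared against PAGE_SIZE by the alloc and free paths
 * below: small objects are kmalloc'd with GFP_ATOMIC (usable from a
 * tasklet), while larger ones fall back to vmalloc()/vfree(), which must
 * not be used from atomic context. */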

void efrm_iobufset_resource_free(struct iobufset_resource *rs)
{
        unsigned int i;
        int id;

        EFRM_RESOURCE_ASSERT_VALID(&rs->rs, 1);

        if (!rs->linked && rs->buf_tbl_alloc.base != (unsigned) -1)
                efrm_buffer_table_free(&rs->buf_tbl_alloc);

        /* See the comment on the call to efhw_iopage_alloc() in the alloc
         * routine below for discussion of the use of
         * efrm_nic_tablep->a_nic here. */
        EFRM_ASSERT(efrm_nic_tablep->a_nic);
        if (rs->linked) {
                /* Nothing to do. */
        } else if (rs->chunk_order == 0) {
                for (i = 0; i < rs->n_bufs; ++i)
                        efhw_iopage_free(efrm_nic_tablep->a_nic, &rs->bufs[i]);
        } else {
                /* It is important that this is executed in increasing page
                 * order because some implementations of
                 * efhw_iopages_init_from_iopage() assume this. */
                for (i = 0; i < rs->n_bufs;
                     i += rs->pages_per_contiguous_chunk) {
                        struct efhw_iopages iopages;
                        efhw_iopages_init_from_iopage(&iopages, &rs->bufs[i],
                                                      rs->chunk_order);
                        efhw_iopages_free(efrm_nic_tablep->a_nic, &iopages);
                }
        }

        /* Free the instance number. */
        id = EFRM_RESOURCE_INSTANCE(rs->rs.rs_handle);
        EFRM_VERIFY_EQ(kfifo_put(efrm_iobufset_manager->free_ids,
                                 (unsigned char *)&id, sizeof(id)), sizeof(id));

        efrm_vi_resource_release(rs->evq);
        if (rs->linked)
                efrm_iobufset_resource_release(rs->linked);

        efrm_client_put(rs->rs.rs_client);
        if (iobsrs_size(rs->n_bufs) < PAGE_SIZE) {
                EFRM_DO_DEBUG(memset(rs, 0, sizeof(*rs)));
                kfree(rs);
        } else {
                EFRM_DO_DEBUG(memset(rs, 0, sizeof(*rs)));
                vfree(rs);
        }
}
EXPORT_SYMBOL(efrm_iobufset_resource_free);


void efrm_iobufset_resource_release(struct iobufset_resource *iobrs)
{
        if (__efrm_resource_release(&iobrs->rs))
                efrm_iobufset_resource_free(iobrs);
}
EXPORT_SYMBOL(efrm_iobufset_resource_release);



int
efrm_iobufset_resource_alloc(int32_t n_pages,
                             int32_t pages_per_contiguous_chunk,
                             struct vi_resource *vi_evq,
                             struct iobufset_resource *linked,
                             bool phys_addr_mode,
                             struct iobufset_resource **iobrs_out)
{
        struct iobufset_resource *iobrs;
        int rc, instance, object_size;
        unsigned int i;

        EFRM_ASSERT(iobrs_out);
        EFRM_ASSERT(efrm_iobufset_manager);
        EFRM_RESOURCE_MANAGER_ASSERT_VALID(&efrm_iobufset_manager->rm);
        EFRM_RESOURCE_ASSERT_VALID(&vi_evq->rs, 0);
        EFRM_ASSERT(EFRM_RESOURCE_TYPE(vi_evq->rs.rs_handle) ==
                    EFRM_RESOURCE_VI);
        EFRM_ASSERT(efrm_nic_tablep->a_nic);

        if (linked) {
                /* This resource will share properties and memory with
                 * another.  The only difference is that we'll program it
                 * into the buffer table of another NIC.
                 */
                n_pages = linked->n_bufs;
                pages_per_contiguous_chunk = linked->pages_per_contiguous_chunk;
                phys_addr_mode = linked->buf_tbl_alloc.base == (unsigned) -1;
        }

        /* Allocate the resource data structure. */
        object_size = iobsrs_size(n_pages);
        if (object_size < PAGE_SIZE) {
                /* This should be OK from a tasklet.  An atomic alloc is
                 * necessary because this can be called from an iSCSI
                 * context that is !in_interrupt() but in_atomic() - see
                 * BUG3163. */
                iobrs = kmalloc(object_size, GFP_ATOMIC);
        } else {                /* can't do this within a tasklet */
#ifndef NDEBUG
                if (in_interrupt() || in_atomic()) {
                        EFRM_ERR("%s(): alloc->u.iobufset.in_n_pages=%d",
                                 __func__, n_pages);
                        EFRM_ASSERT(!in_interrupt());
                        EFRM_ASSERT(!in_atomic());
                }
#endif
                iobrs = (struct iobufset_resource *) vmalloc(object_size);
        }
        if (iobrs == NULL) {
                EFRM_WARN("%s: failed to allocate container", __func__);
                rc = -ENOMEM;
                goto fail1;
        }

        /* Allocate an instance number. */
        rc = kfifo_get(efrm_iobufset_manager->free_ids,
                       (unsigned char *)&instance, sizeof(instance));
        if (rc != sizeof(instance)) {
                EFRM_WARN("%s: out of instances", __func__);
                EFRM_ASSERT(rc == 0);
                rc = -EBUSY;
                goto fail3;
        }

        efrm_resource_init(&iobrs->rs, EFRM_RESOURCE_IOBUFSET, instance);

        iobrs->evq = vi_evq;
        iobrs->linked = linked;
        iobrs->n_bufs = n_pages;
        iobrs->pages_per_contiguous_chunk = pages_per_contiguous_chunk;
        iobrs->chunk_order = fls(iobrs->pages_per_contiguous_chunk - 1);
        iobrs->buf_tbl_alloc.base = (unsigned) -1;
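
        /* Note: chunk_order, set just above, is the ceiling of
         * log2(pages_per_contiguous_chunk); e.g. (illustrative value) 4
         * pages per chunk gives fls(3) == 2, i.e. order-2 chunk allocations
         * below.  buf_tbl_alloc.base == (unsigned) -1 is the "no buffer
         * table entries" sentinel tested in this function and in
         * efrm_iobufset_resource_free(). */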

        EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " %u pages", __func__,
                   EFRM_RESOURCE_PRI_ARG(iobrs->rs.rs_handle), iobrs->n_bufs);

        /* Allocate the iobuffers. */
        if (linked) {
                memcpy(iobrs->bufs, linked->bufs,
                       iobrs->n_bufs * sizeof(iobrs->bufs[0]));
        } else if (iobrs->chunk_order == 0) {
                memset(iobrs->bufs, 0, iobrs->n_bufs * sizeof(iobrs->bufs[0]));
                for (i = 0; i < iobrs->n_bufs; ++i) {
                        /* Due to bug2426 we have to specify a NIC when
                         * allocating a DMAable page, which is a bit messy.
                         * For now we assume that if the page is suitable
                         * (e.g. DMAable) for one NIC (efrm_nic_tablep->a_nic),
                         * it is suitable for all NICs.
                         * XXX I bet that breaks in Solaris.
                         */
                        rc = efhw_iopage_alloc(efrm_nic_tablep->a_nic,
                                               &iobrs->bufs[i]);
                        if (rc < 0) {
                                EFRM_WARN("%s: failed (rc %d) to allocate "
                                          "page (i=%u)", __func__, rc, i);
                                goto fail4;
                        }
                }
        } else {
                struct efhw_iopages iopages;
                unsigned j;

                memset(iobrs->bufs, 0, iobrs->n_bufs * sizeof(iobrs->bufs[0]));
                for (i = 0; i < iobrs->n_bufs;
                     i += iobrs->pages_per_contiguous_chunk) {
                        rc = efhw_iopages_alloc(efrm_nic_tablep->a_nic,
                                                &iopages, iobrs->chunk_order);
                        if (rc < 0) {
                                EFRM_WARN("%s: failed (rc %d) to allocate "
                                          "pages (i=%u order %d)",
                                          __func__, rc, i,
                                          iobrs->chunk_order);
                                goto fail4;
                        }
                        for (j = 0; j < iobrs->pages_per_contiguous_chunk;
                             j++) {
                                /* Some implementations of
                                 * efhw_iopage_init_from_iopages() rely on it
                                 * being called for _all_ pages in the
                                 * chunk. */
                                efhw_iopage_init_from_iopages(
                                                        &iobrs->bufs[i + j],
                                                        &iopages, j);
                        }
                }
        }
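
        /* Illustrative numbers for the chunked branch above: with
         * n_bufs == 32 and pages_per_contiguous_chunk == 4 (chunk_order 2),
         * the loop makes 8 order-2 efhw_iopages_alloc() calls and fills four
         * consecutive bufs[] entries from each chunk, so bufs[] always ends
         * up holding one efhw_iopage per page regardless of chunk size. */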

        if (!phys_addr_mode) {
                unsigned owner_id = EFAB_VI_RESOURCE_INSTANCE(iobrs->evq);

                if (!linked) {
                        /* Allocate space in the NIC's buffer table. */
                        rc = efrm_buffer_table_alloc(fls(iobrs->n_bufs - 1),
                                                     &iobrs->buf_tbl_alloc);
                        if (rc < 0) {
                                EFRM_WARN("%s: failed (%d) to alloc %d buffer "
                                          "table entries", __func__, rc,
                                          iobrs->n_bufs);
                                goto fail5;
                        }
                        EFRM_ASSERT(((unsigned)1 << iobrs->buf_tbl_alloc.order)
                                    >= (unsigned) iobrs->n_bufs);
                } else {
                        iobrs->buf_tbl_alloc = linked->buf_tbl_alloc;
                }

                /* Initialise the buffer table entries. */
                for (i = 0; i < iobrs->n_bufs; ++i) {
                        /* ?? TODO: burst them! */
                        efrm_buffer_table_set(&iobrs->buf_tbl_alloc,
                                              vi_evq->rs.rs_client->nic, i,
                                              efhw_iopage_dma_addr(&iobrs->bufs[i]),
                                              owner_id);
                }
                efrm_buffer_table_commit();
        }
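
        /* At this point, in buffer-table mode, the DMA address of every page
         * has been written into the allocated buffer table entries with the
         * event queue's VI instance as owner, and efrm_buffer_table_commit()
         * has been called to commit those updates.  In phys_addr_mode the
         * whole block above is skipped and no buffer table entries are
         * used. */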

        EFRM_TRACE("%s: " EFRM_RESOURCE_FMT " %d pages @ "
                   EFHW_BUFFER_ADDR_FMT, __func__,
                   EFRM_RESOURCE_PRI_ARG(iobrs->rs.rs_handle),
                   iobrs->n_bufs, EFHW_BUFFER_ADDR(iobrs->buf_tbl_alloc.base,
                                                   0));
        efrm_resource_ref(&iobrs->evq->rs);
        if (linked != NULL)
                efrm_resource_ref(&linked->rs);
        efrm_client_add_resource(vi_evq->rs.rs_client, &iobrs->rs);
        *iobrs_out = iobrs;
        return 0;

fail5:
        i = iobrs->n_bufs;
fail4:
        /* See the comment on the call to efhw_iopage_alloc() above for a
         * discussion of the use of efrm_nic_tablep->a_nic here. */
        if (linked) {
                /* Nothing to do. */
        } else if (iobrs->chunk_order == 0) {
                while (i--) {
                        struct efhw_iopage *page = &iobrs->bufs[i];
                        efhw_iopage_free(efrm_nic_tablep->a_nic, page);
                }
        } else {
                unsigned int j;
                for (j = 0; j < i; j += iobrs->pages_per_contiguous_chunk) {
                        struct efhw_iopages iopages;

                        EFRM_ASSERT(j % iobrs->pages_per_contiguous_chunk
                                    == 0);
                        /* It is important that this is executed in increasing
                         * page order because some implementations of
                         * efhw_iopages_init_from_iopage() assume this. */
                        efhw_iopages_init_from_iopage(&iopages,
                                                      &iobrs->bufs[j],
                                                      iobrs->chunk_order);
                        efhw_iopages_free(efrm_nic_tablep->a_nic, &iopages);
                }
        }
fail3:
        if (object_size < PAGE_SIZE)
                kfree(iobrs);
        else
                vfree(iobrs);
fail1:
        return rc;
}
EXPORT_SYMBOL(efrm_iobufset_resource_alloc);
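
/* Illustrative call sequence (a sketch, not code from this driver): a caller
 * that already holds a VI / event queue resource might do
 *
 *     struct iobufset_resource *iobrs;
 *     int rc = efrm_iobufset_resource_alloc(n_pages, pages_per_chunk,
 *                                           vi_evq, NULL, false, &iobrs);
 *     if (rc == 0) {
 *             ... use iobrs->bufs[] / the buffer table mapping ...
 *             efrm_iobufset_resource_release(iobrs);
 *     }
 *
 * where n_pages, pages_per_chunk and vi_evq stand for the caller's own
 * values; the release drops the reference and frees the set once it is the
 * last holder. */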

static void iobufset_rm_dtor(struct efrm_resource_manager *rm)
{
        EFRM_ASSERT(&efrm_iobufset_manager->rm == rm);
        kfifo_vfree(efrm_iobufset_manager->free_ids);
}

int
efrm_create_iobufset_resource_manager(struct efrm_resource_manager **rm_out)
{
        int rc, max;

        EFRM_ASSERT(rm_out);

        efrm_iobufset_manager =
            kmalloc(sizeof(*efrm_iobufset_manager), GFP_KERNEL);
        if (efrm_iobufset_manager == NULL)
                return -ENOMEM;
        memset(efrm_iobufset_manager, 0, sizeof(*efrm_iobufset_manager));

        /*
         * Bug 1145, 1370: We need to set the initial size of both the
         * resource table and the instance id table so they never need to
         * grow, because we want to be able to allocate new iobufsets at
         * tasklet time.  Let's make a pessimistic guess at the maximum
         * number of iobufsets possible.  It could be less because
         *   - jumbo frames have the same number of packets per iobufset
         *     but more pages per buffer;
         *   - buffer table entries are used independently of iobufsets by
         *     sendfile.
         *
         * Based on the TCP/IP stack setting of PKTS_PER_SET_S=5 ...
         *  - we can't use that define here as it would break the layering.
         */
#define MIN_PAGES_PER_IOBUFSET  (1 << 4)

        max = efrm_buffer_table_size() / MIN_PAGES_PER_IOBUFSET;
        max = min_t(int, max, EFRM_IOBUFSET_MAX_NUM_INSTANCES);
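
        /* Sizing example with purely illustrative numbers: if
         * efrm_buffer_table_size() returned 1 << 17 entries, the lines above
         * would give max = (1 << 17) / MIN_PAGES_PER_IOBUFSET = 8192, well
         * below EFRM_IOBUFSET_MAX_NUM_INSTANCES (0x10000).  The hard-coded
         * value below overrides this calculation anyway (see the HACK
         * comment). */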

        /* HACK: There currently exists an option to allocate buffers that
         * are not programmed into the buffer table, so the max number is
         * not limited by the buffer table size.  I'm hoping this usage
         * will go away eventually.
         */
        max = 32768;

        rc = efrm_kfifo_id_ctor(&efrm_iobufset_manager->free_ids,
                                0, max, &efrm_iobufset_manager->rm.rm_lock);
        if (rc != 0)
                goto fail1;

        rc = efrm_resource_manager_ctor(&efrm_iobufset_manager->rm,
                                        iobufset_rm_dtor, "IOBUFSET",
                                        EFRM_RESOURCE_IOBUFSET);
        if (rc < 0)
                goto fail2;

        *rm_out = &efrm_iobufset_manager->rm;
        return 0;

fail2:
        kfifo_vfree(efrm_iobufset_manager->free_ids);
fail1:
        EFRM_DO_DEBUG(memset(efrm_iobufset_manager, 0,
                             sizeof(*efrm_iobufset_manager)));
        kfree(efrm_iobufset_manager);
        return rc;
}